author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-19 19:45:58 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-19 19:45:58 +0300
commit     249be8511b269495bc95cb8bdfdd5840b2ba73c0 (patch)
tree       6920bde053faa0284b52b2a9c9695f5516520377 /drivers
parent     3bfe1fc46794631366faa3ef075e1b0ff7ba120a (diff)
parent     eec4844fae7c033a0c1fc1eb3b8517aeb8b6cc49 (diff)
download   linux-249be8511b269495bc95cb8bdfdd5840b2ba73c0.tar.xz
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "The rest of MM and a kernel-wide procfs cleanup.

  Summary of the more significant patches:

   - Patch series "mm/memory_hotplug: Factor out memory block device
     handling", v3. David Hildenbrand.

     Some spring-cleaning of the memory hotplug code, notably in
     drivers/base/memory.c

   - "mm: thp: fix false negative of shmem vma's THP eligibility". Yang
     Shi.

     Fix /proc/pid/smaps output for THP pages used in shmem.

   - "resource: fix locking in find_next_iomem_res()" + 1. Nadav Amit.

     Bugfix and speedup for kernel/resource.c

   - Patch series "mm: Further memory block device cleanups", David
     Hildenbrand.

     More spring-cleaning of the memory hotplug code.

   - Patch series "mm: Sub-section memory hotplug support". Dan
     Williams.

     Generalise the memory hotplug code so that pmem can use it more
     completely. Then remove the hacks from the libnvdimm code which
     were there to work around the memory-hotplug code's constraints.

   - "proc/sysctl: add shared variables for range check", Matteo Croce.

     We have about 250 instances of

          int zero;
          ...
          .extra1 = &zero,

     in the tree. This is a tree-wide sweep to make all those private
     "zero"s and "one"s use global variables. Alas, it isn't practical
     to make those two global integers const"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  proc/sysctl: add shared variables for range check
  mm: migrate: remove unused mode argument
  mm/sparsemem: cleanup 'section number' data types
  libnvdimm/pfn: stop padding pmem namespaces to section alignment
  libnvdimm/pfn: fix fsdax-mode namespace info-block zero-fields
  mm/devm_memremap_pages: enable sub-section remap
  mm: document ZONE_DEVICE memory-model implications
  mm/sparsemem: support sub-section hotplug
  mm/sparsemem: prepare for sub-section ranges
  mm: kill is_dev_zone() helper
  mm/hotplug: kill is_dev_zone() usage in __remove_pages()
  mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
  mm/hotplug: prepare shrink_{zone, pgdat}_span for sub-section removal
  mm/sparsemem: add helpers track active portions of a section at boot
  mm/sparsemem: introduce a SECTION_IS_EARLY flag
  mm/sparsemem: introduce struct mem_section_usage
  drivers/base/memory.c: get rid of find_memory_block_hinted()
  mm/memory_hotplug: move and simplify walk_memory_blocks()
  mm/memory_hotplug: rename walk_memory_range() and pass start+size instead of pfns
  mm: make register_mem_sect_under_node() static
  ...
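To make the tree-wide sysctl conversion concrete, a minimal sketch follows (not taken from this commit; the table and knob names are hypothetical). Each affected driver drops its file-private "static int zero; static int one = 1;" pair and points the range-check bounds at the shared SYSCTL_ZERO/SYSCTL_ONE constants from linux/sysctl.h instead:

    #include <linux/sysctl.h>

    static int example_knob;    /* hypothetical 0/1 tunable */

    static struct ctl_table example_table[] = {
        {
            .procname     = "example_knob",
            .data         = &example_knob,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec_minmax,
            /* was: .extra1 = &zero, .extra2 = &one */
            .extra1       = SYSCTL_ZERO,
            .extra2       = SYSCTL_ONE,
        },
        { }
    };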
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_memhotplug.c                  19
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c   13
-rw-r--r--  drivers/base/memory.c                          219
-rw-r--r--  drivers/base/node.c                             35
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c                 8
-rw-r--r--  drivers/hv/vmbus_drv.c                           6
-rw-r--r--  drivers/nvdimm/dax_devs.c                        2
-rw-r--r--  drivers/nvdimm/pfn.h                            15
-rw-r--r--  drivers/nvdimm/pfn_devs.c                       95
-rw-r--r--  drivers/tty/tty_ldisc.c                          6
-rw-r--r--  drivers/xen/balloon.c                            7
11 files changed, 197 insertions(+), 228 deletions(-)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index db013dc21c02..e294f44a7850 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -155,16 +155,6 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
return 0;
}
-static unsigned long acpi_meminfo_start_pfn(struct acpi_memory_info *info)
-{
- return PFN_DOWN(info->start_addr);
-}
-
-static unsigned long acpi_meminfo_end_pfn(struct acpi_memory_info *info)
-{
- return PFN_UP(info->start_addr + info->length-1);
-}
-
static int acpi_bind_memblk(struct memory_block *mem, void *arg)
{
return acpi_bind_one(&mem->dev, arg);
@@ -173,9 +163,8 @@ static int acpi_bind_memblk(struct memory_block *mem, void *arg)
static int acpi_bind_memory_blocks(struct acpi_memory_info *info,
struct acpi_device *adev)
{
- return walk_memory_range(acpi_meminfo_start_pfn(info),
- acpi_meminfo_end_pfn(info), adev,
- acpi_bind_memblk);
+ return walk_memory_blocks(info->start_addr, info->length, adev,
+ acpi_bind_memblk);
}
static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
@@ -186,8 +175,8 @@ static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
{
- walk_memory_range(acpi_meminfo_start_pfn(info),
- acpi_meminfo_end_pfn(info), NULL, acpi_unbind_memblk);
+ walk_memory_blocks(info->start_addr, info->length, NULL,
+ acpi_unbind_memblk);
}
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 776dd69cf5be..ba9d30b28edc 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -16,9 +16,6 @@
* firmware fallback configuration table
*/
-static unsigned int zero;
-static unsigned int one = 1;
-
struct firmware_fallback_config fw_fallback_config = {
.force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
.loading_timeout = 60,
@@ -26,6 +23,7 @@ struct firmware_fallback_config fw_fallback_config = {
};
EXPORT_SYMBOL_GPL(fw_fallback_config);
+#ifdef CONFIG_SYSCTL
struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
@@ -33,8 +31,8 @@ struct ctl_table firmware_config_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "ignore_sysfs_fallback",
@@ -42,9 +40,10 @@ struct ctl_table firmware_config_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
EXPORT_SYMBOL_GPL(firmware_config_table);
+#endif
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f180427e48f4..20c39d1bcef8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -34,11 +34,21 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
static int sections_per_block;
-static inline int base_memory_block_id(int section_nr)
+static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
return section_nr / sections_per_block;
}
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return base_memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -126,9 +136,9 @@ static ssize_t phys_index_show(struct device *dev,
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- unsigned long i, pfn;
- int ret = 1;
struct memory_block *mem = to_memory_block(dev);
+ unsigned long pfn;
+ int ret = 1, i;
if (mem->state != MEM_ONLINE)
goto out;
@@ -578,23 +588,13 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
-/*
- * A reference for the returned object is held and the reference for the
- * hinted object is released.
- */
-struct memory_block *find_memory_block_hinted(struct mem_section *section,
- struct memory_block *hint)
+/* A reference for the returned memory block device is acquired. */
+static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
- int block_id = base_memory_block_id(__section_nr(section));
- struct device *hintdev = hint ? &hint->dev : NULL;
struct device *dev;
- dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
- if (hint)
- put_device(&hint->dev);
- if (!dev)
- return NULL;
- return to_memory_block(dev);
+ dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
+ return dev ? to_memory_block(dev) : NULL;
}
/*
@@ -607,7 +607,9 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
- return find_memory_block_hinted(section, NULL);
+ unsigned long block_id = base_memory_block_id(__section_nr(section));
+
+ return find_memory_block_by_id(block_id);
}
static struct attribute *memory_memblk_attrs[] = {
@@ -652,20 +654,22 @@ int register_memory(struct memory_block *memory)
}
static int init_memory_block(struct memory_block **memory,
- struct mem_section *section, unsigned long state)
+ unsigned long block_id, unsigned long state)
{
struct memory_block *mem;
unsigned long start_pfn;
- int scn_nr;
int ret = 0;
+ mem = find_memory_block_by_id(block_id);
+ if (mem) {
+ put_device(&mem->dev);
+ return -EEXIST;
+ }
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
- scn_nr = __section_nr(section);
- mem->start_section_nr =
- base_memory_block_id(scn_nr) * sections_per_block;
+ mem->start_section_nr = block_id * sections_per_block;
mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -677,97 +681,101 @@ static int init_memory_block(struct memory_block **memory,
return ret;
}
-static int add_memory_block(int base_section_nr)
+static int add_memory_block(unsigned long base_section_nr)
{
+ int ret, section_count = 0;
struct memory_block *mem;
- int i, ret, section_count = 0, section_nr;
+ unsigned long nr;
- for (i = base_section_nr;
- i < base_section_nr + sections_per_block;
- i++) {
- if (!present_section_nr(i))
- continue;
- if (section_count == 0)
- section_nr = i;
- section_count++;
- }
+ for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
+ nr++)
+ if (present_section_nr(nr))
+ section_count++;
if (section_count == 0)
return 0;
- ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+ ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
+ MEM_ONLINE);
if (ret)
return ret;
mem->section_count = section_count;
return 0;
}
+static void unregister_memory(struct memory_block *memory)
+{
+ if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
+ return;
+
+ /* drop the ref. we got via find_memory_block() */
+ put_device(&memory->dev);
+ device_unregister(&memory->dev);
+}
+
/*
- * need an interface for the VM to add new memory regions,
- * but without onlining it.
+ * Create memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * will be initialized as offline.
*/
-int hotplug_memory_register(int nid, struct mem_section *section)
+int create_memory_block_devices(unsigned long start, unsigned long size)
{
- int ret = 0;
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
- mutex_lock(&mem_sysfs_mutex);
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
+ return -EINVAL;
- mem = find_memory_block(section);
- if (mem) {
- mem->section_count++;
- put_device(&mem->dev);
- } else {
- ret = init_memory_block(&mem, section, MEM_OFFLINE);
+ mutex_lock(&mem_sysfs_mutex);
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
- goto out;
- mem->section_count++;
+ break;
+ mem->section_count = sections_per_block;
+ }
+ if (ret) {
+ end_block_id = block_id;
+ for (block_id = start_block_id; block_id != end_block_id;
+ block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ mem->section_count = 0;
+ unregister_memory(mem);
+ }
}
-
-out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-static void
-unregister_memory(struct memory_block *memory)
-{
- BUG_ON(memory->dev.bus != &memory_subsys);
-
- /* drop the ref. we got via find_memory_block() */
- put_device(&memory->dev);
- device_unregister(&memory->dev);
-}
-
-void unregister_memory_section(struct mem_section *section)
+/*
+ * Remove memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * have to be offline.
+ */
+void remove_memory_block_devices(unsigned long start, unsigned long size)
{
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
+ unsigned long block_id;
- if (WARN_ON_ONCE(!present_section(section)))
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
return;
mutex_lock(&mem_sysfs_mutex);
-
- /*
- * Some users of the memory hotplug do not want/need memblock to
- * track all sections. Skip over those.
- */
- mem = find_memory_block(section);
- if (!mem)
- goto out_unlock;
-
- unregister_mem_sect_under_nodes(mem, __section_nr(section));
-
- mem->section_count--;
- if (mem->section_count == 0)
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ mem->section_count = 0;
+ unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
- else
- put_device(&mem->dev);
-
-out_unlock:
+ }
mutex_unlock(&mem_sysfs_mutex);
}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
@@ -804,10 +812,9 @@ static const struct attribute_group *memory_root_attr_groups[] = {
*/
int __init memory_dev_init(void)
{
- unsigned int i;
int ret;
int err;
- unsigned long block_sz;
+ unsigned long block_sz, nr;
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
@@ -821,9 +828,9 @@ int __init memory_dev_init(void)
* during boot and have been initialized
*/
mutex_lock(&mem_sysfs_mutex);
- for (i = 0; i <= __highest_present_section_nr;
- i += sections_per_block) {
- err = add_memory_block(i);
+ for (nr = 0; nr <= __highest_present_section_nr;
+ nr += sections_per_block) {
+ err = add_memory_block(nr);
if (!ret)
ret = err;
}
@@ -834,3 +841,43 @@ out:
printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
return ret;
}
+
+/**
+ * walk_memory_blocks - walk through all present memory blocks overlapped
+ * by the range [start, start + size)
+ *
+ * @start: start address of the memory range
+ * @size: size of the memory range
+ * @arg: argument passed to func
+ * @func: callback for each memory section walked
+ *
+ * This function walks through all present memory blocks overlapped by the
+ * range [start, start + size), calling func on each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func)
+{
+ const unsigned long start_block_id = phys_to_block_id(start);
+ const unsigned long end_block_id = phys_to_block_id(start + size - 1);
+ struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
+
+ if (!size)
+ return 0;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ ret = func(mem, arg);
+ put_device(&mem->dev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
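As a usage note for the walk_memory_blocks() interface added above, here is a hedged sketch, not part of this commit; the callback and wrapper names are hypothetical, and it assumes the prototype is picked up via linux/memory.h. Callers pass a physical start address and size, unlike the pfn-based walk_memory_range() that this series replaces, and the callback sees each present memory block device:

    #include <linux/memory.h>

    static int count_block(struct memory_block *mem, void *arg)
    {
        unsigned long *count = arg;

        (*count)++;    /* a non-zero return would abort the walk */
        return 0;
    }

    static unsigned long count_blocks_in_range(unsigned long start,
                                               unsigned long size)
    {
        unsigned long count = 0;

        /* start/size are physical addresses, not pfns */
        walk_memory_blocks(start, size, &count, count_block);
        return count;
    }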
diff --git a/drivers/base/node.c b/drivers/base/node.c
index aa878fbcf705..75b7e6f6535b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -753,7 +753,8 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+static int register_mem_sect_under_node(struct memory_block *mem_blk,
+ void *arg)
{
int ret, nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
@@ -802,23 +803,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
return 0;
}
-/* unregister memory section under all nodes that it spans */
-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index)
+/*
+ * Unregister memory block device under all nodes that it spans.
+ * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ */
+void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
- NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
unsigned long pfn, sect_start_pfn, sect_end_pfn;
+ static nodemask_t unlinked_nodes;
- if (!mem_blk) {
- NODEMASK_FREE(unlinked_nodes);
- return -EFAULT;
- }
- if (!unlinked_nodes)
- return -ENOMEM;
- nodes_clear(*unlinked_nodes);
-
- sect_start_pfn = section_nr_to_pfn(phys_index);
- sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+ nodes_clear(unlinked_nodes);
+ sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int nid;
@@ -827,21 +823,20 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (!node_online(nid))
continue;
- if (node_test_and_set(nid, *unlinked_nodes))
+ if (node_test_and_set(nid, unlinked_nodes))
continue;
sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
sysfs_remove_link(&mem_blk->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
}
- NODEMASK_FREE(unlinked_nodes);
- return 0;
}
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
- return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
- register_mem_sect_under_node);
+ return walk_memory_blocks(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
+ register_mem_sect_under_node);
}
#ifdef CONFIG_HUGETLBFS
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3d8162d28730..a700c5c3d167 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -274,8 +274,6 @@
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
-static int zero;
-static int one = 1;
static u32 i915_perf_stream_paranoid = true;
/* The maximum exponent the hardware accepts is 63 (essentially it selects one
@@ -3366,8 +3364,8 @@ static struct ctl_table oa_table[] = {
.maxlen = sizeof(i915_perf_stream_paranoid),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "oa_max_sample_rate",
@@ -3375,7 +3373,7 @@ static struct ctl_table oa_table[] = {
.maxlen = sizeof(i915_oa_max_sample_rate),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = SYSCTL_ZERO,
.extra2 = &oa_sample_rate_hard_limit,
},
{}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 894da5abdc55..ebd35fc35290 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1197,8 +1197,6 @@ static struct kmsg_dumper hv_kmsg_dumper = {
};
static struct ctl_table_header *hv_ctl_table_hdr;
-static int zero;
-static int one = 1;
/*
* sysctl option to allow the user to control whether kmsg data should be
@@ -1211,8 +1209,8 @@ static struct ctl_table hv_ctl_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
},
{}
};
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 49fc18ee0565..6d22b0f83b3b 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -118,7 +118,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!dax_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index f58b849e455b..7381673b7b70 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -28,22 +28,9 @@ struct nd_pfn_sb {
__le32 end_trunc;
/* minor-version-2 record the base alignment of the mapping */
__le32 align;
+ /* minor-version-3 guarantee the padding and flags are zero */
u8 padding[4000];
__le64 checksum;
};
-#ifdef CONFIG_SPARSEMEM
-#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
-#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
-#else
-/*
- * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
- * but we still want pmem to compile.
- */
-#define PFN_SECTION_ALIGN_DOWN(x) (x)
-#define PFN_SECTION_ALIGN_UP(x) (x)
-#endif
-
-#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
-#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
#endif /* __NVDIMM_PFN_H */
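The macros removed above rounded pfns up or down to full sparsemem sections (typically 128 MiB on x86-64), which is what forced pmem namespaces to be padded and truncated to section boundaries. With the sub-section hotplug support merged here, pfn_devs.c switches to the SUBSECTION_ALIGN_* helpers instead. Roughly how those work, as an assumed sketch (the real definitions come from the sparsemem sub-section patches in this merge, not from this header; the 21-bit shift, i.e. 2 MiB sub-sections, is the assumed x86-64 value):

    #define SUBSECTION_SHIFT         21    /* 2 MiB sub-sections (assumed) */
    #define PFN_SUBSECTION_SHIFT     (SUBSECTION_SHIFT - PAGE_SHIFT)
    #define PAGES_PER_SUBSECTION     (1UL << PFN_SUBSECTION_SHIFT)
    #define PAGE_SUBSECTION_MASK     (~(PAGES_PER_SUBSECTION - 1))

    #define SUBSECTION_ALIGN_UP(pfn)    ALIGN((pfn), PAGES_PER_SUBSECTION)
    #define SUBSECTION_ALIGN_DOWN(pfn)  ((pfn) & PAGE_SUBSECTION_MASK)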
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 55fb6b7433ed..df2bdbd22450 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -412,6 +412,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
return 0;
}
+/**
+ * nd_pfn_validate - read and validate info-block
+ * @nd_pfn: fsdax namespace runtime state / properties
+ * @sig: 'devdax' or 'fsdax' signature
+ *
+ * Upon return the info-block buffer contents (->pfn_sb) are
+ * indeterminate when validation fails, and a coherent info-block
+ * otherwise.
+ */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -557,7 +566,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!pfn_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
@@ -578,14 +587,14 @@ static u32 info_block_reserve(void)
}
/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
+ * We hotplug memory at sub-section granularity, pad the reserved area
+ * from the previous section base to the namespace base address.
*/
static unsigned long init_altmap_base(resource_size_t base)
{
unsigned long base_pfn = PHYS_PFN(base);
- return PFN_SECTION_ALIGN_DOWN(base_pfn);
+ return SUBSECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
@@ -593,7 +602,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
- reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+ reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
return reserve;
}
@@ -623,8 +632,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
- - offset) / PAGE_SIZE);
+ nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
@@ -640,60 +648,20 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
return 0;
}
-static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
-{
- return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
- ALIGN_DOWN(phys, nd_pfn->align));
-}
-
-/*
- * Check if pmem collides with 'System RAM', or other regions when
- * section aligned. Trim it accordingly.
- */
-static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
-{
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
- const resource_size_t start = nsio->res.start;
- const resource_size_t end = start + resource_size(&nsio->res);
- resource_size_t adjust, size;
-
- *start_pad = 0;
- *end_trunc = 0;
-
- adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
- size = resource_size(&nsio->res) + adjust;
- if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || nd_region_conflict(nd_region, start - adjust, size))
- *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-
- /* Now check that end of the range does not collide. */
- adjust = PHYS_SECTION_ALIGN_UP(end) - end;
- size = resource_size(&nsio->res) + adjust;
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || !IS_ALIGNED(end, nd_pfn->align)
- || nd_region_conflict(nd_region, start, size))
- *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
-}
-
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- u32 start_pad, end_trunc, reserve = info_block_reserve();
resource_size_t start, size;
struct nd_region *nd_region;
+ unsigned long npfns, align;
struct nd_pfn_sb *pfn_sb;
- unsigned long npfns;
phys_addr_t offset;
const char *sig;
u64 checksum;
int rc;
- pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb)
return -ENOMEM;
@@ -702,11 +670,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = DAX_SIG;
else
sig = PFN_SIG;
+
rc = nd_pfn_validate(nd_pfn, sig);
if (rc != -ENODEV)
return rc;
/* no info block, do init */;
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
@@ -715,43 +686,35 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- memset(pfn_sb, 0, sizeof(*pfn_sb));
-
- trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
- if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
- dev_name(&ndns->dev), start_pad + end_trunc);
-
/*
* Note, we use 64 here for the standard size of struct page,
* debugging options may cause it to be larger in which case the
* implementation will limit the pfns advertised through
* ->direct_access() to those that are included in the memmap.
*/
- start = nsio->res.start + start_pad;
+ start = nsio->res.start;
size = resource_size(&nsio->res);
- npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
- / PAGE_SIZE);
+ npfns = PHYS_PFN(size - SZ_8K);
+ align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*/
- offset = ALIGN(start + reserve + 64 * npfns,
- max(nd_pfn->align, PMD_SIZE)) - start;
+ offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + reserve, nd_pfn->align) - start;
+ offset = ALIGN(start + SZ_8K, align) - start;
else
return -ENXIO;
- if (offset + start_pad + end_trunc >= size) {
+ if (offset >= size) {
dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
dev_name(&ndns->dev));
return -ENXIO;
}
- npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+ npfns = PHYS_PFN(size - offset);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
@@ -759,9 +722,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(2);
- pfn_sb->start_pad = cpu_to_le32(start_pad);
- pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
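To see how the new info-block layout in nd_pfn_init() comes out, a worked example with assumed numbers (not from this commit) follows: a 16 GiB fsdax namespace, 4 KiB pages, PFN_MODE_PMEM, a 2 MiB nd_pfn->align, a 2 MiB-aligned start, and SUBSECTION_SHIFT of 21:

    npfns  = PHYS_PFN(16 GiB - SZ_8K)                         ~= 4,194,302 pages
    memmap = 64 bytes * npfns                                 ~= 256 MiB
    align  = max(nd_pfn->align, 1UL << SUBSECTION_SHIFT)       = 2 MiB
    offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start  = 258 MiB
    npfns  = PHYS_PFN(16 GiB - offset)                        ~= 4,128,256 pages

Because sub-section hotplug no longer requires section-aligned namespaces, the start_pad/end_trunc trimming is gone; the info block is now zero-filled via memset() before init and version_minor is bumped to 3, which, per the pfn.h comment above, guarantees the padding and flags fields are zero.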
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index fde8d4073e74..4c49f53afa3e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -855,8 +855,6 @@ void tty_ldisc_deinit(struct tty_struct *tty)
tty->ldisc = NULL;
}
-static int zero;
-static int one = 1;
static struct ctl_table tty_table[] = {
{
.procname = "ldisc_autoload",
@@ -864,8 +862,8 @@ static struct ctl_table tty_table[] = {
.maxlen = sizeof(tty_ldisc_autoload),
.mode = 0644,
.proc_handler = proc_dointvec,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d37dd5bb7a8f..37a36c6b9f93 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -77,9 +77,6 @@ static int xen_hotplug_unpopulated;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-static int zero;
-static int one = 1;
-
static struct ctl_table balloon_table[] = {
{
.procname = "hotplug_unpopulated",
@@ -87,8 +84,8 @@ static struct ctl_table balloon_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};