author		Alexander Duyck <alexander.h.duyck@linux.intel.com>	2018-10-27 01:07:45 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-27 02:26:34 +0300
commit		f682a97a00591def7cefbb5003dc04045028e405 (patch)
tree		cc526a57d98ef506d9514264439230f19c7478dd
parent		85cfb245060e45640fa3447f8b0bad5e8bd3bdaf (diff)
download	linux-f682a97a00591def7cefbb5003dc04045028e405.tar.xz
mm: provide kernel parameter to allow disabling page init poisoning
Patch series "Address issues slowing persistent memory initialization", v5.

The main thing this patch set achieves is that it allows us to initialize
each node worth of persistent memory independently. As a result we reduce
page init time by about 2 minutes because instead of taking 30 to 40 seconds
per node and going through each node one at a time, we process all 4 nodes
in parallel in the case of a 12TB persistent memory setup spread evenly over
4 nodes.

This patch (of 3):

On systems with a large amount of memory it can take a significant amount
of time to initialize all of the page structs with the PAGE_POISON_PATTERN
value. I have seen it take over 2 minutes to initialize a system with over
12TB of RAM.

In order to work around the issue I had to disable CONFIG_DEBUG_VM and then
the boot time returned to something much more reasonable as the
arch_add_memory call completed in milliseconds versus seconds. However in
doing that I had to disable all of the other VM debugging on the system.

In order to work around a kernel that might have CONFIG_DEBUG_VM enabled on
a system that has a large amount of memory I have added a new kernel
parameter named "vm_debug" that can be set to "-" in order to disable it.

Link: http://lkml.kernel.org/r/20180925201921.3576.84239.stgit@localhost.localdomain
Reviewed-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
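For quick reference, the boot-time usage this patch defines can be summarized
as follows (derived from the kernel-parameters.txt hunk and the
setup_vm_debug() parser below; no options other than these exist in this
patch):

	vm_debug	no argument: leave all controllable debug options
			enabled (the default)
	vm_debug=P	enable page structure init time poisoning
	vm_debug=-	disable all of the above, skipping the
			PAGE_POISON_PATTERN memset during page struct init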
-rw-r--r--	Documentation/admin-guide/kernel-parameters.txt	 12
-rw-r--r--	include/linux/page-flags.h	  8
-rw-r--r--	mm/debug.c	 46
-rw-r--r--	mm/memblock.c	  5
-rw-r--r--	mm/sparse.c	  4
5 files changed, 69 insertions(+), 6 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8022d902e770..dcd082576e79 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4839,6 +4839,18 @@
This is actually a boot loader parameter; the value is
passed to the kernel using a special protocol.
+ vm_debug[=options] [KNL] Available with CONFIG_DEBUG_VM=y.
+ May slow down system boot speed, especially when
+ enabled on systems with a large amount of memory.
+ All options are enabled by default, and this
+ interface is meant to allow for selectively
+ enabling or disabling specific virtual memory
+ debugging features.
+
+ Available options are:
+ P Enable page structure init time poisoning
+ - Disable all of the above options
+
vmalloc=nn[KMG] [KNL,BOOT] Forces the vmalloc area to have an exact
size of <nn>. This can be used to increase the
minimum size (128MB on x86). It can also be used to
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4d99504f6496..934f91ef3f54 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -163,6 +163,14 @@ static inline int PagePoisoned(const struct page *page)
return page->flags == PAGE_POISON_PATTERN;
}
+#ifdef CONFIG_DEBUG_VM
+void page_init_poison(struct page *page, size_t size);
+#else
+static inline void page_init_poison(struct page *page, size_t size)
+{
+}
+#endif
+
/*
* Page flags policies wrt compound pages
*
diff --git a/mm/debug.c b/mm/debug.c
index bd10aad8539a..cdacba12e09a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -13,6 +13,7 @@
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
+#include <linux/ctype.h>
#include "internal.h"
@@ -175,4 +176,49 @@ void dump_mm(const struct mm_struct *mm)
);
}
+static bool page_init_poisoning __read_mostly = true;
+
+static int __init setup_vm_debug(char *str)
+{
+ bool __page_init_poisoning = true;
+
+ /*
+ * Calling vm_debug with no arguments is equivalent to requesting
+ * to enable all debugging options we can control.
+ */
+ if (*str++ != '=' || !*str)
+ goto out;
+
+ __page_init_poisoning = false;
+ if (*str == '-')
+ goto out;
+
+ while (*str) {
+ switch (tolower(*str)) {
+ case 'p':
+ __page_init_poisoning = true;
+ break;
+ default:
+ pr_err("vm_debug option '%c' unknown. skipped\n",
+ *str);
+ }
+
+ str++;
+ }
+out:
+ if (page_init_poisoning && !__page_init_poisoning)
+ pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");
+
+ page_init_poisoning = __page_init_poisoning;
+
+ return 1;
+}
+__setup("vm_debug", setup_vm_debug);
+
+void page_init_poison(struct page *page, size_t size)
+{
+ if (page_init_poisoning)
+ memset(page, PAGE_POISON_PATTERN, size);
+}
+EXPORT_SYMBOL_GPL(page_init_poison);
#endif /* CONFIG_DEBUG_VM */
diff --git a/mm/memblock.c b/mm/memblock.c
index 237944479d25..a85315083b5a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1444,10 +1444,9 @@ void * __init memblock_virt_alloc_try_nid_raw(
ptr = memblock_virt_alloc_internal(size, align,
min_addr, max_addr, nid);
-#ifdef CONFIG_DEBUG_VM
if (ptr && size > 0)
- memset(ptr, PAGE_POISON_PATTERN, size);
-#endif
+ page_init_poison(ptr, size);
+
return ptr;
}
diff --git a/mm/sparse.c b/mm/sparse.c
index 10b07eea9a6e..67ad061f7fb8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -696,13 +696,11 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
goto out;
}
-#ifdef CONFIG_DEBUG_VM
/*
* Poison uninitialized struct pages in order to catch invalid flags
* combinations.
*/
- memset(memmap, PAGE_POISON_PATTERN, sizeof(struct page) * PAGES_PER_SECTION);
-#endif
+ page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);
section_mark_present(ms);
sparse_init_one_section(ms, section_nr, memmap, usemap);
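For context only, and not part of the patch: a minimal userspace sketch of the
mechanism the sparse.c comment above refers to, i.e. why filling
not-yet-initialized struct pages with the all-ones PAGE_POISON_PATTERN lets
PagePoisoned() flag pages whose flags were never set up. The struct page here
is a deliberately simplified stand-in, not the kernel definition.

/* Simplified stand-ins for the kernel definitions this patch builds on. */
#include <assert.h>
#include <stddef.h>
#include <string.h>

#define PAGE_POISON_PATTERN	(-1L)	/* all-ones poison value */

struct page {
	unsigned long flags;		/* only the field the poison check inspects */
};

/* Same idea as PagePoisoned() in include/linux/page-flags.h above. */
static int PagePoisoned(const struct page *page)
{
	return page->flags == (unsigned long)PAGE_POISON_PATTERN;
}

/* Same idea as page_init_poison() in mm/debug.c above: memset() with -1
 * writes 0xff into every byte, so flags ends up matching the pattern. */
static void page_init_poison(struct page *page, size_t size)
{
	memset(page, PAGE_POISON_PATTERN, size);
}

int main(void)
{
	struct page p;

	page_init_poison(&p, sizeof(p));
	assert(PagePoisoned(&p));	/* an uninitialized page is detectable */

	p.flags = 0;			/* real initialization clears the pattern */
	assert(!PagePoisoned(&p));
	return 0;
}

In the kernel itself the comparison feeds debug assertions in the page flag
accessors rather than assert(), but the pattern-match idea is the same.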