author		Eric B Munson <emunson@mgebm.net>	2011-01-14 02:47:28 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-14 04:32:49 +0300
commit		73ae31e5986a4c0ee84bfd13ccd9b57a98956f6f
tree		03e8c6e4559d5f77ec8f0fdf79addeecbef65ed9 /mm/hugetlb.c
parent		adbe8726dc2a3805630d517270db17e3af86e526
hugetlb: fix handling of parse errors in sysfs
When parsing changes to the huge page pool sizes made from userspace via the sysfs interface, bogus input values are being covered up by nr_hugepages_store_common and nr_overcommit_hugepages_store returning 0 when strict_strtoul returns an error. This can cause an infinite loop in the nr_hugepages_store code. This patch changes the return value for these functions to -EINVAL when strict_strtoul returns an error.

Signed-off-by: Eric B Munson <emunson@mgebm.net>
Reported-by: CAI Qian <caiqian@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
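Background on the return-value convention (an illustrative sketch, not part of the patch): a sysfs ->store callback returns either the number of bytes it consumed or a negative errno. Returning 0 on bad input reports that nothing was consumed, so the parse error is hidden and the writer sees a write that never makes progress. The sketch below shows the corrected pattern with the same strict_strtoul() parser; the example_store()/example_value names and the attribute wiring are hypothetical, not taken from mm/hugetlb.c.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned long example_value;

/*
 * Sketch of a sysfs store callback following the convention the patch
 * restores: propagate the parser's negative errno (strict_strtoul()
 * gives -EINVAL for malformed input) instead of returning 0, and
 * report the whole buffer as consumed on success.
 */
static ssize_t example_store(struct kobject *kobj,
                             struct kobj_attribute *attr,
                             const char *buf, size_t len)
{
        unsigned long val;
        int err;

        err = strict_strtoul(buf, 10, &val);
        if (err)
                return err;     /* e.g. -EINVAL, never 0 */

        example_value = val;
        return len;             /* number of bytes consumed */
}

static struct kobj_attribute example_attr =
        __ATTR(example, 0200, NULL, example_store);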
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ce8e5bb6f031..bb0b7c128015 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1375,10 +1375,8 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
         err = strict_strtoul(buf, 10, &count);
-        if (err) {
-                err = 0;        /* This seems wrong */
+        if (err)
                 goto out;
-        }
 
         h = kobj_to_hstate(kobj, &nid);
         if (h->order >= MAX_ORDER) {
@@ -1469,7 +1467,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
 
         err = strict_strtoul(buf, 10, &input);
         if (err)
-                return 0;
+                return err;
 
         spin_lock(&hugetlb_lock);
         h->nr_overcommit_huge_pages = input;
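A user-visible way to see the change (hedged sketch; the sysfs path below assumes a 2 MB default huge page size and is not part of the patch): before the fix, writing a malformed value to nr_overcommit_hugepages made write() return 0, which is what let callers spin retrying; with the fix, write() fails with EINVAL.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path; adjust for the system's huge page size. Needs root. */
        const char *path =
                "/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages";
        const char *bogus = "not-a-number\n";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        ssize_t ret = write(fd, bogus, strlen(bogus));
        if (ret < 0)
                printf("write: %s (patched kernel rejects the input)\n",
                       strerror(errno));
        else
                printf("write returned %zd (0 here is the pre-patch symptom)\n",
                       ret);

        close(fd);
        return 0;
}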