author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2017-11-15 17:36:06 +0300
committer  Thomas Gleixner <tglx@linutronix.de>                  2017-11-16 13:43:11 +0300
commit     1e0f25dbf2464df8445dd40881f4d9e732434947
parent     6e2a3064d6a86094fecc20cd430fd96aaa801687
x86/mm: Prevent non-MAP_FIXED mapping across DEFAULT_MAP_WINDOW border

In case of 5-level paging, the kernel does not place any mapping above
47 bits unless userspace explicitly asks for it. Userspace can request an
allocation from the full address space by specifying an mmap address hint
above 47 bits.

Nicholas noticed that the current implementation violates this interface:
if userspace requests a mapping at the end of the 47-bit address space
with a length which causes the mapping to cross the 47-bit border
(DEFAULT_MAP_WINDOW), then the resulting vma is partially below and
partially above the border.

Sanity check the mmap address hint so that the start and end of the
resulting vma are on the same side of the 47-bit border. If that's not
the case, fall back to the code path which ignores the address hint and
allocates from the regular address space below 47 bits.

To make the checks consistent, mask out the address hint's lower bits
(with either PAGE_MASK or huge_page_mask()) instead of using ALIGN(),
which can push them up to the next boundary.

[ tglx: Moved the address check to a function and massaged comment and
  changelog ]

Reported-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/20171115143607.81541-1-kirill.shutemov@linux.intel.com
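
The new code path relies on mmap_address_hint_valid(), whose body lives
outside this file and is therefore not shown in the diffstat-limited view
below. A minimal sketch of such a border check, reconstructed from the
changelog's description rather than copied from the tree, could look like:

/*
 * Hedged sketch: assumes TASK_SIZE and DEFAULT_MAP_WINDOW as used
 * elsewhere in arch/x86; the in-tree helper may differ in detail.
 */
static inline bool mmap_address_hint_valid(unsigned long addr,
                                           unsigned long len)
{
        /* The hinted range must fit into the address space at all. */
        if (TASK_SIZE - len < addr)
                return false;

        /* Start and end must fall on the same side of the 47-bit border. */
        return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
}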
Diffstat (limited to 'arch/x86/mm/hugetlbpage.c')
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8ae0000cbdb3..00b296617ca4 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -158,6 +158,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
         if (len > TASK_SIZE)
                 return -ENOMEM;
 
+        /* No address checking. See comment at mmap_address_hint_valid() */
         if (flags & MAP_FIXED) {
                 if (prepare_hugepage_range(file, addr, len))
                         return -EINVAL;
@@ -165,12 +166,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
         }
 
         if (addr) {
-                addr = ALIGN(addr, huge_page_size(h));
+                addr &= huge_page_mask(h);
+                if (!mmap_address_hint_valid(addr, len))
+                        goto get_unmapped_area;
+
                 vma = find_vma(mm, addr);
-                if (TASK_SIZE - len >= addr &&
-                    (!vma || addr + len <= vm_start_gap(vma)))
+                if (!vma || addr + len <= vm_start_gap(vma))
                         return addr;
         }
+
+get_unmapped_area:
         if (mm->get_unmapped_area == arch_get_unmapped_area)
                 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                 pgoff, flags);
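
Why masking instead of ALIGN() matters at the border: ALIGN() rounds an
unaligned hint up and can silently move it across DEFAULT_MAP_WINDOW
before the validity check runs, while huge_page_mask() rounds down. A
standalone illustration, with HPAGE_SIZE, HPAGE_MASK and
DEFAULT_MAP_WINDOW as assumed stand-ins for the kernel's values
(2 MB huge pages, 47-bit window):

#include <stdio.h>

/* Stand-ins for the kernel macros; the values are assumptions for the demo. */
#define HPAGE_SIZE              (2UL << 20)             /* 2 MB huge pages */
#define HPAGE_MASK              (~(HPAGE_SIZE - 1))     /* what huge_page_mask(h) yields */
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))
#define DEFAULT_MAP_WINDOW      (1UL << 47)

int main(void)
{
        /* An unaligned hint 4 KB below the 47-bit border. */
        unsigned long hint = DEFAULT_MAP_WINDOW - 0x1000;

        /* ALIGN() rounds up, landing exactly on the border ... */
        printf("ALIGN(): %#lx\n", ALIGN(hint, HPAGE_SIZE));     /* 0x800000000000 */

        /* ... masking rounds down, staying below it as userspace intended. */
        printf("mask:    %#lx\n", hint & HPAGE_MASK);           /* 0x7fffffe00000 */

        return 0;
}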