Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c         22
-rw-r--r--  mm/kasan/generic.c    2
-rw-r--r--  mm/memory.c           8
-rw-r--r--  mm/memory_hotplug.c   2
-rw-r--r--  mm/mremap.c           4
-rw-r--r--  mm/page_alloc.c       8
-rw-r--r--  mm/slub.c             5
7 files changed, 39 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cbf32d2824fd..a2602969873d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4105,10 +4105,30 @@ retry_avoidcopy:
* may get SIGKILLed if it later faults.
*/
if (outside_reserve) {
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t idx;
+ u32 hash;
+
put_page(old_page);
BUG_ON(huge_pte_none(pte));
+ /*
+ * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+ * unmapping; unmapping needs to hold i_mmap_rwsem
+ * in write mode. Dropping i_mmap_rwsem in read mode
+ * here is OK as COW mappings do not interact with
+ * PMD sharing.
+ *
+ * Reacquire both after unmap operation.
+ */
+ idx = vma_hugecache_offset(h, vma, haddr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
+
unmap_ref_private(mm, vma, old_page, haddr);
- BUG_ON(huge_pte_none(pte));
+
+ i_mmap_lock_read(mapping);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
spin_lock(ptl);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep &&
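
The hunk above encodes a classic drop-and-reacquire protocol: the fault path holds hugetlb_fault_mutex and i_mmap_rwsem in read mode, but unmap_ref_private() needs i_mmap_rwsem in write mode, so both locks are released (mutex first) and retaken (rwsem first) around the call, after which the PTE is looked up again because it may have changed in the interim. Below is a minimal userspace model of that pattern using pthreads; fault_mutex, mmap_rwsem and unmap_operation are illustrative stand-ins, not the kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fault_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static int protected_state = 42;

/* Stand-in for unmap_ref_private(): needs mmap_rwsem in write mode. */
static void unmap_operation(void)
{
	pthread_rwlock_wrlock(&mmap_rwsem);
	protected_state = 0;
	pthread_rwlock_unlock(&mmap_rwsem);
}

static void fault_path(void)
{
	pthread_rwlock_rdlock(&mmap_rwsem);
	pthread_mutex_lock(&fault_mutex);

	int snapshot = protected_state;

	/* Release both (mutex first) so the write lock can be taken. */
	pthread_mutex_unlock(&fault_mutex);
	pthread_rwlock_unlock(&mmap_rwsem);

	unmap_operation();

	/* Reacquire in the original order ... */
	pthread_rwlock_rdlock(&mmap_rwsem);
	pthread_mutex_lock(&fault_mutex);

	/* ... and revalidate: state may have changed while unlocked. */
	if (protected_state != snapshot)
		printf("state changed; caller must recheck the PTE\n");

	pthread_mutex_unlock(&fault_mutex);
	pthread_rwlock_unlock(&mmap_rwsem);
}

int main(void)
{
	fault_path();
	return 0;
}
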
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 1dd5a0f99372..5106b84b07d4 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -337,6 +337,8 @@ void kasan_record_aux_stack(void *addr)
cache = page->slab_cache;
object = nearest_obj(cache, page, addr);
alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}
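
The two added lines are a straightforward NULL-pointer guard: kasan_get_alloc_meta() can return NULL (for instance when a cache carries no allocation metadata), and the old code dereferenced the result unconditionally. A condensed standalone sketch of the pattern, with illustrative names only:

#include <stddef.h>
#include <stdio.h>

struct alloc_meta {
	unsigned long aux_stack[2];
};

/* May return NULL, e.g. when no metadata was allocated for the cache. */
static struct alloc_meta *get_alloc_meta(int has_meta)
{
	static struct alloc_meta meta;
	return has_meta ? &meta : NULL;
}

static void record_aux_stack(int has_meta, unsigned long stack_id)
{
	struct alloc_meta *m = get_alloc_meta(has_meta);

	if (!m)			/* the fix: bail out before dereferencing */
		return;

	m->aux_stack[1] = m->aux_stack[0];	/* shift the older stack down */
	m->aux_stack[0] = stack_id;		/* record the newest one */
}

int main(void)
{
	record_aux_stack(0, 1);	/* safely ignored: no metadata */
	record_aux_stack(1, 2);	/* recorded */
	printf("ok\n");
	return 0;
}
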
diff --git a/mm/memory.c b/mm/memory.c
index 7d608765932b..feff48e1465a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2892,11 +2892,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
entry = mk_pte(new_page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+
/*
* Clear the pte entry and flush it first, before updating the
- * pte with the new entry. This will avoid a race condition
- * seen in the presence of one thread doing SMC and another
- * thread doing COW.
+ * pte with the new entry, to keep TLBs on different CPUs in
+ * sync. This code used to set the new PTE then flush TLBs, but
+ * that left a window where the new PTE could be loaded into
+ * some TLBs while the old PTE remains in others.
*/
ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
page_add_new_anon_rmap(new_page, vma, vmf->address, false);
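
The rewritten comment states the ordering requirement precisely: the old PTE must be cleared and flushed from every TLB before the new PTE becomes visible, otherwise two CPUs can briefly translate the same address differently. The toy single-threaded simulation below (not kernel code; the structures are stand-ins) just makes the invariant concrete: with safe_update() there is no point at which one simulated TLB holds the new translation while another still holds the old one.

#include <stdio.h>

#define NCPUS 2

static unsigned long pte;		/* the page-table entry */
static unsigned long tlb[NCPUS];	/* per-CPU cached translations */

static void tlb_fill(int cpu)   { tlb[cpu] = pte; }	/* CPU walks the table */
static void tlb_flush_all(void) { for (int c = 0; c < NCPUS; c++) tlb[c] = 0; }

static void safe_update(unsigned long new)
{
	pte = 0;		/* clear ... */
	tlb_flush_all();	/* ... and flush first: old mapping gone everywhere */
	pte = new;		/* only now can any CPU load the new value */
}

int main(void)
{
	pte = 0x1000;
	tlb_fill(0);
	tlb_fill(1);

	safe_update(0x2000);
	tlb_fill(0);		/* refill after the fault */
	tlb_fill(1);

	printf("cpu0=%lx cpu1=%lx\n", tlb[0], tlb[1]);
	return 0;
}
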
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index af41fb990820..f9d57b9be8c7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -713,7 +713,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
* expects the zone spans the pfn range. All the pages in the range
* are reserved so nobody should be touching them so we should be safe
*/
- memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
MEMINIT_HOTPLUG, altmap, migratetype);
set_zone_contiguous(zone);
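
This caller passes 0 for the new zone_end_pfn parameter, which is safe because, as the page_alloc.c hunks below show, that value is only consulted by defer_init() on the MEMINIT_EARLY path; the hotplug path (MEMINIT_HOTPLUG) never reaches it. A simplified sketch of that control flow, not the real function:

#include <stdio.h>

enum meminit_context { MEMINIT_EARLY, MEMINIT_HOTPLUG };

/* Stand-in for defer_init(): the only reader of zone_end_pfn.
 * The condition is an arbitrary placeholder. */
static int defer_init(unsigned long pfn, unsigned long zone_end_pfn)
{
	return pfn >= zone_end_pfn;
}

static void memmap_init_zone(unsigned long size, unsigned long start_pfn,
			     unsigned long zone_end_pfn,
			     enum meminit_context context)
{
	for (unsigned long pfn = start_pfn; pfn < start_pfn + size; pfn++) {
		if (context == MEMINIT_EARLY &&
		    defer_init(pfn, zone_end_pfn))
			break;
		/* ... initialize the struct page for this pfn ... */
	}
	printf("context=%d done\n", context);
}

int main(void)
{
	/* Hotplug: zone_end_pfn is never read, so 0 is a harmless dummy. */
	memmap_init_zone(128, 4096, 0, MEMINIT_HOTPLUG);
	return 0;
}
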
diff --git a/mm/mremap.c b/mm/mremap.c
index c5590afe7165..f554320281cc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -358,7 +358,9 @@ static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
next = (old_addr + size) & mask;
/* even if next overflowed, extent below will be ok */
- extent = (next > old_end) ? old_end - old_addr : next - old_addr;
+ extent = next - old_addr;
+ if (extent > old_end - old_addr)
+ extent = old_end - old_addr;
next = (new_addr + size) & mask;
if (extent > next - new_addr)
extent = next - new_addr;
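
The old one-liner picked a branch with `next > old_end`, which chooses wrongly once `old_addr + size` wraps past the top of the address space and `next` becomes a small value. The rewrite computes the distance first and then clamps it, which is wrap-safe in unsigned arithmetic. A standalone demonstration with illustrative addresses (the mask below is a stand-in, not the kernel's PMD_MASK):

#include <stdio.h>

int main(void)
{
	unsigned long mask = ~0xfffffUL;		/* PMD-like mask */
	unsigned long old_addr = 0xfffffffffff00000UL;	/* near the top */
	unsigned long old_end  = 0xfffffffffff80000UL;
	unsigned long size     = 0x200000UL;

	unsigned long next = (old_addr + size) & mask;	/* wraps to 0x100000 */

	/* old logic: wrong branch taken once next wraps below old_end */
	unsigned long bad = (next > old_end) ? old_end - old_addr
					     : next - old_addr;

	/* new logic: compute the distance, then clamp to what is left */
	unsigned long extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

	/* prints old: 0x200000 (past old_end), new: 0x80000 (clamped) */
	printf("old: %#lx  new: %#lx\n", bad, extent);
	return 0;
}
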
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7a2c89b21115..bdbec4c98173 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -423,6 +423,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
return false;
+ if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+ return true;
/*
* We start only with one section of pages, more pages are added as
* needed until the rest of deferred pages are initialized.
@@ -6116,7 +6118,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
* zone stats (e.g., nr_isolate_pageblock) are touched.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn,
+ unsigned long start_pfn, unsigned long zone_end_pfn,
enum meminit_context context,
struct vmem_altmap *altmap, int migratetype)
{
@@ -6152,7 +6154,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
if (context == MEMINIT_EARLY) {
if (overlap_memmap_init(zone, &pfn))
continue;
- if (defer_init(nid, pfn, end_pfn))
+ if (defer_init(nid, pfn, zone_end_pfn))
break;
}
@@ -6266,7 +6268,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
- memmap_init_zone(size, nid, zone, start_pfn,
+ memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}
}
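
Taken together, the page_alloc.c hunks change two things: defer_init() now short-circuits to "defer" once a first deferred pfn has been recorded, and memmap_init_zone() is handed the zone's end pfn rather than the end of the current memblock range, so the bound that defer_init() compares against the node end is correct even when a zone consists of several ranges. A condensed userspace model of that decision logic (all values and the budget mechanism are illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned long node_end_pfn = 1024;	/* pgdat_end_pfn() stand-in */
static unsigned long first_deferred_pfn = ULONG_MAX;
static unsigned long init_budget = 256;		/* pages initialized eagerly */

static bool defer_init(unsigned long pfn, unsigned long zone_end_pfn)
{
	if (zone_end_pfn < node_end_pfn)	/* only the last zone defers */
		return false;
	if (first_deferred_pfn != ULONG_MAX)	/* the added early-out */
		return true;
	if (init_budget == 0) {
		first_deferred_pfn = pfn;
		return true;
	}
	init_budget--;
	return false;
}

static void memmap_init_range(unsigned long start, unsigned long size,
			      unsigned long zone_end_pfn)
{
	for (unsigned long pfn = start; pfn < start + size; pfn++)
		if (defer_init(pfn, zone_end_pfn))
			break;		/* the rest is initialized later */
}

int main(void)
{
	/* two memblock ranges of one zone spanning [0, 1024) */
	memmap_init_range(0, 512, 1024);
	memmap_init_range(512, 512, 1024);	/* defers immediately */
	printf("first_deferred_pfn=%lu\n", first_deferred_pfn);
	return 0;
}
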
diff --git a/mm/slub.c b/mm/slub.c
index 0c8b43a5b3b0..dc5b42e700b8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1619,9 +1619,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = __alloc_pages_node(node, flags, order);
- if (page)
- account_slab_page(page, order, s);
-
return page;
}
@@ -1774,6 +1771,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->objects = oo_objects(oo);
+ account_slab_page(page, oo_order(oo), s);
+
page->slab_cache = s;
__SetPageSlab(page);
if (page_is_pfmemalloc(page))
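
The slub.c change is purely an ordering fix: account_slab_page() moves out of alloc_slab_page() and into allocate_slab(), after page->objects and page->slab_cache are set, so accounting runs against a fully initialized slab page. A minimal standalone illustration of the initialize-before-publish pattern (the names mirror the diff, but the code is a stand-in, not the kernel implementation):

#include <stdio.h>

struct page {
	unsigned int objects;
	const char  *slab_cache;
};

/* Stand-in for account_slab_page(): may consult page state. */
static void account_page(const struct page *page)
{
	printf("accounting %u objects for cache %s\n",
	       page->objects, page->slab_cache);
}

static struct page *allocate_slab(const char *cache, unsigned int objects)
{
	static struct page page;	/* stand-in for the page allocator */

	/* initialize first ... */
	page.objects = objects;
	page.slab_cache = cache;

	/* ... then account, matching the patch's reordering */
	account_page(&page);
	return &page;
}

int main(void)
{
	allocate_slab("kmalloc-64", 32);
	return 0;
}
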