author    Vlastimil Babka <vbabka@suse.cz>  2021-05-08 03:28:02 +0300
committer Vlastimil Babka <vbabka@suse.cz>  2021-09-04 02:12:21 +0300
commit    0b303fb402862dcb7948eeeed2439bd8c99948b5 (patch)
tree      cf582cf0547f8120a341b4e56cda547e0fac0d1d /mm/slub.c
parent    e500059ba55268e1c5212632e4f21e45f54dc6d9 (diff)
download  linux-0b303fb402862dcb7948eeeed2439bd8c99948b5.tar.xz
mm, slub: do initial checks in ___slab_alloc() with irqs enabled
As another step of shortening irq disabled sections in ___slab_alloc(), delay
disabling irqs until we pass the initial checks if there is a cached percpu
slab and it's suitable for our allocation. Now we have to recheck c->page
after actually disabling irqs as an allocation in irq handler might have
replaced it.

Because we call pfmemalloc_match() as one of the checks, we might hit
VM_BUG_ON_PAGE(!PageSlab(page)) in PageSlabPfmemalloc in case we get
interrupted and the page is freed. Thus introduce a pfmemalloc_match_unsafe()
variant that lacks the PageSlab check.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
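The pattern the patch introduces, boiled down: read c->page optimistically with
irqs enabled, run the cheap suitability checks, then disable irqs and verify
that an interrupt did not replace the page in the meantime; on a mismatch,
undo and retry from the top. Below is a minimal userspace sketch of that idea,
not kernel code: SIGALRM stands in for a hardware interrupt, sigprocmask() for
local_irq_save()/local_irq_restore(), and cpu_page, slow_alloc() and
irq_handler() are illustrative names, not kernel API.

#include <signal.h>
#include <stdio.h>

static void *volatile cpu_page;			/* models c->page */

/* Models an allocation in an interrupt handler replacing the cpu slab. */
static void irq_handler(int sig)
{
	static char other_page[64];
	(void)sig;
	cpu_page = other_page;
}

static void *slow_alloc(void)
{
	sigset_t block, old;
	void *page;

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);

reread_page:
	page = cpu_page;			/* like READ_ONCE(c->page) */

	/* ...cheap checks (node match, pfmemalloc) run with "irqs" on... */

	sigprocmask(SIG_BLOCK, &block, &old);	/* local_irq_save(flags) */
	if (page != cpu_page) {
		/* an "interrupt" swapped the slab; undo and retry */
		sigprocmask(SIG_SETMASK, &old, NULL);
		goto reread_page;
	}

	/* page is current and "irqs" are off: safe to use it */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* local_irq_restore(flags) */
	return page;
}

int main(void)
{
	static char first_page[64];

	signal(SIGALRM, irq_handler);
	cpu_page = first_page;
	printf("allocated from page %p\n", slow_alloc());
	return 0;
}

The kernel has one extra hazard this sketch does not model: between the
optimistic read and local_irq_save(), an interrupt can free the page entirely,
so its flags may no longer belong to a slab. That is why the checks run before
disabling irqs must use the pfmemalloc_match_unsafe() variant added below,
which drops the PageSlab assertion.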
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	54
1 file changed, 45 insertions, 9 deletions
diff --git a/mm/slub.c b/mm/slub.c
index dda05cc83eef..6295695d8515 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2621,6 +2621,19 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
+ * A variant of pfmemalloc_match() that tests page flags without asserting
+ * PageSlab. Intended for opportunistic checks before taking a lock and
+ * rechecking that nobody else freed the page under us.
+ */
+static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
+{
+	if (unlikely(__PageSlabPfmemalloc(page)))
+		return gfp_pfmemalloc_allowed(gfpflags);
+
+	return true;
+}
+
+/*
  * Check the page->freelist of a page and either transfer the freelist to the
  * per cpu freelist or deactivate the page.
  *
@@ -2682,8 +2695,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	stat(s, ALLOC_SLOWPATH);
 
-	local_irq_save(flags);
-	page = c->page;
+reread_page:
+
+	page = READ_ONCE(c->page);
 	if (!page) {
 		/*
 		 * if the node is not online or has no normal memory, just
@@ -2692,6 +2706,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
 		goto new_slab;
 	}
 redo:
@@ -2706,8 +2725,7 @@ redo:
 			goto redo;
 		} else {
 			stat(s, ALLOC_NODE_MISMATCH);
-			deactivate_slab(s, page, c->freelist, c);
-			goto new_slab;
+			goto deactivate_slab;
 		}
 	}
 
@@ -2716,12 +2734,15 @@ redo:
 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
 	 * information when the page leaves the per-cpu allocator
 	 */
-	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
-		deactivate_slab(s, page, c->freelist, c);
-		goto new_slab;
-	}
+	if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
+		goto deactivate_slab;
 
-	/* must check again c->freelist in case of cpu migration or IRQ */
+	/* must check again c->page in case IRQ handler changed it */
+	local_irq_save(flags);
+	if (unlikely(page != c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
 	freelist = c->freelist;
 	if (freelist)
 		goto load_freelist;
@@ -2737,6 +2758,9 @@ redo:
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
+
+	lockdep_assert_irqs_disabled();
+
 	/*
 	 * freelist is pointing to the list of objects to be used.
 	 * page is pointing to the page from which the objects are obtained.
@@ -2748,11 +2772,23 @@ load_freelist:
 	local_irq_restore(flags);
 	return freelist;
 
+deactivate_slab:
+
+	local_irq_save(flags);
+	if (page != c->page) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+	deactivate_slab(s, page, c->freelist, c);
+
 new_slab:
 
+	lockdep_assert_irqs_disabled();
+
 	if (slub_percpu_partial(c)) {
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
+		local_irq_restore(flags);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}