Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  81
1 file changed, 44 insertions(+), 37 deletions(-)
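
The diff below converts put_cpu_partial() from a preempt_disable()/this_cpu_cmpxchg() retry loop to a short local_irq_save()-protected section, and postpones the expensive __unfreeze_partials() call until after interrupts are re-enabled. The sketch that follows is only a rough userspace analogue of that "detach under the lock, drain after unlocking" pattern: the pthread mutex stands in for irq disabling, and names such as partial_push() and drain_list() are invented for illustration, not kernel APIs.

/*
 * Userspace analogue (not the kernel code) of the pattern this patch adopts:
 * detach an overfull list inside a short critical section, then do the
 * expensive draining *after* the lock is dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define PARTIAL_LIMIT 4

struct node {
	int id;
	struct node *next;
};

static struct node *partial_head;	/* stands in for s->cpu_slab->partial */
static int partial_count;		/* stands in for oldpage->pages */
static pthread_mutex_t partial_lock = PTHREAD_MUTEX_INITIALIZER;

/* Expensive work, done outside the critical section (cf. __unfreeze_partials()). */
static void drain_list(struct node *head)
{
	while (head) {
		struct node *next = head->next;
		printf("draining node %d\n", head->id);
		free(head);
		head = next;
	}
}

/* cf. put_cpu_partial(): keep the critical section short, defer the drain. */
static void partial_push(struct node *n, int drain)
{
	struct node *to_drain = NULL;

	pthread_mutex_lock(&partial_lock);	/* cf. local_irq_save() */

	if (drain && partial_count >= PARTIAL_LIMIT) {
		/* List is full: detach it now, drain it after unlocking. */
		to_drain = partial_head;
		partial_head = NULL;
		partial_count = 0;
	}

	n->next = partial_head;
	partial_head = n;
	partial_count++;

	pthread_mutex_unlock(&partial_lock);	/* cf. local_irq_restore() */

	if (to_drain)
		drain_list(to_drain);
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		partial_push(n, 1);
	}
	drain_list(partial_head);	/* final cleanup */
	return 0;
}
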
diff --git a/mm/slub.c b/mm/slub.c
index a04c36e173c0..f4b33d2fddc1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2025,7 +2025,12 @@ static inline void *acquire_slab(struct kmem_cache *s,
return freelist;
}
+#ifdef CONFIG_SLUB_CPU_PARTIAL
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+#else
+static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
+ int drain) { }
+#endif
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
/*
@@ -2459,14 +2464,6 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
__unfreeze_partials(s, partial_page);
}
-#else /* CONFIG_SLUB_CPU_PARTIAL */
-
-static inline void unfreeze_partials(struct kmem_cache *s) { }
-static inline void unfreeze_partials_cpu(struct kmem_cache *s,
- struct kmem_cache_cpu *c) { }
-
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
-
/*
* Put a page that was just frozen (in __slab_free|get_partial_node) into a
* partial page slot if available.
@@ -2476,46 +2473,56 @@ static inline void unfreeze_partials_cpu(struct kmem_cache *s,
*/
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
-#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *oldpage;
- int pages;
- int pobjects;
+ struct page *page_to_unfreeze = NULL;
+ unsigned long flags;
+ int pages = 0;
+ int pobjects = 0;
- preempt_disable();
- do {
- pages = 0;
- pobjects = 0;
- oldpage = this_cpu_read(s->cpu_slab->partial);
+ local_irq_save(flags);
+
+ oldpage = this_cpu_read(s->cpu_slab->partial);
- if (oldpage) {
+ if (oldpage) {
+ if (drain && oldpage->pobjects > slub_cpu_partial(s)) {
+ /*
+ * Partial array is full. Move the existing set to the
+ * per node partial list. Postpone the actual unfreezing
+ * outside of the critical section.
+ */
+ page_to_unfreeze = oldpage;
+ oldpage = NULL;
+ } else {
pobjects = oldpage->pobjects;
pages = oldpage->pages;
- if (drain && pobjects > slub_cpu_partial(s)) {
- /*
- * partial array is full. Move the existing
- * set to the per node partial list.
- */
- unfreeze_partials(s);
- oldpage = NULL;
- pobjects = 0;
- pages = 0;
- stat(s, CPU_PARTIAL_DRAIN);
- }
}
+ }
- pages++;
- pobjects += page->objects - page->inuse;
+ pages++;
+ pobjects += page->objects - page->inuse;
- page->pages = pages;
- page->pobjects = pobjects;
- page->next = oldpage;
+ page->pages = pages;
+ page->pobjects = pobjects;
+ page->next = oldpage;
- } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
- != oldpage);
- preempt_enable();
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
+ this_cpu_write(s->cpu_slab->partial, page);
+
+ local_irq_restore(flags);
+
+ if (page_to_unfreeze) {
+ __unfreeze_partials(s, page_to_unfreeze);
+ stat(s, CPU_PARTIAL_DRAIN);
+ }
}
+#else /* CONFIG_SLUB_CPU_PARTIAL */
+
+static inline void unfreeze_partials(struct kmem_cache *s) { }
+static inline void unfreeze_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c) { }
+
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
+
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
unsigned long flags;