path: root/mm/slab.h
Diffstat (limited to 'mm/slab.h')
-rw-r--r--   mm/slab.h   52
1 file changed, 52 insertions, 0 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 161ca34acb01..9f78e3167f19 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -109,6 +109,7 @@ struct memcg_cache_params {
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
+#include <linux/kmemleak.h>
/*
* State of the slab allocator.
@@ -348,6 +349,18 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
return s->memcg_params.root_cache;
}
+static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
+{
+ /*
+ * page->mem_cgroup and page->obj_cgroups are sharing the same
+ * space. To distinguish between them in case we don't know for sure
+ * that the page is a slab page (e.g. page_cgroup_ino()), let's
+ * always set the lowest bit of obj_cgroups.
+ */
+ return (struct obj_cgroup **)
+ ((unsigned long)page->obj_cgroups & ~0x1UL);
+}
+
/*
* Expects a pointer to a slab page. Please note, that PageSlab() check
* isn't sufficient, as it returns true also for tail compound slab pages,
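
The accessor above relies on a low-bit pointer tag: a kmalloc'ed vector is at
least word-aligned, so bit 0 of its address is always clear and can be borrowed
to mark that the field holds an obj_cgroups vector rather than a mem_cgroup
pointer. A standalone userspace sketch of the same trick follows; it is not
part of the patch, and the tag_obj_cgroups/untag_obj_cgroups/holds_obj_cgroups
names are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct obj_cgroup;			/* opaque here, as in the kernel */

/* Set bit 0, mirroring what memcg_alloc_page_obj_cgroups() stores. */
static void *tag_obj_cgroups(struct obj_cgroup **vec)
{
	return (void *)((uintptr_t)vec | 1);
}

/* Clear bit 0 to recover the real pointer, mirroring page_obj_cgroups(). */
static struct obj_cgroup **untag_obj_cgroups(void *field)
{
	return (struct obj_cgroup **)((uintptr_t)field & ~(uintptr_t)1);
}

/* Bit 0 set means the field currently holds a tagged obj_cgroups vector. */
static int holds_obj_cgroups(void *field)
{
	return ((uintptr_t)field & 1) != 0;
}

int main(void)
{
	struct obj_cgroup **vec = calloc(16, sizeof(*vec));
	void *field = tag_obj_cgroups(vec);	/* what page->obj_cgroups would hold */

	assert(holds_obj_cgroups(field));
	assert(untag_obj_cgroups(field) == vec);
	printf("vector %p stored as %p, recovered as %p\n",
	       (void *)vec, field, (void *)untag_obj_cgroups(field));
	free(vec);
	return 0;
}
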
@@ -435,6 +448,28 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}
+static inline int memcg_alloc_page_obj_cgroups(struct page *page,
+ struct kmem_cache *s, gfp_t gfp)
+{
+ unsigned int objects = objs_per_slab_page(s, page);
+ void *vec;
+
+ vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
+ page_to_nid(page));
+ if (!vec)
+ return -ENOMEM;
+
+ kmemleak_not_leak(vec);
+ page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
+ return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+ kfree(page_obj_cgroups(page));
+ page->obj_cgroups = NULL;
+}
+
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
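
For illustration only, here is a userspace analogue of the allocate/free pair
above: calloc stands in for kcalloc_node(), a minimal fake_page struct stands
in for struct page, and the alloc_obj_cgroups/free_obj_cgroups names are
invented. The points it mirrors are that the vector holds one obj_cgroup
pointer per object in the slab page, starts out zeroed ("no cgroup charged"),
and is stored with bit 0 set.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct obj_cgroup;

struct fake_page {
	void *obj_cgroups;		/* tagged vector, or NULL */
};

static struct obj_cgroup **fake_page_obj_cgroups(struct fake_page *page)
{
	return (struct obj_cgroup **)((uintptr_t)page->obj_cgroups & ~(uintptr_t)1);
}

/* One pointer slot per object that fits in the page; zeroed like kcalloc. */
static int alloc_obj_cgroups(struct fake_page *page, unsigned int objects)
{
	struct obj_cgroup **vec = calloc(objects, sizeof(*vec));

	if (!vec)
		return -ENOMEM;
	page->obj_cgroups = (void *)((uintptr_t)vec | 1);
	return 0;
}

static void free_obj_cgroups(struct fake_page *page)
{
	free(fake_page_obj_cgroups(page));
	page->obj_cgroups = NULL;
}

int main(void)
{
	struct fake_page page = { 0 };

	if (alloc_obj_cgroups(&page, 32))	/* e.g. 32 objects per slab page */
		return 1;
	free_obj_cgroups(&page);
	return 0;
}
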
@@ -484,6 +519,16 @@ static inline void memcg_uncharge_slab(struct page *page, int order,
{
}
+static inline int memcg_alloc_page_obj_cgroups(struct page *page,
+ struct kmem_cache *s, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+}
+
static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
@@ -510,12 +555,18 @@ static __always_inline int charge_slab_page(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
+ int ret;
+
if (is_root_cache(s)) {
mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
PAGE_SIZE << order);
return 0;
}
+ ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
+ if (ret)
+ return ret;
+
return memcg_charge_slab(page, gfp, order, s);
}
@@ -528,6 +579,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
return;
}
+ memcg_free_page_obj_cgroups(page);
memcg_uncharge_slab(page, order, s);
}