Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fba566c51322..36896f3eb7f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
+ /*
+ * mm_update_next_owner() may clear mm->owner to NULL
+ * if it races with swapoff, page migration, etc.
+ * So this can be called with p == NULL.
+ */
+ if (unlikely(!p))
+ return NULL;
+
return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
struct mem_cgroup, css);
}
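
The hunk above changes the contract of mem_cgroup_from_task(): once mm_update_next_owner() can leave mm->owner NULL, the helper returns NULL instead of dereferencing a stale pointer, and every caller has to cope with that. A minimal userspace sketch of the resulting caller pattern follows; all types and names are illustrative stand-ins, not the real kernel definitions, and the RCU protection is reduced to comments:

#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { int id; };
struct task_struct { struct mem_cgroup *memcg; };
struct mm_struct { struct task_struct *owner; /* may go NULL under us */ };

static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        if (!p)                         /* owner already cleared */
                return NULL;
        return p->memcg;
}

int main(void)
{
        struct mm_struct mm = { .owner = NULL };        /* simulate the race */
        struct mem_cgroup *mem = mem_cgroup_from_task(mm.owner);

        if (!mem) {                     /* every caller now needs this check */
                puts("owner gone, nothing to charge");
                return 0;
        }
        printf("found cgroup %d\n", mem->id);
        return 0;
}
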
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
if (likely(!memcg)) {
rcu_read_lock();
mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!mem)) {
+ rcu_read_unlock();
+ kmem_cache_free(page_cgroup_cache, pc);
+ return 0;
+ }
/*
* For every charge from the cgroup, increment reference count
*/
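
This hunk handles the same race on the charge path: if the owner lookup comes back NULL, the pre-allocated page_cgroup must be freed before bailing out, and the charge is reported as success (0) since there is no cgroup left to charge against. A compilable sketch of that cleanup-on-early-exit shape, using malloc/free as stand-ins for the page_cgroup_cache allocator and a hypothetical stub lookup that always loses the race:

#include <stdlib.h>

struct page_cgroup { int stub; };
struct mem_cgroup { int refs; };

/* hypothetical stand-in for the owner lookup; NULL models the lost race */
static struct mem_cgroup *lookup_mem_cgroup(void)
{
        return NULL;
}

static int mem_cgroup_charge_sketch(void)
{
        struct page_cgroup *pc = malloc(sizeof(*pc)); /* models page_cgroup_cache */
        struct mem_cgroup *mem;

        if (!pc)
                return -1;

        /* the kernel holds rcu_read_lock() across this lookup */
        mem = lookup_mem_cgroup();
        if (!mem) {
                /* rcu_read_unlock() comes first in the real code */
                free(pc);       /* release the pre-allocated descriptor */
                return 0;       /* nothing to charge against: not an error */
        }
        /* ... the real path takes a reference on mem and attaches pc ... */
        free(pc);
        return 0;
}

int main(void)
{
        return mem_cgroup_charge_sketch();
}
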
@@ -796,14 +809,21 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
if (mem_cgroup_subsys.disabled)
return 0;
+ if (!mm)
+ return 0;
rcu_read_lock();
mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!mem)) {
+ rcu_read_unlock();
+ return 0;
+ }
css_get(&mem->css);
rcu_read_unlock();
do {
progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+ progress += res_counter_check_under_limit(&mem->res);
} while (!progress && --retry);
css_put(&mem->css);
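
The loop change here is subtle: previously the retry loop kept going until try_to_free_mem_cgroup_pages() reported progress or the retries ran out; folding res_counter_check_under_limit() into progress means "usage is already under the limit" also terminates the loop, so reclaim is not retried pointlessly. A small standalone sketch of just the loop logic, with hypothetical stubs whose return values model the "no reclaim progress, but already under limit" case:

#include <stdio.h>

/* hypothetical stubs: no reclaim progress, but usage already under limit */
static int try_to_free_pages_stub(void)  { return 0; }
static int check_under_limit_stub(void)  { return 1; }

int main(void)
{
        int retry = 5, progress;

        do {
                progress  = try_to_free_pages_stub();
                /*
                 * The added term: counting "already under limit" as
                 * progress ends the loop without burning retries.
                 */
                progress += check_under_limit_stub();
        } while (!progress && --retry);

        printf("done, retries left: %d\n", retry);
        return 0;
}
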
@@ -1168,9 +1188,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
mem = mem_cgroup_from_cont(cont);
old_mem = mem_cgroup_from_cont(old_cont);
- if (mem == old_mem)
- goto out;
-
/*
* Only thread group leaders are allowed to migrate, the mm_struct is
* in effect owned by the leader