Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4cc962d566ce..1a2020d72ca0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2538,11 +2538,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	bool anon;
 
 	lock_page_cgroup(pc);
-	if (unlikely(PageCgroupUsed(pc))) {
-		unlock_page_cgroup(pc);
-		__mem_cgroup_cancel_charge(memcg, nr_pages);
-		return;
-	}
+	VM_BUG_ON(PageCgroupUsed(pc));
 	/*
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
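
For readers unfamiliar with the macro: VM_BUG_ON() lives in include/linux/mmdebug.h and only performs the runtime check when the kernel is built with CONFIG_DEBUG_VM; otherwise it compiles away. A simplified sketch of its effect (not the verbatim header, which also keeps the disabled expression type-checked):

/* Simplified model of VM_BUG_ON() -- see include/linux/mmdebug.h for the real thing */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)	BUG_ON(cond)		/* oops if the invariant is violated */
#else
#define VM_BUG_ON(cond)	do { } while (0)	/* no cost on production kernels */
#endif

So where the old code silently cancelled a duplicate charge at runtime, the new code states that duplicates can no longer reach this point, and only debug kernels verify it.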
@@ -2807,8 +2803,19 @@ static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 					  struct mem_cgroup **memcgp)
 {
 	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
 	int ret;
 
+	pc = lookup_page_cgroup(page);
+	/*
+	 * Every swap fault against a single page tries to charge the
+	 * page, bail as early as possible. shmem_unuse() encounters
+	 * already charged pages, too. The USED bit is protected by
+	 * the page lock, which serializes swap cache removal, which
+	 * in turn serializes uncharging.
+	 */
+	if (PageCgroupUsed(pc))
+		return 0;
 	if (!do_swap_account)
 		goto charge_cur_mm;
 	/*
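
The serialization argument in the comment added above can be illustrated with a small, purely hypothetical userspace model (none of these names or helpers are kernel code): a pthread mutex stands in for the page lock and a bool for the USED bit. Because the flag only changes while the lock is held, a second charger that observes it set can return early without racing against uncharge.

/* Hypothetical model, not kernel code: a mutex stands in for the page lock,
 * a bool for PageCgroupUsed(); all names here are made up for illustration. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static bool used;	/* only read or written with page_lock held */

/* Models the early bail-out: only the first charger actually charges. */
static int try_charge(const char *who)
{
	if (used) {
		printf("%s: already charged, bail early\n", who);
		return 0;
	}
	used = true;	/* models committing the charge */
	printf("%s: charged the page\n", who);
	return 0;
}

static void *fault_path(void *arg)
{
	/* The fault path holds the page lock across the charge, so 'used'
	 * cannot be cleared (uncharged) underneath it. */
	pthread_mutex_lock(&page_lock);
	try_charge(arg);
	pthread_mutex_unlock(&page_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault_path, "first fault");
	pthread_create(&b, NULL, fault_path, "repeated fault");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	assert(used);	/* exactly one of the two ended up charging */
	return 0;
}

Build with gcc -pthread: one of the two faults charges, the other bails, which is the behaviour the new check gives repeated swap faults and shmem_unuse().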