Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  67
-rw-r--r--  mm/migrate.c     11
2 files changed, 38 insertions(+), 40 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f692a2dbfcb..7eadcdad06f3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2976,7 +2976,8 @@ direct_uncharge:
* uncharge if !page_mapped(page)
*/
static struct mem_cgroup *
-__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
+__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
+ bool end_migration)
{
struct mem_cgroup *memcg = NULL;
unsigned int nr_pages = 1;
@@ -3020,7 +3021,16 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
/* fallthrough */
case MEM_CGROUP_CHARGE_TYPE_DROP:
/* See mem_cgroup_prepare_migration() */
- if (page_mapped(page) || PageCgroupMigration(pc))
+ if (page_mapped(page))
+ goto unlock_out;
+ /*
+ * Pages under migration may not be uncharged. But
+ * end_migration() /must/ be the one uncharging the
+ * unused post-migration page and so it has to call
+ * here with the migration bit still set. See the
+ * res_counter handling below.
+ */
+ if (!end_migration && PageCgroupMigration(pc))
goto unlock_out;
break;
case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
@@ -3054,7 +3064,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mem_cgroup_swap_statistics(memcg, true);
mem_cgroup_get(memcg);
}
- if (!mem_cgroup_is_root(memcg))
+ /*
+ * Migration does not charge the res_counter for the
+ * replacement page, so leave it alone when phasing out the
+ * page that is unused after the migration.
+ */
+ if (!end_migration && !mem_cgroup_is_root(memcg))
mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
return memcg;
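The two hunks above are the core of the change. A standalone C model of that gating, with simplified, hypothetical types (nothing below is the real kernel API): uncharging is refused while the page is under migration unless the caller is end_migration(), and in that case the res_counter is left alone because the replacement page was never charged to it.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; none of this is the real kernel API. */
struct page_model {
    bool mapped;
    bool migration;             /* models PageCgroupMigration(pc) */
};

static long res_counter = 1;    /* one page currently charged */

/* Models the gating in __mem_cgroup_uncharge_common(). */
static bool uncharge_common(struct page_model *page, bool end_migration)
{
    if (page->mapped)
        return false;           /* page still in use */
    /*
     * Pages under migration may only be uncharged by
     * end_migration(), which calls in with the bit still set.
     */
    if (!end_migration && page->migration)
        return false;
    /*
     * Migration never charged the replacement page, so skip the
     * res_counter when retiring the unused post-migration page.
     */
    if (!end_migration)
        res_counter--;
    return true;
}

int main(void)
{
    struct page_model old = { .mapped = false, .migration = true };

    printf("stray uncharge refused: %d\n", !uncharge_common(&old, false));
    printf("end_migration allowed:  %d\n", uncharge_common(&old, true));
    printf("res_counter untouched:  %ld\n", res_counter);
    return 0;
}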
@@ -3070,14 +3085,14 @@ void mem_cgroup_uncharge_page(struct page *page)
if (page_mapped(page))
return;
VM_BUG_ON(page->mapping && !PageAnon(page));
- __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON);
+ __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
}
void mem_cgroup_uncharge_cache_page(struct page *page)
{
VM_BUG_ON(page_mapped(page));
VM_BUG_ON(page->mapping);
- __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
+ __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
}
/*
@@ -3141,7 +3156,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
if (!swapout) /* this was a swap cache but the swap is unused ! */
ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
- memcg = __mem_cgroup_uncharge_common(page, ctype);
+ memcg = __mem_cgroup_uncharge_common(page, ctype, false);
/*
* record memcg information, if swapout && memcg != NULL,
@@ -3231,19 +3246,18 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
* Before starting migration, account PAGE_SIZE to mem_cgroup that the old
* page belongs to.
*/
-int mem_cgroup_prepare_migration(struct page *page,
- struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
+void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
+ struct mem_cgroup **memcgp)
{
struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
enum charge_type ctype;
- int ret = 0;
*memcgp = NULL;
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
- return 0;
+ return;
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
@@ -3288,24 +3302,9 @@ int mem_cgroup_prepare_migration(struct page *page,
* we return here.
*/
if (!memcg)
- return 0;
+ return;
*memcgp = memcg;
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
- css_put(&memcg->css);/* drop extra refcnt */
- if (ret) {
- if (PageAnon(page)) {
- lock_page_cgroup(pc);
- ClearPageCgroupMigration(pc);
- unlock_page_cgroup(pc);
- /*
- * The old page may be fully unmapped while we kept it.
- */
- mem_cgroup_uncharge_page(page);
- }
- /* we'll need to revisit this error code (we have -EINTR) */
- return -ENOMEM;
- }
/*
* We charge new page before it's used/mapped. So, even if unlock_page()
* is called before end_migration, we can catch all events on this new
@@ -3318,8 +3317,12 @@ int mem_cgroup_prepare_migration(struct page *page,
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+ /*
+ * The page is committed to the memcg, but it's not actually
+ * charged to the res_counter since we plan on replacing the
+ * old one and only one page is going to be left afterwards.
+ */
__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
- return ret;
}
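With the __mem_cgroup_try_charge() call gone, mem_cgroup_prepare_migration() has no failure path left, which is why it becomes void and why migration can no longer fail against a memcg limit. A hedged, userspace-only sketch of the resulting contract (hypothetical names): the new page is bound to the memcg, but the res_counter keeps counting a single page.

#include <stdio.h>

struct memcg_model { long res_counter; };
struct page_model  { struct memcg_model *memcg; };

/* Models the void mem_cgroup_prepare_migration(): it cannot fail. */
static void prepare_migration(struct page_model *oldpage,
                              struct page_model *newpage,
                              struct memcg_model **memcgp)
{
    *memcgp = oldpage->memcg;
    /*
     * Commit the new page to the old page's memcg but leave the
     * res_counter alone: only one of the two pages survives, and
     * the old page's existing charge is simply handed over.
     */
    newpage->memcg = oldpage->memcg;
}

int main(void)
{
    struct memcg_model memcg = { .res_counter = 1 };
    struct page_model oldpage = { .memcg = &memcg };
    struct page_model newpage = { 0 };
    struct memcg_model *mem;

    prepare_migration(&oldpage, &newpage, &mem);
    printf("charges during migration: %ld\n", memcg.res_counter);
    return 0;
}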
/* remove redundant charge if migration failed */
@@ -3341,6 +3344,12 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
used = newpage;
unused = oldpage;
}
+ anon = PageAnon(used);
+ __mem_cgroup_uncharge_common(unused,
+ anon ? MEM_CGROUP_CHARGE_TYPE_ANON
+ : MEM_CGROUP_CHARGE_TYPE_CACHE,
+ true);
+ css_put(&memcg->css);
/*
* We disallowed uncharge of pages under migration because mapcount
* of the page goes down to zero, temporarily.
@@ -3350,10 +3359,6 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
lock_page_cgroup(pc);
ClearPageCgroupMigration(pc);
unlock_page_cgroup(pc);
- anon = PageAnon(used);
- __mem_cgroup_uncharge_common(unused,
- anon ? MEM_CGROUP_CHARGE_TYPE_ANON
- : MEM_CGROUP_CHARGE_TYPE_CACHE);
/*
* If a page is a file cache, radix-tree replacement is very atomic
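The mapcount comment above is the reason the Migration bit exists: during migration the page is transiently unmapped, so a concurrent uncharge would see page_mapped() == false and wrongly release the charge. A single-threaded toy model of that gate (illustrative names only):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct page_state { int mapcount; bool migration; bool charged; };

/* Models mem_cgroup_uncharge_page()'s gate during migration. */
static void try_uncharge(struct page_state *p)
{
    if (p->mapcount > 0)
        return;
    if (p->migration)       /* migration owns the page: refuse */
        return;
    p->charged = false;
}

int main(void)
{
    struct page_state p = { .mapcount = 1, .migration = true,
                            .charged = true };

    p.mapcount = 0;         /* transiently unmapped mid-migration */
    try_uncharge(&p);       /* would wrongly uncharge without the bit */
    assert(p.charged);

    p.migration = false;    /* bit cleared only in end_migration() */
    printf("charge survived transient unmap: %d\n", p.charged);
    return 0;
}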
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c37c51565e5..77ed2d773705 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -683,7 +683,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
{
int rc = -EAGAIN;
int remap_swapcache = 1;
- int charge = 0;
struct mem_cgroup *mem;
struct anon_vma *anon_vma = NULL;
@@ -725,12 +724,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
/* charge against new page */
- charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
- if (charge == -ENOMEM) {
- rc = -ENOMEM;
- goto unlock;
- }
- BUG_ON(charge);
+ mem_cgroup_prepare_migration(page, newpage, &mem);
if (PageWriteback(page)) {
/*
@@ -820,8 +814,7 @@ skip_unmap:
put_anon_vma(anon_vma);
uncharge:
- if (!charge)
- mem_cgroup_end_migration(mem, page, newpage, rc == 0);
+ mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
unlock_page(page);
out:
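Putting both files together, a self-contained C model (hypothetical, userspace stand-ins for the kernel structures) that checks the patch's invariant for both outcomes: exactly one page's worth of charge exists before, during, and after a migration, and the css reference taken in prepare is always dropped in end_migration().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct memcg { long res_counter; int css_refs; };
struct page  { struct memcg *memcg; bool migrating; };

static void prepare_migration(struct page *oldp, struct page *newp,
                              struct memcg **memp)
{
    oldp->migrating = true;
    oldp->memcg->css_refs++;    /* pin the memcg across migration */
    newp->memcg = oldp->memcg;  /* commit, no res_counter charge */
    *memp = oldp->memcg;
}

static void end_migration(struct memcg *mem, struct page *oldp,
                          struct page *newp, bool ok)
{
    struct page *unused = ok ? oldp : newp;

    unused->memcg = NULL;       /* uncharge the loser, counter untouched */
    oldp->migrating = false;    /* only now clear the migration bit */
    mem->css_refs--;            /* drop the prepare-time pin */
}

static void migrate(bool ok)
{
    struct memcg m = { .res_counter = 1, .css_refs = 1 };
    struct page oldp = { .memcg = &m }, newp = { 0 };
    struct memcg *mem;

    prepare_migration(&oldp, &newp, &mem);
    assert(m.res_counter == 1); /* still one charge mid-flight */
    end_migration(mem, &oldp, &newp, ok);
    assert(m.res_counter == 1 && m.css_refs == 1);
    printf("migration %s: one charge, refs balanced\n",
           ok ? "succeeded" : "failed");
}

int main(void)
{
    migrate(true);
    migrate(false);
    return 0;
}

Because mem_cgroup_end_migration() now runs unconditionally in __unmap_and_move(), the invariant holds on the error path as well, which is what lets the caller drop the old charge bookkeeping entirely.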