author     Andrea Arcangeli <aarcange@redhat.com>    2011-01-14 02:46:57 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-01-14 04:32:43 +0300
commit     b9bbfbe30ae088cc88a4b2ba7732baeebd1a0162 (patch)
tree       5f3d69ad2de2bdb8782180c7ce33bf1b9b190774 /mm/huge_memory.c
parent     152c9ccb75548c027fa3103efa4fa4e19a345449 (diff)
thp: memcg huge memory
Add memcg charge/uncharge to hugepage faults in huge_memory.c.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c | 36 +++++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)
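Every hunk below applies the same discipline: charge the page to the faulting mm's memcg right after allocation, and pair each error path that drops the page before it is mapped with an uncharge. A minimal sketch of the pattern (the wrapper huge_fault_sketch() is hypothetical; alloc_hugepage(), mem_cgroup_newpage_charge() and __do_huge_pmd_anonymous_page() are the functions the diff actually touches):

static int huge_fault_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long haddr, pmd_t *pmd)
{
	/* Hypothetical wrapper; mirrors do_huge_pmd_anonymous_page(). */
	struct page *page = alloc_hugepage(transparent_hugepage_defrag(vma));

	if (unlikely(!page))
		return VM_FAULT_OOM;
	/* Charge the hugepage before it can be mapped anywhere. */
	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
		put_page(page);
		return VM_FAULT_OOM;
	}
	/*
	 * From here on, every failure path must uncharge before the
	 * final put_page(); that is exactly what the hunks below add.
	 */
	return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
}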
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 620891f4e54f..a313403b3c5e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -233,6 +233,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	VM_BUG_ON(!PageCompound(page));
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable)) {
+		mem_cgroup_uncharge_page(page);
 		put_page(page);
 		return VM_FAULT_OOM;
 	}
@@ -243,6 +244,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_none(*pmd))) {
 		spin_unlock(&mm->page_table_lock);
+		mem_cgroup_uncharge_page(page);
 		put_page(page);
 		pte_free(mm, pgtable);
 	} else {
@@ -286,6 +288,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page = alloc_hugepage(transparent_hugepage_defrag(vma));
 		if (unlikely(!page))
 			goto out;
+		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+			put_page(page);
+			goto out;
+		}
 
 		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
 	}
@@ -402,9 +408,17 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
 					  vma, address);
-		if (unlikely(!pages[i])) {
-			while (--i >= 0)
+		if (unlikely(!pages[i] ||
+			     mem_cgroup_newpage_charge(pages[i], mm,
+						       GFP_KERNEL))) {
+			if (pages[i])
 				put_page(pages[i]);
+			mem_cgroup_uncharge_start();
+			while (--i >= 0) {
+				mem_cgroup_uncharge_page(pages[i]);
+				put_page(pages[i]);
+			}
+			mem_cgroup_uncharge_end();
 			kfree(pages);
 			ret |= VM_FAULT_OOM;
 			goto out;
@@ -455,8 +469,12 @@ out:
 
 out_free_pages:
 	spin_unlock(&mm->page_table_lock);
-	for (i = 0; i < HPAGE_PMD_NR; i++)
+	mem_cgroup_uncharge_start();
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		mem_cgroup_uncharge_page(pages[i]);
 		put_page(pages[i]);
+	}
+	mem_cgroup_uncharge_end();
 	kfree(pages);
 	goto out;
 }
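Both unwind loops in the two hunks above bracket their per-page uncharges with mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end(), which lets memcg coalesce uncharges against the same cgroup instead of touching the res_counter once per page. The reusable shape, as a sketch (the helper name is hypothetical; the memcg calls are the ones used above):

static void uncharge_and_put_pages(struct page **pages, int nr)
{
	int i;

	/* Open a batch so memcg can fold same-memcg uncharges together. */
	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
}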
@@ -501,14 +519,22 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 	}
 
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		put_page(new_page);
+		put_page(page);
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
 	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
 	__SetPageUptodate(new_page);
 
 	spin_lock(&mm->page_table_lock);
 	put_page(page);
-	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+		mem_cgroup_uncharge_page(new_page);
 		put_page(new_page);
-	else {
+	} else {
 		pmd_t entry;
 		VM_BUG_ON(!PageHead(page));
 		entry = mk_pmd(new_page, vma->vm_page_prot);