From 0fe6e20b9c4c53b3e97096ee73a0857f60aad43f Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Fri, 28 May 2010 09:29:16 +0900
Subject: hugetlb, rmap: add reverse mapping for hugepage

This patch adds a reverse mapping feature for hugepages by introducing
a mapcount for shared/private-mapped hugepages and an anon_vma for
private-mapped hugepages.

While hugepages are not currently swappable, reverse mapping is still
useful for the memory error handler. Without this patch, the memory
error handler can neither identify the processes using a bad hugepage
nor unmap it from them. That is:

- for shared hugepages:
  we can collect the processes using a hugepage through the pagecache,
  but cannot unmap the hugepage because of the lack of a mapcount.

- for privately mapped hugepages:
  we can neither collect the processes nor unmap the hugepage.

This patch solves these problems.

This patch includes the bug fix given by commit 23be7468e8, so it also
reverts that commit.

Dependency:
  "hugetlb: move definition of is_vm_hugetlb_page() to hugetlb_inline.h"

ChangeLog since May 24:
- create hugetlb_inline.h and move is_vm_hugetlb_page() into it.
- move the functions that set up an anon_vma for hugepages into mm/rmap.c.

ChangeLog since May 13:
- rebased to 2.6.34
- fix a logic error (for the case where a private mapping and a shared
  mapping coexist)
- move is_vm_hugetlb_page() into include/linux/mm.h so that it can be
  used from linear_page_index()
- define and use linear_hugepage_index() instead of compound_order()
- use page_move_anon_rmap() in hugetlb_cow()
- copy the exclusive switch of __set_page_anon_rmap() into the hugepage
  counterpart
- revert commit 23be7468 completely

Signed-off-by: Naoya Horiguchi
Cc: Andi Kleen
Cc: Andrew Morton
Cc: Mel Gorman
Cc: Andrea Arcangeli
Cc: Larry Woodman
Cc: Lee Schermerhorn
Acked-by: Fengguang Wu
Acked-by: Mel Gorman
Signed-off-by: Andi Kleen
---
 mm/rmap.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
(limited to 'mm/rmap.c')

diff --git a/mm/rmap.c b/mm/rmap.c
index 38a336e2eea1..0ad53572eaf2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
+#include <linux/hugetlb.h>
 
 #include <asm/tlbflush.h>
 
@@ -326,6 +327,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 	unsigned long address;
 
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		pgoff = page->index << huge_page_order(page_hstate(page));
 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
 		/* page should be within @vma mapping range */
@@ -369,6 +372,12 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	if (unlikely(PageHuge(page))) {
+		pte = huge_pte_offset(mm, address);
+		ptl = &mm->page_table_lock;
+		goto check;
+	}
+
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		return NULL;
@@ -389,6 +398,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	}
 
 	ptl = pte_lockptr(mm, pmd);
+check:
 	spin_lock(ptl);
 	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 		*ptlp = ptl;
@@ -873,6 +883,12 @@ void page_remove_rmap(struct page *page)
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
+	/*
+	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
+	 * and not charged by memcg for now.
+	 */
+	if (unlikely(PageHuge(page)))
+		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
 		__dec_zone_page_state(page, NR_ANON_PAGES);
@@ -1445,3 +1461,46 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 		return rmap_walk_file(page, rmap_one, arg);
 }
 #endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * The following three functions are for anonymous (private mapped) hugepages.
+ * Unlike common anonymous pages, anonymous hugepages have no accounting code
+ * and no lru code, because we handle hugepages differently from common pages.
+ */
+static void __hugepage_set_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+	BUG_ON(!anon_vma);
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev,
+				 struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+	page->index = linear_page_index(vma, address);
+}
+
+void hugepage_add_anon_rmap(struct page *page,
+			    struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+	int first;
+	BUG_ON(!anon_vma);
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	first = atomic_inc_and_test(&page->_mapcount);
+	if (first)
+		__hugepage_set_anon_rmap(page, vma, address, 0);
+}
+
+void hugepage_add_new_anon_rmap(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	atomic_set(&page->_mapcount, 0);
+	__hugepage_set_anon_rmap(page, vma, address, 1);
+}
+#endif /* CONFIG_HUGETLBFS */
--
cgit v1.2.3

From e3390f67a7267daa227380b6f1bbf13c7ddd4aff Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Tue, 15 Jun 2010 13:18:13 +0900
Subject: hwpoison: rename CONFIG

CONFIG_HUGETLBFS controls the hugetlbfs interface code. CONFIG_HUGETLB_PAGE,
on the other hand, controls the hugepage management code, so it is the one
we should use here.

Signed-off-by: Naoya Horiguchi
Signed-off-by: Andi Kleen
---
 include/linux/hugetlb_inline.h | 4 ++--
 mm/rmap.c                      | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
(limited to 'mm/rmap.c')

diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index cf00b6df53dc..6931489a5c14 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -1,7 +1,7 @@
 #ifndef _LINUX_HUGETLB_INLINE_H
-#define _LINUX_HUGETLB_INLINE_H 1
+#define _LINUX_HUGETLB_INLINE_H
 
-#ifdef CONFIG_HUGETLBFS
+#ifdef CONFIG_HUGETLB_PAGE
 
 #include <linux/mm.h>
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 0ad53572eaf2..71bd30a147cf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1462,7 +1462,7 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 }
 #endif /* CONFIG_MIGRATION */
 
-#ifdef CONFIG_HUGETLBFS
+#ifdef CONFIG_HUGETLB_PAGE
 /*
  * The following three functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
@@ -1503,4 +1503,4 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	atomic_set(&page->_mapcount, 0);
 	__hugepage_set_anon_rmap(page, vma, address, 1);
 }
-#endif /* CONFIG_HUGETLBFS */
+#endif /* CONFIG_HUGETLB_PAGE */
--
cgit v1.2.3
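
A note on the _mapcount convention the new helpers above rely on:
struct page's _mapcount is biased to -1 while the page is unmapped, so
the first mapper's increment brings it to 0. That is why
hugepage_add_anon_rmap() takes a true return from atomic_inc_and_test()
(increment, then test for zero) to mean "first mapping", and why
hugepage_add_new_anon_rmap() can simply atomic_set() the count to 0 for
a freshly faulted page, which by definition has exactly one mapping.
The stand-alone user-space sketch below (a mock struct page and a C11
stand-in for the kernel's atomic_inc_and_test(); not kernel code)
demonstrates the same logic:

	/*
	 * Stand-alone sketch (mock types, user-space C11 atomics; NOT
	 * kernel code) of the _mapcount convention: the count is biased
	 * to -1 while the page is unmapped, so "increment and test for
	 * zero" is true exactly once, for the first mapper.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	struct page {
		atomic_int _mapcount;	/* -1 = unmapped, 0 = mapped once */
	};

	/* user-space stand-in for the kernel's atomic_inc_and_test() */
	static int atomic_inc_and_test(atomic_int *v)
	{
		return atomic_fetch_add(v, 1) + 1 == 0;
	}

	int main(void)
	{
		struct page page = { ._mapcount = -1 };	/* fresh page */

		/* first mapping: -1 -> 0, so this mapper is "first" */
		printf("first map:  first=%d, mapcount=%d\n",
		       atomic_inc_and_test(&page._mapcount),
		       atomic_load(&page._mapcount));

		/* second mapping of the same page: 0 -> 1, not "first" */
		printf("second map: first=%d, mapcount=%d\n",
		       atomic_inc_and_test(&page._mapcount),
		       atomic_load(&page._mapcount));
		return 0;
	}

Compiled with e.g. "gcc -std=c11 mapcount.c" (the file name is just an
example), this prints first=1 for the first mapping and first=0 for the
second, mirroring the point at which hugepage_add_anon_rmap() would
attach the anon_vma to the page.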