author	Minchan Kim <minchan@kernel.org>	2012-10-09 03:32:11 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 11:22:45 +0400
commit	95e3441248053fc06bbb1dbbd34409a84211619e (patch)
tree	8c8312184b515826ca4343fa81b77d703d291398 /mm
parent	b12c4ad14ee0232ad47c2bef404b6d42a3578332 (diff)
download	linux-95e3441248053fc06bbb1dbbd34409a84211619e.tar.xz
mm: remain migratetype in freed page
The page allocator caches the pageblock migratetype information in page->private while a page is on the PCP freelists, but this field is overwritten with the order of the page once it is freed to the buddy allocator. This patch stores the migratetype of the page in the page->index field instead, so that it stays available for as long as the page remains on a free_list.

This patch adds a new call site in __free_pages_ok, which introduces a small overhead, but only on the high-order allocation path, so the cost should be negligible.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
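For readers following the diff below, the set_freepage_migratetype() helper this patch calls is not defined here; it was introduced earlier in this series in mm/internal.h (see the parent commit). A minimal sketch of the helper pair, assuming the page->index storage described above, would look roughly like this (reconstructed for illustration, not part of this commit):

static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	/* Park the migratetype in page->index; the field is otherwise
	 * unused while the page sits on a buddy free_list. */
	page->index = migratetype;
}

static inline int get_freepage_migratetype(struct page *page)
{
	/* Read back the migratetype cached by set_freepage_migratetype(). */
	return page->index;
}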
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	7
1 file changed, 5 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6aa0a8e89c5d..94fd283dde98 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -729,6 +729,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);
+	int migratetype;

	if (!free_pages_prepare(page, order))
		return;
@@ -737,8 +738,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
-	free_one_page(page_zone(page), page, order,
-					get_pageblock_migratetype(page));
+	migratetype = get_pageblock_migratetype(page);
+	set_freepage_migratetype(page, migratetype);
+	free_one_page(page_zone(page), page, order, migratetype);
	local_irq_restore(flags);
}
@@ -959,6 +961,7 @@ static int move_freepages(struct zone *zone,
		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
+		set_freepage_migratetype(page, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}