author      Mel Gorman <mgorman@suse.de>    2014-01-24 03:53:38 +0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2014-01-24 04:36:53 +0400
commit      6c14466cc00ff13121ae782d33d9df0fde20b124 (patch)
tree        65cf09a45f47de60638fdd1fe3140f9b8bdc27b9 /mm
parent      0eef615665ede1e0d603ea9ecca88c1da6f02234 (diff)
download    linux-6c14466cc00ff13121ae782d33d9df0fde20b124.tar.xz
mm: improve documentation of page_order
Developers occasionally try to optimise PFN scanners by using page_order
but miss that in general it requires zone->lock.  This has happened twice
for compaction.c and been rejected both times.  This patch clarifies the
documentation of page_order and adds a note to compaction.c explaining why
page_order is not used there.

[akpm@linux-foundation.org: tweaks]
[lauraa@codeaurora.org: Corrected a page_zone(page)->lock reference]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
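To make the locking rule in the message concrete, here is a minimal sketch of a PFN scanner reading the buddy order safely. It is not part of the patch: scan_free_order() is a hypothetical helper, while zone->lock, PageBuddy() and page_order() are the kernel symbols the patch talks about.

/* Hypothetical helper, illustration only -- not from this patch. */
static unsigned long scan_free_order(struct zone *zone, struct page *page)
{
	unsigned long flags;
	unsigned long order = 0;

	spin_lock_irqsave(&zone->lock, flags);
	/*
	 * Both the flag test and the order read must happen under
	 * zone->lock: once the lock is dropped, a parallel allocation
	 * or buddy merge can clear PageBuddy() and reuse the field
	 * that holds the order.
	 */
	if (PageBuddy(page))
		order = page_order(page);
	spin_unlock_irqrestore(&zone->lock, flags);

	return order;
}

Compaction deliberately avoids this pattern, as the note added to compaction.c below explains.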
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c  5
-rw-r--r--  mm/internal.h    8
2 files changed, 9 insertions, 4 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index e0ab02d70f13..b48c5259ea33 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -523,7 +523,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!isolation_suitable(cc, page))
 			goto next_pageblock;
 
-		/* Skip if free */
+		/*
+		 * Skip if free. page_order cannot be used without zone->lock
+		 * as nothing prevents parallel allocations or buddy merging.
+		 */
 		if (PageBuddy(page))
 			continue;
diff --git a/mm/internal.h b/mm/internal.h
index 7e145e8cd1e6..612c14f5e0f5 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -143,9 +143,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 #endif
 
 /*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
+ * This function returns the order of a free page in the buddy system. In
+ * general, page_zone(page)->lock must be held by the caller to prevent the
+ * page from being allocated in parallel and returning garbage as the order.
+ * If a caller does not hold page_zone(page)->lock, it must guarantee that the
+ * page cannot be allocated or merged in parallel.
  */
 static inline unsigned long page_order(struct page *page)
 {
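For context, and not shown in this hunk: in this kernel page_order() boils down to a one-line read of the order that the buddy allocator stores in the page's private field, which is why an unlocked read can return garbage. A sketch under that assumption (named page_order_sketch here to make clear it is illustrative, not the patched source):

static inline unsigned long page_order_sketch(struct page *page)
{
	/* Only meaningful while PageBuddy(page) is set and zone->lock is held. */
	return page_private(page);
}

Anything that allocates or merges the page rewrites that field, so a caller that cannot take zone->lock must, as the new comment says, guarantee the page cannot be allocated or merged in parallel.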