author		Mel Gorman <mgorman@techsingularity.net>	2023-01-13 14:12:14 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-03 09:33:12 +0300
commit		eb2e2b425c6984ca8034448a3f2c680622bd3d4d (patch)
tree		dce2ee2aff64a17bd8d086c6dc2040a8014dff95 /mm/page_alloc.c
parent		c988dcbecf3fd5430921eaa3fe9054754f76d185 (diff)
download	linux-eb2e2b425c6984ca8034448a3f2c680622bd3d4d.tar.xz
mm/page_alloc: explicitly record high-order atomic allocations in alloc_flags
A high-order ALLOC_HARDER allocation is assumed to be atomic.  While that
is accurate, it changes later in the series.  In preparation, explicitly
record high-order atomic allocations in gfp_to_alloc_flags().

Link: https://lkml.kernel.org/r/20230113111217.14134-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Thierry Reding <thierry.reding@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
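For reference, the patch also adds the new flag next to the existing ALLOC_* bits in mm/internal.h; that file is outside the diffstat shown on this page, so the sketch below is an assumption about the bit layout rather than part of the displayed diff:

/*
 * Sketch of the internal alloc_flags bits around this series (mm/internal.h).
 * The exact value chosen for ALLOC_HIGHATOMIC is an assumption; only the
 * mm/page_alloc.c side of the change is shown in the diff below.
 */
#define ALLOC_HARDER		0x10  /* try to alloc harder */
#define ALLOC_HIGH		0x20  /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40  /* check for correct cpuset */
#define ALLOC_HIGHATOMIC	0x200 /* allows access to MIGRATE_HIGHATOMIC reserves */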
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	29
1 file changed, 23 insertions(+), 6 deletions(-)
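To make the affected path concrete, here is a hypothetical caller (not part of this patch): a high-order GFP_ATOMIC request which, if it reaches the allocator slowpath, is now explicitly marked ALLOC_HIGHATOMIC by gfp_to_alloc_flags() because order > 0 and __GFP_NOMEMALLOC is not set:

/*
 * Hypothetical example of the kind of allocation this patch classifies:
 * an order-2 atomic allocation, e.g. from interrupt context.  GFP_ATOMIC
 * does not include __GFP_NOMEMALLOC, so under this patch the slowpath
 * records the request as ALLOC_HIGHATOMIC rather than inferring it from
 * "order > 0 && ALLOC_HARDER".
 */
struct page *page = alloc_pages(GFP_ATOMIC, 2);

if (page)
	__free_pages(page, 2);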
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f5c2e83cb7b..8a7e1cfa8353 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3724,10 +3724,20 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
* reserved for high-order atomic allocation, so order-0
* request should skip it.
*/
- if (order > 0 && alloc_flags & ALLOC_HARDER)
+ if (alloc_flags & ALLOC_HIGHATOMIC)
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (!page) {
page = __rmqueue(zone, order, migratetype, alloc_flags);
+
+ /*
+ * If the allocation fails, allow OOM handling access
+ * to HIGHATOMIC reserves as failing now is worse than
+ * failing a high-order atomic allocation in the
+ * future.
+ */
+ if (!page && (alloc_flags & ALLOC_OOM))
+ page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+
if (!page) {
spin_unlock_irqrestore(&zone->lock, flags);
return NULL;
@@ -4041,8 +4051,10 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true;
}
#endif
- if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
+ if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
+ !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
return true;
+ }
}
return false;
}
@@ -4304,7 +4316,7 @@ try_this_zone:
* If this is a high-order atomic allocation then check
* if the pageblock should be reserved for the future
*/
- if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
+ if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
reserve_highatomic_pageblock(page, zone, order);
return page;
@@ -4831,7 +4843,7 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
}
static inline unsigned int
-gfp_to_alloc_flags(gfp_t gfp_mask)
+gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
{
unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
@@ -4857,8 +4869,13 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* Not worth trying to allocate harder for __GFP_NOMEMALLOC even
* if it can't schedule.
*/
- if (!(gfp_mask & __GFP_NOMEMALLOC))
+ if (!(gfp_mask & __GFP_NOMEMALLOC)) {
alloc_flags |= ALLOC_HARDER;
+
+ if (order > 0)
+ alloc_flags |= ALLOC_HIGHATOMIC;
+ }
+
/*
* Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
* comment for __cpuset_node_allowed().
@@ -5066,7 +5083,7 @@ restart:
* kswapd needs to be woken up, and to avoid the cost of setting up
* alloc_flags precisely. So we do that now.
*/
- alloc_flags = gfp_to_alloc_flags(gfp_mask);
+ alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
/*
* We need to recalculate the starting point for the zonelist iterator