Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 44 ++++++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ffe2151d11ee..18ca33a1945d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4007,18 +4007,19 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
* __GFP_HIGH allows access to 50% of the min reserve as well
* as OOM.
*/
- if (alloc_flags & ALLOC_MIN_RESERVE)
+ if (alloc_flags & ALLOC_MIN_RESERVE) {
min -= min / 2;
- /*
- * Non-blocking allocations can access some of the reserve
- * with more access if also __GFP_HIGH. The reasoning is that
- * a non-blocking caller may incur a more severe penalty
- * if it cannot get memory quickly, particularly if it's
- * also __GFP_HIGH.
- */
- if (alloc_flags & ALLOC_HARDER)
- min -= min / 4;
+ /*
+ * Non-blocking allocations (e.g. GFP_ATOMIC) can
+ * access more reserves than just __GFP_HIGH. Other
+ * non-blocking allocation requests such as GFP_NOWAIT
+ * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
+ * access to the min reserve.
+ */
+ if (alloc_flags & ALLOC_NON_BLOCK)
+ min -= min / 4;
+ }
/*
* OOM victims can try even harder than the normal reserve
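To make the new reserve scaling concrete, below is a minimal userspace sketch of the arithmetic in the hunk above. The ALLOC_* values here are made-up illustration constants (the real definitions live in mm/internal.h); only the scaling mirrors the patch: a __GFP_HIGH caller may dip into 50% of the min reserve, and a non-blocking __GFP_HIGH caller (GFP_ATOMIC) into a further quarter of what remains, 62.5% in total.

	#include <stdio.h>

	/* Hypothetical flag values for illustration only; the real kernel
	 * definitions live in mm/internal.h and are not these. */
	#define ALLOC_MIN_RESERVE 0x02u
	#define ALLOC_NON_BLOCK   0x10u

	/* Mirror of the scaling in the hunk above. */
	static unsigned long scaled_min(unsigned long min, unsigned int alloc_flags)
	{
		if (alloc_flags & ALLOC_MIN_RESERVE) {
			min -= min / 2;
			if (alloc_flags & ALLOC_NON_BLOCK)
				min -= min / 4;
		}
		return min;
	}

	int main(void)
	{
		unsigned long min = 1024;	/* example min watermark, in pages */

		printf("no reserve access: %lu\n", scaled_min(min, 0));			/* 1024 */
		printf("__GFP_HIGH:        %lu\n", scaled_min(min, ALLOC_MIN_RESERVE));	/* 512 */
		printf("GFP_ATOMIC:        %lu\n",
		       scaled_min(min, ALLOC_MIN_RESERVE | ALLOC_NON_BLOCK));		/* 384 */
		return 0;
	}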
@@ -4869,28 +4870,30 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
- * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_MIN_RESERVE(__GFP_HIGH).
+ * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
*/
alloc_flags |= (__force int)
(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
- if (gfp_mask & __GFP_ATOMIC) {
+ if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
/*
* Not worth trying to allocate harder for __GFP_NOMEMALLOC even
* if it can't schedule.
*/
if (!(gfp_mask & __GFP_NOMEMALLOC)) {
- alloc_flags |= ALLOC_HARDER;
+ alloc_flags |= ALLOC_NON_BLOCK;
if (order > 0)
alloc_flags |= ALLOC_HIGHATOMIC;
}
/*
- * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
- * comment for __cpuset_node_allowed().
+ * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
+ * GFP_ATOMIC) rather than fail, see the comment for
+ * __cpuset_node_allowed().
*/
- alloc_flags &= ~ALLOC_CPUSET;
+ if (alloc_flags & ALLOC_MIN_RESERVE)
+ alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && in_task())
alloc_flags |= ALLOC_MIN_RESERVE;
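A condensed userspace model of the gfp_to_alloc_flags() mapping in the hunk above, with hypothetical flag values and deliberately simplified GFP_* masks (the real constants live in include/linux/gfp.h and mm/internal.h, and the real function also derives ALLOC_MIN_RESERVE from __GFP_HIGH via the bitwise OR shown, sets ALLOC_HIGHATOMIC for order > 0, and handles cpusets and rt_task()). The point it illustrates: only requests without __GFP_DIRECT_RECLAIM get ALLOC_NON_BLOCK, and only those that also carry __GFP_HIGH (GFP_ATOMIC) end up with both flags and hence real reserve access.

	#include <stdio.h>

	/* Hypothetical bit values, for illustration only. */
	#define __GFP_HIGH           0x01u
	#define __GFP_KSWAPD_RECLAIM 0x02u
	#define __GFP_DIRECT_RECLAIM 0x04u
	#define __GFP_NOMEMALLOC     0x08u

	/* Simplified masks; GFP_KERNEL carries more bits in reality. */
	#define GFP_ATOMIC (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)
	#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
	#define GFP_KERNEL (__GFP_KSWAPD_RECLAIM | __GFP_DIRECT_RECLAIM)

	#define ALLOC_MIN_RESERVE 0x02u
	#define ALLOC_NON_BLOCK   0x10u

	/* Condensed model of the mapping in the hunk above. */
	static unsigned int to_alloc_flags(unsigned int gfp)
	{
		unsigned int flags = 0;

		if (gfp & __GFP_HIGH)
			flags |= ALLOC_MIN_RESERVE;
		if (!(gfp & __GFP_DIRECT_RECLAIM) && !(gfp & __GFP_NOMEMALLOC))
			flags |= ALLOC_NON_BLOCK;
		return flags;
	}

	int main(void)
	{
		printf("GFP_ATOMIC -> 0x%x\n", to_alloc_flags(GFP_ATOMIC));	/* MIN_RESERVE | NON_BLOCK */
		printf("GFP_NOWAIT -> 0x%x\n", to_alloc_flags(GFP_NOWAIT));	/* NON_BLOCK only */
		printf("GFP_KERNEL -> 0x%x\n", to_alloc_flags(GFP_KERNEL));	/* neither */
		return 0;
	}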
@@ -5321,12 +5324,13 @@ nopage:
WARN_ON_ONCE_GFP(costly_order, gfp_mask);
/*
- * Help non-failing allocations by giving them access to memory
- * reserves but do not use ALLOC_NO_WATERMARKS because this
+ * Help non-failing allocations by giving some access to memory
+ * reserves normally used for high priority non-blocking
+ * allocations but do not use ALLOC_NO_WATERMARKS because this
* could deplete whole memory reserves which would just make
- * the situation worse
+ * the situation worse.
*/
- page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
+ page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
if (page)
goto got_pg;
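The practical effect of swapping ALLOC_HARDER for ALLOC_MIN_RESERVE in this nopage fallback follows from the watermark hunk: previously ALLOC_HARDER on its own only shaved a quarter off min (25% of the reserve usable), while ALLOC_MIN_RESERVE halves it (50% usable). A tiny sketch of that arithmetic, with the watermark value chosen purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long min = 1024;	/* example min watermark */

		/* Old: ALLOC_HARDER alone left 25% of the reserve usable. */
		unsigned long old_nofail = min - min / 4;	/* 768 */

		/* New: ALLOC_MIN_RESERVE halves min, so 50% becomes usable. */
		unsigned long new_nofail = min - min / 2;	/* 512 */

		printf("old ALLOC_HARDER watermark:      %lu\n", old_nofail);
		printf("new ALLOC_MIN_RESERVE watermark: %lu\n", new_nofail);
		return 0;
	}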