author     Linus Torvalds <torvalds@linux-foundation.org>  2024-05-23 03:32:04 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-05-23 03:32:04 +0300
commit     5c6f4d68e2aca67e425b7227369ec9fde8adfb6d (patch)
tree       b60f38675b4572047bcb840a89cb07329a5f6c22 /include
parent     de7e71ef8bed222dd144d8878091ecb6d5dfd208 (diff)
parent     99b80ac45f7ec351c2d1c9fbfec702213dcae566 (diff)
download   linux-5c6f4d68e2aca67e425b7227369ec9fde8adfb6d.tar.xz
Merge tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more mm updates from Andrew Morton:
 "A series from Dave Chinner which cleans up and fixes the handling of
  nested allocations within stackdepot and page-owner"

* tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/page-owner: use gfp_nested_mask() instead of open coded masking
  stackdepot: use gfp_nested_mask() instead of open coded masking
  mm: lift gfp_kmemleak_mask() to gfp.h
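For orientation, a worked example (illustrative, not part of the pull request) of the flag arithmetic the new gfp_nested_mask() helper performs for a GFP_NOFS caller. GFP_NOFS is (__GFP_RECLAIM | __GFP_IO), a subset of GFP_KERNEL, so the caller's reclaim context survives the mask, zone modifiers such as __GFP_DMA or __GFP_HIGHMEM would be stripped, and the fail-fast flags are ORed in:

    gfp_nested_mask(GFP_NOFS)
        == (GFP_NOFS & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP))
           | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
        == GFP_NOFS | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN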
Diffstat (limited to 'include')
-rw-r--r--  include/linux/gfp.h  25
1 file changed, 25 insertions, 0 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 450c2cbcf04b..7f9691d375f0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -157,6 +157,31 @@ static inline int gfp_zonelist(gfp_t flags)
}

/*
+ * gfp flag masking for nested internal allocations.
+ *
+ * For code that needs to do allocations inside the public allocation API (e.g.
+ * memory allocation tracking code), the allocations need to obey the caller's
+ * allocation context constraints to prevent allocation context mismatches (e.g.
+ * GFP_KERNEL allocations in GFP_NOFS contexts) from causing potential
+ * deadlocks.
+ *
+ * It is also assumed that these nested allocations are for internal kernel
+ * object storage purposes only and are not going to be used for DMA, etc. Hence
+ * we strip out all the zone information and leave just the context information
+ * intact.
+ *
+ * Further, internal allocations must fail before the higher level allocation
+ * can fail, so we must make them fail faster and fail silently. We also don't
+ * want them to deplete emergency reserves. Hence code making nested
+ * allocations must be prepared for them to fail.
+ */
+static inline gfp_t gfp_nested_mask(gfp_t flags)
+{
+	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
+		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
+}
+
+/*
* We get the zone list from the current node and the gfp_mask.
* This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
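Below is a minimal usage sketch (hypothetical, not taken from this merge) of how a subsystem that allocates from inside an allocation path, in the spirit of page-owner or stackdepot, might apply the new helper. struct meta_record and record_alloc_metadata() are made-up names for illustration:

#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical per-allocation metadata record. */
struct meta_record {
	unsigned long handle;
};

/*
 * Called from inside an allocation path with the caller's gfp flags.
 * gfp_nested_mask() keeps only the caller's context bits (so a GFP_NOFS
 * caller never triggers a GFP_KERNEL-style nested allocation), drops
 * zone/placement modifiers, and adds __GFP_NORETRY | __GFP_NOMEMALLOC |
 * __GFP_NOWARN so the nested allocation fails fast and silently without
 * dipping into emergency reserves.
 */
static struct meta_record *record_alloc_metadata(gfp_t caller_gfp)
{
	struct meta_record *rec;

	rec = kmalloc(sizeof(*rec), gfp_nested_mask(caller_gfp));
	if (!rec)
		return NULL;	/* callers must tolerate failure */

	rec->handle = 0;
	return rec;
}

The page-owner and stackdepot patches in this pull follow the same pattern, replacing their previously open-coded masks with gfp_nested_mask().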