author     Vlastimil Babka <vbabka@suse.cz>                2015-09-09 01:03:53 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-09 01:35:28 +0300
commit     0bc35a970c01c50e3bcc4b5a612787346024e5db (patch)
tree       54724a480e193cd3edc795797fc73fdc677ab72d /include/linux/gfp.h
parent     96db800f5d73cd5c49461253d45766e094f0f8c2 (diff)
download   linux-0bc35a970c01c50e3bcc4b5a612787346024e5db.tar.xz
mm: unify checks in alloc_pages_node() and __alloc_pages_node()
Perform the same debug checks in alloc_pages_node() as are done in
__alloc_pages_node(), by making the former function a wrapper of the
latter one.

In addition to better diagnostics in DEBUG_VM builds for situations
which have been already fatal (e.g. out-of-bounds node id), there are
two visible changes for potential existing buggy callers of
alloc_pages_node():

- calling alloc_pages_node() with any negative nid (e.g. due to
  arithmetic overflow) was treated as passing NUMA_NO_NODE and fallback
  to local node was applied. This will now be fatal.

- calling alloc_pages_node() with an offline node will now be checked
  for DEBUG_VM builds. Since it's not fatal if the node has been
  previously online, and this patch may expose some existing buggy
  callers, change the VM_BUG_ON in __alloc_pages_node() to VM_WARN_ON.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/gfp.h')
-rw-r--r--  include/linux/gfp.h  |  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index d2c142bc872e..4a12cae2fb0c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -310,23 +310,23 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 static inline struct page *
 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 {
-	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+	VM_WARN_ON(!node_online(nid));
 
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
 /*
  * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
- * prefer the current CPU's node.
+ * prefer the current CPU's node. Otherwise node must be valid and online.
  */
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	/* Unknown node is current node */
-	if (nid < 0)
+	if (nid == NUMA_NO_NODE)
 		nid = numa_node_id();
 
-	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+	return __alloc_pages_node(nid, gfp_mask, order);
 }
 
 #ifdef CONFIG_NUMA
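
For illustration only (not part of this commit): a minimal sketch of a caller
adapted to the unified checks, using a hypothetical helper name
example_alloc_on_node(). It assumes the caller may compute an untrusted node
id; under the new semantics an arbitrary negative nid hits the VM_BUG_ON() in
__alloc_pages_node() (DEBUG_VM) instead of silently falling back to the local
node, so the sketch sanitizes it to NUMA_NO_NODE before calling
alloc_pages_node().

#include <linux/gfp.h>
#include <linux/numa.h>

/*
 * Illustrative only -- not part of this commit. alloc_pages_node() now
 * expects either NUMA_NO_NODE or a valid node id; an offline (but
 * previously valid) node only triggers VM_WARN_ON() in DEBUG_VM builds.
 */
static struct page *example_alloc_on_node(int nid, gfp_t gfp_mask)
{
	/* Sanitize an untrusted node id before calling the allocator. */
	if (nid < 0 || nid >= MAX_NUMNODES)
		nid = NUMA_NO_NODE;	/* no preference: use the local node */

	return alloc_pages_node(nid, gfp_mask, 0);	/* order-0 page */
}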