author    Weijie Yang <weijie.yang@samsung.com>    2014-10-14 02:51:03 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-14 04:18:12 +0400
commit    68faed630fc151a7a1c4853df00fb3dcacf782b4 (patch)
tree      d7381acab66692202b689195ee13230d83044dd6 /mm/cma.c
parent    85c9f4b04a08f6bc770b77530c22d04103468b8f (diff)
download  linux-68faed630fc151a7a1c4853df00fb3dcacf782b4.tar.xz
mm/cma: fix cma bitmap aligned mask computing
The current cma bitmap aligned mask computation is incorrect.  It could
cause an unexpected alignment when using cma_alloc() if the wanted align
order is larger than cma->order_per_bit.

Take kvm for example (PAGE_SHIFT = 12): kvm_cma->order_per_bit is set to
6.  When kvm_alloc_rma() tries to alloc kvm_rma_pages, it will use 15 as
the expected align value.  With the current implementation, however, we
get 0 as the cma bitmap aligned mask rather than 511.

This patch fixes the cma bitmap aligned mask calculation.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: <stable@vger.kernel.org>	[3.17]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
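To make the failure concrete, here is a minimal user-space sketch (not
part of the patch; "struct cma" is stripped down to the one field that
matters here) contrasting the old and fixed mask computations for the
kvm values from the message above (order_per_bit = 6, align order 15):

#include <stdio.h>

struct cma { unsigned int order_per_bit; };

/* Old, buggy version: shifts align_order right instead of subtracting. */
static unsigned long mask_old(struct cma *cma, int align_order)
{
	return (1UL << (align_order >> cma->order_per_bit)) - 1;
}

/* Fixed version from this patch. */
static unsigned long mask_new(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

int main(void)
{
	struct cma kvm_cma = { .order_per_bit = 6 };

	printf("old: %lu\n", mask_old(&kvm_cma, 15));	/* 15 >> 6 == 0, so mask is 0 */
	printf("new: %lu\n", mask_new(&kvm_cma, 15));	/* 1UL << (15 - 6) == 512, so mask is 511 */
	return 0;
}

With a zero mask the bitmap search applies no alignment constraint, so
cma_alloc() can return a region that does not meet the requested
2^15-page alignment, which is the unexpected alignment described above.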
Diffstat (limited to 'mm/cma.c')
-rw-r--r--	mm/cma.c	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/mm/cma.c b/mm/cma.c
index 474c644a0dc6..a951a3b3ed36 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -58,7 +58,9 @@ unsigned long cma_get_size(struct cma *cma)
 
 static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
 {
-	return (1UL << (align_order >> cma->order_per_bit)) - 1;
+	if (align_order <= cma->order_per_bit)
+		return 0;
+	return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)