author		Danesh Petigara <dpetigara@broadcom.com>	2015-03-12 16:25:57 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-03-12 18:46:07 -0700
commit		850fc430f47aad52092deaaeb32b99f97f0e6aca (patch)
tree		a8be132a7ddc2bf8ce84b2a2abc6aae3891746ce /mm/cma.c
parent		44fc80573cc760a7154f41fd0a958ee10eba1a81 (diff)
mm: cma: fix CMA aligned offset calculation
The CMA aligned offset calculation is incorrect for non-zero order_per_bit
values.

For example, if cma->order_per_bit=1, cma->base_pfn=0x2f800000 and
align_order=12, the function returns a value of 0x17c00 instead of 0x400.

This patch fixes the CMA aligned offset calculation.

The previous calculation was wrong and would return too-large values for the
offset, so that when cma_alloc looks for free pages in the bitmap with the
requested alignment > order_per_bit, it starts too far into the bitmap and so
CMA allocations will fail despite there actually being plenty of free pages
remaining.  It will also probably have the wrong alignment.  With this change,
we will get the correct offset into the bitmap.  One affected user is powerpc
KVM, which has kvm_cma->order_per_bit set to KVM_CMA_CHUNK_ORDER - PAGE_SHIFT,
or 18 - 12 = 6.

[gregory.0xf0@gmail.com: changelog additions]
Signed-off-by: Danesh Petigara <dpetigara@broadcom.com>
Reviewed-by: Gregory Fong <gregory.0xf0@gmail.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
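A minimal standalone sketch (illustrative only, not kernel code) that reproduces the numbers in the changelog's example. It assumes the quoted 0x2f800000 is the region's base address, i.e. base_pfn = 0x2f800000 >> PAGE_SHIFT = 0x2f800 with 4 KiB pages, and uses a local copy of the kernel's round-up ALIGN() semantics:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long base_pfn = 0x2f800;	/* assumed: 0x2f800000 >> 12 */
	int order_per_bit = 1, align_order = 12;

	/* Old (buggy) formula: aligns the PFN to a bit-count unit, mixing units. */
	unsigned long alignment = 1UL << (align_order - order_per_bit);
	unsigned long old_off = ALIGN(base_pfn, alignment) -
				(base_pfn >> order_per_bit);

	/* Fixed formula: align the PFN in page units, then convert the delta to bits. */
	unsigned long new_off = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
				>> order_per_bit;

	printf("old = 0x%lx, new = 0x%lx\n", old_off, new_off);
	return 0;
}

Compiled and run, this prints old = 0x17c00, new = 0x400, matching the offsets quoted above.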
Diffstat (limited to 'mm/cma.c')
-rw-r--r--	mm/cma.c	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd..68ecb7a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
 	return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
+/*
+ * Find a PFN aligned to the specified order and return an offset represented in
+ * order_per_bits.
+ */
 static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
 {
-	unsigned int alignment;
-
 	if (align_order <= cma->order_per_bit)
 		return 0;
-	alignment = 1UL << (align_order - cma->order_per_bit);
-	return ALIGN(cma->base_pfn, alignment) -
-		(cma->base_pfn >> cma->order_per_bit);
+
+	return (ALIGN(cma->base_pfn, (1UL << align_order))
+		- cma->base_pfn) >> cma->order_per_bit;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)
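For context, the offset computed by cma_bitmap_aligned_offset() is consumed by the bitmap search in cma_alloc(). The fragment below paraphrases that call path as it looked around this commit; it is a sketch for orientation, not an exact excerpt and not standalone code:

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);	/* fixed by this patch */
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	/*
	 * Per the changelog above: with the old, too-large offset the search
	 * started too far into the bitmap, so allocations could fail even
	 * though plenty of free pages remained.
	 */
	bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, bitmap_maxno,
						   start, bitmap_count,
						   mask, offset);
	mutex_unlock(&cma->lock);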