Diffstat (limited to 'mm/cma.c'):
 mm/cma.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 54 insertions(+), 12 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 474c644..963bc4a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -58,7 +58,9 @@ unsigned long cma_get_size(struct cma *cma)
static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
- return (1UL << (align_order >> cma->order_per_bit)) - 1;
+ if (align_order <= cma->order_per_bit)
+ return 0;
+ return (1UL << (align_order - cma->order_per_bit)) - 1;
}
static unsigned long cma_bitmap_maxno(struct cma *cma)
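The change above fixes the mask computation: each bitmap bit represents 2^order_per_bit pages, so the old code's right shift understates the mask whenever order_per_bit is non-zero, and the new guard returns an empty mask when the requested alignment is no larger than the bitmap granularity. A stand-alone user-space sketch (illustrative values only, not kernel code) comparing the two:

#include <stdio.h>

/* old: shifted align_order right by order_per_bit (wrong operator) */
static unsigned long mask_old(int align_order, int order_per_bit)
{
	return (1UL << (align_order >> order_per_bit)) - 1;
}

/* new: one bitmap bit covers 2^order_per_bit pages, so an alignment of
 * 2^align_order pages spans 2^(align_order - order_per_bit) bits */
static unsigned long mask_new(int align_order, int order_per_bit)
{
	if (align_order <= order_per_bit)
		return 0;
	return (1UL << (align_order - order_per_bit)) - 1;
}

int main(void)
{
	/* align_order = 8, order_per_bit = 2:
	 * old: (1 << (8 >> 2)) - 1 = 3   (mask far too small)
	 * new: (1 << (8 - 2)) - 1 = 63   (correct: 64 bits cover 256 pages) */
	printf("old: %lu  new: %lu\n", mask_old(8, 2), mask_new(8, 2));
	return 0;
}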
@@ -141,6 +143,54 @@ static int __init cma_init_reserved_areas(void)
core_initcall(cma_init_reserved_areas);
/**
+ * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * @base: Base address of the reserved area
+ * @size: Size of the reserved area (in bytes).
+ * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function creates custom contiguous area from already reserved memory.
+ */
+int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ int order_per_bit, struct cma **res_cma)
+{
+ struct cma *cma;
+ phys_addr_t alignment;
+
+ /* Sanity checks */
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ if (!size || !memblock_is_region_reserved(base, size))
+ return -EINVAL;
+
+ /* ensure minimal alignment required by mm core */
+ alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+
+ /* alignment should be aligned with order_per_bit */
+ if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
+ return -EINVAL;
+
+ if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+ return -EINVAL;
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ cma = &cma_areas[cma_area_count];
+ cma->base_pfn = PFN_DOWN(base);
+ cma->count = size >> PAGE_SHIFT;
+ cma->order_per_bit = order_per_bit;
+ *res_cma = cma;
+ cma_area_count++;
+
+ return 0;
+}
+
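The bulk of the new function is alignment validation. A stand-alone user-space sketch of those checks, assuming PAGE_SHIFT = 12 and an alignment order of 10 (illustrative values; the kernel derives the order from max(MAX_ORDER - 1, pageblock_order)):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* simplified stand-ins for the kernel helpers used by the checks */
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)       (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long alignment = PAGE_SIZE << 10;   /* 4 MiB, illustrative */
	unsigned long base = 0x40000000;             /* hypothetical region */
	unsigned long size = 16UL << 20;             /* 16 MiB, hypothetical */
	int order_per_bit = 0;

	/* the alignment must cover a whole number of bitmap bits ... */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1UL << order_per_bit))
		return 1;
	/* ... and both base and size must be multiples of the alignment */
	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return 1;
	puts("region passes the cma_init_reserved_mem() alignment checks");
	return 0;
}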
+/**
* cma_declare_contiguous() - reserve custom contiguous area
+ * @base: Base address of the reserved area; optional, use 0 for any
+ * @size: Size of the reserved area (in bytes).
@@ -163,7 +213,6 @@ int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma)
{
- struct cma *cma;
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start = __pa(high_memory);
int ret = 0;
@@ -235,16 +284,9 @@ int __init cma_declare_contiguous(phys_addr_t base,
}
}
- /*
- * Each reserved area must be initialised later, when more kernel
- * subsystems (like slab allocator) are available.
- */
- cma = &cma_areas[cma_area_count];
- cma->base_pfn = PFN_DOWN(base);
- cma->count = size >> PAGE_SHIFT;
- cma->order_per_bit = order_per_bit;
- *res_cma = cma;
- cma_area_count++;
+ ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+ if (ret)
+ goto err;
pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);