author     Cyril Chemparathy <cyril@ti.com>    2012-07-21 15:55:04 -0400
committer  Will Deacon <will.deacon@arm.com>   2013-05-30 16:02:15 +0100
commit     4756dcbfd37819a8359d3c69a22be2ee41666d0f (patch)
tree       a9c172f6940e607b9c0c9a3bd7393bf0cc03e940 /arch/arm/include
parent     a7fbc0d62a4d46e642af889e7288fede5078bc46 (diff)
ARM: LPAE: accommodate >32-bit addresses for page table base
This patch redefines the early boot time use of the R4 register to steal a few low order bits (ARCH_PGD_SHIFT bits) on LPAE systems. This allows for up to 38-bit physical addresses.

Signed-off-by: Cyril Chemparathy <cyril@ti.com>
Signed-off-by: Vitaly Andrianov <vitalya@ti.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Subash Patel <subash.rp@samsung.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
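For context, here is a minimal sketch of the packing idea, assuming a 64-byte L1 cache line (L1_CACHE_SHIFT = 6); the pack_pgd/unpack_pgd helpers are illustrative names and are not part of this patch or the kernel:

#include <stdint.h>

/* Assumed values for illustration only: on LPAE the patch sets
 * ARCH_PGD_SHIFT to L1_CACHE_SHIFT, taken here to be 6. */
#define ARCH_PGD_SHIFT	6
#define ARCH_PGD_MASK	((1 << ARCH_PGD_SHIFT) - 1)

/* A pgd allocated with at least cache-line alignment has its low
 * ARCH_PGD_SHIFT bits clear, so shifting right is lossless and the
 * packed 32-bit value can describe physical addresses up to
 * 1ULL << (32 + ARCH_PGD_SHIFT), i.e. roughly 38 bits. */
static inline uint32_t pack_pgd(uint64_t pgd_phys)
{
	return (uint32_t)(pgd_phys >> ARCH_PGD_SHIFT);
}

static inline uint64_t unpack_pgd(uint32_t r4_value)
{
	return (uint64_t)r4_value << ARCH_PGD_SHIFT;
}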
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/memory.h | 16
1 file changed, 16 insertions, 0 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 57870ab..e506088 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,8 @@
#include <linux/types.h>
#include <linux/sizes.h>
+#include <asm/cache.h>
+
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
@@ -141,6 +143,20 @@
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+/*
+ * Minimum guaranteed alignment in pgd_alloc(). The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension. This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE, but
+ * gives us about 38 bits or so.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT 0
+#endif
+#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+
#ifndef __ASSEMBLY__
/*
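A short follow-up sketch on ARCH_PGD_MASK (again hypothetical, with assumed values; pgd_phys_to_reg is not a kernel function): the mask names the low bits that must already be zero in the page table base for the shift to be lossless, so a caller could sanity-check alignment before handing the shifted value to head.S or proc-*.S. On non-LPAE builds ARCH_PGD_SHIFT is 0 and the mask is 0, so both the check and the shift degenerate to no-ops.

#include <assert.h>
#include <stdint.h>

/* Assumed LPAE configuration: ARCH_PGD_SHIFT = L1_CACHE_SHIFT = 6. */
#define ARCH_PGD_SHIFT	6
#define ARCH_PGD_MASK	((1 << ARCH_PGD_SHIFT) - 1)

static uint32_t pgd_phys_to_reg(uint64_t pgd_phys)
{
	/* The base must be cache-line aligned, otherwise low bits are lost. */
	assert((pgd_phys & ARCH_PGD_MASK) == 0);
	return (uint32_t)(pgd_phys >> ARCH_PGD_SHIFT);
}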