Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig | 14
-rw-r--r--  arch/arm/mm/alignment.c | 19
-rw-r--r--  arch/arm/mm/cache-fa.S | 12
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 78
-rw-r--r--  arch/arm/mm/cache-v3.S | 10
-rw-r--r--  arch/arm/mm/cache-v4.S | 10
-rw-r--r--  arch/arm/mm/cache-v4wb.S | 12
-rw-r--r--  arch/arm/mm/cache-v4wt.S | 12
-rw-r--r--  arch/arm/mm/cache-v6.S | 58
-rw-r--r--  arch/arm/mm/cache-v7.S | 57
-rw-r--r--  arch/arm/mm/copypage-v4mc.c | 2
-rw-r--r--  arch/arm/mm/copypage-v6.c | 2
-rw-r--r--  arch/arm/mm/copypage-xscale.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 10
-rw-r--r--  arch/arm/mm/fault-armv.c | 40
-rw-r--r--  arch/arm/mm/fault.c | 13
-rw-r--r--  arch/arm/mm/flush.c | 69
-rw-r--r--  arch/arm/mm/highmem.c | 24
-rw-r--r--  arch/arm/mm/init.c | 186
-rw-r--r--  arch/arm/mm/ioremap.c | 8
-rw-r--r--  arch/arm/mm/mmap.c | 22
-rw-r--r--  arch/arm/mm/mmu.c | 146
-rw-r--r--  arch/arm/mm/pgd.c | 4
-rw-r--r--  arch/arm/mm/proc-arm1020.S | 17
-rw-r--r--  arch/arm/mm/proc-arm1020e.S | 17
-rw-r--r--  arch/arm/mm/proc-arm1022.S | 17
-rw-r--r--  arch/arm/mm/proc-arm1026.S | 17
-rw-r--r--  arch/arm/mm/proc-arm6_7.S | 2
-rw-r--r--  arch/arm/mm/proc-arm720.S | 2
-rw-r--r--  arch/arm/mm/proc-arm740.S | 2
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S | 2
-rw-r--r--  arch/arm/mm/proc-arm920.S | 14
-rw-r--r--  arch/arm/mm/proc-arm922.S | 14
-rw-r--r--  arch/arm/mm/proc-arm925.S | 14
-rw-r--r--  arch/arm/mm/proc-arm926.S | 14
-rw-r--r--  arch/arm/mm/proc-arm940.S | 14
-rw-r--r--  arch/arm/mm/proc-arm946.S | 14
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S | 2
-rw-r--r--  arch/arm/mm/proc-fa526.S | 2
-rw-r--r--  arch/arm/mm/proc-feroceon.S | 15
-rw-r--r--  arch/arm/mm/proc-macros.S | 22
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 2
-rw-r--r--  arch/arm/mm/proc-sa110.S | 2
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 2
-rw-r--r--  arch/arm/mm/proc-v6.S | 49
-rw-r--r--  arch/arm/mm/proc-v7.S | 115
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 14
-rw-r--r--  arch/arm/mm/proc-xscale.S | 14
-rw-r--r--  arch/arm/mm/tlb-v7.S | 33
49 files changed, 942 insertions, 300 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e1fd98f..4414a01 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@ config CPU_V6
# ARMv6k
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
- depends on CPU_V6
+ depends on CPU_V6 || CPU_V7
default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
help
Say Y here if your ARMv6 processor supports the 'K' extension.
@@ -771,14 +771,22 @@ config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
- ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
- ARCH_TEGRA
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \
+ ARCH_U8500 || ARCH_VEXPRESS_CA9X4
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
help
This option enables the L2x0 PrimeCell.
+config CACHE_PL310
+ bool
+ depends on CACHE_L2X0
+ default y if CPU_V7 && !CPU_V6
+ help
+ This option enables optimisations for the PL310 cache
+ controller.
+
config CACHE_TAUROS2
bool "Enable the Tauros2 L2 cache controller"
depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64..724ba3b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
- else
- set_cr(cr_no_alignment);
+ else {
+ /*
+ * We're about to disable the alignment trap and return to
+ * user space. But if an interrupt occurs before actually
+ * reaching user space, then the IRQ vector entry code will
+ * notice that we were still in kernel space and therefore
+ * the alignment trap won't be re-enabled in that case as it
+ * is presumed to be always on from kernel space.
+ * Let's prevent that race by disabling interrupts here (they
+ * are disabled on the way back to user space anyway in
+ * entry-common.S) and disable the alignment trap only if
+ * there is no work pending for this thread.
+ */
+ raw_local_irq_disable();
+ if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+ set_cr(cr_no_alignment);
+ }
return 0;
}
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53..1fa6f71 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -38,6 +38,17 @@
#define CACHE_DLIMIT (CACHE_DSIZE * 2)
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(fa_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(fa_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
@@ -233,6 +244,7 @@ ENDPROC(fa_dma_unmap_area)
.type fa_cache_fns, #object
ENTRY(fa_cache_fns)
+ .long fa_flush_icache_all
.long fa_flush_kern_cache_all
.long fa_flush_user_cache_all
.long fa_flush_user_cache_range
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb3..170c9bb 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,14 +28,24 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
+static uint32_t l2x0_size;
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
- /* wait for the operation to complete */
+ /* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
;
}
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+ /* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait cache_wait_way
+#endif
+
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
@@ -103,14 +113,40 @@ static void l2x0_cache_sync(void)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+ unsigned long flags;
+
+ /* clean all ways */
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+ cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_clean_all(void)
+{
+ unsigned long flags;
+
+ /* clean all ways */
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+ cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
{
unsigned long flags;
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
+ /* Invalidating when L2 is enabled is a nono */
+ BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+ cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -159,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
+ if ((end - start) >= l2x0_size) {
+ l2x0_clean_all();
+ return;
+ }
+
spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
@@ -184,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
+ if ((end - start) >= l2x0_size) {
+ l2x0_flush_all();
+ return;
+ }
+
spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
@@ -206,10 +252,20 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
+static void l2x0_disable(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel(0, l2x0_base + L2X0_CTRL);
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
__u32 cache_id;
+ __u32 way_size = 0;
int ways;
const char *type;
@@ -244,6 +300,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
l2x0_way_mask = (1 << ways) - 1;
/*
+ * L2 cache Size = Way size * Number of ways
+ */
+ way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+ way_size = 1 << (way_size + 3);
+ l2x0_size = ways * way_size * SZ_1K;
+
+ /*
* Check if l2x0 controller is already enabled.
* If you are booting from non-secure mode
* accessing the below registers will fault.
@@ -263,8 +326,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
+ outer_cache.flush_all = l2x0_flush_all;
+ outer_cache.inv_all = l2x0_inv_all;
+ outer_cache.disable = l2x0_disable;
printk(KERN_INFO "%s cache controller enabled\n", type);
- printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
- ways, cache_id, aux);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+ ways, cache_id, aux, l2x0_size);
}
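
The way-size arithmetic added to l2x0_init() above is easier to follow outside the diff. A minimal illustrative sketch (not part of the patch; it assumes the 3-bit way-size field sits at bits [19:17] of AUX_CTRL, which is what the L2X0_AUX_CTRL_WAY_SIZE_MASK >> 17 usage implies):

#include <stdio.h>

/* Illustrative only: mirrors the size calculation in l2x0_init() above. */
static unsigned int l2x0_total_size(unsigned int aux, unsigned int ways)
{
	unsigned int field = (aux >> 17) & 0x7;		/* way-size encoding */
	unsigned int way_size_kb = 1 << (field + 3);	/* 8 KB << field */

	return ways * way_size_kb * 1024;		/* bytes, as printed by l2x0_init() */
}

int main(void)
{
	/* e.g. an 8-way PL310 with 32 KB ways (field = 2) -> 262144 B (256 KB) */
	printf("%u bytes\n", l2x0_total_size(2 << 17, 8));
	return 0;
}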
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c5..2e2bc40 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -13,6 +13,15 @@
#include "proc-macros.S"
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v3_flush_icache_all)
+ mov pc, lr
+ENDPROC(v3_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -122,6 +131,7 @@ ENDPROC(v3_dma_map_area)
.type v3_cache_fns, #object
ENTRY(v3_cache_fns)
+ .long v3_flush_icache_all
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e..a8fefb5 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -13,6 +13,15 @@
#include "proc-macros.S"
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4_flush_icache_all)
+ mov pc, lr
+ENDPROC(v4_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -134,6 +143,7 @@ ENDPROC(v4_dma_map_area)
.type v4_cache_fns, #object
ENTRY(v4_cache_fns)
+ .long v4_flush_icache_all
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368a..d3644db 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -51,6 +51,17 @@ flush_base:
.text
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wb_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wb_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
@@ -244,6 +255,7 @@ ENDPROC(v4wb_dma_unmap_area)
.type v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
+ .long v4wb_flush_icache_all
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c7031..49c2b66 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -41,6 +41,17 @@
#define CACHE_DLIMIT 16384
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wt_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wt_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -188,6 +199,7 @@ ENDPROC(v4wt_dma_map_area)
.type v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
+ .long v4wt_flush_icache_all
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689..c96fa1b 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -21,18 +21,22 @@
#define D_CACHE_LINE_SIZE 32
#define BTB_FLUSH_SIZE 8
-#ifdef CONFIG_ARM_ERRATA_411920
/*
- * Invalidate the entire I cache (this code is a workaround for the ARM1136
- * erratum 411920 - Invalidate Instruction Cache operation can fail. This
- * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
+ * v6_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
+ * This erratum is present in 1136, 1156 and 1176. It does not affect the
+ * MPCore.
*
- * Registers:
- * r0 - set to 0
- * r1 - corrupted
+ * Registers:
+ * r0 - set to 0
+ * r1 - corrupted
*/
-ENTRY(v6_icache_inval_all)
+ENTRY(v6_flush_icache_all)
mov r0, #0
+#ifdef CONFIG_ARM_ERRATA_411920
mrs r1, cpsr
cpsid ifa @ disable interrupts
mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
@@ -43,8 +47,11 @@ ENTRY(v6_icache_inval_all)
.rept 11 @ ARM Ltd recommends at least
nop @ 11 NOPs
.endr
- mov pc, lr
+#else
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
#endif
+ mov pc, lr
+ENDPROC(v6_flush_icache_all)
/*
* v6_flush_cache_all()
@@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all)
#ifndef CONFIG_ARM_ERRATA_411920
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
- b v6_icache_inval_all
+ b v6_flush_icache_all
#endif
#else
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
@@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range)
#ifndef CONFIG_ARM_ERRATA_411920
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
- b v6_icache_inval_all
+ b v6_flush_icache_all
#endif
#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
@@ -196,6 +203,10 @@ ENTRY(v6_flush_kern_dcache_area)
* - end - virtual end address of region
*/
v6_dma_inv_range:
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrb r2, [r0] @ read for ownership
+ strb r2, [r0] @ write for ownership
+#endif
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -204,6 +215,10 @@ v6_dma_inv_range:
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrneb r2, [r1, #-1] @ read for ownership
+ strneb r2, [r1, #-1] @ write for ownership
+#endif
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
@@ -211,10 +226,6 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
-#ifdef CONFIG_DMA_CACHE_RWFO
- ldr r2, [r0] @ read for ownership
- str r2, [r0] @ write for ownership
-#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
@@ -222,6 +233,10 @@ v6_dma_inv_range:
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrlo r2, [r0] @ read for ownership
+ strlo r2, [r0] @ write for ownership
+#endif
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -256,12 +271,12 @@ v6_dma_clean_range:
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
- bic r0, r0, #D_CACHE_LINE_SIZE - 1
-1:
#ifdef CONFIG_DMA_CACHE_RWFO
- ldr r2, [r0] @ read for ownership
- str r2, [r0] @ write for ownership
+ ldrb r2, [r0] @ read for ownership
+ strb r2, [r0] @ write for ownership
#endif
+ bic r0, r0, #D_CACHE_LINE_SIZE - 1
+1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
@@ -269,6 +284,10 @@ ENTRY(v6_dma_flush_range)
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrlob r2, [r0] @ read for ownership
+ strlob r2, [r0] @ write for ownership
+#endif
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -312,6 +331,7 @@ ENDPROC(v6_dma_unmap_area)
.type v6_cache_fns, #object
ENTRY(v6_cache_fns)
+ .long v6_flush_icache_all
.long v6_flush_kern_cache_all
.long v6_flush_user_cache_all
.long v6_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157..6136e68 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -18,6 +18,21 @@
#include "proc-macros.S"
/*
+ * v7_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * Registers:
+ * r0 - set to 0
+ */
+ENTRY(v7_flush_icache_all)
+ mov r0, #0
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
+ mov pc, lr
+ENDPROC(v7_flush_icache_all)
+
+/*
* v7_flush_dcache_all()
*
* Flush the whole D-cache.
@@ -91,11 +106,8 @@ ENTRY(v7_flush_kern_cache_all)
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
-#else
- mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
@@ -161,21 +173,25 @@ ENTRY(v7_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
- bic r0, r0, r3
+ bic r12, r0, r3
1:
- USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification
+ USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
+ add r12, r12, r2
+ cmp r12, r1
+ blo 1b
dsb
- USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line
- add r0, r0, r2
+ icache_line_size r2, r3
+ sub r3, r2, #1
+ bic r12, r0, r3
2:
- cmp r0, r1
- blo 1b
+ USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line
+ add r12, r12, r2
+ cmp r12, r1
+ blo 2b
+3:
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
-#else
- mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
dsb
isb
mov pc, lr
@@ -185,10 +201,10 @@ ENTRY(v7_coherent_user_range)
* isn't mapped, just try the next page.
*/
9001:
- mov r0, r0, lsr #12
- mov r0, r0, lsl #12
- add r0, r0, #4096
- b 2b
+ mov r12, r12, lsr #12
+ mov r12, r12, lsl #12
+ add r12, r12, #4096
+ b 3b
UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
@@ -309,6 +325,7 @@ ENDPROC(v7_dma_unmap_area)
.type v7_cache_fns, #object
ENTRY(v7_cache_fns)
+ .long v7_flush_icache_all
.long v7_flush_kern_cache_all
.long v7_flush_user_cache_all
.long v7_flush_user_cache_range
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51a..b806151 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
{
void *kto = kmap_atomic(to, KM_USER1);
- if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa10..bdba6c6 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
- if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0a..649bbcd 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
{
void *kto = kmap_atomic(to, KM_USER1);
- if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c704eed..ac6a361 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
* fragmentation of the DMA space, and also prevents allocations
* smaller than a section from crossing a section boundary.
*/
- bit = fls(size - 1) + 1;
+ bit = fls(size - 1);
if (bit > SECTION_SHIFT)
bit = SECTION_SHIFT;
align = 1 << bit;
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
}
} while (size -= PAGE_SIZE);
+ dsb();
+
return (void *)c->vm_start;
}
return NULL;
@@ -521,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
outer_inv_range(paddr, paddr + size);
dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+ /*
+ * Mark the D-cache clean for this page to avoid extra flushing.
+ */
+ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+ set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906de..83e59f8 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+#if __LINUX_ARM_ARCH__ < 6
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
@@ -65,6 +66,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+ spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
{
@@ -88,13 +113,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
* open-code the spin-locking.
*/
ptl = pte_lockptr(vma->vm_mm, pmd);
- pte = pte_offset_map_nested(pmd, address);
- spin_lock(ptl);
+ pte = pte_offset_map(pmd, address);
+ do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
- spin_unlock(ptl);
- pte_unmap_nested(pte);
+ do_pte_unlock(ptl);
+ pte_unmap(pte);
return ret;
}
@@ -141,7 +166,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
- * 1. If PG_dcache_dirty is set for the page, we need to ensure
+ * 1. If PG_dcache_clean is not set for the page, we need to ensure
* that any cache entries for the kernels virtual memory
* range are written back to the page.
* 2. If we have multiple shared mappings of the same space in
@@ -168,10 +193,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
return;
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
-#endif
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +202,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
__flush_icache_all();
}
}
+#endif /* __LINUX_ARM_ARCH__ < 6 */
/*
* Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 23b0b03..1e21e12 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -581,6 +581,19 @@ static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 31" },
};
+void __init
+hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
+ BUG();
+
+ ifsr_info[nr].fn = fn;
+ ifsr_info[nr].sig = sig;
+ ifsr_info[nr].code = code;
+ ifsr_info[nr].name = name;
+}
+
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
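
The new hook_ifault_code() mirrors the existing hook_fault_code() used for data aborts, letting platform code override an IFSR-indexed entry in ifsr_info[]. A hypothetical caller (handler name and IFSR index are invented for illustration; only the hook_ifault_code() signature is taken from the hunk above) might look like:

/* Hypothetical example: register a prefetch-abort handler at boot. */
static int my_break_handler(unsigned long addr, unsigned int ifsr,
			    struct pt_regs *regs)
{
	pr_info("debug break at %08lx (ifsr %08x)\n", addr, ifsr);
	return 0;			/* 0 = handled, no signal raised */
}

static void __init my_init_traps(void)
{
	hook_ifault_code(2, my_break_handler, SIGTRAP, TRAP_BRKPT,
			 "hypothetical debug break");
}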
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb..391ffae 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
#include "mm.h"
@@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
: "cc");
}
+static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
+{
+ unsigned long colour = CACHE_COLOUR(vaddr);
+ unsigned long offset = vaddr & (PAGE_SIZE - 1);
+ unsigned long to;
+
+ set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+ to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+ flush_tlb_kernel_page(to);
+ flush_icache_range(to, to + len);
+}
+
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
@@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
__flush_icache_all();
}
+
#else
-#define flush_pfn_alias(pfn,vaddr) do { } while (0)
+#define flush_pfn_alias(pfn,vaddr) do { } while (0)
+#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
#endif
-#ifdef CONFIG_SMP
static void flush_ptrace_access_other(void *args)
{
__flush_icache_all();
}
-#endif
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
return;
}
- /* VIPT non-aliasing cache */
+ /* VIPT non-aliasing D-cache */
if (vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
- __cpuc_coherent_kern_range(addr, addr + len);
-#ifdef CONFIG_SMP
+ if (icache_is_vipt_aliasing())
+ flush_icache_alias(page_to_pfn(page), uaddr, len);
+ else
+ __cpuc_coherent_kern_range(addr, addr + len);
if (cache_ops_need_broadcast())
smp_call_function(flush_ptrace_access_other,
NULL, 1);
-#endif
}
}
@@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
flush_dcache_mmap_unlock(mapping);
}
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+ unsigned long pfn;
+ struct page *page;
+ struct address_space *mapping;
+
+ if (!pte_present_user(pteval))
+ return;
+ if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+ /* only flush non-aliasing VIPT caches for exec mappings */
+ return;
+ pfn = pte_pfn(pteval);
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+ if (cache_is_vipt_aliasing())
+ mapping = page_mapping(page);
+ else
+ mapping = NULL;
+
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ __flush_dcache_page(mapping, page);
+ /* pte_exec() already checked above for non-aliasing VIPT cache */
+ if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+ __flush_icache_all();
+}
+#endif
+
/*
* Ensure cache coherency between kernel mapping and userspace mapping
* of this page.
@@ -246,17 +290,16 @@ void flush_dcache_page(struct page *page)
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
- if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
- else
-#endif
- {
+ if (!cache_ops_need_broadcast() &&
+ mapping && !mapping_mapped(mapping))
+ clear_bit(PG_dcache_clean, &page->flags);
+ else {
__flush_dcache_page(mapping, page);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, page);
else if (mapping)
__flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55..c435fd9 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
unsigned int idx;
unsigned long vaddr;
void *kmap;
+ int type;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
-
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (kmap)
return kmap;
+ type = kmap_atomic_idx_push();
+
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+ int idx, type;
if (kvaddr >= (void *)FIXADDR_START) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
#else
(void) idx; /* to kill a warning */
#endif
+ kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- unsigned int idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00..5164069 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/sort.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
@@ -121,9 +122,10 @@ void show_mem(void)
printk("%d pages swap cached\n", cached);
}
-static void __init find_limits(struct meminfo *mi,
- unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
{
+ struct meminfo *mi = &meminfo;
int i;
*min = -1UL;
@@ -147,13 +149,13 @@ static void __init find_limits(struct meminfo *mi,
}
}
-static void __init arm_bootmem_init(struct meminfo *mi,
- unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
{
+ struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
- int i;
/*
* Allocate the bootmem bitmap page. This must be in a region
@@ -171,30 +173,39 @@ static void __init arm_bootmem_init(struct meminfo *mi,
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
- for_each_bank(i, mi) {
- struct membank *bank = &mi->bank[i];
- if (!bank->highmem)
- free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+ /* Free the lowmem regions from memblock into bootmem. */
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
- /*
- * Reserve the memblock reserved regions in bootmem.
- */
- for (i = 0; i < memblock.reserved.cnt; i++) {
- phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
- if (start >= start_pfn &&
- memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
- reserve_bootmem_node(pgdat, __pfn_to_phys(start),
- memblock_size_bytes(&memblock.reserved, i),
- BOOTMEM_DEFAULT);
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
+ for_each_memblock(reserved, reg) {
+ unsigned long start = memblock_region_reserved_base_pfn(reg);
+ unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ reserve_bootmem(__pfn_to_phys(start),
+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
- unsigned long max_low, unsigned long max_high)
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- int i;
+ struct memblock_region *reg;
/*
* initialise the zones.
@@ -216,13 +227,20 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_bank(i, mi) {
- int idx = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+ zhole_size[0] -= low_end - start;
+ }
#ifdef CONFIG_HIGHMEM
- if (mi->bank[i].highmem)
- idx = ZONE_HIGHMEM;
+ if (end > max_low) {
+ unsigned long high_start = max(start, max_low);
+ zhole_size[ZONE_HIGHMEM] -= end - high_start;
+ }
#endif
- zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
}
/*
@@ -237,20 +255,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
- struct memblock_region *mem = &memblock.memory;
- unsigned int left = 0, right = mem->cnt;
-
- do {
- unsigned int mid = (right + left) / 2;
-
- if (pfn < memblock_start_pfn(mem, mid))
- right = mid;
- else if (pfn >= memblock_end_pfn(mem, mid))
- left = mid + 1;
- else
- return 1;
- } while (left < right);
- return 0;
+ return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
@@ -260,24 +265,34 @@ static void arm_memory_present(void)
#else
static void arm_memory_present(void)
{
- int i;
- for (i = 0; i < memblock.memory.cnt; i++)
- memory_present(0, memblock_start_pfn(&memblock.memory, i),
- memblock_end_pfn(&memblock.memory, i));
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg)
+ memory_present(0, memblock_region_memory_base_pfn(reg),
+ memblock_region_memory_end_pfn(reg));
}
#endif
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
- memblock_reserve(__pa(_data), _end - _data);
+ memblock_reserve(__pa(_sdata), _end - _sdata);
#else
memblock_reserve(__pa(_stext), _end - _stext);
#endif
@@ -303,14 +318,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
void __init bootmem_init(void)
{
- struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
max_low = max_high = 0;
- find_limits(mi, &min, &max_low, &max_high);
+ find_limits(&min, &max_low, &max_high);
- arm_bootmem_init(mi, min, max_low);
+ arm_bootmem_init(min, max_low);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
@@ -328,7 +342,7 @@ void __init bootmem_init(void)
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- arm_bootmem_free(mi, min, max_low, max_high);
+ arm_bootmem_free(min, max_low, max_high);
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
@@ -422,6 +436,56 @@ static void __init free_unused_memmap(struct meminfo *mi)
}
}
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ struct memblock_region *mem, *res;
+
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ totalhigh_pages += free_area(start, res_start,
+ NULL);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ totalhigh_pages += free_area(start, end, NULL);
+ }
+ totalram_pages += totalhigh_pages;
+#endif
+}
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@@ -430,6 +494,7 @@ static void __init free_unused_memmap(struct meminfo *mi)
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
int i;
#ifdef CONFIG_HAVE_TCM
/* These pointers are filled in on TCM detection */
@@ -450,16 +515,7 @@ void __init mem_init(void)
__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
-#ifdef CONFIG_HIGHMEM
- /* set highmem page free */
- for_each_bank (i, &meminfo) {
- unsigned long start = bank_pfn_start(&meminfo.bank[i]);
- unsigned long end = bank_pfn_end(&meminfo.bank[i]);
- if (start >= max_low_pfn + PHYS_PFN_OFFSET)
- totalhigh_pages += free_area(start, end, NULL);
- }
- totalram_pages += totalhigh_pages;
-#endif
+ free_highpages();
reserved_pages = free_pages = 0;
@@ -489,9 +545,11 @@ void __init mem_init(void)
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
- for (i = 0; i < meminfo.nr_banks; i++) {
- num_physpages += bank_pfn_size(&meminfo.bank[i]);
- printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
@@ -545,7 +603,7 @@ void __init mem_init(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(_data, _edata));
+ MLK_ROUNDUP(_sdata, _edata));
#undef MLK
#undef MLM
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab50627..55c17a6 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
- if (WARN_ON(pfn_valid(pfn)))
- return NULL;
+ if (pfn_valid(pfn)) {
+ printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
+ "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+ "will fail in the next kernel release. Please fix your driver.\n");
+ WARN_ON(1);
+ }
type = get_mem_type(mtype);
if (!type)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 4f5b396..b0a9830 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -144,3 +144,25 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
+
+#ifdef CONFIG_STRICT_DEVMEM
+
+#include <linux/ioport.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain
+ * address is valid. The argument is a physical page number.
+ * We mimic x86 here by disallowing access to system RAM as well as
+ * device-exclusive MMIO regions. This effectively disable read()/write()
+ * on /dev/mem.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+ if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+ return 0;
+ if (!page_is_ram(pfn))
+ return 1;
+ return 0;
+}
+
+#endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6..72ad3e1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,7 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
-#include <linux/sort.h>
+#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@@ -246,6 +246,9 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE | L_PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -254,21 +257,24 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_NONCACHED] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+ .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_DTCM] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG |
- L_PTE_DIRTY | L_PTE_WRITE,
- .prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
- .domain = DOMAIN_KERNEL,
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
},
[MT_MEMORY_ITCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_USER | L_PTE_EXEC,
+ L_PTE_WRITE | L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_IO,
+ .domain = DOMAIN_KERNEL,
},
};
@@ -303,9 +309,8 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
-#ifdef CONFIG_SMP
- cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+ if (is_smp())
+ cachepolicy = CPOLICY_WRITEALLOC;
/*
* Strip out features not present on earlier architectures.
@@ -399,21 +404,22 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
-#ifndef CONFIG_SMP
/*
* Only use write-through for non-SMP systems
*/
- if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
/*
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent() && cpu_is_xsc3())
+ if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
/*
* ARMv6 and above have extended page tables.
*/
@@ -426,20 +432,23 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- user_pgprot |= L_PTE_SHARED;
- kern_pgprot |= L_PTE_SHARED;
- vecs_pgprot |= L_PTE_SHARED;
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
- mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-#endif
+ if (is_smp()) {
+ /*
+ * Mark memory with the "shared" attribute
+ * for SMP systems
+ */
+ user_pgprot |= L_PTE_SHARED;
+ kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
}
/*
@@ -475,6 +484,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@@ -498,6 +509,19 @@ static void __init build_mem_type_table(void)
}
}
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ if (!pfn_valid(pfn))
+ return pgprot_noncached(vma_prot);
+ else if (file->f_flags & O_SYNC)
+ return pgprot_writecombine(vma_prot);
+ return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
@@ -720,13 +744,14 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
static void __init sanity_check_meminfo(void)
{
int i, j, highmem = 0;
- lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+ lowmem_limit = __pa(vmalloc_min - 1) + 1;
+ memblock_set_current_limit(lowmem_limit);
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
@@ -802,8 +827,7 @@ static void __init sanity_check_meminfo(void)
* rather difficult.
*/
reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
- } else if (tlb_ops_need_broadcast()) {
+ } else if (is_smp() && tlb_ops_need_broadcast()) {
/*
* kmap_high needs to occasionally flush TLB entries,
* however, if the TLB entries need to be broadcast
@@ -813,7 +837,6 @@ static void __init sanity_check_meminfo(void)
* (must not be called with irqs off)
*/
reason = "without hardware TLB ops broadcasting";
-#endif
}
if (reason) {
printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
@@ -829,6 +852,7 @@ static void __init sanity_check_meminfo(void)
static inline void prepare_page_table(void)
{
unsigned long addr;
+ phys_addr_t end;
/*
* Clear out all the mappings below the kernel image.
@@ -844,10 +868,17 @@ static inline void prepare_page_table(void)
pmd_clear(pmd_off_k(addr));
/*
+ * Find the end of the first block of lowmem.
+ */
+ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+ if (end >= lowmem_limit)
+ end = lowmem_limit;
+
+ /*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
- for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+ for (addr = __phys_to_virt(end);
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
@@ -964,37 +995,28 @@ static void __init kmap_init(void)
#endif
}
-static inline void map_memory_bank(struct membank *bank)
-{
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-}
-
static void __init map_lowmem(void)
{
- struct meminfo *mi = &meminfo;
- int i;
+ struct memblock_region *reg;
/* Map all the lowmem memory banks. */
- for (i = 0; i < mi->nr_banks; i++) {
- struct membank *bank = &mi->bank[i];
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
+
+ if (end > lowmem_limit)
+ end = lowmem_limit;
+ if (start >= end)
+ break;
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-}
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+ create_mapping(&map);
+ }
}
/*
@@ -1005,8 +1027,6 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e..69bbfc6 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
goto no_pte;
init_pmd = pmd_offset(init_pgd, 0);
- init_pte = pte_offset_map_nested(init_pmd, 0);
+ init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte, *init_pte, 0);
- pte_unmap_nested(init_pte);
+ pte_unmap(init_pte);
pte_unmap(new_pte);
}
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 203a4e9..bcf748d 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1020_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -351,6 +365,7 @@ ENTRY(arm1020_dma_unmap_area)
ENDPROC(arm1020_dma_unmap_area)
ENTRY(arm1020_cache_fns)
+ .long arm1020_flush_icache_all
.long arm1020_flush_kern_cache_all
.long arm1020_flush_user_cache_all
.long arm1020_flush_user_cache_range
@@ -430,7 +445,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm1020_setup, #function
__arm1020_setup:
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 1a511e7..ab7ec26 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020e_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020e_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1020e_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -337,6 +351,7 @@ ENTRY(arm1020e_dma_unmap_area)
ENDPROC(arm1020e_dma_unmap_area)
ENTRY(arm1020e_cache_fns)
+ .long arm1020e_flush_icache_all
.long arm1020e_flush_kern_cache_all
.long arm1020e_flush_user_cache_all
.long arm1020e_flush_user_cache_range
@@ -412,7 +427,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm1020e_setup, #function
__arm1020e_setup:
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 1ffa4eb..831c5e5 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1022_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1022_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1022_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -326,6 +340,7 @@ ENTRY(arm1022_dma_unmap_area)
ENDPROC(arm1022_dma_unmap_area)
ENTRY(arm1022_cache_fns)
+ .long arm1022_flush_icache_all
.long arm1022_flush_kern_cache_all
.long arm1022_flush_user_cache_all
.long arm1022_flush_user_cache_range
@@ -394,7 +409,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm1022_setup, #function
__arm1022_setup:
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 5697c34..e3f7e9a 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1026_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1026_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1026_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -320,6 +334,7 @@ ENTRY(arm1026_dma_unmap_area)
ENDPROC(arm1026_dma_unmap_area)
ENTRY(arm1026_cache_fns)
+ .long arm1026_flush_icache_all
.long arm1026_flush_kern_cache_all
.long arm1026_flush_user_cache_all
.long arm1026_flush_user_cache_range
@@ -384,7 +399,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm1026_setup, #function
__arm1026_setup:
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 64e0b32..6a7be18 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -238,7 +238,7 @@ ENTRY(cpu_arm7_reset)
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0
- __INIT
+ __CPUINIT
.type __arm6_setup, #function
__arm6_setup: mov r0, #0
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 9d96824..c285395 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -113,7 +113,7 @@ ENTRY(cpu_arm720_reset)
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
- __INIT
+ __CPUINIT
.type __arm710_setup, #function
__arm710_setup:
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 6c1a9ab..38b27dc 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -55,7 +55,7 @@ ENTRY(cpu_arm740_reset)
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
- __INIT
+ __CPUINIT
.type __arm740_setup, #function
__arm740_setup:
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 6a850db..0c9786d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -46,7 +46,7 @@ ENTRY(cpu_arm7tdmi_proc_fin)
ENTRY(cpu_arm7tdmi_reset)
mov pc, r0
- __INIT
+ __CPUINIT
.type __arm7tdmi_setup, #function
__arm7tdmi_setup:
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 86f80aa..6109f27 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -110,6 +110,17 @@ ENTRY(cpu_arm920_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm920_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm920_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -305,6 +316,7 @@ ENTRY(arm920_dma_unmap_area)
ENDPROC(arm920_dma_unmap_area)
ENTRY(arm920_cache_fns)
+ .long arm920_flush_icache_all
.long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
@@ -375,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
#endif
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm920_setup, #function
__arm920_setup:
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index f76ce9b..bb2f0f4 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -112,6 +112,17 @@ ENTRY(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm922_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm922_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -307,6 +318,7 @@ ENTRY(arm922_dma_unmap_area)
ENDPROC(arm922_dma_unmap_area)
ENTRY(arm922_cache_fns)
+ .long arm922_flush_icache_all
.long arm922_flush_kern_cache_all
.long arm922_flush_user_cache_all
.long arm922_flush_user_cache_range
@@ -379,7 +391,7 @@ ENTRY(cpu_arm922_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm922_setup, #function
__arm922_setup:
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 657bd3f..c13e01a 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -145,6 +145,17 @@ ENTRY(cpu_arm925_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm925_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm925_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -362,6 +373,7 @@ ENTRY(arm925_dma_unmap_area)
ENDPROC(arm925_dma_unmap_area)
ENTRY(arm925_cache_fns)
+ .long arm925_flush_icache_all
.long arm925_flush_kern_cache_all
.long arm925_flush_user_cache_all
.long arm925_flush_user_cache_range
@@ -428,7 +440,7 @@ ENTRY(cpu_arm925_set_pte_ext)
#endif /* CONFIG_MMU */
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm925_setup, #function
__arm925_setup:
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 73f1f3c..42eb431 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -111,6 +111,17 @@ ENTRY(cpu_arm926_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm926_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm926_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -325,6 +336,7 @@ ENTRY(arm926_dma_unmap_area)
ENDPROC(arm926_dma_unmap_area)
ENTRY(arm926_cache_fns)
+ .long arm926_flush_icache_all
.long arm926_flush_kern_cache_all
.long arm926_flush_user_cache_all
.long arm926_flush_user_cache_range
@@ -389,7 +401,7 @@ ENTRY(cpu_arm926_set_pte_ext)
#endif
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm926_setup, #function
__arm926_setup:
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index fffb061..7b11cdb 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -68,6 +68,17 @@ ENTRY(cpu_arm940_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm940_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm940_flush_icache_all)
+
+/*
* flush_user_cache_all()
*/
ENTRY(arm940_flush_user_cache_all)
@@ -254,6 +265,7 @@ ENTRY(arm940_dma_unmap_area)
ENDPROC(arm940_dma_unmap_area)
ENTRY(arm940_cache_fns)
+ .long arm940_flush_icache_all
.long arm940_flush_kern_cache_all
.long arm940_flush_user_cache_all
.long arm940_flush_user_cache_range
@@ -264,7 +276,7 @@ ENTRY(arm940_cache_fns)
.long arm940_dma_unmap_area
.long arm940_dma_flush_range
- __INIT
+ __CPUINIT
.type __arm940_setup, #function
__arm940_setup:
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 249a605..1a5bbf08 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -75,6 +75,17 @@ ENTRY(cpu_arm946_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm946_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm946_flush_icache_all)
+
+/*
* flush_user_cache_all()
*/
ENTRY(arm946_flush_user_cache_all)
@@ -296,6 +307,7 @@ ENTRY(arm946_dma_unmap_area)
ENDPROC(arm946_dma_unmap_area)
ENTRY(arm946_cache_fns)
+ .long arm946_flush_icache_all
.long arm946_flush_kern_cache_all
.long arm946_flush_user_cache_all
.long arm946_flush_user_cache_range
@@ -317,7 +329,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
- __INIT
+ __CPUINIT
.type __arm946_setup, #function
__arm946_setup:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index db47566..db67e31 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -46,7 +46,7 @@ ENTRY(cpu_arm9tdmi_proc_fin)
ENTRY(cpu_arm9tdmi_reset)
mov pc, r0
- __INIT
+ __CPUINIT
.type __arm9tdmi_setup, #function
__arm9tdmi_setup:
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 7803fdf..7c9ad62 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -134,7 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext)
#endif
mov pc, lr
- __INIT
+ __CPUINIT
.type __fa526_setup, #function
__fa526_setup:
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index b304d01..b4597ed 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -124,6 +124,17 @@ ENTRY(cpu_feroceon_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(feroceon_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(feroceon_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -401,6 +412,7 @@ ENTRY(feroceon_dma_unmap_area)
ENDPROC(feroceon_dma_unmap_area)
ENTRY(feroceon_cache_fns)
+ .long feroceon_flush_icache_all
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
.long feroceon_flush_user_cache_range
@@ -412,6 +424,7 @@ ENTRY(feroceon_cache_fns)
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
+ .long feroceon_flush_icache_all
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
.long feroceon_flush_user_cache_range
@@ -494,7 +507,7 @@ ENTRY(cpu_feroceon_set_pte_ext)
#endif
mov pc, lr
- __INIT
+ __CPUINIT
.type __feroceon_setup, #function
__feroceon_setup:
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 7d63bea..b795afd 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -61,17 +61,27 @@
.endm
/*
- * cache_line_size - get the cache line size from the CSIDR register
- * (available on ARMv7+). It assumes that the CSSR register was configured
- * to access the L1 data cache CSIDR.
+ * dcache_line_size - get the minimum D-cache line size from the CTR register
+ * on ARMv7.
*/
.macro dcache_line_size, reg, tmp
- mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR
- and \tmp, \tmp, #7 @ cache line size encoding
- mov \reg, #16 @ size offset
+ mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+ lsr \tmp, \tmp, #16
+ and \tmp, \tmp, #0xf @ cache line size encoding
+ mov \reg, #4 @ bytes per word
mov \reg, \reg, lsl \tmp @ actual cache line size
.endm
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register
+ * on ARMv7.
+ */
+ .macro icache_line_size, reg, tmp
+ mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+ and \tmp, \tmp, #0xf @ cache line size encoding
+ mov \reg, #4 @ bytes per word
+ mov \reg, \reg, lsl \tmp @ actual cache line size
+ .endm
/*
* Sanity check the PTE configuration for the code below - which makes
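The rewritten macros above stop reading the CCSIDR and instead derive the minimum line sizes from the ARMv7 Cache Type Register: CTR[19:16] (DminLine) and CTR[3:0] (IminLine) hold log2 of the line length in words, so the byte size is 4 shifted left by that field. A hedged C sketch of the same computation; the CTR value would normally come from "mrc p15, 0, <reg>, c0, c0, 1" and is passed in here as a plain parameter:

    #include <stdint.h>

    /* Sketch: compute minimum cache line sizes from an ARMv7 CTR value. */
    static unsigned int dcache_line_size_bytes(uint32_t ctr)
    {
            unsigned int dminline = (ctr >> 16) & 0xf;   /* log2(line size in words) */
            return 4u << dminline;                       /* 4 bytes per word */
    }

    static unsigned int icache_line_size_bytes(uint32_t ctr)
    {
            unsigned int iminline = ctr & 0xf;           /* log2(line size in words) */
            return 4u << iminline;
    }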
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 5f6892f..4458ee6 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -338,7 +338,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
- __INIT
+ __CPUINIT
.type __mohawk_setup, #function
__mohawk_setup:
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index a201eb0..5aa8d59 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -156,7 +156,7 @@ ENTRY(cpu_sa110_set_pte_ext)
#endif
mov pc, lr
- __INIT
+ __CPUINIT
.type __sa110_setup, #function
__sa110_setup:
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 7ddc480..2ac4e6f 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -169,7 +169,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
#endif
mov pc, lr
- __INIT
+ __CPUINIT
.type __sa1100_setup, #function
__sa1100_setup:
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac85..59a7e1f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
#define TTB_RGN_WT (2 << 3)
#define TTB_RGN_WB (3 << 3)
-#ifndef CONFIG_SMP
-#define TTB_FLAGS TTB_RGN_WBWA
-#define PMD_FLAGS PMD_SECT_WB
-#else
-#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP TTB_RGN_WBWA
+#define PMD_FLAGS_UP PMD_SECT_WB
+#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v6_proc_init)
mov pc, lr
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
@@ -137,7 +135,7 @@ cpu_pj4_name:
.align
- __INIT
+ __CPUINIT
/*
* __v6_setup
@@ -156,9 +154,11 @@ cpu_pj4_name:
*/
__v6_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode
+ ALT_UP(nop)
orr r0, r0, #0x20
- mcr p15, 0, r0, c1, c0, 1
+ ALT_SMP(mcr p15, 0, r0, c1, c0, 1)
+ ALT_UP(nop)
#endif
mov r0, #0
@@ -169,7 +169,8 @@ __v6_setup:
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
adr r5, v6_crval
@@ -192,6 +193,8 @@ __v6_setup:
v6_crval:
crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c
+ __INITDATA
+
.type v6_processor_functions, #object
ENTRY(v6_processor_functions)
.word v6_early_abort
@@ -205,6 +208,8 @@ ENTRY(v6_processor_functions)
.word cpu_v6_set_pte_ext
.size v6_processor_functions, . - v6_processor_functions
+ .section ".rodata"
+
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv6"
@@ -225,10 +230,16 @@ cpu_elf_name:
__v6_proc_info:
.long 0x0007b000
.long 0x0007f000
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
@@ -249,10 +260,16 @@ __v6_proc_info:
__pj4_v6_proc_info:
.long 0x560f5810
.long 0xff0ffff0
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
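Throughout proc-v6.S (and proc-v7.S and tlb-v7.S below) the compile-time #ifdef CONFIG_SMP choices are replaced by the ALT_SMP()/ALT_UP() pair so a single image carries both the shareable and the uniprocessor encodings. Conceptually, the SMP form is emitted in line and the UP alternative is recorded off to one side; early boot patches the UP version over the SMP one when the kernel finds it is running on a uniprocessor. A rough C model of that fixup pass, with the record layout simplified and not matching the kernel's actual section format:

    #include <stdint.h>

    /* Sketch: each ALT_SMP/ALT_UP pair contributes one record pointing at the
     * SMP instruction and carrying the UP replacement encoding. */
    struct smp_alt_sketch {
            uint32_t *insn;      /* location of the ALT_SMP() instruction */
            uint32_t  up_insn;   /* word emitted by ALT_UP() */
    };

    static void fixup_for_up_sketch(struct smp_alt_sketch *tbl, unsigned int n)
    {
            for (unsigned int i = 0; i < n; i++)
                    *tbl[i].insn = tbl[i].up_insn;   /* overwrite SMP variant in place */
            /* the real kernel would also clean caches and flush the BTB here */
    }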
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d..9b9ff5d 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
-#ifndef CONFIG_SMP
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP PMD_SECT_WB
+
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v7_proc_init)
mov pc, lr
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
@@ -169,7 +168,7 @@ cpu_v7_name:
.ascii "ARMv7 Processor"
.align
- __INIT
+ __CPUINIT
/*
* __v7_setup
@@ -186,13 +185,15 @@ cpu_v7_name:
* It is assumed that:
* - cache type register is implemented
*/
-__v7_setup:
+__v7_ca9mp_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
+ ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
+__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
bl v7_flush_dcache_all
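The new __v7_ca9mp_setup entry point only touches the auxiliary control register when SMP/nAMP mode is not already enabled, setting the SMP bit and the TLB-broadcast bit together; on UP builds, ALT_UP() substitutes a value with the bit already set so the write is skipped. A small C rendering of that decision, using the bit positions from the comments above (treated here as assumptions, not a hardware specification):

    #include <stdint.h>

    /* Sketch: ACTLR handling in __v7_ca9mp_setup. */
    static uint32_t enable_smp_mode_sketch(uint32_t actlr)
    {
            if (!(actlr & (1u << 6)))                 /* SMP/nAMP mode not yet on? */
                    actlr |= (1u << 6) | (1u << 0);   /* enable it + TLB ops broadcasting */
            return actlr;                             /* written back only if changed */
    }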
@@ -201,11 +202,16 @@ __v7_setup:
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
- bne 2f
+ bne 3f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
- orr r0, r6, r5, lsr #20-4 @ combine variant and revision
+ orr r6, r6, r5, lsr #20-4 @ combine variant and revision
+ ubfx r0, r0, #4, #12 @ primary part number
+ /* Cortex-A8 Errata */
+ ldr r10, =0x00000c08 @ Cortex-A8 primary part number
+ teq r0, r10
+ bne 2f
#ifdef CONFIG_ARM_ERRATA_430973
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -213,21 +219,50 @@ __v7_setup:
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
- teq r0, #0x20 @ only present in r2p0
+ teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
- teq r0, #0x20 @ only present in r2p0
+ teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
+ b 3f
+
+ /* Cortex-A9 Errata */
+2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
+ teq r0, r10
+ bne 3f
+#ifdef CONFIG_ARM_ERRATA_742230
+ cmp r6, #0x22 @ only present up to r2p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 4 @ set bit #4
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 12 @ set bit #12
+ orreq r10, r10, #1 << 22 @ set bit #22
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_743622
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 6 @ set bit #6
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
-2: mov r10, #0
+3: mov r10, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
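The reworked __v7_setup keeps the combined variant/revision in r6 (variant in bits [7:4], revision in bits [3:0], so r2p0 reads back as 0x20) and extracts the primary part number into r0, letting the Cortex-A8 and the new Cortex-A9 errata blocks each be gated on the correct core before checking the revision. A short C sketch of the same MIDR decoding, with values taken from the checks above and illustrative helper names:

    #include <stdint.h>
    #include <stdbool.h>

    /* Sketch: decode the ARMv7 Main ID Register the way __v7_setup does. */
    static bool is_arm_implementer(uint32_t midr)
    {
            return (midr & 0xff000000u) == 0x41000000u;   /* 0x41 = ARM Ltd. */
    }

    static unsigned int primary_part(uint32_t midr)
    {
            return (midr >> 4) & 0xfffu;                  /* 0xc08 = A8, 0xc09 = A9 */
    }

    static unsigned int variant_revision(uint32_t midr)
    {
            unsigned int variant  = (midr >> 20) & 0xfu;
            unsigned int revision = midr & 0xfu;
            return (variant << 4) | revision;             /* e.g. 0x22 for r2p2 */
    }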
@@ -235,7 +270,8 @@ __v7_setup:
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
@@ -297,6 +333,8 @@ v7_crval:
__v7_setup_stack:
.space 4 * 11 @ 11 registers
+ __INITDATA
+
.type v7_processor_functions, #object
ENTRY(v7_processor_functions)
.word v7_early_abort
@@ -310,6 +348,8 @@ ENTRY(v7_processor_functions)
.word cpu_v7_set_pte_ext
.size v7_processor_functions, . - v7_processor_functions
+ .section ".rodata"
+
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv7"
@@ -323,6 +363,35 @@ cpu_elf_name:
.section ".proc.info.init", #alloc, #execinstr
+ .type __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+ .long 0x410fc090 @ Required ID value
+ .long 0xff0ffff0 @ Mask for ID
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP)
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_XN | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ W(b) __v7_ca9mp_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
+ .long cpu_v7_name
+ .long v7_processor_functions
+ .long v7wbi_tlb_fns
+ .long v6_user_fns
+ .long v7_cache_fns
+ .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
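The new __v7_ca9mp_proc_info record sits ahead of the generic ARMv7 entry: the boot-time lookup masks the CPU ID with the second word and compares it against the first, so 0x410fc090/0xff0ffff0 selects a Cortex-A9 MP core of any revision while the catch-all 0x000f0000/0x000f0000 entry below still matches any other ARMv7 part. A hedged C sketch of that first-match lookup, with the record layout abbreviated and the function name purely illustrative:

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: match a CPU ID against proc.info.init-style records, in order. */
    struct proc_info_sketch {
            uint32_t cpu_val;    /* required ID value */
            uint32_t cpu_mask;   /* mask applied to the ID before comparing */
            /* setup entry point, names, per-CPU function tables, ... */
    };

    static const struct proc_info_sketch *
    lookup_processor_sketch(const struct proc_info_sketch *tbl, size_t n, uint32_t midr)
    {
            for (size_t i = 0; i < n; i++)
                    if ((midr & tbl[i].cpu_mask) == tbl[i].cpu_val)
                            return &tbl[i];   /* first match wins: A9 MP before generic v7 */
            return NULL;
    }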
/*
* Match any ARMv7 processor core.
*/
@@ -330,15 +399,21 @@ cpu_elf_name:
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __v7_setup
+ W(b) __v7_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 361a51e..ec26355 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -141,6 +141,17 @@ ENTRY(cpu_xsc3_do_idle)
/* ================================= CACHE ================================ */
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xsc3_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(xsc3_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -325,6 +336,7 @@ ENTRY(xsc3_dma_unmap_area)
ENDPROC(xsc3_dma_unmap_area)
ENTRY(xsc3_cache_fns)
+ .long xsc3_flush_icache_all
.long xsc3_flush_kern_cache_all
.long xsc3_flush_user_cache_all
.long xsc3_flush_user_cache_range
@@ -404,7 +416,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.align
- __INIT
+ __CPUINIT
.type __xsc3_setup, #function
__xsc3_setup:
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 1407597..523408c 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -181,6 +181,17 @@ ENTRY(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xscale_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(xscale_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -397,6 +408,7 @@ ENTRY(xscale_dma_unmap_area)
ENDPROC(xscale_dma_unmap_area)
ENTRY(xscale_cache_fns)
+ .long xscale_flush_icache_all
.long xscale_flush_kern_cache_all
.long xscale_flush_user_cache_all
.long xscale_flush_user_cache_range
@@ -506,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.align
- __INIT
+ __CPUINIT
.type __xscale_setup, #function
__xscale_setup:
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a..53cd5b4 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov ip, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
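v7wbi_flush_user_tlb_range above walks the range one page at a time, invalidating by MVA, then flushes the branch predictor and issues a DSB; with ALT_SMP/ALT_UP the inner-shareable encodings are used only when actually running SMP. A rough C rendering of the loop structure, with the coprocessor operations reduced to placeholder helpers that are not real kernel APIs:

    #include <stdint.h>

    #define PAGE_SIZE_SKETCH 4096u

    /* Placeholders for the mcr operations in the assembly above. */
    static void tlb_invalidate_mva(uintptr_t mva) { (void)mva; /* c8,c3,1 (SMP) or c8,c7,1 (UP) */ }
    static void flush_branch_predictor(void)      {            /* c7,c1,6 (SMP) or c7,c5,6 (UP) */ }
    static void dsb_barrier(void)                 {            /* dsb */ }

    static void flush_user_tlb_range_sketch(uintptr_t start, uintptr_t end)
    {
            for (uintptr_t addr = start & ~(uintptr_t)(PAGE_SIZE_SKETCH - 1);
                 addr < end; addr += PAGE_SIZE_SKETCH)
                    tlb_invalidate_mva(addr);     /* one invalidate per page in the range */

            flush_branch_predictor();
            dsb_barrier();
    }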
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov r2, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
dsb
isb
mov pc, lr
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
ENTRY(v7wbi_tlb_fns)
.long v7wbi_flush_user_tlb_range
.long v7wbi_flush_kern_tlb_range
- .long v7wbi_tlb_flags
+ ALT_SMP(.long v7wbi_tlb_flags_smp)
+ ALT_UP(.long v7wbi_tlb_flags_up)
.size v7wbi_tlb_fns, . - v7wbi_tlb_fns
OpenPOWER on IntegriCloud