author    Catalin Marinas <catalin.marinas@arm.com>   2007-02-05 14:47:51 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2007-02-08 14:49:27 +0000
commit    e6a5d66f58431c66c79e236f722a5ad7dd959ef3 (patch)
tree      f2153821e15aa4f5f30d02d0bc6d9a535ea168a4
parent    9d99df4b10eef130dacb5f772cd589c625b03634 (diff)
[ARM] 4129/1: Add barriers after the TLB operations
The architecture specification states that TLB operations are only
guaranteed to be complete after the execution of a DSB (Data
Synchronisation Barrier, formerly Data Write Barrier or Drain Write
Buffer). The branch target cache invalidation is also needed. The ISB
(Instruction Synchronisation Barrier, formerly Prefetch Flush) is needed
unless there will be a return from exception before the corresponding
mapping is used (i.e. user mappings).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
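The dsb() and isb() helpers that this patch substitutes for the
open-coded CP15 writes are defined outside this diff. As a rough sketch,
assuming definitions along the lines of the era's
include/asm-arm/system.h (the CP15 encodings below match the mcr
instructions in the patch itself):

	/*
	 * Assumed ARMv6 barrier macro definitions -- not part of this
	 * patch. DSB (c7, c10, 4) guarantees completion of the preceding
	 * TLB maintenance; ISB (c7, c5, 4, the prefetch flush) discards
	 * instructions already fetched under the old mapping.
	 */
	#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
					    : : "r" (0) : "memory")
	#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
					    : : "r" (0) : "memory")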
-rw-r--r--  arch/arm/mm/tlb-v6.S        |  4
-rw-r--r--  include/asm-arm/tlbflush.h  | 50
2 files changed, 42 insertions(+), 12 deletions(-)
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index fd6adde..20f84bb 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -53,6 +53,8 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
 	mov	pc, lr
 
 /*
@@ -80,7 +82,9 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
+	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 	.section ".text.init", #alloc, #execinstr
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
index cd10a0b..08c6991 100644
--- a/include/asm-arm/tlbflush.h
+++ b/include/asm-arm/tlbflush.h
@@ -247,7 +247,7 @@ static inline void local_flush_tlb_all(void)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (tlb_flag(TLB_V3_FULL))
 		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
@@ -257,6 +257,15 @@ static inline void local_flush_tlb_all(void)
 		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
 	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
 		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
 }
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -266,7 +275,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
 		if (tlb_flag(TLB_V3_FULL))
@@ -285,6 +294,14 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
 	if (tlb_flag(TLB_V6_I_ASID))
 		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
 }
 
 static inline void
@@ -296,7 +313,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero));
+		dsb();
 
 	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		if (tlb_flag(TLB_V3_PAGE))
@@ -317,6 +334,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -327,7 +352,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	kaddr &= PAGE_MASK;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (tlb_flag(TLB_V3_PAGE))
 		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
@@ -347,11 +372,14 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
 
-	/* The ARM ARM states that the completion of a TLB maintenance
-	 * operation is only guaranteed by a DSB instruction
-	 */
-	if (tlb_flag(TLB_V6_U_PAGE | TLB_V6_D_PAGE | TLB_V6_I_PAGE))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
 }
 
 /*
@@ -369,15 +397,13 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
  */
 static inline void flush_pmd_entry(pmd_t *pmd)
 {
-	const unsigned int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_DCLEAN))
 		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd"
 			: : "r" (pmd) : "cc");
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4 @ flush_pmd"
-			: : "r" (zero) : "cc");
+		dsb();
 }
 
 static inline void clean_pmd_entry(pmd_t *pmd)