From 8a4e3a9ead7e37ce1505602b564c15da09ac039f Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 28 Feb 2013 17:47:36 +0100
Subject: ARM: 7659/1: mm: make mm->context.id an atomic64_t variable

mm->context.id is updated under asid_lock when a new ASID is allocated
to an mm_struct. However, it is also read without the lock when a task
is being scheduled and checking whether or not the current ASID
generation is up-to-date.

If two threads of the same process are being scheduled in parallel and
the bottom bits of the generation in their mm->context.id match the
current generation (that is, the mm_struct has not been used for
~2^24 rollovers) then the non-atomic, lockless access to mm->context.id
may yield the incorrect ASID.

This patch fixes this issue by making mm->context.id an atomic64_t,
ensuring that the generation is always read consistently. For code that
only requires access to the ASID bits (e.g. TLB flushing by mm), the
value is accessed directly, which GCC converts to an ldrb.

Cc: # 3.8
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/mmu.h         | 8 ++++----
 arch/arm/include/asm/mmu_context.h | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e78..e3d5554 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	u64 id;
+	atomic64_t	id;
 #endif
-	unsigned int vmalloc_seq;
+	unsigned int	vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS	8
 #define ASID_MASK	((~0ULL) << ASID_BITS)
-#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
+#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
@@ -26,7 +26,7 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi
  */
 typedef struct {
-	unsigned long		end_brk;
+	unsigned long	end_brk;
 } mm_context_t;
 
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644b..863a661 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
--
cgit v1.1
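
A standalone C sketch (not kernel code; names are illustrative) of the
invariant the atomic64_t buys: the generation and the ASID live in one
64-bit word, so a single atomic load yields a mutually consistent pair,
whereas a non-atomic access on a 32-bit machine can tear across a
concurrent rollover and pair a stale ASID with a current generation.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 8
    #define ASID_MASK (~0ULL << ASID_BITS)

    static _Atomic uint64_t context_id;   /* models mm->context.id */
    static uint64_t current_generation;   /* models the global generation */

    /* One 64-bit atomic load; the generation check and the ASID are
     * derived from the same snapshot, like atomic64_read() in the patch. */
    static int generation_is_current(uint64_t *asid)
    {
        uint64_t id = atomic_load(&context_id);
        *asid = id & ~ASID_MASK;
        return (id & ASID_MASK) == current_generation;
    }

    int main(void)
    {
        uint64_t asid;

        current_generation = 3ULL << ASID_BITS;
        atomic_store(&context_id, current_generation | 0x2a);
        printf("current=%d asid=%llu\n", generation_is_current(&asid),
               (unsigned long long)asid);
        return 0;
    }
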
From 862c588f062fe9339a180cf6429e4df1855c376a Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 28 Feb 2013 17:48:11 +0100
Subject: ARM: 7660/1: tlb: add branch predictor maintenance operations

The ARM architecture requires explicit branch predictor maintenance
when updating an instruction stream for a given virtual address. In
reality, this isn't so much of a burden because the branch predictor
is flushed during the cache maintenance required to make the new
instructions visible to the I-side of the processor.

However, there are still some cases where explicit flushing is
required, so add a local_flush_bp_all operation to deal with this.

Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/tlbflush.h | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3..4db8c88 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,13 @@
 #define TLB_V6_D_ASID	(1 << 17)
 #define TLB_V6_I_ASID	(1 << 18)
 
+#define TLB_V6_BP	(1 << 19)
+
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE	(1 << 19)
-#define TLB_V7_UIS_FULL	(1 << 20)
-#define TLB_V7_UIS_ASID	(1 << 21)
+#define TLB_V7_UIS_PAGE	(1 << 20)
+#define TLB_V7_UIS_FULL	(1 << 21)
+#define TLB_V7_UIS_ASID	(1 << 22)
+#define TLB_V7_UIS_BP	(1 << 23)
 
 #define TLB_BARRIER	(1 << 28)
 #define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
@@ -150,7 +153,8 @@
 #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
-			 TLB_V6_I_ASID | TLB_V6_D_ASID)
+			 TLB_V6_I_ASID | TLB_V6_D_ASID | \
+			 TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V6
 # define v6wbi_possible_flags	v6wbi_tlb_flags
@@ -166,9 +170,11 @@
 #endif
 
 #define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-				 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+				 TLB_V6_U_ASID | TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V7
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	}
 }
 
+static inline void local_flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_V7_UIS_BP))
+		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+	else if (tlb_flag(TLB_V6_BP))
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+	if (tlb_flag(TLB_BARRIER))
+		isb();
+}
+
 /*
  *	flush_pmd_entry
  *
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
 #define flush_tlb_kernel_page	local_flush_tlb_kernel_page
 #define flush_tlb_range		local_flush_tlb_range
 #define flush_tlb_kernel_range	local_flush_tlb_kernel_range
+#define flush_bp_all		local_flush_bp_all
 #else
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 extern void flush_tlb_kernel_page(unsigned long kaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
 #endif
 
 /*
--
cgit v1.1
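
The new local_flush_bp_all follows tlbflush.h's usual compile-time
dispatch idiom. The sketch below models that idiom with the bit values
from this patch; __cpu_tlb_flags and tlb_flag() are simplified
stand-ins for the kernel's machinery (an assumption, not a quote of
it), and puts() stands in for the mcr/isb instructions so the selection
logic can be compiled and run anywhere. Because the flag word is a
compile-time constant here, the untaken branch is dead code the
compiler can drop, which is the point of the idiom.

    #include <stdio.h>

    #define TLB_V6_BP       (1 << 19)
    #define TLB_V7_UIS_BP   (1 << 23)
    #define TLB_BARRIER     (1 << 28)

    /* Pretend this build targets a v7-SMP core. */
    static const unsigned int __cpu_tlb_flags = TLB_V7_UIS_BP | TLB_BARRIER;
    #define tlb_flag(f) (__cpu_tlb_flags & (f))

    static void local_flush_bp_all(void)
    {
        if (tlb_flag(TLB_V7_UIS_BP))
            puts("mcr p15, 0, r0, c7, c1, 6   @ BPIALLIS");
        else if (tlb_flag(TLB_V6_BP))
            puts("mcr p15, 0, r0, c7, c5, 6   @ BPIALL");

        if (tlb_flag(TLB_BARRIER))
            puts("isb");
    }

    int main(void)
    {
        local_flush_bp_all();   /* prints the ops a v7-SMP core would issue */
        return 0;
    }
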
From 3f7d1fe108dbaefd0c57a41753fc2c90b395f458 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Thu, 28 Feb 2013 21:53:35 +0100
Subject: ARM: 7665/1: Wire up kcmp syscall

Wire up the kcmp syscall so that the checkpoint/restore procedure can
be carried out on the ARM platform.

Signed-off-by: Alexander Kartashov
Signed-off-by: Cyrill Gorcunov
Acked-by: Arnd Bergmann
Signed-off-by: Russell King
---
 arch/arm/include/uapi/asm/unistd.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde..af33b44 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -404,7 +404,7 @@
 #define __NR_setns			(__NR_SYSCALL_BASE+375)
 #define __NR_process_vm_readv		(__NR_SYSCALL_BASE+376)
 #define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
-					/* 378 for kcmp */
+#define __NR_kcmp			(__NR_SYSCALL_BASE+378)
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
 
 /*
--
cgit v1.1
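
With __NR_kcmp wired up, a checkpoint/restore tool can ask the kernel
whether two tasks share a resource. Below is a minimal userspace usage
sketch; glibc ships no kcmp wrapper, so the raw syscall is used, and
error handling is omitted for brevity. kcmp returns 0 when the two
descriptors refer to the same open file description and an ordering
value (1, 2 or 3) otherwise.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <linux/kcmp.h>         /* KCMP_FILE */

    static long kcmp(pid_t pid1, pid_t pid2, int type,
                     unsigned long idx1, unsigned long idx2)
    {
        return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
    }

    int main(void)
    {
        pid_t self = getpid();
        int fd1 = open("/dev/null", O_RDONLY);
        int fd2 = dup(fd1);                     /* shares the description */
        int fd3 = open("/dev/null", O_RDONLY);  /* a separate description */

        printf("fd1 vs dup:  %ld\n", kcmp(self, self, KCMP_FILE, fd1, fd2));
        printf("fd1 vs open: %ld\n", kcmp(self, self, KCMP_FILE, fd1, fd3));
        return 0;
    }
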
From 85323a991d40681023822e86ca95f38a75262026 Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Thu, 7 Mar 2013 07:17:25 +0000
Subject: xen: arm: mandate EABI and use generic atomic operations.

Rob Herring has observed that c81611c4e96f "xen: event channel arrays
are xen_ulong_t and not unsigned long" introduced a compile failure
when building without CONFIG_AEABI:

/tmp/ccJaIZOW.s: Assembler messages:
/tmp/ccJaIZOW.s:831: Error: even register required -- `ldrexd r5,r6,[r4]'

Will Deacon pointed out that this is because OABI does not require even
base registers for 64-bit values. We can avoid this by simply using the
existing atomic64_xchg operation and the same container_of trick as
used by the cmpxchg macros. However, since this code is used on memory
shared with the hypervisor, we require proper atomic instructions and
cannot use the generic atomic64 callbacks (which are based on
spinlocks); therefore add a dependency on !GENERIC_ATOMIC64. Since we
already depend on !CPU_V6 there isn't much downside to this.

While thinking about this we also observed that OABI has different
struct alignment requirements to EABI, which is a problem for hypercall
argument structs, which are shared with the hypervisor and must be in
EABI layout. Since I don't expect people to want to run OABI kernels on
Xen, depend on CONFIG_AEABI explicitly too (although it also happens to
be enforced by the !GENERIC_ATOMIC64 requirement).

Signed-off-by: Ian Campbell
Cc: Will Deacon
Cc: Rob Herring
Acked-by: Stefano Stabellini
Cc: Konrad Rzeszutek Wilk
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/arm/include/asm/xen/events.h | 25 ++++---------------------
 1 file changed, 4 insertions(+), 21 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 5c27696..8b1f37b 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -2,6 +2,7 @@
 #define _ASM_ARM_XEN_EVENTS_H
 
 #include
+#include
 
 enum ipi_vector {
 	XEN_PLACEHOLDER_VECTOR,
@@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return raw_irqs_disabled_flags(regs->ARM_cpsr);
 }
 
-/*
- * We cannot use xchg because it does not support 8-byte
- * values. However it is safe to use {ldr,str}exd directly because all
- * platforms which Xen can run on support those instructions.
- */
-static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
-{
-	xen_ulong_t oldval;
-	unsigned int tmp;
-
-	wmb();
-	asm volatile("@ xchg_xen_ulong\n"
-		"1:	ldrexd	%0, %H0, [%3]\n"
-		"	strexd	%1, %2, %H2, [%3]\n"
-		"	teq	%1, #0\n"
-		"	bne	1b"
-		: "=&r" (oldval), "=&r" (tmp)
-		: "r" (val), "r" (ptr)
-		: "memory", "cc");
-	return oldval;
-}
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr),	\
+							     atomic64_t, \
+							     counter), (val))
 
 #endif /* _ASM_ARM_XEN_EVENTS_H */
--
cgit v1.1
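
The container_of trick in the new xchg_xen_ulong is worth unpacking,
since nothing in the macro looks atomic at first glance. Below is a
standalone illustration (not kernel code) of the pointer re-typing it
relies on, under the same assumption the kernel's cmpxchg macros make:
atomic64_t is a plain struct wrapping a single 64-bit counter at offset
zero, so a pointer to a raw 64-bit word can be treated as a pointer to
that member and converted back to the containing atomic64_t. From
there, atomic64_xchg emits a proper ldrexd/strexd loop with a register
pair chosen by the compiler (a pair EABI guarantees to be valid for
those instructions).

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shapes, reduced to what the trick needs. */
    typedef struct { int64_t counter; } atomic64_t;
    typedef uint64_t xen_ulong_t;   /* 64-bit even on 32-bit ARM */

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        xen_ulong_t pending = 42;   /* stands in for a shared event word */

        /* counter sits at offset 0, so this only re-types the pointer;
         * the resulting atomic64_t aliases the original word exactly. */
        atomic64_t *v = container_of((int64_t *)&pending, atomic64_t, counter);
        printf("counter = %lld\n", (long long)v->counter);
        return 0;
    }
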