author    | Catalin Marinas <catalin.marinas@arm.com>  | 2010-01-26 19:09:42 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-02-15 21:39:51 +0000
commit    | 11805bcfa411c816b7c76fc40724be6733c74ffc (patch)
tree      | 09c5c6c91deffb60339f027779521bafa42921ce /arch/arm/include
parent    | 48ab7e09e0a7c00a217f87e4b57dfbee9c603e79 (diff)
ARM: 5905/1: ARM: Global ASID allocation on SMP
The current ASID allocation algorithm does not notify the other CPUs
when the ASID rolls over. This can leave two processes using the same
ASID (but from different generations), or multiple threads of the same
process using different ASIDs.
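
As a concrete illustration of the generation scheme, here is a minimal
userspace sketch (not kernel code): the hardware ASID lives in the low
ASID_BITS of mm->context.id and the generation in the bits above, so
XOR-ing against cpu_last_asid and shifting exposes any generation
mismatch. The 8-bit ASID_BITS value and the asid_is_stale() helper are
assumptions for illustration; the kernel's equivalent is the check in
check_context() in the diff below.

/*
 * Minimal userspace sketch of the generation check, assuming 8-bit
 * ASIDs; asid_is_stale() is an illustrative name, not a kernel API.
 */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

/* Low ASID_BITS hold the hardware ASID, the bits above the generation. */
static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

static int asid_is_stale(unsigned int context_id)
{
	/* Any difference in the generation bits means a rollover happened. */
	return (context_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int id = cpu_last_asid | 5;	/* ASID 5, current generation */

	printf("stale before rollover: %d\n", asid_is_stale(id) != 0);
	cpu_last_asid += ASID_FIRST_VERSION;	/* simulate a rollover */
	printf("stale after rollover:  %d\n", asid_is_stale(id) != 0);
	return 0;
}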
This patch broadcasts the ASID rollover event to the other CPUs. To
avoid a race in which several CPUs modify cpu_last_asid while handling
the broadcast, ASID numbering now starts at smp_processor_id() + 1; at
rollover, cpu_last_asid is set to NR_CPUS.
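
The arithmetic behind this can be seen in a small simulation, again a
hedged sketch rather than the patch's actual allocator (which lives in
arch/arm/mm/context.c, outside this diffstat): after the generation
bump, CPU n reserves ASID n + 1 without touching the shared counter,
and cpu_last_asid finishes the rollover at NR_CPUS so that normal
allocation resumes at NR_CPUS + 1.

/*
 * Userspace sketch of the rollover numbering described above; the
 * real allocator is in arch/arm/mm/context.c and is not shown in
 * this diff. NR_CPUS is fixed at 4 here purely for illustration.
 */
#include <stdio.h>

#define NR_CPUS			4
#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

int main(void)
{
	unsigned int cpu;

	cpu_last_asid += ASID_FIRST_VERSION;	/* rollover: new generation */

	/* Each CPU reserves cpu + 1 in the IPI, no shared counter involved. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u reserves asid %u\n", cpu,
		       (cpu_last_asid + cpu + 1) & (ASID_FIRST_VERSION - 1));

	/* The shared counter resumes past the per-CPU range ... */
	cpu_last_asid += NR_CPUS;
	/* ... so the next regular allocation takes NR_CPUS + 1. */
	printf("next allocated asid: %u\n",
	       (++cpu_last_asid) & (ASID_FIRST_VERSION - 1));
	return 0;
}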
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/include')
 arch/arm/include/asm/mmu.h         |  1 +
 arch/arm/include/asm/mmu_context.h | 15 +++++++++++++++
 2 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584..68870c7 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	unsigned int id;
+	spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;
 
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb..a0b3cac 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
 
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
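
For context, the consumer of the per-CPU current_mm recorded in
switch_mm() above is the rollover IPI handler in arch/arm/mm/context.c,
which is not part of this diffstat. A rough, non-authoritative sketch
of its shape, where set_asid() is a hypothetical stand-in for the write
to the hardware context ID register:

/*
 * Sketch only: the real rollover IPI handler lives in
 * arch/arm/mm/context.c and is not part of this diff. set_asid()
 * is a hypothetical stand-in for writing the new ASID to the
 * hardware context ID register.
 */
static void reset_context(void *info)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	if (!mm)
		return;		/* no user mm has run on this CPU yet */

	/* Per-CPU numbering: cpu + 1 within the new generation. */
	mm->context.id = cpu_last_asid + cpu + 1;

	set_asid(mm->context.id);	/* hypothetical register write */
	local_flush_tlb_all();		/* drop translations tagged with stale ASIDs */
}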