author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-12-11 10:01:53 +0000
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-12-11 10:01:53 +0000
commit    | 0fa5d3996dbda1ee9653c43d39b7ef159fb57ee7 (patch)
tree      | 70f0adc3b86bb1511be6607c959506f6365fc2a9 /arch/arm/mm
parent    | 0b99cb73105f0527c1c4096960796b8772343a39 (diff)
parent    | 14318efb322e2fe1a034c69463d725209eb9d548 (diff)
download  | op-kernel-dev-0fa5d3996dbda1ee9653c43d39b7ef159fb57ee7.zip, op-kernel-dev-0fa5d3996dbda1ee9653c43d39b7ef159fb57ee7.tar.gz
Merge branch 'devel-stable' into for-linus
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/context.c        | 207
-rw-r--r-- | arch/arm/mm/ioremap.c        |  16
-rw-r--r-- | arch/arm/mm/mmu.c            |   2
-rw-r--r-- | arch/arm/mm/proc-macros.S    |   4
-rw-r--r-- | arch/arm/mm/proc-v7-2level.S |  10
-rw-r--r-- | arch/arm/mm/proc-v7-3level.S |   5
6 files changed, 132 insertions, 112 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 4e07eec..bc4a5e9 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -2,6 +2,9 @@
  * linux/arch/arm/mm/context.c
  *
  * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,14 +17,40 @@
 #include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
+#include <asm/smp_plat.h>
 #include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
 
+/*
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                         7          0
+ * +-------------------------+-----------+
+ * |      process ID         |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ */
+#define ASID_FIRST_VERSION  (1ULL << ASID_BITS)
+#define NUM_USER_ASIDS      (ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)   ((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)    ((idx + 1) & ~ASID_MASK)
+
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;
 
 #ifdef CONFIG_ARM_LPAE
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
 {
         unsigned long ttbl = __pa(swapper_pg_dir);
         unsigned long ttbh = 0;
@@ -37,7 +66,7 @@ void cpu_set_reserved_ttbr0(void)
         isb();
 }
 #else
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
 {
         u32 ttb;
         /* Copy TTBR1 into TTBR0 */
@@ -84,124 +113,104 @@ static int __init contextidr_notifier_init(void)
 arch_initcall(contextidr_notifier_init);
 #endif
 
-/*
- * We fork()ed a process, and we need a new context for the child
- * to run in.
- */
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static void flush_context(unsigned int cpu)
 {
-        mm->context.id = 0;
-        raw_spin_lock_init(&mm->context.id_lock);
-}
+        int i;
+        u64 asid;
+
+        /* Update the list of reserved ASIDs and the ASID bitmap. */
+        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+        for_each_possible_cpu(i) {
+                if (i == cpu) {
+                        asid = 0;
+                } else {
+                        asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+                        __set_bit(ASID_TO_IDX(asid), asid_map);
+                }
+                per_cpu(reserved_asids, i) = asid;
+        }
 
-static void flush_context(void)
-{
-        cpu_set_reserved_ttbr0();
-        local_flush_tlb_all();
-        if (icache_is_vivt_asid_tagged()) {
+        /* Queue a TLB invalidate and flush the I-cache if necessary. */
+        if (!tlb_ops_need_broadcast())
+                cpumask_set_cpu(cpu, &tlb_flush_pending);
+        else
+                cpumask_setall(&tlb_flush_pending);
+
+        if (icache_is_vivt_asid_tagged())
                 __flush_icache_all();
-                dsb();
-        }
 }
 
-#ifdef CONFIG_SMP
+static int is_reserved_asid(u64 asid)
+{
+        int cpu;
+        for_each_possible_cpu(cpu)
+                if (per_cpu(reserved_asids, cpu) == asid)
+                        return 1;
+        return 0;
+}
 
-static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+static void new_context(struct mm_struct *mm, unsigned int cpu)
 {
-        unsigned long flags;
+        u64 asid = mm->context.id;
+        u64 generation = atomic64_read(&asid_generation);
 
-        /*
-         * Locking needed for multi-threaded applications where the
-         * same mm->context.id could be set from different CPUs during
-         * the broadcast. This function is also called via IPI so the
-         * mm->context.id_lock has to be IRQ-safe.
-         */
-        raw_spin_lock_irqsave(&mm->context.id_lock, flags);
-        if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+        if (asid != 0 && is_reserved_asid(asid)) {
                 /*
-                 * Old version of ASID found. Set the new one and
-                 * reset mm_cpumask(mm).
+                 * Our current ASID was active during a rollover, we can
+                 * continue to use it and this was just a false alarm.
                  */
-                mm->context.id = asid;
+                asid = generation | (asid & ~ASID_MASK);
+        } else {
+                /*
+                 * Allocate a free ASID. If we can't find one, take a
+                 * note of the currently active ASIDs and mark the TLBs
+                 * as requiring flushes.
                  */
+                asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                if (asid == NUM_USER_ASIDS) {
+                        generation = atomic64_add_return(ASID_FIRST_VERSION,
+                                                         &asid_generation);
+                        flush_context(cpu);
+                        asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                }
+                __set_bit(asid, asid_map);
+                asid = generation | IDX_TO_ASID(asid);
                 cpumask_clear(mm_cpumask(mm));
         }
-        raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
-        /*
-         * Set the mm_cpumask(mm) bit for the current CPU.
-         */
-        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+        mm->context.id = asid;
 }
 
-/*
- * Reset the ASID on the current CPU. This function call is broadcast
- * from the CPU handling the ASID rollover and holding cpu_asid_lock.
- */
-static void reset_context(void *info)
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
-        unsigned int asid;
+        unsigned long flags;
         unsigned int cpu = smp_processor_id();
-        struct mm_struct *mm = current->active_mm;
 
-        smp_rmb();
-        asid = cpu_last_asid + cpu + 1;
+        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+                __check_vmalloc_seq(mm);
 
-        flush_context();
-        set_mm_context(mm, asid);
-
-        /* set the new ASID */
-        cpu_switch_mm(mm->pgd, mm);
-}
+        /*
         * Required during context switch to avoid speculative page table
         * walking with the wrong TTBR.
         */
+        cpu_set_reserved_ttbr0();
 
-#else
+        if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+            && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+                goto switch_mm_fastpath;
 
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-        mm->context.id = asid;
-        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
+        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+        /* Check that our ASID belongs to the current generation. */
+        if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+                new_context(mm, cpu);
 
-#endif
+        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
-void __new_context(struct mm_struct *mm)
-{
-        unsigned int asid;
+        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+                local_flush_tlb_all();
+        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-        raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
-        /*
-         * Check the ASID again, in case the change was broadcast from
-         * another CPU before we acquired the lock.
-         */
-        if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
-                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-                raw_spin_unlock(&cpu_asid_lock);
-                return;
-        }
-#endif
-        /*
-         * At this point, it is guaranteed that the current mm (with
-         * an old ASID) isn't active on any other CPU since the ASIDs
-         * are changed simultaneously via IPI.
-         */
-        asid = ++cpu_last_asid;
-        if (asid == 0)
-                asid = cpu_last_asid = ASID_FIRST_VERSION;
-
-        /*
-         * If we've used up all our ASIDs, we need
-         * to start a new version and flush the TLB.
-         */
-        if (unlikely((asid & ~ASID_MASK) == 0)) {
-                asid = cpu_last_asid + smp_processor_id() + 1;
-                flush_context();
-#ifdef CONFIG_SMP
-                smp_wmb();
-                smp_call_function(reset_context, NULL, 1);
-#endif
-                cpu_last_asid += NR_CPUS;
-        }
-
-        set_mm_context(mm, asid);
-        raw_spin_unlock(&cpu_asid_lock);
+switch_mm_fastpath:
+        cpu_switch_mm(mm->pgd, mm);
 }
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5dcc2fd..88fd86c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
         unsigned int seq;
 
         do {
-                seq = init_mm.context.kvm_seq;
+                seq = init_mm.context.vmalloc_seq;
                 memcpy(pgd_offset(mm, VMALLOC_START),
                        pgd_offset_k(VMALLOC_START),
                        sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                         pgd_index(VMALLOC_START)));
-                mm->context.kvm_seq = seq;
-        } while (seq != init_mm.context.kvm_seq);
+                mm->context.vmalloc_seq = seq;
+        } while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
                 if (!pmd_none(pmd)) {
                         /*
                          * Clear the PMD from the page table, and
-                         * increment the kvm sequence so others
+                         * increment the vmalloc sequence so others
                          * notice this change.
                          *
                          * Note: this is still racy on SMP machines.
                          */
                         pmd_clear(pmdp);
-                        init_mm.context.kvm_seq++;
+                        init_mm.context.vmalloc_seq++;
 
                         /*
                          * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
          * Ensure that the active_mm is up to date - we want to
          * catch any use-after-iounmap cases.
          */
-        if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-                __check_kvm_seq(current->active_mm);
+        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+                __check_vmalloc_seq(current->active_mm);
 
         flush_tlb_kernel_range(virt, end);
 }
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 941dfb9..99b47b9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -488,7 +488,7 @@ static void __init build_mem_type_table(void)
 #endif
 
         for (i = 0; i < 16; i++) {
-                unsigned long v = pgprot_val(protection_map[i]);
+                pteval_t v = pgprot_val(protection_map[i]);
                 protection_map[i] = __pgprot(v | user_pgprot);
         }
 
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b29a226..eb6aa73 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -167,6 +167,10 @@
         tst     r1, #L_PTE_YOUNG
         tstne   r1, #L_PTE_PRESENT
         moveq   r3, #0
+#ifndef CONFIG_CPU_USE_DOMAINS
+        tstne   r1, #L_PTE_NONE
+        movne   r3, #0
+#endif
 
         str     r3, [r0]
         mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index fd045e7..6d98c13 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -100,7 +100,11 @@ ENTRY(cpu_v7_set_pte_ext)
         orrne   r3, r3, #PTE_EXT_XN
 
         tst     r1, #L_PTE_YOUNG
-        tstne   r1, #L_PTE_PRESENT
+        tstne   r1, #L_PTE_VALID
+#ifndef CONFIG_CPU_USE_DOMAINS
+        eorne   r1, r1, #L_PTE_NONE
+        tstne   r1, #L_PTE_NONE
+#endif
         moveq   r3, #0
 
  ARM(   str     r3, [r0, #2048]! )
@@ -161,11 +165,11 @@ ENDPROC(cpu_v7_set_pte_ext)
          *  TFR   EV X F   I D LR    S
          * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
          * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
-         *   1    0 110    0011 1100 .111 1101 < we want
+         *  01    0 110    0011 1100 .111 1101 < we want
          */
         .align  2
         .type   v7_crval, #object
 v7_crval:
-        crval   clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
+        crval   clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
 
         .previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 8de0f1d..7b56386 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -65,8 +65,11 @@ ENDPROC(cpu_v7_switch_mm)
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
-        tst     r2, #L_PTE_PRESENT
+        tst     r2, #L_PTE_VALID
         beq     1f
+        tst     r3, #1 << (57 - 32)             @ L_PTE_NONE
+        bicne   r2, #L_PTE_VALID
+        bne     1f
         tst     r3, #1 << (55 - 32)             @ L_PTE_DIRTY
         orreq   r2, #L_PTE_RDONLY
 1:      strd    r2, r3, [r0]
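For readers who want to experiment with the allocation scheme this merge brings in for arch/arm/mm/context.c, the sketch below is a minimal, single-threaded userspace model of it. It reuses names from the patch (asid_generation, asid_map, ASID_BITS, the +1 offset of IDX_TO_ASID that keeps ASID 0 reserved), but the per-CPU active/reserved tracking, cpu_asid_lock, and all TLB and I-cache maintenance are deliberately omitted, the bitmap is a plain byte array instead of find_first_zero_bit(), and ASID_BITS is assumed to be 8 as on ARMv6/v7. It only illustrates how the 64-bit context ID packs a generation number above the hardware ASID; it is not the kernel code itself.

/*
 * Minimal, single-threaded model of the ASID allocator merged above.
 * Names mirror the patch; locking, per-CPU active/reserved tracking and
 * TLB maintenance are left out on purpose.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS          8                        /* assumed: 8-bit hardware ASID (ARMv6/v7) */
#define ASID_MASK          (~((1ULL << ASID_BITS) - 1))
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)      /* generation counter lives above the ASID */
#define NUM_USER_ASIDS     (ASID_FIRST_VERSION - 1) /* ASID 0 stays reserved */

static uint64_t asid_generation = ASID_FIRST_VERSION;
static unsigned char asid_map[NUM_USER_ASIDS];      /* stand-in for the kernel's bitmap */

/* Linear stand-in for find_first_zero_bit(): a free slot, or NUM_USER_ASIDS if full. */
static uint64_t find_free_asid(void)
{
        uint64_t i;

        for (i = 0; i < NUM_USER_ASIDS; i++)
                if (!asid_map[i])
                        return i;
        return NUM_USER_ASIDS;
}

/* Allocate a context ID: generation in the high bits, hardware ASID in the low bits. */
static uint64_t new_context_id(void)
{
        uint64_t idx = find_free_asid();

        if (idx == NUM_USER_ASIDS) {
                /*
                 * Rollover: bump the generation and recycle every ASID.
                 * The kernel additionally queues TLB flushes at this point.
                 */
                asid_generation += ASID_FIRST_VERSION;
                memset(asid_map, 0, sizeof(asid_map));
                idx = 0;
        }
        asid_map[idx] = 1;
        return asid_generation | (idx + 1);         /* idx + 1: never hand out ASID 0 */
}

/* A context ID is stale once its generation no longer matches (the rollover check). */
static int needs_new_context(uint64_t id)
{
        return (id ^ asid_generation) >> ASID_BITS;
}

int main(void)
{
        uint64_t id = new_context_id();

        printf("context id %#llx: generation %#llx, hw ASID %llu\n",
               (unsigned long long)id,
               (unsigned long long)(id & ASID_MASK),
               (unsigned long long)(id & ~ASID_MASK));

        /* Exhaust the remaining ASIDs so the next allocation forces a rollover. */
        for (int i = 0; i < (int)NUM_USER_ASIDS; i++)
                new_context_id();

        printf("after rollover, is the old id stale? %s\n",
               needs_new_context(id) ? "yes" : "no");
        return 0;
}

Built with any C99 compiler, the program allocates one context ID, exhausts the remaining ASIDs to force a rollover, and then shows the old ID failing the generation comparison, which is the same condition check_and_switch_context() uses in the patch to decide whether new_context() must run.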