Diffstat (limited to 'arch/arc/include')
-rw-r--r--   arch/arc/include/asm/Kbuild          1
-rw-r--r--   arch/arc/include/asm/cache.h         8
-rw-r--r--   arch/arc/include/asm/irq.h           4
-rw-r--r--   arch/arc/include/asm/irqflags.h     22
-rw-r--r--   arch/arc/include/asm/mmu.h           2
-rw-r--r--   arch/arc/include/asm/mmu_context.h  61
-rw-r--r--   arch/arc/include/asm/setup.h         2
-rw-r--r--   arch/arc/include/asm/smp.h           2
-rw-r--r--   arch/arc/include/asm/tlbflush.h     11
-rw-r--r--   arch/arc/include/asm/unaligned.h     3
10 files changed, 84 insertions, 32 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index d8dd660..5943f7f 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -46,3 +46,4 @@ generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index e4abdaa..2fd3162 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -17,13 +17,7 @@
#endif
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-/* For a rare case where customers have differently config I/D */
-#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
-#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
-
-#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
-#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
/*
* ARC700 doesn't cache any access in top 256M.
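A minimal sketch, not part of this patch, of how the single CACHE_LINE_MASK is
typically used: round an address down to the cache line containing it, then
walk the region one L1_CACHE_BYTES-sized line at a time. The helper name and
the L1_CACHE_SHIFT value of 6 below are assumptions for illustration only.

#define L1_CACHE_SHIFT		6
#define L1_CACHE_BYTES		(1UL << L1_CACHE_SHIFT)
#define CACHE_LINE_MASK		(~(L1_CACHE_BYTES - 1))

/* Count how many cache lines cover [start, start + sz) */
static inline unsigned int lines_covering(unsigned long start, unsigned long sz)
{
	unsigned long end = start + sz;
	unsigned int n = 0;

	/* align start down to its cache line, then step one line at a time */
	for (start &= CACHE_LINE_MASK; start < end; start += L1_CACHE_BYTES)
		n++;

	return n;
}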
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c0a7210..291a70d 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -18,8 +18,8 @@
#include <asm-generic/irq.h>
-extern void __init arc_init_IRQ(void);
-extern int __init get_hw_config_num_irq(void);
+extern void arc_init_IRQ(void);
+extern int get_hw_config_num_irq(void);
void arc_local_timer_setup(unsigned int cpu);
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index b68b53f..cb7efc2 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -151,16 +151,38 @@ static inline void arch_unmask_irq(unsigned int irq)
#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
.macro IRQ_DISABLE scratch
lr \scratch, [status32]
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
+ TRACE_ASM_IRQ_DISABLE
.endm
.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
+ TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
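A conceptual C-level sketch, not from this patch, of what the TRACE_ASM_IRQ_*
hooks add: with CONFIG_TRACE_IRQFLAGS every hardware interrupt disable/enable
must be reported to the irq-state tracer, so each asm macro pairs the state
change with a call to trace_hardirqs_off()/trace_hardirqs_on(). The wrapper
names are hypothetical; arch_local_irq_*() stand in for ARC's real primitives.

#include <linux/irqflags.h>	/* trace_hardirqs_on/off(), arch_local_irq_*() */

/* Hypothetical wrappers mirroring the asm macros above: change the state,
 * then tell the tracer (same ordering as IRQ_DISABLE / IRQ_ENABLE here).
 */
static inline void irq_disable_traced(void)
{
	arch_local_irq_disable();
	trace_hardirqs_off();
}

static inline void irq_enable_traced(void)
{
	arch_local_irq_enable();
	trace_hardirqs_on();
}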
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index c2663b3..8c84ae9 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long asid; /* 8 bit MMU PID + Generation cycle */
+ unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 43a1b51..1fd467e 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -30,13 +30,13 @@
* "Fast Context Switch" i.e. no TLB flush on ctxt-switch
*
* Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
+ * of H/w ASID is done using software tracker @asid_cpu.
* When it reaches max 255, the allocation cycle starts afresh by flushing
* the entire TLB and wrapping ASID back to zero.
*
* A new allocation cycle, post rollover, could potentially reassign an ASID
* to a different task. Thus the rule is to refresh the ASID in a new cycle.
- * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
* serve as cycle/generation indicator and natural 32 bit unsigned math
* automagically increments the generation when lower 8 bits rollover.
*/
@@ -47,9 +47,11 @@
#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID 0UL
-#define hw_pid(mm) (mm->context.asid & MM_CTXT_ASID_MASK)
+#define asid_mm(mm, cpu) mm->context.asid[cpu]
+#define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
-extern unsigned int asid_cache;
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
/*
* Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
@@ -57,6 +59,7 @@ extern unsigned int asid_cache;
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
+ const unsigned int cpu = smp_processor_id();
unsigned long flags;
local_irq_save(flags);
@@ -71,28 +74,28 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
* first need to destroy the context, setting it to invalid
* value.
*/
- if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
goto set_hw;
/* move to new ASID and handle rollover */
- if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
+ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
- flush_tlb_all();
+ local_flush_tlb_all();
/*
* Above check is for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
- if (!asid_cache)
- asid_cache = MM_CTXT_FIRST_CYCLE;
+ if (!asid_cpu(cpu))
+ asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
}
/* Assign new ASID to tsk */
- mm->context.asid = asid_cache;
+ asid_mm(mm, cpu) = asid_cpu(cpu);
set_hw:
- write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
+ write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
local_irq_restore(flags);
}
@@ -104,16 +107,45 @@ set_hw:
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- mm->context.asid = MM_CTXT_NO_ASID;
+ int i;
+
+ for_each_possible_cpu(i)
+ asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
return 0;
}
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ /* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+ local_irq_save(flags);
+ asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+ local_irq_restore(flags);
+}
+
/* Prepare the MMU for task: setup PID reg with allocated ASID
If task doesn't have an ASID (never allocated or stolen), get a new ASID
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ const int cpu = smp_processor_id();
+
+ /*
+ * Note that the mm_cpumask is "aggregating" only: we don't clear it
+ * for the switched-out task, unlike some other arches.
+ * It is used to enlist the CPUs that need to be sent TLB flush IPIs;
+ * not sending the IPI to a CPU where a task once ran could cause stale
+ * TLB entries to be re-used, especially for a multi-threaded task.
+ * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+ * For a non-aggregating mm_cpumask, the IPI is not sent to C1, and if T1
+ * were to re-migrate to C1, it could access the unmapped region
+ * via any existing stale TLB entries.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
#ifndef CONFIG_SMP
/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
@@ -131,11 +163,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
-static inline void destroy_context(struct mm_struct *mm)
-{
- mm->context.asid = MM_CTXT_NO_ASID;
-}
-
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) => deactivate_mm( ) and
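A standalone sketch, not from the patch, of the generation check that now runs
per CPU: the upper 24 bits of both the per-CPU allocator (asid_cache) and the
task's per-CPU slot (mm->context.asid[cpu]) act as a cycle/generation counter,
and only when those bits match can the task's 8 bit hardware PID be reused
without reallocation. The function name is hypothetical.

#define MM_CTXT_ASID_MASK	0x000000ffUL
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

/* non-zero => same allocation cycle, the cached hardware PID is still valid */
static inline int asid_still_valid(unsigned long task_asid,
				   unsigned long cpu_asid_cache)
{
	return !((task_asid ^ cpu_asid_cache) & MM_CTXT_CYCLE_MASK);
}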
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 229e506..e10f8ce 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,7 +31,7 @@ struct cpuinfo_data {
extern int root_mountflags, end_mem;
extern int running_on_hw;
-void __init setup_processor(void);
+void setup_processor(void);
void __init setup_arch_memory(void);
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index c4fb211..eefc29f 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -30,7 +30,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
* APIs provided by arch SMP code to rest of arch code
*/
extern void __init smp_init_cpus(void);
-extern void __init first_lines_of_secondary(void);
+extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
/*
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7..71c7b2e 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
#endif
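A sketch of the usual SMP implementation pattern behind these declarations
(not necessarily ARC's exact code): each cross-CPU flush_tlb_*() runs the
corresponding local_flush_tlb_*() on every CPU recorded in mm_cpumask(),
which is why switch_mm() above only ever adds CPUs to that mask.

#include <linux/smp.h>		/* on_each_cpu_mask() */
#include <linux/mm_types.h>	/* struct mm_struct, mm_cpumask() */
#include <asm/tlbflush.h>	/* local_flush_tlb_mm() */

/* Fan the mm-wide flush out to all CPUs this mm has ever run on */
static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm((struct mm_struct *)arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* wait == 1: don't return until every targeted CPU has flushed */
	on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
}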
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
index 60702f3..3e5f071 100644
--- a/arch/arc/include/asm/unaligned.h
+++ b/arch/arc/include/asm/unaligned.h
@@ -22,7 +22,8 @@ static inline int
misaligned_fixup(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
- return 0;
+ /* Not fixed */
+ return 1;
}
#endif
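When misaligned-access emulation is not configured, this stub now reports
failure instead of pretending the access was fixed up. A hypothetical caller
sketch, not the actual ARC trap handler, showing the contract a non-zero
return implies; deliver_fault() is an assumed fault-delivery path.

/* Hypothetical: a trap handler that raises a fault when fixup fails */
static void handle_misaligned(unsigned long address, struct pt_regs *regs,
			      struct callee_regs *cregs)
{
	if (misaligned_fixup(address, regs, cregs) != 0)
		deliver_fault(address);	/* hypothetical SIGBUS-style path */
}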