Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c   |    9
-rw-r--r--  arch/blackfin/mach-common/cache.S         |   22
-rw-r--r--  arch/blackfin/mach-common/clocks-init.c   |    2
-rw-r--r--  arch/blackfin/mach-common/dpmc_modes.S    |   24
-rw-r--r--  arch/blackfin/mach-common/entry.S         |   61
-rw-r--r--  arch/blackfin/mach-common/interrupt.S     |   12
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c |  126
-rw-r--r--  arch/blackfin/mach-common/smp.c           |    6
8 files changed, 174 insertions(+), 88 deletions(-)
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 98133b9..80d39b2 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -62,3 +62,12 @@
#if (CONFIG_BOOT_LOAD & 0x3)
# error "The kernel load address must be 4 byte aligned"
#endif
+
+/* The entire kernel must be able to make a 24-bit pcrel call to the start of L1 */
+#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
+# error "The kernel load address is too high; keep it below 10meg for safety"
+#endif
+
+#if ANOMALY_05000448
+# error You are using a part with anomaly 05000448; this issue causes random memory read/write failures, which means random crashes.
+#endif
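
The new pcrel check above boils down to simple unsigned arithmetic: the wrapped distance from the kernel load address up to L1 instruction SRAM must fit within the reach the check allows for a 24-bit PC-relative call (0x1000000, i.e. 16 MiB). A standalone sketch of the same computation, using an assumed L1_CODE_START of 0xFFA00000 and an example CONFIG_BOOT_LOAD (both are stand-ins, not taken from any particular Kconfig):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; real builds take these from Kconfig/headers. */
#define L1_CODE_START    0xFFA00000u  /* assumed L1 instruction SRAM base */
#define CONFIG_BOOT_LOAD 0x1000u      /* example kernel load address      */

int main(void)
{
	/* Distance from the load address, wrapping past 0xffffffff, up to L1. */
	uint32_t span = (0xffffffffu - L1_CODE_START + 1u) + CONFIG_BOOT_LOAD;

	/* The build check errors out when this exceeds 0x1000000 (16 MiB). */
	printf("span = 0x%08x -> %s\n", span,
	       span > 0x1000000u ? "too far for a 24-bit pcrel call" : "ok");
	return 0;
}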
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 3c98dac..aa0648c 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -66,11 +66,33 @@
/* Invalidate all instruction cache lines associated with this memory area */
ENTRY(_blackfin_icache_flush_range)
+/*
+ * Workaround to avoid loading a wrong instruction after invalidating the icache
+ * when the following sequence is met.
+ *
+ * 1) One instruction address is cached in the instruction cache.
+ * 2) This instruction in SDRAM is changed.
+ * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
+ * 4) This instruction is executed again, but the old one is loaded.
+ */
+ P0 = R0;
+ IFLUSH[P0];
do_flush IFLUSH, , nop
ENDPROC(_blackfin_icache_flush_range)
/* Flush all cache lines associated with this area of memory. */
ENTRY(_blackfin_icache_dcache_flush_range)
+/*
+ * Workaround to avoid loading a wrong instruction after invalidating the icache
+ * when the following sequence is met.
+ *
+ * 1) One instruction address is cached in the instruction cache.
+ * 2) This instruction in SDRAM is changed.
+ * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
+ * 4) This instruction is executed again, but the old one is loaded.
+ */
+ P0 = R0;
+ IFLUSH[P0];
do_flush FLUSH, IFLUSH
ENDPROC(_blackfin_icache_dcache_flush_range)
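
The added P0 = R0; IFLUSH[P0]; issues one extra flush of the first line before the do_flush loop runs, so a line that was already cached and then rewritten in SDRAM cannot be re-fetched stale. From the caller's side the interface is unchanged; a hedged sketch of the usual patch-then-flush pattern (blackfin_icache_flush_range() is the real kernel routine, the helper around it is hypothetical):

/* Caller-side sketch only: blackfin_icache_flush_range() is the real kernel
 * routine; patch_one_insn() is a hypothetical helper for illustration. */
extern void blackfin_icache_flush_range(unsigned long start, unsigned long end);

static void patch_one_insn(unsigned short *insn, unsigned short new_opcode)
{
	*insn = new_opcode;	/* step 2 of the comment: the SDRAM copy changes */

	/* Without the extra IFLUSH in the flush routine, a line cached at
	 * step 1 could still be fetched at step 4; the range flush below is
	 * what makes the new opcode visible to the sequencer. */
	blackfin_icache_flush_range((unsigned long)insn,
				    (unsigned long)insn + sizeof(*insn));
}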
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 9dddb6f..3539365 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -17,7 +17,7 @@
#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
#define PLL_CTL_VAL \
(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
- (PLL_BYPASS << 8) | (ANOMALY_05000265 ? 0x8000 : 0))
+ (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
__attribute__((l1_text))
static void do_sync(void)
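
The PLL_CTL_VAL change flips the condition guarding bit 15 (0x8000): previously it was set only when anomaly 05000265 required it, now it is set unless anomaly 05000305 marks that bit as non-functional (reading 0x8000 as the SPORT_HYS hysteresis bit is an assumption here, not stated in the patch). A small standalone sketch of how the value is composed, with made-up Kconfig stand-ins:

#include <stdio.h>

/* Illustrative stand-ins for the Kconfig/anomaly symbols used by the macro. */
#define CONFIG_VCO_MULT   16
#define CLKIN_HALF        0       /* full-rate CLKIN                        */
#define PLL_BYPASS        0
#define ANOMALY_05000305  0       /* 0: the 0x8000 bit works, so set it     */

#define PLL_CTL_VAL \
	(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
	 (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))

int main(void)
{
	/* MSEL occupies bits 9..14, BYPASS bit 8; 0x8000 is assumed to be
	 * SPORT_HYS, which the old code tied to anomaly 05000265 instead. */
	printf("PLL_CTL = 0x%04x\n", (unsigned)PLL_CTL_VAL);
	return 0;
}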
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 4da50bc..8009a51 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -376,10 +376,22 @@ ENTRY(_do_hibernate)
#endif
#ifdef PINT0_ASSIGN
+ PM_SYS_PUSH(PINT0_MASK_SET)
+ PM_SYS_PUSH(PINT1_MASK_SET)
+ PM_SYS_PUSH(PINT2_MASK_SET)
+ PM_SYS_PUSH(PINT3_MASK_SET)
PM_SYS_PUSH(PINT0_ASSIGN)
PM_SYS_PUSH(PINT1_ASSIGN)
PM_SYS_PUSH(PINT2_ASSIGN)
PM_SYS_PUSH(PINT3_ASSIGN)
+ PM_SYS_PUSH(PINT0_INVERT_SET)
+ PM_SYS_PUSH(PINT1_INVERT_SET)
+ PM_SYS_PUSH(PINT2_INVERT_SET)
+ PM_SYS_PUSH(PINT3_INVERT_SET)
+ PM_SYS_PUSH(PINT0_EDGE_SET)
+ PM_SYS_PUSH(PINT1_EDGE_SET)
+ PM_SYS_PUSH(PINT2_EDGE_SET)
+ PM_SYS_PUSH(PINT3_EDGE_SET)
#endif
PM_SYS_PUSH(EBIU_AMBCTL0)
@@ -714,10 +726,22 @@ ENTRY(_do_hibernate)
PM_SYS_POP(EBIU_AMBCTL0)
#ifdef PINT0_ASSIGN
+ PM_SYS_POP(PINT3_EDGE_SET)
+ PM_SYS_POP(PINT2_EDGE_SET)
+ PM_SYS_POP(PINT1_EDGE_SET)
+ PM_SYS_POP(PINT0_EDGE_SET)
+ PM_SYS_POP(PINT3_INVERT_SET)
+ PM_SYS_POP(PINT2_INVERT_SET)
+ PM_SYS_POP(PINT1_INVERT_SET)
+ PM_SYS_POP(PINT0_INVERT_SET)
PM_SYS_POP(PINT3_ASSIGN)
PM_SYS_POP(PINT2_ASSIGN)
PM_SYS_POP(PINT1_ASSIGN)
PM_SYS_POP(PINT0_ASSIGN)
+ PM_SYS_POP(PINT3_MASK_SET)
+ PM_SYS_POP(PINT2_MASK_SET)
+ PM_SYS_POP(PINT1_MASK_SET)
+ PM_SYS_POP(PINT0_MASK_SET)
#endif
#ifdef SICA_IWR1
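
The hibernate path saves the additional PINT mask/invert/edge state with PM_SYS_PUSH on the way down and restores it with PM_SYS_POP on the way up, and the pop order is the exact reverse of the push order because the saved values live on a stack-like save area. A toy standalone model of that LIFO discipline (the array-backed stack is illustrative; the real macros use the CPU stack in assembly):

#include <stdint.h>
#include <stdio.h>

/* Minimal LIFO model of PM_SYS_PUSH/PM_SYS_POP: registers must be popped in
 * the reverse of the order they were pushed, which is why the .S file lists
 * PINT0..PINT3 during suspend and PINT3..PINT0 during resume. */
static uint32_t save_area[64];
static unsigned sp;

static void     pm_push(uint32_t mmr_value) { save_area[sp++] = mmr_value; }
static uint32_t pm_pop(void)                { return save_area[--sp]; }

int main(void)
{
	uint32_t pint_mask[4] = { 0x1, 0x2, 0x4, 0x8 };   /* fake MMR contents */
	uint32_t restored[4];
	int i;

	for (i = 0; i < 4; i++)          /* suspend: PINT0 .. PINT3 */
		pm_push(pint_mask[i]);
	for (i = 3; i >= 0; i--)         /* resume:  PINT3 .. PINT0 */
		restored[i] = pm_pop();

	for (i = 0; i < 4; i++)
		printf("PINT%d_MASK_SET restored to 0x%x\n", i, restored[i]);
	return 0;
}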
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 88de053..21e65a3 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -600,6 +600,19 @@ ENTRY(_system_call)
p2 = [p2];
[p2+(TASK_THREAD+THREAD_KSP)] = sp;
+#ifdef CONFIG_IPIPE
+ r0 = sp;
+ SP += -12;
+ call ___ipipe_syscall_root;
+ SP += 12;
+ cc = r0 == 1;
+ if cc jump .Lsyscall_really_exit;
+ cc = r0 == -1;
+ if cc jump .Lresume_userspace;
+ r3 = [sp + PT_R3];
+ r4 = [sp + PT_R4];
+ p0 = [sp + PT_ORIG_P0];
+#endif /* CONFIG_IPIPE */
/* Check the System Call */
r7 = __NR_syscall;
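
With CONFIG_IPIPE, syscall entry first hands the register frame to ___ipipe_syscall_root and routes on its return value: 1 means the call was absorbed by another domain and the path exits immediately, -1 means skip dispatch and go straight through the userspace-resume work, and anything else reloads the clobbered r3/r4/orig_p0 and proceeds to normal syscall dispatch. A hedged C rendering of that branch logic (the prototype is inferred from the call site; the enum and wrapper are purely illustrative):

/* C shape of the new CONFIG_IPIPE hook at syscall entry; this is a sketch,
 * not the kernel's code. */
struct pt_regs;

extern int __ipipe_syscall_root(struct pt_regs *regs);  /* prototype assumed */

enum syscall_route { ROUTE_REALLY_EXIT, ROUTE_RESUME_USERSPACE, ROUTE_DISPATCH };

static enum syscall_route route_syscall(struct pt_regs *regs)
{
	int ret = __ipipe_syscall_root(regs);   /* r0 = sp; call ___ipipe_syscall_root */

	if (ret == 1)
		return ROUTE_REALLY_EXIT;       /* handled by another domain: bail out */
	if (ret == -1)
		return ROUTE_RESUME_USERSPACE;  /* skip dispatch, run exit work only   */
	return ROUTE_DISPATCH;                  /* reload r3/r4/orig_p0, run syscall   */
}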
@@ -654,6 +667,17 @@ ENTRY(_system_call)
r7 = r7 & r4;
.Lsyscall_resched:
+#ifdef CONFIG_IPIPE
+ cc = BITTST(r7, TIF_IRQ_SYNC);
+ if !cc jump .Lsyscall_no_irqsync;
+ [--sp] = reti;
+ r0 = [sp++];
+ SP += -12;
+ call ___ipipe_sync_root;
+ SP += 12;
+ jump .Lresume_userspace_1;
+.Lsyscall_no_irqsync:
+#endif
cc = BITTST(r7, TIF_NEED_RESCHED);
if !cc jump .Lsyscall_sigpending;
@@ -685,6 +709,10 @@ ENTRY(_system_call)
.Lsyscall_really_exit:
r5 = [sp + PT_RESERVED];
rets = r5;
+#ifdef CONFIG_IPIPE
+ [--sp] = reti;
+ r5 = [sp++];
+#endif /* CONFIG_IPIPE */
rts;
ENDPROC(_system_call)
@@ -771,6 +799,15 @@ _new_old_task:
ENDPROC(_resume)
ENTRY(_ret_from_exception)
+#ifdef CONFIG_IPIPE
+ [--sp] = rets;
+ SP += -12;
+ call ___ipipe_check_root
+ SP += 12
+ rets = [sp++];
+ cc = r0 == 0;
+ if cc jump 4f; /* not on behalf of Linux, get out */
+#endif /* CONFIG_IPIPE */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
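
Under CONFIG_IPIPE, _ret_from_exception now asks ___ipipe_check_root whether the CPU is currently executing on behalf of the root (Linux) domain; a zero answer means another domain owns the CPU, so the usual Linux exit work is skipped entirely ("not on behalf of Linux, get out"). A minimal hedged sketch of that gate, with the prototype assumed:

/* Sketch of the CONFIG_IPIPE gate at the top of _ret_from_exception;
 * __ipipe_check_root()'s exact prototype is an assumption here. */
extern int __ipipe_check_root(void);

static int should_run_linux_exit_work(void)
{
	/* 0: not running on behalf of the root domain, so skip signal
	 * delivery and rescheduling and return straight to the caller. */
	return __ipipe_check_root() != 0;
}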
@@ -827,6 +864,28 @@ ENTRY(_ret_from_exception)
rts;
ENDPROC(_ret_from_exception)
+#ifdef CONFIG_IPIPE
+
+_sync_root_irqs:
+ [--sp] = reti; /* Reenable interrupts */
+ r0 = [sp++];
+ jump.l ___ipipe_sync_root
+
+_resume_kernel_from_int:
+ r0.l = _sync_root_irqs
+ r0.h = _sync_root_irqs
+ [--sp] = rets;
+ [--sp] = ( r7:4, p5:3 );
+ SP += -12;
+ call ___ipipe_call_irqtail
+ SP += 12;
+ ( r7:4, p5:3 ) = [sp++];
+ rets = [sp++];
+ rts
+#else
+#define _resume_kernel_from_int 2f
+#endif
+
ENTRY(_return_from_int)
/* If someone else already raised IRQ 15, do nothing. */
csync;
@@ -848,7 +907,7 @@ ENTRY(_return_from_int)
r1 = r0 - r1;
r2 = r0 & r1;
cc = r2 == 0;
- if !cc jump 2f;
+ if !cc jump _resume_kernel_from_int;
/* Lower the interrupt level to 15. */
p0.l = lo(EVT15);
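
The jump-target change in _return_from_int is the visible half of the _resume_kernel_from_int machinery added above: when other interrupt levels are still pending (so the return is into kernel or interrupt context), non-IPIPE builds keep taking the old bare-return label 2f, while IPIPE builds detour through ___ipipe_call_irqtail with _sync_root_irqs as the tail hook, which re-enables interrupts and lets the root domain replay its pending interrupt log. Roughly, in C (prototypes assumed from the call sites):

/* Approximate C shape of the new routing; in the assembly the hook passed to
 * __ipipe_call_irqtail() is the local _sync_root_irqs trampoline, which
 * re-enables interrupts before tail-calling __ipipe_sync_root(). */
extern void __ipipe_call_irqtail(unsigned long hook);   /* hook address in R0 */
extern void __ipipe_sync_root(void);

static void return_from_int_tail(int other_levels_pending)
{
	if (!other_levels_pending)
		return;                 /* lower to EVT15 and exit, as before */
#ifdef CONFIG_IPIPE
	__ipipe_call_irqtail((unsigned long)&__ipipe_sync_root);
#else
	/* non-IPIPE: _resume_kernel_from_int is #defined to the old label 2f */
#endif
}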
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 43c4eb9..0069c2d 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -235,6 +235,7 @@ ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
ENTRY(___ipipe_call_irqtail)
+ p0 = r0;
r0.l = 1f;
r0.h = 1f;
reti = r0;
@@ -242,9 +243,6 @@ ENTRY(___ipipe_call_irqtail)
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
- p0.l = ___ipipe_irq_tail_hook;
- p0.h = ___ipipe_irq_tail_hook;
- p0 = [p0];
sp += -12;
call (p0);
sp += 12;
@@ -259,7 +257,7 @@ ENTRY(___ipipe_call_irqtail)
p0.h = hi(EVT14);
[p0] = r0;
csync;
- r0 = 0x401f;
+ r0 = 0x401f (z);
sti r0;
raise 14;
[--sp] = reti; /* IRQs on. */
@@ -277,11 +275,7 @@ ENTRY(___ipipe_call_irqtail)
p0.h = _bfin_irq_flags;
r0 = [p0];
sti r0;
-#if 0 /* FIXME: this actually raises scheduling latencies */
- /* Reenable interrupts */
- [--sp] = reti;
- r0 = [sp++];
-#endif
rts;
ENDPROC(___ipipe_call_irqtail)
+
#endif /* CONFIG_IPIPE */
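
___ipipe_call_irqtail no longer loads the global ___ipipe_irq_tail_hook itself; the caller now passes the tail hook's address in R0 and the routine simply moves it into P0 before the indirect call. In C terms the change is roughly "call through a global function pointer" becoming "call the function pointer handed in as an argument" (the sketch below mirrors the assembly in spirit only):

/* Before: the irqtail trampoline dereferenced a global hook pointer.
 * After:  the hook is passed in by the caller (R0 -> P0 in the assembly). */
typedef void (*irqtail_hook_t)(void);

static irqtail_hook_t ipipe_irq_tail_hook;      /* global used by the old code */

static void call_irqtail_old(void)
{
	irqtail_hook_t hook = ipipe_irq_tail_hook;  /* p0 = [___ipipe_irq_tail_hook] */
	hook();
}

static void call_irqtail_new(irqtail_hook_t hook)   /* p0 = r0 */
{
	hook();
}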
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 2024945..a7d7b2d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -161,11 +161,15 @@ static void bfin_core_unmask_irq(unsigned int irq)
static void bfin_internal_mask_irq(unsigned int irq)
{
+ unsigned long flags;
+
#ifdef CONFIG_BF53x
+ local_irq_save_hw(flags);
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
~(1 << SIC_SYSIRQ(irq)));
#else
unsigned mask_bank, mask_bit;
+ local_irq_save_hw(flags);
mask_bank = SIC_SYSIRQ(irq) / 32;
mask_bit = SIC_SYSIRQ(irq) % 32;
bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
@@ -175,15 +179,20 @@ static void bfin_internal_mask_irq(unsigned int irq)
~(1 << mask_bit));
#endif
#endif
+ local_irq_restore_hw(flags);
}
static void bfin_internal_unmask_irq(unsigned int irq)
{
+ unsigned long flags;
+
#ifdef CONFIG_BF53x
+ local_irq_save_hw(flags);
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
(1 << SIC_SYSIRQ(irq)));
#else
unsigned mask_bank, mask_bit;
+ local_irq_save_hw(flags);
mask_bank = SIC_SYSIRQ(irq) / 32;
mask_bit = SIC_SYSIRQ(irq) % 32;
bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
@@ -193,6 +202,7 @@ static void bfin_internal_unmask_irq(unsigned int irq)
(1 << mask_bit));
#endif
#endif
+ local_irq_restore_hw(flags);
}
#ifdef CONFIG_PM
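
bfin_internal_mask_irq()/bfin_internal_unmask_irq() now bracket the SIC_IMASK read-modify-write with local_irq_save_hw()/local_irq_restore_hw(); with I-pipe the ordinary local_irq_save() only virtualizes the interrupt mask, so a real hardware interrupt could otherwise land between the read and the write and one of the two updates would be lost. A toy standalone model of the window being closed (all names are stand-ins, not the kernel API):

#include <stdint.h>

/* Toy model only: sic_imask stands in for the SIC_IMASK MMR, and the helpers
 * stand in for local_irq_save_hw()/local_irq_restore_hw(). */
static volatile uint32_t sic_imask = 0xffffffffu;

static void hw_irq_save(unsigned long *flags)   { *flags = 0; /* cli, save state */ }
static void hw_irq_restore(unsigned long flags) { (void)flags; /* sti */ }

static void internal_mask_irq(unsigned int sys_irq)
{
	unsigned long flags;

	hw_irq_save(&flags);                    /* close the read..write window */
	sic_imask &= ~(1u << (sys_irq % 32));   /* read-modify-write of the MMR  */
	hw_irq_restore(flags);
}

static void internal_unmask_irq(unsigned int sys_irq)
{
	unsigned long flags;

	hw_irq_save(&flags);
	sic_imask |= 1u << (sys_irq % 32);
	hw_irq_restore(flags);
}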
@@ -390,7 +400,7 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
- _set_irq_handler(irq, handle_edge_irq);
+ _set_irq_handler(irq, handle_level_irq);
#else
struct irq_desc *desc = irq_desc + irq;
/* May not call generic set_irq_handler() due to spinlock
@@ -1055,13 +1065,18 @@ int __init init_arch_irq(void)
#endif
default:
#ifdef CONFIG_IPIPE
- /*
- * We want internal interrupt sources to be masked, because
- * ISRs may trigger interrupts recursively (e.g. DMA), but
- * interrupts are _not_ masked at CPU level. So let's handle
- * them as level interrupts.
- */
- set_irq_handler(irq, handle_level_irq);
+ /*
+ * We want internal interrupt sources to be
+ * masked, because ISRs may trigger interrupts
+ * recursively (e.g. DMA), but interrupts are
+ * _not_ masked at CPU level. So let's handle
+ * most of them as level interrupts, except
+ * the timer interrupt which is special.
+ */
+ if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
+ set_irq_handler(irq, handle_simple_irq);
+ else
+ set_irq_handler(irq, handle_level_irq);
#else /* !CONFIG_IPIPE */
set_irq_handler(irq, handle_simple_irq);
#endif /* !CONFIG_IPIPE */
@@ -1123,9 +1138,8 @@ int __init init_arch_irq(void)
#ifdef CONFIG_IPIPE
for (irq = 0; irq < NR_IRQS; irq++) {
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
desc->ic_prio = __ipipe_get_irq_priority(irq);
- desc->thr_prio = __ipipe_get_irqthread_priority(irq);
}
#endif /* CONFIG_IPIPE */
@@ -1208,76 +1222,21 @@ int __ipipe_get_irq_priority(unsigned irq)
return IVG15;
}
-int __ipipe_get_irqthread_priority(unsigned irq)
-{
- int ient, prio;
- int demux_irq;
-
- /* The returned priority value is rescaled to [0..IVG13+1]
- * with 0 being the lowest effective priority level. */
-
- if (irq <= IRQ_CORETMR)
- return IVG13 - irq + 1;
-
- /* GPIO IRQs are given the priority of the demux
- * interrupt. */
- if (IS_GPIOIRQ(irq)) {
-#if defined(CONFIG_BF54x)
- u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
- demux_irq = (bank == 0 ? IRQ_PINT0 :
- bank == 1 ? IRQ_PINT1 :
- bank == 2 ? IRQ_PINT2 :
- IRQ_PINT3);
-#elif defined(CONFIG_BF561)
- demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
- irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
- IRQ_PROG0_INTA);
-#elif defined(CONFIG_BF52x)
- demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
- irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
- IRQ_PORTF_INTA);
-#else
- demux_irq = irq;
-#endif
- return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
- }
-
- /* The GPIO demux interrupt is given a lower priority
- * than the GPIO IRQs, so that its threaded handler
- * unmasks the interrupt line after the decoded IRQs
- * have been processed. */
- prio = PRIO_GPIODEMUX(irq);
- /* demux irq? */
- if (prio != -1)
- return IVG13 - prio;
-
- for (ient = 0; ient < NR_PERI_INTS; ient++) {
- struct ivgx *ivg = ivg_table + ient;
- if (ivg->irqno == irq) {
- for (prio = 0; prio <= IVG13-IVG7; prio++) {
- if (ivg7_13[prio].ifirst <= ivg &&
- ivg7_13[prio].istop > ivg)
- return IVG7 - prio;
- }
- }
- }
-
- return 0;
-}
-
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
+ struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
+ struct ipipe_domain *this_domain = ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
- int irq;
+ int irq, s;
if (likely(vec == EVT_IVTMR_P)) {
irq = IRQ_CORETMR;
- goto handle_irq;
+ goto core_tick;
}
SSYNC();
@@ -1319,24 +1278,39 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
irq = ivg->irqno;
if (irq == IRQ_SYSTMR) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#else
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+#endif
/* This is basically what we need from the register frame. */
__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
- if (!ipipe_root_domain_p)
- __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
- else
+ if (this_domain != ipipe_root_domain)
__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+ else
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
}
-handle_irq:
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#endif
+ if (this_domain == ipipe_root_domain) {
+ s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+ barrier();
+ }
ipipe_trace_irq_entry(irq);
__ipipe_handle_irq(irq, regs);
- ipipe_trace_irq_exit(irq);
+ ipipe_trace_irq_exit(irq);
- if (ipipe_root_domain_p)
- return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+ if (this_domain == ipipe_root_domain) {
+ set_thread_flag(TIF_IRQ_SYNC);
+ if (!s) {
+ __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+ return !test_bit(IPIPE_STALL_FLAG, &p->status);
+ }
+ }
return 0;
}
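
The reworked __ipipe_grab_irq() defers root-domain synchronization while a handler runs: when the interrupt belongs to the root (Linux) domain it sets IPIPE_SYNCDEFER_FLAG around __ipipe_handle_irq(), raises TIF_IRQ_SYNC so the syscall and interrupt exit paths know pending IRQs still need replaying, and only tells the low-level caller to sync immediately when it was the outermost nesting level and the root domain is not stalled. A condensed, hedged C model of that bookkeeping (the wrapper and its arguments are illustrative, not the real function):

/* Condensed model of the root-domain path in __ipipe_grab_irq(); the flag and
 * helper names follow the patch, but this is a sketch. */
static int grab_irq_in_root_domain(int irq, struct pt_regs *regs,
				   volatile unsigned long *root_status)
{
	/* Non-zero if an outer nesting level already owns IPIPE_SYNCDEFER_FLAG. */
	int was_deferred = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, root_status);

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	set_thread_flag(TIF_IRQ_SYNC);          /* exit paths will replay later */
	if (!was_deferred) {
		__clear_bit(IPIPE_SYNCDEFER_FLAG, root_status);
		/* Outermost level: tell the low-level caller whether it may
		 * sync the root log right now (i.e. root is not stalled). */
		return !test_bit(IPIPE_STALL_FLAG, root_status);
	}
	return 0;                               /* nested: defer to the outer level */
}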
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 77c9928..93eab61 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -158,10 +158,14 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
kfree(msg);
break;
case BFIN_IPI_CALL_FUNC:
+ spin_unlock(&msg_queue->lock);
ipi_call_function(cpu, msg);
+ spin_lock(&msg_queue->lock);
break;
case BFIN_IPI_CPU_STOP:
+ spin_unlock(&msg_queue->lock);
ipi_cpu_stop(cpu);
+ spin_lock(&msg_queue->lock);
kfree(msg);
break;
default:
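
ipi_handler() now releases msg_queue->lock around ipi_call_function() and ipi_cpu_stop() and re-acquires it afterwards; both can run for an arbitrarily long time (ipi_cpu_stop() essentially never returns to normal flow), so they must not execute with the per-queue spinlock held. A toy userspace model of that unlock/call/relock pattern (pthreads stand in for the kernel spinlock):

#include <pthread.h>

/* Toy illustration of "drop the queue lock around slow work", the pattern the
 * patch applies to the Blackfin IPI handler. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_ipi_work(void) { /* e.g. run the cross-call function */ }

static void handle_one_message(void)
{
	pthread_mutex_lock(&queue_lock);
	/* ...dequeue the message under the lock... */
	pthread_mutex_unlock(&queue_lock);   /* never hold it across the call    */
	slow_ipi_work();
	pthread_mutex_lock(&queue_lock);     /* re-take before touching the queue */
	/* ...free the message / advance the queue... */
	pthread_mutex_unlock(&queue_lock);
}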
@@ -457,7 +461,7 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
smp_flush_data.start = start;
smp_flush_data.end = end;
- if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
+ if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);