author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-07-10 19:16:43 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-07-10 19:16:43 +1000
commit     50bba07d6a16ce4a3b4f6abb44bfd3645c046ef6 (patch)
tree       c76f080377dd611857fa33f38a2b8a2af4576f8b /arch/powerpc
parent     a8b91e43afd736fcebb0836359e5ddaeae45b2ab (diff)
parent     50fb31cfed9218b439360caf7c0399b00042da15 (diff)
download   op-kernel-dev-50bba07d6a16ce4a3b4f6abb44bfd3645c046ef6.zip
           op-kernel-dev-50bba07d6a16ce4a3b4f6abb44bfd3645c046ef6.tar.gz
Merge branch 'merge' into next
We want to bring in the latest IRQ fixes
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h                  6
-rw-r--r--  arch/powerpc/kernel/irq.c                         48
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S            2
-rw-r--r--  arch/powerpc/mm/numa.c                             2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c           11
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c   17
-rw-r--r--  arch/powerpc/xmon/xmon.c                           2
7 files changed, 70 insertions, 18 deletions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6eb75b8..0554ab0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)	mtmsr(x)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1b41502..1f017bb 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
 	else {
 		/*
 		 * We should already be hard disabled here. We had bugs
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
 	__hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+	/*
+	 * First we need to hard disable to ensure no interrupt
+	 * occurs before we effectively enter the low power state
+	 */
+	hard_irq_disable();
+
+	/*
+	 * If anything happened while we were soft-disabled,
+	 * we return now and do not enter the low power state.
+	 */
+	if (lazy_irq_pending())
+		return false;
+
+	/* Tell lockdep we are about to re-enable */
+	trace_hardirqs_on();
+
+	/*
+	 * Mark interrupts as soft-enabled and clear the
+	 * PACA_IRQ_HARD_DIS from the pending mask since we
+	 * are about to hard enable as well as a side effect
+	 * of entering the low power state.
+	 */
+	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	local_paca->soft_enabled = 1;
+
+	/* Tell the caller to enter the low power state */
+	return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafc..a1044f4 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
 	lwz	r4,VCPU_PTID(r9)
 	li	r0,1
-	sldi	r0,r0,r4
+	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 7c28589..39b1597 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -641,7 +641,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
-	struct assoc_arrays aa;
+	struct assoc_arrays aa = { .arrays = NULL };
 
 	n = of_get_drconf_memory(memory, &dm);
 	if (!n)
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc8..d17e98b 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
 	unsigned long ctrl, thread_switch_control;
 
-	/*
-	 * We need to hard disable interrupts, the local_irq_enable() done by
-	 * our caller upon return will hard re-enable.
-	 */
-	hard_irq_disable();
+	/* Ensure our interrupt state is properly tracked */
+	if (!prep_irq_for_idle())
+		return;
 
 	ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
 	 */
 	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
 	mtspr(SPRN_CTRLT, ctrl);
+
+	/* Re-enable interrupts in MSR */
+	__hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index a97ef66..7f5668b 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -100,15 +100,18 @@ out:
 static void check_and_cede_processor(void)
 {
 	/*
-	 * Interrupts are soft-disabled at this point,
-	 * but not hard disabled. So an interrupt might have
-	 * occurred before entering NAP, and would be potentially
-	 * lost (edge events, decrementer events, etc...) unless
-	 * we first hard disable then check.
+	 * Ensure our interrupt state is properly tracked,
+	 * also checks if no interrupt has occurred while we
+	 * were soft-disabled
 	 */
-	hard_irq_disable();
-	if (!lazy_irq_pending())
+	if (prep_irq_for_idle()) {
 		cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+		/* Ensure that H_CEDE returns with IRQs on */
+		if (WARN_ON(!(mfmsr() & MSR_EE)))
+			__hard_irq_enable();
+#endif
+	}
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06..eab3492 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
 		/* print cpus waiting or in xmon */
 		printf("cpus stopped:");
 		count = 0;
-		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+		for_each_possible_cpu(cpu) {
 			if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 				if (count == 0)
 					printf(" %x", cpu);
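
For context on the helper these platform changes converge on: both idle paths above now funnel through prep_irq_for_idle() before entering a low-power state. The sketch below shows that call pattern in isolation. It is an illustration, not code from this commit: my_power_save() and enter_low_power_state() are hypothetical stand-ins, while prep_irq_for_idle() and __hard_irq_enable() are the real arch/powerpc helpers touched by the diff.

/*
 * Minimal sketch of a ppc_md.power_save-style callback using the
 * prep_irq_for_idle() pattern from this merge. It is entered with
 * interrupts soft-disabled, as cpu_idle() guarantees.
 */
static void my_power_save(void)
{
	/*
	 * Hard-disable and check the lazy irq state. If an interrupt
	 * arrived while we were soft-disabled, bail out: the caller's
	 * local_irq_enable() will replay it.
	 */
	if (!prep_irq_for_idle())
		return;

	/* Hypothetical stand-in for H_CEDE, CBE thread pause, etc. */
	enter_low_power_state();

	/*
	 * If the wakeup path did not already set MSR[EE] (H_CEDE does,
	 * the CBE pause in pervasive.c does not), re-enable it before
	 * returning, as cbe_power_save() does above.
	 */
	__hard_irq_enable();
}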