Diffstat (limited to 'arch/mn10300/include/asm/irqflags.h')
-rw-r--r-- | arch/mn10300/include/asm/irqflags.h | 111
1 file changed, 102 insertions, 9 deletions
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 5e529a1..7a7ae12 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,6 +13,9 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
 
 /*
  * interrupt control
@@ -23,11 +26,7 @@
  * - level 6 - timer interrupt
  * - "enabled": run in IM7
  */
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL	EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL	EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL	(CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * we make sure arch_irq_enable() doesn't cause priority inversion
  */
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
 
 static inline void arch_local_irq_enable(void)
 {
 	unsigned long tmp;
+	int cpu = raw_smp_processor_id();
 
 	asm volatile(
 		"	mov	epsw,%0		\n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
 		"	or	%2,%0		\n"
 		"	mov	%0,epsw		\n"
 		: "=&d"(tmp)
-		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
-		: "memory");
+		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+		: "memory", "cc");
 }
 
 static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+	return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
 }
 
 static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
  */
static inline void arch_safe_halt(void)
 {
+#ifdef CONFIG_SMP
+	arch_local_irq_enable();
+#else
 	asm volatile(
 		"	or	%0,epsw	\n"
 		"	nop	\n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
 		:
 		: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
 		: "cc");
+#endif
 }
 
+#define __sleep_cpu()				\
+do {						\
+	asm volatile(				\
+		"	bset	%1,(%0)\n"	\
+		"1:	btst	%1,(%0)\n"	\
+		"	bne	1b\n"		\
+		:				\
+		: "i"(&CPUM), "i"(CPUM_SLEEP)	\
+		: "cc"				\
+		);				\
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+	asm volatile(
+		"	and	%0,epsw		\n"
+		"	nop			\n"
+		"	nop			\n"
+		"	nop			\n"
+		:
+		: "i"(~EPSW_IE)
+		: "memory"
+		);
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+	arch_local_cli();
+	return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+	asm volatile(
+		"	or	%0,epsw		\n"
+		:
+		: "i"(EPSW_IE)
+		: "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+	asm volatile(
+		"	and	%0,epsw		\n"
+		"	or	%1,epsw		\n"
+		:
+		: "i"(~EPSW_IM), "i"(EPSW_IE | level)
+		: "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg)			\
+	mov	epsw,reg
+
+#define LOCAL_IRQ_DISABLE			\
+	and	~EPSW_IM,epsw;			\
+	or	EPSW_IE|MN10300_CLI_LEVEL,epsw;	\
+	nop;					\
+	nop;					\
+	nop
+
+#define LOCAL_IRQ_ENABLE			\
+	or	EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg)			\
+	mov	reg,epsw
+
+#define LOCAL_CLI_SAVE(reg)			\
+	mov	epsw,reg;			\
+	and	~EPSW_IE,epsw;			\
+	nop;					\
+	nop;					\
+	nop
+
+#define LOCAL_CLI				\
+	and	~EPSW_IE,epsw;			\
+	nop;					\
+	nop;					\
+	nop
+
+#define LOCAL_STI				\
+	or	EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)	\
+	and	~EPSW_IM,epsw;			\
+	or	EPSW_IE|(level),epsw
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_IRQFLAGS_H */
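
The substance of the change: MN10300_CLI_LEVEL is now derived from CONFIG_LINUX_CLI_LEVEL, arch_irqs_disabled_flags() checks both EPSW_IE and the full EPSW_IM field, and __mn10300_irq_enabled_epsw becomes a per-CPU array indexed with raw_smp_processor_id(), so each CPU re-enables interrupts from its own saved mask level rather than a single global value. A rough user-space sketch of that last point follows; EPSW_IE, EPSW_IM, EPSW_IM_7, NR_CPUS and cpu_id() below are stand-ins for illustration, not the kernel's definitions.

/*
 * Illustrative model only: a per-CPU "enabled EPSW" table in the shape
 * of __mn10300_irq_enabled_epsw[] from this patch.  All constants and
 * the cpu_id() helper are stand-ins, not the real MN10300 layout.
 */
#include <stdio.h>

#define NR_CPUS   2
#define EPSW_IE   0x0800	/* stand-in: interrupt enable flag */
#define EPSW_IM   0x0700	/* stand-in: interrupt mask field */
#define EPSW_IM_7 0x0700	/* stand-in: all priority levels open */

/* One "interrupts enabled" template per CPU. */
static unsigned long irq_enabled_epsw[NR_CPUS] = {
	EPSW_IE | EPSW_IM_7,
	EPSW_IE | EPSW_IM_7,
};

/* Stand-in for raw_smp_processor_id(). */
static int cpu_id(void)
{
	return 0;
}

/*
 * Model of the reworked arch_local_irq_enable(): clear the IM field,
 * then OR in this CPU's own template, so re-enabling interrupts on one
 * CPU cannot drag it to a level chosen by another CPU (the "priority
 * inversion" the comment above the extern declaration warns about).
 */
static unsigned long local_irq_enable(unsigned long epsw)
{
	epsw &= ~EPSW_IM;
	epsw |= irq_enabled_epsw[cpu_id()];
	return epsw;
}

int main(void)
{
	unsigned long epsw = 0;		/* start fully masked */

	epsw = local_irq_enable(epsw);
	printf("epsw after enable: %#lx\n", epsw);
	return 0;
}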