| author | Chris Metcalf <cmetcalf@tilera.com> | 2012-03-27 15:40:20 -0400 |
| --- | --- | --- |
| committer | Chris Metcalf <cmetcalf@tilera.com> | 2012-05-25 12:48:20 -0400 |
| commit | 51007004f44c9588d70ffb77e1f52479bd5b0e37 (patch) | |
| tree | ddf8dd2f83554ecbe9de0c690cfab3889308397b /arch/tile/include | |
| parent | 76e10d158efb6d4516018846f60c2ab5501900bc (diff) | |
arch/tile: use interrupt critical sections less
In general we want to avoid ever touching memory while within an
interrupt critical section, since the page-fault path from the
hypervisor is different when we are in an interrupt critical
section, and we carefully decided with tilegx that we didn't need
to support that path in the kernel. (On tilepro we did implement
that path as part of supporting atomic instructions in software.)
In practice we always need to touch the kernel stack, since that's
where we store the interrupt state before releasing the critical
section, but this change cleans up a few things. The IRQ_ENABLE
macro is split up so that when we want to enable interrupts in a
deferred way (e.g. for cpu_idle or for interrupt return) we can
read the per-cpu enable mask before entering the critical section.
The cache-migration code is changed to use interrupt masking instead
of interrupt critical sections. And, the interrupt-entry code is
changed so that we defer loading "tp" from per-cpu data until after
we have released the interrupt critical section.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
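
A minimal sketch of the save/mask/restore pattern that the new
interrupt_mask_save_mask()/interrupt_mask_restore_mask() helpers enable
(see the diff below). This is not the kernel's actual cache-migration
code; the function name and body are illustrative only, and it assumes
kernel context on tile with the patched header available:

```c
#include <asm/irqflags.h>	/* interrupt_mask_*() helpers (assumed include) */

/* Hypothetical caller, standing in for the cache-migration work. */
static void do_work_with_all_irqs_masked(void)
{
	/* Remember exactly which interrupts are masked right now. */
	unsigned long long old_mask = interrupt_mask_save_mask();

	/* Mask everything, including interrupts Linux normally leaves on. */
	arch_local_irq_disable_all();

	/* ... work that must not be interrupted goes here ... */

	/* Put the interrupt mask back exactly as we found it. */
	interrupt_mask_restore_mask(old_mask);
}
```

Unlike an interrupt critical section, this pattern never changes how the
hypervisor would deliver a page fault; it only masks delivery, which is
why it is safe even when the masked region touches memory.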
Diffstat (limited to 'arch/tile/include')
-rw-r--r-- | arch/tile/include/asm/irqflags.h | 34 |
1 file changed, 26 insertions, 8 deletions
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce5..b4e96fe 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-        (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+        (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
 #else
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-        (~(INT_MASK_HI(INT_PERF_COUNT)))
+        (~(INT_MASK_HI(INT_PERF_COUNT)))
 #endif
 
 #else
@@ -90,6 +90,14 @@
         __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
         __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
+#define interrupt_mask_save_mask() \
+        (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+         (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+        unsigned long long __m = (mask); \
+        __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+        __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
 #else
 #define interrupt_mask_set(n) \
         __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
         __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
         __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+        __insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+        __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
 
 /*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Disable all interrupts, including NMIs. */
 #define arch_local_irq_disable_all() \
-        interrupt_mask_set_mask(-1UL)
+        interrupt_mask_set_mask(-1ULL)
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
         mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
         GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
-        ld      tmp0, tmp0; \
+        ld      tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
         mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
         mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
         GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
         { \
          lw     tmp0, tmp0; \
          addi   tmp1, tmp0, 4 \
         }; \
-        lw      tmp1, tmp1; \
+        lw      tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
         mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
         mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1) \
+        IRQ_ENABLE_LOAD(tmp0, tmp1); \
+        IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code.  We call a
  * C function, but almost everywhere we do, we don't mind clobbering
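
Why the split matters: the per-cpu enable mask lives in memory, so a
caller that must not touch memory at the moment interrupts are actually
unmasked (the deferred-enable cases from the commit message, such as
interrupt return) can hoist the load earlier. A sketch of such a caller
using the macros above; the register names r20/r21 and the surrounding
structure are illustrative only, not copied from the kernel's entry code:

```
        /* Read the per-cpu enable mask while memory may still be touched. */
        IRQ_ENABLE_LOAD(r20, r21)

        /* ... region that must not touch memory, e.g. after entering an
         *     interrupt critical section ... */

        /* Unmask interrupts using only values already held in registers. */
        IRQ_ENABLE_APPLY(r20, r21)
```

Callers with no such constraint keep using plain IRQ_ENABLE(), which the
patch redefines as LOAD immediately followed by APPLY.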