 arch/mips/Kconfig        | 13 +++++++++++++
 arch/mips/kernel/entry.S |  2 ++
 arch/mips/kernel/genex.S |  2 ++
 include/asm-mips/irq.h   |  2 +-
 4 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a00fabe..49f02e3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1404,6 +1404,19 @@ config MIPS_MT_SMTC_INSTANT_REPLAY
it off), but ensures that IPIs are handled promptly even under
heavy I/O interrupt load.

+config MIPS_MT_SMTC_IM_BACKSTOP
+ bool "Use per-TC register bits as backstop for inhibited IM bits"
+ depends on MIPS_MT_SMTC
+ default y
+ help
+ To support multiple TC microthreads acting as "CPUs" within
+ a VPE, VPE-wide interrupt mask bits must be specially manipulated
+ during interrupt handling. To support legacy drivers and interrupt
+ controller management code, SMTC has a "backstop" to track and
+ if necessary restore the interrupt mask. This has some performance
+ impact on interrupt service overhead. Disable it only if you know
+ what you are doing.
+
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
depends on MIPS_VPE_LOADER
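The help text above describes the mechanism only in prose. As a rough host-side model of the idea (not kernel code: the variables standing in for the CP0 Status IM field and the per-TC TCContext register, the field values, and the helper names are all invented for illustration), the backstop life cycle looks something like this:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the CP0 Status interrupt-mask (IM) field and the per-TC
 * TCContext register that SMTC borrows as the "backstop". The 0x0000ff00
 * field position mirrors the 0xff00-style masks used in MIPS code, but
 * everything here is a model, not the kernel's own state.
 */
static uint32_t status_im = 0x0000ff00;  /* all interrupt lines enabled */
static uint32_t tccontext = 0;           /* backstop: IM bits masked at entry */

/* Interrupt entry: mask the requested IM bits VPE-wide and remember them
 * in the backstop so return-from-exception can re-arm them.
 */
static void irq_entry_mask(uint32_t im_bits)
{
        uint32_t cleared = im_bits & status_im;

        tccontext |= cleared;    /* record what was masked */
        status_im &= ~cleared;   /* actually mask it */
}

/* An ack()/end() handler that re-enables its own line tells the backstop
 * to forget those bits, so they are not re-armed behind its back.
 */
static void irq_ack_owns_unmask(uint32_t im_bits)
{
        tccontext &= ~im_bits;
}

/* Return from exception: re-arm whatever is still parked in the backstop. */
static void irq_exit_rearm(void)
{
        status_im |= tccontext;
        tccontext = 0;
}

int main(void)
{
        irq_entry_mask(0x0400);      /* take an interrupt on one line      */
        irq_exit_rearm();            /* nothing acked it: the line is back */
        printf("IM after return: %#x\n", (unsigned)status_im);    /* 0xff00 */

        irq_entry_mask(0x0400);
        irq_ack_owns_unmask(0x0400); /* controller re-enables the line itself */
        irq_exit_rearm();
        printf("IM after acked return: %#x\n", (unsigned)status_im);  /* 0xfb00 */
        return 0;
}

Disabling the option removes the record/re-arm steps, which is the interrupt-path overhead the help text refers to; drivers and interrupt controllers then carry the full responsibility for re-enabling their own lines.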
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 686249c..e29598a 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -84,6 +84,7 @@ FEXPORT(restore_all) # restore full frame
LONG_S sp, TI_REGS($28)
jal deferred_smtc_ipi
LONG_S s0, TI_REGS($28)
+#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
ori v1, v0, TCSTATUS_IXMT
@@ -110,6 +111,7 @@ FEXPORT(restore_all) # restore full frame
_ehb
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
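Only the start and the tail of the re-arm sequence fall inside these two hunks; the instructions in between are not shown. Read together with the comment, the sequence appears to amount to the following C sketch, where tcstatus, status and tccontext stand in for CP0 TCStatus, Status and TCContext, and the elided middle is assumed to be the Status update itself:

#include <stdint.h>

#define IM_BITS        0x0000ff00u  /* Status.IM field, as in the 0xff00-style masks */
#define TCSTATUS_IXMT  0x00000400u  /* per-TC interrupt-exempt bit of TCStatus       */

static uint32_t tcstatus, status, tccontext;  /* stand-ins for the CP0 registers */

static void restore_all_rearm(void)
{
        uint32_t prev_ixmt = tcstatus & TCSTATUS_IXMT;
        uint32_t parked;

        tcstatus |= TCSTATUS_IXMT;      /* ori v1, v0, TCSTATUS_IXMT: go exempt while juggling */
        parked = tccontext & IM_BITS;   /* IM bits recorded at interrupt entry (see genex.S)   */
        status |= parked;               /* re-arm anything not explicitly "acked"              */
        tccontext ^= parked;            /* xor t0, t0, t3: drop the re-armed bits              */
        tcstatus = (tcstatus & ~TCSTATUS_IXMT) | prev_ixmt;  /* restore prior IXMT state       */
}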
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 297bd56..c0f19d6 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -243,9 +243,11 @@ NESTED(except_vec_vi_handler, 0, sp)
*/
mfc0 t1, CP0_STATUS
and t0, a0, t1
+#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
mfc0 t2, CP0_TCCONTEXT
or t0, t0, t2
mtc0 t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
xor t1, t1, t0
mtc0 t1, CP0_STATUS
_ehb
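The three added instructions record, in TCContext, exactly the Status.IM bits that this vectored handler is about to mask, so that restore_all can later re-arm them. A short C paraphrase of the hunk, with status and tccontext as stand-ins for the CP0 registers and im_request playing the role of a0:

#include <stdint.h>

static uint32_t status, tccontext;   /* stand-ins for CP0 Status and TCContext */

static void vi_handler_mask(uint32_t im_request)
{
        uint32_t to_mask = im_request & status;  /* and t0, a0, t1            */

        tccontext |= to_mask;                    /* park them in the backstop */
        status ^= to_mask;                       /* xor t1, t1, t0: mask them */
}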
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 3ca6a07..97102eb 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,7 +24,7 @@ static inline int irq_canonicalize(int irq)
#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
#endif

-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/*
* Clear interrupt mask handling "backstop" if irq_hwmask
* entry so indicates. This implies that the ack() or end()
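The hunk ends inside the comment, so the hook it documents is not part of the listing. Going only by the comment, the clearing step presumably looks something like the sketch below; irq_hwmask comes from the comment itself, while the array size, the 0x0000ff00 field and the function name are assumptions made for illustration:

#include <stdint.h>

#define NR_IRQS          128           /* illustrative size only                 */
#define IM_BACKSTOP_BITS 0x0000ff00u   /* Status.IM bits tracked by the backstop */

static unsigned long irq_hwmask[NR_IRQS];  /* named in the comment above */
static uint32_t tccontext;                 /* stand-in for CP0 TCContext */

/* If the irq_hwmask entry for this IRQ flags backstop bits, its ack()/end()
 * path clears them from the backstop so that restore_all does not re-arm a
 * line the interrupt controller is managing itself.
 */
static void smtc_clear_backstop(unsigned int irq)
{
        if (irq_hwmask[irq] & IM_BACKSTOP_BITS)
                tccontext &= ~(irq_hwmask[irq] & IM_BACKSTOP_BITS);
}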