summaryrefslogtreecommitdiffstats
path: root/sys/mips
diff options
context:
space:
mode:
authorjchandra <jchandra@FreeBSD.org>2011-10-18 16:37:28 +0000
committerjchandra <jchandra@FreeBSD.org>2011-10-18 16:37:28 +0000
commit51d69a1aea63d654d051703a8ef3bca3d6998b2a (patch)
tree70ca18ceb01dcca91bd650fa1a50cdaf0ed55e21 /sys/mips
parentc0da433b0a908c85fe0547ba8054795948e39d30 (diff)
downloadFreeBSD-src-51d69a1aea63d654d051703a8ef3bca3d6998b2a.zip
FreeBSD-src-51d69a1aea63d654d051703a8ef3bca3d6998b2a.tar.gz
Fix wakeup latency when sleeping with 'wait'
If we handle an interrupt just before the 'wait' and the interrupt schedules some work, we need to skip the 'wait' call. The simple solution of calling sched_runnable() with interrupts disabled immediately before wait still leaves a window after the call and before 'wait' in which the same issue can occur. The solution implemented is to check the EPC in the interrupt handler, and if it is in a region before the 'wait' call, to fix up the EPC to skip the wait call. Reported/analysed by: adrian Fix suggested by: kib Reviewed by: jmallett, imp
Diffstat (limited to 'sys/mips')
-rw-r--r--sys/mips/include/md_var.h1
-rw-r--r--sys/mips/mips/exception.S40
-rw-r--r--sys/mips/mips/machdep.c27
3 files changed, 51 insertions, 17 deletions
diff --git a/sys/mips/include/md_var.h b/sys/mips/include/md_var.h
index c2a6155..6f65a0f 100644
--- a/sys/mips/include/md_var.h
+++ b/sys/mips/include/md_var.h
@@ -56,6 +56,7 @@ void MipsSwitchFPState(struct thread *, struct trapframe *);
u_long kvtop(void *addr);
int is_cacheable_mem(vm_paddr_t addr);
void mips_generic_reset(void);
+void mips_wait(void);
#define MIPS_DEBUG 0
diff --git a/sys/mips/mips/exception.S b/sys/mips/mips/exception.S
index 729391e..8b7307c 100644
--- a/sys/mips/mips/exception.S
+++ b/sys/mips/mips/exception.S
@@ -557,6 +557,33 @@ NNON_LEAF(MipsUserGenException, CALLFRAME_SIZ, ra)
.set at
END(MipsUserGenException)
+ .set push
+ .set noat
+NON_LEAF(mips_wait, CALLFRAME_SIZ, ra)
+ PTR_SUBU sp, sp, CALLFRAME_SIZ
+ .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
+ REG_S ra, CALLFRAME_RA(sp) # save RA
+ mfc0 t0, MIPS_COP_0_STATUS
+ xori t1, t0, MIPS_SR_INT_IE
+ mtc0 t1, MIPS_COP_0_STATUS
+ COP0_SYNC
+ jal sched_runnable
+ nop
+ REG_L ra, CALLFRAME_RA(sp)
+ mfc0 t0, MIPS_COP_0_STATUS
+ ori t1, t0, MIPS_SR_INT_IE
+ .align 4
+GLOBAL(MipsWaitStart) # this is 16 byte aligned
+ mtc0 t1, MIPS_COP_0_STATUS
+ bnez v0, MipsWaitEnd
+ nop
+ wait
+GLOBAL(MipsWaitEnd) # MipsWaitStart + 16
+ jr ra
+ PTR_ADDU sp, sp, CALLFRAME_SIZ
+END(mips_wait)
+ .set pop
+
/*----------------------------------------------------------------------------
*
* MipsKernIntr --
@@ -578,6 +605,19 @@ NNON_LEAF(MipsKernIntr, KERN_EXC_FRAME_SIZE, ra)
.set noat
PTR_SUBU sp, sp, KERN_EXC_FRAME_SIZE
.mask 0x80000000, (CALLFRAME_RA - KERN_EXC_FRAME_SIZE)
+
+/*
+ * Check for getting interrupts just before wait
+ */
+ MFC0 k0, MIPS_COP_0_EXC_PC
+ ori k0, 0xf
+ xori k0, 0xf # 16 byte align
+ PTR_LA k1, MipsWaitStart
+ bne k0, k1, 1f
+ nop
+ PTR_ADDU k1, 16 # skip over wait
+ MTC0 k1, MIPS_COP_0_EXC_PC
+1:
/*
* Save CPU state, building 'frame'.
*/
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
index e348e41..405d457 100644
--- a/sys/mips/mips/machdep.c
+++ b/sys/mips/mips/machdep.c
@@ -163,6 +163,9 @@ extern char MipsTLBMiss[], MipsTLBMissEnd[];
/* Cache error handler */
extern char MipsCache[], MipsCacheEnd[];
+/* MIPS wait skip region */
+extern char MipsWaitStart[], MipsWaitEnd[];
+
extern char edata[], end[];
#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
@@ -327,6 +330,12 @@ void
mips_vector_init(void)
{
/*
 * Make sure that the wait region logic has not been
 * changed
 */
+ if (MipsWaitEnd - MipsWaitStart != 16)
+ panic("startup: MIPS wait region not correct");
+ /*
* Copy down exception vector code.
*/
if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
@@ -485,24 +494,9 @@ spinlock_exit(void)
/*
* call platform specific code to halt (until next interrupt) for the idle loop
*/
-/*
- * This is disabled because of three issues:
- *
- * + By calling critical_enter(), any interrupt which occurs after that but
- * before the wait instruction will be handled but not serviced (in the case
- * of a netisr) because preemption is not allowed at this point;
- * + Any fast interrupt handler which schedules an immediate or fast callout
- * will not occur until the wait instruction is interrupted, as the clock
- * has already been set by cpu_idleclock();
- * + There is currently no known way to atomically enable interrupts and call
- * wait, which is how the i386/amd64 code gets around (1). Thus even if
- * interrupts were disabled and reenabled just before the wait call, any
- * interrupt that did occur may not interrupt wait.
- */
void
cpu_idle(int busy)
{
-#if 0
KASSERT((mips_rd_status() & MIPS_SR_INT_IE) != 0,
("interrupts disabled in idle process."));
KASSERT((mips_rd_status() & MIPS_INT_MASK) != 0,
@@ -512,12 +506,11 @@ cpu_idle(int busy)
critical_enter();
cpu_idleclock();
}
- __asm __volatile ("wait");
+ mips_wait();
if (!busy) {
cpu_activeclock();
critical_exit();
}
-#endif
}
int
OpenPOWER on IntegriCloud