author     dillon <dillon@FreeBSD.org>    2002-04-01 23:51:23 +0000
committer  dillon <dillon@FreeBSD.org>    2002-04-01 23:51:23 +0000
commit     3ad295d41646d81ef12f3b5e99af833ef91f660f (patch)
tree       ec7d3de3f9fac7137b9779c10d8281315efa3647 /sys/amd64/amd64
parent     a683bcc9224326e9d7bbf72dc76abaefe8a1d62b (diff)
download   FreeBSD-src-3ad295d41646d81ef12f3b5e99af833ef91f660f.zip
           FreeBSD-src-3ad295d41646d81ef12f3b5e99af833ef91f660f.tar.gz
Stage-2 commit of the critical*() code. This re-inlines cpu_critical_enter()
and cpu_critical_exit() and moves the associated critical prototypes into
their own header file, <arch>/<arch>/critical.h, which is only included by
the three MI source files that need it.

Back out and re-apply improperly committed syntactical cleanups made to
files that were still under active development. Back out improperly
committed program-structure changes that moved localized declarations to
the top of two procedures. Partially re-apply one of the program-structure
changes, moving 'mask' into an intermediate block rather than into three
separate sub-blocks, to make the code more readable. Re-integrate bug fixes
that Jake made to the sparc64 code.

Note: In general, developers should not gratuitously move declarations out
of sub-blocks. They are where they are for reasons of structure, grouping,
readability, compiler localizability, and to avoid developer-introduced
bugs similar to several found in recent years in the VFS and VM code.

Reviewed by: jake
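
For illustration, here is a rough sketch of what the re-inlined pair in the
new i386 <machine/critical.h> might look like. This is a reconstruction
inferred from the out-of-line routines removed from critical.c in the diff
below, not a quote of the committed header:

    /*
     * Sketch only: reconstructed from the removed critical.c routines.
     * cpu_unpend() is the out-of-line slow path kept in critical.c.
     */
    static __inline void
    cpu_critical_enter(void)
    {
            /* Old-style mode (critical_mode == 0) hard-disables interrupts. */
            if (critical_mode == 0)
                    curthread->td_md.md_savecrit = intr_disable();
    }

    static __inline void
    cpu_critical_exit(void)
    {
            struct thread *td = curthread;

            if (td->td_md.md_savecrit != (register_t)-1) {
                    /* Old style: restore the saved hardware interrupt state. */
                    intr_restore(td->td_md.md_savecrit);
                    td->td_md.md_savecrit = (register_t)-1;
            } else if (PCPU_GET(int_pending)) {
                    /* New style: quick pending check, then the slow path. */
                    cpu_unpend();
            }
    }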
Diffstat (limited to 'sys/amd64/amd64')
-rw-r--r--  sys/amd64/amd64/apic_vector.S |  4
-rw-r--r--  sys/amd64/amd64/critical.c    | 93
2 files changed, 21 insertions, 76 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index f0e6497..34cc9f2 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/amd64/amd64/critical.c b/sys/amd64/amd64/critical.c
index 61f1a1f..4a983e4 100644
--- a/sys/amd64/amd64/critical.c
+++ b/sys/amd64/amd64/critical.c
@@ -15,6 +15,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
+#include <machine/critical.h>
#ifdef SMP
#include <machine/privatespace.h>
@@ -30,7 +31,7 @@
#include <i386/isa/intr_machdep.h>
#endif
-void unpend(void); /* note: not static (called from assembly) */
+void i386_unpend(void); /* NOTE: not static, called from assembly */
/*
* Instrument our ability to run critical sections with interrupts
@@ -43,81 +44,23 @@ SYSCTL_INT(_debug, OID_AUTO, critical_mode,
CTLFLAG_RW, &critical_mode, 0, "");
/*
- * cpu_critical_enter:
- *
- * This routine is called from critical_enter() on the 0->1 transition
- * of td_critnest, prior to it being incremented to 1.
- *
- * If old-style critical section handling (critical_mode == 0), we
- * disable interrupts.
- *
- * If new-style critical section handling (criticla_mode != 0), we
- * do not have to do anything. However, as a side effect any
- * interrupts occuring while td_critnest is non-zero will be
- * deferred.
- */
-void
-cpu_critical_enter(void)
-{
- struct thread *td;
-
- if (critical_mode == 0) {
- td = curthread;
- td->td_md.md_savecrit = intr_disable();
- }
-}
-
-/*
- * cpu_critical_exit:
- *
- * This routine is called from critical_exit() on a 1->0 transition
- * of td_critnest, after it has been decremented to 0. We are
- * exiting the last critical section.
- *
- * If td_critnest is -1 this is the 'new' critical_enter()/exit()
- * code (the default critical_mode=1) and we do not have to do
- * anything unless PCPU_GET(int_pending) is non-zero.
- *
- * Note that the td->critnest (1->0) transition interrupt race against
- * our int_pending/unpend() check below is handled by the interrupt
- * code for us, so we do not have to do anything fancy.
- *
- * Otherwise td_critnest contains the saved hardware interrupt state
- * and will be restored. Since interrupts were hard-disabled there
- * will be no pending interrupts to dispatch (the 'original' code).
+ * cpu_unpend() - called from critical_exit() inline after quick
+ * interrupt-pending check.
*/
void
-cpu_critical_exit(void)
+cpu_unpend(void)
{
register_t eflags;
struct thread *td;
td = curthread;
- if (td->td_md.md_savecrit != (register_t)-1) {
- intr_restore(td->td_md.md_savecrit);
- td->td_md.md_savecrit = (register_t)-1;
- } else {
- /*
- * We may have to schedule pending interrupts. Create
- * conditions similar to an interrupt context and call
- * unpend().
- *
- * note: we do this even if we are in an interrupt
- * nesting level. Deep nesting is protected by
- * critical_*() and if we conditionalized it then we
- * would have to check int_pending again whenever
- * we decrement td_intr_nesting_level to 0.
- */
- if (PCPU_GET(int_pending)) {
- eflags = intr_disable();
- if (PCPU_GET(int_pending)) {
- ++td->td_intr_nesting_level;
- unpend();
- --td->td_intr_nesting_level;
- }
- intr_restore(eflags);
- }
+ eflags = intr_disable();
+ if (PCPU_GET(int_pending)) {
+ ++td->td_intr_nesting_level;
+ i386_unpend();
+ --td->td_intr_nesting_level;
}
+ intr_restore(eflags);
}
/*
@@ -147,24 +90,26 @@ cpu_thread_link(struct thread *td)
}
/*
- * Called from cpu_critical_exit() or called from the assembly vector code
+ * Called from cpu_unpend or called from the assembly vector code
* to process any interrupts which may have occured while we were in
* a critical section.
*
* - interrupts must be disabled
* - td_critnest must be 0
* - td_intr_nesting_level must be incremented by the caller
+ *
+ * NOT STATIC (called from assembly)
*/
void
-unpend(void)
+i386_unpend(void)
{
- int irq;
- u_int32_t mask;
-
KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
curthread->td_critnest = 1;
for (;;) {
+ u_int32_t mask;
+ int irq;
+
/*
* Fast interrupts have priority
*/
@@ -207,7 +152,7 @@ unpend(void)
case 1: /* bit 1 - statclock */
mtx_lock_spin(&sched_lock);
statclock_process(curthread->td_kse,
- (register_t)unpend, 0);
+ (register_t)i386_unpend, 0);
mtx_unlock_spin(&sched_lock);
break;
}
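
For orientation, the MI callers that drive these hooks live outside
sys/amd64/amd64 and are not part of this diff. A minimal sketch, assuming
only the transition rules stated in the removed comments above (the enter
hook runs on the 0->1 transition of td_critnest before the increment, the
exit hook on the 1->0 transition after the decrement):

    /* Hypothetical sketch of the MI callers, inferred from the comments. */
    void
    critical_enter(void)
    {
            struct thread *td = curthread;

            /* Hook fires only on the outermost entry, before the bump. */
            if (td->td_critnest == 0)
                    cpu_critical_enter();
            td->td_critnest++;
    }

    void
    critical_exit(void)
    {
            struct thread *td = curthread;

            /* Hook fires only on the outermost exit, after the drop. */
            td->td_critnest--;
            if (td->td_critnest == 0)
                    cpu_critical_exit();
    }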