 sys/alpha/alpha/critical.c     |  19
 sys/alpha/include/cpufunc.h    |   5
 sys/alpha/include/critical.h   |  72
 sys/amd64/amd64/apic_vector.S  |   4
 sys/amd64/amd64/critical.c     |  93
 sys/amd64/include/cpufunc.h    |   4
 sys/amd64/include/critical.h   | 111
 sys/amd64/isa/atpic_vector.S   |   4
 sys/amd64/isa/icu_vector.S     |   4
 sys/amd64/isa/icu_vector.s     |   4
 sys/i386/i386/apic_vector.s    |   4
 sys/i386/i386/critical.c       |  93
 sys/i386/include/cpufunc.h     |   4
 sys/i386/include/critical.h    | 111
 sys/i386/isa/apic_vector.s     |   4
 sys/i386/isa/atpic_vector.s    |   4
 sys/i386/isa/icu_vector.s      |   4
 sys/ia64/ia64/critical.c       |  19
 sys/ia64/include/cpufunc.h     |   5
 sys/ia64/include/critical.h    |  73
 sys/kern/kern_fork.c           |   1
 sys/kern/kern_proc.c           |   1
 sys/kern/kern_switch.c         |   1
 sys/powerpc/include/cpufunc.h  |   6
 sys/powerpc/include/critical.h |  76
 sys/powerpc/powerpc/critical.c |  20
 sys/sparc64/include/cpufunc.h  |   5
 sys/sparc64/include/critical.h |  75
 sys/sparc64/sparc64/critical.c |  21
 29 files changed, 577 insertions(+), 270 deletions(-)
diff --git a/sys/alpha/alpha/critical.c b/sys/alpha/alpha/critical.c
index 0bf9e9b..c7e705d 100644
--- a/sys/alpha/alpha/critical.c
+++ b/sys/alpha/alpha/critical.c
@@ -18,24 +18,7 @@
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
-
-void
-cpu_critical_enter(void)
-{
- struct thread *td;
-
- td = curthread;
- td->td_md.md_savecrit = intr_disable();
-}
-
-void
-cpu_critical_exit(void)
-{
- struct thread *td;
-
- td = curthread;
- intr_restore(td->td_md.md_savecrit);
-}
+#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
diff --git a/sys/alpha/include/cpufunc.h b/sys/alpha/include/cpufunc.h
index 95d7e10..05bdbd4 100644
--- a/sys/alpha/include/cpufunc.h
+++ b/sys/alpha/include/cpufunc.h
@@ -59,11 +59,6 @@ intr_restore(register_t ipl)
alpha_pal_swpipl(ipl);
}
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
#endif /* _KERNEL */
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/alpha/include/critical.h b/sys/alpha/include/critical.h
new file mode 100644
index 0000000..dc5119c
--- /dev/null
+++ b/sys/alpha/include/critical.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * cpu_thread_link() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ * This routine is called from critical_enter() on the 0->1 transition
+ * of td_critnest, prior to it being incremented to 1.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ td->td_md.md_savecrit = intr_disable();
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ * This routine is called from critical_exit() on a 1->0 transition
+ * of td_critnest, after it has been decremented to 0. We are
+ * exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ intr_restore(td->td_md.md_savecrit);
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index f0e6497..34cc9f2 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/amd64/amd64/critical.c b/sys/amd64/amd64/critical.c
index 61f1a1f..4a983e4 100644
--- a/sys/amd64/amd64/critical.c
+++ b/sys/amd64/amd64/critical.c
@@ -15,6 +15,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
+#include <machine/critical.h>
#ifdef SMP
#include <machine/privatespace.h>
@@ -30,7 +31,7 @@
#include <i386/isa/intr_machdep.h>
#endif
-void unpend(void); /* note: not static (called from assembly) */
+void i386_unpend(void); /* NOTE: not static, called from assembly */
/*
* Instrument our ability to run critical sections with interrupts
@@ -43,81 +44,23 @@ SYSCTL_INT(_debug, OID_AUTO, critical_mode,
CTLFLAG_RW, &critical_mode, 0, "");
/*
- * cpu_critical_enter:
- *
- * This routine is called from critical_enter() on the 0->1 transition
- * of td_critnest, prior to it being incremented to 1.
- *
- * If old-style critical section handling (critical_mode == 0), we
- * disable interrupts.
- *
- * If new-style critical section handling (criticla_mode != 0), we
- * do not have to do anything. However, as a side effect any
- * interrupts occuring while td_critnest is non-zero will be
- * deferred.
- */
-void
-cpu_critical_enter(void)
-{
- struct thread *td;
-
- if (critical_mode == 0) {
- td = curthread;
- td->td_md.md_savecrit = intr_disable();
- }
-}
-
-/*
- * cpu_critical_exit:
- *
- * This routine is called from critical_exit() on a 1->0 transition
- * of td_critnest, after it has been decremented to 0. We are
- * exiting the last critical section.
- *
- * If td_critnest is -1 this is the 'new' critical_enter()/exit()
- * code (the default critical_mode=1) and we do not have to do
- * anything unless PCPU_GET(int_pending) is non-zero.
- *
- * Note that the td->critnest (1->0) transition interrupt race against
- * our int_pending/unpend() check below is handled by the interrupt
- * code for us, so we do not have to do anything fancy.
- *
- * Otherwise td_critnest contains the saved hardware interrupt state
- * and will be restored. Since interrupts were hard-disabled there
- * will be no pending interrupts to dispatch (the 'original' code).
+ * cpu_unpend() - called from critical_exit() inline after quick
+ *	interrupt-pending check.
*/
void
-cpu_critical_exit(void)
+cpu_unpend(void)
{
register_t eflags;
struct thread *td;
td = curthread;
- if (td->td_md.md_savecrit != (register_t)-1) {
- intr_restore(td->td_md.md_savecrit);
- td->td_md.md_savecrit = (register_t)-1;
- } else {
- /*
- * We may have to schedule pending interrupts. Create
- * conditions similar to an interrupt context and call
- * unpend().
- *
- * note: we do this even if we are in an interrupt
- * nesting level. Deep nesting is protected by
- * critical_*() and if we conditionalized it then we
- * would have to check int_pending again whenever
- * we decrement td_intr_nesting_level to 0.
- */
- if (PCPU_GET(int_pending)) {
- eflags = intr_disable();
- if (PCPU_GET(int_pending)) {
- ++td->td_intr_nesting_level;
- unpend();
- --td->td_intr_nesting_level;
- }
- intr_restore(eflags);
- }
+ eflags = intr_disable();
+ if (PCPU_GET(int_pending)) {
+ ++td->td_intr_nesting_level;
+ i386_unpend();
+ --td->td_intr_nesting_level;
}
+ intr_restore(eflags);
}
/*
@@ -147,24 +90,26 @@ cpu_thread_link(struct thread *td)
}
/*
- * Called from cpu_critical_exit() or called from the assembly vector code
+ * Called from cpu_unpend() or from the assembly vector code
* to process any interrupts which may have occured while we were in
* a critical section.
*
* - interrupts must be disabled
* - td_critnest must be 0
* - td_intr_nesting_level must be incremented by the caller
+ *
+ * NOT STATIC (called from assembly)
*/
void
-unpend(void)
+i386_unpend(void)
{
- int irq;
- u_int32_t mask;
-
KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
curthread->td_critnest = 1;
for (;;) {
+ u_int32_t mask;
+ int irq;
+
/*
* Fast interrupts have priority
*/
@@ -207,7 +152,7 @@ unpend(void)
case 1: /* bit 1 - statclock */
mtx_lock_spin(&sched_lock);
statclock_process(curthread->td_kse,
- (register_t)unpend, 0);
+ (register_t)i386_unpend, 0);
mtx_unlock_spin(&sched_lock);
break;
}
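Note: cpu_unpend() above is the slow path only.  The quick PCPU_GET(int_pending)
test lives in the cpu_critical_exit() inline in the new <machine/critical.h>
(shown later in this diff); cpu_unpend() then re-checks with interrupts
hard-disabled before replaying anything through i386_unpend().  A sketch of
that call chain, for illustration only (not code added by this diff):

	/* cpu_critical_exit() inline, critical_mode != 0 path */
	if (PCPU_GET(int_pending))	/* cheap, unlocked check */
		cpu_unpend();		/* disables ints, re-checks, calls i386_unpend() */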
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 2b0abcc..d57839a 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -624,10 +624,6 @@ u_int rcr0(void);
u_int rcr3(void);
u_int rcr4(void);
void reset_dbregs(void);
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
__END_DECLS
diff --git a/sys/amd64/include/critical.h b/sys/amd64/include/critical.h
new file mode 100644
index 0000000..6d14292
--- /dev/null
+++ b/sys/amd64/include/critical.h
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * cpu_thread_link() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+extern int critical_mode;
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_unpend(void);
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ * This routine is called from critical_enter() on the 0->1 transition
+ * of td_critnest, prior to it being incremented to 1.
+ *
+ * If old-style critical section handling (critical_mode == 0), we
+ * disable interrupts.
+ *
+ * If new-style critical section handling (critical_mode != 0), we
+ * do not have to do anything. However, as a side effect any
+ * interrupts occurring while td_critnest is non-zero will be
+ * deferred.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+ if (critical_mode == 0) {
+ struct thread *td = curthread;
+ td->td_md.md_savecrit = intr_disable();
+ }
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ * This routine is called from critical_exit() on a 1->0 transition
+ * of td_critnest, after it has been decremented to 0. We are
+ * exiting the last critical section.
+ *
+ * If td_critnest is -1 this is the 'new' critical_enter()/exit()
+ * code (the default critical_mode=1) and we do not have to do
+ * anything unless PCPU_GET(int_pending) is non-zero.
+ *
+ * Note that the td->critnest (1->0) transition interrupt race against
+ * our int_pending/unpend() check below is handled by the interrupt
+ * code for us, so we do not have to do anything fancy.
+ *
+ * Otherwise td_critnest contains the saved hardware interrupt state
+ * and will be restored. Since interrupts were hard-disabled there
+ * will be no pending interrupts to dispatch (the 'original' code).
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+ struct thread *td = curthread;
+
+ if (td->td_md.md_savecrit != (register_t)-1) {
+ intr_restore(td->td_md.md_savecrit);
+ td->td_md.md_savecrit = (register_t)-1;
+ } else {
+ /*
+ * We may have to schedule pending interrupts. Create
+ * conditions similar to an interrupt context and call
+ * unpend().
+ *
+ * note: we do this even if we are in an interrupt
+ * nesting level. Deep nesting is protected by
+ * critical_*() and if we conditionalized it then we
+ * would have to check int_pending again whenever
+ * we decrement td_intr_nesting_level to 0.
+ */
+ if (PCPU_GET(int_pending))
+ cpu_unpend();
+ }
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
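Note: the header above only supplies the MD hooks.  Kernel code is expected
to go through the MI critical_enter()/critical_exit() pair, which invoke
these inlines on the td_critnest 0->1 and 1->0 transitions described in the
comments.  A minimal usage sketch (illustration only; "example_count" is a
hypothetical per-CPU field, not something added by this diff):

	static void
	example_percpu_bump(void)
	{
		critical_enter();	/* defer (or hard-disable) interrupts */
		PCPU_SET(example_count, PCPU_GET(example_count) + 1);
		critical_exit();	/* may replay deferred ints via cpu_unpend() */
	}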
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index be0e105..de369c1 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index be0e105..de369c1 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index be0e105..de369c1 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index f0e6497..34cc9f2 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/i386/i386/critical.c b/sys/i386/i386/critical.c
index 61f1a1f..4a983e4 100644
--- a/sys/i386/i386/critical.c
+++ b/sys/i386/i386/critical.c
@@ -15,6 +15,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
+#include <machine/critical.h>
#ifdef SMP
#include <machine/privatespace.h>
@@ -30,7 +31,7 @@
#include <i386/isa/intr_machdep.h>
#endif
-void unpend(void); /* note: not static (called from assembly) */
+void i386_unpend(void); /* NOTE: not static, called from assembly */
/*
* Instrument our ability to run critical sections with interrupts
@@ -43,81 +44,23 @@ SYSCTL_INT(_debug, OID_AUTO, critical_mode,
CTLFLAG_RW, &critical_mode, 0, "");
/*
- * cpu_critical_enter:
- *
- * This routine is called from critical_enter() on the 0->1 transition
- * of td_critnest, prior to it being incremented to 1.
- *
- * If old-style critical section handling (critical_mode == 0), we
- * disable interrupts.
- *
- * If new-style critical section handling (criticla_mode != 0), we
- * do not have to do anything. However, as a side effect any
- * interrupts occuring while td_critnest is non-zero will be
- * deferred.
- */
-void
-cpu_critical_enter(void)
-{
- struct thread *td;
-
- if (critical_mode == 0) {
- td = curthread;
- td->td_md.md_savecrit = intr_disable();
- }
-}
-
-/*
- * cpu_critical_exit:
- *
- * This routine is called from critical_exit() on a 1->0 transition
- * of td_critnest, after it has been decremented to 0. We are
- * exiting the last critical section.
- *
- * If td_critnest is -1 this is the 'new' critical_enter()/exit()
- * code (the default critical_mode=1) and we do not have to do
- * anything unless PCPU_GET(int_pending) is non-zero.
- *
- * Note that the td->critnest (1->0) transition interrupt race against
- * our int_pending/unpend() check below is handled by the interrupt
- * code for us, so we do not have to do anything fancy.
- *
- * Otherwise td_critnest contains the saved hardware interrupt state
- * and will be restored. Since interrupts were hard-disabled there
- * will be no pending interrupts to dispatch (the 'original' code).
+ * cpu_unpend() - called from critical_exit() inline after quick
+ *	interrupt-pending check.
*/
void
-cpu_critical_exit(void)
+cpu_unpend(void)
{
register_t eflags;
struct thread *td;
td = curthread;
- if (td->td_md.md_savecrit != (register_t)-1) {
- intr_restore(td->td_md.md_savecrit);
- td->td_md.md_savecrit = (register_t)-1;
- } else {
- /*
- * We may have to schedule pending interrupts. Create
- * conditions similar to an interrupt context and call
- * unpend().
- *
- * note: we do this even if we are in an interrupt
- * nesting level. Deep nesting is protected by
- * critical_*() and if we conditionalized it then we
- * would have to check int_pending again whenever
- * we decrement td_intr_nesting_level to 0.
- */
- if (PCPU_GET(int_pending)) {
- eflags = intr_disable();
- if (PCPU_GET(int_pending)) {
- ++td->td_intr_nesting_level;
- unpend();
- --td->td_intr_nesting_level;
- }
- intr_restore(eflags);
- }
+ eflags = intr_disable();
+ if (PCPU_GET(int_pending)) {
+ ++td->td_intr_nesting_level;
+ i386_unpend();
+ --td->td_intr_nesting_level;
}
+ intr_restore(eflags);
}
/*
@@ -147,24 +90,26 @@ cpu_thread_link(struct thread *td)
}
/*
- * Called from cpu_critical_exit() or called from the assembly vector code
+ * Called from cpu_unpend() or from the assembly vector code
* to process any interrupts which may have occured while we were in
* a critical section.
*
* - interrupts must be disabled
* - td_critnest must be 0
* - td_intr_nesting_level must be incremented by the caller
+ *
+ * NOT STATIC (called from assembly)
*/
void
-unpend(void)
+i386_unpend(void)
{
- int irq;
- u_int32_t mask;
-
KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
curthread->td_critnest = 1;
for (;;) {
+ u_int32_t mask;
+ int irq;
+
/*
* Fast interrupts have priority
*/
@@ -207,7 +152,7 @@ unpend(void)
case 1: /* bit 1 - statclock */
mtx_lock_spin(&sched_lock);
statclock_process(curthread->td_kse,
- (register_t)unpend, 0);
+ (register_t)i386_unpend, 0);
mtx_unlock_spin(&sched_lock);
break;
}
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 2b0abcc..d57839a 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -624,10 +624,6 @@ u_int rcr0(void);
u_int rcr3(void);
u_int rcr4(void);
void reset_dbregs(void);
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
__END_DECLS
diff --git a/sys/i386/include/critical.h b/sys/i386/include/critical.h
new file mode 100644
index 0000000..6d14292
--- /dev/null
+++ b/sys/i386/include/critical.h
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * cpu_thread_link() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+extern int critical_mode;
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_unpend(void);
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ * This routine is called from critical_enter() on the 0->1 transition
+ * of td_critnest, prior to it being incremented to 1.
+ *
+ * If old-style critical section handling (critical_mode == 0), we
+ * disable interrupts.
+ *
+ * If new-style critical section handling (critical_mode != 0), we
+ * do not have to do anything. However, as a side effect any
+ * interrupts occurring while td_critnest is non-zero will be
+ * deferred.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+ if (critical_mode == 0) {
+ struct thread *td = curthread;
+ td->td_md.md_savecrit = intr_disable();
+ }
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ * This routine is called from critical_exit() on a 1->0 transition
+ * of td_critnest, after it has been decremented to 0. We are
+ * exiting the last critical section.
+ *
+ * If td_critnest is -1 this is the 'new' critical_enter()/exit()
+ * code (the default critical_mode=1) and we do not have to do
+ * anything unless PCPU_GET(int_pending) is non-zero.
+ *
+ * Note that the td->critnest (1->0) transition interrupt race against
+ * our int_pending/unpend() check below is handled by the interrupt
+ * code for us, so we do not have to do anything fancy.
+ *
+ * Otherwise td_critnest contains the saved hardware interrupt state
+ * and will be restored. Since interrupts were hard-disabled there
+ * will be no pending interrupts to dispatch (the 'original' code).
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+ struct thread *td = curthread;
+
+ if (td->td_md.md_savecrit != (register_t)-1) {
+ intr_restore(td->td_md.md_savecrit);
+ td->td_md.md_savecrit = (register_t)-1;
+ } else {
+ /*
+ * We may have to schedule pending interrupts. Create
+ * conditions similar to an interrupt context and call
+ * unpend().
+ *
+ * note: we do this even if we are in an interrupt
+ * nesting level. Deep nesting is protected by
+ * critical_*() and if we conditionalized it then we
+ * would have to check int_pending again whenever
+ * we decrement td_intr_nesting_level to 0.
+ */
+ if (PCPU_GET(int_pending))
+ cpu_unpend();
+ }
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index f0e6497..34cc9f2 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/i386/isa/atpic_vector.s b/sys/i386/isa/atpic_vector.s
index be0e105..de369c1 100644
--- a/sys/i386/isa/atpic_vector.s
+++ b/sys/i386/isa/atpic_vector.s
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s
index be0e105..de369c1 100644
--- a/sys/i386/isa/icu_vector.s
+++ b/sys/i386/isa/icu_vector.s
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
- call unpend ; \
+ call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
- call unpend ; \
+ call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
diff --git a/sys/ia64/ia64/critical.c b/sys/ia64/ia64/critical.c
index 1698c03..5272b80 100644
--- a/sys/ia64/ia64/critical.c
+++ b/sys/ia64/ia64/critical.c
@@ -18,24 +18,7 @@
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
-
-void
-cpu_critical_enter(void)
-{
- struct thread *td;
-
- td = curthread;
- td->td_md.md_savecrit = intr_disable();
-}
-
-void
-cpu_critical_exit(void)
-{
- struct thread *td;
-
- td = curthread;
- intr_restore(td->td_md.md_savecrit);
-}
+#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
diff --git a/sys/ia64/include/cpufunc.h b/sys/ia64/include/cpufunc.h
index e7cf818..5569ab0 100644
--- a/sys/ia64/include/cpufunc.h
+++ b/sys/ia64/include/cpufunc.h
@@ -300,11 +300,6 @@ intr_restore(critical_t psr)
__asm __volatile ("mov psr.l=%0;; srlz.d" :: "r" (psr));
}
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
#endif /* _KERNEL */
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/ia64/include/critical.h b/sys/ia64/include/critical.h
new file mode 100644
index 0000000..265edab
--- /dev/null
+++ b/sys/ia64/include/critical.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * cpu_thread_link() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ * This routine is called from critical_enter() on the 0->1 transition
+ * of td_critnest, prior to it being incremented to 1.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ td->td_md.md_savecrit = intr_disable();
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ * This routine is called from critical_exit() on a 1->0 transition
+ * of td_critnest, after it has been decremented to 0. We are
+ * exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ intr_restore(td->td_md.md_savecrit);
+}
+
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index ddd962a..8782007 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -70,6 +70,7 @@
#include <sys/vmmeter.h>
#include <sys/user.h>
+#include <machine/critical.h>
static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index a1620bc..7e62eff 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -54,6 +54,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
+#include <machine/critical.h>
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index ccfb114..1b33f28 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -34,6 +34,7 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
+#include <machine/critical.h>
/*
* Global run queue.
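Note: with cpu_critical_enter()/cpu_critical_exit() turned into inlines, the
MI files that implement the critical-section primitives must now include
<machine/critical.h> directly, which is all the three kern/ hunks above do.
For orientation, a sketch of the MI wrappers implied by the header comments
(the real kern_switch.c bodies are not part of this diff):

	void
	critical_enter(void)
	{
		struct thread *td = curthread;

		if (td->td_critnest == 0)
			cpu_critical_enter();	/* 0->1, before the increment */
		td->td_critnest++;
	}

	void
	critical_exit(void)
	{
		struct thread *td = curthread;

		if (td->td_critnest == 1) {
			td->td_critnest = 0;	/* decrement to 0 first... */
			cpu_critical_exit();	/* ...then run the MD 1->0 hook */
		} else
			td->td_critnest--;
	}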
diff --git a/sys/powerpc/include/cpufunc.h b/sys/powerpc/include/cpufunc.h
index b788aa6..429e83b 100644
--- a/sys/powerpc/include/cpufunc.h
+++ b/sys/powerpc/include/cpufunc.h
@@ -132,12 +132,6 @@ powerpc_get_pcpup(void)
return(ret);
}
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
-
#endif /* _KERNEL */
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/powerpc/include/critical.h b/sys/powerpc/include/critical.h
new file mode 100644
index 0000000..08184bd
--- /dev/null
+++ b/sys/powerpc/include/critical.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * cpu_thread_link() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ * This routine is called from critical_enter() on the 0->1 transition
+ * of td_critnest, prior to it being incremented to 1.
+ */
+
+static __inline void
+cpu_critical_enter(void)
+{
+ u_int msr;
+ struct thread *td = curthread;
+
+ msr = mfmsr();
+ td->td_md.md_savecrit = msr;
+ msr &= ~(PSL_EE | PSL_RI);
+ mtmsr(msr);
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ * This routine is called from critical_exit() on a 1->0 transition
+ * of td_critnest, after it has been decremented to 0. We are
+ * exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+ struct thread *td = curthread;
+
+ mtmsr(td->td_md.md_savecrit);
+}
+
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
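Note: the powerpc inlines above manipulate the MSR directly instead of going
through intr_disable()/intr_restore() wrappers; functionally they save the
MSR, clear the interrupt-enable bits, and restore the saved value on exit.
A rough equivalent written as standalone helpers, for illustration only
(the helper names are hypothetical; PSL_EE is the external-interrupt-enable
bit and PSL_RI the recoverable-interrupt bit):

	static __inline u_int
	example_msr_intr_disable(void)		/* hypothetical name */
	{
		u_int msr = mfmsr();

		mtmsr(msr & ~(PSL_EE | PSL_RI));
		return (msr);
	}

	static __inline void
	example_msr_intr_restore(u_int msr)	/* hypothetical name */
	{
		mtmsr(msr);
	}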
diff --git a/sys/powerpc/powerpc/critical.c b/sys/powerpc/powerpc/critical.c
index cb80062..873498c 100644
--- a/sys/powerpc/powerpc/critical.c
+++ b/sys/powerpc/powerpc/critical.c
@@ -19,26 +19,6 @@
#include <sys/sysctl.h>
#include <sys/ucontext.h>
-void
-cpu_critical_enter(void)
-{
- u_int msr;
- struct thread *td = curthread;
-
- msr = mfmsr();
- td->td_md.md_savecrit = msr;
- msr &= ~(PSL_EE | PSL_RI);
- mtmsr(msr);
-}
-
-void
-cpu_critical_exit(void)
-{
- struct thread *td = curthread;
-
- mtmsr(td->td_md.md_savecrit);
-}
-
/*
* cpu_critical_fork_exit() - cleanup after fork
*/
diff --git a/sys/sparc64/include/cpufunc.h b/sys/sparc64/include/cpufunc.h
index 4a3dd7a..86e7ae5 100644
--- a/sys/sparc64/include/cpufunc.h
+++ b/sys/sparc64/include/cpufunc.h
@@ -224,9 +224,4 @@ ffs(int mask)
#undef LDNC_GEN
#undef STNC_GEN
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/sparc64/include/critical.h b/sys/sparc64/include/critical.h
new file mode 100644
index 0000000..eee9914
--- /dev/null
+++ b/sys/sparc64/include/critical.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * cpu_thread_link() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ * This routine is called from critical_enter() on the 0->1 transition
+ * of td_critnest, prior to it being incremented to 1.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+ struct thread *td;
+ critical_t pil;
+
+ td = curthread;
+ pil = rdpr(pil);
+ wrpr(pil, 0, 14);
+ td->td_md.md_savecrit = pil;
+}
+
+
+/*
+ * cpu_critical_exit:
+ *
+ * This routine is called from critical_exit() on a 1->0 transition
+ * of td_critnest, after it has been decremented to 0. We are
+ * exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ wrpr(pil, td->td_md.md_savecrit, 0);
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
diff --git a/sys/sparc64/sparc64/critical.c b/sys/sparc64/sparc64/critical.c
index 5815eeb..6be10aa 100644
--- a/sys/sparc64/sparc64/critical.c
+++ b/sys/sparc64/sparc64/critical.c
@@ -19,27 +19,6 @@
#include <sys/sysctl.h>
#include <sys/ucontext.h>
-void
-cpu_critical_enter(void)
-{
- struct thread *td;
- critical_t pil;
-
- td = curthread;
- pil = rdpr(pil);
- wrpr(pil, 0, 14);
- td->td_md.md_savecrit = pil;
-}
-
-void
-cpu_critical_exit(void)
-{
- struct thread *td;
-
- td = curthread;
- wrpr(pil, td->td_md.md_savecrit, 0);
-}
-
/*
* cpu_critical_fork_exit() - cleanup after fork
*/