summaryrefslogtreecommitdiffstats
path: root/sys/amd64/include/critical.h
diff options
context:
space:
mode:
authordillon <dillon@FreeBSD.org>2002-07-10 20:15:58 +0000
committerdillon <dillon@FreeBSD.org>2002-07-10 20:15:58 +0000
commite296b5e4108fe4c8e15dffbf96d1913bccfae056 (patch)
treefc7660e7d61818f708bd7c377f4d4b0495d1651a /sys/amd64/include/critical.h
parentba66015624c587e7cf2245a8cb934761e5f2cfaa (diff)
downloadFreeBSD-src-e296b5e4108fe4c8e15dffbf96d1913bccfae056.zip
FreeBSD-src-e296b5e4108fe4c8e15dffbf96d1913bccfae056.tar.gz
Remove the critmode sysctl - the new method for critical_enter/exit (already
the default) is now the only method for i386. Remove the paraphernalia that supported critmode. Remove td_critnest, clean up the assembly, and clean up (mostly remove) the old junk from cpu_critical_enter() and cpu_critical_exit().
Diffstat (limited to 'sys/amd64/include/critical.h')
-rw-r--r--sys/amd64/include/critical.h62
1 files changed, 17 insertions, 45 deletions
diff --git a/sys/amd64/include/critical.h b/sys/amd64/include/critical.h
index 6d14292..7cc7ff2 100644
--- a/sys/amd64/include/critical.h
+++ b/sys/amd64/include/critical.h
@@ -20,8 +20,6 @@
__BEGIN_DECLS
-extern int critical_mode;
-
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
@@ -37,22 +35,11 @@ void cpu_thread_link(struct thread *td);
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*
- * If old-style critical section handling (critical_mode == 0), we
- * disable interrupts.
- *
- * If new-style critical section handling (criticla_mode != 0), we
- * do not have to do anything. However, as a side effect any
- * interrupts occuring while td_critnest is non-zero will be
- * deferred.
+ * If new-style critical section handling we do not have to do anything.
+ * However, as a side effect any interrupts occuring while td_critnest
+ * is non-zero will be deferred.
*/
-static __inline void
-cpu_critical_enter(void)
-{
- if (critical_mode == 0) {
- struct thread *td = curthread;
- td->td_md.md_savecrit = intr_disable();
- }
-}
+#define cpu_critical_enter()
/*
* cpu_critical_exit:
@@ -61,41 +48,26 @@ cpu_critical_enter(void)
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*
- * If td_critnest is -1 this is the 'new' critical_enter()/exit()
- * code (the default critical_mode=1) and we do not have to do
- * anything unless PCPU_GET(int_pending) is non-zero.
- *
* Note that the td->critnest (1->0) transition interrupt race against
* our int_pending/unpend() check below is handled by the interrupt
* code for us, so we do not have to do anything fancy.
- *
- * Otherwise td_critnest contains the saved hardware interrupt state
- * and will be restored. Since interrupts were hard-disabled there
- * will be no pending interrupts to dispatch (the 'original' code).
*/
static __inline void
cpu_critical_exit(void)
{
- struct thread *td = curthread;
-
- if (td->td_md.md_savecrit != (register_t)-1) {
- intr_restore(td->td_md.md_savecrit);
- td->td_md.md_savecrit = (register_t)-1;
- } else {
- /*
- * We may have to schedule pending interrupts. Create
- * conditions similar to an interrupt context and call
- * unpend().
- *
- * note: we do this even if we are in an interrupt
- * nesting level. Deep nesting is protected by
- * critical_*() and if we conditionalized it then we
- * would have to check int_pending again whenever
- * we decrement td_intr_nesting_level to 0.
- */
- if (PCPU_GET(int_pending))
- cpu_unpend();
- }
+ /*
+ * We may have to schedule pending interrupts. Create
+ * conditions similar to an interrupt context and call
+ * unpend().
+ *
+ * note: we do this even if we are in an interrupt
+ * nesting level. Deep nesting is protected by
+ * critical_*() and if we conditionalized it then we
+ * would have to check int_pending again whenever
+ * we decrement td_intr_nesting_level to 0.
+ */
+ if (PCPU_GET(int_pending))
+ cpu_unpend();
}
#else /* !__GNUC__ */
OpenPOWER on IntegriCloud