diff options
Diffstat (limited to 'sys/amd64')
-rw-r--r--   sys/amd64/amd64/apic_vector.S  |   4
-rw-r--r--   sys/amd64/amd64/critical.c     |  93
-rw-r--r--   sys/amd64/include/cpufunc.h    |   4
-rw-r--r--   sys/amd64/include/critical.h   | 111
-rw-r--r--   sys/amd64/isa/atpic_vector.S   |   4
-rw-r--r--   sys/amd64/isa/icu_vector.S     |   4
-rw-r--r--   sys/amd64/isa/icu_vector.s     |   4
7 files changed, 138 insertions, 86 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S index f0e6497..34cc9f2 100644 --- a/sys/amd64/amd64/apic_vector.S +++ b/sys/amd64/amd64/apic_vector.S @@ -155,7 +155,7 @@ IDTVEC(vec_name) ; \ cmpl $0,PCPU(INT_PENDING) ; \ je 2f ; \ ; \ - call unpend ; \ + call i386_unpend ; \ 2: ; \ decl TD_INTR_NESTING_LEVEL(%ebx) ; \ 10: ; \ @@ -232,7 +232,7 @@ IDTVEC(vec_name) ; \ FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \ cmpl $0,PCPU(INT_PENDING) ; \ je 9f ; \ - call unpend ; \ + call i386_unpend ; \ 9: ; \ pushl $irq_num; /* pass the IRQ */ \ call sched_ithd ; \ diff --git a/sys/amd64/amd64/critical.c b/sys/amd64/amd64/critical.c index 61f1a1f..4a983e4 100644 --- a/sys/amd64/amd64/critical.c +++ b/sys/amd64/amd64/critical.c @@ -15,6 +15,7 @@ #include <sys/proc.h> #include <sys/sysctl.h> #include <sys/ucontext.h> +#include <machine/critical.h> #ifdef SMP #include <machine/privatespace.h> @@ -30,7 +31,7 @@ #include <i386/isa/intr_machdep.h> #endif -void unpend(void); /* note: not static (called from assembly) */ +void i386_unpend(void); /* NOTE: not static, called from assembly */ /* * Instrument our ability to run critical sections with interrupts @@ -43,81 +44,23 @@ SYSCTL_INT(_debug, OID_AUTO, critical_mode, CTLFLAG_RW, &critical_mode, 0, ""); /* - * cpu_critical_enter: - * - * This routine is called from critical_enter() on the 0->1 transition - * of td_critnest, prior to it being incremented to 1. - * - * If old-style critical section handling (critical_mode == 0), we - * disable interrupts. - * - * If new-style critical section handling (criticla_mode != 0), we - * do not have to do anything. However, as a side effect any - * interrupts occuring while td_critnest is non-zero will be - * deferred. 
- */ -void -cpu_critical_enter(void) -{ - struct thread *td; - - if (critical_mode == 0) { - td = curthread; - td->td_md.md_savecrit = intr_disable(); - } -} - -/* - * cpu_critical_exit: - * - * This routine is called from critical_exit() on a 1->0 transition - * of td_critnest, after it has been decremented to 0. We are - * exiting the last critical section. - * - * If td_critnest is -1 this is the 'new' critical_enter()/exit() - * code (the default critical_mode=1) and we do not have to do - * anything unless PCPU_GET(int_pending) is non-zero. - * - * Note that the td->critnest (1->0) transition interrupt race against - * our int_pending/unpend() check below is handled by the interrupt - * code for us, so we do not have to do anything fancy. - * - * Otherwise td_critnest contains the saved hardware interrupt state - * and will be restored. Since interrupts were hard-disabled there - * will be no pending interrupts to dispatch (the 'original' code). + * cpu_unpend() - called from critical_exit() inline after quick + * interrupt-pending check. */ void -cpu_critical_exit(void) +cpu_unpend(void) { register_t eflags; struct thread *td; td = curthread; - if (td->td_md.md_savecrit != (register_t)-1) { - intr_restore(td->td_md.md_savecrit); - td->td_md.md_savecrit = (register_t)-1; - } else { - /* - * We may have to schedule pending interrupts. Create - * conditions similar to an interrupt context and call - * unpend(). - * - * note: we do this even if we are in an interrupt - * nesting level. Deep nesting is protected by - * critical_*() and if we conditionalized it then we - * would have to check int_pending again whenever - * we decrement td_intr_nesting_level to 0. 
- */ - if (PCPU_GET(int_pending)) { - eflags = intr_disable(); - if (PCPU_GET(int_pending)) { - ++td->td_intr_nesting_level; - unpend(); - --td->td_intr_nesting_level; - } - intr_restore(eflags); - } + eflags = intr_disable(); + if (PCPU_GET(int_pending)) { + ++td->td_intr_nesting_level; + i386_unpend(); + --td->td_intr_nesting_level; } + intr_restore(eflags); } /* @@ -147,24 +90,26 @@ cpu_thread_link(struct thread *td) } /* - * Called from cpu_critical_exit() or called from the assembly vector code + * Called from cpu_unpend or called from the assembly vector code * to process any interrupts which may have occured while we were in * a critical section. * * - interrupts must be disabled * - td_critnest must be 0 * - td_intr_nesting_level must be incremented by the caller + * + * NOT STATIC (called from assembly) */ void -unpend(void) +i386_unpend(void) { - int irq; - u_int32_t mask; - KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0")); KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1")); curthread->td_critnest = 1; for (;;) { + u_int32_t mask; + int irq; + /* * Fast interrupts have priority */ @@ -207,7 +152,7 @@ unpend(void) case 1: /* bit 1 - statclock */ mtx_lock_spin(&sched_lock); statclock_process(curthread->td_kse, - (register_t)unpend, 0); + (register_t)i386_unpend, 0); mtx_unlock_spin(&sched_lock); break; } diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h index 2b0abcc..d57839a 100644 --- a/sys/amd64/include/cpufunc.h +++ b/sys/amd64/include/cpufunc.h @@ -624,10 +624,6 @@ u_int rcr0(void); u_int rcr3(void); u_int rcr4(void); void reset_dbregs(void); -void cpu_critical_enter(void); -void cpu_critical_exit(void); -void cpu_critical_fork_exit(void); -void cpu_thread_link(struct thread *td); __END_DECLS diff --git a/sys/amd64/include/critical.h b/sys/amd64/include/critical.h new file mode 100644 index 0000000..6d14292 --- /dev/null +++ b/sys/amd64/include/critical.h @@ -0,0 +1,111 @@ +/*- + * Copyright (c) 
2002 Matthew Dillon. This code is distributed under + * the BSD copyright, /usr/src/COPYRIGHT. + * + * This file contains prototypes and high-level inlines related to + * machine-level critical function support: + * + * cpu_critical_enter() - inlined + * cpu_critical_exit() - inlined + * cpu_critical_fork_exit() - prototyped + * cpu_thread_link() - prototyped + * related support functions residing + * in <arch>/<arch>/critical.c - prototyped + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_CRITICAL_H_ +#define _MACHINE_CRITICAL_H_ + +__BEGIN_DECLS + +extern int critical_mode; + +/* + * Prototypes - see <arch>/<arch>/critical.c + */ +void cpu_unpend(void); +void cpu_critical_fork_exit(void); +void cpu_thread_link(struct thread *td); + +#ifdef __GNUC__ + +/* + * cpu_critical_enter: + * + * This routine is called from critical_enter() on the 0->1 transition + * of td_critnest, prior to it being incremented to 1. + * + * If old-style critical section handling (critical_mode == 0), we + * disable interrupts. + * + * If new-style critical section handling (critical_mode != 0), we + * do not have to do anything. However, as a side effect any + * interrupts occurring while td_critnest is non-zero will be + * deferred. + */ +static __inline void +cpu_critical_enter(void) +{ + if (critical_mode == 0) { + struct thread *td = curthread; + td->td_md.md_savecrit = intr_disable(); + } +} + +/* + * cpu_critical_exit: + * + * This routine is called from critical_exit() on a 1->0 transition + * of td_critnest, after it has been decremented to 0. We are + * exiting the last critical section. + * + * If td_critnest is -1 this is the 'new' critical_enter()/exit() + * code (the default critical_mode=1) and we do not have to do + * anything unless PCPU_GET(int_pending) is non-zero. + * + * Note that the td->critnest (1->0) transition interrupt race against + * our int_pending/unpend() check below is handled by the interrupt + * code for us, so we do not have to do anything fancy. 
+ * + * Otherwise td_critnest contains the saved hardware interrupt state + * and will be restored. Since interrupts were hard-disabled there + * will be no pending interrupts to dispatch (the 'original' code). + */ +static __inline void +cpu_critical_exit(void) +{ + struct thread *td = curthread; + + if (td->td_md.md_savecrit != (register_t)-1) { + intr_restore(td->td_md.md_savecrit); + td->td_md.md_savecrit = (register_t)-1; + } else { + /* + * We may have to schedule pending interrupts. Create + * conditions similar to an interrupt context and call + * unpend(). + * + * note: we do this even if we are in an interrupt + * nesting level. Deep nesting is protected by + * critical_*() and if we conditionalized it then we + * would have to check int_pending again whenever + * we decrement td_intr_nesting_level to 0. + */ + if (PCPU_GET(int_pending)) + cpu_unpend(); + } +} + +#else /* !__GNUC__ */ + +void cpu_critical_enter(void); +void cpu_critical_exit(void); + +#endif /* __GNUC__ */ + +__END_DECLS + +#endif /* !_MACHINE_CRITICAL_H_ */ + diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S index be0e105..de369c1 100644 --- a/sys/amd64/isa/atpic_vector.S +++ b/sys/amd64/isa/atpic_vector.S @@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \ cmpl $0,PCPU(INT_PENDING) ; \ je 2f ; \ ; \ - call unpend ; \ + call i386_unpend ; \ 2: ; \ decl TD_INTR_NESTING_LEVEL(%ebx) ; \ 10: ; \ @@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \ FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \ cmpl $0,PCPU(INT_PENDING) ; \ je 9f ; \ - call unpend ; \ + call i386_unpend ; \ 9: ; \ pushl $irq_num; /* pass the IRQ */ \ call sched_ithd ; \ diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S index be0e105..de369c1 100644 --- a/sys/amd64/isa/icu_vector.S +++ b/sys/amd64/isa/icu_vector.S @@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \ cmpl $0,PCPU(INT_PENDING) ; \ je 2f ; \ ; \ - call unpend ; \ + call i386_unpend ; \ 2: ; \ decl TD_INTR_NESTING_LEVEL(%ebx) ; \ 10: ; \ @@ 
-197,7 +197,7 @@ IDTVEC(vec_name) ; \ FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \ cmpl $0,PCPU(INT_PENDING) ; \ je 9f ; \ - call unpend ; \ + call i386_unpend ; \ 9: ; \ pushl $irq_num; /* pass the IRQ */ \ call sched_ithd ; \ diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s index be0e105..de369c1 100644 --- a/sys/amd64/isa/icu_vector.s +++ b/sys/amd64/isa/icu_vector.s @@ -125,7 +125,7 @@ IDTVEC(vec_name) ; \ cmpl $0,PCPU(INT_PENDING) ; \ je 2f ; \ ; \ - call unpend ; \ + call i386_unpend ; \ 2: ; \ decl TD_INTR_NESTING_LEVEL(%ebx) ; \ 10: ; \ @@ -197,7 +197,7 @@ IDTVEC(vec_name) ; \ FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \ cmpl $0,PCPU(INT_PENDING) ; \ je 9f ; \ - call unpend ; \ + call i386_unpend ; \ 9: ; \ pushl $irq_num; /* pass the IRQ */ \ call sched_ithd ; \ |