Diffstat (limited to 'sys/i386')
-rw-r--r--   sys/i386/i386/machdep.c     |   7
-rw-r--r--   sys/i386/i386/mp_machdep.c  |  62
-rw-r--r--   sys/i386/i386/mptable.c     |  62
-rw-r--r--   sys/i386/i386/simplelock.s  | 317
-rw-r--r--   sys/i386/include/asnames.h  |   6
-rw-r--r--   sys/i386/include/lock.h     |  56
-rw-r--r--   sys/i386/include/mptable.h  |  62
-rw-r--r--   sys/i386/include/profile.h  |   4
8 files changed, 114 insertions(+), 462 deletions(-)
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 42aff4e..531abf2 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -1896,6 +1896,13 @@ init386(first)
 	LIST_INIT(&proc0.p_contested);
 
 	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
+#ifdef SMP
+	/*
+	 * Interrupts can happen very early, so initialize imen_mtx here, rather
+	 * than in init_locks().
+	 */
+	mtx_init(&imen_mtx, "imen", MTX_SPIN);
+#endif
 
 	/*
 	 * Giant is used early for at least debugger traps and unexpected traps.
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 21118f2..598fbf2 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -238,7 +238,7 @@ typedef struct BASETABLE_ENTRY {
 #define MP_ANNOUNCE_POST	0x19
 
 /* used to hold the AP's until we are ready to release them */
-struct simplelock	ap_boot_lock;
+struct mtx		ap_boot_mtx;
 
 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
 int	current_postcode;
@@ -318,6 +318,9 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
  * Local data and functions.
  */
 
+/* Set to 1 once we're ready to let the APs out of the pen. */
+static volatile int aps_ready = 0;
+
 static int	mp_capable;
 static u_int	boot_address;
 static u_int	base_memory;
@@ -345,36 +348,40 @@ static void	release_aps(void *dummy);
  */
 
 /* critical region around IO APIC, apic_imen */
-struct simplelock	imen_lock;
+struct mtx		imen_mtx;
 
 /* lock region used by kernel profiling */
-struct simplelock	mcount_lock;
+struct mtx		mcount_mtx;
 
 #ifdef USE_COMLOCK
 /* locks com (tty) data/hardware accesses: a FASTINTR() */
-struct simplelock	com_lock;
+struct mtx		com_mtx;
 #endif /* USE_COMLOCK */
 
 /* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx	smp_rv_mtx;
 
 /* only 1 CPU can panic at a time :) */
-struct simplelock	panic_lock;
+struct mtx		panic_mtx;
 
 static void
 init_locks(void)
 {
-	s_lock_init(&mcount_lock);
+	/*
+	 * XXX The mcount mutex probably needs to be statically initialized,
+	 * since it will be used even in the function calls that get us to this
+	 * point.
+	 */
+	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
 
-	s_lock_init(&imen_lock);
-	s_lock_init(&smp_rv_lock);
-	s_lock_init(&panic_lock);
+	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+	mtx_init(&panic_mtx, "panic", MTX_DEF);
 
 #ifdef USE_COMLOCK
-	s_lock_init(&com_lock);
+	mtx_init(&com_mtx, "com", MTX_SPIN);
 #endif /* USE_COMLOCK */
-
-	s_lock_init(&ap_boot_lock);
+	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
 }
 
 /*
@@ -655,9 +662,6 @@ mp_enable(u_int boot_addr)
 	/* initialize all SMP locks */
 	init_locks();
 
-	/* obtain the ap_boot_lock */
-	s_lock(&ap_boot_lock);
-
 	/* start each Application Processor */
 	start_all_aps(boot_addr);
 }
@@ -2247,8 +2251,12 @@ ap_init(void)
 {
 	u_int	apic_id;
 
+	/* spin until all the AP's are ready */
+	while (!aps_ready)
+		/* spin */ ;
+
 	/* lock against other AP's that are waking up */
-	s_lock(&ap_boot_lock);
+	mtx_enter(&ap_boot_mtx, MTX_SPIN);
 
 	/* BSP may have changed PTD while we're waiting for the lock */
 	cpu_invltlb();
@@ -2297,7 +2305,7 @@ ap_init(void)
 	}
 
 	/* let other AP's wake up now */
-	s_unlock(&ap_boot_lock);
+	mtx_exit(&ap_boot_mtx, MTX_SPIN);
 
 	/* wait until all the AP's are up */
 	while (smp_started == 0)
@@ -2851,10 +2859,9 @@ smp_rendezvous(void (* setup_func)(void *),
 	       void (* teardown_func)(void *),
 	       void *arg)
 {
-	u_int	efl;
-	
+
 	/* obtain rendezvous lock */
-	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
+	mtx_enter(&smp_rv_mtx, MTX_SPIN);
 
 	/* set static function pointers */
 	smp_rv_setup_func = setup_func;
@@ -2864,27 +2871,22 @@ smp_rendezvous(void (* setup_func)(void *),
 	smp_rv_waiters[0] = 0;
 	smp_rv_waiters[1] = 0;
 
-	/* disable interrupts on this CPU, save interrupt status */
-	efl = read_eflags();
-	write_eflags(efl & ~PSL_I);
-
-	/* signal other processors, which will enter the IPI with interrupts off */
+	/*
+	 * signal other processors, which will enter the IPI with interrupts off
+	 */
 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
 
 	/* call executor function */
 	smp_rendezvous_action();
 
-	/* restore interrupt flag */
-	write_eflags(efl);
-
 	/* release lock */
-	s_unlock(&smp_rv_lock);
+	mtx_exit(&smp_rv_mtx, MTX_SPIN);
 }
 
 void
 release_aps(void *dummy __unused)
 {
-	s_unlock(&ap_boot_lock);
+	atomic_store_rel_int(&aps_ready, 1);
 }
 
 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
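The ap_boot_lock scheme replaced above required the BSP to hold a lock from mp_enable() all the way until the SI_SUB_SMP SYSINIT fired; the new code uses a one-way release gate instead: each AP spins on a plain read of aps_ready, and release_aps() opens the gate with a releasing store so that everything the BSP wrote beforehand is guaranteed visible to the APs. A minimal C11 sketch of the same pattern (user-space, illustrative names; the kernel uses a volatile int and atomic_store_rel_int() rather than <stdatomic.h>):

	#include <stdatomic.h>

	static atomic_int aps_ready;	/* 0 until the BSP opens the gate */

	/* AP side: wait with cheap loads until the gate opens. */
	static void
	ap_wait(void)
	{
		while (atomic_load_explicit(&aps_ready,
		    memory_order_acquire) == 0)
			;	/* spin */
	}

	/* BSP side: prior writes become visible before the flag does. */
	static void
	release_aps(void)
	{
		atomic_store_explicit(&aps_ready, 1, memory_order_release);
	}

The release/acquire pairing is what lets the BSP stop holding a lock across the whole boot sequence: the flag itself carries the ordering.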
diff --git a/sys/i386/i386/mptable.c b/sys/i386/i386/mptable.c
index 21118f2..598fbf2 100644
--- a/sys/i386/i386/mptable.c
+++ b/sys/i386/i386/mptable.c
@@ -238,7 +238,7 @@ typedef struct BASETABLE_ENTRY {
 #define MP_ANNOUNCE_POST	0x19
 
 /* used to hold the AP's until we are ready to release them */
-struct simplelock	ap_boot_lock;
+struct mtx		ap_boot_mtx;
 
 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
 int	current_postcode;
@@ -318,6 +318,9 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
  * Local data and functions.
  */
 
+/* Set to 1 once we're ready to let the APs out of the pen. */
+static volatile int aps_ready = 0;
+
 static int	mp_capable;
 static u_int	boot_address;
 static u_int	base_memory;
@@ -345,36 +348,40 @@ static void	release_aps(void *dummy);
  */
 
 /* critical region around IO APIC, apic_imen */
-struct simplelock	imen_lock;
+struct mtx		imen_mtx;
 
 /* lock region used by kernel profiling */
-struct simplelock	mcount_lock;
+struct mtx		mcount_mtx;
 
 #ifdef USE_COMLOCK
 /* locks com (tty) data/hardware accesses: a FASTINTR() */
-struct simplelock	com_lock;
+struct mtx		com_mtx;
 #endif /* USE_COMLOCK */
 
 /* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx	smp_rv_mtx;
 
 /* only 1 CPU can panic at a time :) */
-struct simplelock	panic_lock;
+struct mtx		panic_mtx;
 
 static void
 init_locks(void)
 {
-	s_lock_init(&mcount_lock);
+	/*
+	 * XXX The mcount mutex probably needs to be statically initialized,
+	 * since it will be used even in the function calls that get us to this
+	 * point.
+	 */
+	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
 
-	s_lock_init(&imen_lock);
-	s_lock_init(&smp_rv_lock);
-	s_lock_init(&panic_lock);
+	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+	mtx_init(&panic_mtx, "panic", MTX_DEF);
 
 #ifdef USE_COMLOCK
-	s_lock_init(&com_lock);
+	mtx_init(&com_mtx, "com", MTX_SPIN);
 #endif /* USE_COMLOCK */
-
-	s_lock_init(&ap_boot_lock);
+	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
 }
 
 /*
@@ -655,9 +662,6 @@ mp_enable(u_int boot_addr)
 	/* initialize all SMP locks */
 	init_locks();
 
-	/* obtain the ap_boot_lock */
-	s_lock(&ap_boot_lock);
-
 	/* start each Application Processor */
 	start_all_aps(boot_addr);
 }
@@ -2247,8 +2251,12 @@ ap_init(void)
 {
 	u_int	apic_id;
 
+	/* spin until all the AP's are ready */
+	while (!aps_ready)
+		/* spin */ ;
+
 	/* lock against other AP's that are waking up */
-	s_lock(&ap_boot_lock);
+	mtx_enter(&ap_boot_mtx, MTX_SPIN);
 
 	/* BSP may have changed PTD while we're waiting for the lock */
 	cpu_invltlb();
@@ -2297,7 +2305,7 @@ ap_init(void)
 	}
 
 	/* let other AP's wake up now */
-	s_unlock(&ap_boot_lock);
+	mtx_exit(&ap_boot_mtx, MTX_SPIN);
 
 	/* wait until all the AP's are up */
 	while (smp_started == 0)
@@ -2851,10 +2859,9 @@ smp_rendezvous(void (* setup_func)(void *),
 	       void (* teardown_func)(void *),
 	       void *arg)
 {
-	u_int	efl;
-	
+
 	/* obtain rendezvous lock */
-	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
+	mtx_enter(&smp_rv_mtx, MTX_SPIN);
 
 	/* set static function pointers */
 	smp_rv_setup_func = setup_func;
@@ -2864,27 +2871,22 @@ smp_rendezvous(void (* setup_func)(void *),
 	smp_rv_waiters[0] = 0;
 	smp_rv_waiters[1] = 0;
 
-	/* disable interrupts on this CPU, save interrupt status */
-	efl = read_eflags();
-	write_eflags(efl & ~PSL_I);
-
-	/* signal other processors, which will enter the IPI with interrupts off */
+	/*
+	 * signal other processors, which will enter the IPI with interrupts off
+	 */
 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
 
 	/* call executor function */
 	smp_rendezvous_action();
 
-	/* restore interrupt flag */
-	write_eflags(efl);
-
 	/* release lock */
-	s_unlock(&smp_rv_lock);
+	mtx_exit(&smp_rv_mtx, MTX_SPIN);
 }
 
 void
 release_aps(void *dummy __unused)
 {
-	s_unlock(&ap_boot_lock);
+	atomic_store_rel_int(&aps_ready, 1);
 }
 
 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
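Dropping the read_eflags()/write_eflags() pair from smp_rendezvous() is not lost functionality: a spin mutex provides the same guarantee, since mtx_enter(..., MTX_SPIN) saves the caller's interrupt state and disables interrupts for as long as the lock is held, and mtx_exit() restores it. A rough sketch of that contract (hypothetical helpers built on the i386 cpufunc primitives and GCC builtins, not the kernel's real mutex implementation):

	/*
	 * Sketch of why the explicit eflags dance could go away: a spin
	 * mutex's enter/exit brackets a cli/restore pair of its own, so
	 * no IPI can be taken while the rendezvous lock is held.
	 */
	struct spin_mtx {
		volatile int	mtx_lock;	/* 0 = free, 1 = owned */
		u_int		mtx_saveefl;	/* saved by enter, used by exit */
	};

	static void
	spin_mtx_enter(struct spin_mtx *m)
	{
		u_int efl = read_eflags();	/* caller's interrupt state */

		disable_intr();			/* cli: no preemption by IPIs */
		while (__sync_lock_test_and_set(&m->mtx_lock, 1) != 0)
			while (m->mtx_lock != 0)
				;		/* spin on plain reads */
		m->mtx_saveefl = efl;
	}

	static void
	spin_mtx_exit(struct spin_mtx *m)
	{
		u_int efl = m->mtx_saveefl;

		__sync_lock_release(&m->mtx_lock);
		write_eflags(efl);		/* may re-enable interrupts */
	}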
diff --git a/sys/i386/i386/simplelock.s b/sys/i386/i386/simplelock.s
deleted file mode 100644
index ecfb562..0000000
--- a/sys/i386/i386/simplelock.s
+++ /dev/null
@@ -1,317 +0,0 @@
-/*-
- * Copyright (c) 1997, by Steve Passe
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the developer may NOT be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-/*
- * credit to Bruce Evans <bde@zeta.org.au> for help with asm optimization.
- */
-
-#include <machine/asmacros.h>			/* miscellaneous macros */
-#include <i386/isa/intr_machdep.h>
-#include <machine/psl.h>
-
-#include <machine/smptests.h>			/** FAST_HI */
-
-#include "assym.s"
-
-/*
- * The following impliments the primitives described in i386/i386/param.h
- * necessary for the Lite2 lock manager system.
- * The major difference is that the "volatility" of the lock datum has been
- * pushed down from the various functions to lock_data itself.
- */
-
-/*
- * The simple-lock routines are the primitives out of which the lock
- * package is built. The machine-dependent code must implement an
- * atomic test_and_set operation that indivisibly sets the simple lock
- * to non-zero and returns its old value. It also assumes that the
- * setting of the lock to zero below is indivisible. Simple locks may
- * only be used for exclusive locks.
- *
- * struct simplelock {
- *	volatile int	lock_data;
- * };
- */
-
-/*
- * void
- * s_lock_init(struct simplelock *lkp)
- * {
- *	lkp->lock_data = 0;
- * }
- */
-ENTRY(s_lock_init)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$0, (%eax)
-	ret
-
-
-/*
- * void
- * s_lock(struct simplelock *lkp)
- * {
- *	while (test_and_set(&lkp->lock_data))
- *		continue;
- * }
- *
- * Note:
- *	If the acquire fails we do a loop of reads waiting for the lock to
- *	become free instead of continually beating on the lock with xchgl.
- *	The theory here is that the CPU will stay within its cache until
- *	a write by the other CPU updates it, instead of continually updating
- *	the local cache (and thus causing external bus writes) with repeated
- *	writes to the lock.
- */
-#ifndef SL_DEBUG
-
-ENTRY(s_lock)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$1, %ecx
-setlock:
-	xchgl	%ecx, (%eax)
-	testl	%ecx, %ecx
-	jz	gotit			/* it was clear, return */
-wait:
-	cmpl	$0, (%eax)		/* wait to empty */
-	jne	wait			/* still set... */
-	jmp	setlock			/* empty again, try once more */
-gotit:
-	ret
-
-#else /* SL_DEBUG */
-
-ENTRY(s_lock)
-	movl	4(%esp), %edx		/* get the address of the lock */
-setlock:
-	movl	PCPU(CPU_LOCKID), %ecx	/* add cpu id portion */
-	incl	%ecx			/* add lock portion */
-	movl	$0, %eax
-	lock
-	cmpxchgl %ecx, (%edx)
-	jz	gotit			/* it was clear, return */
-	pushl	%eax			/* save what we xchanged */
-	decl	%eax			/* remove lock portion */
-	cmpl	PCPU(CPU_LOCKID), %eax	/* do we hold it? */
-	je	bad_slock		/* yes, thats not good... */
-	addl	$4, %esp		/* clear the stack */
-wait:
-	cmpl	$0, (%edx)		/* wait to empty */
-	jne	wait			/* still set... */
-	jmp	setlock			/* empty again, try once more */
-gotit:
-	ret
-
-	ALIGN_TEXT
-bad_slock:
-	/* %eax (current lock) is already on the stack */
-	pushl	%edx
-	pushl	PCPU(CPUID)
-	pushl	$bsl1
-	call	_panic
-
-bsl1:	.asciz	"rslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"
-
-#endif /* SL_DEBUG */
-
-
-/*
- * int
- * s_lock_try(struct simplelock *lkp)
- * {
- *	return (!test_and_set(&lkp->lock_data));
- * }
- */
-#ifndef SL_DEBUG
-
-ENTRY(s_lock_try)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$1, %ecx
-
-	xchgl	%ecx, (%eax)
-	testl	%ecx, %ecx
-	setz	%al			/* 1 if previous value was 0 */
-	movzbl	%al, %eax		/* convert to an int */
-
-	ret
-
-#else /* SL_DEBUG */
-
-ENTRY(s_lock_try)
-	movl	4(%esp), %edx		/* get the address of the lock */
-	movl	PCPU(CPU_LOCKID), %ecx	/* add cpu id portion */
-	incl	%ecx			/* add lock portion */
-
-	xorl	%eax, %eax
-	lock
-	cmpxchgl %ecx, (%edx)
-	setz	%al			/* 1 if previous value was 0 */
-	movzbl	%al, %eax		/* convert to an int */
-
-	ret
-
-#endif /* SL_DEBUG */
-
-
-/*
- * void
- * s_unlock(struct simplelock *lkp)
- * {
- *	lkp->lock_data = 0;
- * }
- */
-ENTRY(s_unlock)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$0, (%eax)
-	ret
-
-#if 0
-
-/*
- * XXX CRUFTY SS_LOCK IMPLEMENTATION REMOVED XXX
- *
- * These versions of simple_lock block interrupts,
- * making it suitable for regions accessed by both top and bottom levels.
- * This is done by saving the current value of the cpu flags in a per-cpu
- * global, and disabling interrupts when the lock is taken.  When the
- * lock is released, interrupts might be enabled, depending upon the saved
- * cpu flags.
- * Because of this, it must ONLY be used for SHORT, deterministic paths!
- *
- * Note:
- *	It would appear to be "bad behaviour" to blindly store a value in
- *	ss_eflags, as this could destroy the previous contents.  But since ss_eflags
- *	is a per-cpu variable, and its fatal to attempt to acquire a simplelock
- *	that you already hold, we get away with it.  This needs to be cleaned
- *	up someday...
- */
-
-/*
- * void ss_lock(struct simplelock *lkp)
- */
-#ifndef SL_DEBUG
-
-ENTRY(ss_lock)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$1, %ecx		/* value for a held lock */
-ssetlock:
-	pushfl
-	cli
-	xchgl	%ecx, (%eax)		/* compete */
-	testl	%ecx, %ecx
-	jz	sgotit			/* it was clear, return */
-	popfl				/* previous value while waiting */
-swait:
-	cmpl	$0, (%eax)		/* wait to empty */
-	jne	swait			/* still set... */
-	jmp	ssetlock		/* empty again, try once more */
-sgotit:
-	popl	PCPU(SS_EFLAGS)		/* save the old eflags */
-	ret
-
-#else /* SL_DEBUG */
-
-ENTRY(ss_lock)
-	movl	4(%esp), %edx		/* get the address of the lock */
-ssetlock:
-	movl	PCPU(CPU_LOCKID), %ecx	/* add cpu id portion */
-	incl	%ecx			/* add lock portion */
-	pushfl
-	cli
-	movl	$0, %eax
-	lock
-	cmpxchgl %ecx, (%edx)		/* compete */
-	jz	sgotit			/* it was clear, return */
-	pushl	%eax			/* save what we xchanged */
-	decl	%eax			/* remove lock portion */
-	cmpl	PCPU(CPU_LOCKID), %eax	/* do we hold it? */
-	je	sbad_slock		/* yes, thats not good... */
-	addl	$4, %esp		/* clear the stack */
-	popfl
-swait:
-	cmpl	$0, (%edx)		/* wait to empty */
-	jne	swait			/* still set... */
-	jmp	ssetlock		/* empty again, try once more */
-sgotit:
-	popl	PCPU(SS_EFLAGS)		/* save the old task priority */
-sgotit2:
-	ret
-
-	ALIGN_TEXT
-sbad_slock:
-	/* %eax (current lock) is already on the stack */
-	pushl	%edx
-	pushl	PCPU(CPUID)
-	pushl	$sbsl1
-	call	_panic
-
-sbsl1:	.asciz	"rsslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"
-
-#endif /* SL_DEBUG */
-
-/*
- * void ss_unlock(struct simplelock *lkp)
- */
-ENTRY(ss_unlock)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$0, (%eax)		/* clear the simple lock */
-	testl	$PSL_I, PCPU(SS_EFLAGS)
-	jz	ss_unlock2
-	sti
-ss_unlock2:
-	ret
-
-#endif
-
-/*
- * These versions of simple_lock does not contain calls to profiling code.
- * Thus they can be called from the profiling code.
- */
-
-/*
- * void s_lock_np(struct simplelock *lkp)
- */
-NON_GPROF_ENTRY(s_lock_np)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$1, %ecx
-1:
-	xchgl	%ecx, (%eax)
-	testl	%ecx, %ecx
-	jz	3f
-2:
-	cmpl	$0, (%eax)		/* wait to empty */
-	jne	2b			/* still set... */
-	jmp	1b			/* empty again, try once more */
-3:
-	NON_GPROF_RET
-
-/*
- * void s_unlock_np(struct simplelock *lkp)
- */
-NON_GPROF_ENTRY(s_unlock_np)
-	movl	4(%esp), %eax		/* get the address of the lock */
-	movl	$0, (%eax)
-	NON_GPROF_RET
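The s_lock loop deleted above is the classic test-and-test-and-set: one locked xchgl competes for the lock, and on failure the CPU waits with plain loads so it spins inside its own cache line instead of generating bus traffic, exactly as the note in the file describes. The same structure in C, using GCC atomic builtins (an illustrative sketch, not kernel code):

	struct simplelock {
		volatile int	lock_data;
	};

	static void
	s_lock(struct simplelock *lkp)
	{
		for (;;) {
			/* compete: atomic exchange, the C analogue of xchgl */
			if (__sync_lock_test_and_set(&lkp->lock_data, 1) == 0)
				return;		/* it was clear, we own it */
			/* wait to empty with plain reads before retrying */
			while (lkp->lock_data != 0)
				;		/* still set... */
		}
	}

	static void
	s_unlock(struct simplelock *lkp)
	{
		__sync_lock_release(&lkp->lock_data);	/* lock_data = 0 */
	}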
diff --git a/sys/i386/include/asnames.h b/sys/i386/include/asnames.h
index 1b6a35d..a7e3e24 100644
--- a/sys/i386/include/asnames.h
+++ b/sys/i386/include/asnames.h
@@ -233,7 +233,7 @@
 #define _Giant			Giant
 #define _idle			idle
 #define _imen			imen
-#define _imen_lock		imen_lock
+#define _imen_mtx		imen_mtx
 #define _in_vm86call		in_vm86call
 #define _init386		init386
 #define _init_secondary		init_secondary
@@ -282,8 +282,6 @@
 #define _reg_u_sub		reg_u_sub
 #define _rel_mplock		rel_mplock
 #define _round_reg		round_reg
-#define _s_lock			s_lock
-#define _s_unlock		s_unlock
 #define _sched_ithd		sched_ithd
 #define _sched_lock		sched_lock
 #define _set_precision_flag_down set_precision_flag_down
@@ -296,8 +294,6 @@
 #define _smp_rendezvous_action	smp_rendezvous_action
 #define _softclock		softclock
 #define _spending		spending
-#define _ss_lock		ss_lock
-#define _ss_unlock		ss_unlock
 #define _started_cpus		started_cpus
 #define _stopped_cpus		stopped_cpus
 #define _svr4_sigcode		svr4_sigcode
diff --git a/sys/i386/include/lock.h b/sys/i386/include/lock.h
index b858833..414186c 100644
--- a/sys/i386/include/lock.h
+++ b/sys/i386/include/lock.h
@@ -39,13 +39,8 @@
 /*
  * Protects the IO APIC and apic_imen as a critical region.
  */
-#define IMASK_LOCK \
-	pushl	$_imen_lock ;		/* address of lock */	\
-	call	_s_lock ;		/* MP-safe */		\
-	addl	$4, %esp
-
-#define IMASK_UNLOCK \
-	movl	$0, _imen_lock
+#define IMASK_LOCK	MTX_ENTER(_imen_mtx, MTX_SPIN)
+#define IMASK_UNLOCK	MTX_EXIT(_imen_mtx, MTX_SPIN)
 
 #else /* SMP */
@@ -67,8 +62,8 @@
  * XXX should rc (RISCom/8) use this?
  */
 #ifdef USE_COMLOCK
-#define COM_LOCK()	s_lock(&com_lock)
-#define COM_UNLOCK()	s_unlock(&com_lock)
+#define COM_LOCK()	mtx_enter(&com_mtx, MTX_SPIN)
+#define COM_UNLOCK()	mtx_exit(&com_mtx, MTX_SPIN)
 #else
 #define COM_LOCK()
 #define COM_UNLOCK()
@@ -81,46 +76,11 @@
 
 #endif /* SMP */
 
-/*
- * Simple spin lock.
- * It is an error to hold one of these locks while a process is sleeping.
- */
-struct simplelock {
-	volatile int	lock_data;
-};
-
-/* functions in simplelock.s */
-void	s_lock_init	__P((struct simplelock *));
-void	s_lock		__P((struct simplelock *));
-int	s_lock_try	__P((struct simplelock *));
-void	ss_lock		__P((struct simplelock *));
-void	ss_unlock	__P((struct simplelock *));
-void	s_lock_np	__P((struct simplelock *));
-void	s_unlock_np	__P((struct simplelock *));
-
-/* inline simplelock functions */
-static __inline void
-s_unlock(struct simplelock *lkp)
-{
-	lkp->lock_data = 0;
-}
-
 /* global data in mp_machdep.c */
-extern struct simplelock imen_lock;
-extern struct simplelock com_lock;
-extern struct simplelock mcount_lock;
-extern struct simplelock panic_lock;
-
-#if !defined(SIMPLELOCK_DEBUG) && MAXCPU > 1
-/*
- * This set of defines turns on the real functions in i386/isa/apic_ipl.s.
- */
-#define simple_lock_init(alp)	s_lock_init(alp)
-#define simple_lock(alp)	s_lock(alp)
-#define simple_lock_try(alp)	s_lock_try(alp)
-#define simple_unlock(alp)	s_unlock(alp)
-
-#endif /* !SIMPLELOCK_DEBUG && MAXCPU > 1 */
+extern struct mtx	imen_mtx;
+extern struct mtx	com_mtx;
+extern struct mtx	mcount_mtx;
+extern struct mtx	panic_mtx;
 
 #endif /* LOCORE */
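IMASK_LOCK/IMASK_UNLOCK are invoked from assembly (the APIC interrupt-masking paths in i386/isa/apic_ipl.s), which is why they now expand to the MTX_ENTER/MTX_EXIT assembler macros rather than pushing arguments to a C function. At the C level the equivalent critical section around apic_imen would look roughly like this (set_imen_bit is a hypothetical illustration, not a kernel function):

	extern u_int apic_imen;		/* shadow of the IO APIC mask bits */

	static void
	set_imen_bit(int irq)
	{
		mtx_enter(&imen_mtx, MTX_SPIN);
		apic_imen |= (1 << irq);	/* mask the IRQ in the shadow word */
		/* ... then write the IO APIC redirection entry ... */
		mtx_exit(&imen_mtx, MTX_SPIN);
	}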
diff --git a/sys/i386/include/mptable.h b/sys/i386/include/mptable.h
index 21118f2..598fbf2 100644
--- a/sys/i386/include/mptable.h
+++ b/sys/i386/include/mptable.h
@@ -238,7 +238,7 @@ typedef struct BASETABLE_ENTRY {
 #define MP_ANNOUNCE_POST	0x19
 
 /* used to hold the AP's until we are ready to release them */
-struct simplelock	ap_boot_lock;
+struct mtx		ap_boot_mtx;
 
 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
 int	current_postcode;
@@ -318,6 +318,9 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
  * Local data and functions.
  */
 
+/* Set to 1 once we're ready to let the APs out of the pen. */
+static volatile int aps_ready = 0;
+
 static int	mp_capable;
 static u_int	boot_address;
 static u_int	base_memory;
@@ -345,36 +348,40 @@ static void	release_aps(void *dummy);
  */
 
 /* critical region around IO APIC, apic_imen */
-struct simplelock	imen_lock;
+struct mtx		imen_mtx;
 
 /* lock region used by kernel profiling */
-struct simplelock	mcount_lock;
+struct mtx		mcount_mtx;
 
 #ifdef USE_COMLOCK
 /* locks com (tty) data/hardware accesses: a FASTINTR() */
-struct simplelock	com_lock;
+struct mtx		com_mtx;
 #endif /* USE_COMLOCK */
 
 /* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx	smp_rv_mtx;
 
 /* only 1 CPU can panic at a time :) */
-struct simplelock	panic_lock;
+struct mtx		panic_mtx;
 
 static void
 init_locks(void)
 {
-	s_lock_init(&mcount_lock);
+	/*
+	 * XXX The mcount mutex probably needs to be statically initialized,
+	 * since it will be used even in the function calls that get us to this
+	 * point.
+	 */
+	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
 
-	s_lock_init(&imen_lock);
-	s_lock_init(&smp_rv_lock);
-	s_lock_init(&panic_lock);
+	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+	mtx_init(&panic_mtx, "panic", MTX_DEF);
 
 #ifdef USE_COMLOCK
-	s_lock_init(&com_lock);
+	mtx_init(&com_mtx, "com", MTX_SPIN);
 #endif /* USE_COMLOCK */
-
-	s_lock_init(&ap_boot_lock);
+	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
 }
 
 /*
@@ -655,9 +662,6 @@ mp_enable(u_int boot_addr)
 	/* initialize all SMP locks */
 	init_locks();
 
-	/* obtain the ap_boot_lock */
-	s_lock(&ap_boot_lock);
-
 	/* start each Application Processor */
 	start_all_aps(boot_addr);
 }
@@ -2247,8 +2251,12 @@ ap_init(void)
 {
 	u_int	apic_id;
 
+	/* spin until all the AP's are ready */
+	while (!aps_ready)
+		/* spin */ ;
+
 	/* lock against other AP's that are waking up */
-	s_lock(&ap_boot_lock);
+	mtx_enter(&ap_boot_mtx, MTX_SPIN);
 
 	/* BSP may have changed PTD while we're waiting for the lock */
 	cpu_invltlb();
@@ -2297,7 +2305,7 @@ ap_init(void)
 	}
 
 	/* let other AP's wake up now */
-	s_unlock(&ap_boot_lock);
+	mtx_exit(&ap_boot_mtx, MTX_SPIN);
 
 	/* wait until all the AP's are up */
 	while (smp_started == 0)
@@ -2851,10 +2859,9 @@ smp_rendezvous(void (* setup_func)(void *),
 	       void (* teardown_func)(void *),
 	       void *arg)
 {
-	u_int	efl;
-	
+
 	/* obtain rendezvous lock */
-	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
+	mtx_enter(&smp_rv_mtx, MTX_SPIN);
 
 	/* set static function pointers */
 	smp_rv_setup_func = setup_func;
@@ -2864,27 +2871,22 @@ smp_rendezvous(void (* setup_func)(void *),
 	smp_rv_waiters[0] = 0;
 	smp_rv_waiters[1] = 0;
 
-	/* disable interrupts on this CPU, save interrupt status */
-	efl = read_eflags();
-	write_eflags(efl & ~PSL_I);
-
-	/* signal other processors, which will enter the IPI with interrupts off */
+	/*
+	 * signal other processors, which will enter the IPI with interrupts off
+	 */
 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
 
 	/* call executor function */
 	smp_rendezvous_action();
 
-	/* restore interrupt flag */
-	write_eflags(efl);
-
 	/* release lock */
-	s_unlock(&smp_rv_lock);
+	mtx_exit(&smp_rv_mtx, MTX_SPIN);
 }
 
 void
 release_aps(void *dummy __unused)
 {
-	s_unlock(&ap_boot_lock);
+	atomic_store_rel_int(&aps_ready, 1);
 }
 
 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
diff --git a/sys/i386/include/profile.h b/sys/i386/include/profile.h
index 22738c3..28db117 100644
--- a/sys/i386/include/profile.h
+++ b/sys/i386/include/profile.h
@@ -66,8 +66,8 @@
 #ifdef SMP
 #define MCOUNT_ENTER(s)	{ s = read_eflags(); \
 			  __asm __volatile("cli" : : : "memory"); \
-			  s_lock_np(&mcount_lock); }
-#define MCOUNT_EXIT(s)	{ s_unlock_np(&mcount_lock); write_eflags(s); }
+			  mtx_enter(&mcount_mtx, MTX_DEF); }
+#define MCOUNT_EXIT(s)	{ mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
 #else
 #define MCOUNT_ENTER(s)	{ s = read_eflags(); disable_intr(); }
 #define MCOUNT_EXIT(s)	(write_eflags(s))
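MCOUNT_ENTER still saves eflags and issues cli by hand even though mcount_mtx is a default mutex: mcount() can fire in nearly any context, so interrupts must stay off on the local CPU while the mutex serializes the profiling buffers across CPUs (hence the XXX note above about static initialization, since mcount() runs before init_locks() does). The call shape the macros imply, roughly (an illustrative sketch, not the actual mcount() implementation):

	void
	mcount_sketch(uintfptr_t frompc, uintfptr_t selfpc)
	{
		u_int s;		/* eflags snapshot */

		MCOUNT_ENTER(s);	/* cli + mtx_enter(&mcount_mtx, MTX_DEF) */
		/* ... look up the (frompc, selfpc) arc and bump its count ... */
		MCOUNT_EXIT(s);		/* mtx_exit + restore saved eflags */
	}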