Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/fpu.c          |   4
-rw-r--r--  sys/amd64/amd64/machdep.c      |   2
-rw-r--r--  sys/amd64/amd64/mp_machdep.c   |  20
-rw-r--r--  sys/amd64/amd64/mptable.c      |  20
-rw-r--r--  sys/amd64/amd64/trap.c         | 108
-rw-r--r--  sys/amd64/amd64/tsc.c          |  32
-rw-r--r--  sys/amd64/amd64/vm_machdep.c   |   8
-rw-r--r--  sys/amd64/include/cpu.h        |   4
-rw-r--r--  sys/amd64/include/mptable.h    |  20
-rw-r--r--  sys/amd64/include/mutex.h      |  89
-rw-r--r--  sys/amd64/include/profile.h    |   4
-rw-r--r--  sys/amd64/isa/clock.c          |  32
-rw-r--r--  sys/amd64/isa/intr_machdep.c   |   4
-rw-r--r--  sys/amd64/isa/ithread.c        |  14
-rw-r--r--  sys/amd64/isa/nmi.c            |  4
-rw-r--r--  sys/amd64/isa/npx.c            |  4
16 files changed, 206 insertions(+), 163 deletions(-)
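
The change is one mechanical conversion applied throughout: the old mtx_enter()/mtx_exit()
entry points, which selected the lock class via an MTX_DEF or MTX_SPIN flags argument,
become operation-specific names, and extra flags such as MTX_NOSWITCH move to explicit
_flags variants. A minimal sketch of the mapping follows; the two example mutexes and the
function framing are hypothetical (not part of this commit), and mtx_init() is shown with
the era's three-argument signature as used in init386() below.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_mtx;		/* hypothetical sleep (default) mutex */
static struct mtx example_spin_mtx;	/* hypothetical spin mutex */

static void
mutex_rename_sketch(void)
{
	mtx_init(&example_mtx, "example", MTX_DEF);
	mtx_init(&example_spin_mtx, "example spin", MTX_SPIN);

	/* mtx_enter(&m, MTX_DEF)   ->  mtx_lock(&m) */
	mtx_lock(&example_mtx);
	/* mtx_exit(&m, MTX_DEF)    ->  mtx_unlock(&m) */
	mtx_unlock(&example_mtx);

	/* mtx_enter(&m, MTX_SPIN)  ->  mtx_lock_spin(&m) */
	mtx_lock_spin(&example_spin_mtx);
	/* mtx_exit(&m, MTX_SPIN)   ->  mtx_unlock_spin(&m) */
	mtx_unlock_spin(&example_spin_mtx);

	/* mtx_try_enter(&m, MTX_DEF)  ->  mtx_trylock(&m) */
	if (mtx_trylock(&example_mtx))
		mtx_unlock(&example_mtx);

	/*
	 * Extra flags move to the _flags variants, e.g. in cpu_exit():
	 * mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH)
	 *     ->  mtx_unlock_flags(&Giant, MTX_NOSWITCH)
	 */
	mtx_lock(&example_mtx);
	mtx_unlock_flags(&example_mtx, MTX_NOSWITCH);
}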
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index a729e0f..0dab6ae 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -724,7 +724,7 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
PCPU_GET(npxproc), curproc, npx_exists);
@@ -783,7 +783,7 @@ npx_intr(dummy)
*/
psignal(curproc, SIGFPE);
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index d5c7ece..ea1703c 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1910,7 +1910,7 @@ init386(first)
* Giant is used early for at least debugger traps and unexpected traps.
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* make ldt memory segments */
/*
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 9d53cd7..2802750 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
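
A note on the forward_signal() hunks above (the same change repeats in mptable.c and
include/mptable.h below, which share this source): sched_lock is a spin lock, so it
cannot be held across the busy-wait for the IPI acknowledgement. The code therefore
drops it around the wait and re-takes it to recheck p_oncpu, retrying if the process
migrated meanwhile. A condensed sketch of that shape, with the checkstate/IPI
bookkeeping from the real function elided:

static void
forward_signal_sketch(struct proc *p)
{
	u_char id;

	mtx_lock_spin(&sched_lock);
	while (1) {
		if (p->p_stat != SRUN) {	/* target stopped running */
			mtx_unlock_spin(&sched_lock);
			return;
		}
		id = p->p_oncpu;
		mtx_unlock_spin(&sched_lock);	/* drop before the busy-wait */
		if (id == 0xff)			/* not resident on any CPU */
			return;
		/* ... send the IPI to CPU `id` and spin for the ack ... */
		mtx_lock_spin(&sched_lock);	/* retake and recheck */
		if (id == p->p_oncpu) {		/* still there: delivered */
			mtx_unlock_spin(&sched_lock);
			return;
		}
		/* Process migrated during the wait; retry. */
	}
}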
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index 9d53cd7..2802750 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index b4373b3..533d791 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -174,11 +174,11 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (resched_wanted()) {
/*
@@ -193,30 +193,30 @@ userret(p, frame, oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* Charge system time if profiling.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* XXX - do we need Giant? */
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_eip,
(u_int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -282,9 +282,9 @@ restart:
((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
/* user trap */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
switch (type) {
@@ -312,9 +312,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0)
goto user;
break;
@@ -339,9 +339,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = trap_pfault(&frame, TRUE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
if (i == -2) {
/*
@@ -371,13 +371,13 @@ restart:
#ifndef TIMER_FREQ
# define TIMER_FREQ 1193182
#endif
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* machine/parity/power fail/"kitchen sink" faults */
@@ -421,9 +421,9 @@ restart:
ucode = FPE_FPU_NP_TRAP;
break;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = (*pmath_emulate)(&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0) {
if (!(frame.tf_eflags & PSL_T))
goto out;
@@ -452,9 +452,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(void) trap_pfault(&frame, FALSE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
case T_DNA:
@@ -477,9 +477,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i != 0)
/*
* returns to original process
@@ -510,9 +510,9 @@ restart:
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
PCPU_GET(curpcb)->pcb_gs = 0;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGBUS);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
@@ -621,13 +621,13 @@ restart:
#ifdef DEV_ISA
case T_NMI:
#ifdef POWERFAIL_NMI
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* XXX Giant */
@@ -651,13 +651,13 @@ restart:
#endif /* DEV_ISA */
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
trap_fatal(&frame, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* Translate fault for emulators (e.g. Linux) */
if (*p->p_sysent->sv_transtrap)
i = (*p->p_sysent->sv_transtrap)(i, type);
@@ -673,12 +673,12 @@ restart:
uprintf("\n");
}
#endif
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
user:
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
out:
return;
}
@@ -1103,15 +1103,15 @@ syscall2(frame)
#ifdef DIAGNOSTIC
if (ISPL(frame.tf_cs) != SEL_UPL) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
panic("syscall");
/* NOT REACHED */
}
#endif
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
params = (caddr_t)frame.tf_esp + sizeof(int);
@@ -1121,9 +1121,9 @@ syscall2(frame)
/*
* The prep code is not MP aware.
*/
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
} else {
/*
* Need to check if this is a 32 bit or 64 bit syscall.
@@ -1160,7 +1160,7 @@ syscall2(frame)
*/
if (params && (i = narg * sizeof(int)) &&
(error = copyin(params, (caddr_t)args, (u_int)i))) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, narg, args);
@@ -1174,13 +1174,13 @@ syscall2(frame)
* we are ktracing
*/
if ((callp->sy_narg & SYF_MPSAFE) == 0) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsyscall(p->p_tracep, code, narg, args);
}
#endif
@@ -1230,7 +1230,7 @@ bad:
*/
if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
frame.tf_eflags &= ~PSL_T;
trapsignal(p, SIGTRAP, 0);
}
@@ -1243,7 +1243,7 @@ bad:
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
}
#endif
@@ -1259,7 +1259,7 @@ bad:
* Release Giant if we had to get it
*/
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -1278,38 +1278,38 @@ ast(frame)
struct proc *p = CURPROC;
u_quad_t sticks;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
astoff();
atomic_add_int(&cnt.v_soft, 1);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
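
The userret()/ast() hunks above all follow one ordering rule: Giant is a sleep
(default) mutex while sched_lock is a spin lock, and a sleep mutex may not be
acquired while a spin lock is held; hence the repeated unlock/lock/relock dance.
A condensed sketch of the shape, using the PS_OWEUPC branch of ast() as the example:

	mtx_lock_spin(&sched_lock);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_unlock_spin(&sched_lock);	/* spin lock off first... */
		mtx_lock(&Giant);		/* ...then the sleep mutex */
		mtx_lock_spin(&sched_lock);	/* retake to touch p_stats */
		addupc_task(p, p->p_stats->p_prof.pr_addr,
		    p->p_stats->p_prof.pr_ticks);
	}
	mtx_unlock_spin(&sched_lock);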
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index d7a1ff0..bbd066b 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
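
clock_lock in these tsc.c hunks (and in the identical isa/clock.c copy below) is a
spin mutex guarding the i8254's multi-step I/O protocol: the counter must be latched
and then read as two byte-wide inb()s, and reprogramming the period is likewise a
three-outb() sequence, so an interleaved access from another CPU or an interrupt
would tear the value. A sketch of the guarded read, mirroring getit() above:

static int
getit_sketch(void)
{
	int high, low;

	mtx_lock_spin(&clock_lock);
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);	/* latch counter 0 */
	low = inb(TIMER_CNTR0);				/* LSB first */
	high = inb(TIMER_CNTR0);			/* then MSB */
	mtx_unlock_spin(&clock_lock);
	return ((high << 8) | low);
}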
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index eddae55..434587d 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -261,8 +261,8 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -574,7 +574,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
@@ -597,7 +597,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
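
vm_page_zero_idle() runs from the idle loop, which must never block, so the old
mtx_try_enter() becomes mtx_trylock() rather than mtx_lock(). A condensed sketch
of the pattern:

static int
zero_idle_sketch(void)
{
	if (mtx_trylock(&Giant)) {
		/* ... pull a page off PQ_FREE and zero it ... */
		mtx_unlock(&Giant);
		return (1);	/* made progress */
	}
	return (0);		/* Giant contended; let the idle loop retry */
}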
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index a1d47f0..0b99ec6 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -92,9 +92,9 @@
* counter in the proc table and flag isn't really necessary.
*/
#define need_proftick(p) do { \
- mtx_enter(&sched_lock, MTX_SPIN); \
+ mtx_lock_spin(&sched_lock); \
(p)->p_sflag |= PS_OWEUPC; \
- mtx_exit(&sched_lock, MTX_SPIN); \
+ mtx_unlock_spin(&sched_lock); \
aston(); \
} while (0)
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index 9d53cd7..2802750 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/amd64/include/mutex.h b/sys/amd64/include/mutex.h
index 2f16de3..c4fe210 100644
--- a/sys/amd64/include/mutex.h
+++ b/sys/amd64/include/mutex.h
@@ -43,22 +43,10 @@ extern struct mtx clock_lock;
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "fl & PSL_I";
-char STR_IDIS[] = "!(fl & PSL_I)";
-char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-#endif /* MUTEX_DEBUG */
-
-#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
-#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, "fl & PSL_I")
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, "!(fl & PSL_I)")
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, \
+ "mpp->mtx_saveintr & PSL_I")
#define mtx_legal2block() (read_eflags() & PSL_I)
@@ -66,9 +54,6 @@ extern char STR_SIEN[];
* Assembly macros (for internal use only)
*------------------------------------------------------------------------------
*/
-
-#ifdef _KERN_MUTEX_C_
-
#define _V(x) __STRING(x)
#if 0
@@ -252,22 +237,80 @@ extern char STR_SIEN[];
#undef _V
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release mutexes.
+ *
+ * Note: All of these macros accept a "flags" argument and are analogous
+ * to the mtx_lock_flags and mtx_unlock_flags general macros. If one
+ * does not wish to pass a flag, the value 0 may be passed as the
+ * second argument.
+ *
+ * XXX: We only have MTX_LOCK_SPIN and MTX_UNLOCK_SPIN for now, since that's
+ * all we use right now. We should add MTX_LOCK and MTX_UNLOCK (for sleep
+ * locks) in the near future, however.
*/
+#define MTX_LOCK_SPIN(lck, flags) \
+ pushl %eax ; \
+ pushl %ecx ; \
+ pushl %ebx ; \
+ movl $(MTX_UNOWNED) , %eax ; \
+ movl PCPU(CURPROC), %ebx ; \
+ pushfl ; \
+ popl %ecx ; \
+ cli ; \
+ MPLOCKED cmpxchgl %ebx, lck+MTX_LOCK ; \
+ jz 2f ; \
+ cmpl lck+MTX_LOCK, %ebx ; \
+ je 3f ; \
+ pushl $0 ; \
+ pushl $0 ; \
+ pushl %ecx ; \
+ pushl $flags ; \
+ pushl $lck ; \
+ call _mtx_lock_spin ; \
+ addl $0x14, %esp ; \
+ jmp 1f ; \
+3: movl lck+MTX_RECURSECNT, %ebx ; \
+ incl %ebx ; \
+ movl %ebx, lck+MTX_RECURSECNT ; \
+ jmp 1f ; \
+2: movl %ecx, lck+MTX_SAVEINTR ; \
+1: popl %ebx ; \
+ popl %ecx ; \
+ popl %eax
+
+#define MTX_UNLOCK_SPIN(lck) \
+ pushl %edx ; \
+ pushl %eax ; \
+ movl lck+MTX_SAVEINTR, %edx ; \
+ movl lck+MTX_RECURSECNT, %eax ; \
+ testl %eax, %eax ; \
+ jne 2f ; \
+ movl $(MTX_UNOWNED), %eax ; \
+ xchgl %eax, lck+MTX_LOCK ; \
+ pushl %edx ; \
+ popfl ; \
+ jmp 1f ; \
+2: decl %eax ; \
+ movl %eax, lck+MTX_RECURSECNT ; \
+1: popl %eax ; \
+ popl %edx
+/*
+ * XXX: These two are broken right now and need to be made to work for
+ * XXX: sleep locks, as the above two work for spin locks. We're not in
+ * XXX: too much of a rush to do these as we do not use them right now.
+ */
#define MTX_ENTER(lck, type) \
pushl $0 ; /* dummy __LINE__ */ \
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_enter ; \
+ call _mtx_lock_XXX ; \
addl $16,%esp
#define MTX_EXIT(lck, type) \
@@ -275,7 +318,7 @@ extern char STR_SIEN[];
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_exit ; \
+ call _mtx_unlock_XXX ; \
addl $16,%esp
#endif /* !LOCORE */
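
For readers not fluent in the assembler above, a hedged C-level rendering of what
the new MTX_LOCK_SPIN/MTX_UNLOCK_SPIN macros do. The struct field names
(mtx_lock, mtx_saveintr, mtx_recursecnt) are inferred from the assembler offset
symbols, and the slow-path signature is read off the five argument pushes; both
are assumptions, not definitions from this diff.

static void
mtx_lock_spin_sketch(struct mtx *m, int flags)
{
	u_int saveintr = read_eflags();		/* pushfl ; popl %ecx */

	disable_intr();				/* cli */
	if (atomic_cmpset_int(&m->mtx_lock, MTX_UNOWNED, (u_int)CURPROC))
		m->mtx_saveintr = saveintr;	/* fast path: acquired */
	else if (m->mtx_lock == (u_int)CURPROC)
		m->mtx_recursecnt++;		/* recursive acquire */
	else
		/* contended: the $0,$0,%ecx,$flags,$lck pushes */
		_mtx_lock_spin(m, flags, saveintr, NULL, 0);
}

static void
mtx_unlock_spin_sketch(struct mtx *m)
{
	u_int saveintr = m->mtx_saveintr;

	if (m->mtx_recursecnt != 0)
		m->mtx_recursecnt--;		/* unwind one recursion */
	else {
		/* xchgl releases the lock; popfl then restores the
		 * interrupt state saved at acquire time. */
		atomic_store_rel_int(&m->mtx_lock, MTX_UNOWNED);
		write_eflags(saveintr);
	}
}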
diff --git a/sys/amd64/include/profile.h b/sys/amd64/include/profile.h
index 28db117..5f2a780 100644
--- a/sys/amd64/include/profile.h
+++ b/sys/amd64/include/profile.h
@@ -66,8 +66,8 @@
#ifdef SMP
#define MCOUNT_ENTER(s) { s = read_eflags(); \
__asm __volatile("cli" : : : "memory"); \
- mtx_enter(&mcount_mtx, MTX_DEF); }
-#define MCOUNT_EXIT(s) { mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
+ mtx_lock(&mcount_mtx); }
+#define MCOUNT_EXIT(s) { mtx_unlock(&mcount_mtx); write_eflags(s); }
#else
#define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); }
#define MCOUNT_EXIT(s) (write_eflags(s))
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index d7a1ff0..bbd066b 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/amd64/isa/intr_machdep.c b/sys/amd64/isa/intr_machdep.c
index d44a672..70b9378 100644
--- a/sys/amd64/isa/intr_machdep.c
+++ b/sys/amd64/isa/intr_machdep.c
@@ -701,7 +701,7 @@ inthand_remove(struct intrhand *idesc)
ithds[ithd->irq] = NULL;
if ((idesc->ih_flags & INTR_FAST) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
ithd->it_proc->p_intr_nesting_level = 0;
ithd->it_proc->p_stat = SRUN;
@@ -713,7 +713,7 @@ inthand_remove(struct intrhand *idesc)
* XXX: should we lower the threads priority?
*/
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
free(idesc->ih_name, M_DEVBUF);
diff --git a/sys/amd64/isa/ithread.c b/sys/amd64/isa/ithread.c
index 5f64861..99a1abf 100644
--- a/sys/amd64/isa/ithread.c
+++ b/sys/amd64/isa/ithread.c
@@ -114,7 +114,7 @@ sched_ithd(void *cookie)
* is higher priority than their current thread, it gets run now.
*/
ir->it_need = 1;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ir->it_proc->p_stat == SWAIT) { /* not on run queue */
CTR1(KTR_INTR, "sched_ithd: setrunqueue %d",
ir->it_proc->p_pid);
@@ -134,7 +134,7 @@ sched_ithd(void *cookie)
ir->it_proc->p_stat );
need_resched();
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -163,7 +163,7 @@ ithd_loop(void *dummy)
me->it_proc->p_pid, me->it_proc->p_comm);
curproc->p_ithd = NULL;
free(me, M_DEVBUF);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
kthread_exit(0);
}
@@ -188,10 +188,10 @@ ithd_loop(void *dummy)
ih->ih_flags);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
}
@@ -201,7 +201,7 @@ ithd_loop(void *dummy)
* set again, so we have to check it again.
*/
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!me->it_need) {
INTREN (1 << me->irq); /* reset the mask bit */
@@ -217,6 +217,6 @@ ithd_loop(void *dummy)
CTR1(KTR_INTR, "ithd_loop pid %d: resumed",
me->it_proc->p_pid);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
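
The ithread.c hunks above sit on a small wakeup handshake between sched_ithd()
(interrupt context) and ithd_loop() (the handler thread): it_need is set before
the wakeup, and rechecked under sched_lock before the thread unmasks its IRQ and
sleeps, so a request arriving mid-loop cannot be lost. A condensed sketch of both
sides, with the CTR diagnostics and handler chain elided:

	/* Producer side, in sched_ithd() (interrupt context): */
	ir->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (ir->it_proc->p_stat == SWAIT)	/* not on run queue */
		setrunqueue(ir->it_proc);	/* wake the handler thread */
	mtx_unlock_spin(&sched_lock);

	/* Consumer side, the ithd_loop() main loop: */
	while (1) {
		while (me->it_need) {
			me->it_need = 0;
			/* ... run the chained handlers, taking Giant
			 * around any that are not INTR_MPSAFE ... */
		}
		mtx_lock_spin(&sched_lock);
		if (!me->it_need) {		/* nothing arrived mid-loop */
			INTREN(1 << me->irq);	/* reset the mask bit */
			me->it_proc->p_stat = SWAIT;
			mi_switch();		/* sleep until next IRQ */
		}
		mtx_unlock_spin(&sched_lock);
	}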
diff --git a/sys/amd64/isa/nmi.c b/sys/amd64/isa/nmi.c
index d44a672..70b9378 100644
--- a/sys/amd64/isa/nmi.c
+++ b/sys/amd64/isa/nmi.c
@@ -701,7 +701,7 @@ inthand_remove(struct intrhand *idesc)
ithds[ithd->irq] = NULL;
if ((idesc->ih_flags & INTR_FAST) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
ithd->it_proc->p_intr_nesting_level = 0;
ithd->it_proc->p_stat = SRUN;
@@ -713,7 +713,7 @@ inthand_remove(struct intrhand *idesc)
* XXX: should we lower the threads priority?
*/
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
free(idesc->ih_name, M_DEVBUF);
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index a729e0f..0dab6ae 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -724,7 +724,7 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
PCPU_GET(npxproc), curproc, npx_exists);
@@ -783,7 +783,7 @@ npx_intr(dummy)
*/
psignal(curproc, SIGFPE);
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*