Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/busdma_machdep.c | 20
-rw-r--r--  sys/alpha/alpha/interrupt.c | 12
-rw-r--r--  sys/alpha/alpha/machdep.c | 2
-rw-r--r--  sys/alpha/alpha/mp_machdep.c | 18
-rw-r--r--  sys/alpha/alpha/procfs_machdep.c | 24
-rw-r--r--  sys/alpha/alpha/trap.c | 76
-rw-r--r--  sys/alpha/alpha/vm_machdep.c | 8
-rw-r--r--  sys/alpha/include/cpu.h | 4
-rw-r--r--  sys/alpha/include/mutex.h | 47
-rw-r--r--  sys/alpha/osf1/osf1_misc.c | 4
-rw-r--r--  sys/amd64/amd64/fpu.c | 4
-rw-r--r--  sys/amd64/amd64/machdep.c | 2
-rw-r--r--  sys/amd64/amd64/mp_machdep.c | 20
-rw-r--r--  sys/amd64/amd64/mptable.c | 20
-rw-r--r--  sys/amd64/amd64/trap.c | 108
-rw-r--r--  sys/amd64/amd64/tsc.c | 32
-rw-r--r--  sys/amd64/amd64/vm_machdep.c | 8
-rw-r--r--  sys/amd64/include/cpu.h | 4
-rw-r--r--  sys/amd64/include/mptable.h | 20
-rw-r--r--  sys/amd64/include/mutex.h | 89
-rw-r--r--  sys/amd64/include/profile.h | 4
-rw-r--r--  sys/amd64/isa/clock.c | 32
-rw-r--r--  sys/amd64/isa/intr_machdep.c | 4
-rw-r--r--  sys/amd64/isa/ithread.c | 14
-rw-r--r--  sys/amd64/isa/nmi.c | 4
-rw-r--r--  sys/amd64/isa/npx.c | 4
-rw-r--r--  sys/compat/linprocfs/linprocfs.c | 4
-rw-r--r--  sys/compat/linprocfs/linprocfs_misc.c | 4
-rw-r--r--  sys/compat/linux/linux_misc.c | 4
-rw-r--r--  sys/compat/svr4/svr4_misc.c | 12
-rw-r--r--  sys/dev/acpica/Osd/OsdSynch.c | 8
-rw-r--r--  sys/dev/an/if_anreg.h | 4
-rw-r--r--  sys/dev/dc/if_dcreg.h | 4
-rw-r--r--  sys/dev/fxp/if_fxpvar.h | 4
-rw-r--r--  sys/dev/ichsmb/ichsmb.c | 44
-rw-r--r--  sys/dev/isp/isp_freebsd.c | 4
-rw-r--r--  sys/dev/isp/isp_freebsd.h | 4
-rw-r--r--  sys/dev/pccbb/pccbb.c | 26
-rw-r--r--  sys/dev/random/harvest.c | 2
-rw-r--r--  sys/dev/random/yarrow.c | 72
-rw-r--r--  sys/dev/sf/if_sfreg.h | 4
-rw-r--r--  sys/dev/sio/sio.c | 86
-rw-r--r--  sys/dev/sk/if_skreg.h | 8
-rw-r--r--  sys/dev/ti/if_tireg.h | 4
-rw-r--r--  sys/dev/usb/if_auereg.h | 4
-rw-r--r--  sys/dev/usb/if_cuereg.h | 4
-rw-r--r--  sys/dev/usb/if_kuereg.h | 4
-rw-r--r--  sys/dev/vinum/vinumdaemon.c | 4
-rw-r--r--  sys/dev/vinum/vinumlock.c | 4
-rw-r--r--  sys/dev/vr/if_vrreg.h | 4
-rw-r--r--  sys/dev/wi/if_wireg.h | 4
-rw-r--r--  sys/fs/cd9660/cd9660_node.c | 16
-rw-r--r--  sys/fs/deadfs/dead_vnops.c | 2
-rw-r--r--  sys/fs/hpfs/hpfs_hash.c | 24
-rw-r--r--  sys/fs/msdosfs/msdosfs_denode.c | 16
-rw-r--r--  sys/fs/msdosfs/msdosfs_vfsops.c | 14
-rw-r--r--  sys/fs/msdosfs/msdosfs_vnops.c | 4
-rw-r--r--  sys/fs/ntfs/ntfs_ihash.c | 12
-rw-r--r--  sys/fs/ntfs/ntfs_subr.c | 14
-rw-r--r--  sys/fs/ntfs/ntfs_vfsops.c | 4
-rw-r--r--  sys/fs/nullfs/null_vnops.c | 4
-rw-r--r--  sys/fs/nwfs/nwfs_node.c | 2
-rw-r--r--  sys/fs/nwfs/nwfs_vnops.c | 14
-rw-r--r--  sys/fs/procfs/procfs_ctl.c | 32
-rw-r--r--  sys/fs/procfs/procfs_status.c | 6
-rw-r--r--  sys/gnu/ext2fs/ext2_ihash.c | 20
-rw-r--r--  sys/gnu/ext2fs/ext2_vfsops.c | 26
-rw-r--r--  sys/gnu/fs/ext2fs/ext2_vfsops.c | 26
-rw-r--r--  sys/i386/i386/machdep.c | 2
-rw-r--r--  sys/i386/i386/mp_machdep.c | 20
-rw-r--r--  sys/i386/i386/mpapic.c | 8
-rw-r--r--  sys/i386/i386/mptable.c | 20
-rw-r--r--  sys/i386/i386/procfs_machdep.c | 42
-rw-r--r--  sys/i386/i386/trap.c | 108
-rw-r--r--  sys/i386/i386/tsc.c | 32
-rw-r--r--  sys/i386/i386/vm86.c | 8
-rw-r--r--  sys/i386/i386/vm_machdep.c | 8
-rw-r--r--  sys/i386/include/cpu.h | 4
-rw-r--r--  sys/i386/include/lock.h | 8
-rw-r--r--  sys/i386/include/mptable.h | 20
-rw-r--r--  sys/i386/include/mutex.h | 89
-rw-r--r--  sys/i386/include/profile.h | 4
-rw-r--r--  sys/i386/isa/clock.c | 32
-rw-r--r--  sys/i386/isa/if_el.c | 4
-rw-r--r--  sys/i386/isa/intr_machdep.c | 4
-rw-r--r--  sys/i386/isa/ithread.c | 14
-rw-r--r--  sys/i386/isa/nmi.c | 4
-rw-r--r--  sys/i386/isa/npx.c | 4
-rw-r--r--  sys/ia64/ia64/interrupt.c | 6
-rw-r--r--  sys/ia64/ia64/machdep.c | 2
-rw-r--r--  sys/ia64/ia64/mp_machdep.c | 4
-rw-r--r--  sys/ia64/ia64/procfs_machdep.c | 22
-rw-r--r--  sys/ia64/ia64/trap.c | 76
-rw-r--r--  sys/ia64/ia64/vm_machdep.c | 4
-rw-r--r--  sys/ia64/include/cpu.h | 4
-rw-r--r--  sys/ia64/include/mutex.h | 20
-rw-r--r--  sys/isa/atrtc.c | 32
-rw-r--r--  sys/isa/sio.c | 86
-rw-r--r--  sys/isofs/cd9660/cd9660_node.c | 16
-rw-r--r--  sys/kern/imgact_elf.c | 4
-rw-r--r--  sys/kern/init_main.c | 12
-rw-r--r--  sys/kern/kern_acct.c | 4
-rw-r--r--  sys/kern/kern_clock.c | 28
-rw-r--r--  sys/kern/kern_condvar.c | 60
-rw-r--r--  sys/kern/kern_exit.c | 12
-rw-r--r--  sys/kern/kern_fork.c | 16
-rw-r--r--  sys/kern/kern_idle.c | 4
-rw-r--r--  sys/kern/kern_intr.c | 12
-rw-r--r--  sys/kern/kern_kthread.c | 4
-rw-r--r--  sys/kern/kern_lock.c | 24
-rw-r--r--  sys/kern/kern_malloc.c | 24
-rw-r--r--  sys/kern/kern_mutex.c | 948
-rw-r--r--  sys/kern/kern_proc.c | 4
-rw-r--r--  sys/kern/kern_prot.c | 14
-rw-r--r--  sys/kern/kern_resource.c | 40
-rw-r--r--  sys/kern/kern_shutdown.c | 6
-rw-r--r--  sys/kern/kern_sig.c | 48
-rw-r--r--  sys/kern/kern_subr.c | 4
-rw-r--r--  sys/kern/kern_synch.c | 84
-rw-r--r--  sys/kern/kern_timeout.c | 38
-rw-r--r--  sys/kern/subr_eventhandler.c | 12
-rw-r--r--  sys/kern/subr_prof.c | 6
-rw-r--r--  sys/kern/subr_rman.c | 42
-rw-r--r--  sys/kern/subr_smp.c | 20
-rw-r--r--  sys/kern/subr_trap.c | 108
-rw-r--r--  sys/kern/subr_turnstile.c | 948
-rw-r--r--  sys/kern/subr_witness.c | 948
-rw-r--r--  sys/kern/sys_generic.c | 12
-rw-r--r--  sys/kern/sys_process.c | 24
-rw-r--r--  sys/kern/tty.c | 4
-rw-r--r--  sys/kern/uipc_mbuf.c | 30
-rw-r--r--  sys/kern/uipc_syscalls.c | 12
-rw-r--r--  sys/kern/vfs_aio.c | 2
-rw-r--r--  sys/kern/vfs_bio.c | 2
-rw-r--r--  sys/kern/vfs_conf.c | 4
-rw-r--r--  sys/kern/vfs_default.c | 4
-rw-r--r--  sys/kern/vfs_export.c | 174
-rw-r--r--  sys/kern/vfs_extattr.c | 46
-rw-r--r--  sys/kern/vfs_mount.c | 4
-rw-r--r--  sys/kern/vfs_subr.c | 174
-rw-r--r--  sys/kern/vfs_syscalls.c | 46
-rw-r--r--  sys/kern/vfs_vnops.c | 12
-rw-r--r--  sys/miscfs/deadfs/dead_vnops.c | 2
-rw-r--r--  sys/miscfs/nullfs/null_vnops.c | 4
-rw-r--r--  sys/miscfs/procfs/procfs_ctl.c | 32
-rw-r--r--  sys/miscfs/procfs/procfs_status.c | 6
-rw-r--r--  sys/msdosfs/msdosfs_denode.c | 16
-rw-r--r--  sys/msdosfs/msdosfs_vfsops.c | 14
-rw-r--r--  sys/msdosfs/msdosfs_vnops.c | 4
-rw-r--r--  sys/net/if_var.h | 4
-rw-r--r--  sys/netgraph/ng_base.c | 134
-rw-r--r--  sys/nfs/nfs_nqlease.c | 6
-rw-r--r--  sys/ntfs/ntfs_ihash.c | 12
-rw-r--r--  sys/ntfs/ntfs_subr.c | 14
-rw-r--r--  sys/ntfs/ntfs_vfsops.c | 4
-rw-r--r--  sys/nwfs/nwfs_node.c | 2
-rw-r--r--  sys/nwfs/nwfs_vnops.c | 14
-rw-r--r--  sys/pc98/cbus/clock.c | 32
-rw-r--r--  sys/pc98/cbus/pcrtc.c | 32
-rw-r--r--  sys/pc98/cbus/sio.c | 86
-rw-r--r--  sys/pc98/i386/machdep.c | 2
-rw-r--r--  sys/pc98/pc98/clock.c | 32
-rw-r--r--  sys/pc98/pc98/machdep.c | 2
-rw-r--r--  sys/pc98/pc98/npx.c | 4
-rw-r--r--  sys/pc98/pc98/sio.c | 86
-rw-r--r--  sys/pci/if_dcreg.h | 4
-rw-r--r--  sys/pci/if_fxpvar.h | 4
-rw-r--r--  sys/pci/if_pcnreg.h | 4
-rw-r--r--  sys/pci/if_rlreg.h | 4
-rw-r--r--  sys/pci/if_sfreg.h | 4
-rw-r--r--  sys/pci/if_sisreg.h | 4
-rw-r--r--  sys/pci/if_skreg.h | 8
-rw-r--r--  sys/pci/if_stereg.h | 4
-rw-r--r--  sys/pci/if_tireg.h | 4
-rw-r--r--  sys/pci/if_tlreg.h | 4
-rw-r--r--  sys/pci/if_vrreg.h | 4
-rw-r--r--  sys/pci/if_wbreg.h | 4
-rw-r--r--  sys/pci/if_wxvar.h | 8
-rw-r--r--  sys/pci/if_xlreg.h | 4
-rw-r--r--  sys/powerpc/aim/vm_machdep.c | 8
-rw-r--r--  sys/powerpc/include/mutex.h | 47
-rw-r--r--  sys/powerpc/powerpc/mp_machdep.c | 8
-rw-r--r--  sys/powerpc/powerpc/procfs_machdep.c | 24
-rw-r--r--  sys/powerpc/powerpc/vm_machdep.c | 8
-rw-r--r--  sys/sys/buf.h | 4
-rw-r--r--  sys/sys/mbuf.h | 32
-rw-r--r--  sys/sys/mutex.h | 372
-rw-r--r--  sys/sys/proc.h | 4
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c | 52
-rw-r--r--  sys/ufs/ifs/ifs_vfsops.c | 14
-rw-r--r--  sys/ufs/ufs/ufs_ihash.c | 20
-rw-r--r--  sys/ufs/ufs/ufs_quota.c | 12
-rw-r--r--  sys/ufs/ufs/ufs_vnops.c | 12
-rw-r--r--  sys/vm/vm_fault.c | 4
-rw-r--r--  sys/vm/vm_glue.c | 48
-rw-r--r--  sys/vm/vm_map.h | 8
-rw-r--r--  sys/vm/vm_meter.c | 10
-rw-r--r--  sys/vm/vm_object.c | 4
-rw-r--r--  sys/vm/vm_pageout.c | 20
-rw-r--r--  sys/vm/vm_zone.c | 34
200 files changed, 3509 insertions, 3721 deletions
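
The bulk of this change is a mechanical rename of the mutex primitives: the old mtx_enter()/mtx_exit() calls, which took the lock class as a flags argument (MTX_DEF or MTX_SPIN), become distinct mtx_lock()/mtx_unlock() and mtx_lock_spin()/mtx_unlock_spin() operations, and mtx_try_enter() becomes mtx_trylock(). Below is a minimal before/after sketch of a call site; sched_lock and Giant are the real mutexes touched throughout the diff, while the surrounding functions are purely illustrative. The per-file diffs follow.

/*
 * Minimal sketch: the same critical section under the old and new API.
 */
static void
example_old_api(void)
{
	mtx_enter(&Giant, MTX_DEF);		/* sleep mutex, old spelling */
	mtx_enter(&sched_lock, MTX_SPIN);	/* spin mutex, old spelling */
	/* ... critical section ... */
	mtx_exit(&sched_lock, MTX_SPIN);
	mtx_exit(&Giant, MTX_DEF);
}

static void
example_new_api(void)
{
	mtx_lock(&Giant);			/* sleep mutex */
	mtx_lock_spin(&sched_lock);		/* spin mutex */
	/* ... critical section ... */
	mtx_unlock_spin(&sched_lock);
	mtx_unlock(&Giant);
}
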
diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c
index 2cc55d2..e920894 100644
--- a/sys/alpha/alpha/busdma_machdep.c
+++ b/sys/alpha/alpha/busdma_machdep.c
@@ -614,11 +614,11 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
break;
}
bpage->busaddr = pmap_kextract(bpage->vaddr);
- mtx_enter(&bounce_lock, MTX_DEF);
+ mtx_lock(&bounce_lock);
STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
total_bpages++;
free_bpages++;
- mtx_exit(&bounce_lock, MTX_DEF);
+ mtx_unlock(&bounce_lock);
count++;
numpages--;
}
@@ -653,7 +653,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
panic("add_bounce_page: map doesn't need any pages");
map->pagesreserved--;
- mtx_enter(&bounce_lock, MTX_DEF);
+ mtx_lock(&bounce_lock);
bpage = STAILQ_FIRST(&bounce_page_list);
if (bpage == NULL)
panic("add_bounce_page: free page list is empty");
@@ -661,7 +661,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
STAILQ_REMOVE_HEAD(&bounce_page_list, links);
reserved_bpages--;
active_bpages++;
- mtx_exit(&bounce_lock, MTX_DEF);
+ mtx_unlock(&bounce_lock);
bpage->datavaddr = vaddr;
bpage->datacount = size;
@@ -677,7 +677,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
bpage->datavaddr = 0;
bpage->datacount = 0;
- mtx_enter(&bounce_lock, MTX_DEF);
+ mtx_lock(&bounce_lock);
STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
free_bpages++;
active_bpages--;
@@ -690,7 +690,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
sched_swi(vm_ih, SWI_NOSWITCH);
}
}
- mtx_exit(&bounce_lock, MTX_DEF);
+ mtx_unlock(&bounce_lock);
}
void
@@ -698,13 +698,13 @@ busdma_swi(void)
{
struct bus_dmamap *map;
- mtx_enter(&bounce_lock, MTX_DEF);
+ mtx_lock(&bounce_lock);
while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
- mtx_exit(&bounce_lock, MTX_DEF);
+ mtx_unlock(&bounce_lock);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
map->callback, map->callback_arg, /*flags*/0);
- mtx_enter(&bounce_lock, MTX_DEF);
+ mtx_lock(&bounce_lock);
}
- mtx_exit(&bounce_lock, MTX_DEF);
+ mtx_unlock(&bounce_lock);
}
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index 57d0da5..f2b431f 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -560,7 +560,7 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
"alpha_dispatch_intr: disabling vector 0x%x", i->vector);
i->disable(i->vector);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
/* not on the run queue and not running */
CTR1(KTR_INTR, "alpha_dispatch_intr: setrunqueue %d",
@@ -587,7 +587,7 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
ithd->it_proc->p_pid, ithd->it_need, ithd->it_proc->p_stat);
need_resched();
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
void
@@ -626,10 +626,10 @@ ithd_loop(void *dummy)
ih->ih_flags);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
@@ -646,7 +646,7 @@ ithd_loop(void *dummy)
* set again, so we have to check it again.
*/
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!ithd->it_need) {
ithd->it_proc->p_stat = SWAIT; /* we're idle */
CTR1(KTR_INTR, "ithd_loop pid %d: done",
@@ -655,7 +655,7 @@ ithd_loop(void *dummy)
CTR1(KTR_INTR, "ithd_loop pid %d: resumed",
ithd->it_proc->p_pid);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c
index c76665a..bbfe946 100644
--- a/sys/alpha/alpha/machdep.c
+++ b/sys/alpha/alpha/machdep.c
@@ -1012,7 +1012,7 @@ alpha_init(pfn, ptb, bim, bip, biv)
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* Look at arguments passed to us and compute boothowto.
diff --git a/sys/alpha/alpha/mp_machdep.c b/sys/alpha/alpha/mp_machdep.c
index bf61c11..b40ffa4 100644
--- a/sys/alpha/alpha/mp_machdep.c
+++ b/sys/alpha/alpha/mp_machdep.c
@@ -162,7 +162,7 @@ smp_init_secondary(void)
alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("smp_init_secondary: called\n");
CTR0(KTR_SMP, "smp_init_secondary");
@@ -176,7 +176,7 @@ smp_init_secondary(void)
spl0();
smp_ipi_all(0);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
extern void smp_init_secondary_glue(void);
@@ -657,14 +657,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -682,9 +682,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -841,7 +841,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -858,7 +858,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
/*
diff --git a/sys/alpha/alpha/procfs_machdep.c b/sys/alpha/alpha/procfs_machdep.c
index 229d2f9..c0766ca 100644
--- a/sys/alpha/alpha/procfs_machdep.c
+++ b/sys/alpha/alpha/procfs_machdep.c
@@ -86,12 +86,12 @@ procfs_read_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_regs(p, regs));
}
@@ -101,12 +101,12 @@ procfs_write_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_regs(p, regs));
}
@@ -121,12 +121,12 @@ procfs_read_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_fpregs(p, fpregs));
}
@@ -136,12 +136,12 @@ procfs_write_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_fpregs(p, fpregs));
}
diff --git a/sys/alpha/alpha/trap.c b/sys/alpha/alpha/trap.c
index 7aa7362..eada017 100644
--- a/sys/alpha/alpha/trap.c
+++ b/sys/alpha/alpha/trap.c
@@ -106,10 +106,10 @@ userret(p, frame, oticks)
/* take pending signals */
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (want_resched) {
/*
@@ -125,30 +125,30 @@ userret(p, frame, oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
splx(s);
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* If profiling, charge recent system time to the trapped pc.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_regs[FRAME_PC],
(int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
static void
@@ -230,9 +230,9 @@ trap(a0, a1, a2, entry, framep)
ucode = 0;
user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
if (user) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_tf = framep;
#if 0
/* This is to catch some weird stuff on the UDB (mj) */
@@ -259,12 +259,12 @@ trap(a0, a1, a2, entry, framep)
* and per-process unaligned-access-handling flags).
*/
if (user) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if ((i = unaligned_fixup(a0, a1, a2, p)) == 0) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
ucode = a0; /* VA */
break;
}
@@ -288,13 +288,13 @@ trap(a0, a1, a2, entry, framep)
* is not requested or if the completion fails.
*/
if (user) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (a0 & EXCSUM_SWC)
if (fp_software_completion(a1, p)) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
i = SIGFPE;
ucode = a0; /* exception summary */
break;
@@ -415,7 +415,7 @@ trap(a0, a1, a2, entry, framep)
goto out;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* It is only a kernel address space fault iff:
* 1. !user and
@@ -529,11 +529,11 @@ trap(a0, a1, a2, entry, framep)
rv = KERN_INVALID_ADDRESS;
}
if (rv == KERN_SUCCESS) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (!user) {
/* Check for copyin/copyout fault */
if (p != NULL &&
@@ -575,7 +575,7 @@ out:
framep->tf_regs[FRAME_SP] = alpha_pal_rdusp();
userret(p, framep, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
return;
@@ -621,7 +621,7 @@ syscall(code, framep)
* Find our per-cpu globals.
*/
globalp = (struct globaldata *) alpha_pal_rdval();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
framep->tf_regs[FRAME_TRAPARG_A0] = 0;
framep->tf_regs[FRAME_TRAPARG_A1] = 0;
@@ -635,9 +635,9 @@ syscall(code, framep)
p = curproc;
p->p_md.md_tf = framep;
opc = framep->tf_regs[FRAME_PC] - 4;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
#ifdef DIAGNOSTIC
alpha_fpstate_check(p);
@@ -739,7 +739,7 @@ syscall(code, framep)
* is not the case, this code will need to be revisited.
*/
STOPEVENT(p, S_SCX, code);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -763,9 +763,9 @@ ast(framep)
u_quad_t sticks;
p = curproc;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_tf = framep;
if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0)
@@ -774,36 +774,36 @@ ast(framep)
cnt.v_soft++;
PCPU_SET(astpending, 0);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, framep, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index 6d96337..851b1878 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -253,8 +253,8 @@ cpu_exit(p)
{
alpha_fpstate_drop(p);
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -437,7 +437,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -466,7 +466,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
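
The vm_machdep.c hunks above also show the two less common forms: a release that must not trigger a context switch keeps its flag via mtx_unlock_flags(), and the conditional acquire mtx_try_enter() becomes mtx_trylock(). A small sketch of those call shapes, with the surrounding logic reduced to comments:

	/* Old: mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH); */
	mtx_unlock_flags(&Giant, MTX_NOSWITCH);	/* release without switching */

	/* Old: if (mtx_try_enter(&Giant, MTX_DEF)) ... */
	if (mtx_trylock(&Giant)) {		/* non-zero on success */
		/* ... opportunistic, idle-time work ... */
		mtx_unlock(&Giant);
	}
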
diff --git a/sys/alpha/include/cpu.h b/sys/alpha/include/cpu.h
index 515228a..8ecaaf3 100644
--- a/sys/alpha/include/cpu.h
+++ b/sys/alpha/include/cpu.h
@@ -81,9 +81,9 @@ struct clockframe {
* through trap, marking the proc as needing a profiling tick.
*/
#define need_proftick(p) do { \
- mtx_enter(&sched_lock, MTX_SPIN); \
+ mtx_lock_spin(&sched_lock); \
(p)->p_sflag |= PS_OWEUPC; \
- mtx_exit(&sched_lock, MTX_SPIN); \
+ mtx_unlock_spin(&sched_lock); \
aston(); \
} while (0)
diff --git a/sys/alpha/include/mutex.h b/sys/alpha/include/mutex.h
index 40717b0..564ad19 100644
--- a/sys/alpha/include/mutex.h
+++ b/sys/alpha/include/mutex.h
@@ -39,26 +39,12 @@
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "ps & IPL == IPL_0";
-char STR_IDIS[] = "ps & IPL == IPL_HIGH";
-char STR_SIEN[] = "mpp->mtx_saveintr == IPL_0";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-
-#endif /* MUTEX_DEBUG */
-
#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
- == ALPHA_PSL_IPL_0, STR_IEN)
+ == ALPHA_PSL_IPL_0, "ps & IPL == IPL_0")
#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
- == ALPHA_PSL_IPL_HIGH, STR_IDIS)
+ == ALPHA_PSL_IPL_HIGH, "ps & IPL == IPL_HIGH")
#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr \
- == ALPHA_PSL_IPL_0, STR_SIEN)
+ == ALPHA_PSL_IPL_0, "mpp->mtx_saveintr == IPL_0")
#define mtx_legal2block() \
((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_0)
@@ -68,34 +54,33 @@ extern char STR_SIEN[];
*--------------------------------------------------------------------------
*/
-#ifdef _KERN_MUTEX_C_
-
-#define _V(x) __STRING(x)
-
/*
- * Get a spin lock, handle recusion inline (as the less common case)
+ * Get a spin lock, handle recusion inline.
*/
-
-#define _getlock_spin_block(mp, tid, type) do { \
+#define _get_spin_lock(mp, tid, opts) do { \
u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl); \
- else { \
+ if (!_obtain_lock((mp), (tid))) { \
+ if ((mp)->mtx_lock == (uintptr_t)(tid)) \
+ (mp)->mtx_recurse++; \
+ else \
+ _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \
+ __LINE__); \
+ } else { \
alpha_mb(); \
(mp)->mtx_saveintr = _ipl; \
} \
} while (0)
-#undef _V
-
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release non-recursive spin locks
+ *
+ * XXX: These are presently unused and cannot be used right now. Need to be
+ * re-written (they are wrong). If you plan to use this and still see
+ * this message, know not to unless you fix them first! :-)
*/
#define MTX_ENTER(lck) \
ldiq a0, ALPHA_PSL_IPL_HIGH; \
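
The new _get_spin_lock() macro above folds recursion handling into the fast path: raise the IPL, try the atomic acquire, and on failure either bump mtx_recurse (when the current thread already owns the lock) or fall back to the _mtx_lock_spin() slow path, which now receives __FILE__/__LINE__ directly instead of the removed STR_* string globals. A rough C rendering of that control flow, annotated step by step (all names are taken from the macro itself):

	u_int ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); /* block interrupts first */
	if (!_obtain_lock(mp, tid)) {			/* atomic acquire failed */
		if (mp->mtx_lock == (uintptr_t)tid)
			mp->mtx_recurse++;		/* already ours: recurse inline */
		else
			_mtx_lock_spin(mp, opts, ipl,	/* contended: spin in the C slow path */
			    __FILE__, __LINE__);
	} else {
		alpha_mb();				/* acquire memory barrier */
		mp->mtx_saveintr = ipl;			/* remember IPL for the unlock */
	}
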
diff --git a/sys/alpha/osf1/osf1_misc.c b/sys/alpha/osf1/osf1_misc.c
index e6cc37c..90dbc62 100644
--- a/sys/alpha/osf1/osf1_misc.c
+++ b/sys/alpha/osf1/osf1_misc.c
@@ -1341,9 +1341,9 @@ osf1_getrusage(p, uap)
switch (uap->who) {
case RUSAGE_SELF:
rup = &p->p_stats->p_ru;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
break;
case RUSAGE_CHILDREN:
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index a729e0f..0dab6ae 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -724,7 +724,7 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
PCPU_GET(npxproc), curproc, npx_exists);
@@ -783,7 +783,7 @@ npx_intr(dummy)
*/
psignal(curproc, SIGFPE);
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index d5c7ece..ea1703c 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1910,7 +1910,7 @@ init386(first)
* Giant is used early for at least debugger traps and unexpected traps.
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* make ldt memory segments */
/*
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 9d53cd7..2802750 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index 9d53cd7..2802750 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index b4373b3..533d791 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -174,11 +174,11 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (resched_wanted()) {
/*
@@ -193,30 +193,30 @@ userret(p, frame, oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* Charge system time if profiling.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* XXX - do we need Giant? */
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_eip,
(u_int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -282,9 +282,9 @@ restart:
((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
/* user trap */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
switch (type) {
@@ -312,9 +312,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0)
goto user;
break;
@@ -339,9 +339,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = trap_pfault(&frame, TRUE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
if (i == -2) {
/*
@@ -371,13 +371,13 @@ restart:
#ifndef TIMER_FREQ
# define TIMER_FREQ 1193182
#endif
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* machine/parity/power fail/"kitchen sink" faults */
@@ -421,9 +421,9 @@ restart:
ucode = FPE_FPU_NP_TRAP;
break;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = (*pmath_emulate)(&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0) {
if (!(frame.tf_eflags & PSL_T))
goto out;
@@ -452,9 +452,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(void) trap_pfault(&frame, FALSE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
case T_DNA:
@@ -477,9 +477,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i != 0)
/*
* returns to original process
@@ -510,9 +510,9 @@ restart:
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
PCPU_GET(curpcb)->pcb_gs = 0;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGBUS);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
@@ -621,13 +621,13 @@ restart:
#ifdef DEV_ISA
case T_NMI:
#ifdef POWERFAIL_NMI
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* XXX Giant */
@@ -651,13 +651,13 @@ restart:
#endif /* DEV_ISA */
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
trap_fatal(&frame, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* Translate fault for emulators (e.g. Linux) */
if (*p->p_sysent->sv_transtrap)
i = (*p->p_sysent->sv_transtrap)(i, type);
@@ -673,12 +673,12 @@ restart:
uprintf("\n");
}
#endif
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
user:
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
out:
return;
}
@@ -1103,15 +1103,15 @@ syscall2(frame)
#ifdef DIAGNOSTIC
if (ISPL(frame.tf_cs) != SEL_UPL) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
panic("syscall");
/* NOT REACHED */
}
#endif
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
params = (caddr_t)frame.tf_esp + sizeof(int);
@@ -1121,9 +1121,9 @@ syscall2(frame)
/*
* The prep code is not MP aware.
*/
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
} else {
/*
* Need to check if this is a 32 bit or 64 bit syscall.
@@ -1160,7 +1160,7 @@ syscall2(frame)
*/
if (params && (i = narg * sizeof(int)) &&
(error = copyin(params, (caddr_t)args, (u_int)i))) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, narg, args);
@@ -1174,13 +1174,13 @@ syscall2(frame)
* we are ktracing
*/
if ((callp->sy_narg & SYF_MPSAFE) == 0) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsyscall(p->p_tracep, code, narg, args);
}
#endif
@@ -1230,7 +1230,7 @@ bad:
*/
if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
frame.tf_eflags &= ~PSL_T;
trapsignal(p, SIGTRAP, 0);
}
@@ -1243,7 +1243,7 @@ bad:
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
}
#endif
@@ -1259,7 +1259,7 @@ bad:
* Release Giant if we had to get it
*/
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -1278,38 +1278,38 @@ ast(frame)
struct proc *p = CURPROC;
u_quad_t sticks;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
astoff();
atomic_add_int(&cnt.v_soft, 1);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
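
Throughout the ast() and userret() hunks above, sched_lock (a spin mutex) is dropped before Giant (a sleep mutex) is acquired and is then re-taken, since a sleep lock may not be acquired while a spin lock is held. A minimal sketch of that bracketing, using the flag and signal names from the diff and omitting the other flag checks:

	mtx_lock_spin(&sched_lock);
	if (p->p_sflag & PS_ALRMPEND) {
		p->p_sflag &= ~PS_ALRMPEND;
		mtx_unlock_spin(&sched_lock);	/* drop the spin lock before the sleep lock */
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);
		psignal(p, SIGVTALRM);
		mtx_lock_spin(&sched_lock);	/* re-take to test the next flag */
	}
	mtx_unlock_spin(&sched_lock);
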
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index d7a1ff0..bbd066b 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index eddae55..434587d 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -261,8 +261,8 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -574,7 +574,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
@@ -597,7 +597,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index a1d47f0..0b99ec6 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -92,9 +92,9 @@
* counter in the proc table and flag isn't really necessary.
*/
#define need_proftick(p) do { \
- mtx_enter(&sched_lock, MTX_SPIN); \
+ mtx_lock_spin(&sched_lock); \
(p)->p_sflag |= PS_OWEUPC; \
- mtx_exit(&sched_lock, MTX_SPIN); \
+ mtx_unlock_spin(&sched_lock); \
aston(); \
} while (0)
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index 9d53cd7..2802750 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/amd64/include/mutex.h b/sys/amd64/include/mutex.h
index 2f16de3..c4fe210 100644
--- a/sys/amd64/include/mutex.h
+++ b/sys/amd64/include/mutex.h
@@ -43,22 +43,10 @@ extern struct mtx clock_lock;
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "fl & PSL_I";
-char STR_IDIS[] = "!(fl & PSL_I)";
-char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-#endif /* MUTEX_DEBUG */
-
-#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
-#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, "fl & PSL_I")
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, "!(fl & PSL_I)")
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, \
+ "mpp->mtx_saveintr & PSL_I")
#define mtx_legal2block() (read_eflags() & PSL_I)
@@ -66,9 +54,6 @@ extern char STR_SIEN[];
* Assembly macros (for internal use only)
*------------------------------------------------------------------------------
*/
-
-#ifdef _KERN_MUTEX_C_
-
#define _V(x) __STRING(x)
#if 0
@@ -252,22 +237,80 @@ extern char STR_SIEN[];
#undef _V
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release mutexes.
+ *
+ * Note: All of these macros accept a "flags" argument and are analoguous
+ * to the mtx_lock_flags and mtx_unlock_flags general macros. If one
+ * desires to not pass a flag, the value 0 may be passed as second
+ * argument.
+ *
+ * XXX: We only have MTX_LOCK_SPIN and MTX_UNLOCK_SPIN for now, since that's
+ * all we use right now. We should add MTX_LOCK and MTX_UNLOCK (for sleep
+ * locks) in the near future, however.
*/
+#define MTX_LOCK_SPIN(lck, flags) \
+ pushl %eax ; \
+ pushl %ecx ; \
+ pushl %ebx ; \
+ movl $(MTX_UNOWNED) , %eax ; \
+ movl PCPU(CURPROC), %ebx ; \
+ pushfl ; \
+ popl %ecx ; \
+ cli ; \
+ MPLOCKED cmpxchgl %ebx, lck+MTX_LOCK ; \
+ jz 2f ; \
+ cmpl lck+MTX_LOCK, %ebx ; \
+ je 3f ; \
+ pushl $0 ; \
+ pushl $0 ; \
+ pushl %ecx ; \
+ pushl $flags ; \
+ pushl $lck ; \
+ call _mtx_lock_spin ; \
+ addl $0x14, %esp ; \
+ jmp 1f ; \
+3: movl lck+MTX_RECURSECNT, %ebx ; \
+ incl %ebx ; \
+ movl %ebx, lck+MTX_RECURSECNT ; \
+ jmp 1f ; \
+2: movl %ecx, lck+MTX_SAVEINTR ; \
+1: popl %ebx ; \
+ popl %ecx ; \
+ popl %eax
+
+#define MTX_UNLOCK_SPIN(lck) \
+ pushl %edx ; \
+ pushl %eax ; \
+ movl lck+MTX_SAVEINTR, %edx ; \
+ movl lck+MTX_RECURSECNT, %eax ; \
+ testl %eax, %eax ; \
+ jne 2f ; \
+ movl $(MTX_UNOWNED), %eax ; \
+ xchgl %eax, lck+MTX_LOCK ; \
+ pushl %edx ; \
+ popfl ; \
+ jmp 1f ; \
+2: decl %eax ; \
+ movl %eax, lck+MTX_RECURSECNT ; \
+1: popl %eax ; \
+ popl %edx
+/*
+ * XXX: These two are broken right now and need to be made to work for
+ * XXX: sleep locks, as the above two work for spin locks. We're not in
+ * XXX: too much of a rush to do these as we do not use them right now.
+ */
#define MTX_ENTER(lck, type) \
pushl $0 ; /* dummy __LINE__ */ \
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_enter ; \
+ call _mtx_lock_XXX ; \
addl $16,%esp
#define MTX_EXIT(lck, type) \
@@ -275,7 +318,7 @@ extern char STR_SIEN[];
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_exit ; \
+ call _mtx_unlock_XXX ; \
addl $16,%esp
#endif /* !LOCORE */
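
The MTX_LOCK_SPIN assembly macro added above saves the caller's eflags, disables interrupts with cli, attempts the acquire with a locked cmpxchg against MTX_UNOWNED, bumps the recursion count on a self-recursive acquire, and otherwise calls _mtx_lock_spin() with dummy file/line arguments; MTX_UNLOCK_SPIN reverses the sequence and restores eflags. A rough C equivalent, for orientation only: the struct field names follow the layout used elsewhere in the diff, and _obtain_lock() (from the alpha macro) stands in for the raw cmpxchg.

	u_int eflags = read_eflags();			/* pushfl ; popl %ecx */
	disable_intr();					/* cli */
	if (_obtain_lock(lck, CURPROC))			/* MPLOCKED cmpxchgl succeeded */
		lck->mtx_saveintr = eflags;		/* label 2: fast path, save eflags */
	else if (lck->mtx_lock == (uintptr_t)CURPROC)
		lck->mtx_recurse++;			/* label 3: recursive acquire */
	else
		_mtx_lock_spin(lck, flags, eflags,	/* slow path; the asm passes */
		    NULL, 0);				/* dummy file/line arguments */
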
diff --git a/sys/amd64/include/profile.h b/sys/amd64/include/profile.h
index 28db117..5f2a780 100644
--- a/sys/amd64/include/profile.h
+++ b/sys/amd64/include/profile.h
@@ -66,8 +66,8 @@
#ifdef SMP
#define MCOUNT_ENTER(s) { s = read_eflags(); \
__asm __volatile("cli" : : : "memory"); \
- mtx_enter(&mcount_mtx, MTX_DEF); }
-#define MCOUNT_EXIT(s) { mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
+ mtx_lock(&mcount_mtx); }
+#define MCOUNT_EXIT(s) { mtx_unlock(&mcount_mtx); write_eflags(s); }
#else
#define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); }
#define MCOUNT_EXIT(s) (write_eflags(s))
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index d7a1ff0..bbd066b 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/amd64/isa/intr_machdep.c b/sys/amd64/isa/intr_machdep.c
index d44a672..70b9378 100644
--- a/sys/amd64/isa/intr_machdep.c
+++ b/sys/amd64/isa/intr_machdep.c
@@ -701,7 +701,7 @@ inthand_remove(struct intrhand *idesc)
ithds[ithd->irq] = NULL;
if ((idesc->ih_flags & INTR_FAST) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
ithd->it_proc->p_intr_nesting_level = 0;
ithd->it_proc->p_stat = SRUN;
@@ -713,7 +713,7 @@ inthand_remove(struct intrhand *idesc)
* XXX: should we lower the threads priority?
*/
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
free(idesc->ih_name, M_DEVBUF);
diff --git a/sys/amd64/isa/ithread.c b/sys/amd64/isa/ithread.c
index 5f64861..99a1abf 100644
--- a/sys/amd64/isa/ithread.c
+++ b/sys/amd64/isa/ithread.c
@@ -114,7 +114,7 @@ sched_ithd(void *cookie)
* is higher priority than their current thread, it gets run now.
*/
ir->it_need = 1;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ir->it_proc->p_stat == SWAIT) { /* not on run queue */
CTR1(KTR_INTR, "sched_ithd: setrunqueue %d",
ir->it_proc->p_pid);
@@ -134,7 +134,7 @@ sched_ithd(void *cookie)
ir->it_proc->p_stat );
need_resched();
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -163,7 +163,7 @@ ithd_loop(void *dummy)
me->it_proc->p_pid, me->it_proc->p_comm);
curproc->p_ithd = NULL;
free(me, M_DEVBUF);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
kthread_exit(0);
}
@@ -188,10 +188,10 @@ ithd_loop(void *dummy)
ih->ih_flags);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
}
@@ -201,7 +201,7 @@ ithd_loop(void *dummy)
* set again, so we have to check it again.
*/
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!me->it_need) {
INTREN (1 << me->irq); /* reset the mask bit */
@@ -217,6 +217,6 @@ ithd_loop(void *dummy)
CTR1(KTR_INTR, "ithd_loop pid %d: resumed",
me->it_proc->p_pid);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/amd64/isa/nmi.c b/sys/amd64/isa/nmi.c
index d44a672..70b9378 100644
--- a/sys/amd64/isa/nmi.c
+++ b/sys/amd64/isa/nmi.c
@@ -701,7 +701,7 @@ inthand_remove(struct intrhand *idesc)
ithds[ithd->irq] = NULL;
if ((idesc->ih_flags & INTR_FAST) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
ithd->it_proc->p_intr_nesting_level = 0;
ithd->it_proc->p_stat = SRUN;
@@ -713,7 +713,7 @@ inthand_remove(struct intrhand *idesc)
* XXX: should we lower the threads priority?
*/
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
free(idesc->ih_name, M_DEVBUF);
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index a729e0f..0dab6ae 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -724,7 +724,7 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
PCPU_GET(npxproc), curproc, npx_exists);
@@ -783,7 +783,7 @@ npx_intr(dummy)
*/
psignal(curproc, SIGFPE);
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index 2e23fe7..749d16a 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -454,12 +454,12 @@ linprocfs_doprocstatus(curp, p, pfs, uio)
sbuf_new(&sb, NULL, 1024, 0);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat > sizeof state_str / sizeof *state_str)
state = state_str[0];
else
state = state_str[(int)p->p_stat];
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_SHARED);
ppid = p->p_pptr ? p->p_pptr->p_pid : 0;
diff --git a/sys/compat/linprocfs/linprocfs_misc.c b/sys/compat/linprocfs/linprocfs_misc.c
index 2e23fe7..749d16a 100644
--- a/sys/compat/linprocfs/linprocfs_misc.c
+++ b/sys/compat/linprocfs/linprocfs_misc.c
@@ -454,12 +454,12 @@ linprocfs_doprocstatus(curp, p, pfs, uio)
sbuf_new(&sb, NULL, 1024, 0);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat > sizeof state_str / sizeof *state_str)
state = state_str[0];
else
state = state_str[(int)p->p_stat];
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_SHARED);
ppid = p->p_pptr ? p->p_pptr->p_pid : 0;
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index 0caff43..822ef8b 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -642,9 +642,9 @@ linux_times(struct proc *p, struct linux_times_args *args)
#ifdef DEBUG
printf("Linux-emul(%ld): times(*)\n", (long)p->p_pid);
#endif
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &ru.ru_utime, &ru.ru_stime, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
tms.tms_utime = CONVTCK(ru.ru_utime);
tms.tms_stime = CONVTCK(ru.ru_stime);
diff --git a/sys/compat/svr4/svr4_misc.c b/sys/compat/svr4/svr4_misc.c
index b976c2d..680109b 100644
--- a/sys/compat/svr4/svr4_misc.c
+++ b/sys/compat/svr4/svr4_misc.c
@@ -1141,7 +1141,7 @@ svr4_setinfo(p, st, s)
if (p) {
i.si_pid = p->p_pid;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SZOMB) {
i.si_stime = p->p_ru->ru_stime.tv_sec;
i.si_utime = p->p_ru->ru_utime.tv_sec;
@@ -1150,7 +1150,7 @@ svr4_setinfo(p, st, s)
i.si_stime = p->p_stats->p_ru.ru_stime.tv_sec;
i.si_utime = p->p_stats->p_ru.ru_utime.tv_sec;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
if (WIFEXITED(st)) {
@@ -1226,10 +1226,10 @@ loop:
}
nfound++;
PROC_LOCK(q);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (q->p_stat == SZOMB &&
((SCARG(uap, options) & (SVR4_WEXITED|SVR4_WTRAPPED)))) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(q);
PROCTREE_LOCK(PT_RELEASE);
*retval = 0;
@@ -1357,7 +1357,7 @@ loop:
if (q->p_stat == SSTOP && (q->p_flag & P_WAITED) == 0 &&
(q->p_flag & P_TRACED ||
(SCARG(uap, options) & (SVR4_WSTOPPED|SVR4_WCONTINUED)))) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
DPRINTF(("jobcontrol %d\n", q->p_pid));
if (((SCARG(uap, options) & SVR4_WNOWAIT)) == 0)
q->p_flag |= P_WAITED;
@@ -1366,7 +1366,7 @@ loop:
return svr4_setinfo(q, W_STOPCODE(q->p_xstat),
SCARG(uap, info));
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(q);
}
diff --git a/sys/dev/acpica/Osd/OsdSynch.c b/sys/dev/acpica/Osd/OsdSynch.c
index 79ae4f5..dacfc5c 100644
--- a/sys/dev/acpica/Osd/OsdSynch.c
+++ b/sys/dev/acpica/Osd/OsdSynch.c
@@ -139,7 +139,7 @@ AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT32 Timeout)
tmo = 1;
}
- mtx_enter(&as->as_mtx, MTX_DEF);
+ mtx_lock(&as->as_mtx);
DEBUG_PRINT(TRACE_MUTEX, ("get %d units from semaphore %p (has %d), timeout %d\n",
Units, as, as->as_units, Timeout));
for (;;) {
@@ -163,7 +163,7 @@ AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT32 Timeout)
break;
}
}
- mtx_exit(&as->as_mtx, MTX_DEF);
+ mtx_unlock(&as->as_mtx);
return_ACPI_STATUS(result);
#else
@@ -182,14 +182,14 @@ AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
if (as == NULL)
return_ACPI_STATUS(AE_BAD_PARAMETER);
- mtx_enter(&as->as_mtx, MTX_DEF);
+ mtx_lock(&as->as_mtx);
DEBUG_PRINT(TRACE_MUTEX, ("return %d units to semaphore %p (has %d)\n",
Units, as, as->as_units));
as->as_units += Units;
if (as->as_units > as->as_maxunits)
as->as_units = as->as_maxunits;
wakeup(as);
- mtx_exit(&as->as_mtx, MTX_DEF);
+ mtx_unlock(&as->as_mtx);
return_ACPI_STATUS(AE_OK);
#else
return(AE_OK);
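The OsdSynch.c hunks show the default (sleep) mutex half: mtx_enter(m, MTX_DEF) becomes mtx_lock(m) and mtx_exit(m, MTX_DEF) becomes mtx_unlock(m), with the wakeup() of any waiter still issued while the mutex is held. A sketch of that producer side under the new names, using placeholder identifiers (sem_mtx, sem_units and sem_signal are illustrative, not from the patch; the mutex is assumed to have been set up with mtx_init(&sem_mtx, "sem", MTX_DEF)):

	static struct mtx sem_mtx;
	static int sem_units;

	static void
	sem_signal(int n)
	{
		mtx_lock(&sem_mtx);	/* was: mtx_enter(&sem_mtx, MTX_DEF) */
		sem_units += n;
		wakeup(&sem_units);	/* wake any thread sleeping on the count */
		mtx_unlock(&sem_mtx);	/* was: mtx_exit(&sem_mtx, MTX_DEF) */
	}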
diff --git a/sys/dev/an/if_anreg.h b/sys/dev/an/if_anreg.h
index 9d67a3a..241cd65 100644
--- a/sys/dev/an/if_anreg.h
+++ b/sys/dev/an/if_anreg.h
@@ -844,8 +844,8 @@ struct an_softc {
device_t an_dev;
};
-#define AN_LOCK(_sc) mtx_enter(&(_sc)->an_mtx, MTX_DEF)
-#define AN_UNLOCK(_sc) mtx_exit(&(_sc)->an_mtx, MTX_DEF)
+#define AN_LOCK(_sc) mtx_lock(&(_sc)->an_mtx)
+#define AN_UNLOCK(_sc) mtx_unlock(&(_sc)->an_mtx)
void an_release_resources __P((device_t));
int an_alloc_port __P((device_t, int, int));
diff --git a/sys/dev/dc/if_dcreg.h b/sys/dev/dc/if_dcreg.h
index 2327fa6..56f441f 100644
--- a/sys/dev/dc/if_dcreg.h
+++ b/sys/dev/dc/if_dcreg.h
@@ -702,8 +702,8 @@ struct dc_softc {
};
-#define DC_LOCK(_sc) mtx_enter(&(_sc)->dc_mtx, MTX_DEF)
-#define DC_UNLOCK(_sc) mtx_exit(&(_sc)->dc_mtx, MTX_DEF)
+#define DC_LOCK(_sc) mtx_lock(&(_sc)->dc_mtx)
+#define DC_UNLOCK(_sc) mtx_unlock(&(_sc)->dc_mtx)
#define DC_TX_POLL 0x00000001
#define DC_TX_COALESCE 0x00000002
diff --git a/sys/dev/fxp/if_fxpvar.h b/sys/dev/fxp/if_fxpvar.h
index aee009c..7a9eb8d 100644
--- a/sys/dev/fxp/if_fxpvar.h
+++ b/sys/dev/fxp/if_fxpvar.h
@@ -86,5 +86,5 @@ struct fxp_softc {
#define sc_if arpcom.ac_if
#define FXP_UNIT(_sc) (_sc)->arpcom.ac_if.if_unit
-#define FXP_LOCK(_sc) mtx_enter(&(_sc)->sc_mtx, MTX_DEF)
-#define FXP_UNLOCK(_sc) mtx_exit(&(_sc)->sc_mtx, MTX_DEF)
+#define FXP_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define FXP_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
diff --git a/sys/dev/ichsmb/ichsmb.c b/sys/dev/ichsmb/ichsmb.c
index 9bbc7db..71b9b7a 100644
--- a/sys/dev/ichsmb/ichsmb.c
+++ b/sys/dev/ichsmb/ichsmb.c
@@ -167,7 +167,7 @@ ichsmb_quick(device_t dev, u_char slave, int how)
switch (how) {
case SMB_QREAD:
case SMB_QWRITE:
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_QUICK;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | (how == SMB_QREAD ?
@@ -175,7 +175,7 @@ ichsmb_quick(device_t dev, u_char slave, int how)
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT,
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
smb_error = ichsmb_wait(sc);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
break;
default:
smb_error = SMB_ENOTSUPP;
@@ -193,7 +193,7 @@ ichsmb_sendb(device_t dev, u_char slave, char byte)
DBG("slave=0x%02x byte=0x%02x\n", slave, (u_char)byte);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_WRITE);
@@ -201,7 +201,7 @@ ichsmb_sendb(device_t dev, u_char slave, char byte)
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT,
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
smb_error = ichsmb_wait(sc);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d\n", smb_error);
return (smb_error);
}
@@ -215,7 +215,7 @@ ichsmb_recvb(device_t dev, u_char slave, char *byte)
DBG("slave=0x%02x\n", slave);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_READ);
@@ -223,7 +223,7 @@ ichsmb_recvb(device_t dev, u_char slave, char *byte)
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR)
*byte = bus_space_read_1(sc->io_bst, sc->io_bsh, ICH_D0);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte);
return (smb_error);
}
@@ -238,7 +238,7 @@ ichsmb_writeb(device_t dev, u_char slave, char cmd, char byte)
slave, (u_char)cmd, (u_char)byte);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE_DATA;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_WRITE);
@@ -247,7 +247,7 @@ ichsmb_writeb(device_t dev, u_char slave, char cmd, char byte)
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT,
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
smb_error = ichsmb_wait(sc);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d\n", smb_error);
return (smb_error);
}
@@ -262,7 +262,7 @@ ichsmb_writew(device_t dev, u_char slave, char cmd, short word)
slave, (u_char)cmd, (u_int16_t)word);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_WORD_DATA;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_WRITE);
@@ -272,7 +272,7 @@ ichsmb_writew(device_t dev, u_char slave, char cmd, short word)
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT,
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
smb_error = ichsmb_wait(sc);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d\n", smb_error);
return (smb_error);
}
@@ -286,7 +286,7 @@ ichsmb_readb(device_t dev, u_char slave, char cmd, char *byte)
DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE_DATA;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_READ);
@@ -295,7 +295,7 @@ ichsmb_readb(device_t dev, u_char slave, char cmd, char *byte)
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR)
*byte = bus_space_read_1(sc->io_bst, sc->io_bsh, ICH_D0);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte);
return (smb_error);
}
@@ -309,7 +309,7 @@ ichsmb_readw(device_t dev, u_char slave, char cmd, short *word)
DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_WORD_DATA;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_READ);
@@ -322,7 +322,7 @@ ichsmb_readw(device_t dev, u_char slave, char cmd, short *word)
| (bus_space_read_1(sc->io_bst,
sc->io_bsh, ICH_D1) << 8);
}
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d word=0x%04x\n", smb_error, (u_int16_t)*word);
return (smb_error);
}
@@ -337,7 +337,7 @@ ichsmb_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata)
slave, (u_char)cmd, (u_int16_t)sdata);
KASSERT(sc->ich_cmd == -1,
("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd));
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_PROC_CALL;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_WRITE);
@@ -352,7 +352,7 @@ ichsmb_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata)
| (bus_space_read_1(sc->io_bst,
sc->io_bsh, ICH_D1) << 8);
}
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d rdata=0x%04x\n", smb_error, (u_int16_t)*rdata);
return (smb_error);
}
@@ -388,7 +388,7 @@ ichsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf)
sc->block_index = 1;
sc->block_write = 1;
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BLOCK;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_WRITE);
@@ -398,7 +398,7 @@ ichsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf)
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT,
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
smb_error = ichsmb_wait(sc);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d\n", smb_error);
return (smb_error);
}
@@ -419,7 +419,7 @@ ichsmb_bread(device_t dev, u_char slave, char cmd, u_char count, char *buf)
sc->block_index = 0;
sc->block_write = 0;
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BLOCK;
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA,
(slave << 1) | ICH_XMIT_SLVA_READ);
@@ -429,7 +429,7 @@ ichsmb_bread(device_t dev, u_char slave, char cmd, u_char count, char *buf)
ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd);
if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR)
bcopy(sc->block_data, buf, sc->block_count);
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
DBG("smb_error=%d\n", smb_error);
#if ICHSMB_DEBUG
#define DISP(ch) (((ch) < 0x20 || (ch) >= 0x7e) ? '.' : (ch))
@@ -491,7 +491,7 @@ ichsmb_device_intr(void *cookie)
int cmd_index;
int count;
- mtx_enter(&sc->mutex, MTX_DEF);
+ mtx_lock(&sc->mutex);
for (count = 0; count < maxloops; count++) {
/* Get and reset status bits */
@@ -603,7 +603,7 @@ finished:
/* Clear status bits and try again */
bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_STA, status);
}
- mtx_exit(&sc->mutex, MTX_DEF);
+ mtx_unlock(&sc->mutex);
/* Too many loops? */
if (count == maxloops) {
diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c
index ecf9e71..72fb71b 100644
--- a/sys/dev/isp/isp_freebsd.c
+++ b/sys/dev/isp/isp_freebsd.c
@@ -1992,9 +1992,9 @@ isp_done(struct ccb_scsiio *sccb)
XS_CMD_S_CLEAR(sccb);
ISP_UNLOCK(isp);
#ifdef ISP_SMPLOCK
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
xpt_done((union ccb *) sccb);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#else
xpt_done((union ccb *) sccb);
#endif
diff --git a/sys/dev/isp/isp_freebsd.h b/sys/dev/isp/isp_freebsd.h
index 3bdfb1d..d184b7b 100644
--- a/sys/dev/isp/isp_freebsd.h
+++ b/sys/dev/isp/isp_freebsd.h
@@ -124,8 +124,8 @@ struct isposinfo {
*/
#ifdef ISP_SMPLOCK
-#define ISP_LOCK(x) mtx_enter(&(x)->isp_osinfo.lock, MTX_DEF)
-#define ISP_UNLOCK(x) mtx_exit(&(x)->isp_osinfo.lock, MTX_DEF)
+#define ISP_LOCK(x) mtx_lock(&(x)->isp_osinfo.lock)
+#define ISP_UNLOCK(x) mtx_unlock(&(x)->isp_osinfo.lock)
#else
#define ISP_LOCK isp_lock
#define ISP_UNLOCK isp_unlock
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index b45b7ce..02cc697 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -530,13 +530,13 @@ pccbb_detach(device_t dev)
if (error > 0)
return ENXIO;
- mtx_enter(&sc->sc_mtx, MTX_DEF);
+ mtx_lock(&sc->sc_mtx);
bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
sc->sc_flags |= PCCBB_KTHREAD_DONE;
if (sc->sc_flags & PCCBB_KTHREAD_RUNNING) {
wakeup(sc);
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
DEVPRINTF((dev, "waiting for kthread exit..."));
error = tsleep(sc, PWAIT, "pccbb-detach-wait", 60 * hz);
if (error)
@@ -544,7 +544,7 @@ pccbb_detach(device_t dev)
else
DPRINTF(("done\n"));
} else
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
bus_release_resource(dev, SYS_RES_MEMORY, PCCBBR_SOCKBASE,
@@ -572,17 +572,17 @@ pccbb_driver_added(device_t dev, driver_t *driver)
sc->sc_cbdev = devlist[tmp];
if ((sc->sc_socketreg->socket_state
& PCCBB_SOCKET_STAT_CD) == 0) {
- mtx_enter(&sc->sc_mtx, MTX_DEF);
+ mtx_lock(&sc->sc_mtx);
wakeup(sc);
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
}
} else if (strcmp(driver->name, "pccard") == 0) {
sc->sc_pccarddev = devlist[tmp];
if ((sc->sc_socketreg->socket_state
& PCCBB_SOCKET_STAT_CD) == 0) {
- mtx_enter(&sc->sc_mtx, MTX_DEF);
+ mtx_lock(&sc->sc_mtx);
wakeup(sc);
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
}
} else
device_printf(dev,
@@ -616,7 +616,7 @@ pccbb_event_thread (void *arg)
struct pccbb_softc *sc = arg;
u_int32_t status;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
for(;;) {
if (!(sc->sc_flags & PCCBB_KTHREAD_RUNNING))
sc->sc_flags |= PCCBB_KTHREAD_RUNNING;
@@ -629,7 +629,7 @@ pccbb_event_thread (void *arg)
*/
tsleep (&sc->sc_flags, PWAIT, "pccbbev", 1*hz);
}
- mtx_enter(&sc->sc_mtx, MTX_DEF);
+ mtx_lock(&sc->sc_mtx);
if (sc->sc_flags & PCCBB_KTHREAD_DONE)
break;
@@ -639,9 +639,9 @@ pccbb_event_thread (void *arg)
} else {
pccbb_removal(sc);
}
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
}
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
sc->sc_flags &= ~PCCBB_KTHREAD_RUNNING;
wakeup(sc);
kthread_exit(0);
@@ -744,9 +744,9 @@ pccbb_intr(void* arg)
sc->sc_socketreg->socket_event = sockevent | 0x01;
if (sockevent & PCCBB_SOCKET_EVENT_CD) {
- mtx_enter(&sc->sc_mtx, MTX_DEF);
+ mtx_lock(&sc->sc_mtx);
wakeup(sc);
- mtx_exit(&sc->sc_mtx, MTX_DEF);
+ mtx_unlock(&sc->sc_mtx);
} else {
if (sockevent & PCCBB_SOCKET_EVENT_CSTS) {
DPRINTF((" cstsevent occures, 0x%08x\n",
diff --git a/sys/dev/random/harvest.c b/sys/dev/random/harvest.c
index 60067dc..93ce35b 100644
--- a/sys/dev/random/harvest.c
+++ b/sys/dev/random/harvest.c
@@ -123,7 +123,7 @@ void
random_set_wakeup_exit(void *control)
{
wakeup(control);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
kthread_exit(0);
/* NOTREACHED */
}
diff --git a/sys/dev/random/yarrow.c b/sys/dev/random/yarrow.c
index 816ab23..f1325e5 100644
--- a/sys/dev/random/yarrow.c
+++ b/sys/dev/random/yarrow.c
@@ -96,10 +96,10 @@ random_kthread(void *arg /* NOTUSED */)
struct source *source;
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("OWNERSHIP Giant == %d sched_lock == %d\n",
mtx_owned(&Giant), mtx_owned(&sched_lock));
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
for (pl = 0; pl < 2; pl++)
@@ -114,11 +114,11 @@ random_kthread(void *arg /* NOTUSED */)
else {
#ifdef DEBUG1
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("HARVEST src=%d bits=%d/%d pool=%d count=%lld\n",
event->source, event->bits, event->frac,
event->pool, event->somecounter);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
/* Suck the harvested entropy out of the queue and hash
@@ -160,9 +160,9 @@ random_kthread(void *arg /* NOTUSED */)
/* Is the thread scheduled for a shutdown? */
if (random_kthread_control != 0) {
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random kthread setting terminate\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
random_set_wakeup_exit(&random_kthread_control);
/* NOTREACHED */
@@ -179,9 +179,9 @@ random_init(void)
int error;
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random initialise\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
/* This can be turned off by the very paranoid
@@ -213,9 +213,9 @@ random_init(void)
random_init_harvester(random_harvest_internal, read_random_real);
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random initialise finish\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
return 0;
@@ -225,31 +225,31 @@ void
random_deinit(void)
{
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random deinitialise\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
/* Deregister the randomness harvesting routine */
random_deinit_harvester();
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random deinitialise waiting for thread to terminate\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
/* Command the hash/reseed thread to end and wait for it to finish */
- mtx_enter(&harvestring.lockout_mtx, MTX_DEF);
+ mtx_lock(&harvestring.lockout_mtx);
random_kthread_control = -1;
msleep((void *)&random_kthread_control, &harvestring.lockout_mtx, PUSER,
"rndend", 0);
- mtx_exit(&harvestring.lockout_mtx, MTX_DEF);
+ mtx_unlock(&harvestring.lockout_mtx);
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random deinitialise removing mutexes\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
/* Remove the mutexes */
@@ -257,9 +257,9 @@ random_deinit(void)
mtx_destroy(&harvestring.lockout_mtx);
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random deinitialise finish\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
}
@@ -276,13 +276,13 @@ reseed(int fastslow)
int i, j;
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Reseed type %d\n", fastslow);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
/* The reseed task must not be jumped on */
- mtx_enter(&random_reseed_mtx, MTX_DEF);
+ mtx_lock(&random_reseed_mtx);
/* 1. Hash the accumulated entropy into v[0] */
@@ -353,12 +353,12 @@ reseed(int fastslow)
/* XXX Not done here yet */
/* Release the reseed mutex */
- mtx_exit(&random_reseed_mtx, MTX_DEF);
+ mtx_unlock(&random_reseed_mtx);
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Reseed finish\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
if (!random_state.seeded) {
@@ -379,7 +379,7 @@ read_random_real(void *buf, u_int count)
u_int retval;
/* The reseed task must not be jumped on */
- mtx_enter(&random_reseed_mtx, MTX_DEF);
+ mtx_lock(&random_reseed_mtx);
if (gate) {
generator_gate();
@@ -423,7 +423,7 @@ read_random_real(void *buf, u_int count)
cur -= retval;
}
}
- mtx_exit(&random_reseed_mtx, MTX_DEF);
+ mtx_unlock(&random_reseed_mtx);
return retval;
}
@@ -462,9 +462,9 @@ generator_gate(void)
u_char temp[KEYSIZE];
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Generator gate\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) {
@@ -477,9 +477,9 @@ generator_gate(void)
memset((void *)temp, 0, KEYSIZE);
#ifdef DEBUG
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Generator gate finish\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
}
@@ -495,16 +495,16 @@ random_harvest_internal(u_int64_t somecounter, void *entropy, u_int count,
int newhead, tail;
#ifdef DEBUG1
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Random harvest\n");
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#endif
if (origin < ENTROPYSOURCE) {
/* Add the harvested data to the ring buffer, but
* do not block.
*/
- if (mtx_try_enter(&harvestring.lockout_mtx, MTX_DEF)) {
+ if (mtx_trylock(&harvestring.lockout_mtx)) {
tail = atomic_load_acq_int(&harvestring.tail);
newhead = (harvestring.head + 1) % HARVEST_RING_SIZE;
@@ -533,7 +533,7 @@ random_harvest_internal(u_int64_t somecounter, void *entropy, u_int count,
wakeup(&harvestring.head);
}
- mtx_exit(&harvestring.lockout_mtx, MTX_DEF);
+ mtx_unlock(&harvestring.lockout_mtx);
}
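The yarrow.c conversion also covers the two less mechanical spellings: mtx_try_enter(m, MTX_DEF) becomes mtx_trylock(m), still returning nonzero when the lock is acquired, and a mutex handed to msleep() is still dropped while sleeping and reacquired before return. A short sketch of both under the new names (ring_mtx, ring_head and kthread_control are illustrative stand-ins for harvestring.lockout_mtx, harvestring.head and random_kthread_control):

	/* Producer path: never block while harvesting. */
	if (mtx_trylock(&ring_mtx)) {		/* was: mtx_try_enter(&ring_mtx, MTX_DEF) */
		/* ... append the sample to the ring ... */
		wakeup(&ring_head);
		mtx_unlock(&ring_mtx);
	}

	/* Shutdown handshake: msleep() releases ring_mtx while asleep. */
	mtx_lock(&ring_mtx);
	kthread_control = -1;
	msleep(&kthread_control, &ring_mtx, PUSER, "rndend", 0);
	mtx_unlock(&ring_mtx);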
diff --git a/sys/dev/sf/if_sfreg.h b/sys/dev/sf/if_sfreg.h
index c2dc20e..fd2107f 100644
--- a/sys/dev/sf/if_sfreg.h
+++ b/sys/dev/sf/if_sfreg.h
@@ -1048,8 +1048,8 @@ struct sf_softc {
};
-#define SF_LOCK(_sc) mtx_enter(&(_sc)->sf_mtx, MTX_DEF)
-#define SF_UNLOCK(_sc) mtx_exit(&(_sc)->sf_mtx, MTX_DEF)
+#define SF_LOCK(_sc) mtx_lock(&(_sc)->sf_mtx)
+#define SF_UNLOCK(_sc) mtx_unlock(&(_sc)->sf_mtx)
#define SF_TIMEOUT 1000
diff --git a/sys/dev/sio/sio.c b/sys/dev/sio/sio.c
index 332ce49..be5f642 100644
--- a/sys/dev/sio/sio.c
+++ b/sys/dev/sio/sio.c
@@ -856,7 +856,7 @@ sioprobe(dev, xrid)
* but mask them in the processor as well in case there are some
* (misconfigured) shared interrupts.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
/* EXTRA DELAY? */
/*
@@ -953,7 +953,7 @@ sioprobe(dev, xrid)
CLR_FLAG(dev, COM_C_IIR_TXRDYBUG);
}
sio_setreg(com, com_cfcr, CFCR_8BITS);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (iobase == siocniobase ? 0 : result);
}
@@ -993,7 +993,7 @@ sioprobe(dev, xrid)
irqmap[3] = isa_irq_pending();
failures[9] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_NOPEND;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
irqs = irqmap[1] & ~irqmap[0];
if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 &&
@@ -1181,7 +1181,7 @@ sioattach(dev, xrid)
} else
com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED;
if (siosetwater(com, com->it_in.c_ispeed) != 0) {
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Leave i/o resources allocated if this is a `cn'-level
* console, so that other devices can't snarf them.
@@ -1190,7 +1190,7 @@ sioattach(dev, xrid)
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (ENOMEM);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
termioschars(&com->it_in);
com->it_out = com->it_in;
@@ -1485,7 +1485,7 @@ open_top:
}
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
(void) inb(com->line_status_port);
(void) inb(com->data_port);
com->prev_modem_status = com->last_modem_status
@@ -1497,7 +1497,7 @@ open_top:
outb(com->intr_ctl_port, IER_ERXRDY | IER_ETXRDY
| IER_ERLS | IER_EMSC);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Handle initial DCD. Callout devices get a fake initial
* DCD (trapdoor DCD). If we are callout, then any sleeping
@@ -1753,7 +1753,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
incc = com->iptr - buf;
if (tp->t_rawq.c_cc + incc > tp->t_ihiwat
&& (com->state & CS_RTS_IFLOW
@@ -1774,7 +1774,7 @@ sioinput(com)
tp->t_lflag &= ~FLUSHO;
comstart(tp);
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
} else {
do {
@@ -1783,7 +1783,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
line_status = buf[com->ierroff];
recv_data = *buf++;
if (line_status
@@ -1798,7 +1798,7 @@ sioinput(com)
recv_data |= TTY_PE;
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
}
com_events -= (com->iptr - com->ibuf);
@@ -1823,9 +1823,9 @@ siointr(arg)
#ifndef COM_MULTIPORT
com = (struct com_s *)arg;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#else /* COM_MULTIPORT */
bool_t possibly_more_intrs;
int unit;
@@ -1837,7 +1837,7 @@ siointr(arg)
* devices, then the edge from one may be lost because another is
* on.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
do {
possibly_more_intrs = FALSE;
for (unit = 0; unit < sio_numunits; ++unit) {
@@ -1856,7 +1856,7 @@ siointr(arg)
/* XXX COM_UNLOCK(); */
}
} while (possibly_more_intrs);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#endif /* COM_MULTIPORT */
}
@@ -2264,7 +2264,7 @@ repeat:
* Discard any events related to never-opened or
* going-away devices.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
incc = com->iptr - com->ibuf;
com->iptr = com->ibuf;
if (com->state & CS_CHECKMSR) {
@@ -2272,33 +2272,33 @@ repeat:
com->state &= ~CS_CHECKMSR;
}
com_events -= incc;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
continue;
}
if (com->iptr != com->ibuf) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
sioinput(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (com->state & CS_CHECKMSR) {
u_char delta_modem_status;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta_modem_status = com->last_modem_status
^ com->prev_modem_status;
com->prev_modem_status = com->last_modem_status;
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_CHECKMSR;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta_modem_status & MSR_DCD)
(*linesw[tp->t_line].l_modem)
(tp, com->prev_modem_status & MSR_DCD);
}
if (com->state & CS_ODONE) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_ODONE;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (!(com->state & CS_BUSY)
&& !(com->extra_state & CSE_BUSYCHECK)) {
timeout(siobusycheck, com, hz / 100);
@@ -2484,7 +2484,7 @@ comparam(tp, t)
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
splx(s);
comstart(tp);
if (com->ibufold != NULL) {
@@ -2518,7 +2518,7 @@ siosetwater(com, speed)
for (ibufsize = 128; ibufsize < cp4ticks;)
ibufsize <<= 1;
if (ibufsize == com->ibufsize) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (0);
}
@@ -2528,7 +2528,7 @@ siosetwater(com, speed)
*/
ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT);
if (ibuf == NULL) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (ENOMEM);
}
@@ -2546,7 +2546,7 @@ siosetwater(com, speed)
* Read current input buffer, if any. Continue with interrupts
* disabled.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->iptr != com->ibuf)
sioinput(com);
@@ -2581,7 +2581,7 @@ comstart(tp)
if (com == NULL)
return;
s = spltty();
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (tp->t_state & TS_TTSTOP)
com->state &= ~CS_TTGO;
else
@@ -2594,7 +2594,7 @@ comstart(tp)
&& com->state & CS_RTS_IFLOW)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
ttwwakeup(tp);
splx(s);
@@ -2610,7 +2610,7 @@ comstart(tp)
sizeof com->obuf1);
com->obufs[0].l_next = NULL;
com->obufs[0].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -2622,7 +2622,7 @@ comstart(tp)
com->obufq.l_next = &com->obufs[0];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) {
com->obufs[1].l_tail
@@ -2630,7 +2630,7 @@ comstart(tp)
sizeof com->obuf2);
com->obufs[1].l_next = NULL;
com->obufs[1].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -2642,14 +2642,14 @@ comstart(tp)
com->obufq.l_next = &com->obufs[1];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
tp->t_state |= TS_BUSY;
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com); /* fake interrupt to start output */
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
ttwwakeup(tp);
splx(s);
}
@@ -2664,7 +2664,7 @@ comstop(tp, rw)
com = com_addr(DEV_TO_UNIT(tp->t_dev));
if (com == NULL || com->gone)
return;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (rw & FWRITE) {
if (com->hasfifo)
#ifdef COM_ESP
@@ -2691,7 +2691,7 @@ comstop(tp, rw)
com_events -= (com->iptr - com->ibuf);
com->iptr = com->ibuf;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
comstart(tp);
}
@@ -2734,7 +2734,7 @@ commctl(com, bits, how)
mcr |= MCR_RTS;
if (com->gone)
return(0);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
switch (how) {
case DMSET:
outb(com->modem_ctl_port,
@@ -2747,7 +2747,7 @@ commctl(com, bits, how)
outb(com->modem_ctl_port, com->mcr_image &= ~mcr);
break;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
return (0);
}
@@ -2806,9 +2806,9 @@ comwakeup(chan)
com = com_addr(unit);
if (com != NULL && !com->gone
&& (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
}
@@ -2830,10 +2830,10 @@ comwakeup(chan)
u_int delta;
u_long total;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta = com->delta_error_counts[errnum];
com->delta_error_counts[errnum] = 0;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta == 0)
continue;
total = com->error_counts[errnum] += delta;
diff --git a/sys/dev/sk/if_skreg.h b/sys/dev/sk/if_skreg.h
index 6f31d1d..061707c 100644
--- a/sys/dev/sk/if_skreg.h
+++ b/sys/dev/sk/if_skreg.h
@@ -1182,10 +1182,10 @@ struct sk_softc {
struct mtx sk_mtx;
};
-#define SK_LOCK(_sc) mtx_enter(&(_sc)->sk_mtx, MTX_DEF)
-#define SK_UNLOCK(_sc) mtx_exit(&(_sc)->sk_mtx, MTX_DEF)
-#define SK_IF_LOCK(_sc) mtx_enter(&(_sc)->sk_softc->sk_mtx, MTX_DEF)
-#define SK_IF_UNLOCK(_sc) mtx_exit(&(_sc)->sk_softc->sk_mtx, MTX_DEF)
+#define SK_LOCK(_sc) mtx_lock(&(_sc)->sk_mtx)
+#define SK_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_mtx)
+#define SK_IF_LOCK(_sc) mtx_lock(&(_sc)->sk_softc->sk_mtx)
+#define SK_IF_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_softc->sk_mtx)
/* Softc for each logical interface */
struct sk_if_softc {
diff --git a/sys/dev/ti/if_tireg.h b/sys/dev/ti/if_tireg.h
index 0eaff14..df399be 100644
--- a/sys/dev/ti/if_tireg.h
+++ b/sys/dev/ti/if_tireg.h
@@ -1147,8 +1147,8 @@ struct ti_softc {
struct mtx ti_mtx;
};
-#define TI_LOCK(_sc) mtx_enter(&(_sc)->ti_mtx, MTX_DEF)
-#define TI_UNLOCK(_sc) mtx_exit(&(_sc)->ti_mtx, MTX_DEF)
+#define TI_LOCK(_sc) mtx_lock(&(_sc)->ti_mtx)
+#define TI_UNLOCK(_sc) mtx_unlock(&(_sc)->ti_mtx)
/*
* Microchip Technology 24Cxx EEPROM control bytes
diff --git a/sys/dev/usb/if_auereg.h b/sys/dev/usb/if_auereg.h
index 5558b50..5544091 100644
--- a/sys/dev/usb/if_auereg.h
+++ b/sys/dev/usb/if_auereg.h
@@ -249,8 +249,8 @@ struct aue_softc {
struct mtx aue_mtx;
};
-#define AUE_LOCK(_sc) mtx_enter(&(_sc)->aue_mtx, MTX_DEF)
-#define AUE_UNLOCK(_sc) mtx_exit(&(_sc)->aue_mtx, MTX_DEF)
+#define AUE_LOCK(_sc) mtx_lock(&(_sc)->aue_mtx)
+#define AUE_UNLOCK(_sc) mtx_unlock(&(_sc)->aue_mtx)
#define AUE_TIMEOUT 1000
#define ETHER_ALIGN 2
diff --git a/sys/dev/usb/if_cuereg.h b/sys/dev/usb/if_cuereg.h
index 5d043e4..dc7b8c8 100644
--- a/sys/dev/usb/if_cuereg.h
+++ b/sys/dev/usb/if_cuereg.h
@@ -182,5 +182,5 @@ struct cue_softc {
struct mtx cue_mtx;
};
-#define CUE_LOCK(_sc) mtx_enter(&(_sc)->cue_mtx, MTX_DEF)
-#define CUE_UNLOCK(_sc) mtx_exit(&(_sc)->cue_mtx, MTX_DEF)
+#define CUE_LOCK(_sc) mtx_lock(&(_sc)->cue_mtx)
+#define CUE_UNLOCK(_sc) mtx_unlock(&(_sc)->cue_mtx)
diff --git a/sys/dev/usb/if_kuereg.h b/sys/dev/usb/if_kuereg.h
index 49cd235..b5ffb32 100644
--- a/sys/dev/usb/if_kuereg.h
+++ b/sys/dev/usb/if_kuereg.h
@@ -173,5 +173,5 @@ struct kue_softc {
struct mtx kue_mtx;
};
-#define KUE_LOCK(_sc) mtx_enter(&(_sc)->kue_mtx, MTX_DEF)
-#define KUE_UNLOCK(_sc) mtx_exit(&(_sc)->kue_mtx, MTX_DEF)
+#define KUE_LOCK(_sc) mtx_lock(&(_sc)->kue_mtx)
+#define KUE_UNLOCK(_sc) mtx_unlock(&(_sc)->kue_mtx)
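The per-driver lock wrappers (AN_LOCK, DC_LOCK, FXP_LOCK, SK_LOCK, AUE_LOCK, CUE_LOCK, KUE_LOCK and so on) change only in their expansion; every call site keeps the same macro. A hedged sketch of how such a pair is declared and used after this change (foo_softc, foo_mtx and foo_start are placeholder names, not from the patch):

	struct foo_softc {
		/* ... device state protected by the mutex below ... */
		struct mtx	foo_mtx;
	};

	#define FOO_LOCK(_sc)	mtx_lock(&(_sc)->foo_mtx)	/* was: mtx_enter(..., MTX_DEF) */
	#define FOO_UNLOCK(_sc)	mtx_unlock(&(_sc)->foo_mtx)	/* was: mtx_exit(..., MTX_DEF) */

	static void
	foo_start(struct foo_softc *sc)
	{
		FOO_LOCK(sc);
		/* ... touch shared driver state ... */
		FOO_UNLOCK(sc);
	}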
diff --git a/sys/dev/vinum/vinumdaemon.c b/sys/dev/vinum/vinumdaemon.c
index 99c1751..f03d36b 100644
--- a/sys/dev/vinum/vinumdaemon.c
+++ b/sys/dev/vinum/vinumdaemon.c
@@ -72,9 +72,9 @@ vinum_daemon(void)
PROC_LOCK(curproc);
curproc->p_flag |= P_SYSTEM; /* we're a system process */
PROC_UNLOCK(curproc);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
curproc->p_sflag |= PS_INMEM;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
daemon_save_config(); /* start by saving the configuration */
daemonpid = curproc->p_pid; /* mark our territory */
while (1) {
diff --git a/sys/dev/vinum/vinumlock.c b/sys/dev/vinum/vinumlock.c
index c85fbae..801f13f 100644
--- a/sys/dev/vinum/vinumlock.c
+++ b/sys/dev/vinum/vinumlock.c
@@ -132,7 +132,7 @@ lockrange(daddr_t stripe, struct buf *bp, struct plex *plex)
* increment all addresses by 1.
*/
stripe++;
- mtx_enter(&plex->lockmtx, MTX_DEF);
+ mtx_lock(&plex->lockmtx);
/* Wait here if the table is full */
while (plex->usedlocks == PLEX_LOCKS) /* all in use */
@@ -187,7 +187,7 @@ lockrange(daddr_t stripe, struct buf *bp, struct plex *plex)
pos->stripe = stripe;
pos->bp = bp;
plex->usedlocks++; /* one more lock */
- mtx_exit(&plex->lockmtx, MTX_DEF);
+ mtx_unlock(&plex->lockmtx);
#ifdef VINUMDEBUG
if (debug & DEBUG_LASTREQS)
logrq(loginfo_lock, (union rqinfou) pos, bp);
diff --git a/sys/dev/vr/if_vrreg.h b/sys/dev/vr/if_vrreg.h
index 8217a8c..235962d 100644
--- a/sys/dev/vr/if_vrreg.h
+++ b/sys/dev/vr/if_vrreg.h
@@ -414,8 +414,8 @@ struct vr_softc {
struct mtx vr_mtx;
};
-#define VR_LOCK(_sc) mtx_enter(&(_sc)->vr_mtx, MTX_DEF)
-#define VR_UNLOCK(_sc) mtx_exit(&(_sc)->vr_mtx, MTX_DEF)
+#define VR_LOCK(_sc) mtx_lock(&(_sc)->vr_mtx)
+#define VR_UNLOCK(_sc) mtx_unlock(&(_sc)->vr_mtx)
/*
* register space access macros
diff --git a/sys/dev/wi/if_wireg.h b/sys/dev/wi/if_wireg.h
index eb4ab7c..9751029 100644
--- a/sys/dev/wi/if_wireg.h
+++ b/sys/dev/wi/if_wireg.h
@@ -128,8 +128,8 @@ struct wi_softc {
int wi_prism2; /* set to 1 if it uses a Prism II chip */
};
-#define WI_LOCK(_sc) mtx_enter(&(_sc)->wi_mtx, MTX_DEF)
-#define WI_UNLOCK(_sc) mtx_exit(&(_sc)->wi_mtx, MTX_DEF)
+#define WI_LOCK(_sc) mtx_lock(&(_sc)->wi_mtx)
+#define WI_UNLOCK(_sc) mtx_unlock(&(_sc)->wi_mtx)
#define WI_TIMEOUT 65536
diff --git a/sys/fs/cd9660/cd9660_node.c b/sys/fs/cd9660/cd9660_node.c
index f260802..00f79e8 100644
--- a/sys/fs/cd9660/cd9660_node.c
+++ b/sys/fs/cd9660/cd9660_node.c
@@ -102,18 +102,18 @@ cd9660_ihashget(dev, inum)
struct vnode *vp;
loop:
- mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&cd9660_ihash_mtx);
for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&cd9660_ihash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (vp);
}
}
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_unlock(&cd9660_ihash_mtx);
return (NULL);
}
@@ -127,14 +127,14 @@ cd9660_ihashins(ip)
struct proc *p = curproc; /* XXX */
struct iso_node **ipp, *iq;
- mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&cd9660_ihash_mtx);
ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)];
if ((iq = *ipp) != NULL)
iq->i_prev = &ip->i_next;
ip->i_next = iq;
ip->i_prev = ipp;
*ipp = ip;
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_unlock(&cd9660_ihash_mtx);
lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
}
@@ -148,7 +148,7 @@ cd9660_ihashrem(ip)
{
register struct iso_node *iq;
- mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&cd9660_ihash_mtx);
if ((iq = ip->i_next) != NULL)
iq->i_prev = ip->i_prev;
*ip->i_prev = iq;
@@ -156,7 +156,7 @@ cd9660_ihashrem(ip)
ip->i_next = NULL;
ip->i_prev = NULL;
#endif
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_unlock(&cd9660_ihash_mtx);
}
/*
diff --git a/sys/fs/deadfs/dead_vnops.c b/sys/fs/deadfs/dead_vnops.c
index 4211f25..8de5c57 100644
--- a/sys/fs/deadfs/dead_vnops.c
+++ b/sys/fs/deadfs/dead_vnops.c
@@ -211,7 +211,7 @@ dead_lock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
ap->a_flags &= ~LK_INTERLOCK;
}
if (!chkvnlock(vp))
diff --git a/sys/fs/hpfs/hpfs_hash.c b/sys/fs/hpfs/hpfs_hash.c
index 50c9b66..791680c 100644
--- a/sys/fs/hpfs/hpfs_hash.c
+++ b/sys/fs/hpfs/hpfs_hash.c
@@ -92,11 +92,11 @@ hpfs_hphashlookup(dev, ino)
{
struct hpfsnode *hp;
- mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_lock(&hpfs_hphash_mtx);
LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash)
if (ino == hp->h_no && dev == hp->h_dev)
break;
- mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_unlock(&hpfs_hphash_mtx);
return (hp);
}
@@ -110,14 +110,14 @@ hpfs_hphashget(dev, ino)
struct hpfsnode *hp;
loop:
- mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_lock(&hpfs_hphash_mtx);
LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) {
if (ino == hp->h_no && dev == hp->h_dev) {
LOCKMGR(&hp->h_intlock, LK_EXCLUSIVE | LK_INTERLOCK, &hpfs_hphash_slock, NULL);
return (hp);
}
}
- mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_unlock(&hpfs_hphash_mtx);
return (hp);
}
#endif
@@ -132,18 +132,18 @@ hpfs_hphashvget(dev, ino, p)
struct vnode *vp;
loop:
- mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_lock(&hpfs_hphash_mtx);
LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) {
if (ino == hp->h_no && dev == hp->h_dev) {
vp = HPTOV(hp);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&hpfs_hphash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (vp);
}
}
- mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_unlock(&hpfs_hphash_mtx);
return (NULLVP);
}
@@ -156,11 +156,11 @@ hpfs_hphashins(hp)
{
struct hphashhead *hpp;
- mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_lock(&hpfs_hphash_mtx);
hpp = HPNOHASH(hp->h_dev, hp->h_no);
hp->h_flag |= H_HASHED;
LIST_INSERT_HEAD(hpp, hp, h_hash);
- mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_unlock(&hpfs_hphash_mtx);
}
/*
@@ -170,10 +170,10 @@ void
hpfs_hphashrem(hp)
struct hpfsnode *hp;
{
- mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_lock(&hpfs_hphash_mtx);
if (hp->h_flag & H_HASHED) {
hp->h_flag &= ~H_HASHED;
LIST_REMOVE(hp, h_hash);
}
- mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
+ mtx_unlock(&hpfs_hphash_mtx);
}
diff --git a/sys/fs/msdosfs/msdosfs_denode.c b/sys/fs/msdosfs/msdosfs_denode.c
index 6c1a3c7..dbdf554 100644
--- a/sys/fs/msdosfs/msdosfs_denode.c
+++ b/sys/fs/msdosfs/msdosfs_denode.c
@@ -130,21 +130,21 @@ msdosfs_hashget(dev, dirclust, diroff)
struct vnode *vp;
loop:
- mtx_enter(&dehash_mtx, MTX_DEF);
+ mtx_lock(&dehash_mtx);
for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) {
if (dirclust == dep->de_dirclust
&& diroff == dep->de_diroffset
&& dev == dep->de_dev
&& dep->de_refcnt != 0) {
vp = DETOV(dep);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&dehash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (dep);
}
}
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_unlock(&dehash_mtx);
return (NULL);
}
@@ -154,7 +154,7 @@ msdosfs_hashins(dep)
{
struct denode **depp, *deq;
- mtx_enter(&dehash_mtx, MTX_DEF);
+ mtx_lock(&dehash_mtx);
depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset);
deq = *depp;
if (deq)
@@ -162,7 +162,7 @@ msdosfs_hashins(dep)
dep->de_next = deq;
dep->de_prev = depp;
*depp = dep;
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_unlock(&dehash_mtx);
}
static void
@@ -171,7 +171,7 @@ msdosfs_hashrem(dep)
{
struct denode *deq;
- mtx_enter(&dehash_mtx, MTX_DEF);
+ mtx_lock(&dehash_mtx);
deq = dep->de_next;
if (deq)
deq->de_prev = dep->de_prev;
@@ -180,7 +180,7 @@ msdosfs_hashrem(dep)
dep->de_next = NULL;
dep->de_prev = NULL;
#endif
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_unlock(&dehash_mtx);
}
/*
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
index 449d691..3088d7a 100644
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -862,7 +862,7 @@ msdosfs_sync(mp, waitfor, cred, p)
/*
* Write back each (modified) denode.
*/
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
/*
@@ -872,20 +872,20 @@ loop:
if (vp->v_mount != mp)
goto loop;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
nvp = LIST_NEXT(vp, v_mntvnodes);
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
@@ -895,9 +895,9 @@ loop:
allerror = error;
VOP_UNLOCK(vp, 0, p);
vrele(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
/*
* Flush filesystem control info.
diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c
index e4052f7..fb7b83d 100644
--- a/sys/fs/msdosfs/msdosfs_vnops.c
+++ b/sys/fs/msdosfs/msdosfs_vnops.c
@@ -233,12 +233,12 @@ msdosfs_close(ap)
struct denode *dep = VTODE(vp);
struct timespec ts;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount > 1) {
getnanotime(&ts);
DETIMES(dep, &ts, &ts, &ts);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return 0;
}
diff --git a/sys/fs/ntfs/ntfs_ihash.c b/sys/fs/ntfs/ntfs_ihash.c
index b9c0345..fea648c 100644
--- a/sys/fs/ntfs/ntfs_ihash.c
+++ b/sys/fs/ntfs/ntfs_ihash.c
@@ -93,11 +93,11 @@ ntfs_nthashlookup(dev, inum)
{
struct ntnode *ip;
- mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_lock(&ntfs_nthash_mtx);
LIST_FOREACH(ip, NTNOHASH(dev, inum), i_hash)
if (inum == ip->i_number && dev == ip->i_dev)
break;
- mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_unlock(&ntfs_nthash_mtx);
return (ip);
}
@@ -111,11 +111,11 @@ ntfs_nthashins(ip)
{
struct nthashhead *ipp;
- mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_lock(&ntfs_nthash_mtx);
ipp = NTNOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(ipp, ip, i_hash);
ip->i_flag |= IN_HASHED;
- mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_unlock(&ntfs_nthash_mtx);
}
/*
@@ -125,10 +125,10 @@ void
ntfs_nthashrem(ip)
struct ntnode *ip;
{
- mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_lock(&ntfs_nthash_mtx);
if (ip->i_flag & IN_HASHED) {
ip->i_flag &= ~IN_HASHED;
LIST_REMOVE(ip, i_hash);
}
- mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_unlock(&ntfs_nthash_mtx);
}
diff --git a/sys/fs/ntfs/ntfs_subr.c b/sys/fs/ntfs/ntfs_subr.c
index 38324f2..c55cd0a 100644
--- a/sys/fs/ntfs/ntfs_subr.c
+++ b/sys/fs/ntfs/ntfs_subr.c
@@ -360,7 +360,7 @@ ntfs_ntget(ip)
dprintf(("ntfs_ntget: get ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount++;
LOCKMGR(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock);
@@ -438,7 +438,7 @@ ntfs_ntput(ip)
dprintf(("ntfs_ntput: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount--;
#ifdef DIAGNOSTIC
@@ -462,7 +462,7 @@ ntfs_ntput(ip)
LIST_REMOVE(vap,va_list);
ntfs_freentvattr(vap);
}
- mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_unlock(&ip->i_interlock);
mtx_destroy(&ip->i_interlock);
lockdestroy(&ip->i_lock);
@@ -479,9 +479,9 @@ void
ntfs_ntref(ip)
struct ntnode *ip;
{
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount++;
- mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_unlock(&ip->i_interlock);
dprintf(("ntfs_ntref: ino %d, usecount: %d\n",
ip->i_number, ip->i_usecount));
@@ -498,13 +498,13 @@ ntfs_ntrele(ip)
dprintf(("ntfs_ntrele: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount--;
if (ip->i_usecount < 0)
panic("ntfs_ntrele: ino: %d usecount: %d \n",
ip->i_number,ip->i_usecount);
- mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_unlock(&ip->i_interlock);
}
/*
diff --git a/sys/fs/ntfs/ntfs_vfsops.c b/sys/fs/ntfs/ntfs_vfsops.c
index 7c6b2c0..384883e 100644
--- a/sys/fs/ntfs/ntfs_vfsops.c
+++ b/sys/fs/ntfs/ntfs_vfsops.c
@@ -196,9 +196,9 @@ ntfs_mountroot()
return (error);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
(void)ntfs_statfs(mp, &mp->mnt_stat, p);
vfs_unbusy(mp);
return (0);
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 6dc0509..e9fded7 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -624,7 +624,7 @@ null_lock(ap)
if (lvp == NULL)
return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, p));
if (flags & LK_INTERLOCK) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
@@ -671,7 +671,7 @@ null_unlock(ap)
return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, p));
if ((flags & LK_THISLAYER) == 0) {
if (flags & LK_INTERLOCK) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, p);
diff --git a/sys/fs/nwfs/nwfs_node.c b/sys/fs/nwfs/nwfs_node.c
index 02a4ee3..2025191 100644
--- a/sys/fs/nwfs/nwfs_node.c
+++ b/sys/fs/nwfs/nwfs_node.c
@@ -149,7 +149,7 @@ loop:
rescan:
if (nwfs_hashlookup(nmp, fid, &np) == 0) {
vp = NWTOV(np);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
lockmgr(&nwhashlock, LK_RELEASE, NULL, p);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
diff --git a/sys/fs/nwfs/nwfs_vnops.c b/sys/fs/nwfs/nwfs_vnops.c
index 59694f9..93fe639 100644
--- a/sys/fs/nwfs/nwfs_vnops.c
+++ b/sys/fs/nwfs/nwfs_vnops.c
@@ -256,24 +256,24 @@ nwfs_close(ap)
if (vp->v_type == VDIR) return 0; /* nothing to do now */
error = 0;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (np->opened == 0) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return 0;
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
error = nwfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (np->opened == 0) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return 0;
}
if (--np->opened == 0) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
error = ncp_close_file(NWFSTOCONN(VTONWFS(vp)), &np->n_fh,
ap->a_p, ap->a_cred);
} else
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
np->n_atime = 0;
return (error);
}
diff --git a/sys/fs/procfs/procfs_ctl.c b/sys/fs/procfs/procfs_ctl.c
index 5d0ce44..1ca2208 100644
--- a/sys/fs/procfs/procfs_ctl.c
+++ b/sys/fs/procfs/procfs_ctl.c
@@ -167,13 +167,13 @@ procfs_control(curp, p, op)
default:
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!TRACE_WAIT_P(curp, p)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
return (EBUSY);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
}
@@ -252,31 +252,31 @@ procfs_control(curp, p, op)
error = 0;
if (p->p_flag & P_TRACED) {
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (error == 0 &&
(p->p_stat != SSTOP) &&
(p->p_flag & P_TRACED) &&
(p->p_pptr == curp)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
error = tsleep((caddr_t) p,
PWAIT|PCATCH, "procfsx", 0);
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (error == 0 && !TRACE_WAIT_P(curp, p))
error = EBUSY;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
} else {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (error == 0 && p->p_stat != SSTOP) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
error = tsleep((caddr_t) p,
PWAIT|PCATCH, "procfs", 0);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
return (error);
@@ -284,10 +284,10 @@ procfs_control(curp, p, op)
panic("procfs_control");
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SSTOP)
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (0);
}
@@ -329,17 +329,17 @@ procfs_doctl(curp, p, pfs, uio)
nm = vfs_findname(signames, msg, xlen);
if (nm) {
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (TRACE_WAIT_P(curp, p)) {
p->p_xstat = nm->nm_val;
#ifdef FIX_SSTEP
FIX_SSTEP(p);
#endif
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
psignal(p, nm->nm_val);
}
diff --git a/sys/fs/procfs/procfs_status.c b/sys/fs/procfs/procfs_status.c
index 813ad60..14c8fb4 100644
--- a/sys/fs/procfs/procfs_status.c
+++ b/sys/fs/procfs/procfs_status.c
@@ -123,12 +123,12 @@ procfs_dostatus(curp, p, pfs, uio)
DOCHECK();
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
struct timeval ut, st;
calcru(p, &ut, &st, (struct timeval *) NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ps += snprintf(ps, psbuf + sizeof(psbuf) - ps,
" %ld,%ld %ld,%ld %ld,%ld",
p->p_stats->p_start.tv_sec,
@@ -136,7 +136,7 @@ procfs_dostatus(curp, p, pfs, uio)
ut.tv_sec, ut.tv_usec,
st.tv_sec, st.tv_usec);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ps += snprintf(ps, psbuf + sizeof(psbuf) - ps,
" -1,-1 -1,-1 -1,-1");
}
diff --git a/sys/gnu/ext2fs/ext2_ihash.c b/sys/gnu/ext2fs/ext2_ihash.c
index 6866a23..1fd39e9 100644
--- a/sys/gnu/ext2fs/ext2_ihash.c
+++ b/sys/gnu/ext2fs/ext2_ihash.c
@@ -77,11 +77,11 @@ ufs_ihashlookup(dev, inum)
{
struct inode *ip;
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
LIST_FOREACH(ip, INOHASH(dev, inum), i_hash)
if (inum == ip->i_number && dev == ip->i_dev)
break;
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
if (ip)
return (ITOV(ip));
@@ -102,18 +102,18 @@ ufs_ihashget(dev, inum)
struct vnode *vp;
loop:
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
LIST_FOREACH(ip, INOHASH(dev, inum), i_hash) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&ufs_ihash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (vp);
}
}
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
return (NULL);
}
@@ -130,11 +130,11 @@ ufs_ihashins(ip)
/* lock the inode, then put it on the appropriate hash list */
lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
ipp = INOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(ipp, ip, i_hash);
ip->i_flag |= IN_HASHED;
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
}
/*
@@ -144,10 +144,10 @@ void
ufs_ihashrem(ip)
struct inode *ip;
{
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
if (ip->i_flag & IN_HASHED) {
ip->i_flag &= ~IN_HASHED;
LIST_REMOVE(ip, i_hash);
}
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
}
diff --git a/sys/gnu/ext2fs/ext2_vfsops.c b/sys/gnu/ext2fs/ext2_vfsops.c
index 25af8d2..5ef3819 100644
--- a/sys/gnu/ext2fs/ext2_vfsops.c
+++ b/sys/gnu/ext2fs/ext2_vfsops.c
@@ -561,10 +561,10 @@ ext2_reload(mountp, cred, p)
brelse(bp);
loop:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mountp->mnt_vnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto loop;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
@@ -576,8 +576,8 @@ loop:
/*
* Step 5: invalidate all cached file data.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&mntvnode_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
}
@@ -599,9 +599,9 @@ loop:
&ip->i_din);
brelse(bp);
vput(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return (0);
}
@@ -918,7 +918,7 @@ ext2_sync(mp, waitfor, cred, p)
/*
* Write back each (modified) inode.
*/
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
/*
@@ -927,20 +927,20 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
nvp = LIST_NEXT(vp, v_mntvnodes);
ip = VTOI(vp);
if (vp->v_type == VNON ||
((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
@@ -949,9 +949,9 @@ loop:
allerror = error;
VOP_UNLOCK(vp, 0, p);
vrele(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
/*
* Force stale file system control information to be flushed.
*/
diff --git a/sys/gnu/fs/ext2fs/ext2_vfsops.c b/sys/gnu/fs/ext2fs/ext2_vfsops.c
index 25af8d2..5ef3819 100644
--- a/sys/gnu/fs/ext2fs/ext2_vfsops.c
+++ b/sys/gnu/fs/ext2fs/ext2_vfsops.c
@@ -561,10 +561,10 @@ ext2_reload(mountp, cred, p)
brelse(bp);
loop:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mountp->mnt_vnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto loop;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
@@ -576,8 +576,8 @@ loop:
/*
* Step 5: invalidate all cached file data.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&mntvnode_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
}
@@ -599,9 +599,9 @@ loop:
&ip->i_din);
brelse(bp);
vput(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return (0);
}
@@ -918,7 +918,7 @@ ext2_sync(mp, waitfor, cred, p)
/*
* Write back each (modified) inode.
*/
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
/*
@@ -927,20 +927,20 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
nvp = LIST_NEXT(vp, v_mntvnodes);
ip = VTOI(vp);
if (vp->v_type == VNON ||
((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
@@ -949,9 +949,9 @@ loop:
allerror = error;
VOP_UNLOCK(vp, 0, p);
vrele(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
/*
* Force stale file system control information to be flushed.
*/
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index d5c7ece..ea1703c 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -1910,7 +1910,7 @@ init386(first)
* Giant is used early for at least debugger traps and unexpected traps.
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* make ldt memory segments */
/*
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 9d53cd7..2802750 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/i386/i386/mpapic.c b/sys/i386/i386/mpapic.c
index 3143822..e40f216 100644
--- a/sys/i386/i386/mpapic.c
+++ b/sys/i386/i386/mpapic.c
@@ -210,11 +210,11 @@ io_apic_setup_intpin(int apic, int pin)
* shouldn't and stop the carnage.
*/
vector = NRSVIDT + pin; /* IDT vec */
- mtx_enter(&imen_mtx, MTX_SPIN);
+ mtx_lock_spin(&imen_mtx);
io_apic_write(apic, select,
(io_apic_read(apic, select) & ~IOART_INTMASK
& ~0xff)|IOART_INTMSET|vector);
- mtx_exit(&imen_mtx, MTX_SPIN);
+ mtx_unlock_spin(&imen_mtx);
/* we only deal with vectored INTs here */
if (apic_int_type(apic, pin) != 0)
@@ -258,10 +258,10 @@ io_apic_setup_intpin(int apic, int pin)
printf("IOAPIC #%d intpin %d -> irq %d\n",
apic, pin, irq);
vector = NRSVIDT + irq; /* IDT vec */
- mtx_enter(&imen_mtx, MTX_SPIN);
+ mtx_lock_spin(&imen_mtx);
io_apic_write(apic, select, flags | vector);
io_apic_write(apic, select + 1, target);
- mtx_exit(&imen_mtx, MTX_SPIN);
+ mtx_unlock_spin(&imen_mtx);
}
int
diff --git a/sys/i386/i386/mptable.c b/sys/i386/i386/mptable.c
index 9d53cd7..2802750 100644
--- a/sys/i386/i386/mptable.c
+++ b/sys/i386/i386/mptable.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/i386/i386/procfs_machdep.c b/sys/i386/i386/procfs_machdep.c
index cd4e469..5ca703a 100644
--- a/sys/i386/i386/procfs_machdep.c
+++ b/sys/i386/i386/procfs_machdep.c
@@ -86,12 +86,12 @@ procfs_read_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_regs(p, regs));
}
@@ -101,12 +101,12 @@ procfs_write_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_regs(p, regs));
}
@@ -116,12 +116,12 @@ procfs_read_dbregs(p, dbregs)
struct dbreg *dbregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_dbregs(p, dbregs));
}
@@ -131,12 +131,12 @@ procfs_write_dbregs(p, dbregs)
struct dbreg *dbregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_dbregs(p, dbregs));
}
@@ -151,12 +151,12 @@ procfs_read_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_fpregs(p, fpregs));
}
@@ -166,12 +166,12 @@ procfs_write_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_fpregs(p, fpregs));
}
@@ -180,11 +180,11 @@ procfs_sstep(p)
struct proc *p;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (ptrace_single_step(p));
}
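Every accessor in procfs_machdep.c above uses the same guard: take the scheduler spin lock, test PS_INMEM, and drop the lock again before reading or writing the registers. A minimal sketch of that idiom with the renamed spin-lock calls (the helper name is illustrative, not part of the patch):

	static int
	proc_resident(struct proc *p)
	{
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & PS_INMEM) == 0) {
			mtx_unlock_spin(&sched_lock);
			return (0);	/* swapped out; callers return EIO */
		}
		mtx_unlock_spin(&sched_lock);
		return (1);
	}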
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index b4373b3..533d791 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -174,11 +174,11 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (resched_wanted()) {
/*
@@ -193,30 +193,30 @@ userret(p, frame, oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* Charge system time if profiling.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* XXX - do we need Giant? */
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_eip,
(u_int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -282,9 +282,9 @@ restart:
((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
/* user trap */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
switch (type) {
@@ -312,9 +312,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0)
goto user;
break;
@@ -339,9 +339,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = trap_pfault(&frame, TRUE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
if (i == -2) {
/*
@@ -371,13 +371,13 @@ restart:
#ifndef TIMER_FREQ
# define TIMER_FREQ 1193182
#endif
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* machine/parity/power fail/"kitchen sink" faults */
@@ -421,9 +421,9 @@ restart:
ucode = FPE_FPU_NP_TRAP;
break;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = (*pmath_emulate)(&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0) {
if (!(frame.tf_eflags & PSL_T))
goto out;
@@ -452,9 +452,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(void) trap_pfault(&frame, FALSE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
case T_DNA:
@@ -477,9 +477,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i != 0)
/*
* returns to original process
@@ -510,9 +510,9 @@ restart:
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
PCPU_GET(curpcb)->pcb_gs = 0;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGBUS);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
@@ -621,13 +621,13 @@ restart:
#ifdef DEV_ISA
case T_NMI:
#ifdef POWERFAIL_NMI
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* XXX Giant */
@@ -651,13 +651,13 @@ restart:
#endif /* DEV_ISA */
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
trap_fatal(&frame, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* Translate fault for emulators (e.g. Linux) */
if (*p->p_sysent->sv_transtrap)
i = (*p->p_sysent->sv_transtrap)(i, type);
@@ -673,12 +673,12 @@ restart:
uprintf("\n");
}
#endif
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
user:
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
out:
return;
}
@@ -1103,15 +1103,15 @@ syscall2(frame)
#ifdef DIAGNOSTIC
if (ISPL(frame.tf_cs) != SEL_UPL) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
panic("syscall");
/* NOT REACHED */
}
#endif
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
params = (caddr_t)frame.tf_esp + sizeof(int);
@@ -1121,9 +1121,9 @@ syscall2(frame)
/*
* The prep code is not MP aware.
*/
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
} else {
/*
* Need to check if this is a 32 bit or 64 bit syscall.
@@ -1160,7 +1160,7 @@ syscall2(frame)
*/
if (params && (i = narg * sizeof(int)) &&
(error = copyin(params, (caddr_t)args, (u_int)i))) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, narg, args);
@@ -1174,13 +1174,13 @@ syscall2(frame)
* we are ktracing
*/
if ((callp->sy_narg & SYF_MPSAFE) == 0) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsyscall(p->p_tracep, code, narg, args);
}
#endif
@@ -1230,7 +1230,7 @@ bad:
*/
if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
frame.tf_eflags &= ~PSL_T;
trapsignal(p, SIGTRAP, 0);
}
@@ -1243,7 +1243,7 @@ bad:
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
}
#endif
@@ -1259,7 +1259,7 @@ bad:
* Release Giant if we had to get it
*/
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -1278,38 +1278,38 @@ ast(frame)
struct proc *p = CURPROC;
u_quad_t sticks;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
astoff();
atomic_add_int(&cnt.v_soft, 1);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
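The userret() and ast() hunks above also show why the spin and sleep variants must stay distinct: sched_lock is a spin mutex and Giant a sleep mutex, so the spin lock is dropped before Giant is acquired and then retaken. A condensed sketch of that ordering, following the PS_OWEUPC case above:

	mtx_lock_spin(&sched_lock);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_unlock_spin(&sched_lock);	/* may not take a sleep mutex while holding a spin mutex */
		mtx_lock(&Giant);
		mtx_lock_spin(&sched_lock);
		/* ... charge the deferred profiling tick ... */
	}
	mtx_unlock_spin(&sched_lock);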
diff --git a/sys/i386/i386/tsc.c b/sys/i386/i386/tsc.c
index d7a1ff0..bbd066b 100644
--- a/sys/i386/i386/tsc.c
+++ b/sys/i386/i386/tsc.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
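The clock hunks keep the i8254 programming sequences inside the clock_lock spin mutex; only the call names change. A condensed sketch of one protected sequence, as in i8254_restore() above:

	mtx_lock_spin(&clock_lock);
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
	outb(TIMER_CNTR0, timer0_max_count & 0xff);
	outb(TIMER_CNTR0, timer0_max_count >> 8);
	mtx_unlock_spin(&clock_lock);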
diff --git a/sys/i386/i386/vm86.c b/sys/i386/i386/vm86.c
index 6e2f080..d1db1b1 100644
--- a/sys/i386/i386/vm86.c
+++ b/sys/i386/i386/vm86.c
@@ -576,9 +576,9 @@ vm86_intcall(int intnum, struct vm86frame *vmf)
return (EINVAL);
vmf->vmf_trapno = intnum;
- mtx_enter(&vm86pcb_lock, MTX_DEF);
+ mtx_lock(&vm86pcb_lock);
retval = vm86_bioscall(vmf);
- mtx_exit(&vm86pcb_lock, MTX_DEF);
+ mtx_unlock(&vm86pcb_lock);
return (retval);
}
@@ -606,9 +606,9 @@ vm86_datacall(intnum, vmf, vmc)
}
vmf->vmf_trapno = intnum;
- mtx_enter(&vm86pcb_lock, MTX_DEF);
+ mtx_lock(&vm86pcb_lock);
retval = vm86_bioscall(vmf);
- mtx_exit(&vm86pcb_lock, MTX_DEF);
+ mtx_unlock(&vm86pcb_lock);
for (i = 0; i < vmc->npages; i++) {
entry = vmc->pmap[i].pte_num;
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index eddae55..434587d 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -261,8 +261,8 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -574,7 +574,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
@@ -597,7 +597,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
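For reference, the calls touched in vm_machdep.c map one-to-one onto the new names; a sketch of the correspondences used in this patch (comments show the pre-rename forms):

	mtx_lock(&Giant);				/* was mtx_enter(&Giant, MTX_DEF) */
	mtx_unlock(&Giant);				/* was mtx_exit(&Giant, MTX_DEF) */
	mtx_lock_spin(&sched_lock);			/* was mtx_enter(&sched_lock, MTX_SPIN) */
	mtx_unlock_flags(&Giant, MTX_NOSWITCH);		/* was mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH) */
	if (mtx_trylock(&Giant)) {			/* was mtx_try_enter(&Giant, MTX_DEF) */
		/* ... */
		mtx_unlock(&Giant);
	}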
diff --git a/sys/i386/include/cpu.h b/sys/i386/include/cpu.h
index a1d47f0..0b99ec6 100644
--- a/sys/i386/include/cpu.h
+++ b/sys/i386/include/cpu.h
@@ -92,9 +92,9 @@
* counter in the proc table and flag isn't really necessary.
*/
#define need_proftick(p) do { \
- mtx_enter(&sched_lock, MTX_SPIN); \
+ mtx_lock_spin(&sched_lock); \
(p)->p_sflag |= PS_OWEUPC; \
- mtx_exit(&sched_lock, MTX_SPIN); \
+ mtx_unlock_spin(&sched_lock); \
aston(); \
} while (0)
diff --git a/sys/i386/include/lock.h b/sys/i386/include/lock.h
index 414186c..35d1b3d 100644
--- a/sys/i386/include/lock.h
+++ b/sys/i386/include/lock.h
@@ -39,8 +39,8 @@
/*
* Protects the IO APIC and apic_imen as a critical region.
*/
-#define IMASK_LOCK MTX_ENTER(_imen_mtx, MTX_SPIN)
-#define IMASK_UNLOCK MTX_EXIT(_imen_mtx, MTX_SPIN)
+#define IMASK_LOCK MTX_LOCK_SPIN(_imen_mtx, 0)
+#define IMASK_UNLOCK MTX_UNLOCK_SPIN(_imen_mtx)
#else /* SMP */
@@ -62,8 +62,8 @@
* XXX should rc (RISCom/8) use this?
*/
#ifdef USE_COMLOCK
-#define COM_LOCK() mtx_enter(&com_mtx, MTX_SPIN)
-#define COM_UNLOCK() mtx_exit(&com_mtx, MTX_SPIN)
+#define COM_LOCK() mtx_lock_spin(&com_mtx)
+#define COM_UNLOCK() mtx_unlock_spin(&com_mtx)
#else
#define COM_LOCK()
#define COM_UNLOCK()
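COM_LOCK()/COM_UNLOCK() stay empty on kernels built without USE_COMLOCK and expand to the spin-lock calls otherwise, so driver code can wrap short register accesses unconditionally. A brief usage sketch (the register read is borrowed from the sio driver later in this patch):

	COM_LOCK();
	(void) inb(com->line_status_port);	/* touch the UART only while the spin lock is held */
	COM_UNLOCK();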
diff --git a/sys/i386/include/mptable.h b/sys/i386/include/mptable.h
index 9d53cd7..2802750 100644
--- a/sys/i386/include/mptable.h
+++ b/sys/i386/include/mptable.h
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/i386/include/mutex.h b/sys/i386/include/mutex.h
index 2f16de3..c4fe210 100644
--- a/sys/i386/include/mutex.h
+++ b/sys/i386/include/mutex.h
@@ -43,22 +43,10 @@ extern struct mtx clock_lock;
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "fl & PSL_I";
-char STR_IDIS[] = "!(fl & PSL_I)";
-char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-#endif /* MUTEX_DEBUG */
-
-#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
-#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, "fl & PSL_I")
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, "!(fl & PSL_I)")
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, \
+ "mpp->mtx_saveintr & PSL_I")
#define mtx_legal2block() (read_eflags() & PSL_I)
@@ -66,9 +54,6 @@ extern char STR_SIEN[];
* Assembly macros (for internal use only)
*------------------------------------------------------------------------------
*/
-
-#ifdef _KERN_MUTEX_C_
-
#define _V(x) __STRING(x)
#if 0
@@ -252,22 +237,80 @@ extern char STR_SIEN[];
#undef _V
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release mutexes.
+ *
+ * Note: All of these macros accept a "flags" argument and are analogous
+ * to the mtx_lock_flags and mtx_unlock_flags general macros. If one
+ * does not wish to pass a flag, the value 0 may be passed as the second
+ * argument.
+ *
+ * XXX: We only have MTX_LOCK_SPIN and MTX_UNLOCK_SPIN for now, since that's
+ * all we use right now. We should add MTX_LOCK and MTX_UNLOCK (for sleep
+ * locks) in the near future, however.
*/
+#define MTX_LOCK_SPIN(lck, flags) \
+ pushl %eax ; \
+ pushl %ecx ; \
+ pushl %ebx ; \
+ movl $(MTX_UNOWNED) , %eax ; \
+ movl PCPU(CURPROC), %ebx ; \
+ pushfl ; \
+ popl %ecx ; \
+ cli ; \
+ MPLOCKED cmpxchgl %ebx, lck+MTX_LOCK ; \
+ jz 2f ; \
+ cmpl lck+MTX_LOCK, %ebx ; \
+ je 3f ; \
+ pushl $0 ; \
+ pushl $0 ; \
+ pushl %ecx ; \
+ pushl $flags ; \
+ pushl $lck ; \
+ call _mtx_lock_spin ; \
+ addl $0x14, %esp ; \
+ jmp 1f ; \
+3: movl lck+MTX_RECURSECNT, %ebx ; \
+ incl %ebx ; \
+ movl %ebx, lck+MTX_RECURSECNT ; \
+ jmp 1f ; \
+2: movl %ecx, lck+MTX_SAVEINTR ; \
+1: popl %ebx ; \
+ popl %ecx ; \
+ popl %eax
+
+#define MTX_UNLOCK_SPIN(lck) \
+ pushl %edx ; \
+ pushl %eax ; \
+ movl lck+MTX_SAVEINTR, %edx ; \
+ movl lck+MTX_RECURSECNT, %eax ; \
+ testl %eax, %eax ; \
+ jne 2f ; \
+ movl $(MTX_UNOWNED), %eax ; \
+ xchgl %eax, lck+MTX_LOCK ; \
+ pushl %edx ; \
+ popfl ; \
+ jmp 1f ; \
+2: decl %eax ; \
+ movl %eax, lck+MTX_RECURSECNT ; \
+1: popl %eax ; \
+ popl %edx
+/*
+ * XXX: These two are broken right now and need to be made to work for
+ * XXX: sleep locks, as the above two work for spin locks. We're not in
+ * XXX: too much of a rush to do these as we do not use them right now.
+ */
#define MTX_ENTER(lck, type) \
pushl $0 ; /* dummy __LINE__ */ \
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_enter ; \
+ call _mtx_lock_XXX ; \
addl $16,%esp
#define MTX_EXIT(lck, type) \
@@ -275,7 +318,7 @@ extern char STR_SIEN[];
pushl $0 ; /* dummy __FILE__ */ \
pushl $type ; \
pushl $lck ; \
- call _mtx_exit ; \
+ call _mtx_unlock_XXX ; \
addl $16,%esp
#endif /* !LOCORE */
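The comment added above ties the MTX_LOCK_SPIN/MTX_UNLOCK_SPIN assembly macros to the C-level mtx_lock_flags() and mtx_unlock_flags() general macros. A minimal sketch of how the flag-taking C forms relate to the plain ones, using only flags that appear elsewhere in this patch:

	mtx_lock_flags(&Giant, 0);			/* same effect as mtx_lock(&Giant) */
	/* ... */
	mtx_unlock_flags(&Giant, MTX_NOSWITCH);		/* release without switching, as in cpu_exit() */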
diff --git a/sys/i386/include/profile.h b/sys/i386/include/profile.h
index 28db117..5f2a780 100644
--- a/sys/i386/include/profile.h
+++ b/sys/i386/include/profile.h
@@ -66,8 +66,8 @@
#ifdef SMP
#define MCOUNT_ENTER(s) { s = read_eflags(); \
__asm __volatile("cli" : : : "memory"); \
- mtx_enter(&mcount_mtx, MTX_DEF); }
-#define MCOUNT_EXIT(s) { mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
+ mtx_lock(&mcount_mtx); }
+#define MCOUNT_EXIT(s) { mtx_unlock(&mcount_mtx); write_eflags(s); }
#else
#define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); }
#define MCOUNT_EXIT(s) (write_eflags(s))
diff --git a/sys/i386/isa/clock.c b/sys/i386/isa/clock.c
index d7a1ff0..bbd066b 100644
--- a/sys/i386/isa/clock.c
+++ b/sys/i386/isa/clock.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/i386/isa/if_el.c b/sys/i386/isa/if_el.c
index ed53636..0e980c5 100644
--- a/sys/i386/isa/if_el.c
+++ b/sys/i386/isa/if_el.c
@@ -110,8 +110,8 @@ DRIVER_MODULE(if_el, isa, el_driver, el_devclass, 0, 0);
#define CSR_READ_1(sc, reg) \
bus_space_read_1(sc->el_btag, sc->el_bhandle, reg)
-#define EL_LOCK(_sc) mtx_enter(&(_sc)->el_mtx, MTX_DEF)
-#define EL_UNLOCK(_sc) mtx_exit(&(_sc)->el_mtx, MTX_DEF)
+#define EL_LOCK(_sc) mtx_lock(&(_sc)->el_mtx)
+#define EL_UNLOCK(_sc) mtx_unlock(&(_sc)->el_mtx)
/* Probe routine. See if the card is there and at the right place. */
static int
diff --git a/sys/i386/isa/intr_machdep.c b/sys/i386/isa/intr_machdep.c
index d44a672..70b9378 100644
--- a/sys/i386/isa/intr_machdep.c
+++ b/sys/i386/isa/intr_machdep.c
@@ -701,7 +701,7 @@ inthand_remove(struct intrhand *idesc)
ithds[ithd->irq] = NULL;
if ((idesc->ih_flags & INTR_FAST) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
ithd->it_proc->p_intr_nesting_level = 0;
ithd->it_proc->p_stat = SRUN;
@@ -713,7 +713,7 @@ inthand_remove(struct intrhand *idesc)
 * XXX: should we lower the thread's priority?
*/
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
free(idesc->ih_name, M_DEVBUF);
diff --git a/sys/i386/isa/ithread.c b/sys/i386/isa/ithread.c
index 5f64861..99a1abf 100644
--- a/sys/i386/isa/ithread.c
+++ b/sys/i386/isa/ithread.c
@@ -114,7 +114,7 @@ sched_ithd(void *cookie)
* is higher priority than their current thread, it gets run now.
*/
ir->it_need = 1;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ir->it_proc->p_stat == SWAIT) { /* not on run queue */
CTR1(KTR_INTR, "sched_ithd: setrunqueue %d",
ir->it_proc->p_pid);
@@ -134,7 +134,7 @@ sched_ithd(void *cookie)
ir->it_proc->p_stat );
need_resched();
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -163,7 +163,7 @@ ithd_loop(void *dummy)
me->it_proc->p_pid, me->it_proc->p_comm);
curproc->p_ithd = NULL;
free(me, M_DEVBUF);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
kthread_exit(0);
}
@@ -188,10 +188,10 @@ ithd_loop(void *dummy)
ih->ih_flags);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
}
@@ -201,7 +201,7 @@ ithd_loop(void *dummy)
* set again, so we have to check it again.
*/
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!me->it_need) {
INTREN (1 << me->irq); /* reset the mask bit */
@@ -217,6 +217,6 @@ ithd_loop(void *dummy)
CTR1(KTR_INTR, "ithd_loop pid %d: resumed",
me->it_proc->p_pid);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/i386/isa/nmi.c b/sys/i386/isa/nmi.c
index d44a672..70b9378 100644
--- a/sys/i386/isa/nmi.c
+++ b/sys/i386/isa/nmi.c
@@ -701,7 +701,7 @@ inthand_remove(struct intrhand *idesc)
ithds[ithd->irq] = NULL;
if ((idesc->ih_flags & INTR_FAST) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (ithd->it_proc->p_stat == SWAIT) {
ithd->it_proc->p_intr_nesting_level = 0;
ithd->it_proc->p_stat = SRUN;
@@ -713,7 +713,7 @@ inthand_remove(struct intrhand *idesc)
 * XXX: should we lower the thread's priority?
*/
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
free(idesc->ih_name, M_DEVBUF);
diff --git a/sys/i386/isa/npx.c b/sys/i386/isa/npx.c
index a729e0f..0dab6ae 100644
--- a/sys/i386/isa/npx.c
+++ b/sys/i386/isa/npx.c
@@ -724,7 +724,7 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
PCPU_GET(npxproc), curproc, npx_exists);
@@ -783,7 +783,7 @@ npx_intr(dummy)
*/
psignal(curproc, SIGFPE);
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index fc2f84a..961d9c2 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -86,7 +86,7 @@ interrupt(u_int64_t vector, struct trapframe *framep)
case 240: /* clock interrupt */
CTR0(KTR_INTR, "clock interrupt");
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
cnt.v_intr++;
#ifdef EVCNT_COUNTERS
clock_intr_evcnt.ev_count++;
@@ -98,11 +98,11 @@ interrupt(u_int64_t vector, struct trapframe *framep)
/* divide hz (1024) by 8 to get stathz (128) */
if((++schedclk2 & 0x7) == 0)
statclock((struct clockframe *)framep);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
break;
default:
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
panic("unexpected interrupt: vec %ld\n", vector);
/* NOTREACHED */
}
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index 62abd6a..e702ddc 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -597,7 +597,7 @@ ia64_init()
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
#if 0
/*
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index 0451e05..1750251 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -628,7 +628,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -645,7 +645,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
/*
diff --git a/sys/ia64/ia64/procfs_machdep.c b/sys/ia64/ia64/procfs_machdep.c
index 3f6f524..81b6e32 100644
--- a/sys/ia64/ia64/procfs_machdep.c
+++ b/sys/ia64/ia64/procfs_machdep.c
@@ -86,12 +86,12 @@ procfs_read_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_regs(p, regs));
}
@@ -101,12 +101,12 @@ procfs_write_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_regs(p, regs));
}
@@ -121,9 +121,9 @@ procfs_read_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
return (fill_fpregs(p, fpregs));
@@ -135,12 +135,12 @@ procfs_write_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_fpregs(p, fpregs));
}
diff --git a/sys/ia64/ia64/trap.c b/sys/ia64/ia64/trap.c
index fe14545..b127fd7 100644
--- a/sys/ia64/ia64/trap.c
+++ b/sys/ia64/ia64/trap.c
@@ -90,10 +90,10 @@ userret(register struct proc *p, struct trapframe *frame, u_quad_t oticks)
/* take pending signals */
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (want_resched) {
/*
@@ -109,30 +109,30 @@ userret(register struct proc *p, struct trapframe *frame, u_quad_t oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
splx(s);
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* If profiling, charge recent system time to the trapped pc.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_cr_iip,
(int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
static const char *ia64_vector_names[] = {
@@ -249,9 +249,9 @@ trap(int vector, int imm, struct trapframe *framep)
user = ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER);
if (user) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_tf = framep;
} else {
sticks = 0; /* XXX bogus -Wuninitialized warning */
@@ -265,12 +265,12 @@ trap(int vector, int imm, struct trapframe *framep)
* and per-process unaligned-access-handling flags).
*/
if (user) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if ((i = unaligned_fixup(framep, p)) == 0) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
ucode = framep->tf_cr_ifa; /* VA */
break;
}
@@ -330,7 +330,7 @@ trap(int vector, int imm, struct trapframe *framep)
vm_prot_t ftype = 0;
int rv;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* If it was caused by fuswintr or suswintr,
* just punt. Note that we check the faulting
@@ -345,7 +345,7 @@ trap(int vector, int imm, struct trapframe *framep)
p->p_addr->u_pcb.pcb_accessaddr == va) {
framep->tf_cr_iip = p->p_addr->u_pcb.pcb_onfault;
p->p_addr->u_pcb.pcb_onfault = 0;
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
@@ -455,11 +455,11 @@ trap(int vector, int imm, struct trapframe *framep)
rv = KERN_INVALID_ADDRESS;
}
if (rv == KERN_SUCCESS) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
ucode = va;
i = SIGSEGV;
#ifdef DEBUG
@@ -480,7 +480,7 @@ out:
if (user) {
userret(p, framep, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
return;
@@ -521,11 +521,11 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
cnt.v_syscall++;
p = curproc;
p->p_md.md_tf = framep;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* Skip past the break instruction. Remember old address in case
* we have to restart.
@@ -618,7 +618,7 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
* is not the case, this code will need to be revisited.
*/
STOPEVENT(p, S_SCX, code);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -646,13 +646,13 @@ child_return(p)
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, SYS_fork, 0, 0);
}
#endif
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
@@ -667,9 +667,9 @@ ast(framep)
u_quad_t sticks;
p = curproc;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_tf = framep;
if ((framep->tf_cr_ipsr & IA64_PSR_CPL) != IA64_PSR_CPL_USER)
@@ -678,36 +678,36 @@ ast(framep)
cnt.v_soft++;
PCPU_SET(astpending, 0);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, framep, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
extern int ia64_unaligned_print, ia64_unaligned_fix;
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index be2cb4d..9b8042a 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -303,8 +303,8 @@ cpu_exit(p)
ia64_fpstate_drop(p);
(void) splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
diff --git a/sys/ia64/include/cpu.h b/sys/ia64/include/cpu.h
index 8a883bf..2574266 100644
--- a/sys/ia64/include/cpu.h
+++ b/sys/ia64/include/cpu.h
@@ -83,10 +83,10 @@ struct clockframe {
* through trap, marking the proc as needing a profiling tick.
*/
#define need_proftick(p) do { \
- mtx_enter(&sched_lock, MTX_SPIN); \
+ mtx_lock_spin(&sched_lock); \
(p)->p_sflag |= PS_OWEUPC; \
aston(); \
- mtx_exit(&sched_lock, MTX_SPIN); \
+ mtx_unlock_spin(&sched_lock); \
} while (0)
/*
diff --git a/sys/ia64/include/mutex.h b/sys/ia64/include/mutex.h
index 56e1f2e..0e0173c 100644
--- a/sys/ia64/include/mutex.h
+++ b/sys/ia64/include/mutex.h
@@ -42,22 +42,10 @@
* Debugging
*/
#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "psr.i";
-char STR_IDIS[] = "!psr.i";
-char STR_SIEN[] = "mpp->mtx_saveintr & IA64_PSR_I";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-
-#endif /* MUTEX_DEBUG */
-
-#define ASS_IEN MPASS2((save_intr() & IA64_PSR_I), STR_IEN)
-#define ASS_IDIS MPASS2(!(save_intr() & IA64_PSR_I), STR_IDIS)
-#define ASS_SIEN(mpp) MPASS2(((mpp)->mtx_saveintr & IA64_PSR_I), STR_SIEN)
+#define ASS_IEN MPASS2((save_intr() & IA64_PSR_I), "psr.i")
+#define ASS_IDIS MPASS2(!(save_intr() & IA64_PSR_I), "!psr.i")
+#define ASS_SIEN(mpp) MPASS2(((mpp)->mtx_saveintr & IA64_PSR_I), \
+ "mpp->mtx_saveintr & IA64_PSR_I")
#define mtx_legal2block() (save_intr() & IA64_PSR_I)
diff --git a/sys/isa/atrtc.c b/sys/isa/atrtc.c
index d7a1ff0..bbd066b 100644
--- a/sys/isa/atrtc.c
+++ b/sys/isa/atrtc.c
@@ -207,7 +207,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -215,7 +215,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -232,14 +232,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -247,7 +247,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -255,7 +255,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -403,7 +403,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -411,7 +411,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -525,10 +525,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -679,7 +679,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -688,7 +688,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -703,11 +703,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1194,7 +1194,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1218,7 +1218,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/isa/sio.c b/sys/isa/sio.c
index 332ce49..be5f642 100644
--- a/sys/isa/sio.c
+++ b/sys/isa/sio.c
@@ -856,7 +856,7 @@ sioprobe(dev, xrid)
* but mask them in the processor as well in case there are some
* (misconfigured) shared interrupts.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
/* EXTRA DELAY? */
/*
@@ -953,7 +953,7 @@ sioprobe(dev, xrid)
CLR_FLAG(dev, COM_C_IIR_TXRDYBUG);
}
sio_setreg(com, com_cfcr, CFCR_8BITS);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (iobase == siocniobase ? 0 : result);
}
@@ -993,7 +993,7 @@ sioprobe(dev, xrid)
irqmap[3] = isa_irq_pending();
failures[9] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_NOPEND;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
irqs = irqmap[1] & ~irqmap[0];
if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 &&
@@ -1181,7 +1181,7 @@ sioattach(dev, xrid)
} else
com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED;
if (siosetwater(com, com->it_in.c_ispeed) != 0) {
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Leave i/o resources allocated if this is a `cn'-level
* console, so that other devices can't snarf them.
@@ -1190,7 +1190,7 @@ sioattach(dev, xrid)
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (ENOMEM);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
termioschars(&com->it_in);
com->it_out = com->it_in;
@@ -1485,7 +1485,7 @@ open_top:
}
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
(void) inb(com->line_status_port);
(void) inb(com->data_port);
com->prev_modem_status = com->last_modem_status
@@ -1497,7 +1497,7 @@ open_top:
outb(com->intr_ctl_port, IER_ERXRDY | IER_ETXRDY
| IER_ERLS | IER_EMSC);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Handle initial DCD. Callout devices get a fake initial
* DCD (trapdoor DCD). If we are callout, then any sleeping
@@ -1753,7 +1753,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
incc = com->iptr - buf;
if (tp->t_rawq.c_cc + incc > tp->t_ihiwat
&& (com->state & CS_RTS_IFLOW
@@ -1774,7 +1774,7 @@ sioinput(com)
tp->t_lflag &= ~FLUSHO;
comstart(tp);
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
} else {
do {
@@ -1783,7 +1783,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
line_status = buf[com->ierroff];
recv_data = *buf++;
if (line_status
@@ -1798,7 +1798,7 @@ sioinput(com)
recv_data |= TTY_PE;
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
}
com_events -= (com->iptr - com->ibuf);
@@ -1823,9 +1823,9 @@ siointr(arg)
#ifndef COM_MULTIPORT
com = (struct com_s *)arg;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#else /* COM_MULTIPORT */
bool_t possibly_more_intrs;
int unit;
@@ -1837,7 +1837,7 @@ siointr(arg)
* devices, then the edge from one may be lost because another is
* on.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
do {
possibly_more_intrs = FALSE;
for (unit = 0; unit < sio_numunits; ++unit) {
@@ -1856,7 +1856,7 @@ siointr(arg)
/* XXX COM_UNLOCK(); */
}
} while (possibly_more_intrs);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#endif /* COM_MULTIPORT */
}
@@ -2264,7 +2264,7 @@ repeat:
* Discard any events related to never-opened or
* going-away devices.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
incc = com->iptr - com->ibuf;
com->iptr = com->ibuf;
if (com->state & CS_CHECKMSR) {
@@ -2272,33 +2272,33 @@ repeat:
com->state &= ~CS_CHECKMSR;
}
com_events -= incc;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
continue;
}
if (com->iptr != com->ibuf) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
sioinput(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (com->state & CS_CHECKMSR) {
u_char delta_modem_status;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta_modem_status = com->last_modem_status
^ com->prev_modem_status;
com->prev_modem_status = com->last_modem_status;
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_CHECKMSR;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta_modem_status & MSR_DCD)
(*linesw[tp->t_line].l_modem)
(tp, com->prev_modem_status & MSR_DCD);
}
if (com->state & CS_ODONE) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_ODONE;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (!(com->state & CS_BUSY)
&& !(com->extra_state & CSE_BUSYCHECK)) {
timeout(siobusycheck, com, hz / 100);
@@ -2484,7 +2484,7 @@ comparam(tp, t)
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
splx(s);
comstart(tp);
if (com->ibufold != NULL) {
@@ -2518,7 +2518,7 @@ siosetwater(com, speed)
for (ibufsize = 128; ibufsize < cp4ticks;)
ibufsize <<= 1;
if (ibufsize == com->ibufsize) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (0);
}
@@ -2528,7 +2528,7 @@ siosetwater(com, speed)
*/
ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT);
if (ibuf == NULL) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (ENOMEM);
}
@@ -2546,7 +2546,7 @@ siosetwater(com, speed)
* Read current input buffer, if any. Continue with interrupts
* disabled.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->iptr != com->ibuf)
sioinput(com);
@@ -2581,7 +2581,7 @@ comstart(tp)
if (com == NULL)
return;
s = spltty();
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (tp->t_state & TS_TTSTOP)
com->state &= ~CS_TTGO;
else
@@ -2594,7 +2594,7 @@ comstart(tp)
&& com->state & CS_RTS_IFLOW)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
ttwwakeup(tp);
splx(s);
@@ -2610,7 +2610,7 @@ comstart(tp)
sizeof com->obuf1);
com->obufs[0].l_next = NULL;
com->obufs[0].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -2622,7 +2622,7 @@ comstart(tp)
com->obufq.l_next = &com->obufs[0];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) {
com->obufs[1].l_tail
@@ -2630,7 +2630,7 @@ comstart(tp)
sizeof com->obuf2);
com->obufs[1].l_next = NULL;
com->obufs[1].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -2642,14 +2642,14 @@ comstart(tp)
com->obufq.l_next = &com->obufs[1];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
tp->t_state |= TS_BUSY;
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com); /* fake interrupt to start output */
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
ttwwakeup(tp);
splx(s);
}
@@ -2664,7 +2664,7 @@ comstop(tp, rw)
com = com_addr(DEV_TO_UNIT(tp->t_dev));
if (com == NULL || com->gone)
return;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (rw & FWRITE) {
if (com->hasfifo)
#ifdef COM_ESP
@@ -2691,7 +2691,7 @@ comstop(tp, rw)
com_events -= (com->iptr - com->ibuf);
com->iptr = com->ibuf;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
comstart(tp);
}
@@ -2734,7 +2734,7 @@ commctl(com, bits, how)
mcr |= MCR_RTS;
if (com->gone)
return(0);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
switch (how) {
case DMSET:
outb(com->modem_ctl_port,
@@ -2747,7 +2747,7 @@ commctl(com, bits, how)
outb(com->modem_ctl_port, com->mcr_image &= ~mcr);
break;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
return (0);
}
@@ -2806,9 +2806,9 @@ comwakeup(chan)
com = com_addr(unit);
if (com != NULL && !com->gone
&& (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
}
@@ -2830,10 +2830,10 @@ comwakeup(chan)
u_int delta;
u_long total;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta = com->delta_error_counts[errnum];
com->delta_error_counts[errnum] = 0;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta == 0)
continue;
total = com->error_counts[errnum] += delta;
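A minimal sketch of the spin-mutex mapping applied throughout the sio.c hunks above, shown on a hypothetical example_lock (not a lock defined in this patch) and assuming the kernel's mutex declarations are in scope; the MTX_SPIN flag moves out of the argument list and into the function name:

static struct mtx example_lock;	/* hypothetical stand-in for sio_lock */

static void
example_intr(void)
{

	/* Old API: mtx_enter(&example_lock, MTX_SPIN); */
	mtx_lock_spin(&example_lock);
	/* ... touch driver state protected by the spin lock ... */
	/* Old API: mtx_exit(&example_lock, MTX_SPIN); */
	mtx_unlock_spin(&example_lock);
}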
diff --git a/sys/isofs/cd9660/cd9660_node.c b/sys/isofs/cd9660/cd9660_node.c
index f260802..00f79e8 100644
--- a/sys/isofs/cd9660/cd9660_node.c
+++ b/sys/isofs/cd9660/cd9660_node.c
@@ -102,18 +102,18 @@ cd9660_ihashget(dev, inum)
struct vnode *vp;
loop:
- mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&cd9660_ihash_mtx);
for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&cd9660_ihash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (vp);
}
}
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_unlock(&cd9660_ihash_mtx);
return (NULL);
}
@@ -127,14 +127,14 @@ cd9660_ihashins(ip)
struct proc *p = curproc; /* XXX */
struct iso_node **ipp, *iq;
- mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&cd9660_ihash_mtx);
ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)];
if ((iq = *ipp) != NULL)
iq->i_prev = &ip->i_next;
ip->i_next = iq;
ip->i_prev = ipp;
*ipp = ip;
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_unlock(&cd9660_ihash_mtx);
lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
}
@@ -148,7 +148,7 @@ cd9660_ihashrem(ip)
{
register struct iso_node *iq;
- mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_lock(&cd9660_ihash_mtx);
if ((iq = ip->i_next) != NULL)
iq->i_prev = ip->i_prev;
*ip->i_prev = iq;
@@ -156,7 +156,7 @@ cd9660_ihashrem(ip)
ip->i_next = NULL;
ip->i_prev = NULL;
#endif
- mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
+ mtx_unlock(&cd9660_ihash_mtx);
}
/*
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 96db3b3..b5fc907 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -485,9 +485,9 @@ exec_elf_imgact(struct image_params *imgp)
* a context switch. Better safe than sorry; I really don't want
* the file to change while it's being loaded.
*/
- mtx_enter(&imgp->vp->v_interlock, MTX_DEF);
+ mtx_lock(&imgp->vp->v_interlock);
imgp->vp->v_flag |= VTEXT;
- mtx_exit(&imgp->vp->v_interlock, MTX_DEF);
+ mtx_unlock(&imgp->vp->v_interlock);
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index ffa711f..0214ed1 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -455,7 +455,7 @@ start_init(void *dummy)
char *ucp, **uap, *arg0, *arg1;
struct proc *p;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
p = curproc;
@@ -555,7 +555,7 @@ start_init(void *dummy)
* to user mode as init!
*/
if ((error = execve(p, &args)) == 0) {
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return;
}
if (error != ENOENT)
@@ -584,9 +584,9 @@ create_init(const void *udata __unused)
PROC_LOCK(initproc);
initproc->p_flag |= P_SYSTEM;
PROC_UNLOCK(initproc);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
initproc->p_sflag |= PS_INMEM;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
cpu_set_fork_handler(initproc, start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
@@ -598,9 +598,9 @@ static void
kick_init(const void *udata __unused)
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
initproc->p_stat = SRUN;
setrunqueue(initproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
diff --git a/sys/kern/kern_acct.c b/sys/kern/kern_acct.c
index 9220e57..cfa8cbe 100644
--- a/sys/kern/kern_acct.c
+++ b/sys/kern/kern_acct.c
@@ -194,9 +194,9 @@ acct_process(p)
bcopy(p->p_comm, acct.ac_comm, sizeof acct.ac_comm);
/* (2) The amount of user and system time that was used */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &ut, &st, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_usec);
acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_usec);
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 6d45911..0743c6c 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -170,17 +170,17 @@ hardclock(frame)
if (CLKF_USERMODE(frame) &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag |= PS_ALRMPEND;
aston();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag |= PS_PROFPEND;
aston();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
@@ -200,13 +200,13 @@ hardclock(frame)
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
ticks++;
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
need_softclock = 1;
} else if (softticks + 1 == ticks)
++softticks;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
/*
* sched_swi acquires sched_lock, so we don't want to call it with
@@ -292,7 +292,7 @@ startprofclock(p)
* it should be protected later on by a time_lock, which would
* cover psdiv, etc. as well.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
if (++profprocs == 1 && stathz != 0) {
@@ -302,7 +302,7 @@ startprofclock(p)
splx(s);
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -314,7 +314,7 @@ stopprofclock(p)
{
int s;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
p->p_sflag &= ~PS_PROFIL;
if (--profprocs == 0 && stathz != 0) {
@@ -324,7 +324,7 @@ stopprofclock(p)
splx(s);
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -347,7 +347,7 @@ statclock(frame)
struct rusage *ru;
struct vmspace *vm;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (CLKF_USERMODE(frame)) {
/*
@@ -362,7 +362,7 @@ statclock(frame)
forward_statclock(pscnt);
#endif
if (--pscnt > 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
/*
@@ -392,7 +392,7 @@ statclock(frame)
forward_statclock(pscnt);
#endif
if (--pscnt > 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
/*
@@ -435,7 +435,7 @@ statclock(frame)
ru->ru_maxrss = rss;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 05b8dc7..d8b97bf 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -138,9 +138,9 @@ cv_switch_catch(struct proc *p)
* stopped, p->p_wchan will be 0 upon return from CURSIG.
*/
p->p_sflag |= PS_SINTR;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
sig = CURSIG(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (sig != 0) {
if (p->p_wchan != NULL)
cv_waitq_remove(p);
@@ -199,7 +199,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -207,25 +207,25 @@ cv_wait(struct cv *cvp, struct mtx *mp)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
cv_switch(p);
curpriority = p->p_usrpri;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
PICKUP_GIANT();
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
}
@@ -253,7 +253,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -261,19 +261,19 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
sig = cv_switch_catch(p);
curpriority = p->p_usrpri;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
/* proc_lock(p); */
@@ -291,7 +291,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
return (rval);
@@ -319,7 +319,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -327,13 +327,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
@@ -346,13 +346,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
} else
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
PICKUP_GIANT();
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
return (rval);
@@ -382,7 +382,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
@@ -390,13 +390,13 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
* procs or panic below, in case this is the idle process and
* already asleep.
*/
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
CV_WAIT_VALIDATE(cvp, mp);
DROP_GIANT_NOSWITCH();
- mtx_exit(mp, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mp, MTX_NOSWITCH);
cv_waitq_add(cvp, p);
callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
@@ -409,7 +409,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
} else
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
/* proc_lock(p); */
@@ -427,7 +427,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 0, 0);
#endif
- mtx_enter(mp, MTX_DEF);
+ mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
return (rval);
@@ -480,12 +480,12 @@ cv_signal(struct cv *cvp)
{
KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
CV_SIGNAL_VALIDATE(cvp);
cv_wakeup(cvp);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -497,11 +497,11 @@ cv_broadcast(struct cv *cvp)
{
KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__));
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
CV_SIGNAL_VALIDATE(cvp);
while (!TAILQ_EMPTY(&cvp->cv_waitq))
cv_wakeup(cvp);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -513,13 +513,13 @@ cv_waitq_remove(struct proc *p)
{
struct cv *cvp;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((cvp = p->p_wchan) != NULL && p->p_sflag & PS_CVWAITQ) {
TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
p->p_sflag &= ~PS_CVWAITQ;
p->p_wchan = NULL;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -534,7 +534,7 @@ cv_timedwait_end(void *arg)
p = arg;
CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid,
p->p_comm);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan != NULL) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@@ -542,5 +542,5 @@ cv_timedwait_end(void *arg)
cv_waitq_remove(p);
p->p_sflag |= PS_TIMEOUT;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
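A minimal sketch of the sleep-mutex mapping used in kern_condvar.c above, on a hypothetical example_mtx; the flags-carrying variant replaces the old MTX_DEF | MTX_NOSWITCH form, and sched_lock keeps its spin variants:

static struct mtx example_mtx;	/* hypothetical sleep mutex */

static void
example_block(void)
{

	/* Old API: mtx_enter(&example_mtx, MTX_DEF); */
	mtx_lock(&example_mtx);
	/* Old API: mtx_exit(&example_mtx, MTX_DEF | MTX_NOSWITCH); */
	mtx_unlock_flags(&example_mtx, MTX_NOSWITCH);
	/* Old API: mtx_enter/mtx_exit(&sched_lock, MTX_SPIN); */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sched_lock);
}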
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 5e803e1..71e6288 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -314,9 +314,9 @@ exit1(p, rv)
*/
p->p_xstat = rv;
*p->p_ru = p->p_stats->p_ru;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ruadd(p->p_ru, &p->p_stats->p_cru);
/*
@@ -457,9 +457,9 @@ loop:
}
nfound++;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SZOMB) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
PROCTREE_LOCK(PT_RELEASE);
@@ -579,7 +579,7 @@ loop:
}
if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
(p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_flag |= P_WAITED;
PROC_UNLOCK(p);
PROCTREE_LOCK(PT_RELEASE);
@@ -598,7 +598,7 @@ loop:
error = 0;
return (error);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
}
PROCTREE_LOCK(PT_RELEASE);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 2e2318f..8a9e835 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -380,11 +380,11 @@ again:
* The p_stats and p_sigacts substructs are set in vm_fork.
*/
p2->p_flag = 0;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p2->p_sflag = PS_INMEM;
if (p1->p_sflag & PS_PROFIL)
startprofclock(p2);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
M_SUBPROC, M_WAITOK);
bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
@@ -554,10 +554,10 @@ again:
p2->p_acflag = AFORK;
if ((flags & RFSTOPPED) == 0) {
splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p2->p_stat = SRUN;
setrunqueue(p2);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
spl0();
}
@@ -649,7 +649,7 @@ fork_exit(callout, arg, frame)
{
struct proc *p;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* XXX: We really shouldn't have to do this.
*/
@@ -674,7 +674,7 @@ fork_exit(callout, arg, frame)
*/
p = CURPROC;
if (p->p_flag & P_KTHREAD) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
p->p_comm, p->p_pid);
kthread_exit(0);
@@ -698,11 +698,11 @@ fork_return(p, frame)
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, SYS_fork, 0, 0);
}
#endif
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
}
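A minimal sketch of the conditional Giant handling that fork_return() above now spells with the new names (hypothetical helper; assumes the usual mutex declarations are in scope):

static void
example_trace_return(void)
{

	/* Take Giant only if this thread does not already hold it. */
	if (!mtx_owned(&Giant))
		mtx_lock(&Giant);
	/* ... work that still requires Giant, e.g. ktrace ... */
	if (mtx_owned(&Giant))
		mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);
}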
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index a24893d..fdf3566 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -105,8 +105,8 @@ idle_proc(void *dummy)
#endif
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index e4411c9..e7915c4 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -168,7 +168,7 @@ sched_swi(struct intrhand *ih, int flag)
ih->ih_need = 1;
if (!(flag & SWI_DELAY)) {
it->it_need = 1;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SWAIT) { /* not on run queue */
CTR1(KTR_INTR, "sched_swi: setrunqueue %d", p->p_pid);
/* membar_lock(); */
@@ -180,7 +180,7 @@ sched_swi(struct intrhand *ih, int flag)
CTR3(KTR_INTR, "sched_swi %d: it_need %d, state %d",
p->p_pid, it->it_need, p->p_stat );
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
need_resched();
}
}
@@ -223,10 +223,10 @@ sithd_loop(void *dummy)
ih->ih_flags);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & INTR_MPSAFE) == 0)
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
}
@@ -236,14 +236,14 @@ sithd_loop(void *dummy)
* set again, so we have to check it again.
*/
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!it->it_need) {
p->p_stat = SWAIT; /* we're idle */
CTR1(KTR_INTR, "sithd_loop pid %d: done", p->p_pid);
mi_switch();
CTR1(KTR_INTR, "sithd_loop pid %d: resumed", p->p_pid);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index f9ca35f..b322bc1 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -103,13 +103,13 @@ kthread_create(void (*func)(void *), void *arg,
cpu_set_fork_handler(p2, func, arg);
/* Delay putting it on the run queue until now. */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p2->p_sflag |= PS_INMEM;
if (!(flags & RFSTOPPED)) {
p2->p_stat = SRUN;
setrunqueue(p2);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index afd59f2..d5987f5 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -144,11 +144,11 @@ apause(struct lock *lkp, int flags)
return 0;
#ifdef SMP
for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
if ((lkp->lk_flags & flags) == 0)
break;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
if ((lkp->lk_flags & flags) == 0)
return 0;
}
@@ -236,9 +236,9 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
else
pid = p->p_pid;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK)
- mtx_exit(interlkp, MTX_DEF);
+ mtx_unlock(interlkp);
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
@@ -451,7 +451,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
default:
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
/* NOTREACHED */
@@ -462,7 +462,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
return (error);
}
@@ -506,12 +506,12 @@ lockinit(lkp, prio, wmesg, timo, flags)
"timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
if (lock_mtx_array != NULL) {
- mtx_enter(&lock_mtx, MTX_DEF);
+ mtx_lock(&lock_mtx);
lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
lock_mtx_selector++;
if (lock_mtx_selector == lock_nmtx)
lock_mtx_selector = 0;
- mtx_exit(&lock_mtx, MTX_DEF);
+ mtx_unlock(&lock_mtx);
} else {
/*
* Giving lockmgr locks that are initialized during boot a
@@ -561,7 +561,7 @@ lockstatus(lkp, p)
{
int lock_type = 0;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
if (lkp->lk_exclusivecount != 0) {
if (p == NULL || lkp->lk_lockholder == p->p_pid)
lock_type = LK_EXCLUSIVE;
@@ -569,7 +569,7 @@ lockstatus(lkp, p)
lock_type = LK_EXCLOTHER;
} else if (lkp->lk_sharecount != 0)
lock_type = LK_SHARED;
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
return (lock_type);
}
@@ -582,9 +582,9 @@ lockcount(lkp)
{
int count;
- mtx_enter(lkp->lk_interlock, MTX_DEF);
+ mtx_lock(lkp->lk_interlock);
count = lkp->lk_exclusivecount + lkp->lk_sharecount;
- mtx_exit(lkp->lk_interlock, MTX_DEF);
+ mtx_unlock(lkp->lk_interlock);
return (count);
}
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 089d867..a6447a5 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -154,7 +154,7 @@ malloc(size, type, flags)
indx = BUCKETINDX(size);
kbp = &bucket[indx];
s = splmem();
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
while (ksp->ks_memuse >= ksp->ks_limit) {
if (flags & M_ASLEEP) {
if (ksp->ks_limblocks < 65535)
@@ -163,7 +163,7 @@ malloc(size, type, flags)
}
if (flags & M_NOWAIT) {
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
return ((void *) NULL);
}
if (ksp->ks_limblocks < 65535)
@@ -183,7 +183,7 @@ malloc(size, type, flags)
allocsize = 1 << indx;
npg = btoc(allocsize);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
if (va == NULL) {
@@ -194,7 +194,7 @@ malloc(size, type, flags)
* Enter malloc_mtx after the error check to avoid having to
* immediately exit it again if there is an error.
*/
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
kbp->kb_total += kbp->kb_elmpercl;
kup = btokup(va);
@@ -278,7 +278,7 @@ out:
if (ksp->ks_memuse > ksp->ks_maxused)
ksp->ks_maxused = ksp->ks_memuse;
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
/* XXX: Do idle pre-zeroing. */
if (va != NULL && (flags & M_ZERO))
bzero(va, size);
@@ -314,7 +314,7 @@ free(addr, type)
size = 1 << kup->ku_indx;
kbp = &bucket[kup->ku_indx];
s = splmem();
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
#ifdef INVARIANTS
/*
* Check for returns of data that do not point to the
@@ -329,9 +329,9 @@ free(addr, type)
(void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
if (size > MAXALLOCSAVE) {
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
size = kup->ku_pagecnt << PAGE_SHIFT;
ksp->ks_memuse -= size;
@@ -343,7 +343,7 @@ free(addr, type)
ksp->ks_inuse--;
kbp->kb_total -= 1;
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
return;
}
freep = (struct freelist *)addr;
@@ -410,7 +410,7 @@ free(addr, type)
}
#endif
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
}
/*
@@ -540,7 +540,7 @@ malloc_uninit(data)
#ifdef INVARIANTS
s = splmem();
- mtx_enter(&malloc_mtx, MTX_DEF);
+ mtx_lock(&malloc_mtx);
for (indx = 0; indx < MINBUCKET + 16; indx++) {
kbp = bucket + indx;
freep = (struct freelist*)kbp->kb_next;
@@ -551,7 +551,7 @@ malloc_uninit(data)
}
}
splx(s);
- mtx_exit(&malloc_mtx, MTX_DEF);
+ mtx_unlock(&malloc_mtx);
if (type->ks_memuse != 0)
printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index c13dd1d..64d3972 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -31,6 +31,11 @@
*/
/*
+ * Machine independent bits of mutex implementation and implementation of
+ * `witness' structure & related debugging routines.
+ */
+
+/*
* Main Entry: witness
* Pronunciation: 'wit-n&s
* Function: noun
@@ -53,12 +58,6 @@
#include "opt_ddb.h"
#include "opt_witness.h"
-/*
- * Cause non-inlined mtx_*() to be compiled.
- * Must be defined early because other system headers may include mutex.h.
- */
-#define _KERN_MUTEX_C_
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
@@ -82,9 +81,8 @@
#include <sys/mutex.h>
/*
- * Machine independent bits of the mutex implementation
+ * The WITNESS-enabled mutex debug structure.
*/
-
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;
@@ -100,138 +98,54 @@ struct mtx_debug {
#endif /* WITNESS */
/*
- * Assembly macros
- *------------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-/*
- * Default, unoptimized mutex micro-operations
+ * Internal utility macros.
*/
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#ifndef _obtain_lock
-/* Actually obtain mtx_lock */
-#define _obtain_lock(mp, tid) \
- atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
-#endif
-
-#ifndef _release_lock
-/* Actually release mtx_lock */
-#define _release_lock(mp, tid) \
- atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _release_lock_quick
-/* Actually release mtx_lock quickly assuming that we own it */
-#define _release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _getlock_sleep
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _getlock_spin_block
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
- else \
- (mp)->mtx_saveintr = _mtx_intr; \
-} while (0)
-#endif
+#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-#ifndef _getlock_norecurse
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-#endif
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-#ifndef _exitlock_norecurse
/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
+ * Early WITNESS-enabled declarations.
*/
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-#endif
+#ifdef WITNESS
-#ifndef _exitlock
/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSED) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSED); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _exitlock_spin
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp) do { \
- if (!mtx_recursed((mp))) { \
- int _mtx_intr = (mp)->mtx_saveintr; \
- \
- _release_lock_quick(mp); \
- restore_intr(_mtx_intr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-#endif
+ * Internal WITNESS routines which must be prototyped early.
+ *
+ * XXX: When/if witness code is cleaned up, it would be wise to place all
+ * witness prototyping early in this file.
+ */
+static void witness_init(struct mtx *, int flag);
+static void witness_destroy(struct mtx *);
+static void witness_display(void(*)(const char *fmt, ...));
-#ifdef WITNESS
-static void witness_init(struct mtx *, int flag);
-static void witness_destroy(struct mtx *);
-static void witness_display(void(*)(const char *fmt, ...));
+MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
/* All mutexes in system (used for debug/panic) */
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 };
+
/*
- * Set to 0 once mutexes have been fully initialized so that witness code can be
- * safely executed.
+ * This global is set to 0 once it becomes safe to use the witness code.
*/
static int witness_cold = 1;
+
#else /* WITNESS */
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
+/* XXX XXX XXX
+ * flag++ is sleazoid way of shutting up warning
*/
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_try_enter(m, t, f, l)
#endif /* WITNESS */
-/* All mutexes in system (used for debug/panic) */
+/*
+ * All mutex locks in system are kept on the all_mtx list.
+ */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL }, &all_mtx, &all_mtx,
@@ -242,19 +156,18 @@ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
#endif
};
+/*
+ * Global variables for bookkeeping.
+ */
static int mtx_cur_cnt;
static int mtx_max_cnt;
+/*
+ * Prototypes for non-exported routines.
+ *
+ * NOTE: Prototypes for witness routines are placed at the bottom of the file.
+ */
static void propagate_priority(struct proc *);
-static void mtx_enter_hard(struct mtx *, int type, int saveintr);
-static void mtx_exit_hard(struct mtx *, int type);
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
static void
propagate_priority(struct proc *p)
@@ -277,6 +190,7 @@ propagate_priority(struct proc *p)
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
+
MPASS(p->p_magic == P_MAGIC);
KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
if (p->p_priority <= pri)
@@ -314,7 +228,7 @@ propagate_priority(struct proc *p)
* quit.
*/
if (p->p_stat == SRUN) {
- printf("XXX: moving process %d(%s) to a new run queue\n",
+ printf("XXX: moving proc %d(%s) to a new run queue\n",
p->p_pid, p->p_comm);
MPASS(p->p_blocked == NULL);
remrunqueue(p);
@@ -338,6 +252,7 @@ propagate_priority(struct proc *p)
printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
p->p_comm, m->mtx_description);
+
/*
* Check if the proc needs to be moved up on
* the blocked chain
@@ -346,10 +261,11 @@ propagate_priority(struct proc *p)
printf("XXX: process at head of run queue\n");
continue;
}
+
p1 = TAILQ_PREV(p, rq, p_procq);
if (p1->p_priority <= pri) {
printf(
- "XXX: previous process %d(%s) has higher priority\n",
+ "XXX: previous process %d(%s) has higher priority\n",
p->p_pid, p->p_comm);
continue;
}
@@ -367,6 +283,7 @@ propagate_priority(struct proc *p)
if (p1->p_priority > pri)
break;
}
+
MPASS(p1 != NULL);
TAILQ_INSERT_BEFORE(p1, p, p_procq);
CTR4(KTR_LOCK,
@@ -376,421 +293,332 @@ propagate_priority(struct proc *p)
}
/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
+ * The important part of mtx_trylock{,_flags}()
+ * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
+ * if we're called, it's because we know we don't already own this lock.
*/
-void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *mpp = mtxp;
+ int rval;
- /* bits only valid on mtx_exit() */
- MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type, file, line);
+ KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == (uintptr_t)CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
-done:
- WITNESS_ENTER(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
-
-}
+ /*
+ * _mtx_trylock does not accept MTX_NOSWITCH option.
+ */
+ MPASS((opts & MTX_NOSWITCH) == 0);
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
+ rval = _obtain_lock(m, CURTHD);
- rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
- if (rval && mpp->mtx_witness != NULL) {
- MPASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
+ if (rval && m->mtx_witness != NULL) {
+ /*
+ * We do not handle recursion in _mtx_trylock; see the
+ * note at the top of the routine.
+ */
+ MPASS(!mtx_recursed(m));
+ witness_try_enter(m, (opts | m->mtx_flags), file, line);
}
#endif /* WITNESS */
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, rval, file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
+ m->mtx_description, m, rval, file, line);
return rval;
}
/*
- * Release lock m.
+ * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ *
+ * We call this if the lock is either contested (i.e. we need to go to
+ * sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *const mpp = mtxp;
-
- MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
- WITNESS_EXIT(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- int mtx_intr = mpp->mtx_saveintr;
-
- MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
- file, line);
- _release_lock_quick(mpp);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- restore_intr(mtx_intr);
- }
- } else {
- if (((type & MTX_TOPHALF) == 0) &&
- (type & MTX_FIRST)) {
- ASS_IDIS;
- ASS_SIEN(mpp);
- }
- _exitlock_spin(mpp);
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
+ struct proc *p = CURPROC;
+
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
+ return;
}
-}
-void
-mtx_enter_hard(struct mtx *m, int type, int saveintr)
-{
- struct proc *p = CURPROC;
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
+ (void *)m->mtx_lock, (void *)RETIP(m));
+
+ /*
+ * Save our priority. Even though p_nativepri is protected by
+ * sched_lock, we don't obtain it here as it can be expensive.
+ * Since this is the only place p_nativepri is set, and since two
+ * CPUs will not be executing the same process concurrently, we know
+ * that no other CPU is going to be messing with this. Also,
+ * p_nativepri is only read when we are blocked on a mutex, so that
+ * can't be happening right now either.
+ */
+ p->p_nativepri = p->p_priority;
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
+ while (!_obtain_lock(m, p)) {
+ uintptr_t v;
+ struct proc *p1;
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
- return;
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Check if the lock has been released while spinning for
+ * the sched_lock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
}
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: %p contested (lock=%p) [%p]",
- m, (void *)m->mtx_lock, (void *)RETIP(m));
/*
- * Save our priority. Even though p_nativepri is protected
- * by sched_lock, we don't obtain it here as it can be
- * expensive. Since this is the only place p_nativepri is
- * set, and since two CPUs will not be executing the same
- * process concurrently, we know that no other CPU is going
- * to be messing with this. Also, p_nativepri is only read
- * when we are blocked on a mutex, so that can't be happening
- * right now either.
+ * The mutex was marked contested on release. This means that
+ * there are processes blocked on it.
*/
- p->p_nativepri = p->p_priority;
- while (!_obtain_lock(m, p)) {
- uintptr_t v;
- struct proc *p1;
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL,
+ ("contested mutex has no contesters"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+
+ if (p1->p_priority < p->p_priority)
+ SET_PRIO(p, p1->p_priority);
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit the
- * mutex was either release or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
- (void *)(v | MTX_CONTESTED))) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
+ /*
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
+ }
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
+ /*
+ * We definitely must sleep for this lock.
+ */
+ mtx_assert(m, MA_NOTOWNED);
#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ /*
+ * If we're borrowing an interrupted thread's VM context, we
+ * must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "mtx_lock: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
+ }
#endif
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
+ /*
+ * Put us on the list of threads blocked on this mutex.
+ */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
+ }
- p->p_blocked = m; /* Who we're blocked on */
- p->p_mtxname = m->mtx_description;
- p->p_stat = SMTX;
+ /*
+ * Save who we're blocked on.
+ */
+ p->p_blocked = m;
+ p->p_mtxname = m->mtx_description;
+ p->p_stat = SMTX;
#if 0
- propagate_priority(p);
+ propagate_priority(p);
#endif
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p blocked on [%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p free from blocked on [%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (uintptr_t)p) {
- m->mtx_recurse++;
- return;
- }
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (_obtain_lock(m, p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ m->mtx_description);
+
+ mi_switch();
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
+ p, m, m->mtx_description);
+
+ mtx_unlock_spin(&sched_lock);
+ }
+
+ return;
+}
+
+/*
+ * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ *
+ * This is only called if we need to actually spin for the lock. Recursion
+ * is handled inline.
+ */
+void
+_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+ int line)
+{
+ int i = 0;
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
+
+ for (;;) {
+ if (_obtain_lock(m, CURPROC))
+ break;
+
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY(1);
#ifdef DDB
- else if (!db_active)
+ else if (!db_active)
#else
- else
+ else
#endif
- panic(
- "spin lock %s held by %p for > 5 seconds",
- m->mtx_description,
- (void *)m->mtx_lock);
- }
+ panic("spin lock %s held by %p for > 5 seconds",
+ m->mtx_description, (void *)m->mtx_lock);
}
-
-#ifdef MUTEX_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveintr = 0xbeefface;
- else
-#endif
- m->mtx_saveintr = saveintr;
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
- return;
- }
}
+
+ m->mtx_saveintr = mtx_intr;
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+
+ return;
}
+/*
+ * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ *
+ * We are only called here if the lock is recursed or contested (i.e. we
+ * need to wake up a blocked thread).
+ */
void
-mtx_exit_hard(struct mtx *m, int type)
+_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
struct proc *p, *p1;
struct mtx *m1;
int pri;
p = CURPROC;
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (mtx_recursed(m)) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- _release_lock_quick(m);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
- } else
- atomic_store_rel_ptr(&m->mtx_lock,
- (void *)MTX_CONTESTED);
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p contested setrunqueue %p", m, p1);
- p1->p_blocked = NULL;
- p1->p_mtxname = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+ MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
+ m, m->mtx_recurse, file, line);
+
+ if (mtx_recursed(m)) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
+ return;
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
+
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
+ } else
+ atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
+
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
+ m, p1);
+
+ p1->p_blocked = NULL;
+ p1->p_mtxname = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+
+ if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
-#endif
- setrunqueue(p);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p switching out lock=%p",
- m, (void *)m->mtx_lock);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p resuming lock=%p",
- m, (void *)m->mtx_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
- break;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveintr != 0xbeefface);
- restore_intr(m->mtx_saveintr);
- }
- break;
- case MTX_SPIN | MTX_TOPHALF:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- break;
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+#endif
+ setrunqueue(p);
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: %p switching out lock=%p", m,
+ (void *)m->mtx_lock);
+
+ mi_switch();
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
+ m, (void *)m->mtx_lock);
}
+
+ mtx_unlock_spin(&sched_lock);
+
+ return;
}
+/*
+ * All the unlocking of MTX_SPIN locks is done inline.
+ * See the _rel_spin_lock() macro for the details.
+ */
+
+/*
+ * The INVARIANTS-enabled mtx_assert()
+ */
#ifdef INVARIANTS
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
@@ -822,6 +650,9 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
}
#endif
+/*
+ * The MUTEX_DEBUG-enabled mtx_validate()
+ */
#define MV_DESTROY 0 /* validate before destory */
#define MV_INIT 1 /* validate before init */
@@ -843,7 +674,7 @@ mtx_validate(struct mtx *m, int when)
if (m == &all_mtx || cold)
return 0;
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
/*
* XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
* we can re-enable the kernacc() checks.
@@ -887,50 +718,63 @@ mtx_validate(struct mtx *m, int when)
retval = 1;
}
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
return (retval);
}
#endif
+/*
+ * Mutex initialization routine; initialize lock `m' whose type and options
+ * are contained in `opts', and give it description `description.'
+ * Place on "all_mtx" queue.
+ */
void
-mtx_init(struct mtx *m, const char *t, int flag)
+mtx_init(struct mtx *m, const char *description, int opts)
{
- if ((flag & MTX_QUIET) == 0)
- CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
+
#ifdef MUTEX_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ /* Diagnostic and error correction */
+ if (mtx_validate(m, MV_INIT))
return;
#endif
bzero((void *)m, sizeof *m);
TAILQ_INIT(&m->mtx_blocked);
+
#ifdef WITNESS
if (!witness_cold) {
- /* XXX - should not use DEVBUF */
m->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(m->mtx_debug != NULL);
}
#endif
- m->mtx_description = t;
- m->mtx_flags = flag;
+ m->mtx_description = description;
+ m->mtx_flags = opts;
m->mtx_lock = MTX_UNOWNED;
+
/* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next = &all_mtx;
m->mtx_prev = all_mtx.mtx_prev;
m->mtx_prev->mtx_next = m;
all_mtx.mtx_prev = m;
if (++mtx_cur_cnt > mtx_max_cnt)
mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
+
#ifdef WITNESS
if (!witness_cold)
- witness_init(m, flag);
+ witness_init(m, opts);
#endif
}
+/*
+ * Remove lock `m' from all_mtx queue.
+ */
void
mtx_destroy(struct mtx *m)
{
@@ -939,7 +783,9 @@ mtx_destroy(struct mtx *m)
KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
__FUNCTION__));
#endif
+
CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description);
+
#ifdef MUTEX_DEBUG
if (m->mtx_next == NULL)
panic("mtx_destroy: %p (%s) already destroyed",
@@ -950,7 +796,9 @@ mtx_destroy(struct mtx *m)
} else {
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
}
- mtx_validate(m, MV_DESTROY); /* diagnostic */
+
+ /* diagnostic */
+ mtx_validate(m, MV_DESTROY);
#endif
#ifdef WITNESS
@@ -959,25 +807,27 @@ mtx_destroy(struct mtx *m)
#endif /* WITNESS */
/* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next->mtx_prev = m->mtx_prev;
m->mtx_prev->mtx_next = m->mtx_next;
+
#ifdef MUTEX_DEBUG
m->mtx_next = m->mtx_prev = NULL;
#endif
+
#ifdef WITNESS
- free(m->mtx_debug, M_DEVBUF);
+ free(m->mtx_debug, M_WITNESS);
m->mtx_debug = NULL;
#endif
+
mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
}
+
/*
- * The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the WITNESS kernel option being specified.
+ * The WITNESS-enabled diagnostic code.
*/
-
#ifdef WITNESS
static void
witness_fixup(void *dummy __unused)
@@ -988,26 +838,26 @@ witness_fixup(void *dummy __unused)
* We have to release Giant before initializing its witness
* structure so that WITNESS doesn't get confused.
*/
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&all_mtx, MTX_DEF);
+
+ mtx_lock(&all_mtx);
/* Iterate through all mutexes and finish up mutex initialization. */
for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- /* XXX - should not use DEVBUF */
mp->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(mp->mtx_debug != NULL);
witness_init(mp, mp->mtx_flags);
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
/* Mark the witness code as being ready for use. */
atomic_store_rel_int(&witness_cold, 0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)
@@ -1061,6 +911,9 @@ TUNABLE_INT_DECL("debug.witness_skipspin", 0, witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
"");
+/*
+ * Witness-enabled globals
+ */
static struct mtx w_mtx;
static struct witness *w_free;
static struct witness *w_all;
@@ -1069,20 +922,22 @@ static int witness_dead; /* fatal error, probably no memory */
static struct witness w_data[WITNESS_COUNT];
-static struct witness *enroll __P((const char *description, int flag));
-static int itismychild __P((struct witness *parent, struct witness *child));
-static void removechild __P((struct witness *parent, struct witness *child));
-static int isitmychild __P((struct witness *parent, struct witness *child));
-static int isitmydescendant __P((struct witness *parent, struct witness *child));
-static int dup_ok __P((struct witness *));
-static int blessed __P((struct witness *, struct witness *));
-static void witness_displaydescendants
- __P((void(*)(const char *fmt, ...), struct witness *));
-static void witness_leveldescendents __P((struct witness *parent, int level));
-static void witness_levelall __P((void));
-static struct witness * witness_get __P((void));
-static void witness_free __P((struct witness *m));
-
+/*
+ * Internal witness routine prototypes
+ */
+static struct witness *enroll(const char *description, int flag);
+static int itismychild(struct witness *parent, struct witness *child);
+static void removechild(struct witness *parent, struct witness *child);
+static int isitmychild(struct witness *parent, struct witness *child);
+static int isitmydescendant(struct witness *parent, struct witness *child);
+static int dup_ok(struct witness *);
+static int blessed(struct witness *, struct witness *);
+static void
+ witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
+static void witness_leveldescendents(struct witness *parent, int level);
+static void witness_levelall(void);
+static struct witness * witness_get(void);
+static void witness_free(struct witness *m);
static char *ignore_list[] = {
"witness lock",
@@ -1129,7 +984,8 @@ static char *sleep_list[] = {
*/
static struct witness_blessed blessed_list[] = {
};
-static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
+static int blessed_count =
+ sizeof(blessed_list) / sizeof(struct witness_blessed);
static void
witness_init(struct mtx *m, int flag)
@@ -1211,17 +1067,17 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
" %s:%d already holding %s:%x",
m->mtx_description, w->w_level, file, line,
spin_order_list[ffs(i)-1], i);
}
PCPU_SET(witness_spin_check, i | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1245,7 +1101,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
if (!mtx_legal2block())
- panic("blockable mtx_enter() of %s when not legal @ %s:%d",
+ panic("blockable mtx_lock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
/*
* Is this the first mutex acquired
@@ -1267,16 +1123,16 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
}
MPASS(!mtx_owned(&w_mtx));
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
/*
* If we have a known higher number just say ok
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
if (isitmydescendant(m1->mtx_witness, w)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
@@ -1284,7 +1140,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (blessed(w, w1))
goto out;
if (m1 == &Giant) {
@@ -1313,7 +1169,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
m1 = LIST_FIRST(&p->p_heldmtx);
if (!itismychild(m1->mtx_witness, w))
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
out:
#ifdef DDB
@@ -1356,10 +1212,10 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_description, file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1407,10 +1263,10 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return;
}
if ((m->mtx_flags & MTX_SPIN) != 0)
@@ -1426,7 +1282,7 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
}
if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
- panic("switchable mtx_exit() of %s when not legal @ %s:%d",
+ panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
LIST_REMOVE(m, mtx_held);
m->mtx_held.le_prev = NULL;
@@ -1497,10 +1353,10 @@ enroll(const char *description, int flag)
}
if ((flag & MTX_SPIN) && witness_skipspin)
return (NULL);
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
for (w = w_all; w; w = w->w_next) {
if (strcmp(description, w->w_description) == 0) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return (w);
}
}
@@ -1509,7 +1365,7 @@ enroll(const char *description, int flag)
w->w_next = w_all;
w_all = w;
w->w_description = description;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (flag & MTX_SPIN) {
w->w_spin = 1;
@@ -1731,7 +1587,7 @@ witness_get()
if ((w = w_free) == NULL) {
witness_dead = 1;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
printf("witness exhausted\n");
return (NULL);
}
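
The conversions above all follow the same substitution. The sketch below is illustrative only (not part of this change set) and shows how a caller reads under the renamed interface; `example_mtx' and `example_spin_mtx' are hypothetical locks.

/*
 * Illustrative sketch of the renamed mutex interface (not part of the
 * patch).  example_mtx and example_spin_mtx are hypothetical locks.
 */
#include <sys/param.h>
#include <sys/mutex.h>

static struct mtx example_mtx;		/* sleep (MTX_DEF) mutex */
static struct mtx example_spin_mtx;	/* spin (MTX_SPIN) mutex */

static void
example_locking(void)
{
	mtx_init(&example_mtx, "example sleep lock", MTX_DEF);
	mtx_init(&example_spin_mtx, "example spin lock", MTX_SPIN);

	/* Formerly mtx_enter(&example_mtx, MTX_DEF) / mtx_exit(...). */
	mtx_lock(&example_mtx);
	/* ... critical section ... */
	mtx_unlock(&example_mtx);

	/* Formerly mtx_enter(&example_spin_mtx, MTX_SPIN) / mtx_exit(...). */
	mtx_lock_spin(&example_spin_mtx);
	mtx_unlock_spin(&example_spin_mtx);

	/* Extra option bits now go through the _flags variants. */
	mtx_lock_spin_flags(&example_spin_mtx, MTX_QUIET);
	mtx_unlock_spin_flags(&example_spin_mtx, MTX_QUIET);
}
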
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index f5b35c1..d1ef108 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -403,7 +403,7 @@ fill_kinfo_proc(p, kp)
kp->ki_sigignore = p->p_procsig->ps_sigignore;
kp->ki_sigcatch = p->p_procsig->ps_sigcatch;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SIDL && p->p_stat != SZOMB && p->p_vmspace != NULL) {
struct vmspace *vm = p->p_vmspace;
@@ -449,7 +449,7 @@ fill_kinfo_proc(p, kp)
kp->ki_rqindex = p->p_rqindex;
kp->ki_oncpu = p->p_oncpu;
kp->ki_lastcpu = p->p_lastcpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
sp = NULL;
if (p->p_pgrp) {
kp->ki_pgid = p->p_pgrp->pg_id;
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 7238499..0c5b589 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1155,9 +1155,9 @@ crhold(cr)
struct ucred *cr;
{
- mtx_enter(&cr->cr_mtx, MTX_DEF);
+ mtx_lock(&cr->cr_mtx);
cr->cr_ref++;
- mtx_exit(&(cr)->cr_mtx, MTX_DEF);
+ mtx_unlock(&(cr)->cr_mtx);
}
@@ -1170,7 +1170,7 @@ crfree(cr)
struct ucred *cr;
{
- mtx_enter(&cr->cr_mtx, MTX_DEF);
+ mtx_lock(&cr->cr_mtx);
if (--cr->cr_ref == 0) {
mtx_destroy(&cr->cr_mtx);
/*
@@ -1182,7 +1182,7 @@ crfree(cr)
uifree(cr->cr_uidinfo);
FREE((caddr_t)cr, M_CRED);
} else {
- mtx_exit(&cr->cr_mtx, MTX_DEF);
+ mtx_unlock(&cr->cr_mtx);
}
}
@@ -1195,12 +1195,12 @@ crcopy(cr)
{
struct ucred *newcr;
- mtx_enter(&cr->cr_mtx, MTX_DEF);
+ mtx_lock(&cr->cr_mtx);
if (cr->cr_ref == 1) {
- mtx_exit(&cr->cr_mtx, MTX_DEF);
+ mtx_unlock(&cr->cr_mtx);
return (cr);
}
- mtx_exit(&cr->cr_mtx, MTX_DEF);
+ mtx_unlock(&cr->cr_mtx);
newcr = crdup(cr);
crfree(cr);
return (newcr);
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index e29c273..8af2be5 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -610,9 +610,9 @@ getrusage(p, uap)
case RUSAGE_SELF:
rup = &p->p_stats->p_ru;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
break;
case RUSAGE_CHILDREN:
@@ -724,12 +724,12 @@ uifind(uid)
{
struct uidinfo *uip;
- mtx_enter(&uihashtbl_mtx, MTX_DEF);
+ mtx_lock(&uihashtbl_mtx);
uip = uilookup(uid);
if (uip == NULL)
uip = uicreate(uid);
uihold(uip);
- mtx_exit(&uihashtbl_mtx, MTX_DEF);
+ mtx_unlock(&uihashtbl_mtx);
return (uip);
}
@@ -741,9 +741,9 @@ uihold(uip)
struct uidinfo *uip;
{
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
uip->ui_ref++;
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
}
/*-
@@ -767,18 +767,18 @@ uifree(uip)
{
/* Prepare for optimal case. */
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
if (--uip->ui_ref != 0) {
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return;
}
/* Prepare for suboptimal case. */
uip->ui_ref++;
- mtx_exit(&uip->ui_mtx, MTX_DEF);
- mtx_enter(&uihashtbl_mtx, MTX_DEF);
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
+ mtx_lock(&uihashtbl_mtx);
+ mtx_lock(&uip->ui_mtx);
/*
* We must subtract one from the count again because we backed out
@@ -788,7 +788,7 @@ uifree(uip)
*/
if (--uip->ui_ref == 0) {
LIST_REMOVE(uip, ui_hash);
- mtx_exit(&uihashtbl_mtx, MTX_DEF);
+ mtx_unlock(&uihashtbl_mtx);
if (uip->ui_sbsize != 0)
/* XXX no %qd in kernel. Truncate. */
printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
@@ -801,8 +801,8 @@ uifree(uip)
return;
}
- mtx_exit(&uihashtbl_mtx, MTX_DEF);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uihashtbl_mtx);
+ mtx_unlock(&uip->ui_mtx);
}
/*
@@ -816,16 +816,16 @@ chgproccnt(uip, diff, max)
int max;
{
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
/* don't allow them to exceed max, but allow subtraction */
if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (0);
}
uip->ui_proccnt += diff;
if (uip->ui_proccnt < 0)
printf("negative proccnt for uid = %d\n", uip->ui_uid);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (1);
}
@@ -843,12 +843,12 @@ chgsbsize(uip, hiwat, to, max)
int s;
s = splnet();
- mtx_enter(&uip->ui_mtx, MTX_DEF);
+ mtx_lock(&uip->ui_mtx);
new = uip->ui_sbsize + to - *hiwat;
/* don't allow them to exceed max, but allow subtraction */
if (to > *hiwat && new > max) {
splx(s);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (0);
}
uip->ui_sbsize = new;
@@ -856,6 +856,6 @@ chgsbsize(uip, hiwat, to, max)
if (uip->ui_sbsize < 0)
printf("negative sbsize for uid = %d\n", uip->ui_uid);
splx(s);
- mtx_exit(&uip->ui_mtx, MTX_DEF);
+ mtx_unlock(&uip->ui_mtx);
return (1);
}
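
uifree() above keeps its existing back-out-and-retake ordering: the per-uid lock may only be taken after the hash lock, so the reference is put back while the locks are reordered. A compressed sketch of that pattern with the new names follows; `hash_mtx', `obj', `obj_mtx', `ref' and `hash_link' are hypothetical.

/*
 * Compressed sketch (fragment, not part of the patch) of the
 * uifree()-style drop-and-reacquire ordering seen above.
 */
mtx_lock(&obj->obj_mtx);
if (--obj->ref != 0) {			/* fast path: still referenced */
	mtx_unlock(&obj->obj_mtx);
	return;
}
/* Back the reference out and retake the locks in the required order. */
obj->ref++;
mtx_unlock(&obj->obj_mtx);
mtx_lock(&hash_mtx);
mtx_lock(&obj->obj_mtx);
if (--obj->ref == 0) {
	LIST_REMOVE(obj, hash_link);	/* unhash under both locks */
	mtx_unlock(&hash_mtx);
	mtx_unlock(&obj->obj_mtx);
	mtx_destroy(&obj->obj_mtx);
	free(obj, M_TEMP);
	return;
}
mtx_unlock(&hash_mtx);
mtx_unlock(&obj->obj_mtx);
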
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 1b7426f..b25fa4d 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -256,10 +256,10 @@ boot(int howto)
if (curproc != NULL) {
DROP_GIANT_NOSWITCH();
for (subiter = 0; subiter < 50 * iter; subiter++) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
setrunqueue(curproc);
mi_switch(); /* Allow interrupt threads to run */
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
DELAY(1000);
}
PICKUP_GIANT();
@@ -540,7 +540,7 @@ panic(const char *fmt, ...)
#ifdef SMP
/* Only 1 CPU can panic at a time */
- mtx_enter(&panic_mtx, MTX_DEF);
+ mtx_lock(&panic_mtx);
#endif
bootopt = RB_AUTOBOOT | RB_DUMP;
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index a72de0e..deee375 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -186,9 +186,9 @@ CURSIG(struct proc *p)
SIGSETNAND(tmpset, p->p_sigmask);
if (SIGISEMPTY(tmpset) && (p->p_flag & P_TRACED) == 0)
return (0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
r = issignal(p);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (r);
}
@@ -1087,11 +1087,11 @@ psignal(p, sig)
action = SIG_DFL;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
(p->p_flag & P_TRACED) == 0)
p->p_nice = NZERO;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (prop & SA_CONT)
SIG_STOPSIGMASK(p->p_siglist);
@@ -1116,9 +1116,9 @@ psignal(p, sig)
* Defer further processing for signals which are held,
* except that stopped processes must be continued by SIGCONT.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (action == SIG_HOLD && (!(prop & SA_CONT) || p->p_stat != SSTOP)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return;
}
@@ -1132,7 +1132,7 @@ psignal(p, sig)
* trap() or syscall().
*/
if ((p->p_sflag & PS_SINTR) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
/*
@@ -1142,7 +1142,7 @@ psignal(p, sig)
*/
if (p->p_flag & P_TRACED)
goto run;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* If SIGCONT is default (or ignored) and process is
* asleep, we are finished; the process should not
@@ -1182,7 +1182,7 @@ psignal(p, sig)
/* NOTREACHED */
case SSTOP:
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* If traced process is already stopped,
* then no further action is necessary.
@@ -1211,11 +1211,11 @@ psignal(p, sig)
SIGDELSET(p->p_siglist, sig);
if (action == SIG_CATCH)
goto runfast;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == NULL)
goto run;
p->p_stat = SSLEEP;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
@@ -1234,14 +1234,14 @@ psignal(p, sig)
* runnable and can look at the signal. But don't make
* the process runnable, leave it stopped.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan && p->p_sflag & PS_SINTR) {
if (p->p_sflag & PS_CVWAITQ)
cv_waitq_remove(p);
else
unsleep(p);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
default:
@@ -1251,17 +1251,17 @@ psignal(p, sig)
* It will either never be noticed, or noticed very soon.
*/
if (p == curproc) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
signotify(p);
}
#ifdef SMP
else if (p->p_stat == SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
forward_signal(p);
}
#endif
else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
/*NOTREACHED*/
@@ -1270,14 +1270,14 @@ runfast:
/*
* Raise priority to at least PUSER.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_priority > PUSER)
p->p_priority = PUSER;
run:
/* If we jump here, sched_lock has to be owned. */
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
out:
/* If we jump here, sched_lock should not be owned. */
mtx_assert(&sched_lock, MA_NOTOWNED);
@@ -1336,10 +1336,10 @@ issignal(p)
do {
stop(p);
PROCTREE_LOCK(PT_RELEASE);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROCTREE_LOCK(PT_SHARED);
} while (!trace_req(p)
@@ -1413,10 +1413,10 @@ issignal(p)
if ((p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) == 0)
psignal(p->p_pptr, SIGCHLD);
PROCTREE_LOCK(PT_RELEASE);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
break;
} else if (prop & SA_IGNORE) {
@@ -1464,11 +1464,11 @@ stop(p)
{
PROCTREE_ASSERT(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_stat = SSTOP;
p->p_flag &= ~P_WAITED;
wakeup((caddr_t)p->p_pptr);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index c3d7849..ef4121b 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -377,13 +377,13 @@ uio_yield()
p = curproc;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
p->p_priority = p->p_usrpri;
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
splx(s);
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2518a28..8f54602 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -295,7 +295,7 @@ schedcpu(arg)
if (p->p_stat == SWAIT)
continue;
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_swtime++;
if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
p->p_slptime++;
@@ -305,7 +305,7 @@ schedcpu(arg)
* stop recalculating its priority until it wakes up.
*/
if (p->p_slptime > 1) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
@@ -343,7 +343,7 @@ schedcpu(arg)
} else
p->p_priority = p->p_usrpri;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
ALLPROC_LOCK(AP_RELEASE);
@@ -427,7 +427,7 @@ msleep(ident, mtx, priority, wmesg, timo)
ktrcsw(p->p_tracep, 1, 0);
#endif
WITNESS_SLEEP(0, mtx);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
s = splhigh();
if (cold || panicstr) {
/*
@@ -437,8 +437,8 @@ msleep(ident, mtx, priority, wmesg, timo)
* in case this is the idle process and already asleep.
*/
if (mtx != NULL && priority & PDROP)
- mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_flags(mtx, MTX_NOSWITCH);
+ mtx_unlock_spin(&sched_lock);
splx(s);
return (0);
}
@@ -448,7 +448,7 @@ msleep(ident, mtx, priority, wmesg, timo)
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(mtx, mtx);
- mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mtx, MTX_NOSWITCH);
if (priority & PDROP)
mtx = NULL;
}
@@ -485,15 +485,15 @@ msleep(ident, mtx, priority, wmesg, timo)
"msleep caught: proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
p->p_sflag |= PS_SINTR;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if ((sig = CURSIG(p))) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan)
unsleep(p);
p->p_stat = SRUN;
goto resume;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == NULL) {
catch = 0;
goto resume;
@@ -518,12 +518,12 @@ resume:
ktrcsw(p->p_tracep, 0, 0);
#endif
rval = EWOULDBLOCK;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
} else if (timo)
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
@@ -543,7 +543,7 @@ out:
#endif
PICKUP_GIANT();
if (mtx != NULL) {
- mtx_enter(mtx, MTX_DEF);
+ mtx_lock(mtx);
WITNESS_RESTORE(mtx, mtx);
}
return (rval);
@@ -579,7 +579,7 @@ asleep(void *ident, int priority, const char *wmesg, int timo)
*/
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan != NULL)
unsleep(p);
@@ -593,7 +593,7 @@ asleep(void *ident, int priority, const char *wmesg, int timo)
TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
return(0);
@@ -620,12 +620,12 @@ mawait(struct mtx *mtx, int priority, int timo)
WITNESS_SAVE_DECL(mtx);
WITNESS_SLEEP(0, mtx);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(mtx, mtx);
- mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
+ mtx_unlock_flags(mtx, MTX_NOSWITCH);
if (priority & PDROP)
mtx = NULL;
}
@@ -657,15 +657,15 @@ mawait(struct mtx *mtx, int priority, int timo)
if (catch) {
p->p_sflag |= PS_SINTR;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if ((sig = CURSIG(p))) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan)
unsleep(p);
p->p_stat = SRUN;
goto resume;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == NULL) {
catch = 0;
goto resume;
@@ -687,12 +687,12 @@ resume:
ktrcsw(p->p_tracep, 0, 0);
#endif
rval = EWOULDBLOCK;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto out;
}
} else if (timo)
callout_stop(&p->p_slpcallout);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
@@ -720,7 +720,7 @@ resume:
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -735,7 +735,7 @@ resume:
out:
PICKUP_GIANT();
if (mtx != NULL) {
- mtx_enter(mtx, MTX_DEF);
+ mtx_lock(mtx);
WITNESS_RESTORE(mtx, mtx);
}
return (rval);
@@ -761,7 +761,7 @@ endtsleep(arg)
"endtsleep: proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@@ -769,7 +769,7 @@ endtsleep(arg)
unsleep(p);
p->p_sflag |= PS_TIMEOUT;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -783,12 +783,12 @@ unsleep(p)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan) {
TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq);
p->p_wchan = NULL;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -804,7 +804,7 @@ wakeup(ident)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
restart:
TAILQ_FOREACH(p, qp, p_slpq) {
@@ -832,7 +832,7 @@ restart:
}
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -850,7 +850,7 @@ wakeup_one(ident)
int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
TAILQ_FOREACH(p, qp, p_slpq) {
@@ -878,7 +878,7 @@ wakeup_one(ident)
}
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
splx(s);
}
@@ -947,13 +947,13 @@ mi_switch()
p->p_runtime > p->p_limit->p_cpulimit) {
rlim = &p->p_rlimit[RLIMIT_CPU];
if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
killproc(p, "exceeded maximum CPU limit");
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
psignal(p, SIGXCPU);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (rlim->rlim_cur < rlim->rlim_max) {
/* XXX: we should make a private copy */
rlim->rlim_cur += 5;
@@ -990,7 +990,7 @@ setrunnable(p)
register int s;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
switch (p->p_stat) {
case 0:
case SRUN:
@@ -1022,7 +1022,7 @@ setrunnable(p)
}
else
maybe_resched(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -1036,7 +1036,7 @@ resetpriority(p)
{
register unsigned int newpriority;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (p->p_nice - PRIO_MIN);
@@ -1044,7 +1044,7 @@ resetpriority(p)
p->p_usrpri = newpriority;
}
maybe_resched(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/* ARGSUSED */
@@ -1100,13 +1100,13 @@ yield(struct proc *p, struct yield_args *uap)
p->p_retval[0] = 0;
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
p->p_priority = MAXPRI;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
splx(s);
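
As the msleep() hunks above show, a held interlock is released inside msleep() and, unless PDROP is passed, reacquired with mtx_lock() before returning. A minimal caller sketch (not part of the patch; `foo_mtx' and `foo_ready' are hypothetical):

/*
 * Minimal msleep() caller under the renamed interface (sketch only).
 * foo_mtx protects foo_ready.
 */
mtx_lock(&foo_mtx);
while (!foo_ready)
	msleep(&foo_ready, &foo_mtx, PRIBIO, "foordy", 0);
/* foo_mtx is held again here. */
mtx_unlock(&foo_mtx);
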
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 5576e08..6543b41 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -91,7 +91,7 @@ softclock(void *dummy)
steps = 0;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
while (softticks != ticks) {
softticks++;
/*
@@ -108,10 +108,10 @@ softclock(void *dummy)
if (steps >= MAX_SOFTCLOCK_STEPS) {
nextsoftcheck = c;
/* Give interrupts a chance. */
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
c = nextsoftcheck;
steps = 0;
}
@@ -134,22 +134,22 @@ softclock(void *dummy)
c->c_flags =
(c->c_flags & ~CALLOUT_PENDING);
}
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
if (!(c_flags & CALLOUT_MPSAFE))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
splx(s);
c_func(c_arg);
s = splhigh();
if (!(c_flags & CALLOUT_MPSAFE))
- mtx_exit(&Giant, MTX_DEF);
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_unlock(&Giant);
+ mtx_lock_spin(&callout_lock);
steps = 0;
c = nextsoftcheck;
}
}
}
nextsoftcheck = NULL;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -180,7 +180,7 @@ timeout(ftn, arg, to_ticks)
struct callout_handle handle;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
/* Fill in the next free callout structure. */
new = SLIST_FIRST(&callfree);
@@ -192,7 +192,7 @@ timeout(ftn, arg, to_ticks)
callout_reset(new, to_ticks, ftn, arg);
handle.callout = new;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
return (handle);
}
@@ -214,10 +214,10 @@ untimeout(ftn, arg, handle)
return;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
callout_stop(handle.callout);
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -251,7 +251,7 @@ callout_reset(c, to_ticks, ftn, arg)
int s;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
if (c->c_flags & CALLOUT_PENDING)
callout_stop(c);
@@ -269,7 +269,7 @@ callout_reset(c, to_ticks, ftn, arg)
c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
c, c_links.tqe);
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -280,13 +280,13 @@ callout_stop(c)
int s;
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
/*
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
return;
}
@@ -301,7 +301,7 @@ callout_stop(c)
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
}
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
}
@@ -366,7 +366,7 @@ adjust_timeout_calltodo(time_change)
/* don't collide with softclock() */
s = splhigh();
- mtx_enter(&callout_lock, MTX_SPIN);
+ mtx_lock_spin(&callout_lock);
for (p = calltodo.c_next; p != NULL; p = p->c_next) {
p->c_time -= delta_ticks;
@@ -377,7 +377,7 @@ adjust_timeout_calltodo(time_change)
/* take back the ticks the timer didn't use (p->c_time <= 0) */
delta_ticks = -p->c_time;
}
- mtx_exit(&callout_lock, MTX_SPIN);
+ mtx_unlock_spin(&callout_lock);
splx(s);
return;
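
softclock() above keeps the callout wheel under a spin lock but drops it around the handler, taking Giant only for handlers without CALLOUT_MPSAFE. A condensed sketch of that dispatch pattern (not part of the patch; c_func, c_arg and c_flags are copied from the callout being run):

/*
 * Condensed sketch of the softclock() handler dispatch shown above.
 */
mtx_unlock_spin(&callout_lock);
if (!(c_flags & CALLOUT_MPSAFE))
	mtx_lock(&Giant);
c_func(c_arg);
if (!(c_flags & CALLOUT_MPSAFE))
	mtx_unlock(&Giant);
mtx_lock_spin(&callout_lock);
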
diff --git a/sys/kern/subr_eventhandler.c b/sys/kern/subr_eventhandler.c
index 12f5e3d..5c524d8 100644
--- a/sys/kern/subr_eventhandler.c
+++ b/sys/kern/subr_eventhandler.c
@@ -73,7 +73,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
KASSERT(eventhandler_lists_initted, ("eventhandler registered too early"));
/* lock the eventhandler lists */
- mtx_enter(&eventhandler_mutex, MTX_DEF);
+ mtx_lock(&eventhandler_mutex);
/* Do we need to find/create the (slow) list? */
if (list == NULL) {
@@ -84,7 +84,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
if (list == NULL) {
if ((list = malloc(sizeof(struct eventhandler_list) + strlen(name) + 1,
M_EVENTHANDLER, M_NOWAIT)) == NULL) {
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(NULL);
}
list->el_flags = 0;
@@ -102,7 +102,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
/* allocate an entry for this handler, populate it */
if ((eg = malloc(sizeof(struct eventhandler_entry_generic),
M_EVENTHANDLER, M_NOWAIT)) == NULL) {
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(NULL);
}
eg->func = func;
@@ -122,7 +122,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
if (ep == NULL)
TAILQ_INSERT_TAIL(&list->el_entries, &eg->ee, ee_link);
lockmgr(&list->el_lock, LK_RELEASE, NULL, CURPROC);
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(&eg->ee);
}
@@ -154,14 +154,14 @@ eventhandler_find_list(char *name)
struct eventhandler_list *list;
/* scan looking for the requested list */
- mtx_enter(&eventhandler_mutex, MTX_DEF);
+ mtx_lock(&eventhandler_mutex);
for (list = TAILQ_FIRST(&eventhandler_lists);
list != NULL;
list = TAILQ_NEXT(list, el_link)) {
if (!strcmp(name, list->el_name))
break;
}
- mtx_exit(&eventhandler_mutex, MTX_DEF);
+ mtx_unlock(&eventhandler_mutex);
return(list);
}
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 7225c54..7fadeed 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -444,12 +444,12 @@ addupc_task(p, pc, ticks)
u_short v;
/* Testing PS_PROFIL may be unnecessary, but is certainly safe. */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_PROFIL) == 0 || ticks == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
prof = &p->p_stats->p_prof;
if (pc < prof->pr_off ||
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index f94bbeb..9b898c4 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -104,9 +104,9 @@ rman_init(struct rman *rm)
return ENOMEM;
mtx_init(rm->rm_mtx, "rman", MTX_DEF);
- mtx_enter(&rman_mtx, MTX_DEF);
+ mtx_lock(&rman_mtx);
TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
- mtx_exit(&rman_mtx, MTX_DEF);
+ mtx_unlock(&rman_mtx);
return 0;
}
@@ -129,7 +129,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
r->r_dev = 0;
r->r_rm = rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
for (s = TAILQ_FIRST(&rm->rm_list);
s && s->r_end < r->r_start;
s = TAILQ_NEXT(s, r_link))
@@ -141,7 +141,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
TAILQ_INSERT_BEFORE(s, r, r_link);
}
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return 0;
}
@@ -150,10 +150,10 @@ rman_fini(struct rman *rm)
{
struct resource *r;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
TAILQ_FOREACH(r, &rm->rm_list, r_link) {
if (r->r_flags & RF_ALLOCATED) {
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return EBUSY;
}
}
@@ -167,10 +167,10 @@ rman_fini(struct rman *rm)
TAILQ_REMOVE(&rm->rm_list, r, r_link);
free(r, M_RMAN);
}
- mtx_exit(rm->rm_mtx, MTX_DEF);
- mtx_enter(&rman_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
+ mtx_lock(&rman_mtx);
TAILQ_REMOVE(&rman_head, rm, rm_link);
- mtx_exit(&rman_mtx, MTX_DEF);
+ mtx_unlock(&rman_mtx);
mtx_destroy(rm->rm_mtx);
free(rm->rm_mtx, M_RMAN);
@@ -193,7 +193,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
want_activate = (flags & RF_ACTIVE);
flags &= ~RF_ACTIVE;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
for (r = TAILQ_FIRST(&rm->rm_list);
r && r->r_end < start;
@@ -370,7 +370,7 @@ out:
}
}
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return (rv);
}
@@ -417,9 +417,9 @@ rman_activate_resource(struct resource *r)
struct rman *rm;
rm = r->r_rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
rv = int_rman_activate_resource(rm, r, &whohas);
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return rv;
}
@@ -432,7 +432,7 @@ rman_await_resource(struct resource *r, int pri, int timo)
rm = r->r_rm;
for (;;) {
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
rv = int_rman_activate_resource(rm, r, &whohas);
if (rv != EBUSY)
return (rv); /* returns with mutex held */
@@ -441,19 +441,19 @@ rman_await_resource(struct resource *r, int pri, int timo)
panic("rman_await_resource");
/*
* splhigh hopefully will prevent a race between
- * mtx_exit and tsleep where a process
+ * mtx_unlock and tsleep where a process
* could conceivably get in and release the resource
* before we have a chance to sleep on it.
*/
s = splhigh();
whohas->r_flags |= RF_WANTED;
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
if (rv) {
splx(s);
return rv;
}
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
splx(s);
}
}
@@ -478,9 +478,9 @@ rman_deactivate_resource(struct resource *r)
struct rman *rm;
rm = r->r_rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
int_rman_deactivate_resource(r);
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return 0;
}
@@ -576,9 +576,9 @@ rman_release_resource(struct resource *r)
int rv;
struct rman *rm = r->r_rm;
- mtx_enter(rm->rm_mtx, MTX_DEF);
+ mtx_lock(rm->rm_mtx);
rv = int_rman_release_resource(rm, r);
- mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_unlock(rm->rm_mtx);
return (rv);
}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 9d53cd7..2802750 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -2268,7 +2268,7 @@ ap_init(void)
PCPU_SET(curproc, PCPU_GET(idleproc));
/* lock against other AP's that are waking up */
- mtx_enter(&ap_boot_mtx, MTX_SPIN);
+ mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2317,7 +2317,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- mtx_exit(&ap_boot_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2328,7 +2328,7 @@ ap_init(void)
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
@@ -2662,14 +2662,14 @@ forward_signal(struct proc *p)
return;
if (!forward_signal_enabled)
return;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (1) {
if (p->p_stat != SRUN) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
id = p->p_oncpu;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (id == 0xff)
return;
map = (1<<id);
@@ -2687,9 +2687,9 @@ forward_signal(struct proc *p)
break;
}
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (id == p->p_oncpu) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return;
}
}
@@ -2867,7 +2867,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2886,7 +2886,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
void
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index b4373b3..533d791 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -174,11 +174,11 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_priority = p->p_usrpri;
if (resched_wanted()) {
/*
@@ -193,30 +193,30 @@ userret(p, frame, oticks)
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
while ((sig = CURSIG(p)) != 0) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
postsig(sig);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
/*
* Charge system time if profiling.
*/
if (p->p_sflag & PS_PROFIL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* XXX - do we need Giant? */
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, frame->tf_eip,
(u_int)(p->p_sticks - oticks) * psratio);
}
curpriority = p->p_priority;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -282,9 +282,9 @@ restart:
((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
/* user trap */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
switch (type) {
@@ -312,9 +312,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0)
goto user;
break;
@@ -339,9 +339,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = trap_pfault(&frame, TRUE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
if (i == -2) {
/*
@@ -371,13 +371,13 @@ restart:
#ifndef TIMER_FREQ
# define TIMER_FREQ 1193182
#endif
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* machine/parity/power fail/"kitchen sink" faults */
@@ -421,9 +421,9 @@ restart:
ucode = FPE_FPU_NP_TRAP;
break;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = (*pmath_emulate)(&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i == 0) {
if (!(frame.tf_eflags & PSL_T))
goto out;
@@ -452,9 +452,9 @@ restart:
*/
eva = rcr2();
enable_intr();
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(void) trap_pfault(&frame, FALSE, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
case T_DNA:
@@ -477,9 +477,9 @@ restart:
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
if (frame.tf_eflags & PSL_VM) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
i = vm86_emulate((struct vm86frame *)&frame);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
if (i != 0)
/*
* returns to original process
@@ -510,9 +510,9 @@ restart:
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
PCPU_GET(curpcb)->pcb_gs = 0;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGBUS);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
@@ -621,13 +621,13 @@ restart:
#ifdef DEV_ISA
case T_NMI:
#ifdef POWERFAIL_NMI
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (time_second - lastalert > 10) {
log(LOG_WARNING, "NMI: power fail\n");
sysbeep(TIMER_FREQ/880, hz);
lastalert = time_second;
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
#else /* !POWERFAIL_NMI */
/* XXX Giant */
@@ -651,13 +651,13 @@ restart:
#endif /* DEV_ISA */
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
trap_fatal(&frame, eva);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
goto out;
}
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* Translate fault for emulators (e.g. Linux) */
if (*p->p_sysent->sv_transtrap)
i = (*p->p_sysent->sv_transtrap)(i, type);
@@ -673,12 +673,12 @@ restart:
uprintf("\n");
}
#endif
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
user:
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
out:
return;
}
@@ -1103,15 +1103,15 @@ syscall2(frame)
#ifdef DIAGNOSTIC
if (ISPL(frame.tf_cs) != SEL_UPL) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
panic("syscall");
/* NOT REACHED */
}
#endif
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
p->p_md.md_regs = &frame;
params = (caddr_t)frame.tf_esp + sizeof(int);
@@ -1121,9 +1121,9 @@ syscall2(frame)
/*
* The prep code is not MP aware.
*/
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
} else {
/*
* Need to check if this is a 32 bit or 64 bit syscall.
@@ -1160,7 +1160,7 @@ syscall2(frame)
*/
if (params && (i = narg * sizeof(int)) &&
(error = copyin(params, (caddr_t)args, (u_int)i))) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL))
ktrsyscall(p->p_tracep, code, narg, args);
@@ -1174,13 +1174,13 @@ syscall2(frame)
* we are ktracing
*/
if ((callp->sy_narg & SYF_MPSAFE) == 0) {
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSCALL)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsyscall(p->p_tracep, code, narg, args);
}
#endif
@@ -1230,7 +1230,7 @@ bad:
*/
if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
frame.tf_eflags &= ~PSL_T;
trapsignal(p, SIGTRAP, 0);
}
@@ -1243,7 +1243,7 @@ bad:
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET)) {
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
}
#endif
@@ -1259,7 +1259,7 @@ bad:
* Release Giant if we had to get it
*/
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
#ifdef WITNESS
if (witness_list(p)) {
@@ -1278,38 +1278,38 @@ ast(frame)
struct proc *p = CURPROC;
u_quad_t sticks;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
astoff();
atomic_add_int(&cnt.v_soft, 1);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
- mtx_exit(&sched_lock, MTX_SPIN);
- mtx_enter(&Giant, MTX_DEF);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
+ mtx_lock(&Giant);
+ mtx_lock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGVTALRM);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (!mtx_owned(&Giant))
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
psignal(p, SIGPROF);
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
userret(p, &frame, sticks);
if (mtx_owned(&Giant))
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index c13dd1d..64d3972 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -31,6 +31,11 @@
*/
/*
+ * Machine independent bits of mutex implementation and implementation of
+ * `witness' structure & related debugging routines.
+ */
+
+/*
* Main Entry: witness
* Pronunciation: 'wit-n&s
* Function: noun
@@ -53,12 +58,6 @@
#include "opt_ddb.h"
#include "opt_witness.h"
-/*
- * Cause non-inlined mtx_*() to be compiled.
- * Must be defined early because other system headers may include mutex.h.
- */
-#define _KERN_MUTEX_C_
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
@@ -82,9 +81,8 @@
#include <sys/mutex.h>
/*
- * Machine independent bits of the mutex implementation
+ * The WITNESS-enabled mutex debug structure.
*/
-
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;
@@ -100,138 +98,54 @@ struct mtx_debug {
#endif /* WITNESS */
/*
- * Assembly macros
- *------------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-/*
- * Default, unoptimized mutex micro-operations
+ * Internal utility macros.
*/
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#ifndef _obtain_lock
-/* Actually obtain mtx_lock */
-#define _obtain_lock(mp, tid) \
- atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
-#endif
-
-#ifndef _release_lock
-/* Actually release mtx_lock */
-#define _release_lock(mp, tid) \
- atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _release_lock_quick
-/* Actually release mtx_lock quickly assuming that we own it */
-#define _release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _getlock_sleep
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _getlock_spin_block
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
- else \
- (mp)->mtx_saveintr = _mtx_intr; \
-} while (0)
-#endif
+#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-#ifndef _getlock_norecurse
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-#endif
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-#ifndef _exitlock_norecurse
/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
+ * Early WITNESS-enabled declarations.
*/
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-#endif
+#ifdef WITNESS
-#ifndef _exitlock
/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSED) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSED); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _exitlock_spin
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp) do { \
- if (!mtx_recursed((mp))) { \
- int _mtx_intr = (mp)->mtx_saveintr; \
- \
- _release_lock_quick(mp); \
- restore_intr(_mtx_intr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-#endif
+ * Internal WITNESS routines which must be prototyped early.
+ *
+ * XXX: When/if witness code is cleaned up, it would be wise to place all
+ * witness prototyping early in this file.
+ */
+static void witness_init(struct mtx *, int flag);
+static void witness_destroy(struct mtx *);
+static void witness_display(void(*)(const char *fmt, ...));
-#ifdef WITNESS
-static void witness_init(struct mtx *, int flag);
-static void witness_destroy(struct mtx *);
-static void witness_display(void(*)(const char *fmt, ...));
+MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
/* All mutexes in system (used for debug/panic) */
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 };
+
/*
- * Set to 0 once mutexes have been fully initialized so that witness code can be
- * safely executed.
+ * This global is set to 0 once it becomes safe to use the witness code.
*/
static int witness_cold = 1;
+
#else /* WITNESS */
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
+/* XXX XXX XXX
+ * flag++ is a sleazoid way of shutting up the warning
*/
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_try_enter(m, t, f, l)
#endif /* WITNESS */
-/* All mutexes in system (used for debug/panic) */
+/*
+ * All mutex locks in system are kept on the all_mtx list.
+ */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL }, &all_mtx, &all_mtx,
@@ -242,19 +156,18 @@ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
#endif
};
+/*
+ * Global variables for bookkeeping.
+ */
static int mtx_cur_cnt;
static int mtx_max_cnt;
+/*
+ * Prototypes for non-exported routines.
+ *
+ * NOTE: Prototypes for witness routines are placed at the bottom of the file.
+ */
static void propagate_priority(struct proc *);
-static void mtx_enter_hard(struct mtx *, int type, int saveintr);
-static void mtx_exit_hard(struct mtx *, int type);
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
static void
propagate_priority(struct proc *p)
@@ -277,6 +190,7 @@ propagate_priority(struct proc *p)
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
+
MPASS(p->p_magic == P_MAGIC);
KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
if (p->p_priority <= pri)
@@ -314,7 +228,7 @@ propagate_priority(struct proc *p)
* quit.
*/
if (p->p_stat == SRUN) {
- printf("XXX: moving process %d(%s) to a new run queue\n",
+ printf("XXX: moving proc %d(%s) to a new run queue\n",
p->p_pid, p->p_comm);
MPASS(p->p_blocked == NULL);
remrunqueue(p);
@@ -338,6 +252,7 @@ propagate_priority(struct proc *p)
printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
p->p_comm, m->mtx_description);
+
/*
* Check if the proc needs to be moved up on
* the blocked chain
@@ -346,10 +261,11 @@ propagate_priority(struct proc *p)
printf("XXX: process at head of run queue\n");
continue;
}
+
p1 = TAILQ_PREV(p, rq, p_procq);
if (p1->p_priority <= pri) {
printf(
- "XXX: previous process %d(%s) has higher priority\n",
+ "XXX: previous process %d(%s) has higher priority\n",
p->p_pid, p->p_comm);
continue;
}
@@ -367,6 +283,7 @@ propagate_priority(struct proc *p)
if (p1->p_priority > pri)
break;
}
+
MPASS(p1 != NULL);
TAILQ_INSERT_BEFORE(p1, p, p_procq);
CTR4(KTR_LOCK,
@@ -376,421 +293,332 @@ propagate_priority(struct proc *p)
}
/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
+ * The important part of mtx_trylock{,_flags}()
+ * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
+ * if we're called, it's because we know we don't already own this lock.
*/
-void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *mpp = mtxp;
+ int rval;
- /* bits only valid on mtx_exit() */
- MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type, file, line);
+ KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == (uintptr_t)CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
-done:
- WITNESS_ENTER(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
-
-}
+ /*
+	 * _mtx_trylock does not accept the MTX_NOSWITCH option.
+ */
+ MPASS((opts & MTX_NOSWITCH) == 0);
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
+ rval = _obtain_lock(m, CURTHD);
- rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
- if (rval && mpp->mtx_witness != NULL) {
- MPASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
+ if (rval && m->mtx_witness != NULL) {
+ /*
+ * We do not handle recursion in _mtx_trylock; see the
+ * note at the top of the routine.
+ */
+ MPASS(!mtx_recursed(m));
+ witness_try_enter(m, (opts | m->mtx_flags), file, line);
}
#endif /* WITNESS */
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, rval, file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
+ m->mtx_description, m, rval, file, line);
return rval;
}
/*
- * Release lock m.
+ * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ *
+ * We call this if the lock is either contested (i.e. we need to go to
+ * sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *const mpp = mtxp;
-
- MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
- WITNESS_EXIT(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- int mtx_intr = mpp->mtx_saveintr;
-
- MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
- file, line);
- _release_lock_quick(mpp);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- restore_intr(mtx_intr);
- }
- } else {
- if (((type & MTX_TOPHALF) == 0) &&
- (type & MTX_FIRST)) {
- ASS_IDIS;
- ASS_SIEN(mpp);
- }
- _exitlock_spin(mpp);
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
+ struct proc *p = CURPROC;
+
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
+ return;
}
-}
-void
-mtx_enter_hard(struct mtx *m, int type, int saveintr)
-{
- struct proc *p = CURPROC;
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
+ (void *)m->mtx_lock, (void *)RETIP(m));
+
+ /*
+ * Save our priority. Even though p_nativepri is protected by
+ * sched_lock, we don't obtain it here as it can be expensive.
+ * Since this is the only place p_nativepri is set, and since two
+ * CPUs will not be executing the same process concurrently, we know
+ * that no other CPU is going to be messing with this. Also,
+ * p_nativepri is only read when we are blocked on a mutex, so that
+ * can't be happening right now either.
+ */
+ p->p_nativepri = p->p_priority;
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
+ while (!_obtain_lock(m, p)) {
+ uintptr_t v;
+ struct proc *p1;
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
- return;
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Check if the lock has been released while spinning for
+ * the sched_lock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
}
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: %p contested (lock=%p) [%p]",
- m, (void *)m->mtx_lock, (void *)RETIP(m));
/*
- * Save our priority. Even though p_nativepri is protected
- * by sched_lock, we don't obtain it here as it can be
- * expensive. Since this is the only place p_nativepri is
- * set, and since two CPUs will not be executing the same
- * process concurrently, we know that no other CPU is going
- * to be messing with this. Also, p_nativepri is only read
- * when we are blocked on a mutex, so that can't be happening
- * right now either.
+ * The mutex was marked contested on release. This means that
+ * there are processes blocked on it.
*/
- p->p_nativepri = p->p_priority;
- while (!_obtain_lock(m, p)) {
- uintptr_t v;
- struct proc *p1;
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL,
+ ("contested mutex has no contesters"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+
+ if (p1->p_priority < p->p_priority)
+ SET_PRIO(p, p1->p_priority);
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit the
- * mutex was either release or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
- (void *)(v | MTX_CONTESTED))) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
+ /*
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
+ }
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
+ /*
+ * We definitely must sleep for this lock.
+ */
+ mtx_assert(m, MA_NOTOWNED);
#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ /*
+ * If we're borrowing an interrupted thread's VM context, we
+ * must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "mtx_lock: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
+ }
#endif
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
+ /*
+ * Put us on the list of threads blocked on this mutex.
+ */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
+ }
- p->p_blocked = m; /* Who we're blocked on */
- p->p_mtxname = m->mtx_description;
- p->p_stat = SMTX;
+ /*
+ * Save who we're blocked on.
+ */
+ p->p_blocked = m;
+ p->p_mtxname = m->mtx_description;
+ p->p_stat = SMTX;
#if 0
- propagate_priority(p);
+ propagate_priority(p);
#endif
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p blocked on [%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p free from blocked on [%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (uintptr_t)p) {
- m->mtx_recurse++;
- return;
- }
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (_obtain_lock(m, p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ m->mtx_description);
+
+ mi_switch();
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
+ p, m, m->mtx_description);
+
+ mtx_unlock_spin(&sched_lock);
+ }
+
+ return;
+}
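For orientation, a sketch of how the slow path above is reached, assuming a hypothetical foo_mtx: ordinary code just calls mtx_lock()/mtx_unlock(); the inline fast path tries _obtain_lock() and only drops into _mtx_lock_sleep() when the mutex is already owned.

/* Sketch: normal MTX_DEF usage; contention is what lands in _mtx_lock_sleep(). */
static struct mtx foo_mtx;		/* hypothetical sleep mutex */

static void
foo_update(void)
{

	mtx_lock(&foo_mtx);		/* blocks in _mtx_lock_sleep() if contested */
	/* ... critical section ... */
	mtx_unlock(&foo_mtx);		/* wakes a waiter via _mtx_unlock_sleep() */
}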
+
+/*
+ * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ *
+ * This is only called if we need to actually spin for the lock. Recursion
+ * is handled inline.
+ */
+void
+_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+ int line)
+{
+ int i = 0;
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
+
+ for (;;) {
+ if (_obtain_lock(m, CURPROC))
+ break;
+
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY(1);
#ifdef DDB
- else if (!db_active)
+ else if (!db_active)
#else
- else
+ else
#endif
- panic(
- "spin lock %s held by %p for > 5 seconds",
- m->mtx_description,
- (void *)m->mtx_lock);
- }
+ panic("spin lock %s held by %p for > 5 seconds",
+ m->mtx_description, (void *)m->mtx_lock);
}
-
-#ifdef MUTEX_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveintr = 0xbeefface;
- else
-#endif
- m->mtx_saveintr = saveintr;
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
- return;
- }
}
+
+ m->mtx_saveintr = mtx_intr;
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+
+ return;
}
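A matching sketch for spin mutexes, again with invented names: mtx_lock_spin() saves and disables interrupts and, if the lock is held, busy-waits in _mtx_lock_spin() above rather than sleeping, which is why code such as the sched_lock users in this change relies on it.

/* Sketch: MTX_SPIN usage from code that may not sleep. */
static struct mtx foo_intr_mtx;		/* hypothetical spin mutex */

static void
foo_intr(void *arg)
{

	mtx_lock_spin(&foo_intr_mtx);	/* spins in _mtx_lock_spin() if held */
	/* ... touch state shared with interrupt context ... */
	mtx_unlock_spin(&foo_intr_mtx);	/* release is inlined (_rel_spin_lock()) */
}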
+/*
+ * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ *
+ * We are only called here if the lock is recursed or contested (i.e. we
+ * need to wake up a blocked thread).
+ */
void
-mtx_exit_hard(struct mtx *m, int type)
+_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
struct proc *p, *p1;
struct mtx *m1;
int pri;
p = CURPROC;
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (mtx_recursed(m)) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- _release_lock_quick(m);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
- } else
- atomic_store_rel_ptr(&m->mtx_lock,
- (void *)MTX_CONTESTED);
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p contested setrunqueue %p", m, p1);
- p1->p_blocked = NULL;
- p1->p_mtxname = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+ MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
+ m, m->mtx_recurse, file, line);
+
+ if (mtx_recursed(m)) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
+ return;
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
+
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
+ } else
+ atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
+
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
+ m, p1);
+
+ p1->p_blocked = NULL;
+ p1->p_mtxname = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+
+ if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
-#endif
- setrunqueue(p);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p switching out lock=%p",
- m, (void *)m->mtx_lock);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p resuming lock=%p",
- m, (void *)m->mtx_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
- break;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveintr != 0xbeefface);
- restore_intr(m->mtx_saveintr);
- }
- break;
- case MTX_SPIN | MTX_TOPHALF:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- break;
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+#endif
+ setrunqueue(p);
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: %p switching out lock=%p", m,
+ (void *)m->mtx_lock);
+
+ mi_switch();
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
+ m, (void *)m->mtx_lock);
}
+
+ mtx_unlock_spin(&sched_lock);
+
+ return;
}
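As a worked illustration of the priority computation above (smaller numbers mean higher priority in this scheme): if the releasing process has a native priority of 80 but still holds two other contested mutexes whose best-priority waiters are at 40 and 60, the loop leaves it running at 40; once it holds no contested locks, the clamp against p_nativepri drops it back to 80.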
+/*
+ * All the unlocking of MTX_SPIN locks is done inline.
+ * See the _rel_spin_lock() macro for the details.
+ */
+
+/*
+ * The INVARIANTS-enabled mtx_assert()
+ */
#ifdef INVARIANTS
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
@@ -822,6 +650,9 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
}
#endif
+/*
+ * The MUTEX_DEBUG-enabled mtx_validate()
+ */
#define MV_DESTROY 0 /* validate before destroy */
#define MV_INIT 1 /* validate before init */
@@ -843,7 +674,7 @@ mtx_validate(struct mtx *m, int when)
if (m == &all_mtx || cold)
return 0;
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
/*
* XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
* we can re-enable the kernacc() checks.
@@ -887,50 +718,63 @@ mtx_validate(struct mtx *m, int when)
retval = 1;
}
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
return (retval);
}
#endif
+/*
+ * Mutex initialization routine; initialize lock `m' with the type and options
+ * given in `opts' and with description `description.'
+ * Place on "all_mtx" queue.
+ */
void
-mtx_init(struct mtx *m, const char *t, int flag)
+mtx_init(struct mtx *m, const char *description, int opts)
{
- if ((flag & MTX_QUIET) == 0)
- CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
+
#ifdef MUTEX_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ /* Diagnostic and error correction */
+ if (mtx_validate(m, MV_INIT))
return;
#endif
bzero((void *)m, sizeof *m);
TAILQ_INIT(&m->mtx_blocked);
+
#ifdef WITNESS
if (!witness_cold) {
- /* XXX - should not use DEVBUF */
m->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(m->mtx_debug != NULL);
}
#endif
- m->mtx_description = t;
- m->mtx_flags = flag;
+ m->mtx_description = description;
+ m->mtx_flags = opts;
m->mtx_lock = MTX_UNOWNED;
+
/* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next = &all_mtx;
m->mtx_prev = all_mtx.mtx_prev;
m->mtx_prev->mtx_next = m;
all_mtx.mtx_prev = m;
if (++mtx_cur_cnt > mtx_max_cnt)
mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
+
#ifdef WITNESS
if (!witness_cold)
- witness_init(m, flag);
+ witness_init(m, opts);
#endif
}
+/*
+ * Remove lock `m' from all_mtx queue.
+ */
void
mtx_destroy(struct mtx *m)
{
@@ -939,7 +783,9 @@ mtx_destroy(struct mtx *m)
KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
__FUNCTION__));
#endif
+
CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description);
+
#ifdef MUTEX_DEBUG
if (m->mtx_next == NULL)
panic("mtx_destroy: %p (%s) already destroyed",
@@ -950,7 +796,9 @@ mtx_destroy(struct mtx *m)
} else {
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
}
- mtx_validate(m, MV_DESTROY); /* diagnostic */
+
+ /* diagnostic */
+ mtx_validate(m, MV_DESTROY);
#endif
#ifdef WITNESS
@@ -959,25 +807,27 @@ mtx_destroy(struct mtx *m)
#endif /* WITNESS */
/* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next->mtx_prev = m->mtx_prev;
m->mtx_prev->mtx_next = m->mtx_next;
+
#ifdef MUTEX_DEBUG
m->mtx_next = m->mtx_prev = NULL;
#endif
+
#ifdef WITNESS
- free(m->mtx_debug, M_DEVBUF);
+ free(m->mtx_debug, M_WITNESS);
m->mtx_debug = NULL;
#endif
+
mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
}
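To tie mtx_init() and mtx_destroy() together, a minimal lifecycle sketch under the three-argument mtx_init() shown above; the foo_* names are invented and the load/unload hooks are only stand-ins for wherever a subsystem sets up and tears down its lock.

/* Sketch: lock lifecycle with the mtx_init()/mtx_destroy() interface above. */
static struct mtx foo_list_mtx;

static void
foo_load(void)
{

	mtx_init(&foo_list_mtx, "foo list lock", MTX_DEF);
}

static void
foo_unload(void)
{

	mtx_destroy(&foo_list_mtx);
}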
+
/*
- * The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the WITNESS kernel option being specified.
+ * The WITNESS-enabled diagnostic code.
*/
-
#ifdef WITNESS
static void
witness_fixup(void *dummy __unused)
@@ -988,26 +838,26 @@ witness_fixup(void *dummy __unused)
* We have to release Giant before initializing its witness
* structure so that WITNESS doesn't get confused.
*/
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&all_mtx, MTX_DEF);
+
+ mtx_lock(&all_mtx);
/* Iterate through all mutexes and finish up mutex initialization. */
for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- /* XXX - should not use DEVBUF */
mp->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(mp->mtx_debug != NULL);
witness_init(mp, mp->mtx_flags);
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
/* Mark the witness code as being ready for use. */
atomic_store_rel_int(&witness_cold, 0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)
@@ -1061,6 +911,9 @@ TUNABLE_INT_DECL("debug.witness_skipspin", 0, witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
"");
+/*
+ * Witness-enabled globals
+ */
static struct mtx w_mtx;
static struct witness *w_free;
static struct witness *w_all;
@@ -1069,20 +922,22 @@ static int witness_dead; /* fatal error, probably no memory */
static struct witness w_data[WITNESS_COUNT];
-static struct witness *enroll __P((const char *description, int flag));
-static int itismychild __P((struct witness *parent, struct witness *child));
-static void removechild __P((struct witness *parent, struct witness *child));
-static int isitmychild __P((struct witness *parent, struct witness *child));
-static int isitmydescendant __P((struct witness *parent, struct witness *child));
-static int dup_ok __P((struct witness *));
-static int blessed __P((struct witness *, struct witness *));
-static void witness_displaydescendants
- __P((void(*)(const char *fmt, ...), struct witness *));
-static void witness_leveldescendents __P((struct witness *parent, int level));
-static void witness_levelall __P((void));
-static struct witness * witness_get __P((void));
-static void witness_free __P((struct witness *m));
-
+/*
+ * Internal witness routine prototypes
+ */
+static struct witness *enroll(const char *description, int flag);
+static int itismychild(struct witness *parent, struct witness *child);
+static void removechild(struct witness *parent, struct witness *child);
+static int isitmychild(struct witness *parent, struct witness *child);
+static int isitmydescendant(struct witness *parent, struct witness *child);
+static int dup_ok(struct witness *);
+static int blessed(struct witness *, struct witness *);
+static void
+ witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
+static void witness_leveldescendents(struct witness *parent, int level);
+static void witness_levelall(void);
+static struct witness * witness_get(void);
+static void witness_free(struct witness *m);
static char *ignore_list[] = {
"witness lock",
@@ -1129,7 +984,8 @@ static char *sleep_list[] = {
*/
static struct witness_blessed blessed_list[] = {
};
-static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
+static int blessed_count =
+ sizeof(blessed_list) / sizeof(struct witness_blessed);
static void
witness_init(struct mtx *m, int flag)
@@ -1211,17 +1067,17 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
" %s:%d already holding %s:%x",
m->mtx_description, w->w_level, file, line,
spin_order_list[ffs(i)-1], i);
}
PCPU_SET(witness_spin_check, i | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1245,7 +1101,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
if (!mtx_legal2block())
- panic("blockable mtx_enter() of %s when not legal @ %s:%d",
+ panic("blockable mtx_lock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
/*
* Is this the first mutex acquired
@@ -1267,16 +1123,16 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
}
MPASS(!mtx_owned(&w_mtx));
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
/*
* If we have a known higher number just say ok
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
if (isitmydescendant(m1->mtx_witness, w)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
@@ -1284,7 +1140,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (blessed(w, w1))
goto out;
if (m1 == &Giant) {
@@ -1313,7 +1169,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
m1 = LIST_FIRST(&p->p_heldmtx);
if (!itismychild(m1->mtx_witness, w))
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
out:
#ifdef DDB
@@ -1356,10 +1212,10 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_description, file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1407,10 +1263,10 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return;
}
if ((m->mtx_flags & MTX_SPIN) != 0)
@@ -1426,7 +1282,7 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
}
if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
- panic("switchable mtx_exit() of %s when not legal @ %s:%d",
+ panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
LIST_REMOVE(m, mtx_held);
m->mtx_held.le_prev = NULL;
@@ -1497,10 +1353,10 @@ enroll(const char *description, int flag)
}
if ((flag & MTX_SPIN) && witness_skipspin)
return (NULL);
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
for (w = w_all; w; w = w->w_next) {
if (strcmp(description, w->w_description) == 0) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return (w);
}
}
@@ -1509,7 +1365,7 @@ enroll(const char *description, int flag)
w->w_next = w_all;
w_all = w;
w->w_description = description;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (flag & MTX_SPIN) {
w->w_spin = 1;
@@ -1731,7 +1587,7 @@ witness_get()
if ((w = w_free) == NULL) {
witness_dead = 1;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
printf("witness exhausted\n");
return (NULL);
}
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index c13dd1d..64d3972 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -31,6 +31,11 @@
*/
/*
+ * Machine independent bits of mutex implementation and implementation of
+ * `witness' structure & related debugging routines.
+ */
+
+/*
* Main Entry: witness
* Pronunciation: 'wit-n&s
* Function: noun
@@ -53,12 +58,6 @@
#include "opt_ddb.h"
#include "opt_witness.h"
-/*
- * Cause non-inlined mtx_*() to be compiled.
- * Must be defined early because other system headers may include mutex.h.
- */
-#define _KERN_MUTEX_C_
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
@@ -82,9 +81,8 @@
#include <sys/mutex.h>
/*
- * Machine independent bits of the mutex implementation
+ * The WITNESS-enabled mutex debug structure.
*/
-
#ifdef WITNESS
struct mtx_debug {
struct witness *mtxd_witness;
@@ -100,138 +98,54 @@ struct mtx_debug {
#endif /* WITNESS */
/*
- * Assembly macros
- *------------------------------------------------------------------------------
- */
-
-#define _V(x) __STRING(x)
-
-/*
- * Default, unoptimized mutex micro-operations
+ * Internal utility macros.
*/
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#ifndef _obtain_lock
-/* Actually obtain mtx_lock */
-#define _obtain_lock(mp, tid) \
- atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
-#endif
-
-#ifndef _release_lock
-/* Actually release mtx_lock */
-#define _release_lock(mp, tid) \
- atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _release_lock_quick
-/* Actually release mtx_lock quickly assuming that we own it */
-#define _release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
-#endif
-
-#ifndef _getlock_sleep
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _getlock_spin_block
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_intr = save_intr(); \
- disable_intr(); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
- else \
- (mp)->mtx_saveintr = _mtx_intr; \
-} while (0)
-#endif
+#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-#ifndef _getlock_norecurse
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-#endif
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
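+
+Purely for illustration, a hypothetical assertion built on the utility macros above; mtx_unowned(), mtx_owner(), CURPROC and mtx_description come from the surrounding code, while the helper itself is invented.
+
+/* Hypothetical helper using mtx_unowned()/mtx_owner(); sketch only. */
+static void
+foo_assert_owned(struct mtx *m)
+{
+
+	if (mtx_unowned(m) || mtx_owner(m) != CURPROC)
+		panic("%s: %s not held by curproc", __FUNCTION__,
+		    m->mtx_description);
+}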
-#ifndef _exitlock_norecurse
/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is handled
- * in the hard function.
+ * Early WITNESS-enabled declarations.
*/
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-#endif
+#ifdef WITNESS
-#ifndef _exitlock
/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSED) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSED); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-#endif
-
-#ifndef _exitlock_spin
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp) do { \
- if (!mtx_recursed((mp))) { \
- int _mtx_intr = (mp)->mtx_saveintr; \
- \
- _release_lock_quick(mp); \
- restore_intr(_mtx_intr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-#endif
+ * Internal WITNESS routines which must be prototyped early.
+ *
+ * XXX: When/if witness code is cleaned up, it would be wise to place all
+ * witness prototyping early in this file.
+ */
+static void witness_init(struct mtx *, int flag);
+static void witness_destroy(struct mtx *);
+static void witness_display(void(*)(const char *fmt, ...));
-#ifdef WITNESS
-static void witness_init(struct mtx *, int flag);
-static void witness_destroy(struct mtx *);
-static void witness_display(void(*)(const char *fmt, ...));
+MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
/* All mutexes in system (used for debug/panic) */
static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 };
+
/*
- * Set to 0 once mutexes have been fully initialized so that witness code can be
- * safely executed.
+ * This global is set to 0 once it becomes safe to use the witness code.
*/
static int witness_cold = 1;
+
#else /* WITNESS */
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
+/* XXX XXX XXX
+ * flag++ is a sleazoid way of shutting up the unused parameter warning
*/
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_try_enter(m, t, f, l)
#endif /* WITNESS */
-/* All mutexes in system (used for debug/panic) */
+/*
+ * All mutex locks in system are kept on the all_mtx list.
+ */
static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
{ NULL, NULL }, &all_mtx, &all_mtx,
@@ -242,19 +156,18 @@ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head",
#endif
};
+/*
+ * Global variables for bookkeeping.
+ */
static int mtx_cur_cnt;
static int mtx_max_cnt;
+/*
+ * Prototypes for non-exported routines.
+ *
+ * NOTE: Prototypes for witness routines are placed at the bottom of the file.
+ */
static void propagate_priority(struct proc *);
-static void mtx_enter_hard(struct mtx *, int type, int saveintr);
-static void mtx_exit_hard(struct mtx *, int type);
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
static void
propagate_priority(struct proc *p)
@@ -277,6 +190,7 @@ propagate_priority(struct proc *p)
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
+
MPASS(p->p_magic == P_MAGIC);
KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
if (p->p_priority <= pri)
@@ -314,7 +228,7 @@ propagate_priority(struct proc *p)
* quit.
*/
if (p->p_stat == SRUN) {
- printf("XXX: moving process %d(%s) to a new run queue\n",
+ printf("XXX: moving proc %d(%s) to a new run queue\n",
p->p_pid, p->p_comm);
MPASS(p->p_blocked == NULL);
remrunqueue(p);
@@ -338,6 +252,7 @@ propagate_priority(struct proc *p)
printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
p->p_comm, m->mtx_description);
+
/*
* Check if the proc needs to be moved up on
* the blocked chain
@@ -346,10 +261,11 @@ propagate_priority(struct proc *p)
printf("XXX: process at head of run queue\n");
continue;
}
+
p1 = TAILQ_PREV(p, rq, p_procq);
if (p1->p_priority <= pri) {
printf(
- "XXX: previous process %d(%s) has higher priority\n",
+ "XXX: previous process %d(%s) has higher priority\n",
p->p_pid, p->p_comm);
continue;
}
@@ -367,6 +283,7 @@ propagate_priority(struct proc *p)
if (p1->p_priority > pri)
break;
}
+
MPASS(p1 != NULL);
TAILQ_INSERT_BEFORE(p1, p, p_procq);
CTR4(KTR_LOCK,
@@ -376,421 +293,332 @@ propagate_priority(struct proc *p)
}
/*
- * Get lock 'm', the macro handles the easy (and most common cases) and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out.
+ * The important part of mtx_trylock{,_flags}()
+ * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
+ * if we're called, it's because we know we don't already own this lock.
*/
-void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *mpp = mtxp;
+ int rval;
- /* bits only valid on mtx_exit() */
- MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type, file, line);
+ KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == (uintptr_t)CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
-done:
- WITNESS_ENTER(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
-
-}
+ /*
+ * _mtx_trylock does not accept MTX_NOSWITCH option.
+ */
+ MPASS((opts & MTX_NOSWITCH) == 0);
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
+ rval = _obtain_lock(m, CURTHD);
- rval = _obtain_lock(mpp, CURTHD);
#ifdef WITNESS
- if (rval && mpp->mtx_witness != NULL) {
- MPASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
+ if (rval && m->mtx_witness != NULL) {
+ /*
+ * We do not handle recursion in _mtx_trylock; see the
+ * note at the top of the routine.
+ */
+ MPASS(!mtx_recursed(m));
+ witness_try_enter(m, (opts | m->mtx_flags), file, line);
}
#endif /* WITNESS */
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, rval, file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
+ m->mtx_description, m, rval, file, line);
return rval;
}
/*
- * Release lock m.
+ * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ *
+ * We call this if the lock is either contested (i.e. we need to go to
+ * sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct mtx *const mpp = mtxp;
-
- MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
- WITNESS_EXIT(mpp, type, file, line);
- if (((type) & MTX_QUIET) == 0)
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- int mtx_intr = mpp->mtx_saveintr;
-
- MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
- file, line);
- _release_lock_quick(mpp);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- restore_intr(mtx_intr);
- }
- } else {
- if (((type & MTX_TOPHALF) == 0) &&
- (type & MTX_FIRST)) {
- ASS_IDIS;
- ASS_SIEN(mpp);
- }
- _exitlock_spin(mpp);
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
+ struct proc *p = CURPROC;
+
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
+ return;
}
-}
-void
-mtx_enter_hard(struct mtx *m, int type, int saveintr)
-{
- struct proc *p = CURPROC;
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
+ (void *)m->mtx_lock, (void *)RETIP(m));
+
+ /*
+ * Save our priority. Even though p_nativepri is protected by
+ * sched_lock, we don't obtain it here as it can be expensive.
+ * Since this is the only place p_nativepri is set, and since two
+ * CPUs will not be executing the same process concurrently, we know
+ * that no other CPU is going to be messing with this. Also,
+ * p_nativepri is only read when we are blocked on a mutex, so that
+ * can't be happening right now either.
+ */
+ p->p_nativepri = p->p_priority;
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
+ while (!_obtain_lock(m, p)) {
+ uintptr_t v;
+ struct proc *p1;
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
- return;
+ mtx_lock_spin(&sched_lock);
+ /*
+ * Check if the lock has been released while spinning for
+ * the sched_lock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
}
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: %p contested (lock=%p) [%p]",
- m, (void *)m->mtx_lock, (void *)RETIP(m));
/*
- * Save our priority. Even though p_nativepri is protected
- * by sched_lock, we don't obtain it here as it can be
- * expensive. Since this is the only place p_nativepri is
- * set, and since two CPUs will not be executing the same
- * process concurrently, we know that no other CPU is going
- * to be messing with this. Also, p_nativepri is only read
- * when we are blocked on a mutex, so that can't be happening
- * right now either.
+ * The mutex was marked contested on release. This means that
+ * there are processes blocked on it.
*/
- p->p_nativepri = p->p_priority;
- while (!_obtain_lock(m, p)) {
- uintptr_t v;
- struct proc *p1;
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL,
+ ("contested mutex has no contesters"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+
+ if (p1->p_priority < p->p_priority)
+ SET_PRIO(p, p1->p_priority);
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit the
- * mutex was either release or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
- (void *)(v | MTX_CONTESTED))) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
+ /*
+ * If the mutex isn't already contested and a failure occurs
+ * setting the contested bit, the mutex was either released
+ * or the state of the MTX_RECURSED bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_unlock_spin(&sched_lock);
+ continue;
+ }
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
+ /*
+ * We definitely must sleep for this lock.
+ */
+ mtx_assert(m, MA_NOTOWNED);
#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ /*
+ * If we're borrowing an interrupted thread's VM context, we
+ * must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "mtx_lock: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
+ }
#endif
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
+ /*
+ * Put us on the list of threads blocked on this mutex.
+ */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
+ }
- p->p_blocked = m; /* Who we're blocked on */
- p->p_mtxname = m->mtx_description;
- p->p_stat = SMTX;
+ /*
+ * Save who we're blocked on.
+ */
+ p->p_blocked = m;
+ p->p_mtxname = m->mtx_description;
+ p->p_stat = SMTX;
#if 0
- propagate_priority(p);
+ propagate_priority(p);
#endif
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p blocked on [%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR3(KTR_LOCK,
- "mtx_enter: p %p free from blocked on [%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (uintptr_t)p) {
- m->mtx_recurse++;
- return;
- }
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (_obtain_lock(m, p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ m->mtx_description);
+
+ mi_switch();
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR3(KTR_LOCK,
+ "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
+ p, m, m->mtx_description);
+
+ mtx_unlock_spin(&sched_lock);
+ }
+
+ return;
+}
+
+/*
+ * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ *
+ * This is only called if we need to actually spin for the lock. Recursion
+ * is handled inline.
+ */
+void
+_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
+ int line)
+{
+ int i = 0;
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
+
+ for (;;) {
+ if (_obtain_lock(m, CURPROC))
+ break;
+
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY(1);
#ifdef DDB
- else if (!db_active)
+ else if (!db_active)
#else
- else
+ else
#endif
- panic(
- "spin lock %s held by %p for > 5 seconds",
- m->mtx_description,
- (void *)m->mtx_lock);
- }
+ panic("spin lock %s held by %p for > 5 seconds",
+ m->mtx_description, (void *)m->mtx_lock);
}
-
-#ifdef MUTEX_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveintr = 0xbeefface;
- else
-#endif
- m->mtx_saveintr = saveintr;
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
- return;
- }
}
+
+ m->mtx_saveintr = mtx_intr;
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+
+ return;
}
+/*
+ * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ *
+ * We are only called here if the lock is recursed or contested (i.e. we
+ * need to wake up a blocked thread).
+ */
void
-mtx_exit_hard(struct mtx *m, int type)
+_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
struct proc *p, *p1;
struct mtx *m1;
int pri;
p = CURPROC;
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (mtx_recursed(m)) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- _release_lock_quick(m);
- if ((type & MTX_QUIET) == 0)
- CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
- } else
- atomic_store_rel_ptr(&m->mtx_lock,
- (void *)MTX_CONTESTED);
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p contested setrunqueue %p", m, p1);
- p1->p_blocked = NULL;
- p1->p_mtxname = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+ MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
+ m, m->mtx_recurse, file, line);
+
+ if (mtx_recursed(m)) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
+ return;
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
+
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ if ((opts & MTX_QUIET) == 0)
+ CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
+ } else
+ atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
+
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
+ m, p1);
+
+ p1->p_blocked = NULL;
+ p1->p_mtxname = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+
+ if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
}
-#endif
- setrunqueue(p);
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p switching out lock=%p",
- m, (void *)m->mtx_lock);
- mi_switch();
- if ((type & MTX_QUIET) == 0)
- CTR2(KTR_LOCK,
- "mtx_exit: %p resuming lock=%p",
- m, (void *)m->mtx_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
- break;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveintr != 0xbeefface);
- restore_intr(m->mtx_saveintr);
- }
- break;
- case MTX_SPIN | MTX_TOPHALF:
- if (mtx_recursed(m)) {
- m->mtx_recurse--;
- return;
- }
- MPASS(mtx_owned(m));
- _release_lock_quick(m);
- break;
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+#endif
+ setrunqueue(p);
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK,
+ "_mtx_unlock_sleep: %p switching out lock=%p", m,
+ (void *)m->mtx_lock);
+
+ mi_switch();
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
+ m, (void *)m->mtx_lock);
}
+
+ mtx_unlock_spin(&sched_lock);
+
+ return;
}
+/*
+ * All the unlocking of MTX_SPIN locks is done inline.
+ * See the _rel_spin_lock() macro for the details.
+ */
+
+/*
+ * The INVARIANTS-enabled mtx_assert()
+ */
#ifdef INVARIANTS
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
@@ -822,6 +650,9 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
}
#endif
+/*
+ * The MUTEX_DEBUG-enabled mtx_validate()
+ */
#define MV_DESTROY 0 /* validate before destroy */
#define MV_INIT 1 /* validate before init */
@@ -843,7 +674,7 @@ mtx_validate(struct mtx *m, int when)
if (m == &all_mtx || cold)
return 0;
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
/*
* XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
* we can re-enable the kernacc() checks.
@@ -887,50 +718,63 @@ mtx_validate(struct mtx *m, int when)
retval = 1;
}
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
return (retval);
}
#endif
+/*
+ * Mutex initialization routine; initialize lock `m' with the type and options
+ * given in `opts' and with description `description.'
+ * Place on "all_mtx" queue.
+ */
void
-mtx_init(struct mtx *m, const char *t, int flag)
+mtx_init(struct mtx *m, const char *description, int opts)
{
- if ((flag & MTX_QUIET) == 0)
- CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
+
+ if ((opts & MTX_QUIET) == 0)
+ CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
+
#ifdef MUTEX_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ /* Diagnostic and error correction */
+ if (mtx_validate(m, MV_INIT))
return;
#endif
bzero((void *)m, sizeof *m);
TAILQ_INIT(&m->mtx_blocked);
+
#ifdef WITNESS
if (!witness_cold) {
- /* XXX - should not use DEVBUF */
m->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(m->mtx_debug != NULL);
}
#endif
- m->mtx_description = t;
- m->mtx_flags = flag;
+ m->mtx_description = description;
+ m->mtx_flags = opts;
m->mtx_lock = MTX_UNOWNED;
+
/* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next = &all_mtx;
m->mtx_prev = all_mtx.mtx_prev;
m->mtx_prev->mtx_next = m;
all_mtx.mtx_prev = m;
if (++mtx_cur_cnt > mtx_max_cnt)
mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
+
#ifdef WITNESS
if (!witness_cold)
- witness_init(m, flag);
+ witness_init(m, opts);
#endif
}
+/*
+ * Remove lock `m' from all_mtx queue.
+ */
void
mtx_destroy(struct mtx *m)
{
@@ -939,7 +783,9 @@ mtx_destroy(struct mtx *m)
KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
__FUNCTION__));
#endif
+
CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description);
+
#ifdef MUTEX_DEBUG
if (m->mtx_next == NULL)
panic("mtx_destroy: %p (%s) already destroyed",
@@ -950,7 +796,9 @@ mtx_destroy(struct mtx *m)
} else {
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
}
- mtx_validate(m, MV_DESTROY); /* diagnostic */
+
+ /* diagnostic */
+ mtx_validate(m, MV_DESTROY);
#endif
#ifdef WITNESS
@@ -959,25 +807,27 @@ mtx_destroy(struct mtx *m)
#endif /* WITNESS */
/* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
+ mtx_lock(&all_mtx);
m->mtx_next->mtx_prev = m->mtx_prev;
m->mtx_prev->mtx_next = m->mtx_next;
+
#ifdef MUTEX_DEBUG
m->mtx_next = m->mtx_prev = NULL;
#endif
+
#ifdef WITNESS
- free(m->mtx_debug, M_DEVBUF);
+ free(m->mtx_debug, M_WITNESS);
m->mtx_debug = NULL;
#endif
+
mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
}
+
/*
- * The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the WITNESS kernel option being specified.
+ * The WITNESS-enabled diagnostic code.
*/
-
#ifdef WITNESS
static void
witness_fixup(void *dummy __unused)
@@ -988,26 +838,26 @@ witness_fixup(void *dummy __unused)
* We have to release Giant before initializing its witness
* structure so that WITNESS doesn't get confused.
*/
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
- mtx_enter(&all_mtx, MTX_DEF);
+
+ mtx_lock(&all_mtx);
/* Iterate through all mutexes and finish up mutex initialization. */
for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- /* XXX - should not use DEVBUF */
mp->mtx_debug = malloc(sizeof(struct mtx_debug),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ M_WITNESS, M_NOWAIT | M_ZERO);
MPASS(mp->mtx_debug != NULL);
witness_init(mp, mp->mtx_flags);
}
- mtx_exit(&all_mtx, MTX_DEF);
+ mtx_unlock(&all_mtx);
/* Mark the witness code as being ready for use. */
atomic_store_rel_int(&witness_cold, 0);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)
@@ -1061,6 +911,9 @@ TUNABLE_INT_DECL("debug.witness_skipspin", 0, witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
"");
+/*
+ * Witness-enabled globals
+ */
static struct mtx w_mtx;
static struct witness *w_free;
static struct witness *w_all;
@@ -1069,20 +922,22 @@ static int witness_dead; /* fatal error, probably no memory */
static struct witness w_data[WITNESS_COUNT];
-static struct witness *enroll __P((const char *description, int flag));
-static int itismychild __P((struct witness *parent, struct witness *child));
-static void removechild __P((struct witness *parent, struct witness *child));
-static int isitmychild __P((struct witness *parent, struct witness *child));
-static int isitmydescendant __P((struct witness *parent, struct witness *child));
-static int dup_ok __P((struct witness *));
-static int blessed __P((struct witness *, struct witness *));
-static void witness_displaydescendants
- __P((void(*)(const char *fmt, ...), struct witness *));
-static void witness_leveldescendents __P((struct witness *parent, int level));
-static void witness_levelall __P((void));
-static struct witness * witness_get __P((void));
-static void witness_free __P((struct witness *m));
-
+/*
+ * Internal witness routine prototypes
+ */
+static struct witness *enroll(const char *description, int flag);
+static int itismychild(struct witness *parent, struct witness *child);
+static void removechild(struct witness *parent, struct witness *child);
+static int isitmychild(struct witness *parent, struct witness *child);
+static int isitmydescendant(struct witness *parent, struct witness *child);
+static int dup_ok(struct witness *);
+static int blessed(struct witness *, struct witness *);
+static void
+ witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
+static void witness_leveldescendents(struct witness *parent, int level);
+static void witness_levelall(void);
+static struct witness * witness_get(void);
+static void witness_free(struct witness *m);
static char *ignore_list[] = {
"witness lock",
@@ -1129,7 +984,8 @@ static char *sleep_list[] = {
*/
static struct witness_blessed blessed_list[] = {
};
-static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
+static int blessed_count =
+ sizeof(blessed_list) / sizeof(struct witness_blessed);
static void
witness_init(struct mtx *m, int flag)
@@ -1211,17 +1067,17 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
" %s:%d already holding %s:%x",
m->mtx_description, w->w_level, file, line,
spin_order_list[ffs(i)-1], i);
}
PCPU_SET(witness_spin_check, i | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1245,7 +1101,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
if (!mtx_legal2block())
- panic("blockable mtx_enter() of %s when not legal @ %s:%d",
+ panic("blockable mtx_lock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
/*
* Is this the first mutex acquired
@@ -1267,16 +1123,16 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
goto out;
}
MPASS(!mtx_owned(&w_mtx));
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
/*
* If we have a known higher number just say ok
*/
if (witness_watch > 1 && w->w_level > w1->w_level) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
if (isitmydescendant(m1->mtx_witness, w)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
goto out;
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
@@ -1284,7 +1140,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (blessed(w, w1))
goto out;
if (m1 == &Giant) {
@@ -1313,7 +1169,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
m1 = LIST_FIRST(&p->p_heldmtx);
if (!itismychild(m1->mtx_witness, w))
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
out:
#ifdef DDB
@@ -1356,10 +1212,10 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_description, file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) | w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
w->w_file = file;
w->w_line = line;
m->mtx_line = line;
@@ -1407,10 +1263,10 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
file, line);
return;
}
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
PCPU_SET(witness_spin_check,
PCPU_GET(witness_spin_check) & ~w->w_level);
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return;
}
if ((m->mtx_flags & MTX_SPIN) != 0)
@@ -1426,7 +1282,7 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
}
if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold)
- panic("switchable mtx_exit() of %s when not legal @ %s:%d",
+ panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
m->mtx_description, file, line);
LIST_REMOVE(m, mtx_held);
m->mtx_held.le_prev = NULL;
@@ -1497,10 +1353,10 @@ enroll(const char *description, int flag)
}
if ((flag & MTX_SPIN) && witness_skipspin)
return (NULL);
- mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
for (w = w_all; w; w = w->w_next) {
if (strcmp(description, w->w_description) == 0) {
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
return (w);
}
}
@@ -1509,7 +1365,7 @@ enroll(const char *description, int flag)
w->w_next = w_all;
w_all = w;
w->w_description = description;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
if (flag & MTX_SPIN) {
w->w_spin = 1;
@@ -1731,7 +1587,7 @@ witness_get()
if ((w = w_free) == NULL) {
witness_dead = 1;
- mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
+ mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
printf("witness exhausted\n");
return (NULL);
}
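
The hunks above, and the ones that follow, apply one mechanical substitution: the flag-driven mtx_enter()/mtx_exit()/mtx_try_enter() calls become type-specific mtx_lock()/mtx_unlock(), mtx_lock_spin()/mtx_unlock_spin(), mtx_trylock(), and the *_flags() variants that carry extra flags such as MTX_QUIET. A rough illustration of the mapping follows; it is not part of this change, the two mutexes and the function are hypothetical, and mtx_init() setup is omitted.

static struct mtx example_def;	/* hypothetical MTX_DEF (sleep) mutex */
static struct mtx example_spin;	/* hypothetical MTX_SPIN mutex */

static void
example_lock_conversion(void)
{
	/* was: mtx_enter(&example_def, MTX_DEF) / mtx_exit(&example_def, MTX_DEF) */
	mtx_lock(&example_def);
	mtx_unlock(&example_def);

	/* was: mtx_enter(&example_spin, MTX_SPIN) / mtx_exit(&example_spin, MTX_SPIN) */
	mtx_lock_spin(&example_spin);
	mtx_unlock_spin(&example_spin);

	/* was: mtx_enter(&example_spin, MTX_SPIN | MTX_QUIET), and the matching exit */
	mtx_lock_spin_flags(&example_spin, MTX_QUIET);
	mtx_unlock_spin_flags(&example_spin, MTX_QUIET);

	/* was: mtx_try_enter(&example_def, MTX_DEF) */
	if (mtx_trylock(&example_def))
		mtx_unlock(&example_def);
}
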
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 4b2c5d8..eb63ee4 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -1025,13 +1025,13 @@ selrecord(selector, sip)
if (sip->si_pid == mypid)
return;
if (sip->si_pid && (p = pfind(sip->si_pid))) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == (caddr_t)&selwait) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
sip->si_flags |= SI_COLL;
return;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
sip->si_pid = mypid;
}
@@ -1055,15 +1055,15 @@ selwakeup(sip)
p = pfind(sip->si_pid);
sip->si_pid = 0;
if (p != NULL) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_wchan == (caddr_t)&selwait) {
if (p->p_stat == SSLEEP)
setrunnable(p);
else
unsleep(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
p->p_flag &= ~P_SELECT;
PROC_UNLOCK(p);
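
In the selrecord()/selwakeup() hunks above, the spin lock covers the scheduler fields (p_wchan, p_stat) while the per-process lock covers p_flag, so the two are never held together here. A condensed restatement of the selwakeup() body as it reads after the change, with declarations assumed from the surrounding file:

static void
selwakeup_sketch(struct proc *p)
{
	mtx_lock_spin(&sched_lock);
	if (p->p_wchan == (caddr_t)&selwait) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		mtx_unlock_spin(&sched_lock);
	} else {
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		p->p_flag &= ~P_SELECT;
		PROC_UNLOCK(p);
	}
}
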
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 2c03000..d53bf72 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -284,12 +284,12 @@ ptrace(curp, uap)
PROCTREE_LOCK(PT_RELEASE);
/* not currently stopped */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return EBUSY;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/* OK */
break;
@@ -377,13 +377,13 @@ ptrace(curp, uap)
sendsig:
/* deliver or queue signal */
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SSTOP) {
p->p_xstat = uap->data;
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (uap->data) {
mtx_assert(&Giant, MA_OWNED);
psignal(p, uap->data);
@@ -437,14 +437,14 @@ ptrace(curp, uap)
}
error = 0;
PHOLD(p); /* user had damn well better be incore! */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
fill_kinfo_proc (p, &p->p_addr->u_kproc);
curp->p_retval[0] = *(int *)
((uintptr_t)p->p_addr + (uintptr_t)uap->addr);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
curp->p_retval[0] = 0;
error = EFAULT;
}
@@ -453,13 +453,13 @@ ptrace(curp, uap)
case PT_WRITE_U:
PHOLD(p); /* user had damn well better be incore! */
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
fill_kinfo_proc (p, &p->p_addr->u_kproc);
error = ptrace_write_u(p, (vm_offset_t)uap->addr, uap->data);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
error = EFAULT;
}
PRELE(p);
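
The PT_READ_U/PT_WRITE_U hunks above test PS_INMEM under sched_lock, since p_sflag is covered by that spin lock, and drop the lock before touching the u-area. A minimal restatement of the read path; the function name is hypothetical and the remaining declarations are assumed from the surrounding file:

static int
ptrace_read_u_sketch(struct proc *curp, struct proc *p, vm_offset_t off)
{
	mtx_lock_spin(&sched_lock);
	if ((p->p_sflag & PS_INMEM) == 0) {
		mtx_unlock_spin(&sched_lock);
		return (EFAULT);
	}
	mtx_unlock_spin(&sched_lock);
	fill_kinfo_proc(p, &p->p_addr->u_kproc);
	curp->p_retval[0] = *(int *)((uintptr_t)p->p_addr + (uintptr_t)off);
	return (0);
}
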
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index f4fe297..b815e7c 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2251,7 +2251,7 @@ ttyinfo(tp)
else if ((p = LIST_FIRST(&tp->t_pgrp->pg_members)) == 0)
ttyprintf(tp, "empty foreground process group\n");
else {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
/* Pick interesting process. */
for (pick = NULL; p != 0; p = LIST_NEXT(p, p_pglist))
@@ -2264,7 +2264,7 @@ ttyinfo(tp)
ltmp = pick->p_stat == SIDL || pick->p_stat == SWAIT ||
pick->p_stat == SZOMB ? 0 :
pgtok(vmspace_resident_count(pick->p_vmspace));
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ttyprintf(tp, " cmd: %s %d [%s] ", pick->p_comm, pick->p_pid,
stmp);
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 1489157..adbfe31 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -152,20 +152,20 @@ mbinit(dummy)
/*
* Perform some initial allocations.
*/
- mtx_enter(&mcntfree.m_mtx, MTX_DEF);
+ mtx_lock(&mcntfree.m_mtx);
if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
goto bad;
- mtx_exit(&mcntfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mcntfree.m_mtx);
- mtx_enter(&mmbfree.m_mtx, MTX_DEF);
+ mtx_lock(&mmbfree.m_mtx);
if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
goto bad;
- mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mmbfree.m_mtx);
- mtx_enter(&mclfree.m_mtx, MTX_DEF);
+ mtx_lock(&mclfree.m_mtx);
if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
goto bad;
- mtx_exit(&mclfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mclfree.m_mtx);
return;
bad:
@@ -204,10 +204,10 @@ m_alloc_ref(nmb, how)
*/
nbytes = round_page(nmb * sizeof(union mext_refcnt));
- mtx_exit(&mcntfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mcntfree.m_mtx);
if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
M_WAITOK : M_NOWAIT)) == NULL) {
- mtx_enter(&mcntfree.m_mtx, MTX_DEF);
+ mtx_lock(&mcntfree.m_mtx);
return (0);
}
nmb = nbytes / sizeof(union mext_refcnt);
@@ -216,7 +216,7 @@ m_alloc_ref(nmb, how)
* We don't let go of the mutex in order to avoid a race.
* It is up to the caller to let go of the mutex.
*/
- mtx_enter(&mcntfree.m_mtx, MTX_DEF);
+ mtx_lock(&mcntfree.m_mtx);
for (i = 0; i < nmb; i++) {
((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
mcntfree.m_head = (union mext_refcnt *)p;
@@ -260,13 +260,13 @@ m_mballoc(nmb, how)
nbytes = round_page(nmb * MSIZE);
- mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mmbfree.m_mtx);
p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
if (p == 0 && how == M_TRYWAIT) {
atomic_add_long(&mbstat.m_wait, 1);
p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
}
- mtx_enter(&mmbfree.m_mtx, MTX_DEF);
+ mtx_lock(&mmbfree.m_mtx);
/*
* Either the map is now full, or `how' is M_DONTWAIT and there
@@ -318,10 +318,10 @@ m_mballoc_wait(void)
* importantly, to avoid a potential lock order reversal which may
* result in deadlock (See comment above m_reclaim()).
*/
- mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mmbfree.m_mtx);
m_reclaim();
- mtx_enter(&mmbfree.m_mtx, MTX_DEF);
+ mtx_lock(&mmbfree.m_mtx);
_MGET(p, M_DONTWAIT);
if (p == NULL) {
@@ -381,11 +381,11 @@ m_clalloc(ncl, how)
}
npg = ncl;
- mtx_exit(&mclfree.m_mtx, MTX_DEF);
+ mtx_unlock(&mclfree.m_mtx);
p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
ncl = ncl * PAGE_SIZE / MCLBYTES;
- mtx_enter(&mclfree.m_mtx, MTX_DEF);
+ mtx_lock(&mclfree.m_mtx);
/*
* Either the map is now full, or `how' is M_DONTWAIT and there
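
In the m_alloc_ref()/m_mballoc()/m_clalloc() hunks above, the free-list mutex is dropped around kmem_malloc(), which may sleep, and retaken before the list is touched again, so the caller still sees it held on return. A minimal sketch of that pattern for the reference-count pool; the wrapper name is hypothetical, the other names come from the diff:

static caddr_t
mb_map_grow_sketch(u_long nbytes, int how)
{
	caddr_t p;

	mtx_unlock(&mcntfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, nbytes,
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	mtx_lock(&mcntfree.m_mtx);
	return (p);	/* NULL: map full, or M_DONTWAIT could not be satisfied */
}
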
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 983742e..43c6c27 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1418,7 +1418,7 @@ sf_buf_init(void *arg)
int i;
mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", MTX_DEF);
- mtx_enter(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
SLIST_INIT(&sf_freelist.sf_head);
sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
@@ -1428,7 +1428,7 @@ sf_buf_init(void *arg)
SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
}
sf_buf_alloc_want = 0;
- mtx_exit(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_unlock(&sf_freelist.sf_lock);
}
/*
@@ -1439,13 +1439,13 @@ sf_buf_alloc()
{
struct sf_buf *sf;
- mtx_enter(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
sf_buf_alloc_want++;
msleep(&sf_freelist, &sf_freelist.sf_lock, PVM, "sfbufa", 0);
}
SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
- mtx_exit(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_unlock(&sf_freelist.sf_lock);
return (sf);
}
@@ -1475,13 +1475,13 @@ sf_buf_free(caddr_t addr, void *args)
vm_page_free(m);
splx(s);
sf->m = NULL;
- mtx_enter(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
if (sf_buf_alloc_want) {
sf_buf_alloc_want--;
wakeup_one(&sf_freelist);
}
- mtx_exit(&sf_freelist.sf_lock, MTX_DEF);
+ mtx_unlock(&sf_freelist.sf_lock);
}
/*
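
The sf_buf_init()/sf_buf_alloc()/sf_buf_free() hunks above keep sf_lock held across the free-list manipulation; in the allocator, msleep() releases sf_lock while waiting and reacquires it before returning, so the list is only examined with the lock held. A condensed restatement of the allocation path, with declarations assumed from the surrounding file:

static struct sf_buf *
sf_buf_alloc_sketch(void)
{
	struct sf_buf *sf;

	mtx_lock(&sf_freelist.sf_lock);
	while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
		sf_buf_alloc_want++;
		msleep(&sf_freelist, &sf_freelist.sf_lock, PVM, "sfbufa", 0);
	}
	SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
	mtx_unlock(&sf_freelist.sf_lock);
	return (sf);
}
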
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 8c4175a..8335264 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -638,7 +638,7 @@ aio_daemon(void *uproc)
struct proc *curcp, *mycp, *userp;
struct vmspace *myvm, *tmpvm;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* Local copies of curproc (cp) and vmspace (myvm)
*/
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a0d693c..c124559 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1800,7 +1800,7 @@ buf_daemon()
{
int s;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* This process needs to be suspended prior to shutdown sync.
diff --git a/sys/kern/vfs_conf.c b/sys/kern/vfs_conf.c
index 2ca46185..c1447ff 100644
--- a/sys/kern/vfs_conf.c
+++ b/sys/kern/vfs_conf.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 3a31666..618ce56 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -449,7 +449,7 @@ vop_nolock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
+ mtx_unlock(&ap->a_vp->v_interlock);
return (0);
#endif
}
@@ -471,7 +471,7 @@ vop_nounlock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
+ mtx_unlock(&ap->a_vp->v_interlock);
return (0);
}
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 3abcc82..38c1895 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -390,15 +390,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (mp);
}
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return ((struct mount *) 0);
}
@@ -422,7 +422,7 @@ vfs_getnewfsid(mp)
fsid_t tfsid;
int mtype;
- mtx_enter(&mntid_mtx, MTX_DEF);
+ mtx_lock(&mntid_mtx);
mtype = mp->mnt_vfc->vfc_typenum;
tfsid.val[1] = mtype;
mtype = (mtype & 0xFF) << 24;
@@ -435,7 +435,7 @@ vfs_getnewfsid(mp)
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
- mtx_exit(&mntid_mtx, MTX_DEF);
+ mtx_unlock(&mntid_mtx);
}
/*
@@ -538,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
@@ -560,7 +560,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
+ !mtx_trylock(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -570,7 +570,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -578,13 +578,13 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
vn_finished_write(vnmp);
@@ -609,7 +609,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_clen = 0;
vp->v_socket = 0;
} else {
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
@@ -646,7 +646,7 @@ insmntque(vp, mp)
register struct mount *mp;
{
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -656,11 +656,11 @@ insmntque(vp, mp)
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) == NULL) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return;
}
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
}
/*
@@ -785,12 +785,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1010,7 +1010,7 @@ sched_sync(void)
int s;
struct proc *p = updateproc;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
SHUTDOWN_PRI_LAST);
@@ -1104,10 +1104,10 @@ int
speedup_syncer()
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (updateproc->p_wchan == &lbolt)
setrunnable(updateproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
@@ -1407,9 +1407,9 @@ addalias(nvp, dev)
KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
nvp->v_rdev = dev;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
}
/*
@@ -1435,7 +1435,7 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
if (vp->v_vxproc == curproc) {
printf("VXLOCK interlock avoided\n");
@@ -1461,15 +1461,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
return (error);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1479,9 +1479,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount++;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1496,14 +1496,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return;
}
@@ -1525,7 +1525,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
#endif
panic("vrele: negative ref cnt");
}
@@ -1543,7 +1543,7 @@ vput(vp)
struct proc *p = curproc; /* XXX */
KASSERT(vp != NULL, ("vput: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
if (vp->v_usecount > 1) {
@@ -1564,7 +1564,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,7 +1633,7 @@ vflush(mp, skipvp, flags)
struct vnode *vp, *nvp;
int busy = 0;
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
@@ -1649,12 +1649,12 @@ loop:
if (vp == skipvp)
continue;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
	 * Skip over vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
/*
@@ -1663,7 +1663,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
@@ -1672,9 +1672,9 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
vgonel(vp, p);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
@@ -1684,7 +1684,7 @@ loop:
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (vp->v_type != VCHR) {
vgonel(vp, p);
} else {
@@ -1692,17 +1692,17 @@ loop:
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
#ifdef DIAGNOSTIC
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
busy++;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (busy)
return (EBUSY);
return (0);
@@ -1784,7 +1784,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1794,7 +1794,7 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
cache_purge(vp);
@@ -1847,9 +1847,9 @@ vop_revoke(ap)
}
dev = vp->v_rdev;
for (;;) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
vq = SLIST_FIRST(&dev->si_hlist);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
if (!vq)
break;
vgone(vq);
@@ -1868,15 +1868,15 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount == 0) {
if (inter_lkp) {
- mtx_exit(inter_lkp, MTX_DEF);
+ mtx_unlock(inter_lkp);
}
vgonel(vp, p);
return (1);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1890,7 +1890,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vgonel(vp, p);
}
@@ -1919,7 +1919,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
* Delete from old mount point vnode list, if on one.
@@ -1931,10 +1931,10 @@ vgonel(vp, p)
* if it is on one.
*/
if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
freedev(vp->v_rdev);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
vp->v_rdev = NULL;
}
@@ -1950,19 +1950,19 @@ vgonel(vp, p)
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (vp->v_flag & VFREE)
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
freevnodes++;
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
splx(s);
}
vp->v_type = VBAD;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1976,15 +1976,15 @@ vfinddev(dev, type, vpp)
{
struct vnode *vp;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
if (type == vp->v_type) {
*vpp = vp;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (1);
}
}
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (0);
}
@@ -1999,10 +1999,10 @@ vcount(vp)
int count;
count = 0;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
count += vq->v_usecount;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (count);
}
@@ -2083,7 +2083,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -2093,11 +2093,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
}
#endif
@@ -2202,14 +2202,14 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
again:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mp->mnt_vnodelist);
vp != NULL;
vp = nvp) {
@@ -2219,22 +2219,22 @@ again:
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto again;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
return (error);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
@@ -2592,7 +2592,7 @@ loop:
continue;
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2604,7 +2604,7 @@ loop:
vput(vp);
}
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
}
if (anyio && (--tries > 0))
@@ -2638,7 +2638,7 @@ vfree(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -2646,7 +2646,7 @@ vfree(vp)
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
@@ -2662,11 +2662,11 @@ vbusy(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~(VFREE|VAGE);
splx(s);
}
@@ -2685,7 +2685,7 @@ vn_pollrecord(vp, p, events)
struct proc *p;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_revents & events) {
/*
* This leaves events we are not interested
@@ -2697,12 +2697,12 @@ vn_pollrecord(vp, p, events)
events &= vp->v_pollinfo.vpi_revents;
vp->v_pollinfo.vpi_revents &= ~events;
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return events;
}
vp->v_pollinfo.vpi_events |= events;
selrecord(p, &vp->v_pollinfo.vpi_selinfo);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return 0;
}
@@ -2717,7 +2717,7 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events & events) {
/*
* We clear vpi_events so that we don't
@@ -2734,7 +2734,7 @@ vn_pollevent(vp, events)
vp->v_pollinfo.vpi_revents |= events;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
/*
@@ -2746,12 +2746,12 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events) {
vp->v_pollinfo.vpi_events = 0;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
@@ -2856,9 +2856,9 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
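
Several of the hunks above (sync_fsync(), the DDB lockedvnodes command, sysctl_vnode()) walk the mount list with the same shape: mountlist_mtx is handed to vfs_busy() as an interlock and is dropped once the mount point has been busied, so the loop retakes the mutex before advancing; when the LK_NOWAIT attempt fails the mutex is still held and the loop simply steps to the next entry. A skeleton of that walk; the function name is hypothetical and the per-mount work is elided:

static void
foreach_mount_sketch(struct proc *p)
{
	struct mount *mp, *nmp;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* ... operate on the busied mount point, mountlist_mtx dropped ... */
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_unlock(&mountlist_mtx);
}
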
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 16e8984..178d2a2 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -176,16 +176,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -243,15 +243,15 @@ mount(p, uap)
return (ENODEV);
}
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
/*
* Allocate and initialize the filesystem.
@@ -310,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vrele(vp);
return (error);
}
@@ -322,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- mtx_exit(&vp->v_interlock, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -337,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -464,7 +464,7 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
@@ -484,7 +484,7 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
@@ -535,7 +535,7 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -551,11 +551,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -727,7 +727,7 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -743,7 +743,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -757,11 +757,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 2ca46185..c1447ff 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 3abcc82..38c1895 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -390,15 +390,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (mp);
}
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return ((struct mount *) 0);
}
@@ -422,7 +422,7 @@ vfs_getnewfsid(mp)
fsid_t tfsid;
int mtype;
- mtx_enter(&mntid_mtx, MTX_DEF);
+ mtx_lock(&mntid_mtx);
mtype = mp->mnt_vfc->vfc_typenum;
tfsid.val[1] = mtype;
mtype = (mtype & 0xFF) << 24;
@@ -435,7 +435,7 @@ vfs_getnewfsid(mp)
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
- mtx_exit(&mntid_mtx, MTX_DEF);
+ mtx_unlock(&mntid_mtx);
}
/*
@@ -538,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
@@ -560,7 +560,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
+ !mtx_trylock(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -570,7 +570,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -578,13 +578,13 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
vn_finished_write(vnmp);
@@ -609,7 +609,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_clen = 0;
vp->v_socket = 0;
} else {
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
@@ -646,7 +646,7 @@ insmntque(vp, mp)
register struct mount *mp;
{
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -656,11 +656,11 @@ insmntque(vp, mp)
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) == NULL) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return;
}
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
}
/*
@@ -785,12 +785,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1010,7 +1010,7 @@ sched_sync(void)
int s;
struct proc *p = updateproc;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
SHUTDOWN_PRI_LAST);
@@ -1104,10 +1104,10 @@ int
speedup_syncer()
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (updateproc->p_wchan == &lbolt)
setrunnable(updateproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
@@ -1407,9 +1407,9 @@ addalias(nvp, dev)
KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
nvp->v_rdev = dev;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
}
/*
@@ -1435,7 +1435,7 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
if (vp->v_vxproc == curproc) {
printf("VXLOCK interlock avoided\n");
@@ -1461,15 +1461,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
return (error);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1479,9 +1479,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_usecount++;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1496,14 +1496,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return;
}
@@ -1525,7 +1525,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
#endif
panic("vrele: negative ref cnt");
}
@@ -1543,7 +1543,7 @@ vput(vp)
struct proc *p = curproc; /* XXX */
KASSERT(vp != NULL, ("vput: null vp"));
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
if (vp->v_usecount > 1) {
@@ -1564,7 +1564,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,7 +1633,7 @@ vflush(mp, skipvp, flags)
struct vnode *vp, *nvp;
int busy = 0;
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
@@ -1649,12 +1649,12 @@ loop:
if (vp == skipvp)
continue;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
	 * Skip over vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
/*
@@ -1663,7 +1663,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
@@ -1672,9 +1672,9 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
vgonel(vp, p);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
@@ -1684,7 +1684,7 @@ loop:
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (vp->v_type != VCHR) {
vgonel(vp, p);
} else {
@@ -1692,17 +1692,17 @@ loop:
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
continue;
}
#ifdef DIAGNOSTIC
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
busy++;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if (busy)
return (EBUSY);
return (0);
@@ -1784,7 +1784,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1794,7 +1794,7 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
cache_purge(vp);
@@ -1847,9 +1847,9 @@ vop_revoke(ap)
}
dev = vp->v_rdev;
for (;;) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
vq = SLIST_FIRST(&dev->si_hlist);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
if (!vq)
break;
vgone(vq);
@@ -1868,15 +1868,15 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount == 0) {
if (inter_lkp) {
- mtx_exit(inter_lkp, MTX_DEF);
+ mtx_unlock(inter_lkp);
}
vgonel(vp, p);
return (1);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1890,7 +1890,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vgonel(vp, p);
}
@@ -1919,7 +1919,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
/*
* Delete from old mount point vnode list, if on one.
@@ -1931,10 +1931,10 @@ vgonel(vp, p)
* if it is on one.
*/
if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
freedev(vp->v_rdev);
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
vp->v_rdev = NULL;
}
@@ -1950,19 +1950,19 @@ vgonel(vp, p)
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
if (vp->v_flag & VFREE)
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
freevnodes++;
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
splx(s);
}
vp->v_type = VBAD;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
/*
@@ -1976,15 +1976,15 @@ vfinddev(dev, type, vpp)
{
struct vnode *vp;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
if (type == vp->v_type) {
*vpp = vp;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (1);
}
}
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (0);
}
@@ -1999,10 +1999,10 @@ vcount(vp)
int count;
count = 0;
- mtx_enter(&spechash_mtx, MTX_DEF);
+ mtx_lock(&spechash_mtx);
SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
count += vq->v_usecount;
- mtx_exit(&spechash_mtx, MTX_DEF);
+ mtx_unlock(&spechash_mtx);
return (count);
}
@@ -2083,7 +2083,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -2093,11 +2093,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
}
#endif
@@ -2202,14 +2202,14 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
again:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mp->mnt_vnodelist);
vp != NULL;
vp = nvp) {
@@ -2219,22 +2219,22 @@ again:
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto again;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
return (error);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
@@ -2592,7 +2592,7 @@ loop:
continue;
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2604,7 +2604,7 @@ loop:
vput(vp);
}
} else {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
}
}
if (anyio && (--tries > 0))
@@ -2638,7 +2638,7 @@ vfree(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -2646,7 +2646,7 @@ vfree(vp)
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
@@ -2662,11 +2662,11 @@ vbusy(vp)
int s;
s = splbio();
- mtx_enter(&vnode_free_list_mtx, MTX_DEF);
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- mtx_exit(&vnode_free_list_mtx, MTX_DEF);
+ mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~(VFREE|VAGE);
splx(s);
}
@@ -2685,7 +2685,7 @@ vn_pollrecord(vp, p, events)
struct proc *p;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_revents & events) {
/*
* This leaves events we are not interested
@@ -2697,12 +2697,12 @@ vn_pollrecord(vp, p, events)
events &= vp->v_pollinfo.vpi_revents;
vp->v_pollinfo.vpi_revents &= ~events;
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return events;
}
vp->v_pollinfo.vpi_events |= events;
selrecord(p, &vp->v_pollinfo.vpi_selinfo);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return 0;
}
@@ -2717,7 +2717,7 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events & events) {
/*
* We clear vpi_events so that we don't
@@ -2734,7 +2734,7 @@ vn_pollevent(vp, events)
vp->v_pollinfo.vpi_revents |= events;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
/*
@@ -2746,12 +2746,12 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
if (vp->v_pollinfo.vpi_events) {
vp->v_pollinfo.vpi_events = 0;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
@@ -2856,9 +2856,9 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 16e8984..178d2a2 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -176,16 +176,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -243,15 +243,15 @@ mount(p, uap)
return (ENODEV);
}
}
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
/*
* Allocate and initialize the filesystem.
@@ -310,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
vrele(vp);
return (error);
}
@@ -322,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- mtx_exit(&vp->v_interlock, MTX_DEF);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -337,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -464,7 +464,7 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
@@ -484,7 +484,7 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
@@ -535,7 +535,7 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -551,11 +551,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -727,7 +727,7 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -743,7 +743,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -757,11 +757,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 221e9c0..0175123 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -641,10 +641,10 @@ debug_vn_lock(vp, flags, p, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
vp->v_flag |= VXWANT;
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
error = ENOENT;
} else {
@@ -833,9 +833,9 @@ filt_vnattach(struct knote *kn)
if ((vp)->v_tag != VT_UFS)
return (EOPNOTSUPP);
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
return (0);
}
@@ -845,10 +845,10 @@ filt_vndetach(struct knote *kn)
{
struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
- mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_lock(&vp->v_pollinfo.vpi_lock);
SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
kn, knote, kn_selnext);
- mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
+ mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
static int
diff --git a/sys/miscfs/deadfs/dead_vnops.c b/sys/miscfs/deadfs/dead_vnops.c
index 4211f25..8de5c57 100644
--- a/sys/miscfs/deadfs/dead_vnops.c
+++ b/sys/miscfs/deadfs/dead_vnops.c
@@ -211,7 +211,7 @@ dead_lock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
ap->a_flags &= ~LK_INTERLOCK;
}
if (!chkvnlock(vp))
diff --git a/sys/miscfs/nullfs/null_vnops.c b/sys/miscfs/nullfs/null_vnops.c
index 6dc0509..e9fded7 100644
--- a/sys/miscfs/nullfs/null_vnops.c
+++ b/sys/miscfs/nullfs/null_vnops.c
@@ -624,7 +624,7 @@ null_lock(ap)
if (lvp == NULL)
return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, p));
if (flags & LK_INTERLOCK) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
@@ -671,7 +671,7 @@ null_unlock(ap)
return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, p));
if ((flags & LK_THISLAYER) == 0) {
if (flags & LK_INTERLOCK) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
flags &= ~LK_INTERLOCK;
}
VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, p);
diff --git a/sys/miscfs/procfs/procfs_ctl.c b/sys/miscfs/procfs/procfs_ctl.c
index 5d0ce44..1ca2208 100644
--- a/sys/miscfs/procfs/procfs_ctl.c
+++ b/sys/miscfs/procfs/procfs_ctl.c
@@ -167,13 +167,13 @@ procfs_control(curp, p, op)
default:
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (!TRACE_WAIT_P(curp, p)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
return (EBUSY);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
}
@@ -252,31 +252,31 @@ procfs_control(curp, p, op)
error = 0;
if (p->p_flag & P_TRACED) {
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (error == 0 &&
(p->p_stat != SSTOP) &&
(p->p_flag & P_TRACED) &&
(p->p_pptr == curp)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
error = tsleep((caddr_t) p,
PWAIT|PCATCH, "procfsx", 0);
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
if (error == 0 && !TRACE_WAIT_P(curp, p))
error = EBUSY;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
} else {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
while (error == 0 && p->p_stat != SSTOP) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
error = tsleep((caddr_t) p,
PWAIT|PCATCH, "procfs", 0);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
return (error);
@@ -284,10 +284,10 @@ procfs_control(curp, p, op)
panic("procfs_control");
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SSTOP)
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (0);
}
@@ -329,17 +329,17 @@ procfs_doctl(curp, p, pfs, uio)
nm = vfs_findname(signames, msg, xlen);
if (nm) {
PROCTREE_LOCK(PT_SHARED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (TRACE_WAIT_P(curp, p)) {
p->p_xstat = nm->nm_val;
#ifdef FIX_SSTEP
FIX_SSTEP(p);
#endif
setrunnable(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROCTREE_LOCK(PT_RELEASE);
psignal(p, nm->nm_val);
}
diff --git a/sys/miscfs/procfs/procfs_status.c b/sys/miscfs/procfs/procfs_status.c
index 813ad60..14c8fb4 100644
--- a/sys/miscfs/procfs/procfs_status.c
+++ b/sys/miscfs/procfs/procfs_status.c
@@ -123,12 +123,12 @@ procfs_dostatus(curp, p, pfs, uio)
DOCHECK();
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
struct timeval ut, st;
calcru(p, &ut, &st, (struct timeval *) NULL);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ps += snprintf(ps, psbuf + sizeof(psbuf) - ps,
" %ld,%ld %ld,%ld %ld,%ld",
p->p_stats->p_start.tv_sec,
@@ -136,7 +136,7 @@ procfs_dostatus(curp, p, pfs, uio)
ut.tv_sec, ut.tv_usec,
st.tv_sec, st.tv_usec);
} else {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
ps += snprintf(ps, psbuf + sizeof(psbuf) - ps,
" -1,-1 -1,-1 -1,-1");
}
diff --git a/sys/msdosfs/msdosfs_denode.c b/sys/msdosfs/msdosfs_denode.c
index 6c1a3c7..dbdf554 100644
--- a/sys/msdosfs/msdosfs_denode.c
+++ b/sys/msdosfs/msdosfs_denode.c
@@ -130,21 +130,21 @@ msdosfs_hashget(dev, dirclust, diroff)
struct vnode *vp;
loop:
- mtx_enter(&dehash_mtx, MTX_DEF);
+ mtx_lock(&dehash_mtx);
for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) {
if (dirclust == dep->de_dirclust
&& diroff == dep->de_diroffset
&& dev == dep->de_dev
&& dep->de_refcnt != 0) {
vp = DETOV(dep);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&dehash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (dep);
}
}
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_unlock(&dehash_mtx);
return (NULL);
}
@@ -154,7 +154,7 @@ msdosfs_hashins(dep)
{
struct denode **depp, *deq;
- mtx_enter(&dehash_mtx, MTX_DEF);
+ mtx_lock(&dehash_mtx);
depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset);
deq = *depp;
if (deq)
@@ -162,7 +162,7 @@ msdosfs_hashins(dep)
dep->de_next = deq;
dep->de_prev = depp;
*depp = dep;
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_unlock(&dehash_mtx);
}
static void
@@ -171,7 +171,7 @@ msdosfs_hashrem(dep)
{
struct denode *deq;
- mtx_enter(&dehash_mtx, MTX_DEF);
+ mtx_lock(&dehash_mtx);
deq = dep->de_next;
if (deq)
deq->de_prev = dep->de_prev;
@@ -180,7 +180,7 @@ msdosfs_hashrem(dep)
dep->de_next = NULL;
dep->de_prev = NULL;
#endif
- mtx_exit(&dehash_mtx, MTX_DEF);
+ mtx_unlock(&dehash_mtx);
}
/*
diff --git a/sys/msdosfs/msdosfs_vfsops.c b/sys/msdosfs/msdosfs_vfsops.c
index 449d691..3088d7a 100644
--- a/sys/msdosfs/msdosfs_vfsops.c
+++ b/sys/msdosfs/msdosfs_vfsops.c
@@ -862,7 +862,7 @@ msdosfs_sync(mp, waitfor, cred, p)
/*
* Write back each (modified) denode.
*/
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
/*
@@ -872,20 +872,20 @@ loop:
if (vp->v_mount != mp)
goto loop;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
nvp = LIST_NEXT(vp, v_mntvnodes);
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
@@ -895,9 +895,9 @@ loop:
allerror = error;
VOP_UNLOCK(vp, 0, p);
vrele(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
/*
* Flush filesystem control info.
diff --git a/sys/msdosfs/msdosfs_vnops.c b/sys/msdosfs/msdosfs_vnops.c
index e4052f7..fb7b83d 100644
--- a/sys/msdosfs/msdosfs_vnops.c
+++ b/sys/msdosfs/msdosfs_vnops.c
@@ -233,12 +233,12 @@ msdosfs_close(ap)
struct denode *dep = VTODE(vp);
struct timespec ts;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount > 1) {
getnanotime(&ts);
DETIMES(dep, &ts, &ts, &ts);
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return 0;
}
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
index 68b0c01..53a7b1e 100644
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -191,8 +191,8 @@ typedef void if_init_f_t __P((void *));
* (defined above). Entries are added to and deleted from these structures
* by these macros, which should be called with ipl raised to splimp().
*/
-#define IF_LOCK(ifq) mtx_enter(&(ifq)->ifq_mtx, MTX_DEF)
-#define IF_UNLOCK(ifq) mtx_exit(&(ifq)->ifq_mtx, MTX_DEF)
+#define IF_LOCK(ifq) mtx_lock(&(ifq)->ifq_mtx)
+#define IF_UNLOCK(ifq) mtx_unlock(&(ifq)->ifq_mtx)
#define _IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define _IF_DROP(ifq) ((ifq)->ifq_drops++)
#define _IF_QLEN(ifq) ((ifq)->ifq_len)
diff --git a/sys/netgraph/ng_base.c b/sys/netgraph/ng_base.c
index 860d210..e139041 100644
--- a/sys/netgraph/ng_base.c
+++ b/sys/netgraph/ng_base.c
@@ -239,23 +239,23 @@ ng_alloc_hook(void)
{
hook_p hook;
SLIST_ENTRY(ng_hook) temp;
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
hook = LIST_FIRST(&ng_freehooks);
if (hook) {
LIST_REMOVE(hook, hk_hooks);
bcopy(&hook->hk_all, &temp, sizeof(temp));
bzero(hook, sizeof(struct ng_hook));
bcopy(&temp, &hook->hk_all, sizeof(temp));
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
hook->hk_magic = HK_MAGIC;
} else {
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
_NG_ALLOC_HOOK(hook);
if (hook) {
hook->hk_magic = HK_MAGIC;
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
SLIST_INSERT_HEAD(&ng_allhooks, hook, hk_all);
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
}
}
return (hook);
@@ -266,23 +266,23 @@ ng_alloc_node(void)
{
node_p node;
SLIST_ENTRY(ng_node) temp;
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
node = LIST_FIRST(&ng_freenodes);
if (node) {
LIST_REMOVE(node, nd_nodes);
bcopy(&node->nd_all, &temp, sizeof(temp));
bzero(node, sizeof(struct ng_node));
bcopy(&temp, &node->nd_all, sizeof(temp));
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
node->nd_magic = ND_MAGIC;
} else {
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
_NG_ALLOC_NODE(node);
if (node) {
node->nd_magic = ND_MAGIC;
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
SLIST_INSERT_HEAD(&ng_allnodes, node, nd_all);
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
}
}
return (node);
@@ -294,18 +294,18 @@ ng_alloc_node(void)
#define NG_FREE_HOOK(hook) \
do { \
- mtx_enter(&ng_nodelist_mtx, MTX_DEF); \
+ mtx_lock(&ng_nodelist_mtx); \
LIST_INSERT_HEAD(&ng_freehooks, hook, hk_hooks); \
hook->hk_magic = 0; \
- mtx_exit(&ng_nodelist_mtx, MTX_DEF); \
+ mtx_unlock(&ng_nodelist_mtx); \
} while (0)
#define NG_FREE_NODE(node) \
do { \
- mtx_enter(&ng_nodelist_mtx, MTX_DEF); \
+ mtx_lock(&ng_nodelist_mtx); \
LIST_INSERT_HEAD(&ng_freenodes, node, nd_nodes); \
node->nd_magic = 0; \
- mtx_exit(&ng_nodelist_mtx, MTX_DEF); \
+ mtx_unlock(&ng_nodelist_mtx); \
} while (0)
#else /* NETGRAPH_DEBUG */ /*----------------------------------------------*/
@@ -625,13 +625,13 @@ ng_make_node_common(struct ng_type *type, node_p *nodepp)
LIST_INIT(&node->nd_hooks);
/* Link us into the node linked list */
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
LIST_INSERT_HEAD(&ng_nodelist, node, nd_nodes);
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
/* get an ID and put us in the hash chain */
- mtx_enter(&ng_idhash_mtx, MTX_DEF);
+ mtx_lock(&ng_idhash_mtx);
for (;;) { /* wrap protection, even if silly */
node_p node2 = NULL;
node->nd_ID = nextID++; /* 137/second for 1 year before wrap */
@@ -644,7 +644,7 @@ ng_make_node_common(struct ng_type *type, node_p *nodepp)
}
LIST_INSERT_HEAD(&ng_ID_hash[NG_IDHASH_FN(node->nd_ID)],
node, nd_idnodes);
- mtx_exit(&ng_idhash_mtx, MTX_DEF);
+ mtx_unlock(&ng_idhash_mtx);
/* Done */
*nodepp = node;
@@ -757,14 +757,14 @@ ng_unref_node(node_p node)
if (v == 1) { /* we were the last */
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
node->nd_type->refs--; /* XXX maybe should get types lock? */
LIST_REMOVE(node, nd_nodes);
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
- mtx_enter(&ng_idhash_mtx, MTX_DEF);
+ mtx_lock(&ng_idhash_mtx);
LIST_REMOVE(node, nd_idnodes);
- mtx_exit(&ng_idhash_mtx, MTX_DEF);
+ mtx_unlock(&ng_idhash_mtx);
mtx_destroy(&node->nd_input_queue.q_mtx);
NG_FREE_NODE(node);
@@ -778,11 +778,11 @@ static node_p
ng_ID2noderef(ng_ID_t ID)
{
node_p node;
- mtx_enter(&ng_idhash_mtx, MTX_DEF);
+ mtx_lock(&ng_idhash_mtx);
NG_IDHASH_FIND(ID, node);
if(node)
NG_NODE_REF(node);
- mtx_exit(&ng_idhash_mtx, MTX_DEF);
+ mtx_unlock(&ng_idhash_mtx);
return(node);
}
@@ -859,7 +859,7 @@ ng_name2noderef(node_p here, const char *name)
}
/* Find node by name */
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
LIST_FOREACH(node, &ng_nodelist, nd_nodes) {
if (NG_NODE_IS_VALID(node)
&& NG_NODE_HAS_NAME(node)
@@ -869,7 +869,7 @@ ng_name2noderef(node_p here, const char *name)
}
if (node)
NG_NODE_REF(node);
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
return (node);
}
@@ -1148,10 +1148,10 @@ ng_newtype(struct ng_type *tp)
/* Link in new type */
- mtx_enter(&ng_typelist_mtx, MTX_DEF);
+ mtx_lock(&ng_typelist_mtx);
LIST_INSERT_HEAD(&ng_typelist, tp, types);
tp->refs = 1; /* first ref is linked list */
- mtx_exit(&ng_typelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_typelist_mtx);
return (0);
}
@@ -1163,12 +1163,12 @@ ng_findtype(const char *typename)
{
struct ng_type *type;
- mtx_enter(&ng_typelist_mtx, MTX_DEF);
+ mtx_lock(&ng_typelist_mtx);
LIST_FOREACH(type, &ng_typelist, types) {
if (strcmp(type->name, typename) == 0)
break;
}
- mtx_exit(&ng_typelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_typelist_mtx);
return (type);
}
@@ -1933,7 +1933,7 @@ ng_acquire_read(struct ng_queue *ngq, item_p item)
atomic_subtract_long(&ngq->q_flags, READER_INCREMENT);
/* ######### End Hack alert ######### */
- mtx_enter((&ngq->q_mtx), MTX_SPIN);
+ mtx_lock_spin((&ngq->q_mtx));
/*
* Try again. Another processor (or interrupt for that matter) may
* have removed the last queued item that was stopping us from
@@ -1942,7 +1942,7 @@ ng_acquire_read(struct ng_queue *ngq, item_p item)
*/
if ((ngq->q_flags & NGQ_RMASK) == 0) {
atomic_add_long(&ngq->q_flags, READER_INCREMENT);
- mtx_exit((&ngq->q_mtx), MTX_SPIN);
+ mtx_unlock_spin((&ngq->q_mtx));
return (item);
}
@@ -1957,7 +1957,7 @@ ng_acquire_read(struct ng_queue *ngq, item_p item)
* see if we can dequeue something to run instead.
*/
item = ng_dequeue(ngq);
- mtx_exit(&(ngq->q_mtx), MTX_SPIN);
+ mtx_unlock_spin(&(ngq->q_mtx));
return (item);
}
@@ -1965,7 +1965,7 @@ static __inline item_p
ng_acquire_write(struct ng_queue *ngq, item_p item)
{
restart:
- mtx_enter(&(ngq->q_mtx), MTX_SPIN);
+ mtx_lock_spin(&(ngq->q_mtx));
/*
* If there are no readers, no writer, and no pending packets, then
* we can just go ahead. In all other situations we need to queue the
@@ -1973,7 +1973,7 @@ restart:
*/
if ((ngq->q_flags & NGQ_WMASK) == 0) {
atomic_add_long(&ngq->q_flags, WRITER_ACTIVE);
- mtx_exit((&ngq->q_mtx), MTX_SPIN);
+ mtx_unlock_spin((&ngq->q_mtx));
if (ngq->q_flags & READER_MASK) {
/* Collision with fast-track reader */
atomic_subtract_long(&ngq->q_flags, WRITER_ACTIVE);
@@ -1993,7 +1993,7 @@ restart:
* see if we can dequeue something to run instead.
*/
item = ng_dequeue(ngq);
- mtx_exit(&(ngq->q_mtx), MTX_SPIN);
+ mtx_unlock_spin(&(ngq->q_mtx));
return (item);
}
@@ -2014,7 +2014,7 @@ ng_flush_input_queue(struct ng_queue * ngq)
{
item_p item;
u_int add_arg;
- mtx_enter(&ngq->q_mtx, MTX_SPIN);
+ mtx_lock_spin(&ngq->q_mtx);
for (;;) {
/* Now take a look at what's on the queue */
if (ngq->q_flags & READ_PENDING) {
@@ -2038,16 +2038,16 @@ ng_flush_input_queue(struct ng_queue * ngq)
}
atomic_add_long(&ngq->q_flags, add_arg);
- mtx_exit(&ngq->q_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ngq->q_mtx);
NG_FREE_ITEM(item);
- mtx_enter(&ngq->q_mtx, MTX_SPIN);
+ mtx_lock_spin(&ngq->q_mtx);
}
/*
* Take us off the work queue if we are there.
* We definitely have no work to be done.
*/
ng_worklist_remove(ngq->q_node);
- mtx_exit(&ngq->q_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ngq->q_mtx);
}
/***********************************************************************
@@ -2167,7 +2167,7 @@ ng_snd_item(item_p item, int queue)
#ifdef NETGRAPH_DEBUG
_ngi_check(item, __FILE__, __LINE__);
#endif
- mtx_enter(&(ngq->q_mtx), MTX_SPIN);
+ mtx_lock_spin(&(ngq->q_mtx));
ng_queue_rw(ngq, item, rw);
/*
* If there are active elements then we can rely on
@@ -2180,7 +2180,7 @@ ng_snd_item(item_p item, int queue)
if (CAN_GET_WORK(ngq->q_flags)) {
ng_setisr(node);
}
- mtx_exit(&(ngq->q_mtx), MTX_SPIN);
+ mtx_unlock_spin(&(ngq->q_mtx));
return (0);
}
/*
@@ -2234,13 +2234,13 @@ ng_snd_item(item_p item, int queue)
* dequeue acquires and adjusts the input_queue as it dequeues
* packets. It acquires the rw lock as needed.
*/
- mtx_enter(&ngq->q_mtx, MTX_SPIN);
+ mtx_lock_spin(&ngq->q_mtx);
item = ng_dequeue(ngq); /* fixes worklist too*/
if (!item) {
- mtx_exit(&ngq->q_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ngq->q_mtx);
return (error);
}
- mtx_exit(&ngq->q_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ngq->q_mtx);
/*
* We have the appropriate lock, so run the item.
@@ -2559,7 +2559,7 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
node_p node;
int num = 0;
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
/* Count number of nodes */
LIST_FOREACH(node, &ng_nodelist, nd_nodes) {
if (NG_NODE_IS_VALID(node)
@@ -2567,7 +2567,7 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
num++;
}
}
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
/* Get response struct */
NG_MKRESPONSE(resp, msg, sizeof(*nl)
@@ -2580,7 +2580,7 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
/* Cycle through the linked list of nodes */
nl->numnames = 0;
- mtx_enter(&ng_nodelist_mtx, MTX_DEF);
+ mtx_lock(&ng_nodelist_mtx);
LIST_FOREACH(node, &ng_nodelist, nd_nodes) {
struct nodeinfo *const np = &nl->nodeinfo[nl->numnames];
@@ -2600,7 +2600,7 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
np->hooks = node->nd_numhooks;
nl->numnames++;
}
- mtx_exit(&ng_nodelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_nodelist_mtx);
break;
}
@@ -2610,12 +2610,12 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
struct ng_type *type;
int num = 0;
- mtx_enter(&ng_typelist_mtx, MTX_DEF);
+ mtx_lock(&ng_typelist_mtx);
/* Count number of types */
LIST_FOREACH(type, &ng_typelist, types) {
num++;
}
- mtx_exit(&ng_typelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_typelist_mtx);
/* Get response struct */
NG_MKRESPONSE(resp, msg, sizeof(*tl)
@@ -2628,7 +2628,7 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
/* Cycle through the linked list of types */
tl->numtypes = 0;
- mtx_enter(&ng_typelist_mtx, MTX_DEF);
+ mtx_lock(&ng_typelist_mtx);
LIST_FOREACH(type, &ng_typelist, types) {
struct typeinfo *const tp = &tl->typeinfo[tl->numtypes];
@@ -2641,7 +2641,7 @@ ng_generic_msg(node_p here, item_p item, hook_p lasthook)
tp->numnodes = type->refs - 1; /* don't count list */
tl->numtypes++;
}
- mtx_exit(&ng_typelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_typelist_mtx);
break;
}
@@ -2868,10 +2868,10 @@ ng_mod_event(module_t mod, int event, void *data)
/* Call type specific code */
if (type->mod_event != NULL)
if ((error = (*type->mod_event)(mod, event, data))) {
- mtx_enter(&ng_typelist_mtx, MTX_DEF);
+ mtx_lock(&ng_typelist_mtx);
type->refs--; /* undo it */
LIST_REMOVE(type, types);
- mtx_exit(&ng_typelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_typelist_mtx);
}
splx(s);
break;
@@ -2893,9 +2893,9 @@ ng_mod_event(module_t mod, int event, void *data)
break;
}
}
- mtx_enter(&ng_typelist_mtx, MTX_DEF);
+ mtx_lock(&ng_typelist_mtx);
LIST_REMOVE(type, types);
- mtx_exit(&ng_typelist_mtx, MTX_DEF);
+ mtx_unlock(&ng_typelist_mtx);
}
splx(s);
break;
@@ -3238,15 +3238,15 @@ ngintr(void)
node_p node = NULL;
for (;;) {
- mtx_enter(&ng_worklist_mtx, MTX_SPIN);
+ mtx_lock_spin(&ng_worklist_mtx);
node = TAILQ_FIRST(&ng_worklist);
if (!node) {
- mtx_exit(&ng_worklist_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ng_worklist_mtx);
break;
}
node->nd_flags &= ~NG_WORKQ;
TAILQ_REMOVE(&ng_worklist, node, nd_work);
- mtx_exit(&ng_worklist_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ng_worklist_mtx);
/*
* We have the node. We also take over the reference
* that the list had on it.
@@ -3261,14 +3261,14 @@ ngintr(void)
* future.
*/
for (;;) {
- mtx_enter(&node->nd_input_queue.q_mtx, MTX_SPIN);
+ mtx_lock_spin(&node->nd_input_queue.q_mtx);
item = ng_dequeue(&node->nd_input_queue);
if (item == NULL) {
- mtx_exit(&node->nd_input_queue.q_mtx, MTX_SPIN);
+ mtx_unlock_spin(&node->nd_input_queue.q_mtx);
NG_NODE_UNREF(node);
break; /* go look for another node */
} else {
- mtx_exit(&node->nd_input_queue.q_mtx, MTX_SPIN);
+ mtx_unlock_spin(&node->nd_input_queue.q_mtx);
ng_apply_item(item);
}
}
@@ -3278,19 +3278,19 @@ ngintr(void)
static void
ng_worklist_remove(node_p node)
{
- mtx_enter(&ng_worklist_mtx, MTX_SPIN);
+ mtx_lock_spin(&ng_worklist_mtx);
if (node->nd_flags & NG_WORKQ) {
TAILQ_REMOVE(&ng_worklist, node, nd_work);
NG_NODE_UNREF(node);
}
node->nd_flags &= ~NG_WORKQ;
- mtx_exit(&ng_worklist_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ng_worklist_mtx);
}
static void
ng_setisr(node_p node)
{
- mtx_enter(&ng_worklist_mtx, MTX_SPIN);
+ mtx_lock_spin(&ng_worklist_mtx);
if ((node->nd_flags & NG_WORKQ) == 0) {
/*
* If we are not already on the work queue,
@@ -3300,7 +3300,7 @@ ng_setisr(node_p node)
TAILQ_INSERT_TAIL(&ng_worklist, node, nd_work);
NG_NODE_REF(node);
}
- mtx_exit(&ng_worklist_mtx, MTX_SPIN);
+ mtx_unlock_spin(&ng_worklist_mtx);
schednetisr(NETISR_NETGRAPH);
}
diff --git a/sys/nfs/nfs_nqlease.c b/sys/nfs/nfs_nqlease.c
index 379db2c..3878a77 100644
--- a/sys/nfs/nfs_nqlease.c
+++ b/sys/nfs/nfs_nqlease.c
@@ -1194,7 +1194,7 @@ nqnfs_lease_updatetime(deltat)
* Search the mount list for all nqnfs mounts and do their timer
* queues.
*/
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nxtmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nxtmp = TAILQ_NEXT(mp, mnt_list);
@@ -1208,11 +1208,11 @@ nqnfs_lease_updatetime(deltat)
}
}
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
nxtmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
}
#ifndef NFS_NOSERVER
diff --git a/sys/ntfs/ntfs_ihash.c b/sys/ntfs/ntfs_ihash.c
index b9c0345..fea648c 100644
--- a/sys/ntfs/ntfs_ihash.c
+++ b/sys/ntfs/ntfs_ihash.c
@@ -93,11 +93,11 @@ ntfs_nthashlookup(dev, inum)
{
struct ntnode *ip;
- mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_lock(&ntfs_nthash_mtx);
LIST_FOREACH(ip, NTNOHASH(dev, inum), i_hash)
if (inum == ip->i_number && dev == ip->i_dev)
break;
- mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_unlock(&ntfs_nthash_mtx);
return (ip);
}
@@ -111,11 +111,11 @@ ntfs_nthashins(ip)
{
struct nthashhead *ipp;
- mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_lock(&ntfs_nthash_mtx);
ipp = NTNOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(ipp, ip, i_hash);
ip->i_flag |= IN_HASHED;
- mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_unlock(&ntfs_nthash_mtx);
}
/*
@@ -125,10 +125,10 @@ void
ntfs_nthashrem(ip)
struct ntnode *ip;
{
- mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_lock(&ntfs_nthash_mtx);
if (ip->i_flag & IN_HASHED) {
ip->i_flag &= ~IN_HASHED;
LIST_REMOVE(ip, i_hash);
}
- mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
+ mtx_unlock(&ntfs_nthash_mtx);
}
diff --git a/sys/ntfs/ntfs_subr.c b/sys/ntfs/ntfs_subr.c
index 38324f2..c55cd0a 100644
--- a/sys/ntfs/ntfs_subr.c
+++ b/sys/ntfs/ntfs_subr.c
@@ -360,7 +360,7 @@ ntfs_ntget(ip)
dprintf(("ntfs_ntget: get ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount++;
LOCKMGR(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock);
@@ -438,7 +438,7 @@ ntfs_ntput(ip)
dprintf(("ntfs_ntput: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount--;
#ifdef DIAGNOSTIC
@@ -462,7 +462,7 @@ ntfs_ntput(ip)
LIST_REMOVE(vap,va_list);
ntfs_freentvattr(vap);
}
- mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_unlock(&ip->i_interlock);
mtx_destroy(&ip->i_interlock);
lockdestroy(&ip->i_lock);
@@ -479,9 +479,9 @@ void
ntfs_ntref(ip)
struct ntnode *ip;
{
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount++;
- mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_unlock(&ip->i_interlock);
dprintf(("ntfs_ntref: ino %d, usecount: %d\n",
ip->i_number, ip->i_usecount));
@@ -498,13 +498,13 @@ ntfs_ntrele(ip)
dprintf(("ntfs_ntrele: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- mtx_enter(&ip->i_interlock, MTX_DEF);
+ mtx_lock(&ip->i_interlock);
ip->i_usecount--;
if (ip->i_usecount < 0)
panic("ntfs_ntrele: ino: %d usecount: %d \n",
ip->i_number,ip->i_usecount);
- mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_unlock(&ip->i_interlock);
}
/*
diff --git a/sys/ntfs/ntfs_vfsops.c b/sys/ntfs/ntfs_vfsops.c
index 7c6b2c0..384883e 100644
--- a/sys/ntfs/ntfs_vfsops.c
+++ b/sys/ntfs/ntfs_vfsops.c
@@ -196,9 +196,9 @@ ntfs_mountroot()
return (error);
}
- mtx_enter(&mountlist_mtx, MTX_DEF);
+ mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- mtx_exit(&mountlist_mtx, MTX_DEF);
+ mtx_unlock(&mountlist_mtx);
(void)ntfs_statfs(mp, &mp->mnt_stat, p);
vfs_unbusy(mp);
return (0);
diff --git a/sys/nwfs/nwfs_node.c b/sys/nwfs/nwfs_node.c
index 02a4ee3..2025191 100644
--- a/sys/nwfs/nwfs_node.c
+++ b/sys/nwfs/nwfs_node.c
@@ -149,7 +149,7 @@ loop:
rescan:
if (nwfs_hashlookup(nmp, fid, &np) == 0) {
vp = NWTOV(np);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
lockmgr(&nwhashlock, LK_RELEASE, NULL, p);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
diff --git a/sys/nwfs/nwfs_vnops.c b/sys/nwfs/nwfs_vnops.c
index 59694f9..93fe639 100644
--- a/sys/nwfs/nwfs_vnops.c
+++ b/sys/nwfs/nwfs_vnops.c
@@ -256,24 +256,24 @@ nwfs_close(ap)
if (vp->v_type == VDIR) return 0; /* nothing to do now */
error = 0;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (np->opened == 0) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return 0;
}
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
error = nwfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (np->opened == 0) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return 0;
}
if (--np->opened == 0) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
error = ncp_close_file(NWFSTOCONN(VTONWFS(vp)), &np->n_fh,
ap->a_p, ap->a_cred);
} else
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
np->n_atime = 0;
return (error);
}
diff --git a/sys/pc98/cbus/clock.c b/sys/pc98/cbus/clock.c
index 08af871..96b7e10 100644
--- a/sys/pc98/cbus/clock.c
+++ b/sys/pc98/cbus/clock.c
@@ -232,7 +232,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -240,7 +240,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -257,14 +257,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -272,7 +272,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -280,7 +280,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -465,7 +465,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -473,7 +473,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -610,10 +610,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -861,7 +861,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -870,7 +870,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -885,11 +885,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1540,7 +1540,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1564,7 +1564,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/pc98/cbus/pcrtc.c b/sys/pc98/cbus/pcrtc.c
index 08af871..96b7e10 100644
--- a/sys/pc98/cbus/pcrtc.c
+++ b/sys/pc98/cbus/pcrtc.c
@@ -232,7 +232,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -240,7 +240,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -257,14 +257,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -272,7 +272,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -280,7 +280,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -465,7 +465,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -473,7 +473,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -610,10 +610,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -861,7 +861,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -870,7 +870,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -885,11 +885,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1540,7 +1540,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1564,7 +1564,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/pc98/cbus/sio.c b/sys/pc98/cbus/sio.c
index fbc78bc..31914fe 100644
--- a/sys/pc98/cbus/sio.c
+++ b/sys/pc98/cbus/sio.c
@@ -1418,7 +1418,7 @@ sioprobe(dev, xrid)
* but mask them in the processor as well in case there are some
* (misconfigured) shared interrupts.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
/* EXTRA DELAY? */
/*
@@ -1528,7 +1528,7 @@ sioprobe(dev, xrid)
CLR_FLAG(dev, COM_C_IIR_TXRDYBUG);
}
sio_setreg(com, com_cfcr, CFCR_8BITS);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (iobase == siocniobase ? 0 : result);
}
@@ -1586,7 +1586,7 @@ sioprobe(dev, xrid)
}
#endif
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
irqs = irqmap[1] & ~irqmap[0];
if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 &&
@@ -1864,7 +1864,7 @@ sioattach(dev, xrid)
} else
com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED;
if (siosetwater(com, com->it_in.c_ispeed) != 0) {
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Leave i/o resources allocated if this is a `cn'-level
* console, so that other devices can't snarf them.
@@ -1873,7 +1873,7 @@ sioattach(dev, xrid)
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (ENOMEM);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
termioschars(&com->it_in);
com->it_out = com->it_in;
@@ -2274,7 +2274,7 @@ open_top:
}
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
#ifdef PC98
if (IS_8251(com->pc98_if_type)) {
com_tiocm_bis(com, TIOCM_LE);
@@ -2302,7 +2302,7 @@ open_top:
#ifdef PC98
}
#endif
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Handle initial DCD. Callout devices get a fake initial
* DCD (trapdoor DCD). If we are callout, then any sleeping
@@ -2625,7 +2625,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
incc = com->iptr - buf;
if (tp->t_rawq.c_cc + incc > tp->t_ihiwat
&& (com->state & CS_RTS_IFLOW
@@ -2646,7 +2646,7 @@ sioinput(com)
tp->t_lflag &= ~FLUSHO;
comstart(tp);
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
} else {
do {
@@ -2655,7 +2655,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
line_status = buf[com->ierroff];
recv_data = *buf++;
if (line_status
@@ -2670,7 +2670,7 @@ sioinput(com)
recv_data |= TTY_PE;
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
}
com_events -= (com->iptr - com->ibuf);
@@ -2712,9 +2712,9 @@ siointr(arg)
#ifndef COM_MULTIPORT
com = (struct com_s *)arg;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#else /* COM_MULTIPORT */
bool_t possibly_more_intrs;
int unit;
@@ -2726,7 +2726,7 @@ siointr(arg)
* devices, then the edge from one may be lost because another is
* on.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
do {
possibly_more_intrs = FALSE;
for (unit = 0; unit < sio_numunits; ++unit) {
@@ -2764,7 +2764,7 @@ siointr(arg)
/* XXX COM_UNLOCK(); */
}
} while (possibly_more_intrs);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#endif /* COM_MULTIPORT */
}
@@ -3361,7 +3361,7 @@ repeat:
* Discard any events related to never-opened or
* going-away devices.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
incc = com->iptr - com->ibuf;
com->iptr = com->ibuf;
if (com->state & CS_CHECKMSR) {
@@ -3369,13 +3369,13 @@ repeat:
com->state &= ~CS_CHECKMSR;
}
com_events -= incc;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
continue;
}
if (com->iptr != com->ibuf) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
sioinput(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (com->state & CS_CHECKMSR) {
u_char delta_modem_status;
@@ -3383,13 +3383,13 @@ repeat:
#ifdef PC98
if (!IS_8251(com->pc98_if_type)) {
#endif
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta_modem_status = com->last_modem_status
^ com->prev_modem_status;
com->prev_modem_status = com->last_modem_status;
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_CHECKMSR;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta_modem_status & MSR_DCD)
(*linesw[tp->t_line].l_modem)
(tp, com->prev_modem_status & MSR_DCD);
@@ -3398,10 +3398,10 @@ repeat:
#endif
}
if (com->state & CS_ODONE) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_ODONE;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (!(com->state & CS_BUSY)
&& !(com->extra_state & CSE_BUSYCHECK)) {
timeout(siobusycheck, com, hz / 100);
@@ -3665,7 +3665,7 @@ comparam(tp, t)
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
splx(s);
comstart(tp);
if (com->ibufold != NULL) {
@@ -3703,7 +3703,7 @@ siosetwater(com, speed)
ibufsize = 2048;
#endif
if (ibufsize == com->ibufsize) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (0);
}
@@ -3713,7 +3713,7 @@ siosetwater(com, speed)
*/
ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT);
if (ibuf == NULL) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (ENOMEM);
}
@@ -3731,7 +3731,7 @@ siosetwater(com, speed)
* Read current input buffer, if any. Continue with interrupts
* disabled.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->iptr != com->ibuf)
sioinput(com);
@@ -3766,7 +3766,7 @@ comstart(tp)
if (com == NULL)
return;
s = spltty();
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (tp->t_state & TS_TTSTOP)
com->state &= ~CS_TTGO;
else
@@ -3805,7 +3805,7 @@ comstart(tp)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
#endif
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
ttwwakeup(tp);
splx(s);
@@ -3825,7 +3825,7 @@ comstart(tp)
#endif
com->obufs[0].l_next = NULL;
com->obufs[0].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -3837,7 +3837,7 @@ comstart(tp)
com->obufq.l_next = &com->obufs[0];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) {
com->obufs[1].l_tail
@@ -3849,7 +3849,7 @@ comstart(tp)
#endif
com->obufs[1].l_next = NULL;
com->obufs[1].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -3861,14 +3861,14 @@ comstart(tp)
com->obufq.l_next = &com->obufs[1];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
tp->t_state |= TS_BUSY;
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com); /* fake interrupt to start output */
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
ttwwakeup(tp);
splx(s);
}
@@ -3886,7 +3886,7 @@ comstop(tp, rw)
com = com_addr(DEV_TO_UNIT(tp->t_dev));
if (com == NULL || com->gone)
return;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (rw & FWRITE) {
#ifdef PC98
if (!IS_8251(com->pc98_if_type)) {
@@ -3932,7 +3932,7 @@ comstop(tp, rw)
com_events -= (com->iptr - com->ibuf);
com->iptr = com->ibuf;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
comstart(tp);
}
@@ -3975,7 +3975,7 @@ commctl(com, bits, how)
mcr |= MCR_RTS;
if (com->gone)
return(0);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
switch (how) {
case DMSET:
outb(com->modem_ctl_port,
@@ -3988,7 +3988,7 @@ commctl(com, bits, how)
outb(com->modem_ctl_port, com->mcr_image &= ~mcr);
break;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
return (0);
}
@@ -4047,9 +4047,9 @@ comwakeup(chan)
com = com_addr(unit);
if (com != NULL && !com->gone
&& (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
}
@@ -4071,10 +4071,10 @@ comwakeup(chan)
u_int delta;
u_long total;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta = com->delta_error_counts[errnum];
com->delta_error_counts[errnum] = 0;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta == 0)
continue;
total = com->error_counts[errnum] += delta;
diff --git a/sys/pc98/i386/machdep.c b/sys/pc98/i386/machdep.c
index 167a2bd..49edba8 100644
--- a/sys/pc98/i386/machdep.c
+++ b/sys/pc98/i386/machdep.c
@@ -2219,7 +2219,7 @@ init386(first)
* Giant is used early for at least debugger traps and unexpected traps.
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* make ldt memory segments */
/*
diff --git a/sys/pc98/pc98/clock.c b/sys/pc98/pc98/clock.c
index 08af871..96b7e10 100644
--- a/sys/pc98/pc98/clock.c
+++ b/sys/pc98/pc98/clock.c
@@ -232,7 +232,7 @@ clkintr(struct clockframe frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
@@ -240,7 +240,7 @@ clkintr(struct clockframe frame)
i8254_lastcount = 0;
}
clkintr_pending = 0;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
switch (timer0_state) {
@@ -257,14 +257,14 @@ clkintr(struct clockframe frame)
break;
case ACQUIRE_PENDING:
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = TIMER_DIV(new_rate);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer_func = new_function;
timer0_state = ACQUIRED;
break;
@@ -272,7 +272,7 @@ clkintr(struct clockframe frame)
case RELEASE_PENDING:
if ((timer0_prescaler_count += timer0_max_count)
>= hardclock_max_count) {
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
i8254_offset = i8254_get_timecount(NULL);
i8254_lastcount = 0;
timer0_max_count = hardclock_max_count;
@@ -280,7 +280,7 @@ clkintr(struct clockframe frame)
TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
timer0_prescaler_count = 0;
timer_func = hardclock;
timer0_state = RELEASED;
@@ -465,7 +465,7 @@ getit(void)
{
int high, low;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -473,7 +473,7 @@ getit(void)
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
@@ -610,10 +610,10 @@ sysbeep(int pitch, int period)
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_CNTR2, pitch);
outb(TIMER_CNTR2, (pitch>>8));
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
outb(IO_PPI, inb(IO_PPI) | 3);
@@ -861,7 +861,7 @@ set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
@@ -870,7 +870,7 @@ set_timer_freq(u_int freq, int intr_freq)
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -885,11 +885,11 @@ void
i8254_restore(void)
{
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
}
/*
@@ -1540,7 +1540,7 @@ i8254_get_timecount(struct timecounter *tc)
u_int eflags;
eflags = read_eflags();
- mtx_enter(&clock_lock, MTX_SPIN);
+ mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
@@ -1564,7 +1564,7 @@ i8254_get_timecount(struct timecounter *tc)
}
i8254_lastcount = count;
count += i8254_offset;
- mtx_exit(&clock_lock, MTX_SPIN);
+ mtx_unlock_spin(&clock_lock);
return (count);
}
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index 167a2bd..49edba8 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -2219,7 +2219,7 @@ init386(first)
* Giant is used early for at least debugger traps and unexpected traps.
*/
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/* make ldt memory segments */
/*
diff --git a/sys/pc98/pc98/npx.c b/sys/pc98/pc98/npx.c
index 0f9a8b5..2625afb 100644
--- a/sys/pc98/pc98/npx.c
+++ b/sys/pc98/pc98/npx.c
@@ -774,7 +774,7 @@ npx_intr(dummy)
u_short control;
struct intrframe *frame;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
if (PCPU_GET(npxproc) == NULL || !npx_exists) {
printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n",
PCPU_GET(npxproc), curproc, npx_exists);
@@ -837,7 +837,7 @@ npx_intr(dummy)
*/
psignal(curproc, SIGFPE);
}
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
/*
diff --git a/sys/pc98/pc98/sio.c b/sys/pc98/pc98/sio.c
index fbc78bc..31914fe 100644
--- a/sys/pc98/pc98/sio.c
+++ b/sys/pc98/pc98/sio.c
@@ -1418,7 +1418,7 @@ sioprobe(dev, xrid)
* but mask them in the processor as well in case there are some
* (misconfigured) shared interrupts.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
/* EXTRA DELAY? */
/*
@@ -1528,7 +1528,7 @@ sioprobe(dev, xrid)
CLR_FLAG(dev, COM_C_IIR_TXRDYBUG);
}
sio_setreg(com, com_cfcr, CFCR_8BITS);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (iobase == siocniobase ? 0 : result);
}
@@ -1586,7 +1586,7 @@ sioprobe(dev, xrid)
}
#endif
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
irqs = irqmap[1] & ~irqmap[0];
if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 &&
@@ -1864,7 +1864,7 @@ sioattach(dev, xrid)
} else
com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED;
if (siosetwater(com, com->it_in.c_ispeed) != 0) {
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Leave i/o resources allocated if this is a `cn'-level
* console, so that other devices can't snarf them.
@@ -1873,7 +1873,7 @@ sioattach(dev, xrid)
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
return (ENOMEM);
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
termioschars(&com->it_in);
com->it_out = com->it_in;
@@ -2274,7 +2274,7 @@ open_top:
}
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
#ifdef PC98
if (IS_8251(com->pc98_if_type)) {
com_tiocm_bis(com, TIOCM_LE);
@@ -2302,7 +2302,7 @@ open_top:
#ifdef PC98
}
#endif
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
/*
* Handle initial DCD. Callout devices get a fake initial
* DCD (trapdoor DCD). If we are callout, then any sleeping
@@ -2625,7 +2625,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
incc = com->iptr - buf;
if (tp->t_rawq.c_cc + incc > tp->t_ihiwat
&& (com->state & CS_RTS_IFLOW
@@ -2646,7 +2646,7 @@ sioinput(com)
tp->t_lflag &= ~FLUSHO;
comstart(tp);
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
} else {
do {
@@ -2655,7 +2655,7 @@ sioinput(com)
* semantics instead of the save-and-disable semantics
* that are used everywhere else.
*/
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
line_status = buf[com->ierroff];
recv_data = *buf++;
if (line_status
@@ -2670,7 +2670,7 @@ sioinput(com)
recv_data |= TTY_PE;
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
} while (buf < com->iptr);
}
com_events -= (com->iptr - com->ibuf);
@@ -2712,9 +2712,9 @@ siointr(arg)
#ifndef COM_MULTIPORT
com = (struct com_s *)arg;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#else /* COM_MULTIPORT */
bool_t possibly_more_intrs;
int unit;
@@ -2726,7 +2726,7 @@ siointr(arg)
* devices, then the edge from one may be lost because another is
* on.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
do {
possibly_more_intrs = FALSE;
for (unit = 0; unit < sio_numunits; ++unit) {
@@ -2764,7 +2764,7 @@ siointr(arg)
/* XXX COM_UNLOCK(); */
}
} while (possibly_more_intrs);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
#endif /* COM_MULTIPORT */
}
@@ -3361,7 +3361,7 @@ repeat:
* Discard any events related to never-opened or
* going-away devices.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
incc = com->iptr - com->ibuf;
com->iptr = com->ibuf;
if (com->state & CS_CHECKMSR) {
@@ -3369,13 +3369,13 @@ repeat:
com->state &= ~CS_CHECKMSR;
}
com_events -= incc;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
continue;
}
if (com->iptr != com->ibuf) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
sioinput(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (com->state & CS_CHECKMSR) {
u_char delta_modem_status;
@@ -3383,13 +3383,13 @@ repeat:
#ifdef PC98
if (!IS_8251(com->pc98_if_type)) {
#endif
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta_modem_status = com->last_modem_status
^ com->prev_modem_status;
com->prev_modem_status = com->last_modem_status;
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_CHECKMSR;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta_modem_status & MSR_DCD)
(*linesw[tp->t_line].l_modem)
(tp, com->prev_modem_status & MSR_DCD);
@@ -3398,10 +3398,10 @@ repeat:
#endif
}
if (com->state & CS_ODONE) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
com_events -= LOTS_OF_EVENTS;
com->state &= ~CS_ODONE;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (!(com->state & CS_BUSY)
&& !(com->extra_state & CSE_BUSYCHECK)) {
timeout(siobusycheck, com, hz / 100);
@@ -3665,7 +3665,7 @@ comparam(tp, t)
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
splx(s);
comstart(tp);
if (com->ibufold != NULL) {
@@ -3703,7 +3703,7 @@ siosetwater(com, speed)
ibufsize = 2048;
#endif
if (ibufsize == com->ibufsize) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (0);
}
@@ -3713,7 +3713,7 @@ siosetwater(com, speed)
*/
ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT);
if (ibuf == NULL) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
return (ENOMEM);
}
@@ -3731,7 +3731,7 @@ siosetwater(com, speed)
* Read current input buffer, if any. Continue with interrupts
* disabled.
*/
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->iptr != com->ibuf)
sioinput(com);
@@ -3766,7 +3766,7 @@ comstart(tp)
if (com == NULL)
return;
s = spltty();
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (tp->t_state & TS_TTSTOP)
com->state &= ~CS_TTGO;
else
@@ -3805,7 +3805,7 @@ comstart(tp)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
#endif
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
ttwwakeup(tp);
splx(s);
@@ -3825,7 +3825,7 @@ comstart(tp)
#endif
com->obufs[0].l_next = NULL;
com->obufs[0].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -3837,7 +3837,7 @@ comstart(tp)
com->obufq.l_next = &com->obufs[0];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) {
com->obufs[1].l_tail
@@ -3849,7 +3849,7 @@ comstart(tp)
#endif
com->obufs[1].l_next = NULL;
com->obufs[1].l_queued = TRUE;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state & CS_BUSY) {
qp = com->obufq.l_next;
while ((next = qp->l_next) != NULL)
@@ -3861,14 +3861,14 @@ comstart(tp)
com->obufq.l_next = &com->obufs[1];
com->state |= CS_BUSY;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
tp->t_state |= TS_BUSY;
}
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (com->state >= (CS_BUSY | CS_TTGO))
siointr1(com); /* fake interrupt to start output */
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
ttwwakeup(tp);
splx(s);
}
@@ -3886,7 +3886,7 @@ comstop(tp, rw)
com = com_addr(DEV_TO_UNIT(tp->t_dev));
if (com == NULL || com->gone)
return;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
if (rw & FWRITE) {
#ifdef PC98
if (!IS_8251(com->pc98_if_type)) {
@@ -3932,7 +3932,7 @@ comstop(tp, rw)
com_events -= (com->iptr - com->ibuf);
com->iptr = com->ibuf;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
comstart(tp);
}
@@ -3975,7 +3975,7 @@ commctl(com, bits, how)
mcr |= MCR_RTS;
if (com->gone)
return(0);
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
switch (how) {
case DMSET:
outb(com->modem_ctl_port,
@@ -3988,7 +3988,7 @@ commctl(com, bits, how)
outb(com->modem_ctl_port, com->mcr_image &= ~mcr);
break;
}
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
return (0);
}
@@ -4047,9 +4047,9 @@ comwakeup(chan)
com = com_addr(unit);
if (com != NULL && !com->gone
&& (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) {
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
siointr1(com);
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
}
}
@@ -4071,10 +4071,10 @@ comwakeup(chan)
u_int delta;
u_long total;
- mtx_enter(&sio_lock, MTX_SPIN);
+ mtx_lock_spin(&sio_lock);
delta = com->delta_error_counts[errnum];
com->delta_error_counts[errnum] = 0;
- mtx_exit(&sio_lock, MTX_SPIN);
+ mtx_unlock_spin(&sio_lock);
if (delta == 0)
continue;
total = com->error_counts[errnum] += delta;
diff --git a/sys/pci/if_dcreg.h b/sys/pci/if_dcreg.h
index 2327fa6..56f441f 100644
--- a/sys/pci/if_dcreg.h
+++ b/sys/pci/if_dcreg.h
@@ -702,8 +702,8 @@ struct dc_softc {
};
-#define DC_LOCK(_sc) mtx_enter(&(_sc)->dc_mtx, MTX_DEF)
-#define DC_UNLOCK(_sc) mtx_exit(&(_sc)->dc_mtx, MTX_DEF)
+#define DC_LOCK(_sc) mtx_lock(&(_sc)->dc_mtx)
+#define DC_UNLOCK(_sc) mtx_unlock(&(_sc)->dc_mtx)
#define DC_TX_POLL 0x00000001
#define DC_TX_COALESCE 0x00000002
diff --git a/sys/pci/if_fxpvar.h b/sys/pci/if_fxpvar.h
index aee009c..7a9eb8d 100644
--- a/sys/pci/if_fxpvar.h
+++ b/sys/pci/if_fxpvar.h
@@ -86,5 +86,5 @@ struct fxp_softc {
#define sc_if arpcom.ac_if
#define FXP_UNIT(_sc) (_sc)->arpcom.ac_if.if_unit
-#define FXP_LOCK(_sc) mtx_enter(&(_sc)->sc_mtx, MTX_DEF)
-#define FXP_UNLOCK(_sc) mtx_exit(&(_sc)->sc_mtx, MTX_DEF)
+#define FXP_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define FXP_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
diff --git a/sys/pci/if_pcnreg.h b/sys/pci/if_pcnreg.h
index db7c1a3..79ef531 100644
--- a/sys/pci/if_pcnreg.h
+++ b/sys/pci/if_pcnreg.h
@@ -451,8 +451,8 @@ struct pcn_softc {
struct mtx pcn_mtx;
};
-#define PCN_LOCK(_sc) mtx_enter(&(_sc)->pcn_mtx, MTX_DEF)
-#define PCN_UNLOCK(_sc) mtx_exit(&(_sc)->pcn_mtx, MTX_DEF)
+#define PCN_LOCK(_sc) mtx_lock(&(_sc)->pcn_mtx)
+#define PCN_UNLOCK(_sc) mtx_unlock(&(_sc)->pcn_mtx)
/*
* register space access macros
diff --git a/sys/pci/if_rlreg.h b/sys/pci/if_rlreg.h
index a023301..1f3fc88 100644
--- a/sys/pci/if_rlreg.h
+++ b/sys/pci/if_rlreg.h
@@ -373,8 +373,8 @@ struct rl_softc {
struct mtx rl_mtx;
};
-#define RL_LOCK(_sc) mtx_enter(&(_sc)->rl_mtx, MTX_DEF)
-#define RL_UNLOCK(_sc) mtx_exit(&(_sc)->rl_mtx, MTX_DEF)
+#define RL_LOCK(_sc) mtx_lock(&(_sc)->rl_mtx)
+#define RL_UNLOCK(_sc) mtx_unlock(&(_sc)->rl_mtx)
/*
* register space access macros
diff --git a/sys/pci/if_sfreg.h b/sys/pci/if_sfreg.h
index c2dc20e..fd2107f 100644
--- a/sys/pci/if_sfreg.h
+++ b/sys/pci/if_sfreg.h
@@ -1048,8 +1048,8 @@ struct sf_softc {
};
-#define SF_LOCK(_sc) mtx_enter(&(_sc)->sf_mtx, MTX_DEF)
-#define SF_UNLOCK(_sc) mtx_exit(&(_sc)->sf_mtx, MTX_DEF)
+#define SF_LOCK(_sc) mtx_lock(&(_sc)->sf_mtx)
+#define SF_UNLOCK(_sc) mtx_unlock(&(_sc)->sf_mtx)
#define SF_TIMEOUT 1000
diff --git a/sys/pci/if_sisreg.h b/sys/pci/if_sisreg.h
index 96bbd6d..9e1c44c 100644
--- a/sys/pci/if_sisreg.h
+++ b/sys/pci/if_sisreg.h
@@ -399,8 +399,8 @@ struct sis_softc {
struct mtx sis_mtx;
};
-#define SIS_LOCK(_sc) mtx_enter(&(_sc)->sis_mtx, MTX_DEF)
-#define SIS_UNLOCK(_sc) mtx_exit(&(_sc)->sis_mtx, MTX_DEF)
+#define SIS_LOCK(_sc) mtx_lock(&(_sc)->sis_mtx)
+#define SIS_UNLOCK(_sc) mtx_unlock(&(_sc)->sis_mtx)
/*
* register space access macros
diff --git a/sys/pci/if_skreg.h b/sys/pci/if_skreg.h
index 6f31d1d..061707c 100644
--- a/sys/pci/if_skreg.h
+++ b/sys/pci/if_skreg.h
@@ -1182,10 +1182,10 @@ struct sk_softc {
struct mtx sk_mtx;
};
-#define SK_LOCK(_sc) mtx_enter(&(_sc)->sk_mtx, MTX_DEF)
-#define SK_UNLOCK(_sc) mtx_exit(&(_sc)->sk_mtx, MTX_DEF)
-#define SK_IF_LOCK(_sc) mtx_enter(&(_sc)->sk_softc->sk_mtx, MTX_DEF)
-#define SK_IF_UNLOCK(_sc) mtx_exit(&(_sc)->sk_softc->sk_mtx, MTX_DEF)
+#define SK_LOCK(_sc) mtx_lock(&(_sc)->sk_mtx)
+#define SK_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_mtx)
+#define SK_IF_LOCK(_sc) mtx_lock(&(_sc)->sk_softc->sk_mtx)
+#define SK_IF_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_softc->sk_mtx)
/* Softc for each logical interface */
struct sk_if_softc {
diff --git a/sys/pci/if_stereg.h b/sys/pci/if_stereg.h
index 7a5ad7a..6081ec0 100644
--- a/sys/pci/if_stereg.h
+++ b/sys/pci/if_stereg.h
@@ -517,8 +517,8 @@ struct ste_softc {
struct mtx ste_mtx;
};
-#define STE_LOCK(_sc) mtx_enter(&(_sc)->ste_mtx, MTX_DEF)
-#define STE_UNLOCK(_sc) mtx_exit(&(_sc)->ste_mtx, MTX_DEF)
+#define STE_LOCK(_sc) mtx_lock(&(_sc)->ste_mtx)
+#define STE_UNLOCK(_sc) mtx_unlock(&(_sc)->ste_mtx)
struct ste_mii_frame {
u_int8_t mii_stdelim;
diff --git a/sys/pci/if_tireg.h b/sys/pci/if_tireg.h
index 0eaff14..df399be 100644
--- a/sys/pci/if_tireg.h
+++ b/sys/pci/if_tireg.h
@@ -1147,8 +1147,8 @@ struct ti_softc {
struct mtx ti_mtx;
};
-#define TI_LOCK(_sc) mtx_enter(&(_sc)->ti_mtx, MTX_DEF)
-#define TI_UNLOCK(_sc) mtx_exit(&(_sc)->ti_mtx, MTX_DEF)
+#define TI_LOCK(_sc) mtx_lock(&(_sc)->ti_mtx)
+#define TI_UNLOCK(_sc) mtx_unlock(&(_sc)->ti_mtx)
/*
* Microchip Technology 24Cxx EEPROM control bytes
diff --git a/sys/pci/if_tlreg.h b/sys/pci/if_tlreg.h
index eb57a2c..bd7ea5b 100644
--- a/sys/pci/if_tlreg.h
+++ b/sys/pci/if_tlreg.h
@@ -129,8 +129,8 @@ struct tl_softc {
struct mtx tl_mtx;
};
-#define TL_LOCK(_sc) mtx_enter(&(_sc)->tl_mtx, MTX_DEF)
-#define TL_UNLOCK(_sc) mtx_exit(&(_sc)->tl_mtx, MTX_DEF)
+#define TL_LOCK(_sc) mtx_lock(&(_sc)->tl_mtx)
+#define TL_UNLOCK(_sc) mtx_unlock(&(_sc)->tl_mtx)
/*
* Transmit interrupt threshold.
diff --git a/sys/pci/if_vrreg.h b/sys/pci/if_vrreg.h
index 8217a8c..235962d 100644
--- a/sys/pci/if_vrreg.h
+++ b/sys/pci/if_vrreg.h
@@ -414,8 +414,8 @@ struct vr_softc {
struct mtx vr_mtx;
};
-#define VR_LOCK(_sc) mtx_enter(&(_sc)->vr_mtx, MTX_DEF)
-#define VR_UNLOCK(_sc) mtx_exit(&(_sc)->vr_mtx, MTX_DEF)
+#define VR_LOCK(_sc) mtx_lock(&(_sc)->vr_mtx)
+#define VR_UNLOCK(_sc) mtx_unlock(&(_sc)->vr_mtx)
/*
* register space access macros
diff --git a/sys/pci/if_wbreg.h b/sys/pci/if_wbreg.h
index 6f58514..983886b 100644
--- a/sys/pci/if_wbreg.h
+++ b/sys/pci/if_wbreg.h
@@ -381,8 +381,8 @@ struct wb_softc {
struct mtx wb_mtx;
};
-#define WB_LOCK(_sc) mtx_enter(&(_sc)->wb_mtx, MTX_DEF)
-#define WB_UNLOCK(_sc) mtx_exit(&(_sc)->wb_mtx, MTX_DEF)
+#define WB_LOCK(_sc) mtx_lock(&(_sc)->wb_mtx)
+#define WB_UNLOCK(_sc) mtx_unlock(&(_sc)->wb_mtx)
/*
* register space access macros
diff --git a/sys/pci/if_wxvar.h b/sys/pci/if_wxvar.h
index 09e6594..af5be8c 100644
--- a/sys/pci/if_wxvar.h
+++ b/sys/pci/if_wxvar.h
@@ -214,10 +214,10 @@ struct wxmdvar {
#define UNTIMEOUT(f, arg, sc) untimeout(f, arg, (sc)->w.sch)
#define INLINE __inline
#ifdef SMPNG
-#define WX_LOCK(_sc) mtx_enter(&(_sc)->wx_mtx, MTX_DEF)
-#define WX_UNLOCK(_sc) mtx_exit(&(_sc)->wx_mtx, MTX_DEF)
-#define WX_ILOCK(_sc) mtx_enter(&(_sc)->wx_mtx, MTX_DEF)
-#define WX_IUNLK(_sc) mtx_exit(&(_sc)->wx_mtx, MTX_DEF)
+#define WX_LOCK(_sc) mtx_lock(&(_sc)->wx_mtx)
+#define WX_UNLOCK(_sc) mtx_unlock(&(_sc)->wx_mtx)
+#define WX_ILOCK(_sc) mtx_lock(&(_sc)->wx_mtx)
+#define WX_IUNLK(_sc) mtx_unlock(&(_sc)->wx_mtx)
#else
#define WX_LOCK(_sc) _sc->w.spl = splimp()
#define WX_UNLOCK(_sc) splx(_sc->w.spl)
diff --git a/sys/pci/if_xlreg.h b/sys/pci/if_xlreg.h
index 376db04..8e1248e 100644
--- a/sys/pci/if_xlreg.h
+++ b/sys/pci/if_xlreg.h
@@ -588,8 +588,8 @@ struct xl_softc {
struct mtx xl_mtx;
};
-#define XL_LOCK(_sc) mtx_enter(&(_sc)->xl_mtx, MTX_DEF)
-#define XL_UNLOCK(_sc) mtx_exit(&(_sc)->xl_mtx, MTX_DEF)
+#define XL_LOCK(_sc) mtx_lock(&(_sc)->xl_mtx)
+#define XL_UNLOCK(_sc) mtx_unlock(&(_sc)->xl_mtx)
#define xl_rx_goodframes(x) \
((x.xl_upper_frames_ok & 0x03) << 8) | x.xl_rx_frames_ok
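All of the per-driver LOCK/UNLOCK macros above are used exactly as before; only their expansion changes. A minimal sketch of the usual pattern, assuming a hypothetical driver whose softc carries its own mutex (XX_LOCK, xx_softc and xx_mtx are illustrative names only):

static void
xx_start(struct xx_softc *sc)
{
	XX_LOCK(sc);		/* expands to mtx_lock(&sc->xx_mtx) */
	/* ... queue packets, touch registers, update softc state ... */
	XX_UNLOCK(sc);		/* expands to mtx_unlock(&sc->xx_mtx) */
}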
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 6d96337..851b1878 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -253,8 +253,8 @@ cpu_exit(p)
{
alpha_fpstate_drop(p);
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -437,7 +437,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -466,7 +466,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
diff --git a/sys/powerpc/include/mutex.h b/sys/powerpc/include/mutex.h
index 40717b0..564ad19 100644
--- a/sys/powerpc/include/mutex.h
+++ b/sys/powerpc/include/mutex.h
@@ -39,26 +39,12 @@
/*
* Debugging
*/
-#ifdef MUTEX_DEBUG
-
-#ifdef _KERN_MUTEX_C_
-char STR_IEN[] = "ps & IPL == IPL_0";
-char STR_IDIS[] = "ps & IPL == IPL_HIGH";
-char STR_SIEN[] = "mpp->mtx_saveintr == IPL_0";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_IEN[];
-extern char STR_IDIS[];
-extern char STR_SIEN[];
-#endif /* _KERN_MUTEX_C_ */
-
-#endif /* MUTEX_DEBUG */
-
#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
- == ALPHA_PSL_IPL_0, STR_IEN)
+ == ALPHA_PSL_IPL_0, "ps & IPL == IPL_0")
#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
- == ALPHA_PSL_IPL_HIGH, STR_IDIS)
+ == ALPHA_PSL_IPL_HIGH, "ps & IPL == IPL_HIGH")
#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr \
- == ALPHA_PSL_IPL_0, STR_SIEN)
+ == ALPHA_PSL_IPL_0, "mpp->mtx_saveintr == IPL_0")
#define mtx_legal2block() \
((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_0)
@@ -68,34 +54,33 @@ extern char STR_SIEN[];
*--------------------------------------------------------------------------
*/
-#ifdef _KERN_MUTEX_C_
-
-#define _V(x) __STRING(x)
-
/*
- * Get a spin lock, handle recusion inline (as the less common case)
+ * Get a spin lock, handle recursion inline.
*/
-
-#define _getlock_spin_block(mp, tid, type) do { \
+#define _get_spin_lock(mp, tid, opts) do { \
u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \
- if (!_obtain_lock(mp, tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl); \
- else { \
+ if (!_obtain_lock((mp), (tid))) { \
+ if ((mp)->mtx_lock == (uintptr_t)(tid)) \
+ (mp)->mtx_recurse++; \
+ else \
+ _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \
+ __LINE__); \
+ } else { \
alpha_mb(); \
(mp)->mtx_saveintr = _ipl; \
} \
} while (0)
-#undef _V
-
-#endif /* _KERN_MUTEX_C_ */
-
#endif /* _KERNEL */
#else /* !LOCORE */
/*
* Simple assembly macros to get and release non-recursive spin locks
+ *
+ * XXX: These are presently unused and cannot be used right now. Need to be
+ * re-written (they are wrong). If you plan to use this and still see
+ * this message, know not to unless you fix them first! :-)
*/
#define MTX_ENTER(lck) \
ldiq a0, ALPHA_PSL_IPL_HIGH; \
diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c
index 20e16b9..6a46c28 100644
--- a/sys/powerpc/powerpc/mp_machdep.c
+++ b/sys/powerpc/powerpc/mp_machdep.c
@@ -150,7 +150,7 @@ void
smp_init_secondary(void)
{
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
printf("smp_init_secondary: called\n");
CTR0(KTR_SMP, "smp_init_secondary");
@@ -163,7 +163,7 @@ smp_init_secondary(void)
mp_ncpus = PCPU_GET(cpuno) + 1;
spl0();
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
}
extern void smp_init_secondary_glue(void);
@@ -379,7 +379,7 @@ smp_rendezvous(void (* setup_func)(void *),
{
/* obtain rendezvous lock */
- mtx_enter(&smp_rv_mtx, MTX_SPIN);
+ mtx_lock_spin(&smp_rv_mtx);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -393,7 +393,7 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rendezvous_action();
/* release lock */
- mtx_exit(&smp_rv_mtx, MTX_SPIN);
+ mtx_unlock_spin(&smp_rv_mtx);
}
static u_int64_t
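smp_rendezvous() serializes concurrent rendezvous requests with the spin mutex shown above; callers only pass function pointers. A hedged usage sketch (the callback is hypothetical, and NULL is assumed to be accepted for the unused setup/teardown phases):

static void
example_action(void *arg)
{
	/* runs on every CPU while the rendezvous is in progress */
}

static void
example_caller(void)
{
	smp_rendezvous(NULL, example_action, NULL, NULL);
}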
diff --git a/sys/powerpc/powerpc/procfs_machdep.c b/sys/powerpc/powerpc/procfs_machdep.c
index 229d2f9..c0766ca 100644
--- a/sys/powerpc/powerpc/procfs_machdep.c
+++ b/sys/powerpc/powerpc/procfs_machdep.c
@@ -86,12 +86,12 @@ procfs_read_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_regs(p, regs));
}
@@ -101,12 +101,12 @@ procfs_write_regs(p, regs)
struct reg *regs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_regs(p, regs));
}
@@ -121,12 +121,12 @@ procfs_read_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (fill_fpregs(p, fpregs));
}
@@ -136,12 +136,12 @@ procfs_write_fpregs(p, fpregs)
struct fpreg *fpregs;
{
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (EIO);
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
return (set_fpregs(p, fpregs));
}
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index 6d96337..851b1878 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -253,8 +253,8 @@ cpu_exit(p)
{
alpha_fpstate_drop(p);
- mtx_enter(&sched_lock, MTX_SPIN);
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
+ mtx_lock_spin(&sched_lock);
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
mtx_assert(&Giant, MA_NOTOWNED);
/*
@@ -437,7 +437,7 @@ vm_page_zero_idle()
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- if (mtx_try_enter(&Giant, MTX_DEF)) {
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -466,7 +466,7 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_exit(&Giant, MTX_DEF);
+ mtx_unlock(&Giant);
return (1);
}
return (0);
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index 4cd8257..eaa3384 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -252,7 +252,7 @@ BUF_LOCK(struct buf *bp, int locktype)
int s, ret;
s = splbio();
- mtx_enter(&buftimelock, MTX_DEF);
+ mtx_lock(&buftimelock);
locktype |= LK_INTERLOCK;
bp->b_lock.lk_wmesg = buf_wmesg;
bp->b_lock.lk_prio = PRIBIO + 4;
@@ -271,7 +271,7 @@ BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
int s, ret;
s = splbio();
- mtx_enter(&buftimelock, MTX_DEF);
+ mtx_lock(&buftimelock);
locktype |= LK_INTERLOCK;
bp->b_lock.lk_wmesg = wmesg;
bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index eab24f2..2f55401 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -300,7 +300,7 @@ struct mcntfree_lst {
#define _MEXT_ALLOC_CNT(m_cnt, how) do { \
union mext_refcnt *__mcnt; \
\
- mtx_enter(&mcntfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mcntfree.m_mtx); \
if (mcntfree.m_head == NULL) \
m_alloc_ref(1, (how)); \
__mcnt = mcntfree.m_head; \
@@ -309,18 +309,18 @@ struct mcntfree_lst {
mbstat.m_refree--; \
__mcnt->refcnt = 0; \
} \
- mtx_exit(&mcntfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mcntfree.m_mtx); \
(m_cnt) = __mcnt; \
} while (0)
#define _MEXT_DEALLOC_CNT(m_cnt) do { \
union mext_refcnt *__mcnt = (m_cnt); \
\
- mtx_enter(&mcntfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mcntfree.m_mtx); \
__mcnt->next_ref = mcntfree.m_head; \
mcntfree.m_head = __mcnt; \
mbstat.m_refree++; \
- mtx_exit(&mcntfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mcntfree.m_mtx); \
} while (0)
#define MEXT_INIT_REF(m, how) do { \
@@ -371,14 +371,14 @@ struct mcntfree_lst {
int _mhow = (how); \
int _mtype = (type); \
\
- mtx_enter(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mmbfree.m_mtx); \
_MGET(_mm, _mhow); \
if (_mm != NULL) { \
mbtypes[_mtype]++; \
- mtx_exit(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mmbfree.m_mtx); \
_MGET_SETUP(_mm, _mtype); \
} else \
- mtx_exit(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mmbfree.m_mtx); \
(m) = _mm; \
} while (0)
@@ -398,14 +398,14 @@ struct mcntfree_lst {
int _mhow = (how); \
int _mtype = (type); \
\
- mtx_enter(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mmbfree.m_mtx); \
_MGET(_mm, _mhow); \
if (_mm != NULL) { \
mbtypes[_mtype]++; \
- mtx_exit(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mmbfree.m_mtx); \
_MGETHDR_SETUP(_mm, _mtype); \
} else \
- mtx_exit(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mmbfree.m_mtx); \
(m) = _mm; \
} while (0)
@@ -437,9 +437,9 @@ struct mcntfree_lst {
#define MCLGET(m, how) do { \
struct mbuf *_mm = (m); \
\
- mtx_enter(&mclfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mclfree.m_mtx); \
_MCLALLOC(_mm->m_ext.ext_buf, (how)); \
- mtx_exit(&mclfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mclfree.m_mtx); \
if (_mm->m_ext.ext_buf != NULL) { \
MEXT_INIT_REF(_mm, (how)); \
if (_mm->m_ext.ref_cnt == NULL) { \
@@ -474,12 +474,12 @@ struct mcntfree_lst {
#define _MCLFREE(p) do { \
union mcluster *_mp = (union mcluster *)(p); \
\
- mtx_enter(&mclfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mclfree.m_mtx); \
_mp->mcl_next = mclfree.m_head; \
mclfree.m_head = _mp; \
mbstat.m_clfree++; \
MBWAKEUP(m_clalloc_wid); \
- mtx_exit(&mclfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mclfree.m_mtx); \
} while (0)
/* MEXTFREE:
@@ -514,7 +514,7 @@ struct mcntfree_lst {
KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \
if (_mm->m_flags & M_EXT) \
MEXTFREE(_mm); \
- mtx_enter(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_lock(&mmbfree.m_mtx); \
mbtypes[_mm->m_type]--; \
_mm->m_type = MT_FREE; \
mbtypes[MT_FREE]++; \
@@ -522,7 +522,7 @@ struct mcntfree_lst {
_mm->m_next = mmbfree.m_head; \
mmbfree.m_head = _mm; \
MBWAKEUP(m_mballoc_wid); \
- mtx_exit(&mmbfree.m_mtx, MTX_DEF); \
+ mtx_unlock(&mmbfree.m_mtx); \
} while (0)
/*
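Callers of these mbuf macros never take the free-list mutexes themselves; MGET, MGETHDR, MCLGET and MFREE lock and unlock mmbfree.m_mtx or mclfree.m_mtx internally, as shown above. A minimal sketch of the usual calling pattern (illustrative only):

struct mbuf *m;

MGETHDR(m, M_DONTWAIT, MT_DATA);	/* locks/unlocks mmbfree.m_mtx internally */
if (m != NULL) {
	MCLGET(m, M_DONTWAIT);		/* locks/unlocks mclfree.m_mtx internally */
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);		/* returns the mbuf to the locked free list */
		m = NULL;
	}
}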
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 6bf21be..a765ee8 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -48,31 +48,34 @@
#ifdef _KERNEL
/*
- * Mutex flags
+ * Mutex types and options stored in mutex->mtx_flags
+ */
+#define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */
+#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */
+#define MTX_RECURSE 0x00000002 /* Option: lock allowed to recurse */
+
+/*
+ * Option flags passed to certain lock/unlock routines, through the use
+ * of corresponding mtx_{lock,unlock}_flags() interface macros.
*
- * Types
+ * XXX: The only reason we make these bits not interfere with the above "types
+ * and options" bits is because we have to pass both to the witness
+ * routines right now; if/when we clean up the witness interface to
+ * not check for mutex type from the passed in flag, but rather from
+ * the mutex lock's mtx_flags field, then we can change these values to
+ * 0x1, 0x2, ...
+ */
+#define MTX_NOSWITCH 0x00000004 /* Do not switch on release */
+#define MTX_QUIET 0x00000008 /* Don't log a mutex event */
+
+/*
+ * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
+ * with the exception of MTX_UNOWNED, applies to spin locks.
*/
-#define MTX_DEF 0x0 /* Default (spin/sleep) */
-#define MTX_SPIN 0x1 /* Spin only lock */
-
-/* Options */
-#define MTX_RECURSE 0x2 /* Recursive lock (for mtx_init) */
-#define MTX_RLIKELY 0x4 /* Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-#define MTX_QUIET 0x100 /* Don't log a mutex event */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSED 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
+#define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */
+#define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */
+#define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */
#define MTX_FLAGMASK ~(MTX_RECURSED | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
#endif /* _KERNEL */
@@ -84,62 +87,243 @@ struct mtx_debug;
* Sleep/spin mutex
*/
struct mtx {
- volatile uintptr_t mtx_lock; /* lock owner/gate/flags */
+ volatile uintptr_t mtx_lock; /* owner (and state for sleep locks) */
volatile u_int mtx_recurse; /* number of recursive holds */
u_int mtx_saveintr; /* saved flags (for spin locks) */
int mtx_flags; /* flags passed to mtx_init() */
const char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
- struct mtx_debug *mtx_debug;
+ TAILQ_HEAD(, proc) mtx_blocked; /* threads blocked on this lock */
+ LIST_ENTRY(mtx) mtx_contested; /* list of all contested locks */
+ struct mtx *mtx_next; /* all existing locks */
+ struct mtx *mtx_prev; /* in system... */
+ struct mtx_debug *mtx_debug; /* debugging information... */
};
+/*
+ * XXX: Friendly reminder to fix things in MP code that is presently being
+ * XXX: worked on.
+ */
#define mp_fixme(string)
#ifdef _KERNEL
-/* Prototypes */
-void mtx_init(struct mtx *m, const char *description, int flag);
+
+/*
+ * Prototypes
+ *
+ * NOTE: Functions prepended with `_' (underscore) are exported to other parts
+ * of the kernel via macros, thus allowing us to use the cpp __FILE__
+ * and __LINE__. These functions should not be called directly by any
+ * code using the API. Their macros cover their functionality.
+ *
+ * [See below for descriptions]
+ *
+ */
+void mtx_init(struct mtx *m, const char *description, int opts);
void mtx_destroy(struct mtx *m);
+void _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
+void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
+void _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr,
+ const char *file, int line);
+void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
+int _mtx_trylock(struct mtx *m, int opts, const char *file, int line);
/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
+ * We define our machine-independent (unoptimized) mutex micro-operations
+ * here, if they are not already defined in the machine-dependent mutex.h
*/
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
+/* Actually obtain mtx_lock */
+#ifndef _obtain_lock
+#define _obtain_lock(mp, tid) \
+ atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
+#endif
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
+/* Actually release mtx_lock */
+#ifndef _release_lock
+#define _release_lock(mp, tid) \
+ atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
+#endif
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
+/* Actually release mtx_lock quickly, assuming we own it. */
+#ifndef _release_lock_quick
+#define _release_lock_quick(mp) \
+ atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
+#endif
-/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
+/*
+ * Obtain a sleep lock inline, or call the "hard" function if we can't get it
+ * easy.
+ */
+#ifndef _get_sleep_lock
+#define _get_sleep_lock(mp, tid, opts) do { \
+ if (!_obtain_lock((mp), (tid))) \
+ _mtx_lock_sleep((mp), (opts), __FILE__, __LINE__); \
+} while (0)
+#endif
/*
- * Used to replace return with an exit Giant and return.
+ * Obtain a spin lock inline, or call the "hard" function if we can't get it
+ * easy. For spinlocks, we handle recursion inline (it turns out that function
+ * calls can be significantly expensive on some architectures).
+ * Since spin locks are not _too_ common, inlining this code is not too big
+ * a deal.
*/
+#ifndef _get_spin_lock
+#define _get_spin_lock(mp, tid, opts) do { \
+ u_int _mtx_intr = save_intr(); \
+ disable_intr(); \
+ if (!_obtain_lock((mp), (tid))) { \
+ if ((mp)->mtx_lock == (uintptr_t)(tid)) \
+ (mp)->mtx_recurse++; \
+ else \
+ _mtx_lock_spin((mp), (opts), _mtx_intr, \
+ __FILE__, __LINE__); \
+ } else \
+ (mp)->mtx_saveintr = _mtx_intr; \
+} while (0)
+#endif
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
+/*
+ * Release a sleep lock inline, or call the "hard" function if we can't do it
+ * easy.
+ */
+#ifndef _rel_sleep_lock
+#define _rel_sleep_lock(mp, tid, opts) do { \
+ if (!_release_lock((mp), (tid))) \
+ _mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__); \
} while (0)
+#endif
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
+/*
+ * For spinlocks, we can handle everything inline, as it's pretty simple and
+ * a function call would be too expensive (at least on some architectures).
+ * Since spin locks are not _too_ common, inlining this code is not too big
+ * a deal.
+ */
+#ifndef _rel_spin_lock
+#define _rel_spin_lock(mp) do { \
+ u_int _mtx_intr = (mp)->mtx_saveintr; \
+ if (mtx_recursed((mp))) \
+ (mp)->mtx_recurse--; \
+ else { \
+ _release_lock_quick((mp)); \
+ restore_intr(_mtx_intr); \
+ } \
} while (0)
+#endif
+/*
+ * Exported lock manipulation interface.
+ *
+ * mtx_lock(m) locks MTX_DEF mutex `m'
+ *
+ * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
+ *
+ * mtx_unlock(m) unlocks MTX_DEF mutex `m'
+ *
+ * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
+ *
+ * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
+ * and passes option flags `opts' to the "hard" function, if required.
+ * With these routines, it is possible to pass flags such as MTX_QUIET
+ * and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
+ *
+ * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
+ * it cannot. Rather, it returns 0 on failure and non-zero on success.
+ * It does NOT handle recursion as we assume that if a caller is properly
+ * using this part of the interface, he will know that the lock in question
+ * is _not_ recursed.
+ *
+ * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
+ * relevant option flags `opts.'
+ *
+ * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
+ *
+ * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
+ */
+#define mtx_lock(m) do { \
+ MPASS(CURPROC != NULL); \
+ _get_sleep_lock((m), CURTHD, 0); \
+ WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__); \
+} while (0)
+
+#define mtx_lock_spin(m) do { \
+ MPASS(CURPROC != NULL); \
+ _get_spin_lock((m), CURTHD, 0); \
+ WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__); \
+} while (0)
+
+#define mtx_unlock(m) do { \
+ MPASS(CURPROC != NULL); \
+ WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__); \
+ _rel_sleep_lock((m), CURTHD, 0); \
+} while (0)
+
+#define mtx_unlock_spin(m) do { \
+ MPASS(CURPROC != NULL); \
+ WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__); \
+ _rel_spin_lock((m)); \
+} while (0)
+
+#define mtx_lock_flags(m, opts) do { \
+ MPASS(CURPROC != NULL); \
+ _get_sleep_lock((m), CURTHD, (opts)); \
+ WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__, \
+ __LINE__); \
+} while (0)
+
+#define mtx_lock_spin_flags(m, opts) do { \
+ MPASS(CURPROC != NULL); \
+ _get_spin_lock((m), CURTHD, (opts)); \
+ WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__, \
+ __LINE__); \
+} while (0)
+
+#define mtx_unlock_flags(m, opts) do { \
+ MPASS(CURPROC != NULL); \
+ WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__, \
+ __LINE__); \
+ _rel_sleep_lock((m), CURTHD, (opts)); \
+} while (0)
+
+/*
+ * The MTX_SPIN unlock case is all inlined, so we handle the MTX_QUIET
+ * flag right in the macro. Not a problem as if we don't have KTR_LOCK, this
+ * check will be optimized out.
+ */
+#define mtx_unlock_spin_flags(m, opts) do { \
+ MPASS(CURPROC != NULL); \
+ WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__, \
+ __LINE__); \
+ if (((opts) & MTX_QUIET) == 0) \
+ CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", \
+ (m)->mtx_description, (m), (m)->mtx_recurse, \
+ __FILE__, __LINE__); \
+ _rel_spin_lock((m)); \
+} while (0)
+
+#define mtx_trylock(m) \
+ _mtx_trylock((m), 0, __FILE__, __LINE__)
+
+#define mtx_trylock_flags(m, opts) \
+ _mtx_trylock((m), (opts), __FILE__, __LINE__)
+
+#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
+
+#define mtx_recursed(m) ((m)->mtx_recurse != 0)
+
+/*
+ * Global locks.
+ */
+extern struct mtx sched_lock;
+extern struct mtx Giant;
+
+/*
+ * Giant lock manipulation and clean exit macros.
+ * Used to replace return with an exit Giant and return.
+ *
+ * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT()
+ */
#define DROP_GIANT_NOSWITCH() \
do { \
int _giantcnt; \
@@ -148,7 +332,7 @@ do { \
if (mtx_owned(&Giant)) \
WITNESS_SAVE(&Giant, Giant); \
for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH)
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH)
#define DROP_GIANT() \
do { \
@@ -158,12 +342,12 @@ do { \
if (mtx_owned(&Giant)) \
WITNESS_SAVE(&Giant, Giant); \
for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
+ mtx_unlock(&Giant)
#define PICKUP_GIANT() \
mtx_assert(&Giant, MA_NOTOWNED); \
while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
+ mtx_lock(&Giant); \
if (mtx_owned(&Giant)) \
WITNESS_RESTORE(&Giant, Giant); \
} while (0)
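DROP_GIANT_NOSWITCH() and DROP_GIANT() open a block that releases Giant as many times as it is currently held; the block must always be closed with a matching PICKUP_GIANT(), which reacquires it the same number of times. A minimal sketch of the pairing, with a hypothetical wait channel:

	DROP_GIANT();
	tsleep(&example_chan, PRIBIO, "example", hz);	/* sleep without holding Giant */
	PICKUP_GIANT();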
@@ -171,37 +355,49 @@ do { \
#define PARTIAL_PICKUP_GIANT() \
mtx_assert(&Giant, MA_NOTOWNED); \
while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
+ mtx_lock(&Giant); \
if (mtx_owned(&Giant)) \
WITNESS_RESTORE(&Giant, Giant)
/*
- * Debugging
+ * The INVARIANTS-enabled mtx_assert() functionality.
*/
#ifdef INVARIANTS
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define MA_RECURSED 4
-#define MA_NOTRECURSED 8
+#define MA_OWNED 0x01
+#define MA_NOTOWNED 0x02
+#define MA_RECURSED 0x04
+#define MA_NOTRECURSED 0x08
+
void _mtx_assert(struct mtx *m, int what, const char *file, int line);
-#define mtx_assert(m, what) _mtx_assert((m), (what), __FILE__, __LINE__)
+#define mtx_assert(m, what) \
+ _mtx_assert((m), (what), __FILE__, __LINE__)
+
#else /* INVARIANTS */
#define mtx_assert(m, what)
#endif /* INVARIANTS */
+/*
+ * The MUTEX_DEBUG-enabled MPASS*() extra sanity-check macros.
+ */
#ifdef MUTEX_DEBUG
#define MPASS(ex) \
if (!(ex)) \
- panic("Assertion %s failed at %s:%d", #ex, __FILE__, __LINE__)
+ panic("Assertion %s failed at %s:%d", #ex, __FILE__, \
+ __LINE__)
+
#define MPASS2(ex, what) \
if (!(ex)) \
- panic("Assertion %s failed at %s:%d", what, __FILE__, __LINE__)
+ panic("Assertion %s failed at %s:%d", what, __FILE__, \
+ __LINE__)
+
#define MPASS3(ex, file, line) \
if (!(ex)) \
panic("Assertion %s failed at %s:%d", #ex, file, line)
+
#define MPASS4(ex, what, file, line) \
if (!(ex)) \
panic("Assertion %s failed at %s:%d", what, file, line)
+
#else /* MUTEX_DEBUG */
#define MPASS(ex)
#define MPASS2(ex, what)
@@ -210,37 +406,8 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif /* MUTEX_DEBUG */
/*
- * Externally visible mutex functions.
- *------------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread.
+ * Exported WITNESS-enabled functions and corresponding wrapper macros.
*/
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
-
-/*
- * Return non-zero if a mutex has been recursively acquired.
- */
-#define mtx_recursed(m) ((m)->mtx_recurse != 0)
-
-/* Common strings */
-#ifdef _KERN_MUTEX_C_
-char STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d at %s:%d";
-char STR_mtx_exit_fmt[] = "REL %s [%p] r=%d at %s:%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d at %s:%d";
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-#else /* _KERN_MUTEX_C_ */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* _KERN_MUTEX_C_ */
-
#ifdef WITNESS
void witness_save(struct mtx *, const char **, int *);
void witness_restore(struct mtx *, const char *, int);
@@ -250,16 +417,25 @@ void witness_exit(struct mtx *, int, const char *, int);
int witness_list(struct proc *);
int witness_sleep(int, struct mtx *, const char *, int);
-#define WITNESS_ENTER(m, t, f, l) witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) witness_exit((m), (t), (f), (l))
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
+#define WITNESS_ENTER(m, t, f, l) \
+ witness_enter((m), (t), (f), (l))
+
+#define WITNESS_EXIT(m, t, f, l) \
+ witness_exit((m), (t), (f), (l))
+
+#define WITNESS_SLEEP(check, m) \
+ witness_sleep(check, (m), __FILE__, __LINE__)
+
#define WITNESS_SAVE_DECL(n) \
const char * __CONCAT(n, __wf); \
int __CONCAT(n, __wl)
+
#define WITNESS_SAVE(m, n) \
witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl))
+
#define WITNESS_RESTORE(m, n) \
witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl))
+
#else /* WITNESS */
#define witness_enter(m, t, f, l)
#define witness_tryenter(m, t, f, l)
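Taken together, the new interface is used as sketched below. This is a hypothetical subsystem lock, not code from this commit; the usual kernel headers (<sys/param.h>, <sys/mutex.h>) are assumed:

static struct mtx example_mtx;

static void
example_init(void)
{
	mtx_init(&example_mtx, "example", MTX_DEF);
}

static void
example_use(void)
{
	mtx_lock(&example_mtx);
	mtx_assert(&example_mtx, MA_OWNED);
	/* ... modify the data example_mtx protects ... */
	mtx_unlock(&example_mtx);

	if (mtx_trylock(&example_mtx)) {
		/* acquired without blocking; not for locks that may be recursed */
		mtx_unlock(&example_mtx);
	}
}

static void
example_fini(void)
{
	mtx_destroy(&example_mtx);
}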
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index c296db2..6c4ad6b 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -419,8 +419,8 @@ sigonstack(size_t sp)
} while (0)
/* Lock and unlock a process. */
-#define PROC_LOCK(p) mtx_enter(&(p)->p_mtx, MTX_DEF)
-#define PROC_UNLOCK(p) mtx_exit(&(p)->p_mtx, MTX_DEF)
+#define PROC_LOCK(p) mtx_lock(&(p)->p_mtx)
+#define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx)
/* Lock and unlock the proc lists. */
#define ALLPROC_LOCK(how) \
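The proc lock is a sleep mutex, so code that also needs the sched_lock spin mutex takes it inside the proc lock (a spin mutex may be acquired while a sleep mutex is held, but not the other way around), as faultin() in vm_glue.c does further down in this diff. A reduced sketch:

	PROC_LOCK(p);				/* mtx_lock(&p->p_mtx) */
	mtx_lock_spin(&sched_lock);
	if ((p->p_sflag & PS_INMEM) == 0) {
		/* ... the process is swapped out ... */
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);				/* mtx_unlock(&p->p_mtx) */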
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index fee14d7..2d48115 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -393,7 +393,7 @@ ffs_reload(mp, cred, p)
if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
vfs_object_create(devvp, p, p->p_ucred);
- mtx_enter(&devvp->v_interlock, MTX_DEF);
+ mtx_lock(&devvp->v_interlock);
VOP_UNLOCK(devvp, LK_INTERLOCK, p);
}
@@ -454,10 +454,10 @@ ffs_reload(mp, cred, p)
}
loop:
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mp) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
goto loop;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
@@ -469,8 +469,8 @@ loop:
/*
* Step 5: invalidate all cached file data.
*/
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&mntvnode_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
}
@@ -492,9 +492,9 @@ loop:
ip->i_effnlink = ip->i_nlink;
brelse(bp);
vput(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return (0);
}
@@ -551,7 +551,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
vfs_object_create(devvp, p, cred);
- mtx_enter(&devvp->v_interlock, MTX_DEF);
+ mtx_lock(&devvp->v_interlock);
VOP_UNLOCK(devvp, LK_INTERLOCK, p);
}
@@ -937,7 +937,7 @@ ffs_sync(mp, waitfor, cred, p)
wait = 1;
lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
}
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
/*
@@ -946,19 +946,19 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
nvp = LIST_NEXT(vp, v_mntvnodes);
ip = VTOI(vp);
if (vp->v_type == VNON || ((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
continue;
}
if (vp->v_type != VCHR) {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
if ((error = vget(vp, lockreq, p)) != 0) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
@@ -967,15 +967,15 @@ loop:
allerror = error;
VOP_UNLOCK(vp, 0, p);
vrele(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
} else {
- mtx_exit(&mntvnode_mtx, MTX_DEF);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
+ mtx_unlock(&vp->v_interlock);
UFS_UPDATE(vp, wait);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
}
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
/*
* Force stale file system control information to be flushed.
*/
@@ -984,7 +984,7 @@ loop:
allerror = error;
/* Flushed work items may create new vnodes to clean */
if (count) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
goto loop;
}
}
@@ -1055,17 +1055,17 @@ restart:
* case getnewvnode() or MALLOC() blocks, otherwise a duplicate
* may occur!
*/
- mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_lock(&ffs_inode_hash_mtx);
if (ffs_inode_hash_lock) {
while (ffs_inode_hash_lock) {
ffs_inode_hash_lock = -1;
msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
}
- mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ffs_inode_hash_mtx);
goto restart;
}
ffs_inode_hash_lock = 1;
- mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ffs_inode_hash_mtx);
/*
* If this MALLOC() is performed after the getnewvnode()
@@ -1085,10 +1085,10 @@ restart:
* otherwise the processes waken up immediately hit
* themselves into the mutex.
*/
- mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_lock(&ffs_inode_hash_mtx);
want_wakeup = ffs_inode_hash_lock < 0;
ffs_inode_hash_lock = 0;
- mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ffs_inode_hash_mtx);
if (want_wakeup)
wakeup(&ffs_inode_hash_lock);
*vpp = NULL;
@@ -1126,10 +1126,10 @@ restart:
* otherwise the processes waken up immediately hit
* themselves into the mutex.
*/
- mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_lock(&ffs_inode_hash_mtx);
want_wakeup = ffs_inode_hash_lock < 0;
ffs_inode_hash_lock = 0;
- mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ffs_inode_hash_mtx);
if (want_wakeup)
wakeup(&ffs_inode_hash_lock);
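The hash-lock handshake above works because msleep() atomically releases the named mutex while sleeping and reacquires it before returning. A reduced sketch of the same pattern, with hypothetical names:

	mtx_lock(&example_hash_mtx);
	while (example_hash_lock != 0) {
		example_hash_lock = -1;		/* note that a waiter exists */
		msleep(&example_hash_lock, &example_hash_mtx, PVM, "exwait", 0);
	}
	example_hash_lock = 1;			/* we now hold the serialization flag */
	mtx_unlock(&example_hash_mtx);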
diff --git a/sys/ufs/ifs/ifs_vfsops.c b/sys/ufs/ifs/ifs_vfsops.c
index 5b72c03..f0e2e8c 100644
--- a/sys/ufs/ifs/ifs_vfsops.c
+++ b/sys/ufs/ifs/ifs_vfsops.c
@@ -176,17 +176,17 @@ restart:
* case getnewvnode() or MALLOC() blocks, otherwise a duplicate
* may occur!
*/
- mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_lock(&ifs_inode_hash_mtx);
if (ifs_inode_hash_lock) {
while (ifs_inode_hash_lock) {
ifs_inode_hash_lock = -1;
msleep(&ifs_inode_hash_lock, &ifs_inode_hash_mtx, PVM, "ifsvgt", 0);
}
- mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ifs_inode_hash_mtx);
goto restart;
}
ifs_inode_hash_lock = 1;
- mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ifs_inode_hash_mtx);
/*
* If this MALLOC() is performed after the getnewvnode()
@@ -206,10 +206,10 @@ restart:
* otherwise the processes waken up immediately hit
* themselves into the mutex.
*/
- mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_lock(&ifs_inode_hash_mtx);
want_wakeup = ifs_inode_hash_lock < 0;
ifs_inode_hash_lock = 0;
- mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ifs_inode_hash_mtx);
if (want_wakeup)
wakeup(&ifs_inode_hash_lock);
*vpp = NULL;
@@ -247,10 +247,10 @@ restart:
* otherwise the processes waken up immediately hit
* themselves into the mutex.
*/
- mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_lock(&ifs_inode_hash_mtx);
want_wakeup = ifs_inode_hash_lock < 0;
ifs_inode_hash_lock = 0;
- mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+ mtx_unlock(&ifs_inode_hash_mtx);
if (want_wakeup)
wakeup(&ifs_inode_hash_lock);
diff --git a/sys/ufs/ufs/ufs_ihash.c b/sys/ufs/ufs/ufs_ihash.c
index 6866a23..1fd39e9 100644
--- a/sys/ufs/ufs/ufs_ihash.c
+++ b/sys/ufs/ufs/ufs_ihash.c
@@ -77,11 +77,11 @@ ufs_ihashlookup(dev, inum)
{
struct inode *ip;
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
LIST_FOREACH(ip, INOHASH(dev, inum), i_hash)
if (inum == ip->i_number && dev == ip->i_dev)
break;
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
if (ip)
return (ITOV(ip));
@@ -102,18 +102,18 @@ ufs_ihashget(dev, inum)
struct vnode *vp;
loop:
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
LIST_FOREACH(ip, INOHASH(dev, inum), i_hash) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&ufs_ihash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (vp);
}
}
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
return (NULL);
}
@@ -130,11 +130,11 @@ ufs_ihashins(ip)
/* lock the inode, then put it on the appropriate hash list */
lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
ipp = INOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(ipp, ip, i_hash);
ip->i_flag |= IN_HASHED;
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
}
/*
@@ -144,10 +144,10 @@ void
ufs_ihashrem(ip)
struct inode *ip;
{
- mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+ mtx_lock(&ufs_ihash_mtx);
if (ip->i_flag & IN_HASHED) {
ip->i_flag &= ~IN_HASHED;
LIST_REMOVE(ip, i_hash);
}
- mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+ mtx_unlock(&ufs_ihash_mtx);
}
diff --git a/sys/ufs/ufs/ufs_quota.c b/sys/ufs/ufs/ufs_quota.c
index 1b3c69a..f419f01 100644
--- a/sys/ufs/ufs/ufs_quota.c
+++ b/sys/ufs/ufs/ufs_quota.c
@@ -666,7 +666,7 @@ qsync(mp)
* Search vnodes associated with this mount point,
* synchronizing any modified dquot structures.
*/
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
again:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
@@ -674,11 +674,11 @@ again:
nextvp = LIST_NEXT(vp, v_mntvnodes);
if (vp->v_type == VNON)
continue;
- mtx_enter(&vp->v_interlock, MTX_DEF);
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
+ mtx_unlock(&mntvnode_mtx);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto again;
continue;
@@ -689,11 +689,11 @@ again:
dqsync(vp, dq);
}
vput(vp);
- mtx_enter(&mntvnode_mtx, MTX_DEF);
+ mtx_lock(&mntvnode_mtx);
if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
goto again;
}
- mtx_exit(&mntvnode_mtx, MTX_DEF);
+ mtx_unlock(&mntvnode_mtx);
return (0);
}
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 07d0dac..134e356 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -284,10 +284,10 @@ ufs_close(ap)
{
register struct vnode *vp = ap->a_vp;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount > 1)
ufs_itimes(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (0);
}
@@ -1863,10 +1863,10 @@ ufsspec_close(ap)
{
struct vnode *vp = ap->a_vp;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount > 1)
ufs_itimes(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
}
@@ -1937,10 +1937,10 @@ ufsfifo_close(ap)
{
struct vnode *vp = ap->a_vp;
- mtx_enter(&vp->v_interlock, MTX_DEF);
+ mtx_lock(&vp->v_interlock);
if (vp->v_usecount > 1)
ufs_itimes(vp);
- mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_unlock(&vp->v_interlock);
return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
}
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index ee30759..4641537 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -854,7 +854,7 @@ readrest:
vm_page_activate(fs.m);
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (curproc && (curproc->p_sflag & PS_INMEM) && curproc->p_stats) {
if (hardfault) {
curproc->p_stats->p_ru.ru_majflt++;
@@ -862,7 +862,7 @@ readrest:
curproc->p_stats->p_ru.ru_minflt++;
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* Unlock everything, and return
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index b76c855..0f80f57 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -313,18 +313,18 @@ faultin(p)
{
mtx_assert(&p->p_mtx, MA_OWNED);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
++p->p_lock;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
mtx_assert(&Giant, MA_OWNED);
pmap_swapin_proc(p);
PROC_LOCK(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SRUN) {
setrunqueue(p);
}
@@ -334,7 +334,7 @@ faultin(p)
/* undo the effect of setting SLOCK above */
--p->p_lock;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
/*
@@ -366,7 +366,7 @@ loop:
ppri = INT_MIN;
ALLPROC_LOCK(AP_SHARED);
LIST_FOREACH(p, &allproc, p_list) {
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat == SRUN &&
(p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
@@ -385,7 +385,7 @@ loop:
ppri = pri;
}
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
ALLPROC_LOCK(AP_RELEASE);
@@ -396,9 +396,9 @@ loop:
tsleep(&proc0, PVM, "sched", 0);
goto loop;
}
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPINREQ;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* We would like to bring someone in. (only if there is space).
@@ -406,9 +406,9 @@ loop:
PROC_LOCK(p);
faultin(p);
PROC_UNLOCK(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_swtime = 0;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
goto loop;
}
@@ -461,15 +461,15 @@ retry:
}
vm = p->p_vmspace;
PROC_UNLOCK(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
switch (p->p_stat) {
default:
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
case SSLEEP:
@@ -478,7 +478,7 @@ retry:
* do not swapout a realtime process
*/
if (RTP_PRIO_IS_REALTIME(p->p_rtprio.type)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
@@ -489,7 +489,7 @@ retry:
*/
if (((p->p_priority & 0x7f) < PSOCK) ||
(p->p_slptime < swap_idle_threshold1)) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
@@ -501,10 +501,10 @@ retry:
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
(p->p_slptime < swap_idle_threshold2))) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
++vm->vm_refcnt;
/*
@@ -522,17 +522,17 @@ retry:
* If the process has been asleep for awhile and had
* most of its pages taken away already, swap it out.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if ((action & VM_SWAP_NORMAL) ||
((action & VM_SWAP_IDLE) &&
(p->p_slptime > swap_idle_threshold2))) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
swapout(p);
vmspace_free(vm);
didswap++;
goto retry;
} else
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
}
ALLPROC_LOCK(AP_RELEASE);
@@ -559,19 +559,19 @@ swapout(p)
p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
(void) splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_INMEM;
p->p_sflag |= PS_SWAPPING;
if (p->p_stat == SRUN)
remrunqueue(p);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
(void) spl0();
pmap_swapout_proc(p);
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPPING;
p->p_swtime = 0;
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index ef48af2..291826b 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -291,15 +291,15 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
#define vm_map_set_recursive(map) \
do { \
- mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_lock((map)->lock.lk_interlock); \
(map)->lock.lk_flags |= LK_CANRECURSE; \
- mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_unlock((map)->lock.lk_interlock); \
} while(0)
#define vm_map_clear_recursive(map) \
do { \
- mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_lock((map)->lock.lk_interlock); \
(map)->lock.lk_flags &= ~LK_CANRECURSE; \
- mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
+ mtx_unlock((map)->lock.lk_interlock); \
} while(0)
/*
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 3a31ad4..0a05cb9 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -153,10 +153,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_flag & P_SYSTEM)
continue;
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
switch (p->p_stat) {
case 0:
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
case SMTX:
@@ -170,7 +170,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
} else if (p->p_slptime < maxslp)
totalp->t_sw++;
if (p->p_slptime >= maxslp) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
break;
@@ -186,12 +186,12 @@ vmtotal(SYSCTL_HANDLER_ARGS)
else
totalp->t_sw++;
if (p->p_stat == SIDL) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
break;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* Note active objects.
*/
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1e16917..39191b1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -458,9 +458,9 @@ vm_object_terminate(object)
/*
* Remove the object from the global object list.
*/
- mtx_enter(&vm_object_list_mtx, MTX_DEF);
+ mtx_lock(&vm_object_list_mtx);
TAILQ_REMOVE(&vm_object_list, object, object_list);
- mtx_exit(&vm_object_list_mtx, MTX_DEF);
+ mtx_unlock(&vm_object_list_mtx);
wakeup(object);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 4046e0e..568f42b 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1140,12 +1140,12 @@ rescan0:
* if the process is in a non-running type state,
* don't touch it.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
/*
* get the process size
*/
@@ -1162,11 +1162,11 @@ rescan0:
ALLPROC_LOCK(AP_RELEASE);
if (bigproc != NULL) {
killproc(bigproc, "out of swap space");
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
bigproc->p_estcpu = 0;
bigproc->p_nice = PRIO_MIN;
resetpriority(bigproc);
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
wakeup(&cnt.v_free_count);
}
}
@@ -1305,7 +1305,7 @@ vm_pageout()
{
int pass;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
/*
* Initialize some paging parameters.
@@ -1449,7 +1449,7 @@ vm_daemon()
{
struct proc *p;
- mtx_enter(&Giant, MTX_DEF);
+ mtx_lock(&Giant);
while (TRUE) {
tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
@@ -1477,9 +1477,9 @@ vm_daemon()
* if the process is in a non-running type state,
* don't touch it.
*/
- mtx_enter(&sched_lock, MTX_SPIN);
+ mtx_lock_spin(&sched_lock);
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
continue;
}
/*
@@ -1496,7 +1496,7 @@ vm_daemon()
*/
if ((p->p_sflag & PS_INMEM) == 0)
limit = 0; /* XXX */
- mtx_exit(&sched_lock, MTX_SPIN);
+ mtx_unlock_spin(&sched_lock);
size = vmspace_resident_count(p->p_vmspace);
if (limit >= 0 && size >= limit) {
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 390c5f2..f2d9622 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -173,9 +173,9 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
/* our zone is good and ready, add it to the list */
if ((z->zflags & ZONE_BOOT) == 0) {
mtx_init(&(z)->zmtx, "zone", MTX_DEF);
- mtx_enter(&zone_mtx, MTX_DEF);
+ mtx_lock(&zone_mtx);
SLIST_INSERT_HEAD(&zlist, z, zent);
- mtx_exit(&zone_mtx, MTX_DEF);
+ mtx_unlock(&zone_mtx);
}
return 1;
@@ -245,9 +245,9 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
z->zmax = nitems;
z->ztotal = nitems;
- mtx_enter(&zone_mtx, MTX_DEF);
+ mtx_lock(&zone_mtx);
SLIST_INSERT_HEAD(&zlist, z, zent);
- mtx_exit(&zone_mtx, MTX_DEF);
+ mtx_unlock(&zone_mtx);
}
/*
@@ -300,15 +300,15 @@ _zget(vm_zone_t z)
* map.
*/
if (lockstatus(&kernel_map->lock, NULL)) {
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
if (item != NULL)
atomic_add_int(&zone_kmem_pages, z->zalloc);
} else {
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
item = (void *) kmem_alloc(kernel_map, nbytes);
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
if (item != NULL)
atomic_add_int(&zone_kern_pages, z->zalloc);
}
@@ -363,11 +363,11 @@ zalloc(vm_zone_t z)
void *item;
KASSERT(z != NULL, ("invalid zone"));
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
if (z->zfreecnt <= z->zfreemin) {
item = _zget(z);
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
return item;
}
@@ -382,7 +382,7 @@ zalloc(vm_zone_t z)
z->zfreecnt--;
z->znalloc++;
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
return item;
}
@@ -394,7 +394,7 @@ zfree(vm_zone_t z, void *item)
{
KASSERT(z != NULL, ("invalid zone"));
KASSERT(item != NULL, ("invalid item"));
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
@@ -405,7 +405,7 @@ zfree(vm_zone_t z, void *item)
z->zitems = item;
z->zfreecnt++;
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
}
/*
@@ -418,22 +418,22 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
char tmpbuf[128];
vm_zone_t z;
- mtx_enter(&zone_mtx, MTX_DEF);
+ mtx_lock(&zone_mtx);
len = snprintf(tmpbuf, sizeof(tmpbuf),
"\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
error = SYSCTL_OUT(req, tmpbuf, SLIST_EMPTY(&zlist) ? len-1 : len);
SLIST_FOREACH(z, &zlist, zent) {
- mtx_enter(&z->zmtx, MTX_DEF);
+ mtx_lock(&z->zmtx);
len = snprintf(tmpbuf, sizeof(tmpbuf),
"%-14.14s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
z->zname, z->zsize, z->zmax, (z->ztotal - z->zfreecnt),
z->zfreecnt, z->znalloc);
- mtx_exit(&z->zmtx, MTX_DEF);
+ mtx_unlock(&z->zmtx);
if (SLIST_NEXT(z, zent) == NULL)
tmpbuf[len - 1] = 0;
error = SYSCTL_OUT(req, tmpbuf, len);
}
- mtx_exit(&zone_mtx, MTX_DEF);
+ mtx_unlock(&zone_mtx);
return (error);
}
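zalloc() and zfree() take the per-zone mutex internally, so zone consumers never touch zmtx themselves. A minimal usage sketch (the zone, the item structure and the constructor arguments are hypothetical; zinit() is the usual constructor for non-boot zones):

struct example_item { int ei_val; };	/* placeholder payload */

static vm_zone_t example_zone;

static void
example_zone_init(void)
{
	example_zone = zinit("example", sizeof(struct example_item), 0, 0, 1);
}

static void
example_zone_use(void)
{
	struct example_item *it;

	it = zalloc(example_zone);	/* locks/unlocks example_zone->zmtx internally */
	if (it != NULL) {
		/* ... use the item ... */
		zfree(example_zone, it);
	}
}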