Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/imgact_elf.c      5
-rw-r--r--  sys/kern/kern_lock.c      64
-rw-r--r--  sys/kern/vfs_bio.c         4
-rw-r--r--  sys/kern/vfs_conf.c        4
-rw-r--r--  sys/kern/vfs_default.c     5
-rw-r--r--  sys/kern/vfs_export.c    111
-rw-r--r--  sys/kern/vfs_extattr.c    58
-rw-r--r--  sys/kern/vfs_mount.c       4
-rw-r--r--  sys/kern/vfs_subr.c      111
-rw-r--r--  sys/kern/vfs_syscalls.c   58
-rw-r--r--  sys/kern/vfs_vnops.c       6
11 files changed, 239 insertions, 191 deletions
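The change is almost entirely mechanical: each struct simplelock used as an interlock becomes a struct mtx, simple_lock_init() becomes mtx_init() (which now takes a name for the lock), and simple_lock()/simple_unlock() become mtx_enter()/mtx_exit() with MTX_DEF, the sleep-mutex type. A minimal before/after sketch of the idiom — the lock name "example lock" and the standalone variables are illustrative, but the mtx calls are exactly the ones used throughout this commit:

	/* Before: the simplelock API being retired */
	struct simplelock sl;

	simple_lock_init(&sl);
	simple_lock(&sl);
	/* ... touch the protected state ... */
	simple_unlock(&sl);

	/* After: the kernel mutex API used throughout this commit */
	#include <machine/mutex.h>

	struct mtx m;

	mtx_init(&m, "example lock", MTX_DEF);	/* MTX_DEF: a sleep mutex */
	mtx_enter(&m, MTX_DEF);
	/* ... touch the protected state ... */
	mtx_exit(&m, MTX_DEF);
	mtx_destroy(&m);	/* unlike simplelocks, mutexes need explicit teardown */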
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index dac862a..2123b11 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -63,6 +63,7 @@
#include <machine/elf.h>
#include <machine/md_var.h>
+#include <machine/mutex.h>
#define OLD_EI_BRAND 8
@@ -477,9 +478,9 @@ exec_elf_imgact(struct image_params *imgp)
* a context switch. Better safe than sorry; I really don't want
* the file to change while it's being loaded.
*/
- simple_lock(&imgp->vp->v_interlock);
+ mtx_enter(&imgp->vp->v_interlock, MTX_DEF);
imgp->vp->v_flag |= VTEXT;
- simple_unlock(&imgp->vp->v_interlock);
+ mtx_exit(&imgp->vp->v_interlock, MTX_DEF);
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1e10fbe..12c6870 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -46,6 +46,8 @@
#include <sys/lock.h>
#include <sys/systm.h>
+#include <machine/mutex.h>
+
/*
* Locking primitives implementation.
* Locks provide shared/exclusive synchronization.
@@ -111,11 +113,11 @@ apause(struct lock *lkp, int flags)
return 0;
#ifdef SMP
for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
if ((lkp->lk_flags & flags) == 0)
break;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if ((lkp->lk_flags & flags) == 0)
return 0;
}
@@ -127,6 +129,10 @@ static int
acquire(struct lock *lkp, int extflags, int wanted) {
int s, error;
+ CTR3(KTR_LOCKMGR,
+ "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
+ lkp, extflags, wanted);
+
if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
return EBUSY;
}
@@ -141,9 +147,9 @@ acquire(struct lock *lkp, int extflags, int wanted) {
while ((lkp->lk_flags & wanted) != 0) {
lkp->lk_flags |= LK_WAIT_NONZERO;
lkp->lk_waitcount++;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (lkp->lk_waitcount == 1) {
lkp->lk_flags &= ~LK_WAIT_NONZERO;
lkp->lk_waitcount = 0;
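The loop keeps the shape it had with simplelocks for the same reason: the interlock must not be held across the sleep, so acquire() publishes its intent in lk_flags, exits the mutex, sleeps, and re-enters the mutex before re-checking the condition. A condensed sketch of the pattern, with the waitcount bookkeeping trimmed (fields as in the diff):

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		mtx_exit(&lkp->lk_interlock, MTX_DEF);	/* never sleep holding the mutex */
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
		mtx_enter(&lkp->lk_interlock, MTX_DEF);	/* retake before re-checking */
		if (error)
			break;
	}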
@@ -178,7 +184,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
struct lock *lkp;
u_int flags;
- struct simplelock *interlkp;
+ struct mtx *interlkp;
struct proc *p;
#ifdef DEBUG_LOCKS
const char *name; /* Name of lock function */
@@ -190,15 +196,19 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
pid_t pid;
int extflags;
+ CTR5(KTR_LOCKMGR,
+ "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
+ "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);
+
error = 0;
if (p == NULL)
pid = LK_KERNPROC;
else
pid = p->p_pid;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (flags & LK_INTERLOCK)
- simple_unlock(interlkp);
+ mtx_exit(interlkp, MTX_DEF);
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
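With interlkp now a struct mtx *, the LK_INTERLOCK protocol is preserved: the caller enters a mutex, hands it to lockmgr(), and lockmgr() exits it once it holds its own interlock, so there is no window in which neither lock is held. A hedged caller-side sketch (the vnode field names are the ones used elsewhere in this commit; the exact call site is illustrative):

	mtx_enter(&vp->v_interlock, MTX_DEF);
	/* ... examine vnode state under the interlock ... */
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &vp->v_interlock, p);
	/* lockmgr() has released v_interlock by the time it returns */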
@@ -424,7 +434,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
default:
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
/* NOTREACHED */
@@ -435,7 +445,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
return (error);
}
@@ -453,10 +463,10 @@ acquiredrain(struct lock *lkp, int extflags) {
while (lkp->lk_flags & LK_ALL) {
lkp->lk_flags |= LK_WAITDRAIN;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
error = tsleep(&lkp->lk_flags, lkp->lk_prio,
lkp->lk_wmesg, lkp->lk_timo);
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (error)
return error;
if (extflags & LK_SLEEPFAIL) {
@@ -477,9 +487,14 @@ lockinit(lkp, prio, wmesg, timo, flags)
int timo;
int flags;
{
+ CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
+ "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
- simple_lock_init(&lkp->lk_interlock);
- lkp->lk_flags = (flags & LK_EXTFLG_MASK);
+ if (lkp->lk_flags & LK_VALID)
+ lockdestroy(lkp);
+
+ mtx_init(&lkp->lk_interlock, "lockmgr interlock", MTX_DEF);
+ lkp->lk_flags = (flags & LK_EXTFLG_MASK) | LK_VALID;
lkp->lk_sharecount = 0;
lkp->lk_waitcount = 0;
lkp->lk_exclusivecount = 0;
@@ -490,6 +505,21 @@ lockinit(lkp, prio, wmesg, timo, flags)
}
/*
+ * Destroy a lock.
+ */
+void
+lockdestroy(lkp)
+ struct lock *lkp;
+{
+ CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
+ lkp, lkp->lk_wmesg);
+ if (lkp->lk_flags & LK_VALID) {
+ lkp->lk_flags &= ~LK_VALID;
+ mtx_destroy(&lkp->lk_interlock);
+ }
+}
+
+/*
* Determine the status of a lock.
*/
int
@@ -499,7 +529,7 @@ lockstatus(lkp, p)
{
int lock_type = 0;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (lkp->lk_exclusivecount != 0) {
if (p == NULL || lkp->lk_lockholder == p->p_pid)
lock_type = LK_EXCLUSIVE;
@@ -507,7 +537,7 @@ lockstatus(lkp, p)
lock_type = LK_EXCLOTHER;
} else if (lkp->lk_sharecount != 0)
lock_type = LK_SHARED;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
return (lock_type);
}
@@ -520,9 +550,9 @@ lockcount(lkp)
{
int count;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
count = lkp->lk_exclusivecount + lkp->lk_sharecount;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
return (count);
}
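Because every lockmgr lock now embeds a real mutex, it gains an explicit teardown step: lockinit() marks the lock LK_VALID (destroying any stale lock first) and lockdestroy() must run before the containing memory is reused or freed. A minimal lifecycle sketch — the embedding structure and malloc type are hypothetical:

	struct foo {
		struct lock	f_lock;
	};

	lockinit(&fp->f_lock, PVFS, "foolck", 0, 0);	/* mtx_init()s the interlock */
	lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL, p);
	/* ... use the structure ... */
	lockmgr(&fp->f_lock, LK_RELEASE, NULL, p);
	lockdestroy(&fp->f_lock);	/* mtx_destroy()s the interlock */
	free(fp, M_TEMP);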
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 11e9183..d9e77cc 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -61,7 +61,7 @@ struct bio_ops bioops; /* I/O operation notification */
struct buf *buf; /* buffer header pool */
struct swqueue bswlist;
-struct simplelock buftimelock; /* Interlock on setting prio and timo */
+struct mtx buftimelock; /* Interlock on setting prio and timo */
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
vm_offset_t to);
@@ -290,7 +290,7 @@ bufinit(void)
TAILQ_INIT(&bswlist);
LIST_INIT(&invalhash);
- simple_lock_init(&buftimelock);
+ mtx_init(&buftimelock, "buftime lock", MTX_DEF);
for (i = 0; i <= bufhashmask; i++)
LIST_INIT(&bufhashtbl[i]);
diff --git a/sys/kern/vfs_conf.c b/sys/kern/vfs_conf.c
index 34d4afb..442ab60 100644
--- a/sys/kern/vfs_conf.c
+++ b/sys/kern/vfs_conf.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 01418f0..9bea916 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -53,6 +53,7 @@
#include <sys/poll.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -449,7 +450,7 @@ vop_nolock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- simple_unlock(&ap->a_vp->v_interlock);
+ mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
return (0);
#endif
}
@@ -471,7 +472,7 @@ vop_nounlock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- simple_unlock(&ap->a_vp->v_interlock);
+ mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
return (0);
}
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index d7a9436..dc8f57d 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -145,7 +145,7 @@ SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
/* mounted fs */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
/* For any iteration/modification of mountlist */
-struct simplelock mountlist_slock;
+struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
struct simplelock mntvnode_slock;
/*
@@ -238,6 +238,7 @@ vntblinit()
{
desiredvnodes = maxproc + cnt.v_page_count / 4;
+ mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
@@ -260,7 +261,7 @@ int
vfs_busy(mp, flags, interlkp, p)
struct mount *mp;
int flags;
- struct simplelock *interlkp;
+ struct mtx *interlkp;
struct proc *p;
{
int lkflags;
@@ -270,7 +271,7 @@ vfs_busy(mp, flags, interlkp, p)
return (ENOENT);
mp->mnt_kern_flag |= MNTK_MWAIT;
if (interlkp) {
- simple_unlock(interlkp);
+ mtx_exit(interlkp, MTX_DEF);
}
/*
* Since all busy locks are shared except the exclusive
@@ -280,7 +281,7 @@ vfs_busy(mp, flags, interlkp, p)
*/
tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
if (interlkp) {
- simple_lock(interlkp);
+ mtx_enter(interlkp, MTX_DEF);
}
return (ENOENT);
}
@@ -384,15 +385,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (mp);
}
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return ((struct mount *) 0);
}
@@ -554,7 +555,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !simple_lock_try(&vp->v_interlock)) {
+ !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -564,7 +565,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
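simple_lock_try() maps onto mtx_try_enter(), which returns nonzero on success; getnewvnode() uses it so that a contended vnode is simply requeued rather than waited for while scanning the free list. A condensed sketch of the idiom as it appears in the hunk above:

	if (!mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
		/* contended: requeue and move on to the next candidate */
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		vp = NULL;
		continue;
	}
	/* interlock held: vp may now be examined and possibly recycled */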
@@ -577,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
vn_finished_write(vnmp);
@@ -605,7 +606,7 @@ getnewvnode(tag, mp, vops, vpp)
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
- simple_lock_init(&vp->v_interlock);
+ mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
@@ -777,12 +778,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1423,10 +1424,10 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
@@ -1445,15 +1446,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
return (error);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1463,9 +1464,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount++;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -1480,14 +1481,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return;
}
@@ -1509,7 +1510,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
#endif
panic("vrele: negative ref cnt");
}
@@ -1527,7 +1528,7 @@ vput(vp)
KASSERT(vp != NULL, ("vput: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
@@ -1549,7 +1550,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,12 +1634,12 @@ loop:
if (vp == skipvp)
continue;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Skip over vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
/*
@@ -1647,7 +1648,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
@@ -1683,7 +1684,7 @@ loop:
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
busy++;
}
simple_unlock(&mntvnode_slock);
@@ -1767,7 +1768,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1777,11 +1778,15 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
cache_purge(vp);
- vp->v_vnlock = NULL;
+ if (vp->v_vnlock) {
+ lockdestroy(vp->v_vnlock);
+ vp->v_vnlock = NULL;
+ }
+ lockdestroy(&vp->v_lock);
if (VSHOULDFREE(vp))
vfree(vp);
@@ -1822,7 +1827,7 @@ vop_revoke(ap)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
return (0);
}
@@ -1849,7 +1854,7 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount == 0) {
if (inter_lkp) {
simple_unlock(inter_lkp);
@@ -1857,7 +1862,7 @@ vrecycle(vp, inter_lkp, p)
vgonel(vp, p);
return (1);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1871,7 +1876,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vgonel(vp, p);
}
@@ -1891,7 +1896,7 @@ vgonel(vp, p)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vgone", 0);
return;
}
@@ -1900,7 +1905,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Delete from old mount point vnode list, if on one.
@@ -1943,7 +1948,7 @@ vgonel(vp, p)
}
vp->v_type = VBAD;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -2064,9 +2069,9 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2074,11 +2079,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
}
#endif
@@ -2183,9 +2188,9 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2211,11 +2216,11 @@ again:
simple_lock(&mntvnode_slock);
}
simple_unlock(&mntvnode_slock);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
@@ -2574,7 +2579,7 @@ loop:
continue;
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2586,7 +2591,7 @@ loop:
vput(vp);
}
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
}
if (anyio && (--tries > 0))
@@ -2838,14 +2843,14 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- simple_lock(&mountlist_slock);
- if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
- simple_unlock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
+ if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
vfs_unbusy(mp, p);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
asyncflag = mp->mnt_flag & MNT_ASYNC;
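Every mountlist walk converted in this file has the same shape: enter mountlist_mtx, pass it to vfs_busy() as the interlock (on success lockmgr() releases it via LK_INTERLOCK; on an LK_NOWAIT failure it is still held, which is why the failure path can fetch the next entry directly), do the per-mount work with no mutex held, then re-enter the mutex to advance safely. A condensed sketch of the loop as it appears in sync(), getfsstat(), and sysctl_vnode():

	mtx_enter(&mountlist_mtx, MTX_DEF);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);	/* mutex still held here */
			continue;
		}
		/* ... per-mount work, no mutex held ... */
		mtx_enter(&mountlist_mtx, MTX_DEF);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_exit(&mountlist_mtx, MTX_DEF);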
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 80cdc6d..bedad1b 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -65,6 +65,7 @@
#include <sys/extattr.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <miscfs/union/union.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -174,16 +175,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -241,15 +242,15 @@ mount(p, uap)
return (ENODEV);
}
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
/*
* Allocate and initialize the filesystem.
@@ -309,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vrele(vp);
return (error);
}
@@ -321,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- simple_unlock(&vp->v_interlock);
- simple_lock(&mountlist_slock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -336,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
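mount() relies on the vnode interlock to make the VMOUNT test-and-set atomic, so two racing mounts over the same directory cannot both proceed; the flag is cleared under the interlock again on every exit path. A condensed sketch of the claim step shown in the hunks above:

	mtx_enter(&vp->v_interlock, MTX_DEF);
	if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) {
		mtx_exit(&vp->v_interlock, MTX_DEF);
		vput(vp);
		return (EBUSY);		/* another mount is already in progress here */
	}
	vp->v_flag |= VMOUNT;		/* claim the covered vnode */
	mtx_exit(&vp->v_interlock, MTX_DEF);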
@@ -461,9 +462,9 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
- lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
if (mp->mnt_flag & MNT_EXPUBLIC)
@@ -481,14 +482,14 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
mp->mnt_flag |= async_flag;
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
- &mountlist_slock, p);
+ &mountlist_mtx, p);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
return (error);
@@ -501,7 +502,8 @@ dounmount(mp, flags, p)
mp->mnt_vfc->vfc_refcount--;
if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
- lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
+ lockdestroy(&mp->mnt_lock);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
free((caddr_t)mp, M_MOUNT);
@@ -531,9 +533,9 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -547,11 +549,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -723,9 +725,9 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -739,7 +741,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -753,11 +755,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 34d4afb..442ab60 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index d7a9436..dc8f57d 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -145,7 +145,7 @@ SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
/* mounted fs */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
/* For any iteration/modification of mountlist */
-struct simplelock mountlist_slock;
+struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
struct simplelock mntvnode_slock;
/*
@@ -238,6 +238,7 @@ vntblinit()
{
desiredvnodes = maxproc + cnt.v_page_count / 4;
+ mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
@@ -260,7 +261,7 @@ int
vfs_busy(mp, flags, interlkp, p)
struct mount *mp;
int flags;
- struct simplelock *interlkp;
+ struct mtx *interlkp;
struct proc *p;
{
int lkflags;
@@ -270,7 +271,7 @@ vfs_busy(mp, flags, interlkp, p)
return (ENOENT);
mp->mnt_kern_flag |= MNTK_MWAIT;
if (interlkp) {
- simple_unlock(interlkp);
+ mtx_exit(interlkp, MTX_DEF);
}
/*
* Since all busy locks are shared except the exclusive
@@ -280,7 +281,7 @@ vfs_busy(mp, flags, interlkp, p)
*/
tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
if (interlkp) {
- simple_lock(interlkp);
+ mtx_enter(interlkp, MTX_DEF);
}
return (ENOENT);
}
@@ -384,15 +385,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (mp);
}
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return ((struct mount *) 0);
}
@@ -554,7 +555,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !simple_lock_try(&vp->v_interlock)) {
+ !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -564,7 +565,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -577,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
vn_finished_write(vnmp);
@@ -605,7 +606,7 @@ getnewvnode(tag, mp, vops, vpp)
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
- simple_lock_init(&vp->v_interlock);
+ mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
@@ -777,12 +778,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1423,10 +1424,10 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
@@ -1445,15 +1446,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
return (error);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1463,9 +1464,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount++;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -1480,14 +1481,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return;
}
@@ -1509,7 +1510,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
#endif
panic("vrele: negative ref cnt");
}
@@ -1527,7 +1528,7 @@ vput(vp)
KASSERT(vp != NULL, ("vput: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
@@ -1549,7 +1550,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,12 +1634,12 @@ loop:
if (vp == skipvp)
continue;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Skip over vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
/*
@@ -1647,7 +1648,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
@@ -1683,7 +1684,7 @@ loop:
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
busy++;
}
simple_unlock(&mntvnode_slock);
@@ -1767,7 +1768,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1777,11 +1778,15 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
cache_purge(vp);
- vp->v_vnlock = NULL;
+ if (vp->v_vnlock) {
+ lockdestroy(vp->v_vnlock);
+ vp->v_vnlock = NULL;
+ }
+ lockdestroy(&vp->v_lock);
if (VSHOULDFREE(vp))
vfree(vp);
@@ -1822,7 +1827,7 @@ vop_revoke(ap)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
return (0);
}
@@ -1849,7 +1854,7 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount == 0) {
if (inter_lkp) {
simple_unlock(inter_lkp);
@@ -1857,7 +1862,7 @@ vrecycle(vp, inter_lkp, p)
vgonel(vp, p);
return (1);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1871,7 +1876,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vgonel(vp, p);
}
@@ -1891,7 +1896,7 @@ vgonel(vp, p)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vgone", 0);
return;
}
@@ -1900,7 +1905,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Delete from old mount point vnode list, if on one.
@@ -1943,7 +1948,7 @@ vgonel(vp, p)
}
vp->v_type = VBAD;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -2064,9 +2069,9 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2074,11 +2079,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
}
#endif
@@ -2183,9 +2188,9 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2211,11 +2216,11 @@ again:
simple_lock(&mntvnode_slock);
}
simple_unlock(&mntvnode_slock);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
@@ -2574,7 +2579,7 @@ loop:
continue;
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2586,7 +2591,7 @@ loop:
vput(vp);
}
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
}
if (anyio && (--tries > 0))
@@ -2838,14 +2843,14 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- simple_lock(&mountlist_slock);
- if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
- simple_unlock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
+ if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
vfs_unbusy(mp, p);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
asyncflag = mp->mnt_flag & MNT_ASYNC;
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 80cdc6d..bedad1b 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -65,6 +65,7 @@
#include <sys/extattr.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <miscfs/union/union.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -174,16 +175,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -241,15 +242,15 @@ mount(p, uap)
return (ENODEV);
}
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
/*
* Allocate and initialize the filesystem.
@@ -309,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vrele(vp);
return (error);
}
@@ -321,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- simple_unlock(&vp->v_interlock);
- simple_lock(&mountlist_slock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -336,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -461,9 +462,9 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
- lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
if (mp->mnt_flag & MNT_EXPUBLIC)
@@ -481,14 +482,14 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
mp->mnt_flag |= async_flag;
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
- &mountlist_slock, p);
+ &mountlist_mtx, p);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
return (error);
@@ -501,7 +502,8 @@ dounmount(mp, flags, p)
mp->mnt_vfc->vfc_refcount--;
if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
- lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
+ lockdestroy(&mp->mnt_lock);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
free((caddr_t)mp, M_MOUNT);
@@ -531,9 +533,9 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -547,11 +549,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -723,9 +725,9 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -739,7 +741,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -753,11 +755,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 427209f..9287443 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -54,6 +54,8 @@
#include <sys/ttycom.h>
#include <sys/conf.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
@@ -640,10 +642,10 @@ debug_vn_lock(vp, flags, p, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
error = ENOENT;
} else {
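vn_lock() above — together with vget(), vgone(), and vop_revoke() earlier in this commit — uses the shared VXLOCK wait idiom: if the vnode is being reclaimed by vclean(), the waiter sets VXWANT, drops the interlock, and sleeps until the cleaner's wakeup(). A condensed sketch:

	mtx_enter(&vp->v_interlock, MTX_DEF);
	if (vp->v_flag & VXLOCK) {		/* vnode is being torn down */
		vp->v_flag |= VXWANT;		/* ask vclean() to wake us */
		mtx_exit(&vp->v_interlock, MTX_DEF);
		tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
		error = ENOENT;			/* vnode went away; fail the lock */
	}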