Diffstat (limited to 'sys')
-rw-r--r--  sys/coda/coda_vnops.c | 1
-rw-r--r--  sys/dev/agp/agp.c | 1
-rw-r--r--  sys/fs/cd9660/cd9660_node.c | 7
-rw-r--r--  sys/fs/coda/coda_vnops.c | 1
-rw-r--r--  sys/fs/deadfs/dead_vnops.c | 4
-rw-r--r--  sys/fs/devfs/devfs_vfsops.c | 2
-rw-r--r--  sys/fs/hpfs/hpfs.h | 1
-rw-r--r--  sys/fs/hpfs/hpfs_hash.c | 14
-rw-r--r--  sys/fs/hpfs/hpfs_vfsops.c | 13
-rw-r--r--  sys/fs/hpfs/hpfs_vnops.c | 2
-rw-r--r--  sys/fs/msdosfs/msdosfs_denode.c | 7
-rw-r--r--  sys/fs/msdosfs/msdosfs_vfsops.c | 6
-rw-r--r--  sys/fs/msdosfs/msdosfs_vnops.c | 6
-rw-r--r--  sys/fs/ntfs/ntfs_ihash.c | 9
-rw-r--r--  sys/fs/ntfs/ntfs_ihash.h | 1
-rw-r--r--  sys/fs/ntfs/ntfs_inode.h | 2
-rw-r--r--  sys/fs/ntfs/ntfs_subr.c | 28
-rw-r--r--  sys/fs/ntfs/ntfs_subr.h | 1
-rw-r--r--  sys/fs/ntfs/ntfs_vfsops.c | 15
-rw-r--r--  sys/fs/nullfs/null_subr.c | 4
-rw-r--r--  sys/fs/nwfs/nwfs_node.c | 1
-rw-r--r--  sys/fs/nwfs/nwfs_vnops.c | 16
-rw-r--r--  sys/fs/unionfs/union_subr.c | 1
-rw-r--r--  sys/gnu/ext2fs/ext2_ihash.c | 6
-rw-r--r--  sys/gnu/ext2fs/ext2_vfsops.c | 8
-rw-r--r--  sys/gnu/fs/ext2fs/ext2_vfsops.c | 8
-rw-r--r--  sys/isofs/cd9660/cd9660_node.c | 7
-rw-r--r--  sys/kern/imgact_elf.c | 5
-rw-r--r--  sys/kern/kern_lock.c | 64
-rw-r--r--  sys/kern/vfs_bio.c | 4
-rw-r--r--  sys/kern/vfs_conf.c | 4
-rw-r--r--  sys/kern/vfs_default.c | 5
-rw-r--r--  sys/kern/vfs_export.c | 111
-rw-r--r--  sys/kern/vfs_extattr.c | 58
-rw-r--r--  sys/kern/vfs_mount.c | 4
-rw-r--r--  sys/kern/vfs_subr.c | 111
-rw-r--r--  sys/kern/vfs_syscalls.c | 58
-rw-r--r--  sys/kern/vfs_vnops.c | 6
-rw-r--r--  sys/miscfs/deadfs/dead_vnops.c | 4
-rw-r--r--  sys/miscfs/nullfs/null_subr.c | 4
-rw-r--r--  sys/miscfs/union/union_subr.c | 1
-rw-r--r--  sys/msdosfs/msdosfs_denode.c | 7
-rw-r--r--  sys/msdosfs/msdosfs_vfsops.c | 6
-rw-r--r--  sys/msdosfs/msdosfs_vnops.c | 6
-rw-r--r--  sys/netncp/ncp_conn.c | 8
-rw-r--r--  sys/netncp/ncp_conn.h | 1
-rw-r--r--  sys/netncp/ncp_subr.c | 1
-rw-r--r--  sys/nfs/nfs_node.c | 2
-rw-r--r--  sys/nfs/nfs_nqlease.c | 10
-rw-r--r--  sys/nfsclient/nfs_node.c | 2
-rw-r--r--  sys/ntfs/ntfs_ihash.c | 9
-rw-r--r--  sys/ntfs/ntfs_ihash.h | 1
-rw-r--r--  sys/ntfs/ntfs_inode.h | 2
-rw-r--r--  sys/ntfs/ntfs_subr.c | 28
-rw-r--r--  sys/ntfs/ntfs_subr.h | 1
-rw-r--r--  sys/ntfs/ntfs_vfsops.c | 15
-rw-r--r--  sys/nwfs/nwfs_node.c | 1
-rw-r--r--  sys/nwfs/nwfs_vnops.c | 16
-rw-r--r--  sys/pci/agp.c | 1
-rw-r--r--  sys/sys/buf.h | 15
-rw-r--r--  sys/sys/ktr.h | 1
-rw-r--r--  sys/sys/lock.h | 22
-rw-r--r--  sys/sys/lockmgr.h | 22
-rw-r--r--  sys/sys/mount.h | 4
-rw-r--r--  sys/sys/vnode.h | 5
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c | 14
-rw-r--r--  sys/ufs/ufs/ufs_extattr.c | 1
-rw-r--r--  sys/ufs/ufs/ufs_ihash.c | 6
-rw-r--r--  sys/ufs/ufs/ufs_inode.c | 1
-rw-r--r--  sys/ufs/ufs/ufs_quota.c | 2
-rw-r--r--  sys/ufs/ufs/ufs_vnops.c | 14
-rw-r--r--  sys/vm/vm_map.c | 8
-rw-r--r--  sys/vm/vm_map.h | 10
73 files changed, 553 insertions(+), 300 deletions(-)
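
The diffs below apply one idiom over and over: a simple lock guarding a vnode, mount list, or lockmgr lock becomes a mutex, so each simple_lock()/simple_unlock() pair turns into mtx_enter()/mtx_exit(), and simple_lock_try() becomes mtx_try_enter(). A minimal sketch of that conversion, modelled on the vref() change in sys/kern/vfs_subr.c below (the function name example_vref is made up for illustration):

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <machine/mutex.h>

    /*
     * Illustrative only, not part of the commit: v_interlock is now a
     * struct mtx, so the reference-count bump is bracketed by
     * mtx_enter()/mtx_exit() instead of simple_lock()/simple_unlock().
     */
    static void
    example_vref(struct vnode *vp)
    {
            mtx_enter(&vp->v_interlock, MTX_DEF);   /* was simple_lock() */
            vp->v_usecount++;
            mtx_exit(&vp->v_interlock, MTX_DEF);    /* was simple_unlock() */
    }
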
diff --git a/sys/coda/coda_vnops.c b/sys/coda/coda_vnops.c
index c1ed267..521db98 100644
--- a/sys/coda/coda_vnops.c
+++ b/sys/coda/coda_vnops.c
@@ -1753,6 +1753,7 @@ coda_reclaim(v)
#endif
}
cache_purge(vp);
+ lockdestroy(&(VTOC(vp)->c_lock));
coda_free(VTOC(vp));
VTOC(vp) = NULL;
return (0);
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index 4848f01..5bf48a1 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -271,6 +271,7 @@ agp_generic_detach(device_t dev)
struct agp_softc *sc = device_get_softc(dev);
bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
lockmgr(&sc->as_lock, LK_DRAIN, 0, curproc);
+ lockdestroy(&sc->as_lock);
destroy_dev(sc->as_devnode);
agp_flush_cache();
return 0;
diff --git a/sys/fs/cd9660/cd9660_node.c b/sys/fs/cd9660/cd9660_node.c
index 54bace3..500a1a6 100644
--- a/sys/fs/cd9660/cd9660_node.c
+++ b/sys/fs/cd9660/cd9660_node.c
@@ -49,6 +49,8 @@
#include <sys/malloc.h>
#include <sys/stat.h>
+#include <machine/mutex.h>
+
#include <isofs/cd9660/iso.h>
#include <isofs/cd9660/cd9660_node.h>
#include <isofs/cd9660/cd9660_mount.h>
@@ -108,7 +110,7 @@ loop:
for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&cd9660_ihash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
@@ -138,7 +140,7 @@ cd9660_ihashins(ip)
*ipp = ip;
simple_unlock(&cd9660_ihash_slock);
- lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct simplelock *)0, p);
+ lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
}
/*
@@ -218,6 +220,7 @@ cd9660_reclaim(ap)
vrele(ip->i_devvp);
ip->i_devvp = 0;
}
+ lockdestroy(&ip->i_vnode->v_lock);
FREE(vp->v_data, M_ISOFSNODE);
vp->v_data = NULL;
return (0);
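
The reclaim hunks in this diff (cd9660 above, plus msdosfs, hpfs, and unionfs below) all add the same teardown step: call the new lockdestroy() on the per-vnode lock before freeing the filesystem node, so the mutex embedded in the lockmgr lock is released. A hedged sketch of that pattern (example_reclaim and the M_TEMP malloc type are placeholders, not taken from any one filesystem):

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/malloc.h>
    #include <sys/vnode.h>

    /*
     * Sketch only: destroy the vnode lock that lockinit() set up, then
     * free the private node data, as the reclaim routines in this diff do.
     */
    static int
    example_reclaim(struct vnode *vp)
    {
            lockdestroy(&vp->v_lock);       /* releases lk_interlock */
            FREE(vp->v_data, M_TEMP);
            vp->v_data = NULL;
            return (0);
    }
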
diff --git a/sys/fs/coda/coda_vnops.c b/sys/fs/coda/coda_vnops.c
index c1ed267..521db98 100644
--- a/sys/fs/coda/coda_vnops.c
+++ b/sys/fs/coda/coda_vnops.c
@@ -1753,6 +1753,7 @@ coda_reclaim(v)
#endif
}
cache_purge(vp);
+ lockdestroy(&(VTOC(vp)->c_lock));
coda_free(VTOC(vp));
VTOC(vp) = NULL;
return (0);
diff --git a/sys/fs/deadfs/dead_vnops.c b/sys/fs/deadfs/dead_vnops.c
index f990733..ab6678f 100644
--- a/sys/fs/deadfs/dead_vnops.c
+++ b/sys/fs/deadfs/dead_vnops.c
@@ -41,6 +41,8 @@
#include <sys/vnode.h>
#include <sys/poll.h>
+#include <machine/mutex.h>
+
static int chkvnlock __P((struct vnode *));
/*
* Prototypes for dead operations on vnodes.
@@ -210,7 +212,7 @@ dead_lock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
ap->a_flags &= ~LK_INTERLOCK;
}
if (!chkvnlock(vp))
diff --git a/sys/fs/devfs/devfs_vfsops.c b/sys/fs/devfs/devfs_vfsops.c
index 2956607..00951ae 100644
--- a/sys/fs/devfs/devfs_vfsops.c
+++ b/sys/fs/devfs/devfs_vfsops.c
@@ -96,6 +96,7 @@ devfs_mount(mp, path, data, ndp, p)
error = devfs_root(mp, &rvp);
if (error) {
+ lockdestroy(&fmp->dm_lock);
FREE(fmp, M_DEVFS);
return (error);
}
@@ -142,6 +143,7 @@ devfs_unmount(mp, mntflags, p)
vrele(rootvp);
vgone(rootvp);
mp->mnt_data = 0;
+ lockdestroy(&fmp->dm_lock);
free(fmp, M_DEVFS);
return 0;
}
diff --git a/sys/fs/hpfs/hpfs.h b/sys/fs/hpfs/hpfs.h
index 7c880b2..be12791 100644
--- a/sys/fs/hpfs/hpfs.h
+++ b/sys/fs/hpfs/hpfs.h
@@ -414,6 +414,7 @@ extern vop_t ** hpfs_vnodeop_p;
/* Hash routines, too small to be separate header */
void hpfs_hphashinit __P((void));
+void hpfs_hphashdestroy __P((void));
struct hpfsnode *hpfs_hphashlookup __P((dev_t, lsn_t));
struct hpfsnode *hpfs_hphashget __P((dev_t, lsn_t));
struct vnode *hpfs_hphashvget __P((dev_t, lsn_t, struct proc *));
diff --git a/sys/fs/hpfs/hpfs_hash.c b/sys/fs/hpfs/hpfs_hash.c
index a948ade..450711c 100644
--- a/sys/fs/hpfs/hpfs_hash.c
+++ b/sys/fs/hpfs/hpfs_hash.c
@@ -43,6 +43,8 @@
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <machine/mutex.h>
+
#include <fs/hpfs/hpfs.h>
MALLOC_DEFINE(M_HPFSHASH, "HPFS hash", "HPFS node hash tables");
@@ -72,6 +74,16 @@ hpfs_hphashinit()
}
/*
+ * Destroy inode hash table.
+ */
+void
+hpfs_hphashdestroy(void)
+{
+
+ lockdestroy(&hpfs_hphash_lock);
+}
+
+/*
* Use the device/inum pair to find the incore inode, and return a pointer
* to it. If it is in core, return it, even if it is locked.
*/
@@ -126,7 +138,7 @@ loop:
for (hp = HPNOHASH(dev, ino)->lh_first; hp; hp = hp->h_hash.le_next) {
if (ino == hp->h_no && dev == hp->h_dev) {
vp = HPTOV(hp);
- simple_lock (&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock (&hpfs_hphash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
diff --git a/sys/fs/hpfs/hpfs_vfsops.c b/sys/fs/hpfs/hpfs_vfsops.c
index 0bb10dd..1906799 100644
--- a/sys/fs/hpfs/hpfs_vfsops.c
+++ b/sys/fs/hpfs/hpfs_vfsops.c
@@ -87,6 +87,7 @@ struct sockaddr;
static int hpfs_mount __P((struct mount *, char *, caddr_t,
struct nameidata *, struct proc *));
static int hpfs_init __P((struct vfsconf *));
+static int hpfs_uninit __P((struct vfsconf *));
static int hpfs_checkexp __P((struct mount *, struct sockaddr *,
int *, struct ucred **));
#else /* defined(__NetBSD__) */
@@ -169,6 +170,16 @@ hpfs_init ()
#endif
}
+#if defined(__FreeBSD__)
+static int
+hpfs_uninit (vfsp)
+ struct vfsconf *vfsp;
+{
+ hpfs_hphashdestroy();
+ return 0;
+}
+#endif
+
static int
hpfs_mount (
struct mount *mp,
@@ -737,7 +748,7 @@ static struct vfsops hpfs_vfsops = {
hpfs_checkexp,
hpfs_vptofh,
hpfs_init,
- vfs_stduninit,
+ hpfs_uninit,
vfs_stdextattrctl,
};
VFS_SET(hpfs_vfsops, hpfs, 0);
diff --git a/sys/fs/hpfs/hpfs_vnops.c b/sys/fs/hpfs/hpfs_vnops.c
index dcfd2cf..f1b0b2ce 100644
--- a/sys/fs/hpfs/hpfs_vnops.c
+++ b/sys/fs/hpfs/hpfs_vnops.c
@@ -698,6 +698,8 @@ hpfs_reclaim(ap)
hp->h_devvp = NULL;
}
+ lockdestroy(&hp->hlock);
+
vp->v_data = NULL;
FREE(hp, M_HPFSNO);
diff --git a/sys/fs/msdosfs/msdosfs_denode.c b/sys/fs/msdosfs/msdosfs_denode.c
index 4e237bf..0122f6b 100644
--- a/sys/fs/msdosfs/msdosfs_denode.c
+++ b/sys/fs/msdosfs/msdosfs_denode.c
@@ -61,6 +61,8 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
+#include <machine/mutex.h>
+
#include <msdosfs/bpb.h>
#include <msdosfs/msdosfsmount.h>
#include <msdosfs/direntry.h>
@@ -138,7 +140,7 @@ loop:
&& dev == dep->de_dev
&& dep->de_refcnt != 0) {
vp = DETOV(dep);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&dehash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
@@ -278,7 +280,7 @@ deget(pmp, dirclust, diroffset, depp)
* of at the start of msdosfs_hashins() so that reinsert() can
* call msdosfs_hashins() with a locked denode.
*/
- if (lockmgr(&ldep->de_lock, LK_EXCLUSIVE, (struct simplelock *)0, p))
+ if (lockmgr(&ldep->de_lock, LK_EXCLUSIVE, (struct mtx *)0, p))
panic("deget: unexpected lock failure");
/*
@@ -660,6 +662,7 @@ msdosfs_reclaim(ap)
#if 0 /* XXX */
dep->de_flag = 0;
#endif
+ lockdestroy(&dep->de_lock);
FREE(dep, M_MSDOSFSNODE);
vp->v_data = NULL;
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
index 33b5cd7..8af88d4 100644
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -62,6 +62,8 @@
#include <sys/malloc.h>
#include <sys/stat.h> /* defines ALLPERMS */
+#include <machine/mutex.h>
+
#include <msdosfs/bpb.h>
#include <msdosfs/bootsect.h>
#include <msdosfs/direntry.h>
@@ -873,14 +875,14 @@ loop:
if (vp->v_mount != mp)
goto loop;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
nvp = vp->v_mntvnodes.le_next;
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
simple_unlock(&mntvnode_slock);
diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c
index f8426de..37a57ea 100644
--- a/sys/fs/msdosfs/msdosfs_vnops.c
+++ b/sys/fs/msdosfs/msdosfs_vnops.c
@@ -68,6 +68,8 @@
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
+#include <machine/mutex.h>
+
#include <msdosfs/bpb.h>
#include <msdosfs/direntry.h>
#include <msdosfs/denode.h>
@@ -231,12 +233,12 @@ msdosfs_close(ap)
struct denode *dep = VTODE(vp);
struct timespec ts;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount > 1) {
getnanotime(&ts);
DETIMES(dep, &ts, &ts, &ts);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return 0;
}
diff --git a/sys/fs/ntfs/ntfs_ihash.c b/sys/fs/ntfs/ntfs_ihash.c
index 0deecff..cd2300d 100644
--- a/sys/fs/ntfs/ntfs_ihash.c
+++ b/sys/fs/ntfs/ntfs_ihash.c
@@ -75,6 +75,15 @@ ntfs_nthashinit()
}
/*
+ * Destroy inode hash table.
+ */
+void
+ntfs_nthashdestroy(void)
+{
+ lockdestroy(&ntfs_hashlock);
+}
+
+/*
* Use the device/inum pair to find the incore inode, and return a pointer
* to it. If it is in core, return it, even if it is locked.
*/
diff --git a/sys/fs/ntfs/ntfs_ihash.h b/sys/fs/ntfs/ntfs_ihash.h
index 7b7143f..a3f166f 100644
--- a/sys/fs/ntfs/ntfs_ihash.h
+++ b/sys/fs/ntfs/ntfs_ihash.h
@@ -30,6 +30,7 @@
extern struct lock ntfs_hashlock;
void ntfs_nthashinit __P((void));
+void ntfs_nthashdestroy __P((void));
struct ntnode *ntfs_nthashlookup __P((dev_t, ino_t));
struct ntnode *ntfs_nthashget __P((dev_t, ino_t));
void ntfs_nthashins __P((struct ntnode *));
diff --git a/sys/fs/ntfs/ntfs_inode.h b/sys/fs/ntfs/ntfs_inode.h
index a86d5f7..a865276 100644
--- a/sys/fs/ntfs/ntfs_inode.h
+++ b/sys/fs/ntfs/ntfs_inode.h
@@ -69,7 +69,7 @@ struct ntnode {
/* locking */
struct lock i_lock;
- struct simplelock i_interlock;
+ struct mtx i_interlock;
int i_usecount;
LIST_HEAD(,fnode) i_fnlist;
diff --git a/sys/fs/ntfs/ntfs_subr.c b/sys/fs/ntfs/ntfs_subr.c
index e8f5588..f060e5b 100644
--- a/sys/fs/ntfs/ntfs_subr.c
+++ b/sys/fs/ntfs/ntfs_subr.c
@@ -361,7 +361,7 @@ ntfs_ntget(ip)
dprintf(("ntfs_ntget: get ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount++;
LOCKMGR(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock);
@@ -410,7 +410,7 @@ ntfs_ntlookup(
/* init lock and lock the newborn ntnode */
lockinit(&ip->i_lock, PINOD, "ntnode", 0, LK_EXCLUSIVE);
- simple_lock_init(&ip->i_interlock);
+ mtx_init(&ip->i_interlock, "ntnode interlock", MTX_DEF);
ntfs_ntget(ip);
ntfs_nthashins(ip);
@@ -440,7 +440,7 @@ ntfs_ntput(ip)
dprintf(("ntfs_ntput: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount--;
#ifdef DIAGNOSTIC
@@ -464,6 +464,10 @@ ntfs_ntput(ip)
LIST_REMOVE(vap,va_list);
ntfs_freentvattr(vap);
}
+ mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_destroy(&ip->i_interlock);
+ lockdestroy(&ip->i_lock);
+
FREE(ip, M_NTFSNTNODE);
} else {
LOCKMGR(&ip->i_lock, LK_RELEASE|LK_INTERLOCK, &ip->i_interlock);
@@ -477,9 +481,9 @@ void
ntfs_ntref(ip)
struct ntnode *ip;
{
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount++;
- simple_unlock(&ip->i_interlock);
+ mtx_exit(&ip->i_interlock, MTX_DEF);
dprintf(("ntfs_ntref: ino %d, usecount: %d\n",
ip->i_number, ip->i_usecount));
@@ -496,13 +500,13 @@ ntfs_ntrele(ip)
dprintf(("ntfs_ntrele: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount--;
if (ip->i_usecount < 0)
panic("ntfs_ntrele: ino: %d usecount: %d \n",
ip->i_number,ip->i_usecount);
- simple_unlock(&ip->i_interlock);
+ mtx_exit(&ip->i_interlock, MTX_DEF);
}
/*
@@ -771,6 +775,9 @@ ntfs_frele(
FREE(fp->f_attrname, M_TEMP);
if (fp->f_dirblbuf)
FREE(fp->f_dirblbuf, M_NTFSDIR);
+#ifdef __FreeBSD__
+ lockdestroy(&fp->f_lock);
+#endif
FREE(fp, M_NTFSFNODE);
ntfs_ntrele(ip);
}
@@ -1915,6 +1922,13 @@ ntfs_toupper_init()
ntfs_toupper_usecount = 0;
}
+void
+ntfs_toupper_destroy(void)
+{
+
+ lockdestroy(&ntfs_toupper_lock);
+}
+
/*
* if the ntfs_toupper_tab[] is filled already, just raise use count;
* otherwise read the data from the filesystem we are currently mounting
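
In ntfs_subr.c the ntnode's i_interlock changes from a simplelock to a struct mtx, so the node now needs an explicit mtx_init() when it is created and an mtx_destroy() (plus lockdestroy() on i_lock) once its use count drops to zero. A sketch of that lifecycle under assumed names (example_node, n_interlock, and the M_TEMP type are illustrative):

    #include <sys/param.h>
    #include <sys/malloc.h>
    #include <machine/mutex.h>

    struct example_node {
            struct mtx      n_interlock;    /* stands in for i_interlock */
            int             n_usecount;
    };

    /* Creation side: what used to be simple_lock_init() is now mtx_init(). */
    static void
    example_node_init(struct example_node *np)
    {
            mtx_init(&np->n_interlock, "node interlock", MTX_DEF);
            np->n_usecount = 1;
    }

    /*
     * Last-release side, mirroring ntfs_ntput(): drop the held mutex,
     * destroy it, then free the node.
     */
    static void
    example_node_free(struct example_node *np)
    {
            mtx_exit(&np->n_interlock, MTX_DEF);
            mtx_destroy(&np->n_interlock);
            free(np, M_TEMP);
    }
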
diff --git a/sys/fs/ntfs/ntfs_subr.h b/sys/fs/ntfs/ntfs_subr.h
index a0cda50..8f1480a 100644
--- a/sys/fs/ntfs/ntfs_subr.h
+++ b/sys/fs/ntfs/ntfs_subr.h
@@ -103,6 +103,7 @@ int ntfs_loadntnode __P(( struct ntfsmount *, struct ntnode * ));
int ntfs_writentvattr_plain __P((struct ntfsmount *, struct ntnode *, struct ntvattr *, off_t, size_t, void *, size_t *, struct uio *));
int ntfs_writeattr_plain __P((struct ntfsmount *, struct ntnode *, u_int32_t, char *, off_t, size_t, void *, size_t *, struct uio *));
void ntfs_toupper_init __P((void));
+void ntfs_toupper_destroy __P((void));
int ntfs_toupper_use __P((struct mount *, struct ntfsmount *));
void ntfs_toupper_unuse __P((void));
int ntfs_fget __P((struct ntfsmount *, struct ntnode *, int, char *, struct fnode **));
diff --git a/sys/fs/ntfs/ntfs_vfsops.c b/sys/fs/ntfs/ntfs_vfsops.c
index 77ac0d8..1b0b97a 100644
--- a/sys/fs/ntfs/ntfs_vfsops.c
+++ b/sys/fs/ntfs/ntfs_vfsops.c
@@ -196,9 +196,9 @@ ntfs_mountroot()
return (error);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
(void)ntfs_statfs(mp, &mp->mnt_stat, p);
vfs_unbusy(mp);
return (0);
@@ -222,6 +222,15 @@ ntfs_init (
return 0;
}
+static int
+ntfs_uninit (
+ struct vfsconf *vcp )
+{
+ ntfs_toupper_destroy();
+ ntfs_nthashdestroy();
+ return 0;
+}
+
#endif /* NetBSD */
static int
@@ -1006,7 +1015,7 @@ static struct vfsops ntfs_vfsops = {
ntfs_checkexp,
ntfs_vptofh,
ntfs_init,
- vfs_stduninit,
+ ntfs_uninit,
vfs_stdextattrctl,
};
VFS_SET(ntfs_vfsops, ntfs, 0);
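
Both HPFS and NTFS stop using the stock vfs_stduninit and instead register a filesystem-specific uninit hook whose only job is to run the new *destroy() helpers, so locks created at module load are destroyed at unload. A minimal sketch of that hook under made-up names (example_hashlock, example_init, example_uninit):

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mount.h>

    static struct lock example_hashlock;    /* stand-in for a hash-table lock */

    /* Module load: the init hook sets the lock up... */
    static int
    example_init(struct vfsconf *vcp)
    {
            lockinit(&example_hashlock, PINOD, "exhash", 0, 0);
            return (0);
    }

    /*
     * ...and the uninit hook, wired into the vfsops slot that previously
     * held vfs_stduninit, tears it down again.
     */
    static int
    example_uninit(struct vfsconf *vcp)
    {
            lockdestroy(&example_hashlock);
            return (0);
    }
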
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index efb1357..b5df78c 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -92,8 +92,10 @@ nullfs_uninit(vfsp)
struct vfsconf *vfsp;
{
- if (null_node_hashtbl)
+ if (null_node_hashtbl) {
+ lockdestroy(&null_hashlock);
free(null_node_hashtbl, M_NULLFSHASH);
+ }
return (0);
}
diff --git a/sys/fs/nwfs/nwfs_node.c b/sys/fs/nwfs/nwfs_node.c
index 2d34600..03d3e86 100644
--- a/sys/fs/nwfs/nwfs_node.c
+++ b/sys/fs/nwfs/nwfs_node.c
@@ -83,6 +83,7 @@ nwfs_hash_init(void) {
void
nwfs_hash_free(void) {
+ lockdestroy(&nwhashlock);
free(nwhashtbl, M_NWFSHASH);
}
diff --git a/sys/fs/nwfs/nwfs_vnops.c b/sys/fs/nwfs/nwfs_vnops.c
index e309785..e7c7fa6 100644
--- a/sys/fs/nwfs/nwfs_vnops.c
+++ b/sys/fs/nwfs/nwfs_vnops.c
@@ -46,6 +46,8 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
+#include <machine/mutex.h>
+
#include <netncp/ncp.h>
#include <netncp/ncp_conn.h>
#include <netncp/ncp_subr.h>
@@ -255,24 +257,24 @@ nwfs_close(ap)
if (vp->v_type == VDIR) return 0; /* nothing to do now */
error = 0;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (np->opened == 0) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return 0;
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
error = nwfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (np->opened == 0) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return 0;
}
if (--np->opened == 0) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
error = ncp_close_file(NWFSTOCONN(VTONWFS(vp)), &np->n_fh,
ap->a_p, ap->a_cred);
} else
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
np->n_atime = 0;
return (error);
}
diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c
index d1d6e31..9d34281 100644
--- a/sys/fs/unionfs/union_subr.c
+++ b/sys/fs/unionfs/union_subr.c
@@ -637,6 +637,7 @@ union_freevp(vp)
free(un->un_path, M_TEMP);
un->un_path = NULL;
}
+ lockdestroy(&un->un_lock);
FREE(vp->v_data, M_TEMP);
vp->v_data = 0;
diff --git a/sys/gnu/ext2fs/ext2_ihash.c b/sys/gnu/ext2fs/ext2_ihash.c
index 36176f0..9153d7a 100644
--- a/sys/gnu/ext2fs/ext2_ihash.c
+++ b/sys/gnu/ext2fs/ext2_ihash.c
@@ -42,6 +42,8 @@
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
@@ -108,7 +110,7 @@ loop:
for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&ufs_ihash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
@@ -130,7 +132,7 @@ ufs_ihashins(ip)
struct ihashhead *ipp;
/* lock the inode, then put it on the appropriate hash list */
- lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct simplelock *)0, p);
+ lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
simple_lock(&ufs_ihash_slock);
ipp = INOHASH(ip->i_dev, ip->i_number);
diff --git a/sys/gnu/ext2fs/ext2_vfsops.c b/sys/gnu/ext2fs/ext2_vfsops.c
index fc57087..5cc49e0 100644
--- a/sys/gnu/ext2fs/ext2_vfsops.c
+++ b/sys/gnu/ext2fs/ext2_vfsops.c
@@ -57,6 +57,8 @@
#include <sys/malloc.h>
#include <sys/stat.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
@@ -575,7 +577,7 @@ loop:
/*
* Step 5: invalidate all cached file data.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&mntvnode_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
@@ -922,14 +924,14 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
nvp = vp->v_mntvnodes.le_next;
ip = VTOI(vp);
if (vp->v_type == VNON ||
((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
simple_unlock(&mntvnode_slock);
diff --git a/sys/gnu/fs/ext2fs/ext2_vfsops.c b/sys/gnu/fs/ext2fs/ext2_vfsops.c
index fc57087..5cc49e0 100644
--- a/sys/gnu/fs/ext2fs/ext2_vfsops.c
+++ b/sys/gnu/fs/ext2fs/ext2_vfsops.c
@@ -57,6 +57,8 @@
#include <sys/malloc.h>
#include <sys/stat.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
@@ -575,7 +577,7 @@ loop:
/*
* Step 5: invalidate all cached file data.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&mntvnode_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
@@ -922,14 +924,14 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
nvp = vp->v_mntvnodes.le_next;
ip = VTOI(vp);
if (vp->v_type == VNON ||
((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
simple_unlock(&mntvnode_slock);
diff --git a/sys/isofs/cd9660/cd9660_node.c b/sys/isofs/cd9660/cd9660_node.c
index 54bace3..500a1a6 100644
--- a/sys/isofs/cd9660/cd9660_node.c
+++ b/sys/isofs/cd9660/cd9660_node.c
@@ -49,6 +49,8 @@
#include <sys/malloc.h>
#include <sys/stat.h>
+#include <machine/mutex.h>
+
#include <isofs/cd9660/iso.h>
#include <isofs/cd9660/cd9660_node.h>
#include <isofs/cd9660/cd9660_mount.h>
@@ -108,7 +110,7 @@ loop:
for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&cd9660_ihash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
@@ -138,7 +140,7 @@ cd9660_ihashins(ip)
*ipp = ip;
simple_unlock(&cd9660_ihash_slock);
- lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct simplelock *)0, p);
+ lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
}
/*
@@ -218,6 +220,7 @@ cd9660_reclaim(ap)
vrele(ip->i_devvp);
ip->i_devvp = 0;
}
+ lockdestroy(&ip->i_vnode->v_lock);
FREE(vp->v_data, M_ISOFSNODE);
vp->v_data = NULL;
return (0);
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index dac862a..2123b11 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -63,6 +63,7 @@
#include <machine/elf.h>
#include <machine/md_var.h>
+#include <machine/mutex.h>
#define OLD_EI_BRAND 8
@@ -477,9 +478,9 @@ exec_elf_imgact(struct image_params *imgp)
* a context switch. Better safe than sorry; I really don't want
* the file to change while it's being loaded.
*/
- simple_lock(&imgp->vp->v_interlock);
+ mtx_enter(&imgp->vp->v_interlock, MTX_DEF);
imgp->vp->v_flag |= VTEXT;
- simple_unlock(&imgp->vp->v_interlock);
+ mtx_exit(&imgp->vp->v_interlock, MTX_DEF);
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1e10fbe..12c6870 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -46,6 +46,8 @@
#include <sys/lock.h>
#include <sys/systm.h>
+#include <machine/mutex.h>
+
/*
* Locking primitives implementation.
* Locks provide shared/exclusive sychronization.
@@ -111,11 +113,11 @@ apause(struct lock *lkp, int flags)
return 0;
#ifdef SMP
for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
if ((lkp->lk_flags & flags) == 0)
break;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if ((lkp->lk_flags & flags) == 0)
return 0;
}
@@ -127,6 +129,10 @@ static int
acquire(struct lock *lkp, int extflags, int wanted) {
int s, error;
+ CTR3(KTR_LOCKMGR,
+ "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
+ lkp, extflags, wanted);
+
if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
return EBUSY;
}
@@ -141,9 +147,9 @@ acquire(struct lock *lkp, int extflags, int wanted) {
while ((lkp->lk_flags & wanted) != 0) {
lkp->lk_flags |= LK_WAIT_NONZERO;
lkp->lk_waitcount++;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (lkp->lk_waitcount == 1) {
lkp->lk_flags &= ~LK_WAIT_NONZERO;
lkp->lk_waitcount = 0;
@@ -178,7 +184,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
struct lock *lkp;
u_int flags;
- struct simplelock *interlkp;
+ struct mtx *interlkp;
struct proc *p;
#ifdef DEBUG_LOCKS
const char *name; /* Name of lock function */
@@ -190,15 +196,19 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
pid_t pid;
int extflags;
+ CTR5(KTR_LOCKMGR,
+ "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
+ "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);
+
error = 0;
if (p == NULL)
pid = LK_KERNPROC;
else
pid = p->p_pid;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (flags & LK_INTERLOCK)
- simple_unlock(interlkp);
+ mtx_exit(interlkp, MTX_DEF);
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
@@ -424,7 +434,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
default:
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
/* NOTREACHED */
@@ -435,7 +445,7 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
return (error);
}
@@ -453,10 +463,10 @@ acquiredrain(struct lock *lkp, int extflags) {
while (lkp->lk_flags & LK_ALL) {
lkp->lk_flags |= LK_WAITDRAIN;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
error = tsleep(&lkp->lk_flags, lkp->lk_prio,
lkp->lk_wmesg, lkp->lk_timo);
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (error)
return error;
if (extflags & LK_SLEEPFAIL) {
@@ -477,9 +487,14 @@ lockinit(lkp, prio, wmesg, timo, flags)
int timo;
int flags;
{
+ CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
+ "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
- simple_lock_init(&lkp->lk_interlock);
- lkp->lk_flags = (flags & LK_EXTFLG_MASK);
+ if (lkp->lk_flags & LK_VALID)
+ lockdestroy(lkp);
+
+ mtx_init(&lkp->lk_interlock, "lockmgr interlock", MTX_DEF);
+ lkp->lk_flags = (flags & LK_EXTFLG_MASK) | LK_VALID;
lkp->lk_sharecount = 0;
lkp->lk_waitcount = 0;
lkp->lk_exclusivecount = 0;
@@ -490,6 +505,21 @@ lockinit(lkp, prio, wmesg, timo, flags)
}
/*
+ * Destroy a lock.
+ */
+void
+lockdestroy(lkp)
+ struct lock *lkp;
+{
+ CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
+ lkp, lkp->lk_wmesg);
+ if (lkp->lk_flags & LK_VALID) {
+ lkp->lk_flags &= ~LK_VALID;
+ mtx_destroy(&lkp->lk_interlock);
+ }
+}
+
+/*
* Determine the status of a lock.
*/
int
@@ -499,7 +529,7 @@ lockstatus(lkp, p)
{
int lock_type = 0;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
if (lkp->lk_exclusivecount != 0) {
if (p == NULL || lkp->lk_lockholder == p->p_pid)
lock_type = LK_EXCLUSIVE;
@@ -507,7 +537,7 @@ lockstatus(lkp, p)
lock_type = LK_EXCLOTHER;
} else if (lkp->lk_sharecount != 0)
lock_type = LK_SHARED;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
return (lock_type);
}
@@ -520,9 +550,9 @@ lockcount(lkp)
{
int count;
- simple_lock(&lkp->lk_interlock);
+ mtx_enter(&lkp->lk_interlock, MTX_DEF);
count = lkp->lk_exclusivecount + lkp->lk_sharecount;
- simple_unlock(&lkp->lk_interlock);
+ mtx_exit(&lkp->lk_interlock, MTX_DEF);
return (count);
}
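
The kern_lock.c changes are the core of the commit: struct lock's interlock is now a mutex, lockinit() tags initialized locks with LK_VALID (and re-initializing a still-valid lock destroys it first), and the new lockdestroy() clears LK_VALID and mtx_destroy()s the interlock. A short usage sketch with invented names (example_lock, "exlock"), showing the pairing every lockinit() caller is now expected to honour:

    #include <sys/param.h>
    #include <sys/lock.h>

    static struct lock example_lock;

    static void
    example_setup(void)
    {
            /* Sets LK_VALID and mtx_init()s lk_interlock. */
            lockinit(&example_lock, PVFS, "exlock", 0, 0);
    }

    static void
    example_teardown(void)
    {
            /*
             * Clears LK_VALID and mtx_destroy()s lk_interlock; a second
             * call is a no-op because LK_VALID gates the destruction.
             */
            lockdestroy(&example_lock);
    }
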
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 11e9183..d9e77cc 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -61,7 +61,7 @@ struct bio_ops bioops; /* I/O operation notification */
struct buf *buf; /* buffer header pool */
struct swqueue bswlist;
-struct simplelock buftimelock; /* Interlock on setting prio and timo */
+struct mtx buftimelock; /* Interlock on setting prio and timo */
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
vm_offset_t to);
@@ -290,7 +290,7 @@ bufinit(void)
TAILQ_INIT(&bswlist);
LIST_INIT(&invalhash);
- simple_lock_init(&buftimelock);
+ mtx_init(&buftimelock, "buftime lock", MTX_DEF);
for (i = 0; i <= bufhashmask; i++)
LIST_INIT(&bufhashtbl[i]);
diff --git a/sys/kern/vfs_conf.c b/sys/kern/vfs_conf.c
index 34d4afb..442ab60 100644
--- a/sys/kern/vfs_conf.c
+++ b/sys/kern/vfs_conf.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 01418f0..9bea916 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -53,6 +53,7 @@
#include <sys/poll.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -449,7 +450,7 @@ vop_nolock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- simple_unlock(&ap->a_vp->v_interlock);
+ mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
return (0);
#endif
}
@@ -471,7 +472,7 @@ vop_nounlock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK)
- simple_unlock(&ap->a_vp->v_interlock);
+ mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
return (0);
}
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index d7a9436..dc8f57d 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -145,7 +145,7 @@ SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
/* mounted fs */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
/* For any iteration/modification of mountlist */
-struct simplelock mountlist_slock;
+struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
struct simplelock mntvnode_slock;
/*
@@ -238,6 +238,7 @@ vntblinit()
{
desiredvnodes = maxproc + cnt.v_page_count / 4;
+ mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
@@ -260,7 +261,7 @@ int
vfs_busy(mp, flags, interlkp, p)
struct mount *mp;
int flags;
- struct simplelock *interlkp;
+ struct mtx *interlkp;
struct proc *p;
{
int lkflags;
@@ -270,7 +271,7 @@ vfs_busy(mp, flags, interlkp, p)
return (ENOENT);
mp->mnt_kern_flag |= MNTK_MWAIT;
if (interlkp) {
- simple_unlock(interlkp);
+ mtx_exit(interlkp, MTX_DEF);
}
/*
* Since all busy locks are shared except the exclusive
@@ -280,7 +281,7 @@ vfs_busy(mp, flags, interlkp, p)
*/
tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
if (interlkp) {
- simple_lock(interlkp);
+ mtx_enter(interlkp, MTX_DEF);
}
return (ENOENT);
}
@@ -384,15 +385,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (mp);
}
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return ((struct mount *) 0);
}
@@ -554,7 +555,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !simple_lock_try(&vp->v_interlock)) {
+ !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -564,7 +565,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -577,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
vn_finished_write(vnmp);
@@ -605,7 +606,7 @@ getnewvnode(tag, mp, vops, vpp)
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
- simple_lock_init(&vp->v_interlock);
+ mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
@@ -777,12 +778,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1423,10 +1424,10 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
@@ -1445,15 +1446,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
return (error);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1463,9 +1464,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount++;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -1480,14 +1481,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return;
}
@@ -1509,7 +1510,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
#endif
panic("vrele: negative ref cnt");
}
@@ -1527,7 +1528,7 @@ vput(vp)
KASSERT(vp != NULL, ("vput: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
@@ -1549,7 +1550,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,12 +1634,12 @@ loop:
if (vp == skipvp)
continue;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Skip over a vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
/*
@@ -1647,7 +1648,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
@@ -1683,7 +1684,7 @@ loop:
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
busy++;
}
simple_unlock(&mntvnode_slock);
@@ -1767,7 +1768,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1777,11 +1778,15 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
cache_purge(vp);
- vp->v_vnlock = NULL;
+ if (vp->v_vnlock) {
+ lockdestroy(vp->v_vnlock);
+ vp->v_vnlock = NULL;
+ }
+ lockdestroy(&vp->v_lock);
if (VSHOULDFREE(vp))
vfree(vp);
@@ -1822,7 +1827,7 @@ vop_revoke(ap)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
return (0);
}
@@ -1849,7 +1854,7 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount == 0) {
if (inter_lkp) {
simple_unlock(inter_lkp);
@@ -1857,7 +1862,7 @@ vrecycle(vp, inter_lkp, p)
vgonel(vp, p);
return (1);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1871,7 +1876,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vgonel(vp, p);
}
@@ -1891,7 +1896,7 @@ vgonel(vp, p)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vgone", 0);
return;
}
@@ -1900,7 +1905,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Delete from old mount point vnode list, if on one.
@@ -1943,7 +1948,7 @@ vgonel(vp, p)
}
vp->v_type = VBAD;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -2064,9 +2069,9 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2074,11 +2079,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
}
#endif
@@ -2183,9 +2188,9 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2211,11 +2216,11 @@ again:
simple_lock(&mntvnode_slock);
}
simple_unlock(&mntvnode_slock);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
@@ -2574,7 +2579,7 @@ loop:
continue;
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2586,7 +2591,7 @@ loop:
vput(vp);
}
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
}
if (anyio && (--tries > 0))
@@ -2838,14 +2843,14 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- simple_lock(&mountlist_slock);
- if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
- simple_unlock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
+ if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
vfs_unbusy(mp, p);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
asyncflag = mp->mnt_flag & MNT_ASYNC;
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 80cdc6d..bedad1b 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -65,6 +65,7 @@
#include <sys/extattr.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <miscfs/union/union.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -174,16 +175,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -241,15 +242,15 @@ mount(p, uap)
return (ENODEV);
}
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
/*
* Allocate and initialize the filesystem.
@@ -309,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vrele(vp);
return (error);
}
@@ -321,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- simple_unlock(&vp->v_interlock);
- simple_lock(&mountlist_slock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -336,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -461,9 +462,9 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
- lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
if (mp->mnt_flag & MNT_EXPUBLIC)
@@ -481,14 +482,14 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
mp->mnt_flag |= async_flag;
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
- &mountlist_slock, p);
+ &mountlist_mtx, p);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
return (error);
@@ -501,7 +502,8 @@ dounmount(mp, flags, p)
mp->mnt_vfc->vfc_refcount--;
if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
- lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
+ lockdestroy(&mp->mnt_lock);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
free((caddr_t)mp, M_MOUNT);
@@ -531,9 +533,9 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -547,11 +549,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -723,9 +725,9 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -739,7 +741,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -753,11 +755,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 34d4afb..442ab60 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -230,9 +230,9 @@ done:
} else {
/* register with list of mounted filesystems */
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index d7a9436..dc8f57d 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -145,7 +145,7 @@ SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
/* mounted fs */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
/* For any iteration/modification of mountlist */
-struct simplelock mountlist_slock;
+struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
struct simplelock mntvnode_slock;
/*
@@ -238,6 +238,7 @@ vntblinit()
{
desiredvnodes = maxproc + cnt.v_page_count / 4;
+ mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
@@ -260,7 +261,7 @@ int
vfs_busy(mp, flags, interlkp, p)
struct mount *mp;
int flags;
- struct simplelock *interlkp;
+ struct mtx *interlkp;
struct proc *p;
{
int lkflags;
@@ -270,7 +271,7 @@ vfs_busy(mp, flags, interlkp, p)
return (ENOENT);
mp->mnt_kern_flag |= MNTK_MWAIT;
if (interlkp) {
- simple_unlock(interlkp);
+ mtx_exit(interlkp, MTX_DEF);
}
/*
* Since all busy locks are shared except the exclusive
@@ -280,7 +281,7 @@ vfs_busy(mp, flags, interlkp, p)
*/
tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
if (interlkp) {
- simple_lock(interlkp);
+ mtx_enter(interlkp, MTX_DEF);
}
return (ENOENT);
}
@@ -384,15 +385,15 @@ vfs_getvfs(fsid)
{
register struct mount *mp;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (mp);
}
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return ((struct mount *) 0);
}
@@ -554,7 +555,7 @@ getnewvnode(tag, mp, vops, vpp)
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
(VOP_GETVOBJECT(vp, &object) == 0 &&
(object->resident_page_count || object->ref_count)) ||
- !simple_lock_try(&vp->v_interlock)) {
+ !mtx_try_enter(&vp->v_interlock, MTX_DEF)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
continue;
@@ -564,7 +565,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
break;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
}
@@ -577,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
if (vp->v_type != VBAD) {
vgonel(vp, p);
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
vn_finished_write(vnmp);
@@ -605,7 +606,7 @@ getnewvnode(tag, mp, vops, vpp)
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
- simple_lock_init(&vp->v_interlock);
+ mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
@@ -777,12 +778,12 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Destroy the copy in the VM cache, too.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
panic("vinvalbuf: flush failed");
@@ -1423,10 +1424,10 @@ vget(vp, flags, p)
* the VXLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
@@ -1445,15 +1446,15 @@ vget(vp, flags, p)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
return (error);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1463,9 +1464,9 @@ vget(vp, flags, p)
void
vref(struct vnode *vp)
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_usecount++;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -1480,14 +1481,14 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
if (vp->v_usecount > 1) {
vp->v_usecount--;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return;
}
@@ -1509,7 +1510,7 @@ vrele(vp)
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
#endif
panic("vrele: negative ref cnt");
}
@@ -1527,7 +1528,7 @@ vput(vp)
KASSERT(vp != NULL, ("vput: null vp"));
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
@@ -1549,7 +1550,7 @@ vput(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
VOP_INACTIVE(vp, p);
} else {
@@ -1633,12 +1634,12 @@ loop:
if (vp == skipvp)
continue;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Skip over a vnodes marked VSYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
/*
@@ -1647,7 +1648,7 @@ loop:
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
@@ -1683,7 +1684,7 @@ loop:
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
busy++;
}
simple_unlock(&mntvnode_slock);
@@ -1767,7 +1768,7 @@ vclean(vp, flags, p)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -1777,11 +1778,15 @@ vclean(vp, flags, p)
#endif
vfree(vp);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
cache_purge(vp);
- vp->v_vnlock = NULL;
+ if (vp->v_vnlock) {
+ lockdestroy(vp->v_vnlock);
+ vp->v_vnlock = NULL;
+ }
+ lockdestroy(&vp->v_lock);
if (VSHOULDFREE(vp))
vfree(vp);
@@ -1822,7 +1827,7 @@ vop_revoke(ap)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
return (0);
}
@@ -1849,7 +1854,7 @@ vrecycle(vp, inter_lkp, p)
struct proc *p;
{
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount == 0) {
if (inter_lkp) {
simple_unlock(inter_lkp);
@@ -1857,7 +1862,7 @@ vrecycle(vp, inter_lkp, p)
vgonel(vp, p);
return (1);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1871,7 +1876,7 @@ vgone(vp)
{
struct proc *p = curproc; /* XXX */
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vgonel(vp, p);
}
@@ -1891,7 +1896,7 @@ vgonel(vp, p)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vgone", 0);
return;
}
@@ -1900,7 +1905,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
/*
* Delete from old mount point vnode list, if on one.
@@ -1943,7 +1948,7 @@ vgonel(vp, p)
}
vp->v_type = VBAD;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
/*
@@ -2064,9 +2069,9 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
struct vnode *vp;
printf("Locked vnodes\n");
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2074,11 +2079,11 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
}
#endif
@@ -2183,9 +2188,9 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
return (SYSCTL_OUT(req, 0,
(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2211,11 +2216,11 @@ again:
simple_lock(&mntvnode_slock);
}
simple_unlock(&mntvnode_slock);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
@@ -2574,7 +2579,7 @@ loop:
continue;
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
@@ -2586,7 +2591,7 @@ loop:
vput(vp);
}
} else {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
}
}
if (anyio && (--tries > 0))
@@ -2838,14 +2843,14 @@ sync_fsync(ap)
* Walk the list of vnodes pushing all that are dirty and
* not already on the sync list.
*/
- simple_lock(&mountlist_slock);
- if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
- simple_unlock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
+ if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
vfs_unbusy(mp, p);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
return (0);
}
asyncflag = mp->mnt_flag & MNT_ASYNC;
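The hunks above replace the old mounted-filesystem list simple lock with the mountlist_mtx mutex while keeping the same vfs_busy()/vfs_unbusy() protocol around each mount point. Below is a minimal sketch of that traversal pattern, using only the interfaces visible in this diff (mtx_enter()/mtx_exit() with MTX_DEF, vfs_busy() taking the mutex as its interlock); do_one_mount() is a hypothetical placeholder for the per-filesystem work.

/*
 * Sketch: walk the mount list under mountlist_mtx, as sync(),
 * getfsstat() and sysctl_vnode() do after this change.  On success
 * vfs_busy() drops the mutex (LK_INTERLOCK semantics), so it is
 * re-entered before stepping to the next element; on failure with
 * LK_NOWAIT the mutex is still held and the busy mount is skipped.
 */
static void
walk_mounts(struct proc *p)
{
	struct mount *mp, *nmp;

	mtx_enter(&mountlist_mtx, MTX_DEF);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		do_one_mount(mp, p);		/* hypothetical; runs without the mutex */
		mtx_enter(&mountlist_mtx, MTX_DEF);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_exit(&mountlist_mtx, MTX_DEF);
}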
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 80cdc6d..bedad1b 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -65,6 +65,7 @@
#include <sys/extattr.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <miscfs/union/union.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -174,16 +175,16 @@ mount(p, uap)
vput(vp);
return (EBUSY);
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vfs_unbusy(mp, p);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_flag |= SCARG(uap, flags) &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, p);
@@ -241,15 +242,15 @@ mount(p, uap)
return (ENODEV);
}
}
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
/*
* Allocate and initialize the filesystem.
@@ -309,9 +310,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, p);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
vrele(vp);
return (error);
}
@@ -321,13 +322,13 @@ update:
*/
cache_purge(vp);
if (!error) {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
vp->v_mountedhere = mp;
- simple_unlock(&vp->v_interlock);
- simple_lock(&mountlist_slock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
checkdirs(vp);
VOP_UNLOCK(vp, 0, p);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
@@ -336,9 +337,9 @@ update:
if ((error = VFS_START(mp, 0, p)) != 0)
vrele(vp);
} else {
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
vp->v_flag &= ~VMOUNT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free((caddr_t)mp, M_MOUNT);
@@ -461,9 +462,9 @@ dounmount(mp, flags, p)
int error;
int async_flag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
- lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
vn_start_write(NULL, &mp, V_WAIT);
if (mp->mnt_flag & MNT_EXPUBLIC)
@@ -481,14 +482,14 @@ dounmount(mp, flags, p)
error = VFS_UNMOUNT(mp, flags, p);
}
vn_finished_write(mp);
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
if (error) {
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
(void) vfs_allocate_syncvnode(mp);
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
mp->mnt_flag |= async_flag;
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
- &mountlist_slock, p);
+ &mountlist_mtx, p);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
return (error);
@@ -501,7 +502,8 @@ dounmount(mp, flags, p)
mp->mnt_vfc->vfc_refcount--;
if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
- lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
+ lockdestroy(&mp->mnt_lock);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
free((caddr_t)mp, M_MOUNT);
@@ -531,9 +533,9 @@ sync(p, uap)
struct mount *mp, *nmp;
int asyncflag;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -547,11 +549,11 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
#if 0
/*
* XXX don't call vfs_bufstats() yet because that routine
@@ -723,9 +725,9 @@ getfsstat(p, uap)
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -739,7 +741,7 @@ getfsstat(p, uap)
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
@@ -753,11 +755,11 @@ getfsstat(p, uap)
sfsp += sizeof(*sp);
}
count++;
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
if (sfsp && count > maxcount)
p->p_retval[0] = maxcount;
else
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 427209f..9287443 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -54,6 +54,8 @@
#include <sys/ttycom.h>
#include <sys/conf.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
@@ -640,10 +642,10 @@ debug_vn_lock(vp, flags, p, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
error = ENOENT;
} else {
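The debug_vn_lock() hunk above shows the conversion that recurs throughout this patch: every simple_lock()/simple_unlock() on a vnode's v_interlock becomes mtx_enter()/mtx_exit() with MTX_DEF. A trimmed sketch of the resulting wait-for-VXLOCK idiom follows, assuming the usual vnode headers; the function name and wmesg string are illustrative only.

/*
 * Sketch: back off from a vnode that is being cleaned (VXLOCK set),
 * with v_interlock now a struct mtx rather than a simplelock.
 */
static int
wait_if_doomed(struct vnode *vp)
{
	mtx_enter(&vp->v_interlock, MTX_DEF);
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		mtx_exit(&vp->v_interlock, MTX_DEF);
		tsleep((caddr_t)vp, PINOD, "vwait", 0);
		return (ENOENT);
	}
	mtx_exit(&vp->v_interlock, MTX_DEF);
	return (0);
}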
diff --git a/sys/miscfs/deadfs/dead_vnops.c b/sys/miscfs/deadfs/dead_vnops.c
index f990733..ab6678f 100644
--- a/sys/miscfs/deadfs/dead_vnops.c
+++ b/sys/miscfs/deadfs/dead_vnops.c
@@ -41,6 +41,8 @@
#include <sys/vnode.h>
#include <sys/poll.h>
+#include <machine/mutex.h>
+
static int chkvnlock __P((struct vnode *));
/*
* Prototypes for dead operations on vnodes.
@@ -210,7 +212,7 @@ dead_lock(ap)
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
ap->a_flags &= ~LK_INTERLOCK;
}
if (!chkvnlock(vp))
diff --git a/sys/miscfs/nullfs/null_subr.c b/sys/miscfs/nullfs/null_subr.c
index efb1357..b5df78c 100644
--- a/sys/miscfs/nullfs/null_subr.c
+++ b/sys/miscfs/nullfs/null_subr.c
@@ -92,8 +92,10 @@ nullfs_uninit(vfsp)
struct vfsconf *vfsp;
{
- if (null_node_hashtbl)
+ if (null_node_hashtbl) {
+ lockdestroy(&null_hashlock);
free(null_node_hashtbl, M_NULLFSHASH);
+ }
return (0);
}
diff --git a/sys/miscfs/union/union_subr.c b/sys/miscfs/union/union_subr.c
index d1d6e31..9d34281 100644
--- a/sys/miscfs/union/union_subr.c
+++ b/sys/miscfs/union/union_subr.c
@@ -637,6 +637,7 @@ union_freevp(vp)
free(un->un_path, M_TEMP);
un->un_path = NULL;
}
+ lockdestroy(&un->un_lock);
FREE(vp->v_data, M_TEMP);
vp->v_data = 0;
diff --git a/sys/msdosfs/msdosfs_denode.c b/sys/msdosfs/msdosfs_denode.c
index 4e237bf..0122f6b 100644
--- a/sys/msdosfs/msdosfs_denode.c
+++ b/sys/msdosfs/msdosfs_denode.c
@@ -61,6 +61,8 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
+#include <machine/mutex.h>
+
#include <msdosfs/bpb.h>
#include <msdosfs/msdosfsmount.h>
#include <msdosfs/direntry.h>
@@ -138,7 +140,7 @@ loop:
&& dev == dep->de_dev
&& dep->de_refcnt != 0) {
vp = DETOV(dep);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&dehash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
@@ -278,7 +280,7 @@ deget(pmp, dirclust, diroffset, depp)
* of at the start of msdosfs_hashins() so that reinsert() can
* call msdosfs_hashins() with a locked denode.
*/
- if (lockmgr(&ldep->de_lock, LK_EXCLUSIVE, (struct simplelock *)0, p))
+ if (lockmgr(&ldep->de_lock, LK_EXCLUSIVE, (struct mtx *)0, p))
panic("deget: unexpected lock failure");
/*
@@ -660,6 +662,7 @@ msdosfs_reclaim(ap)
#if 0 /* XXX */
dep->de_flag = 0;
#endif
+ lockdestroy(&dep->de_lock);
FREE(dep, M_MSDOSFSNODE);
vp->v_data = NULL;
diff --git a/sys/msdosfs/msdosfs_vfsops.c b/sys/msdosfs/msdosfs_vfsops.c
index 33b5cd7..8af88d4 100644
--- a/sys/msdosfs/msdosfs_vfsops.c
+++ b/sys/msdosfs/msdosfs_vfsops.c
@@ -62,6 +62,8 @@
#include <sys/malloc.h>
#include <sys/stat.h> /* defines ALLPERMS */
+#include <machine/mutex.h>
+
#include <msdosfs/bpb.h>
#include <msdosfs/bootsect.h>
#include <msdosfs/direntry.h>
@@ -873,14 +875,14 @@ loop:
if (vp->v_mount != mp)
goto loop;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
nvp = vp->v_mntvnodes.le_next;
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
simple_unlock(&mntvnode_slock);
diff --git a/sys/msdosfs/msdosfs_vnops.c b/sys/msdosfs/msdosfs_vnops.c
index f8426de..37a57ea 100644
--- a/sys/msdosfs/msdosfs_vnops.c
+++ b/sys/msdosfs/msdosfs_vnops.c
@@ -68,6 +68,8 @@
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
+#include <machine/mutex.h>
+
#include <msdosfs/bpb.h>
#include <msdosfs/direntry.h>
#include <msdosfs/denode.h>
@@ -231,12 +233,12 @@ msdosfs_close(ap)
struct denode *dep = VTODE(vp);
struct timespec ts;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount > 1) {
getnanotime(&ts);
DETIMES(dep, &ts, &ts, &ts);
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return 0;
}
diff --git a/sys/netncp/ncp_conn.c b/sys/netncp/ncp_conn.c
index 3cfc449..df86f99 100644
--- a/sys/netncp/ncp_conn.c
+++ b/sys/netncp/ncp_conn.c
@@ -79,6 +79,13 @@ ncp_conn_init(void) {
}
int
+ncp_conn_destroy(void) {
+ lockdestroy(&listlock);
+ lockdestroy(&lhlock);
+ return 0;
+}
+
+int
ncp_conn_locklist(int flags, struct proc *p){
return lockmgr(&listlock, flags | LK_CANRECURSE, 0, p);
}
@@ -231,6 +238,7 @@ ncp_conn_free(struct ncp_conn *ncp) {
* if signal is raised - how I do react ?
*/
lockmgr(&ncp->nc_lock, LK_DRAIN, 0, ncp->procp);
+ lockdestroy(&ncp->nc_lock);
while (ncp->nc_lwant) {
printf("lwant = %d\n", ncp->nc_lwant);
tsleep(&ncp->nc_lwant, PZERO,"ncpdr",2*hz);
diff --git a/sys/netncp/ncp_conn.h b/sys/netncp/ncp_conn.h
index 850e5e7..0171421 100644
--- a/sys/netncp/ncp_conn.h
+++ b/sys/netncp/ncp_conn.h
@@ -204,6 +204,7 @@ struct ncp_conn {
#define ncp_conn_invalidate(conn) {conn->flags |= NCPFL_INVALID;}
int ncp_conn_init(void);
+int ncp_conn_destroy(void);
int ncp_conn_alloc(struct proc *p,struct ucred *cred, struct ncp_conn **connid);
int ncp_conn_free(struct ncp_conn *conn);
int ncp_conn_access(struct ncp_conn *conn,struct ucred *cred,mode_t mode);
diff --git a/sys/netncp/ncp_subr.c b/sys/netncp/ncp_subr.c
index a14e3f0..4601690 100644
--- a/sys/netncp/ncp_subr.c
+++ b/sys/netncp/ncp_subr.c
@@ -124,6 +124,7 @@ ncp_done(void) {
ncp_conn_unlock(ncp,p);
}
ncp_conn_unlocklist(p);
+ ncp_conn_destroy();
}
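ncp_done() now calls the new ncp_conn_destroy() so that the connection-list locks set up in ncp_conn_init() are released at module teardown. The general rule this patch enforces — every lockinit() gets a matching lockdestroy() before the lock's storage goes away — is sketched below with hypothetical names (mymod_*); only lockinit()/lockdestroy() and their pairing come from the diff.

/*
 * Sketch: module-global lockmgr lock lifecycle.  The priority, wmesg
 * and flags passed to lockinit() here are illustrative.
 */
static struct lock mymod_listlock;

int
mymod_init(void)
{
	lockinit(&mymod_listlock, PZERO, "mymodl", 0, 0);
	return (0);
}

int
mymod_destroy(void)
{
	lockdestroy(&mymod_listlock);	/* undo the lockinit() above */
	return (0);
}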
diff --git a/sys/nfs/nfs_node.c b/sys/nfs/nfs_node.c
index 1de8739..3299627 100644
--- a/sys/nfs/nfs_node.c
+++ b/sys/nfs/nfs_node.c
@@ -300,6 +300,8 @@ nfs_reclaim(ap)
FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
}
+ lockdestroy(&np->n_rslock);
+
cache_purge(vp);
zfree(nfsnode_zone, vp->v_data);
vp->v_data = (void *)0;
diff --git a/sys/nfs/nfs_nqlease.c b/sys/nfs/nfs_nqlease.c
index fea4a99..3dccaef 100644
--- a/sys/nfs/nfs_nqlease.c
+++ b/sys/nfs/nfs_nqlease.c
@@ -65,6 +65,8 @@
#include <vm/vm_zone.h>
+#include <machine/mutex.h>
+
#include <netinet/in.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
@@ -1199,9 +1201,9 @@ nqnfs_lease_updatetime(deltat)
* Search the mount list for all nqnfs mounts and do their timer
* queues.
*/
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nxtmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
nxtmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -1215,11 +1217,11 @@ nqnfs_lease_updatetime(deltat)
}
}
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
nxtmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
}
#ifndef NFS_NOSERVER
diff --git a/sys/nfsclient/nfs_node.c b/sys/nfsclient/nfs_node.c
index 1de8739..3299627 100644
--- a/sys/nfsclient/nfs_node.c
+++ b/sys/nfsclient/nfs_node.c
@@ -300,6 +300,8 @@ nfs_reclaim(ap)
FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
}
+ lockdestroy(&np->n_rslock);
+
cache_purge(vp);
zfree(nfsnode_zone, vp->v_data);
vp->v_data = (void *)0;
diff --git a/sys/ntfs/ntfs_ihash.c b/sys/ntfs/ntfs_ihash.c
index 0deecff..cd2300d 100644
--- a/sys/ntfs/ntfs_ihash.c
+++ b/sys/ntfs/ntfs_ihash.c
@@ -75,6 +75,15 @@ ntfs_nthashinit()
}
/*
+ * Destroy inode hash table.
+ */
+void
+ntfs_nthashdestroy(void)
+{
+ lockdestroy(&ntfs_hashlock);
+}
+
+/*
* Use the device/inum pair to find the incore inode, and return a pointer
* to it. If it is in core, return it, even if it is locked.
*/
diff --git a/sys/ntfs/ntfs_ihash.h b/sys/ntfs/ntfs_ihash.h
index 7b7143f..a3f166f 100644
--- a/sys/ntfs/ntfs_ihash.h
+++ b/sys/ntfs/ntfs_ihash.h
@@ -30,6 +30,7 @@
extern struct lock ntfs_hashlock;
void ntfs_nthashinit __P((void));
+void ntfs_nthashdestroy __P((void));
struct ntnode *ntfs_nthashlookup __P((dev_t, ino_t));
struct ntnode *ntfs_nthashget __P((dev_t, ino_t));
void ntfs_nthashins __P((struct ntnode *));
diff --git a/sys/ntfs/ntfs_inode.h b/sys/ntfs/ntfs_inode.h
index a86d5f7..a865276 100644
--- a/sys/ntfs/ntfs_inode.h
+++ b/sys/ntfs/ntfs_inode.h
@@ -69,7 +69,7 @@ struct ntnode {
/* locking */
struct lock i_lock;
- struct simplelock i_interlock;
+ struct mtx i_interlock;
int i_usecount;
LIST_HEAD(,fnode) i_fnlist;
diff --git a/sys/ntfs/ntfs_subr.c b/sys/ntfs/ntfs_subr.c
index e8f5588..f060e5b 100644
--- a/sys/ntfs/ntfs_subr.c
+++ b/sys/ntfs/ntfs_subr.c
@@ -361,7 +361,7 @@ ntfs_ntget(ip)
dprintf(("ntfs_ntget: get ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount++;
LOCKMGR(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock);
@@ -410,7 +410,7 @@ ntfs_ntlookup(
/* init lock and lock the newborn ntnode */
lockinit(&ip->i_lock, PINOD, "ntnode", 0, LK_EXCLUSIVE);
- simple_lock_init(&ip->i_interlock);
+ mtx_init(&ip->i_interlock, "ntnode interlock", MTX_DEF);
ntfs_ntget(ip);
ntfs_nthashins(ip);
@@ -440,7 +440,7 @@ ntfs_ntput(ip)
dprintf(("ntfs_ntput: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount--;
#ifdef DIAGNOSTIC
@@ -464,6 +464,10 @@ ntfs_ntput(ip)
LIST_REMOVE(vap,va_list);
ntfs_freentvattr(vap);
}
+ mtx_exit(&ip->i_interlock, MTX_DEF);
+ mtx_destroy(&ip->i_interlock);
+ lockdestroy(&ip->i_lock);
+
FREE(ip, M_NTFSNTNODE);
} else {
LOCKMGR(&ip->i_lock, LK_RELEASE|LK_INTERLOCK, &ip->i_interlock);
@@ -477,9 +481,9 @@ void
ntfs_ntref(ip)
struct ntnode *ip;
{
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount++;
- simple_unlock(&ip->i_interlock);
+ mtx_exit(&ip->i_interlock, MTX_DEF);
dprintf(("ntfs_ntref: ino %d, usecount: %d\n",
ip->i_number, ip->i_usecount));
@@ -496,13 +500,13 @@ ntfs_ntrele(ip)
dprintf(("ntfs_ntrele: rele ntnode %d: %p, usecount: %d\n",
ip->i_number, ip, ip->i_usecount));
- simple_lock(&ip->i_interlock);
+ mtx_enter(&ip->i_interlock, MTX_DEF);
ip->i_usecount--;
if (ip->i_usecount < 0)
panic("ntfs_ntrele: ino: %d usecount: %d \n",
ip->i_number,ip->i_usecount);
- simple_unlock(&ip->i_interlock);
+ mtx_exit(&ip->i_interlock, MTX_DEF);
}
/*
@@ -771,6 +775,9 @@ ntfs_frele(
FREE(fp->f_attrname, M_TEMP);
if (fp->f_dirblbuf)
FREE(fp->f_dirblbuf, M_NTFSDIR);
+#ifdef __FreeBSD__
+ lockdestroy(&fp->f_lock);
+#endif
FREE(fp, M_NTFSFNODE);
ntfs_ntrele(ip);
}
@@ -1915,6 +1922,13 @@ ntfs_toupper_init()
ntfs_toupper_usecount = 0;
}
+void
+ntfs_toupper_destroy(void)
+{
+
+ lockdestroy(&ntfs_toupper_lock);
+}
+
/*
* if the ntfs_toupper_tab[] is filled already, just raise use count;
* otherwise read the data from the filesystem we are currently mounting
diff --git a/sys/ntfs/ntfs_subr.h b/sys/ntfs/ntfs_subr.h
index a0cda50..8f1480a 100644
--- a/sys/ntfs/ntfs_subr.h
+++ b/sys/ntfs/ntfs_subr.h
@@ -103,6 +103,7 @@ int ntfs_loadntnode __P(( struct ntfsmount *, struct ntnode * ));
int ntfs_writentvattr_plain __P((struct ntfsmount *, struct ntnode *, struct ntvattr *, off_t, size_t, void *, size_t *, struct uio *));
int ntfs_writeattr_plain __P((struct ntfsmount *, struct ntnode *, u_int32_t, char *, off_t, size_t, void *, size_t *, struct uio *));
void ntfs_toupper_init __P((void));
+void ntfs_toupper_destroy __P((void));
int ntfs_toupper_use __P((struct mount *, struct ntfsmount *));
void ntfs_toupper_unuse __P((void));
int ntfs_fget __P((struct ntfsmount *, struct ntnode *, int, char *, struct fnode **));
diff --git a/sys/ntfs/ntfs_vfsops.c b/sys/ntfs/ntfs_vfsops.c
index 77ac0d8..1b0b97a 100644
--- a/sys/ntfs/ntfs_vfsops.c
+++ b/sys/ntfs/ntfs_vfsops.c
@@ -196,9 +196,9 @@ ntfs_mountroot()
return (error);
}
- simple_lock(&mountlist_slock);
+ mtx_enter(&mountlist_mtx, MTX_DEF);
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
- simple_unlock(&mountlist_slock);
+ mtx_exit(&mountlist_mtx, MTX_DEF);
(void)ntfs_statfs(mp, &mp->mnt_stat, p);
vfs_unbusy(mp);
return (0);
@@ -222,6 +222,15 @@ ntfs_init (
return 0;
}
+static int
+ntfs_uninit (
+ struct vfsconf *vcp )
+{
+ ntfs_toupper_destroy();
+ ntfs_nthashdestroy();
+ return 0;
+}
+
#endif /* NetBSD */
static int
@@ -1006,7 +1015,7 @@ static struct vfsops ntfs_vfsops = {
ntfs_checkexp,
ntfs_vptofh,
ntfs_init,
- vfs_stduninit,
+ ntfs_uninit,
vfs_stdextattrctl,
};
VFS_SET(ntfs_vfsops, ntfs, 0);
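The ntfs changes above combine both halves of the patch for a per-node structure: the ntnode's i_interlock becomes a struct mtx (set up with mtx_init(), torn down with mtx_destroy()), its lockmgr lock gains a lockdestroy() before the node is freed, and ntfs_uninit() destroys the module-wide locks at unload. A trimmed sketch of the per-ntnode lifecycle, using only calls that appear in the ntfs_subr.c hunks; the wrapper functions are illustrative.

/*
 * Sketch: set up and tear down an ntnode's locks, as ntfs_ntlookup()
 * and the last-reference path of ntfs_ntput() now do.
 */
static void
ntnode_setup(struct ntnode *ip)
{
	lockinit(&ip->i_lock, PINOD, "ntnode", 0, LK_EXCLUSIVE);
	mtx_init(&ip->i_interlock, "ntnode interlock", MTX_DEF);
}

static void
ntnode_teardown(struct ntnode *ip)
{
	mtx_enter(&ip->i_interlock, MTX_DEF);
	/* ... drop the last reference, free cached attributes ... */
	mtx_exit(&ip->i_interlock, MTX_DEF);
	mtx_destroy(&ip->i_interlock);	/* undo mtx_init() */
	lockdestroy(&ip->i_lock);	/* undo lockinit() */
	FREE(ip, M_NTFSNTNODE);
}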
diff --git a/sys/nwfs/nwfs_node.c b/sys/nwfs/nwfs_node.c
index 2d34600..03d3e86 100644
--- a/sys/nwfs/nwfs_node.c
+++ b/sys/nwfs/nwfs_node.c
@@ -83,6 +83,7 @@ nwfs_hash_init(void) {
void
nwfs_hash_free(void) {
+ lockdestroy(&nwhashlock);
free(nwhashtbl, M_NWFSHASH);
}
diff --git a/sys/nwfs/nwfs_vnops.c b/sys/nwfs/nwfs_vnops.c
index e309785..e7c7fa6 100644
--- a/sys/nwfs/nwfs_vnops.c
+++ b/sys/nwfs/nwfs_vnops.c
@@ -46,6 +46,8 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
+#include <machine/mutex.h>
+
#include <netncp/ncp.h>
#include <netncp/ncp_conn.h>
#include <netncp/ncp_subr.h>
@@ -255,24 +257,24 @@ nwfs_close(ap)
if (vp->v_type == VDIR) return 0; /* nothing to do now */
error = 0;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (np->opened == 0) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return 0;
}
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
error = nwfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (np->opened == 0) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return 0;
}
if (--np->opened == 0) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
error = ncp_close_file(NWFSTOCONN(VTONWFS(vp)), &np->n_fh,
ap->a_p, ap->a_cred);
} else
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
np->n_atime = 0;
return (error);
}
diff --git a/sys/pci/agp.c b/sys/pci/agp.c
index 4848f01..5bf48a1 100644
--- a/sys/pci/agp.c
+++ b/sys/pci/agp.c
@@ -271,6 +271,7 @@ agp_generic_detach(device_t dev)
struct agp_softc *sc = device_get_softc(dev);
bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
lockmgr(&sc->as_lock, LK_DRAIN, 0, curproc);
+ lockdestroy(&sc->as_lock);
destroy_dev(sc->as_devnode);
agp_flush_cache();
return 0;
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index d469a04..94ae4c1 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -229,10 +229,12 @@ struct buf {
/*
* Buffer locking
*/
-extern struct simplelock buftimelock; /* Interlock on setting prio and timo */
+extern struct mtx buftimelock; /* Interlock on setting prio and timo */
extern char *buf_wmesg; /* Default buffer lock message */
#define BUF_WMESG "bufwait"
#include <sys/proc.h> /* XXX for curproc */
+#include <machine/mutex.h>
+
/*
* Initialize a lock.
*/
@@ -249,7 +251,7 @@ BUF_LOCK(struct buf *bp, int locktype)
int s, ret;
s = splbio();
- simple_lock(&buftimelock);
+ mtx_enter(&buftimelock, MTX_DEF);
locktype |= LK_INTERLOCK;
bp->b_lock.lk_wmesg = buf_wmesg;
bp->b_lock.lk_prio = PRIBIO + 4;
@@ -268,7 +270,7 @@ BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
int s, ret;
s = splbio();
- simple_lock(&buftimelock);
+ mtx_enter(&buftimelock, MTX_DEF);
locktype |= LK_INTERLOCK;
bp->b_lock.lk_wmesg = wmesg;
bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
@@ -296,8 +298,12 @@ BUF_UNLOCK(struct buf *bp)
* Free a buffer lock.
*/
#define BUF_LOCKFREE(bp) \
+do { \
if (BUF_REFCNT(bp) > 0) \
- panic("free locked buf")
+ panic("free locked buf"); \
+ lockdestroy(&(bp)->b_lock); \
+} while (0)
+
/*
* When initiating asynchronous I/O, change ownership of the lock to the
* kernel. Once done, the lock may legally released by biodone. The
@@ -423,6 +429,7 @@ buf_deallocate(struct buf *bp)
{
if (bioops.io_deallocate)
(*bioops.io_deallocate)(bp);
+ BUF_LOCKFREE(bp);
}
static __inline void
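Because BUF_LOCKFREE() now expands to two statements (the reference-count check plus the new lockdestroy() call), it is wrapped in the standard do { } while (0) idiom; without the wrapper the lockdestroy() would fall outside an unbraced if, and a following else would have no if to bind to. A small illustration of a call site that stays valid thanks to the wrapper; release_buf() is a hypothetical caller.

/*
 * Sketch: the do { } while (0) form lets the two-statement macro be
 * used as a single statement inside an if/else.
 */
static void
release_buf(struct buf *bp, int reclaiming)
{
	if (reclaiming)
		BUF_LOCKFREE(bp);	/* refcnt check + lockdestroy() */
	else
		BUF_UNLOCK(bp);
}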
diff --git a/sys/sys/ktr.h b/sys/sys/ktr.h
index d946772..7f1f770 100644
--- a/sys/sys/ktr.h
+++ b/sys/sys/ktr.h
@@ -146,6 +146,7 @@ extern struct ktr_entry ktr_buf[];
#endif
#endif
#ifndef _TR
+#include <sys/systm.h>
#define _TR() \
struct ktr_entry *_ktrptr; \
int _ktr_newidx, _ktr_saveidx; \
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index a296d8f..1e126b1 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -41,8 +41,8 @@
#ifndef _LOCK_H_
#define _LOCK_H_
-
#include <machine/lock.h>
+#include <machine/mutex.h>
/*
* The general lock structure. Provides for multiple shared locks,
@@ -50,7 +50,8 @@
* can be gained. The simple locks are defined in <machine/param.h>.
*/
struct lock {
- struct simplelock lk_interlock; /* lock on remaining fields */
+ struct mtx lk_interlock; /* lock on remaining fields */
+ struct mtxf lk_pad; /* padding to keep sizeof constant */
u_int lk_flags; /* see below */
int lk_sharecount; /* # of accepted shared locks */
int lk_waitcount; /* # of processes sleeping for lock */
@@ -132,13 +133,20 @@ struct lock {
#define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */
#define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */
#define LK_DRAINING 0x00004000 /* lock is being drained */
+#define LK_VALID 0x00008000 /*
+ * Lock is initialized. This is a
+ * temporary hack to support vfs
+ * layering.
+ */
/*
* Control flags
*
* Non-persistent external flags.
*/
-#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
- getting lk_interlock */
+#define LK_INTERLOCK 0x00010000 /*
+ * unlock passed mutex after getting
+ * lk_interlock
+ */
#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
#define LK_NOOBJ 0x00040000 /* vget: don't create object */
#define LK_THISLAYER 0x00080000 /* vn_lock: lock/unlock only current layer */
@@ -177,9 +185,11 @@ struct proc;
void lockinit __P((struct lock *, int prio, char *wmesg, int timo,
int flags));
+void lockdestroy __P((struct lock *));
+
#ifdef DEBUG_LOCKS
int debuglockmgr __P((struct lock *, u_int flags,
- struct simplelock *, struct proc *p,
+ struct mtx *, struct proc *p,
const char *,
const char *,
int));
@@ -188,7 +198,7 @@ int debuglockmgr __P((struct lock *, u_int flags,
"lockmgr", __FILE__, __LINE__)
#else
int lockmgr __P((struct lock *, u_int flags,
- struct simplelock *, struct proc *p));
+ struct mtx *, struct proc *p));
#endif
void lockmgr_printinfo __P((struct lock *));
int lockstatus __P((struct lock *, struct proc *));
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index a296d8f..1e126b1 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -41,8 +41,8 @@
#ifndef _LOCK_H_
#define _LOCK_H_
-
#include <machine/lock.h>
+#include <machine/mutex.h>
/*
* The general lock structure. Provides for multiple shared locks,
@@ -50,7 +50,8 @@
* can be gained. The simple locks are defined in <machine/param.h>.
*/
struct lock {
- struct simplelock lk_interlock; /* lock on remaining fields */
+ struct mtx lk_interlock; /* lock on remaining fields */
+ struct mtxf lk_pad; /* padding to keep sizeof constant */
u_int lk_flags; /* see below */
int lk_sharecount; /* # of accepted shared locks */
int lk_waitcount; /* # of processes sleeping for lock */
@@ -132,13 +133,20 @@ struct lock {
#define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */
#define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */
#define LK_DRAINING 0x00004000 /* lock is being drained */
+#define LK_VALID 0x00008000 /*
+ * Lock is initialized. This is a
+ * temporary hack to support vfs
+ * layering.
+ */
/*
* Control flags
*
* Non-persistent external flags.
*/
-#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
- getting lk_interlock */
+#define LK_INTERLOCK 0x00010000 /*
+ * unlock passed mutex after getting
+ * lk_interlock
+ */
#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
#define LK_NOOBJ 0x00040000 /* vget: don't create object */
#define LK_THISLAYER 0x00080000 /* vn_lock: lock/unlock only current layer */
@@ -177,9 +185,11 @@ struct proc;
void lockinit __P((struct lock *, int prio, char *wmesg, int timo,
int flags));
+void lockdestroy __P((struct lock *));
+
#ifdef DEBUG_LOCKS
int debuglockmgr __P((struct lock *, u_int flags,
- struct simplelock *, struct proc *p,
+ struct mtx *, struct proc *p,
const char *,
const char *,
int));
@@ -188,7 +198,7 @@ int debuglockmgr __P((struct lock *, u_int flags,
"lockmgr", __FILE__, __LINE__)
#else
int lockmgr __P((struct lock *, u_int flags,
- struct simplelock *, struct proc *p));
+ struct mtx *, struct proc *p));
#endif
void lockmgr_printinfo __P((struct lock *));
int lockstatus __P((struct lock *, struct proc *));
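Both copies of the lock header (sys/lock.h and sys/lockmgr.h carry the same content) now declare lockmgr() and debuglockmgr() with a struct mtx * interlock argument and add the lockdestroy() prototype. The LK_INTERLOCK handoff therefore works against a mutex: the caller enters the mutex, and lockmgr() releases it once the lock is acquired. A minimal sketch under those declarations; the wrapper function itself is illustrative.

/*
 * Sketch: hand a held mutex to lockmgr() via LK_INTERLOCK, mirroring
 * the vget()/VOP_LOCK() call sites converted elsewhere in this patch.
 */
static int
lock_with_interlock(struct lock *lkp, struct mtx *mtxp, struct proc *p)
{
	mtx_enter(mtxp, MTX_DEF);
	/* ... examine state protected by *mtxp ... */
	return (lockmgr(lkp, LK_EXCLUSIVE | LK_INTERLOCK, mtxp, p));
}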
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index c7c0e93..42b2f1c 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -411,7 +411,7 @@ int vfs_setpublicfs /* set publicly exported fs */
int vfs_lock __P((struct mount *)); /* lock a vfs */
void vfs_msync __P((struct mount *, int));
void vfs_unlock __P((struct mount *)); /* unlock a vfs */
-int vfs_busy __P((struct mount *, int, struct simplelock *, struct proc *));
+int vfs_busy __P((struct mount *, int, struct mtx *, struct proc *));
int vfs_export /* process mount export info */
__P((struct mount *, struct netexport *, struct export_args *));
struct netcred *vfs_export_lookup /* lookup host in fs export list */
@@ -428,7 +428,7 @@ void vfs_unmountall __P((void));
int vfs_register __P((struct vfsconf *));
int vfs_unregister __P((struct vfsconf *));
extern TAILQ_HEAD(mntlist, mount) mountlist; /* mounted filesystem list */
-extern struct simplelock mountlist_slock;
+extern struct mtx mountlist_mtx;
extern struct nfs_public nfs_pub;
/*
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 707df71..cbb2095 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -44,6 +44,7 @@
#include <sys/acl.h>
#include <machine/lock.h>
+#include <machine/mutex.h>
/*
* The vnode is the focus of all file activity in UNIX. There is a
@@ -82,7 +83,7 @@ struct namecache;
* v_freelist is locked by the global vnode_free_list simple lock.
* v_mntvnodes is locked by the global mntvnodes simple lock.
* v_flag, v_usecount, v_holdcount and v_writecount are
- * locked by the v_interlock simple lock.
+ * locked by the v_interlock mutex.
* v_pollinfo is locked by the lock contained inside it.
*/
struct vnode {
@@ -115,7 +116,7 @@ struct vnode {
daddr_t v_lasta; /* last allocation */
int v_clen; /* length of current cluster */
struct vm_object *v_object; /* Place to store VM object */
- struct simplelock v_interlock; /* lock on usecount and flag */
+ struct mtx v_interlock; /* lock on usecount and flag */
struct lock v_lock; /* used if fs don't have one */
struct lock *v_vnlock; /* pointer to vnode lock */
enum vtagtype v_tag; /* type of underlying data */
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index 6806734..ea6ef69 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -51,6 +51,8 @@
#include <sys/disklabel.h>
#include <sys/malloc.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
@@ -394,7 +396,7 @@ ffs_reload(mp, cred, p)
if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
vfs_object_create(devvp, p, p->p_ucred);
- simple_lock(&devvp->v_interlock);
+ mtx_enter(&devvp->v_interlock, MTX_DEF);
VOP_UNLOCK(devvp, LK_INTERLOCK, p);
}
@@ -469,7 +471,7 @@ loop:
/*
* Step 5: invalidate all cached file data.
*/
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&mntvnode_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
@@ -551,7 +553,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
vfs_object_create(devvp, p, cred);
- simple_lock(&devvp->v_interlock);
+ mtx_enter(&devvp->v_interlock, MTX_DEF);
VOP_UNLOCK(devvp, LK_INTERLOCK, p);
}
@@ -933,13 +935,13 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
nvp = vp->v_mntvnodes.le_next;
ip = VTOI(vp);
if (vp->v_type == VNON || ((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
continue;
}
if (vp->v_type != VCHR) {
@@ -957,7 +959,7 @@ loop:
simple_lock(&mntvnode_slock);
} else {
simple_unlock(&mntvnode_slock);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
UFS_UPDATE(vp, wait);
simple_lock(&mntvnode_slock);
}
diff --git a/sys/ufs/ufs/ufs_extattr.c b/sys/ufs/ufs/ufs_extattr.c
index 593dde1..da9ede2 100644
--- a/sys/ufs/ufs/ufs_extattr.c
+++ b/sys/ufs/ufs/ufs_extattr.c
@@ -855,4 +855,5 @@ ufs_extattr_vnode_inactive(struct vnode *vp, struct proc *p)
ufs_extattr_rm(vp, uele->uele_attrname, 0, p);
ufs_extattr_uepm_unlock(ump, p);
+ lockdestroy(&ump->um_extattr.uepm_lock);
}
diff --git a/sys/ufs/ufs/ufs_ihash.c b/sys/ufs/ufs/ufs_ihash.c
index 36176f0..9153d7a 100644
--- a/sys/ufs/ufs/ufs_ihash.c
+++ b/sys/ufs/ufs/ufs_ihash.c
@@ -42,6 +42,8 @@
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <machine/mutex.h>
+
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
@@ -108,7 +110,7 @@ loop:
for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&ufs_ihash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
@@ -130,7 +132,7 @@ ufs_ihashins(ip)
struct ihashhead *ipp;
/* lock the inode, then put it on the appropriate hash list */
- lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct simplelock *)0, p);
+ lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
simple_lock(&ufs_ihash_slock);
ipp = INOHASH(ip->i_dev, ip->i_number);
diff --git a/sys/ufs/ufs/ufs_inode.c b/sys/ufs/ufs/ufs_inode.c
index b700fd3..95b7b84 100644
--- a/sys/ufs/ufs/ufs_inode.c
+++ b/sys/ufs/ufs/ufs_inode.c
@@ -155,6 +155,7 @@ ufs_reclaim(ap)
}
}
#endif
+ lockdestroy(&vp->v_lock);
FREE(vp->v_data, VFSTOUFS(vp->v_mount)->um_malloctype);
vp->v_data = 0;
return (0);
diff --git a/sys/ufs/ufs/ufs_quota.c b/sys/ufs/ufs/ufs_quota.c
index 75a559e..10f93af 100644
--- a/sys/ufs/ufs/ufs_quota.c
+++ b/sys/ufs/ufs/ufs_quota.c
@@ -674,7 +674,7 @@ again:
nextvp = vp->v_mntvnodes.le_next;
if (vp->v_type == VNON)
continue;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
simple_unlock(&mntvnode_slock);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index fe8de37..3ac1038 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -60,6 +60,8 @@
#include <sys/event.h>
#include <sys/conf.h>
+#include <machine/mutex.h>
+
#include <vm/vm.h>
#include <vm/vm_extern.h>
@@ -282,10 +284,10 @@ ufs_close(ap)
{
register struct vnode *vp = ap->a_vp;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount > 1)
ufs_itimes(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (0);
}
@@ -1857,10 +1859,10 @@ ufsspec_close(ap)
{
struct vnode *vp = ap->a_vp;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount > 1)
ufs_itimes(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
}
@@ -1931,10 +1933,10 @@ ufsfifo_close(ap)
{
struct vnode *vp = ap->a_vp;
- simple_lock(&vp->v_interlock);
+ mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount > 1)
ufs_itimes(vp);
- simple_unlock(&vp->v_interlock);
+ mtx_exit(&vp->v_interlock, MTX_DEF);
return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 4eea821..e5b50d2 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -215,6 +215,7 @@ vmspace_free(vm)
vm_map_unlock(&vm->vm_map);
pmap_release(vmspace_pmap(vm));
+ vm_map_destroy(&vm->vm_map);
zfree(vmspace_zone, vm);
}
}
@@ -261,6 +262,13 @@ vm_map_init(map, min, max)
lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
+void
+vm_map_destroy(map)
+ struct vm_map *map;
+{
+ lockdestroy(&map->lock);
+}
+
/*
* vm_map_entry_dispose: [ internal use only ]
*
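vm_map.c gains vm_map_destroy(), so the lockmgr lock created in vm_map_init() is destroyed when a vmspace goes away. A trimmed sketch of the teardown order now used by vmspace_free() in the hunk above; only the wrapper function name is illustrative.

/*
 * Sketch: last-reference teardown of a vmspace after this change.
 */
static void
vmspace_teardown(struct vmspace *vm)
{
	vm_map_unlock(&vm->vm_map);
	pmap_release(vmspace_pmap(vm));
	vm_map_destroy(&vm->vm_map);	/* new: lockdestroy(&map->lock) */
	zfree(vmspace_zone, vm);
}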
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index f290b2c..d238488 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -203,6 +203,7 @@ struct vmspace {
* as unbraced elements in a higher level statement.
*/
+/* XXX This macro is not called anywhere, and (map)->ref_lock doesn't exist. */
#define vm_map_lock_drain_interlock(map) \
do { \
lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
@@ -290,15 +291,15 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
#define vm_map_set_recursive(map) \
do { \
- simple_lock(&(map)->lock.lk_interlock); \
+ mtx_enter(&(map)->lock.lk_interlock, MTX_DEF); \
(map)->lock.lk_flags |= LK_CANRECURSE; \
- simple_unlock(&(map)->lock.lk_interlock); \
+ mtx_exit(&(map)->lock.lk_interlock, MTX_DEF); \
} while(0)
#define vm_map_clear_recursive(map) \
do { \
- simple_lock(&(map)->lock.lk_interlock); \
+ mtx_enter(&(map)->lock.lk_interlock, MTX_DEF); \
(map)->lock.lk_flags &= ~LK_CANRECURSE; \
- simple_unlock(&(map)->lock.lk_interlock); \
+ mtx_exit(&(map)->lock.lk_interlock, MTX_DEF); \
} while(0)
/*
@@ -355,6 +356,7 @@ int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
+void vm_map_destroy __P((struct vm_map *));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_pindex_t *, vm_prot_t *, boolean_t *));
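Finally, the vm_map.h recursion-control macros now toggle LK_CANRECURSE under the lock's embedded mutex (lk_interlock) rather than a simplelock. A hedged sketch of how a caller brackets recursive map use with them; do_nested_map_work() is a hypothetical placeholder.

/*
 * Sketch: allow, then disallow, recursive acquisition of a map's lock
 * around work that may re-enter it.
 */
static int
with_map_recursion(vm_map_t map)
{
	int error;

	vm_map_set_recursive(map);	/* lk_flags |= LK_CANRECURSE under lk_interlock */
	error = do_nested_map_work(map);	/* hypothetical */
	vm_map_clear_recursive(map);
	return (error);
}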