author     tanimura <tanimura@FreeBSD.org>  2000-12-13 10:04:01 +0000
committer  tanimura <tanimura@FreeBSD.org>  2000-12-13 10:04:01 +0000
commit     635b424e75b70075159860177e730f0baae40646 (patch)
tree       631c7dab4526ec4a3284c6b02cd0244b2f645452 /sys/ufs
parent     a8dbeb3f7b850f093f2892563882b3afcecd5e8e (diff)
download   FreeBSD-src-635b424e75b70075159860177e730f0baae40646.zip
           FreeBSD-src-635b424e75b70075159860177e730f0baae40646.tar.gz
Do not race for the lock of an inode hash.
Reviewed by: jhb
Diffstat (limited to 'sys/ufs')
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c | 48
-rw-r--r--  sys/ufs/ifs/ifs_vfsops.c | 48
2 files changed, 84 insertions(+), 12 deletions(-)
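The change replaces a bare tsleep() on the hand-rolled inode hash flag with an msleep() under a new mutex, so that testing, setting, and clearing the flag can no longer race with going to sleep, and wakeups are issued only after the mutex is dropped. As a rough illustration only, here is a minimal userland sketch of the same pattern using POSIX threads instead of the kernel mtx/msleep API; all names in it (hash_lock_acquire, hash_lock_release, worker) are invented for the sketch and do not appear in the diff.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hash_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  hash_cv  = PTHREAD_COND_INITIALIZER;
/* 0 = free, 1 = held, -1 = held with at least one waiter (as in the diff). */
static int inode_hash_lock;

static void
hash_lock_acquire(void)
{
	pthread_mutex_lock(&hash_mtx);
	while (inode_hash_lock != 0) {
		inode_hash_lock = -1;		/* record that someone is waiting */
		pthread_cond_wait(&hash_cv, &hash_mtx);
	}
	inode_hash_lock = 1;
	pthread_mutex_unlock(&hash_mtx);
}

static void
hash_lock_release(void)
{
	int want_wakeup;

	pthread_mutex_lock(&hash_mtx);
	want_wakeup = (inode_hash_lock < 0);
	inode_hash_lock = 0;
	pthread_mutex_unlock(&hash_mtx);
	if (want_wakeup)		/* wake sleepers only after dropping the mutex */
		pthread_cond_broadcast(&hash_cv);
}

static void *
worker(void *arg)
{
	hash_lock_acquire();
	printf("thread %ld: allocating under the hash flag\n", (long)arg);
	usleep(1000);			/* stands in for a blocking vnode allocation */
	hash_lock_release();
	return (NULL);
}

int
main(void)
{
	pthread_t tid[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return (0);
}

As in the diff, the release path records under the mutex whether anyone was waiting, drops the mutex, and only then issues the wakeup, so the woken threads do not immediately collide with a mutex still held by the waker.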
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index a0d3142..b103394 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1012,6 +1012,22 @@ loop:
* done by the calling routine.
*/
static int ffs_inode_hash_lock;
+/*
+ * ffs_inode_hash_lock manages mutual exclusion between vnode
+ * allocation and insertion into the hash, in particular to
+ * avoid holding more than one vnode for the same inode in the
+ * hash table. ffs_inode_hash_lock must hence be tested-and-set
+ * or cleared atomically, which ffs_inode_hash_mtx accomplishes.
+ *
+ * As vnode allocation may block in MALLOC() and zone
+ * allocation, we also msleep() to give up the CPU if anyone
+ * else is allocating a vnode. lockmgr is not suitable here
+ * because someone else may insert the vnode we are trying to
+ * allocate into the hash table during our sleep, in which case
+ * the hash table needs to be examined once again after waking
+ * up.
+ */
+static struct mtx ffs_inode_hash_mtx;
int
ffs_vget(mp, ino, vpp)
@@ -1025,7 +1041,7 @@ ffs_vget(mp, ino, vpp)
struct buf *bp;
struct vnode *vp;
dev_t dev;
- int error;
+ int error, want_wakeup;
ump = VFSTOUFS(mp);
dev = ump->um_dev;
@@ -1039,14 +1055,17 @@ restart:
* case getnewvnode() or MALLOC() blocks, otherwise a duplicate
* may occur!
*/
+ mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
if (ffs_inode_hash_lock) {
while (ffs_inode_hash_lock) {
ffs_inode_hash_lock = -1;
- tsleep(&ffs_inode_hash_lock, PVM, "ffsvgt", 0);
+ msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
}
+ mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
goto restart;
}
ffs_inode_hash_lock = 1;
+ mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
/*
* If this MALLOC() is performed after the getnewvnode()
@@ -1061,9 +1080,17 @@ restart:
/* Allocate a new vnode/inode. */
error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
if (error) {
- if (ffs_inode_hash_lock < 0)
- wakeup(&ffs_inode_hash_lock);
+ /*
+ * Do not wake up processes while holding the mutex;
+ * otherwise the woken-up processes would immediately
+ * block on the mutex again.
+ */
+ mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+ want_wakeup = ffs_inode_hash_lock < 0;
ffs_inode_hash_lock = 0;
+ mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+ if (want_wakeup)
+ wakeup(&ffs_inode_hash_lock);
*vpp = NULL;
FREE(ip, ump->um_malloctype);
return (error);
@@ -1094,9 +1121,17 @@ restart:
*/
ufs_ihashins(ip);
- if (ffs_inode_hash_lock < 0)
- wakeup(&ffs_inode_hash_lock);
+ /*
+ * Do not wake up processes while holding the mutex;
+ * otherwise the woken-up processes would immediately
+ * block on the mutex again.
+ */
+ mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+ want_wakeup = ffs_inode_hash_lock < 0;
ffs_inode_hash_lock = 0;
+ mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+ if (want_wakeup)
+ wakeup(&ffs_inode_hash_lock);
/* Read in the disk contents for the inode, copy into the inode. */
error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
@@ -1213,6 +1248,7 @@ ffs_init(vfsp)
{
softdep_initialize();
+ mtx_init(&ffs_inode_hash_mtx, "ffsvgt", MTX_DEF);
return (ufs_init(vfsp));
}
diff --git a/sys/ufs/ifs/ifs_vfsops.c b/sys/ufs/ifs/ifs_vfsops.c
index ec1abd9..80a4135 100644
--- a/sys/ufs/ifs/ifs_vfsops.c
+++ b/sys/ufs/ifs/ifs_vfsops.c
@@ -101,6 +101,7 @@ static int
ifs_init(vfsp)
struct vfsconf *vfsp;
{
+ mtx_init(&ifs_inode_hash_mtx, "ifsvgt", MTX_DEF);
return (ufs_init(vfsp));
}
@@ -132,6 +133,22 @@ ifs_mount(mp, path, data, ndp, p)
* done by the calling routine.
*/
static int ifs_inode_hash_lock;
+/*
+ * ifs_inode_hash_lock manages mutual exclusion between vnode
+ * allocation and insertion into the hash, in particular to
+ * avoid holding more than one vnode for the same inode in the
+ * hash table. ifs_inode_hash_lock must hence be tested-and-set
+ * or cleared atomically, which ifs_inode_hash_mtx accomplishes.
+ *
+ * As vnode allocation may block in MALLOC() and zone
+ * allocation, we also msleep() to give up the CPU if anyone
+ * else is allocating a vnode. lockmgr is not suitable here
+ * because someone else may insert the vnode we are trying to
+ * allocate into the hash table during our sleep, in which case
+ * the hash table needs to be examined once again after waking
+ * up.
+ */
+static struct mtx ifs_inode_hash_mtx;
int
ifs_vget(mp, ino, vpp)
@@ -145,7 +162,7 @@ ifs_vget(mp, ino, vpp)
struct buf *bp;
struct vnode *vp;
dev_t dev;
- int error;
+ int error, want_wakeup;
ump = VFSTOUFS(mp);
dev = ump->um_dev;
@@ -159,14 +176,17 @@ restart:
* case getnewvnode() or MALLOC() blocks, otherwise a duplicate
* may occur!
*/
+ mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
if (ifs_inode_hash_lock) {
while (ifs_inode_hash_lock) {
ifs_inode_hash_lock = -1;
- tsleep(&ifs_inode_hash_lock, PVM, "ifsvgt", 0);
+ msleep(&ifs_inode_hash_lock, &ifs_inode_hash_mtx, PVM, "ifsvgt", 0);
}
+ mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
goto restart;
}
ifs_inode_hash_lock = 1;
+ mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
/*
* If this MALLOC() is performed after the getnewvnode()
@@ -181,9 +201,17 @@ restart:
/* Allocate a new vnode/inode. */
error = getnewvnode(VT_UFS, mp, ifs_vnodeop_p, &vp);
if (error) {
- if (ifs_inode_hash_lock < 0)
- wakeup(&ifs_inode_hash_lock);
+ /*
+ * Do not wake up processes while holding the mutex;
+ * otherwise the woken-up processes would immediately
+ * block on the mutex again.
+ */
+ mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+ want_wakeup = ifs_inode_hash_lock < 0;
ifs_inode_hash_lock = 0;
+ mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+ if (want_wakeup)
+ wakeup(&ifs_inode_hash_lock);
*vpp = NULL;
FREE(ip, ump->um_malloctype);
return (error);
@@ -214,9 +242,17 @@ restart:
*/
ufs_ihashins(ip);
- if (ifs_inode_hash_lock < 0)
- wakeup(&ifs_inode_hash_lock);
+ /*
+ * Do not wake up processes while holding the mutex;
+ * otherwise the woken-up processes would immediately
+ * block on the mutex again.
+ */
+ mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+ want_wakeup = ifs_inode_hash_lock < 0;
ifs_inode_hash_lock = 0;
+ mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+ if (want_wakeup)
+ wakeup(&ifs_inode_hash_lock);
/* Read in the disk contents for the inode, copy into the inode. */
error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),