summaryrefslogtreecommitdiffstats
path: root/sys/fs/nullfs/null_vnops.c
diff options
context:
space:
mode:
author	tjr <tjr@FreeBSD.org>	2003-06-17 08:52:45 +0000
committer	tjr <tjr@FreeBSD.org>	2003-06-17 08:52:45 +0000
commit	35c71928a03800d369738a09947e6bcb26d29b1a (patch)
tree	2f94c3b734394e185413381cd31727106309db45	/sys/fs/nullfs/null_vnops.c
parent	b73ccc7a1b62b75d6ab333fb0e8454d4f940cbda (diff)
downloadFreeBSD-src-35c71928a03800d369738a09947e6bcb26d29b1a.zip
FreeBSD-src-35c71928a03800d369738a09947e6bcb26d29b1a.tar.gz
MFp4: Fix two bugs causing possible deadlocks or panics, and one nit:
- Emulate lock draining (LK_DRAIN) in null_lock() to avoid deadlocks when the vnode is being recycled.
- Don't allow null_nodeget() to return a nullfs vnode from the wrong mount when multiple nullfs's are mounted. It's unclear why these checks were removed in null_subr.c 1.35, but they are definitely necessary. Without the checks, trying to unmount a nullfs mount will erroneously return EBUSY, and forcibly unmounting with -f will cause a panic.
- Bump LOG2_SIZEVNODE up to 8, since vnodes are >256 bytes now. The old value (7) didn't cause any problems, but made the hash algorithm suboptimal.

These changes fix nullfs enough that a parallel buildworld succeeds.

Submitted by:	tegge (partially; LK_DRAIN)
Tested by:	kris
Diffstat (limited to 'sys/fs/nullfs/null_vnops.c')
-rw-r--r--	sys/fs/nullfs/null_vnops.c	61
1 file changed, 57 insertions, 4 deletions
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 588e18c..96d6f83 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -592,6 +592,7 @@ null_lock(ap)
struct thread *td = ap->a_td;
struct vnode *lvp;
int error;
+ struct null_node *nn;
if (flags & LK_THISLAYER) {
if (vp->v_vnlock != NULL) {
@@ -614,13 +615,65 @@ null_lock(ap)
* going away doesn't mean the struct lock below us is.
* LK_EXCLUSIVE is fine.
*/
+ if ((flags & LK_INTERLOCK) == 0) {
+ VI_LOCK(vp);
+ flags |= LK_INTERLOCK;
+ }
+ nn = VTONULL(vp);
if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
- return(lockmgr(vp->v_vnlock,
- (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
- &vp->v_interlock, td));
+ /*
+ * Emulate lock draining by waiting for all other
+ * pending locks to complete. Afterwards the
+ * lockmgr call might block, but no other threads
+ * will attempt to use this nullfs vnode due to the
+ * VI_XLOCK flag.
+ */
+ while (nn->null_pending_locks > 0) {
+ nn->null_drain_wakeup = 1;
+ msleep(&nn->null_pending_locks,
+ VI_MTX(vp),
+ PVFS,
+ "nuldr", 0);
+ }
+ error = lockmgr(vp->v_vnlock,
+ (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
+ VI_MTX(vp), td);
+ return error;
+ }
+ nn->null_pending_locks++;
+ error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
+ VI_LOCK(vp);
+ /*
+ * If we're called from vrele then v_usecount can have been 0
+ * and another process might have initiated a recycle
+ * operation. When that happens, just back out.
+ */
+ if (error == 0 && (vp->v_iflag & VI_XLOCK) != 0 &&
+ td != vp->v_vxproc) {
+ lockmgr(vp->v_vnlock,
+ (flags & ~LK_TYPE_MASK) | LK_RELEASE,
+ VI_MTX(vp), td);
+ VI_LOCK(vp);
+ error = ENOENT;
+ }
+ nn->null_pending_locks--;
+ /*
+ * Wakeup the process draining the vnode after all
+ * pending lock attempts has been failed.
+ */
+ if (nn->null_pending_locks == 0 &&
+ nn->null_drain_wakeup != 0) {
+ nn->null_drain_wakeup = 0;
+ wakeup(&nn->null_pending_locks);
+ }
+ if (error == ENOENT && (vp->v_iflag & VI_XLOCK) != 0 &&
+ vp->v_vxproc != curthread) {
+ vp->v_iflag |= VI_XWANT;
+ msleep(vp, VI_MTX(vp), PINOD, "nulbo", 0);
}
- return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td));
+ VI_UNLOCK(vp);
+ return error;
} else {
/*
* To prevent race conditions involving doing a lookup
OpenPOWER on IntegriCloud