diff options
author | tjr <tjr@FreeBSD.org> | 2003-06-17 08:52:45 +0000 |
---|---|---|
committer | tjr <tjr@FreeBSD.org> | 2003-06-17 08:52:45 +0000 |
commit | 35c71928a03800d369738a09947e6bcb26d29b1a (patch) | |
tree | 2f94c3b734394e185413381cd31727106309db45 /sys/fs/nullfs/null_subr.c | |
parent | b73ccc7a1b62b75d6ab333fb0e8454d4f940cbda (diff) | |
download | FreeBSD-src-35c71928a03800d369738a09947e6bcb26d29b1a.zip FreeBSD-src-35c71928a03800d369738a09947e6bcb26d29b1a.tar.gz |
MFp4: Fix two bugs causing possible deadlocks or panics, and one nit:
- Emulate lock draining (LK_DRAIN) in null_lock() to avoid deadlocks
when the vnode is being recycled.
- Don't allow null_nodeget() to return a nullfs vnode from the wrong
mount when multiple nullfs's are mounted. It's unclear why these checks
were removed in null_subr.c 1.35, but they are definitely necessary.
Without the checks, trying to unmount a nullfs mount will erroneously
return EBUSY, and forcibly unmounting with -f will cause a panic.
- Bump LOG2_SIZEVNODE up to 8, since vnodes are >256 bytes now. The old
value (7) didn't cause any problems, but made the hash algorithm
suboptimal.
These changes fix nullfs enough that a parallel buildworld succeeds.
Submitted by: tegge (partially; LK_DRAIN)
Tested by: kris
Diffstat (limited to 'sys/fs/nullfs/null_subr.c')
-rw-r--r-- | sys/fs/nullfs/null_subr.c | 45 |
1 file changed, 36 insertions, 9 deletions
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c index f2a43e0..2120415 100644 --- a/sys/fs/nullfs/null_subr.c +++ b/sys/fs/nullfs/null_subr.c @@ -50,7 +50,7 @@ #include <fs/nullfs/null.h> -#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */ +#define LOG2_SIZEVNODE 8 /* log2(sizeof struct vnode) */ #define NNULLNODECACHE 16 /* @@ -71,8 +71,8 @@ struct mtx null_hashmtx; static MALLOC_DEFINE(M_NULLFSHASH, "NULLFS hash", "NULLFS hash table"); MALLOC_DEFINE(M_NULLFSNODE, "NULLFS node", "NULLFS vnode private part"); -static struct vnode * null_hashget(struct vnode *); -static struct vnode * null_hashins(struct null_node *); +static struct vnode * null_hashget(struct mount *, struct vnode *); +static struct vnode * null_hashins(struct mount *, struct null_node *); /* * Initialise cache headers @@ -103,7 +103,8 @@ nullfs_uninit(vfsp) * Lower vnode should be locked on entry and will be left locked on exit. */ static struct vnode * -null_hashget(lowervp) +null_hashget(mp, lowervp) + struct mount *mp; struct vnode *lowervp; { struct thread *td = curthread; /* XXX */ @@ -121,9 +122,20 @@ null_hashget(lowervp) loop: mtx_lock(&null_hashmtx); LIST_FOREACH(a, hd, null_hash) { - if (a->null_lowervp == lowervp) { + if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) { vp = NULLTOV(a); mtx_lock(&vp->v_interlock); + /* + * Don't block if nullfs vnode is being recycled. + * We already hold a lock on the lower vnode, thus + * waiting might deadlock against the thread + * recycling the nullfs vnode or another thread + * in vrele() waiting for the vnode lock. + */ + if ((vp->v_iflag & VI_XLOCK) != 0) { + VI_UNLOCK(vp); + continue; + } mtx_unlock(&null_hashmtx); /* * We need vget for the VXLOCK @@ -145,7 +157,8 @@ loop: * node found. 
*/ static struct vnode * -null_hashins(xp) +null_hashins(mp, xp) + struct mount *mp; struct null_node *xp; { struct thread *td = curthread; /* XXX */ @@ -157,9 +170,21 @@ null_hashins(xp) loop: mtx_lock(&null_hashmtx); LIST_FOREACH(oxp, hd, null_hash) { - if (oxp->null_lowervp == xp->null_lowervp) { + if (oxp->null_lowervp == xp->null_lowervp && + NULLTOV(oxp)->v_mount == mp) { ovp = NULLTOV(oxp); mtx_lock(&ovp->v_interlock); + /* + * Don't block if nullfs vnode is being recycled. + * We already hold a lock on the lower vnode, thus + * waiting might deadlock against the thread + * recycling the nullfs vnode or another thread + * in vrele() waiting for the vnode lock. + */ + if ((ovp->v_iflag & VI_XLOCK) != 0) { + VI_UNLOCK(ovp); + continue; + } mtx_unlock(&null_hashmtx); if (vget(ovp, LK_EXCLUSIVE | LK_THISLAYER | LK_INTERLOCK, td)) goto loop; @@ -192,7 +217,7 @@ null_nodeget(mp, lowervp, vpp) int error; /* Lookup the hash firstly */ - *vpp = null_hashget(lowervp); + *vpp = null_hashget(mp, lowervp); if (*vpp != NULL) { vrele(lowervp); return (0); @@ -222,6 +247,8 @@ null_nodeget(mp, lowervp, vpp) xp->null_vnode = vp; xp->null_lowervp = lowervp; + xp->null_pending_locks = 0; + xp->null_drain_wakeup = 0; vp->v_type = lowervp->v_type; vp->v_data = xp; @@ -244,7 +271,7 @@ null_nodeget(mp, lowervp, vpp) * Atomically insert our new node into the hash or vget existing * if someone else has beaten us to it. */ - *vpp = null_hashins(xp); + *vpp = null_hashins(mp, xp); if (*vpp != NULL) { vrele(lowervp); VOP_UNLOCK(vp, LK_THISLAYER, td); |