path: root/sys/kern
author    jeff <jeff@FreeBSD.org>    2002-08-04 10:29:36 +0000
committer jeff <jeff@FreeBSD.org>    2002-08-04 10:29:36 +0000
commit 02517b6731ab2da44ce9b49260429744cf0114d5 (patch)
tree   e889f56910bf98cbee3be239655a9e4bbb928b2e /sys/kern
parent a03ca02ee99a255a2c14a75ff8eb488960b13ea7 (diff)
 - Replace v_flag with v_iflag and v_vflag
 - v_vflag is protected by the vnode lock and is used when synchronization
   with VOP calls is needed.
 - v_iflag is protected by interlock and is used for dealing with vnode
   management issues.  These flags include X/O LOCK, FREE, DOOMED, etc.
 - All accesses to v_iflag and v_vflag have either been locked or marked
   with mp_fixme's.
 - Many ASSERT_VOP_LOCKED calls have been added where the locking was not
   clear.
 - Many functions in vfs_subr.c were restructured to provide for stronger
   locking.

Idea stolen from:	BSD/OS
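The split gives each flag word its own locking domain, as a minimal C sketch shows.
The macros and flag names (VI_LOCK/VI_UNLOCK, ASSERT_VOP_LOCKED, VV_TEXT,
VI_DOOMED) are taken from the diff below; the two helper functions are
hypothetical, for illustration only, and are not part of this commit.

	/*
	 * Sketch of the two flag domains introduced by this commit.
	 */
	static void
	vnode_set_text(struct vnode *vp)
	{
		/* v_vflag: protected by the vnode (VOP) lock. */
		ASSERT_VOP_LOCKED(vp, "vnode_set_text");
		vp->v_vflag |= VV_TEXT;
	}

	static void
	vnode_mark_doomed(struct vnode *vp)
	{
		/* v_iflag: protected by the vnode interlock. */
		VI_LOCK(vp);
		vp->v_iflag |= VI_DOOMED;
		VI_UNLOCK(vp);
	}

Accesses that could not yet be locked this way are flagged with mp_fixme()
calls in the hunks below rather than silently left racy.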
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/imgact_aout.c     3
-rw-r--r--  sys/kern/imgact_elf.c     12
-rw-r--r--  sys/kern/kern_mac.c       12
-rw-r--r--  sys/kern/vfs_bio.c         8
-rw-r--r--  sys/kern/vfs_cache.c       6
-rw-r--r--  sys/kern/vfs_default.c     2
-rw-r--r--  sys/kern/vfs_extattr.c    11
-rw-r--r--  sys/kern/vfs_lookup.c      4
-rw-r--r--  sys/kern/vfs_mount.c      97
-rw-r--r--  sys/kern/vfs_subr.c      255
-rw-r--r--  sys/kern/vfs_syscalls.c   11
-rw-r--r--  sys/kern/vfs_vnops.c      12
12 files changed, 253 insertions, 180 deletions
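Much of the mechanical churn below converts tsleep() waits on
interlock-protected state into msleep() calls that atomically drop
VI_MTX(vp) while sleeping, closing the race between testing a field
such as v_numoutput and going to sleep.  A minimal sketch of the
resulting wait loop, assuming the VI_BWAIT flag and the msleep()
usage shown in the vfs_subr.c hunks; the function name and the
"vbwait" wmesg are hypothetical:

	static int
	wait_for_output(struct vnode *vp)
	{
		int error = 0;

		VI_LOCK(vp);
		while (vp->v_numoutput > 0) {
			/* Flag and sleep are atomic w.r.t. the interlock. */
			vp->v_iflag |= VI_BWAIT;
			error = msleep(&vp->v_numoutput, VI_MTX(vp),
			    PRIBIO + 1, "vbwait", 0);
			if (error)
				break;
		}
		VI_UNLOCK(vp);
		return (error);
	}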
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index b13837e..ba2f76b 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -240,7 +240,8 @@ exec_aout_imgact(imgp)
imgp->proc->p_sysent = &aout_sysvec;
/* Indicate that this file should not be modified */
- imgp->vp->v_flag |= VTEXT;
+ mp_fixme("Unlocked vflag access.");
+ imgp->vp->v_vflag |= VV_TEXT;
return (0);
}
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 1b35000..92e86af 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -500,10 +500,11 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
error = exec_map_first_page(imgp);
/*
* Also make certain that the interpreter stays the same, so set
- * its VTEXT flag, too.
+ * its VV_TEXT flag, too.
*/
if (error == 0)
- nd->ni_vp->v_flag |= VTEXT;
+ nd->ni_vp->v_vflag |= VV_TEXT;
+
VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
vm_object_reference(imgp->object);
@@ -628,10 +629,11 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
* VTEXT now since from here on out, there are places we can have
* a context switch. Better safe than sorry; I really don't want
* the file to change while it's being loaded.
+ *
+ * XXX We can't really set this flag safely without the vnode lock.
*/
- mtx_lock(&imgp->vp->v_interlock);
- imgp->vp->v_flag |= VTEXT;
- mtx_unlock(&imgp->vp->v_interlock);
+ mp_fixme("This needs the vnode lock to be safe.");
+ imgp->vp->v_vflag |= VV_TEXT;
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
diff --git a/sys/kern/kern_mac.c b/sys/kern/kern_mac.c
index 43c03cc..6544f01 100644
--- a/sys/kern/kern_mac.c
+++ b/sys/kern/kern_mac.c
@@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
+ ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
- vp->v_flag |= VCACHEDLABEL;
+ vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
- vp->v_flag |= VCACHEDLABEL;
+ vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
- if (vp->v_flag & VCACHEDLABEL) {
+ if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
+ ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
- tvp->v_flag |= VCACHEDLABEL;
+ tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
- vp->v_flag |= VCACHEDLABEL;
+ vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 548e4fb..e0d623c 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2107,7 +2107,7 @@ inmem(struct vnode * vp, daddr_t blkno)
return 1;
if (vp->v_mount == NULL)
return 0;
- if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
+ if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_vflag & VV_OBJBUF) == 0)
return 0;
size = PAGE_SIZE;
@@ -2408,7 +2408,8 @@ loop:
bsize = size;
offset = blkno * bsize;
- vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
+ vmio = (VOP_GETVOBJECT(vp, NULL) == 0) &&
+ (vp->v_vflag & VV_OBJBUF);
maxsize = vmio ? size + (offset & PAGE_MASK) : size;
maxsize = imax(maxsize, bsize);
@@ -2912,11 +2913,12 @@ bufdone(struct buf *bp)
obj = bp->b_object;
#if defined(VFS_BIO_DEBUG)
+ mp_fixme("usecount and vflag accessed without locks.");
if (vp->v_usecount == 0) {
panic("biodone: zero vnode ref count");
}
- if ((vp->v_flag & VOBJBUF) == 0) {
+ if ((vp->v_vflag & VV_OBJBUF) == 0) {
panic("biodone: vnode is not setup for merged cache");
}
#endif
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index be79fc2..6e68c2f 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -726,8 +726,9 @@ __getcwd(td, uap)
fdp = td->td_proc->p_fd;
slash_prefixed = 0;
FILEDESC_LOCK(fdp);
+ mp_fixme("No vnode locking done!");
for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
- if (vp->v_flag & VROOT) {
+ if (vp->v_vflag & VV_ROOT) {
if (vp->v_mount == NULL) { /* forced unmount */
FILEDESC_UNLOCK(fdp);
free(buf, M_TEMP);
@@ -827,6 +828,7 @@ vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
return (ENODEV);
if (vn == NULL)
return (EINVAL);
+ ASSERT_VOP_LOCKED(vp, "vn_fullpath");
buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
bp = buf + MAXPATHLEN - 1;
*bp = '\0';
@@ -834,7 +836,7 @@ vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
slash_prefixed = 0;
FILEDESC_LOCK(fdp);
for (vp = vn; vp != fdp->fd_rdir && vp != rootvnode;) {
- if (vp->v_flag & VROOT) {
+ if (vp->v_vflag & VV_ROOT) {
if (vp->v_mount == NULL) { /* forced unmount */
FILEDESC_UNLOCK(fdp);
free(buf, M_TEMP);
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index abc8ebd..6ad13d5 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -595,7 +595,7 @@ retry:
}
KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
- vp->v_flag |= VOBJBUF;
+ vp->v_vflag |= VV_OBJBUF;
retn:
return (error);
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index f02f62f..197829b 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -1155,7 +1155,8 @@ restart:
*
* XXX: can this only be a VDIR case?
*/
- if (vp->v_flag & VROOT)
+ mp_fixme("Accessing vflags w/o the vn lock.");
+ if (vp->v_vflag & VV_ROOT)
error = EBUSY;
}
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
@@ -2778,7 +2779,7 @@ restart:
/*
* The root of a mounted filesystem cannot be deleted.
*/
- if (vp->v_flag & VROOT) {
+ if (vp->v_vflag & VV_ROOT) {
error = EBUSY;
goto out;
}
@@ -2939,7 +2940,8 @@ unionread:
return (error);
}
}
- if ((vp->v_flag & VROOT) &&
+ mp_fixme("Accessing vflags w/o vn lock.");
+ if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
@@ -3030,7 +3032,8 @@ unionread:
return (error);
}
}
- if ((vp->v_flag & VROOT) &&
+ mp_fixme("Accessing vflag without vn lock.");
+ if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index e8cabd5..db1b342 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -451,7 +451,7 @@ dirloop:
VREF(dp);
goto nextname;
}
- if ((dp->v_flag & VROOT) == 0 ||
+ if ((dp->v_vflag & VV_ROOT) == 0 ||
(cnp->cn_flags & NOCROSSMOUNT))
break;
if (dp->v_mount == NULL) { /* forced unmount */
@@ -485,7 +485,7 @@ unionlookup:
printf("not found\n");
#endif
if ((error == ENOENT) &&
- (dp->v_flag & VROOT) && (dp->v_mount != NULL) &&
+ (dp->v_vflag & VV_ROOT) && (dp->v_mount != NULL) &&
(dp->v_mount->mnt_flag & MNT_UNION)) {
tdp = dp;
dp = dp->v_mount->mnt_vnodecovered;
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 3dd4b06..ee77f0b 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -505,7 +505,7 @@ vfs_nmount(td, fsflags, fsoptions)
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
if (fsflags & MNT_UPDATE) {
- if ((vp->v_flag & VROOT) == 0) {
+ if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
error = EINVAL;
goto bad;
@@ -539,16 +539,17 @@ vfs_nmount(td, fsflags, fsoptions)
error = EBUSY;
goto bad;
}
- mtx_lock(&vp->v_interlock);
- if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) {
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_MOUNT) != 0 ||
+ vp->v_mountedhere != NULL) {
+ VI_UNLOCK(vp);
vfs_unbusy(mp, td);
vput(vp);
error = EBUSY;
goto bad;
}
- vp->v_flag |= VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ vp->v_iflag |= VI_MOUNT;
+ VI_UNLOCK(vp);
mp->mnt_flag |= fsflags &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, td);
@@ -616,16 +617,16 @@ vfs_nmount(td, fsflags, fsoptions)
goto bad;
}
}
- mtx_lock(&vp->v_interlock);
- if ((vp->v_flag & VMOUNT) != 0 ||
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
vput(vp);
error = EBUSY;
goto bad;
}
- vp->v_flag |= VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ vp->v_iflag |= VI_MOUNT;
+ VI_UNLOCK(vp);
/*
* Allocate and initialize the filesystem.
@@ -660,9 +661,9 @@ update:
if (mp->mnt_op->vfs_mount != NULL) {
printf("%s doesn't support the new mount syscall\n",
mp->mnt_vfc->vfc_name);
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
+ VI_UNLOCK(vp);
if (mp->mnt_flag & MNT_UPDATE)
vfs_unbusy(mp, td);
else {
@@ -722,9 +723,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, td);
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
+ VI_UNLOCK(vp);
vrele(vp);
return (error);
}
@@ -736,10 +737,10 @@ update:
if (!error) {
struct vnode *newdp;
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
vp->v_mountedhere = mp;
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mtx_unlock(&mountlist_mtx);
@@ -756,9 +757,9 @@ update:
goto bad;
}
} else {
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
+ VI_UNLOCK(vp);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, td);
#ifdef MAC
@@ -880,7 +881,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
if (fsflags & MNT_UPDATE) {
- if ((vp->v_flag & VROOT) == 0) {
+ if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
return (EINVAL);
}
@@ -911,15 +912,16 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
vput(vp);
return (EBUSY);
}
- mtx_lock(&vp->v_interlock);
- if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) {
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_MOUNT) != 0 ||
+ vp->v_mountedhere != NULL) {
+ VI_UNLOCK(vp);
vfs_unbusy(mp, td);
vput(vp);
return (EBUSY);
}
- vp->v_flag |= VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ vp->v_iflag |= VI_MOUNT;
+ VI_UNLOCK(vp);
mp->mnt_flag |= fsflags &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, td);
@@ -983,15 +985,15 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
return (ENODEV);
}
}
- mtx_lock(&vp->v_interlock);
- if ((vp->v_flag & VMOUNT) != 0 ||
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
vput(vp);
return (EBUSY);
}
- vp->v_flag |= VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ vp->v_iflag |= VI_MOUNT;
+ VI_UNLOCK(vp);
/*
* Allocate and initialize the filesystem.
@@ -1024,9 +1026,9 @@ update:
if (mp->mnt_op->vfs_mount == NULL) {
printf("%s doesn't support the old mount syscall\n",
mp->mnt_vfc->vfc_name);
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
+ VI_UNLOCK(vp);
if (mp->mnt_flag & MNT_UPDATE)
vfs_unbusy(mp, td);
else {
@@ -1075,9 +1077,9 @@ update:
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, td);
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
+ VI_UNLOCK(vp);
vrele(vp);
return (error);
}
@@ -1089,10 +1091,11 @@ update:
if (!error) {
struct vnode *newdp;
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
+ mp_fixme("Does interlock protect mounted here or not?");
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
vp->v_mountedhere = mp;
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mtx_unlock(&mountlist_mtx);
@@ -1107,9 +1110,9 @@ update:
if ((error = VFS_START(mp, 0, td)) != 0)
vrele(vp);
} else {
- mtx_lock(&vp->v_interlock);
- vp->v_flag &= ~VMOUNT;
- mtx_unlock(&vp->v_interlock);
+ VI_LOCK(vp);
+ vp->v_iflag &= ~VI_MOUNT;
+ VI_UNLOCK(vp);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, td);
#ifdef MAC
@@ -1226,7 +1229,7 @@ unmount(td, uap)
/*
* Must be the root of the filesystem
*/
- if ((vp->v_flag & VROOT) == 0) {
+ if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
return (EINVAL);
}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index adf38ee..1cc90ea 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -572,18 +572,16 @@ vlrureclaim(struct mount *mp, int count)
if (vp->v_type != VNON &&
vp->v_type != VBAD &&
- VMIGHTFREE(vp) && /* critical path opt */
- (vp->v_object == NULL || vp->v_object->resident_page_count < trigger) &&
- mtx_trylock(&vp->v_interlock)
- ) {
- mtx_unlock(&mntvnode_mtx);
- if (VMIGHTFREE(vp)) {
+ VI_TRYLOCK(vp)) {
+ if (VMIGHTFREE(vp) && /* critical path opt */
+ (vp->v_object == NULL ||
+ vp->v_object->resident_page_count < trigger)) {
+ mtx_unlock(&mntvnode_mtx);
vgonel(vp, curthread);
done++;
- } else {
- mtx_unlock(&vp->v_interlock);
- }
- mtx_lock(&mntvnode_mtx);
+ mtx_lock(&mntvnode_mtx);
+ } else
+ VI_UNLOCK(vp);
}
--count;
}
@@ -771,8 +769,9 @@ getnewvnode(tag, mp, vops, vpp)
}
}
if (vp) {
- vp->v_flag |= VDOOMED;
- vp->v_flag &= ~VFREE;
+ mp_fixme("Unlocked v_iflags access.\n");
+ vp->v_iflag |= VI_DOOMED;
+ vp->v_iflag &= ~VI_FREE;
freevnodes--;
mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
@@ -806,7 +805,8 @@ getnewvnode(tag, mp, vops, vpp)
#ifdef MAC
mac_destroy_vnode(vp);
#endif
- vp->v_flag = 0;
+ vp->v_iflag = 0;
+ vp->v_vflag = 0;
vp->v_lastw = 0;
vp->v_lasta = 0;
vp->v_cstart = 0;
@@ -893,13 +893,15 @@ vwakeup(bp)
bp->b_flags &= ~B_WRITEINPROG;
if ((vp = bp->b_vp)) {
+ VI_LOCK(vp);
vp->v_numoutput--;
if (vp->v_numoutput < 0)
panic("vwakeup: neg numoutput");
- if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
- vp->v_flag &= ~VBWAIT;
+ if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
+ vp->v_iflag &= ~VI_BWAIT;
wakeup(&vp->v_numoutput);
}
+ VI_UNLOCK(vp);
}
}
@@ -923,24 +925,33 @@ vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
if (flags & V_SAVE) {
s = splbio();
+ VI_LOCK(vp);
while (vp->v_numoutput) {
- vp->v_flag |= VBWAIT;
- error = tsleep(&vp->v_numoutput,
+ vp->v_iflag |= VI_BWAIT;
+ error = msleep(&vp->v_numoutput, VI_MTX(vp),
slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
if (error) {
+ VI_UNLOCK(vp);
splx(s);
return (error);
}
}
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
splx(s);
+ VI_UNLOCK(vp);
if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
return (error);
+ /*
+ * XXX We could save a lock/unlock if this was only
+ * enabled under INVARIANTS
+ */
+ VI_LOCK(vp);
s = splbio();
if (vp->v_numoutput > 0 ||
!TAILQ_EMPTY(&vp->v_dirtyblkhd))
panic("vinvalbuf: dirty bufs");
}
+ VI_UNLOCK(vp);
splx(s);
}
s = splbio();
@@ -969,28 +980,30 @@ vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
* have write I/O in-progress but if there is a VM object then the
* VM object can also have read-I/O in-progress.
*/
+ VI_LOCK(vp);
do {
while (vp->v_numoutput > 0) {
- vp->v_flag |= VBWAIT;
- tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
+ vp->v_iflag |= VI_BWAIT;
+ msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
}
+ VI_UNLOCK(vp);
if (VOP_GETVOBJECT(vp, &object) == 0) {
while (object->paging_in_progress)
vm_object_pip_sleep(object, "vnvlbx");
}
+ VI_LOCK(vp);
} while (vp->v_numoutput > 0);
+ VI_UNLOCK(vp);
splx(s);
/*
* Destroy the copy in the VM cache, too.
*/
- mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
- mtx_unlock(&vp->v_interlock);
if ((flags & (V_ALT | V_NORMAL)) == 0 &&
(!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
@@ -1156,12 +1169,13 @@ restartsync:
}
}
-
+
+ VI_LOCK(vp);
while (vp->v_numoutput > 0) {
- vp->v_flag |= VBWAIT;
- tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
+ vp->v_iflag |= VI_BWAIT;
+ msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
}
-
+ VI_UNLOCK(vp);
splx(s);
vnode_pager_setsize(vp, length);
@@ -1407,10 +1421,12 @@ brelvp(bp)
s = splbio();
if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
buf_vlist_remove(bp);
- if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
- vp->v_flag &= ~VONWORKLST;
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
+ vp->v_iflag &= ~VI_ONWORKLST;
LIST_REMOVE(vp, v_synclist);
}
+ VI_UNLOCK(vp);
splx(s);
bp->b_vp = (struct vnode *) 0;
vdrop(vp);
@@ -1427,17 +1443,19 @@ vn_syncer_add_to_worklist(struct vnode *vp, int delay)
int s, slot;
s = splbio();
+ mtx_assert(VI_MTX(vp), MA_OWNED);
- if (vp->v_flag & VONWORKLST) {
+ if (vp->v_iflag & VI_ONWORKLST)
LIST_REMOVE(vp, v_synclist);
- }
+ else
+ vp->v_iflag |= VI_ONWORKLST;
if (delay > syncer_maxdelay - 2)
delay = syncer_maxdelay - 2;
slot = (syncer_delayno + delay) & syncer_mask;
LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
- vp->v_flag |= VONWORKLST;
+
splx(s);
}
@@ -1509,7 +1527,9 @@ sched_sync(void)
* position and then add us back in at a later
* position.
*/
+ VI_LOCK(vp);
vn_syncer_add_to_worklist(vp, syncdelay);
+ VI_UNLOCK(vp);
}
splx(s);
}
@@ -1653,7 +1673,8 @@ reassignbuf(bp, newvp)
* of clean buffers.
*/
if (bp->b_flags & B_DELWRI) {
- if ((newvp->v_flag & VONWORKLST) == 0) {
+ VI_LOCK(newvp);
+ if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
switch (newvp->v_type) {
case VDIR:
delay = dirdelay;
@@ -1669,15 +1690,18 @@ reassignbuf(bp, newvp)
}
vn_syncer_add_to_worklist(newvp, delay);
}
+ VI_UNLOCK(newvp);
buf_vlist_add(bp, newvp, BX_VNDIRTY);
} else {
buf_vlist_add(bp, newvp, BX_VNCLEAN);
- if ((newvp->v_flag & VONWORKLST) &&
+ VI_LOCK(newvp);
+ if ((newvp->v_iflag & VI_ONWORKLST) &&
TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
- newvp->v_flag &= ~VONWORKLST;
+ newvp->v_iflag &= ~VI_ONWORKLST;
LIST_REMOVE(newvp, v_synclist);
}
+ VI_UNLOCK(newvp);
}
if (bp->b_vp != newvp) {
bp->b_vp = newvp;
@@ -1811,19 +1835,19 @@ vget(vp, flags, td)
* If the vnode is in the process of being cleaned out for
* another use, we wait for the cleaning to finish and then
* return failure. Cleaning is determined by checking that
- * the VXLOCK flag is set.
+ * the VI_XLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
- mtx_lock(&vp->v_interlock);
- if (vp->v_flag & VXLOCK) {
+ VI_LOCK(vp);
+ if (vp->v_iflag & VI_XLOCK) {
if (vp->v_vxproc == curthread) {
#if 0
/* this can now occur in normal operation */
log(LOG_INFO, "VXLOCK interlock avoided\n");
#endif
} else {
- vp->v_flag |= VXWANT;
- msleep(vp, &vp->v_interlock, PINOD | PDROP, "vget", 0);
+ vp->v_iflag |= VI_XWANT;
+ msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
return (ENOENT);
}
}
@@ -1842,17 +1866,17 @@ vget(vp, flags, td)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
- mtx_lock(&vp->v_interlock);
+ VI_LOCK(vp);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
}
return (error);
}
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
return (0);
}
@@ -1879,7 +1903,7 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
- mtx_lock(&vp->v_interlock);
+ VI_LOCK(vp);
/* Skip this v_writecount check if we're going to panic below. */
KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
@@ -1888,7 +1912,7 @@ vrele(vp)
if (vp->v_usecount > 1) {
vp->v_usecount--;
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
return;
}
@@ -1903,15 +1927,17 @@ vrele(vp)
*/
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0)
VOP_INACTIVE(vp, td);
+ VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
+ VI_UNLOCK(vp);
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
#endif
panic("vrele: negative ref cnt");
}
@@ -1949,12 +1975,14 @@ vput(vp)
* If we are doing a vput, the node is already locked,
* so we just need to release the vnode mutex.
*/
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
VOP_INACTIVE(vp, td);
+ VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
+ VI_UNLOCK(vp);
} else {
#ifdef DIAGNOSTIC
@@ -1975,8 +2003,10 @@ vhold(vp)
s = splbio();
vp->v_holdcnt++;
+ VI_LOCK(vp);
if (VSHOULDBUSY(vp))
vbusy(vp);
+ VI_UNLOCK(vp);
splx(s);
}
@@ -1994,10 +2024,12 @@ vdrop(vp)
if (vp->v_holdcnt <= 0)
panic("vdrop: holdcnt");
vp->v_holdcnt--;
+ VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
+ VI_UNLOCK(vp);
splx(s);
}
@@ -2012,7 +2044,7 @@ vdrop(vp)
* If WRITECLOSE is set, only flush out regular file vnodes open for
* writing.
*
- * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
+ * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
*
* `rootrefs' specifies the base reference count for the root vnode
* of this filesystem. The root vnode is considered busy if its
@@ -2061,12 +2093,12 @@ loop:
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
- mtx_lock(&vp->v_interlock);
+ VI_LOCK(vp);
/*
- * Skip over a vnodes marked VSYSTEM.
+ * Skip over a vnodes marked VV_SYSTEM.
*/
- if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
- mtx_unlock(&vp->v_interlock);
+ if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
+ VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
continue;
}
@@ -2075,6 +2107,7 @@ loop:
* files (even if open only for reading) and regular file
* vnodes open for writing.
*/
+ mp_fixme("Getattr called with interlock held!");
if ((flags & WRITECLOSE) &&
(vp->v_type == VNON ||
(VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 &&
@@ -2105,6 +2138,7 @@ loop:
vgonel(vp, td);
} else {
vclean(vp, 0, td);
+ VI_UNLOCK(vp);
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
@@ -2115,7 +2149,7 @@ loop:
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
busy++;
}
@@ -2173,6 +2207,7 @@ vclean(vp, flags, td)
{
int active;
+ mtx_assert(VI_MTX(vp), MA_OWNED);
/*
* Check to see if the vnode is in use. If so we have to reference it
* before we clean it out so that its count cannot fall to zero and
@@ -2185,9 +2220,9 @@ vclean(vp, flags, td)
* Prevent the vnode from being recycled or brought into use while we
* clean it out.
*/
- if (vp->v_flag & VXLOCK)
+ if (vp->v_iflag & VI_XLOCK)
panic("vclean: deadlock");
- vp->v_flag |= VXLOCK;
+ vp->v_iflag |= VI_XLOCK;
vp->v_vxproc = curthread;
/*
* Even if the count is zero, the VOP_INACTIVE routine may still
@@ -2241,7 +2276,7 @@ vclean(vp, flags, td)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
- mtx_lock(&vp->v_interlock);
+ VI_LOCK(vp);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@@ -2251,13 +2286,14 @@ vclean(vp, flags, td)
#endif
vfree(vp);
}
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
}
cache_purge(vp);
vp->v_vnlock = NULL;
lockdestroy(&vp->v_lock);
+ VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
@@ -2268,10 +2304,10 @@ vclean(vp, flags, td)
if (vp->v_pollinfo != NULL)
vn_pollgone(vp);
vp->v_tag = VT_NON;
- vp->v_flag &= ~VXLOCK;
+ vp->v_iflag &= ~VI_XLOCK;
vp->v_vxproc = NULL;
- if (vp->v_flag & VXWANT) {
- vp->v_flag &= ~VXWANT;
+ if (vp->v_iflag & VI_XWANT) {
+ vp->v_iflag &= ~VI_XWANT;
wakeup(vp);
}
}
@@ -2293,16 +2329,19 @@ vop_revoke(ap)
KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
vp = ap->a_vp;
+ VI_LOCK(vp);
/*
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
*/
- if (vp->v_flag & VXLOCK) {
- vp->v_flag |= VXWANT;
- msleep(vp, &vp->v_interlock, PINOD | PDROP,
+ if (vp->v_iflag & VI_XLOCK) {
+ vp->v_iflag |= VI_XWANT;
+ msleep(vp, VI_MTX(vp), PINOD | PDROP,
"vop_revokeall", 0);
+ VI_UNLOCK(vp);
return (0);
}
+ VI_UNLOCK(vp);
dev = vp->v_rdev;
for (;;) {
mtx_lock(&spechash_mtx);
@@ -2348,7 +2387,7 @@ vgone(vp)
{
struct thread *td = curthread; /* XXX */
- mtx_lock(&vp->v_interlock);
+ VI_LOCK(vp);
vgonel(vp, td);
}
@@ -2366,9 +2405,11 @@ vgonel(vp, td)
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
*/
- if (vp->v_flag & VXLOCK) {
- vp->v_flag |= VXWANT;
- msleep(vp, &vp->v_interlock, PINOD | PDROP, "vgone", 0);
+ mtx_assert(VI_MTX(vp), MA_OWNED);
+ if (vp->v_iflag & VI_XLOCK) {
+ vp->v_iflag |= VI_XWANT;
+ VI_UNLOCK(vp);
+ tsleep(vp, PINOD | PDROP, "vgone", 0);
return;
}
@@ -2376,7 +2417,7 @@ vgonel(vp, td)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, td);
- mtx_lock(&vp->v_interlock);
+ VI_UNLOCK(vp);
/*
* Delete from old mount point vnode list, if on one.
@@ -2405,21 +2446,23 @@ vgonel(vp, td)
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
*/
- if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
+ VI_LOCK(vp);
+ if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
s = splbio();
mtx_lock(&vnode_free_list_mtx);
- if (vp->v_flag & VFREE)
+ if (vp->v_iflag & VI_FREE) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- else
+ } else {
+ vp->v_iflag |= VI_FREE;
freevnodes++;
- vp->v_flag |= VFREE;
+ }
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
mtx_unlock(&vnode_free_list_mtx);
splx(s);
}
vp->v_type = VBAD;
- mtx_unlock(&vp->v_interlock);
+ VI_UNLOCK(vp);
}
/*
@@ -2499,24 +2542,24 @@ vprint(label, vp)
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
buf[0] = '\0';
- if (vp->v_flag & VROOT)
- strcat(buf, "|VROOT");
- if (vp->v_flag & VTEXT)
- strcat(buf, "|VTEXT");
- if (vp->v_flag & VSYSTEM)
- strcat(buf, "|VSYSTEM");
- if (vp->v_flag & VXLOCK)
- strcat(buf, "|VXLOCK");
- if (vp->v_flag & VXWANT)
- strcat(buf, "|VXWANT");
- if (vp->v_flag & VBWAIT)
- strcat(buf, "|VBWAIT");
- if (vp->v_flag & VDOOMED)
- strcat(buf, "|VDOOMED");
- if (vp->v_flag & VFREE)
- strcat(buf, "|VFREE");
- if (vp->v_flag & VOBJBUF)
- strcat(buf, "|VOBJBUF");
+ if (vp->v_vflag & VV_ROOT)
+ strcat(buf, "|VV_ROOT");
+ if (vp->v_vflag & VV_TEXT)
+ strcat(buf, "|VV_TEXT");
+ if (vp->v_vflag & VV_SYSTEM)
+ strcat(buf, "|VV_SYSTEM");
+ if (vp->v_iflag & VI_XLOCK)
+ strcat(buf, "|VI_XLOCK");
+ if (vp->v_iflag & VI_XWANT)
+ strcat(buf, "|VI_XWANT");
+ if (vp->v_iflag & VI_BWAIT)
+ strcat(buf, "|VI_BWAIT");
+ if (vp->v_iflag & VI_DOOMED)
+ strcat(buf, "|VI_DOOMED");
+ if (vp->v_iflag & VI_FREE)
+ strcat(buf, "|VI_FREE");
+ if (vp->v_vflag & VV_OBJBUF)
+ strcat(buf, "|VV_OBJBUF");
if (buf[0] != '\0')
printf(" flags (%s)", &buf[1]);
if (vp->v_data == NULL) {
@@ -2673,7 +2716,6 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
xvn[n].xv_size = sizeof *xvn;
xvn[n].xv_vnode = vp;
#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
- XV_COPY(flag);
XV_COPY(usecount);
XV_COPY(writecount);
XV_COPY(holdcnt);
@@ -2682,6 +2724,8 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
XV_COPY(numoutput);
XV_COPY(type);
#undef XV_COPY
+ xvn[n].xv_flag = vp->v_vflag;
+
switch (vp->v_type) {
case VREG:
case VDIR:
@@ -2801,13 +2845,14 @@ loop:
}
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
- if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */
+ mp_fixme("What locks do we need here?");
+ if (vp->v_iflag & VI_XLOCK) /* XXX: what if MNT_WAIT? */
continue;
- if (vp->v_flag & VNOSYNC) /* unlinked, skip it */
+ if (vp->v_vflag & VV_NOSYNC) /* unlinked, skip it */
continue;
- if ((vp->v_flag & VOBJDIRTY) &&
+ if ((vp->v_iflag & VI_OBJDIRTY) &&
(flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
mtx_unlock(&mntvnode_mtx);
if (!vget(vp,
@@ -2857,18 +2902,19 @@ vfree(vp)
{
int s;
+ mtx_assert(VI_MTX(vp), MA_OWNED);
s = splbio();
mtx_lock(&vnode_free_list_mtx);
- KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
- if (vp->v_flag & VAGE) {
+ KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
+ if (vp->v_iflag & VI_AGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
} else {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
mtx_unlock(&vnode_free_list_mtx);
- vp->v_flag &= ~VAGE;
- vp->v_flag |= VFREE;
+ vp->v_iflag &= ~VI_AGE;
+ vp->v_iflag |= VI_FREE;
splx(s);
}
@@ -2882,12 +2928,13 @@ vbusy(vp)
int s;
s = splbio();
+ mtx_assert(VI_MTX(vp), MA_OWNED);
mtx_lock(&vnode_free_list_mtx);
- KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
+ KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
mtx_unlock(&vnode_free_list_mtx);
- vp->v_flag &= ~(VFREE|VAGE);
+ vp->v_iflag &= ~(VI_FREE|VI_AGE);
splx(s);
}
@@ -3044,7 +3091,9 @@ vfs_allocate_syncvnode(mp)
}
next = start;
}
+ VI_LOCK(vp);
vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
+ VI_UNLOCK(vp);
mp->mnt_syncer = vp;
return (0);
}
@@ -3075,7 +3124,9 @@ sync_fsync(ap)
/*
* Move ourselves to the back of the sync list.
*/
+ VI_LOCK(syncvp);
vn_syncer_add_to_worklist(syncvp, syncdelay);
+ VI_UNLOCK(syncvp);
/*
* Walk the list of vnodes pushing all that are dirty and
@@ -3133,10 +3184,12 @@ sync_reclaim(ap)
s = splbio();
vp->v_mount->mnt_syncer = NULL;
- if (vp->v_flag & VONWORKLST) {
+ VI_LOCK(vp);
+ if (vp->v_iflag & VI_ONWORKLST) {
LIST_REMOVE(vp, v_synclist);
- vp->v_flag &= ~VONWORKLST;
+ vp->v_iflag &= ~VI_ONWORKLST;
}
+ VI_UNLOCK(vp);
splx(s);
return (0);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index f02f62f..197829b 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -1155,7 +1155,8 @@ restart:
*
* XXX: can this only be a VDIR case?
*/
- if (vp->v_flag & VROOT)
+ mp_fixme("Accessing vflags w/o the vn lock.");
+ if (vp->v_vflag & VV_ROOT)
error = EBUSY;
}
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
@@ -2778,7 +2779,7 @@ restart:
/*
* The root of a mounted filesystem cannot be deleted.
*/
- if (vp->v_flag & VROOT) {
+ if (vp->v_vflag & VV_ROOT) {
error = EBUSY;
goto out;
}
@@ -2939,7 +2940,8 @@ unionread:
return (error);
}
}
- if ((vp->v_flag & VROOT) &&
+ mp_fixme("Accessing vflags w/o vn lock.");
+ if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
@@ -3030,7 +3032,8 @@ unionread:
return (error);
}
}
- if ((vp->v_flag & VROOT) &&
+ mp_fixme("Accessing vflag without vn lock.");
+ if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 9a4ed2b..09dcf58 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -279,13 +279,15 @@ vn_writechk(vp)
register struct vnode *vp;
{
+ ASSERT_VOP_LOCKED(vp, "vn_writechk");
/*
* If there's shared text associated with
* the vnode, try to free it up once. If
* we fail, we can't allow writing.
*/
- if (vp->v_flag & VTEXT)
+ if (vp->v_vflag & VV_TEXT)
return (ETXTBSY);
+
return (0);
}
@@ -818,10 +820,10 @@ debug_vn_lock(vp, flags, td, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
- mtx_lock(&vp->v_interlock);
- if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
- vp->v_flag |= VXWANT;
- msleep(vp, &vp->v_interlock, PINOD | PDROP,
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_XLOCK) && vp->v_vxproc != curthread) {
+ vp->v_iflag |= VI_XWANT;
+ msleep(vp, VI_MTX(vp), PINOD | PDROP,
"vn_lock", 0);
error = ENOENT;
} else {