-rw-r--r--  sys/coda/coda_subr.c              4
-rw-r--r--  sys/fs/coda/coda_subr.c           4
-rw-r--r--  sys/fs/msdosfs/msdosfs_vfsops.c   6
-rw-r--r--  sys/fs/nwfs/nwfs_vfsops.c         4
-rw-r--r--  sys/fs/smbfs/smbfs_vfsops.c       4
-rw-r--r--  sys/fs/unionfs/union_vfsops.c     2
-rw-r--r--  sys/gnu/ext2fs/ext2_vfsops.c      9
-rw-r--r--  sys/gnu/fs/ext2fs/ext2_vfsops.c   9
-rw-r--r--  sys/kern/vfs_extattr.c            3
-rw-r--r--  sys/kern/vfs_subr.c              23
-rw-r--r--  sys/kern/vfs_syscalls.c           3
-rw-r--r--  sys/nfsclient/nfs_subs.c          4
-rw-r--r--  sys/nfsclient/nfs_vfsops.c        4
-rw-r--r--  sys/sys/mount.h                   4
-rw-r--r--  sys/sys/vnode.h                   2
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c          8
-rw-r--r--  sys/ufs/ufs/ufs_quota.c          18
-rw-r--r--  usr.sbin/pstat/pstat.c            4
18 files changed, 60 insertions, 55 deletions
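
Every hunk in this change applies the same mechanical conversion: the per-mount vnode list moves from the queue(3) LIST macros (mnt_vnodelist, v_mntvnodes) to the TAILQ macros (mnt_nvnodelist, v_nmntvnodes), the head gains an explicit TAILQ_INIT wherever a struct mount is allocated, and each filesystem's scan of the list is rewritten with TAILQ_FIRST/TAILQ_NEXT. The sketch below is a minimal userland illustration of that traversal idiom, not kernel code: the stand-in struct vnode and struct mount carry only the fields involved here, and the mntvnode_mtx locking and per-vnode work that surround the loop in the real hunks are elided.

#include <sys/queue.h>
#include <stdio.h>

struct mount;

struct vnode {
	struct mount *v_mount;			/* mount this vnode belongs to */
	TAILQ_ENTRY(vnode) v_nmntvnodes;	/* linkage on that mount's list */
};

TAILQ_HEAD(vnodelst, vnode);

struct mount {
	struct vnodelst mnt_nvnodelist;		/* list of vnodes this mount */
};

int
main(void)
{
	struct mount m;
	struct vnode a, b, c, *vp, *nvp;

	TAILQ_INIT(&m.mnt_nvnodelist);
	a.v_mount = b.v_mount = c.v_mount = &m;
	TAILQ_INSERT_TAIL(&m.mnt_nvnodelist, &a, v_nmntvnodes);
	TAILQ_INSERT_TAIL(&m.mnt_nvnodelist, &b, v_nmntvnodes);
	TAILQ_INSERT_TAIL(&m.mnt_nvnodelist, &c, v_nmntvnodes);

loop:
	for (vp = TAILQ_FIRST(&m.mnt_nvnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode was reclaimed onto another mount while the
		 * list lock was dropped, restart from the head.
		 */
		if (vp->v_mount != &m)
			goto loop;
		/* Save the successor before the lock would be released. */
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
		/* ...per-vnode work happens here in the real routines... */
		printf("visiting vnode %p\n", (void *)vp);
	}
	return (0);
}

The saved nvp pointer and the v_mount check mirror the pattern repeated in each of the *_sync(), *_reload(), and quota routines below.
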
diff --git a/sys/coda/coda_subr.c b/sys/coda/coda_subr.c
index 21dc937..026d957 100644
--- a/sys/coda/coda_subr.c
+++ b/sys/coda/coda_subr.c
@@ -312,10 +312,10 @@ coda_checkunmounting(mp)
struct cnode *cp;
int count = 0, bad = 0;
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
cp = VTOC(vp);
count++;
if (!(cp->c_flags & C_UNMOUNTING)) {
diff --git a/sys/fs/coda/coda_subr.c b/sys/fs/coda/coda_subr.c
index 21dc937..026d957 100644
--- a/sys/fs/coda/coda_subr.c
+++ b/sys/fs/coda/coda_subr.c
@@ -312,10 +312,10 @@ coda_checkunmounting(mp)
struct cnode *cp;
int count = 0, bad = 0;
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
cp = VTOC(vp);
count++;
if (!(cp->c_flags & C_UNMOUNTING)) {
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
index cd1e404..ca5a9ca 100644
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -176,7 +176,7 @@ msdosfs_mountroot()
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
mp->mnt_op = &msdosfs_vfsops;
mp->mnt_flag = 0;
- LIST_INIT(&mp->mnt_vnodelist);
+ TAILQ_INIT(&mp->mnt_nvnodelist);
args.flags = 0;
args.uid = 0;
@@ -859,14 +859,14 @@ msdosfs_sync(mp, waitfor, cred, td)
*/
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
diff --git a/sys/fs/nwfs/nwfs_vfsops.c b/sys/fs/nwfs/nwfs_vfsops.c
index de5ae2f..119a163 100644
--- a/sys/fs/nwfs/nwfs_vfsops.c
+++ b/sys/fs/nwfs/nwfs_vfsops.c
@@ -479,7 +479,7 @@ nwfs_sync(mp, waitfor, cred, td)
*/
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist);
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
vp != NULL;
vp = nvp) {
/*
@@ -488,7 +488,7 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
diff --git a/sys/fs/smbfs/smbfs_vfsops.c b/sys/fs/smbfs/smbfs_vfsops.c
index b5196c0..d06f7bf 100644
--- a/sys/fs/smbfs/smbfs_vfsops.c
+++ b/sys/fs/smbfs/smbfs_vfsops.c
@@ -434,9 +434,9 @@ smbfs_sync(mp, waitfor, cred, p)
* Force stale buffer cache information to be flushed.
*/
loop:
- for (vp = mp->mnt_vnodelist.lh_first;
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
vp != NULL;
- vp = vp->v_mntvnodes.le_next) {
+ vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
diff --git a/sys/fs/unionfs/union_vfsops.c b/sys/fs/unionfs/union_vfsops.c
index 53c5ae6..7f3d5bd 100644
--- a/sys/fs/unionfs/union_vfsops.c
+++ b/sys/fs/unionfs/union_vfsops.c
@@ -333,7 +333,7 @@ union_unmount(mp, mntflags, td)
/* count #vnodes held on mount list */
mtx_lock(&mntvnode_mtx);
n = 0;
- LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
+ TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
n++;
mtx_unlock(&mntvnode_mtx);
diff --git a/sys/gnu/ext2fs/ext2_vfsops.c b/sys/gnu/ext2fs/ext2_vfsops.c
index 713a0cb..4d9ea28 100644
--- a/sys/gnu/ext2fs/ext2_vfsops.c
+++ b/sys/gnu/ext2fs/ext2_vfsops.c
@@ -141,6 +141,7 @@ ext2_mountroot()
}
mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
+ TAILQ_INIT(&mp->mnt_nvnodelist);
mp->mnt_op = &ext2fs_vfsops;
mp->mnt_flag = MNT_RDONLY;
if (error = ext2_mountfs(rootvp, mp, td)) {
@@ -568,12 +569,12 @@ ext2_reload(mountp, cred, td)
loop:
mtx_lock(&mntvnode_mtx);
- for (vp = LIST_FIRST(&mountp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mountp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
mtx_unlock(&mntvnode_mtx);
goto loop;
}
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
/*
* Step 4: invalidate all inactive vnodes.
@@ -926,14 +927,14 @@ ext2_sync(mp, waitfor, cred, td)
*/
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
ip = VTOI(vp);
diff --git a/sys/gnu/fs/ext2fs/ext2_vfsops.c b/sys/gnu/fs/ext2fs/ext2_vfsops.c
index 713a0cb..4d9ea28 100644
--- a/sys/gnu/fs/ext2fs/ext2_vfsops.c
+++ b/sys/gnu/fs/ext2fs/ext2_vfsops.c
@@ -141,6 +141,7 @@ ext2_mountroot()
}
mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
+ TAILQ_INIT(&mp->mnt_nvnodelist);
mp->mnt_op = &ext2fs_vfsops;
mp->mnt_flag = MNT_RDONLY;
if (error = ext2_mountfs(rootvp, mp, td)) {
@@ -568,12 +569,12 @@ ext2_reload(mountp, cred, td)
loop:
mtx_lock(&mntvnode_mtx);
- for (vp = LIST_FIRST(&mountp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mountp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
mtx_unlock(&mntvnode_mtx);
goto loop;
}
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
/*
* Step 4: invalidate all inactive vnodes.
@@ -926,14 +927,14 @@ ext2_sync(mp, waitfor, cred, td)
*/
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
ip = VTOI(vp);
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 949f5da..b260d55 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -319,6 +319,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
* Allocate and initialize the filesystem.
*/
mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&mp->mnt_nvnodelist);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
@@ -591,7 +592,7 @@ dounmount(mp, flags, td)
if ((coveredvp = mp->mnt_vnodecovered) != NULL)
coveredvp->v_mountedhere = NULL;
mp->mnt_vfc->vfc_refcount--;
- if (!LIST_EMPTY(&mp->mnt_vnodelist))
+ if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
panic("unmount: dangling vnode");
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
lockdestroy(&mp->mnt_lock);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 6c1c2f2..8a33fa6 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -345,7 +345,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
- LIST_INIT(&mp->mnt_vnodelist);
+ TAILQ_INIT(&mp->mnt_nvnodelist);
mp->mnt_vfc = vfsp;
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_flag = MNT_RDONLY;
@@ -700,7 +700,7 @@ insmntque(vp, mp)
* Delete from old mount point vnode list, if on one.
*/
if (vp->v_mount != NULL)
- LIST_REMOVE(vp, v_mntvnodes);
+ TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
/*
* Insert into list of vnodes for the new mount point, if available.
*/
@@ -708,7 +708,7 @@ insmntque(vp, mp)
mtx_unlock(&mntvnode_mtx);
return;
}
- LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
+ TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
}
@@ -1723,14 +1723,14 @@ vflush(mp, rootrefs, flags)
}
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
/*
* Make sure this vnode wasn't reclaimed in getnewvnode().
* Start over if it has (it won't be on the list anymore).
*/
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
@@ -2191,7 +2191,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
continue;
}
mtx_lock(&mntvnode_mtx);
- LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
+ TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
if (VOP_ISLOCKED(vp, NULL))
vprint((char *)0, vp);
}
@@ -2313,7 +2313,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
}
mtx_lock(&mntvnode_mtx);
again:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist);
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
vp != NULL;
vp = nvp) {
/*
@@ -2323,7 +2323,7 @@ again:
*/
if (vp->v_mount != mp)
goto again;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
@@ -2402,7 +2402,8 @@ vfs_unmountall()
* the mount point must be locked.
*/
void
-vfs_msync(struct mount *mp, int flags) {
+vfs_msync(struct mount *mp, int flags)
+{
struct vnode *vp, *nvp;
struct vm_object *obj;
int anyio, tries;
@@ -2413,9 +2414,9 @@ vfs_msync(struct mount *mp, int flags) {
loop:
anyio = 0;
mtx_lock(&mntvnode_mtx);
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
if (vp->v_mount != mp) {
mtx_unlock(&mntvnode_mtx);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 949f5da..b260d55 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -319,6 +319,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
* Allocate and initialize the filesystem.
*/
mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&mp->mnt_nvnodelist);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
@@ -591,7 +592,7 @@ dounmount(mp, flags, td)
if ((coveredvp = mp->mnt_vnodecovered) != NULL)
coveredvp->v_mountedhere = NULL;
mp->mnt_vfc->vfc_refcount--;
- if (!LIST_EMPTY(&mp->mnt_vnodelist))
+ if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
panic("unmount: dangling vnode");
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
lockdestroy(&mp->mnt_lock);
diff --git a/sys/nfsclient/nfs_subs.c b/sys/nfsclient/nfs_subs.c
index 4b427b5..e20f11f 100644
--- a/sys/nfsclient/nfs_subs.c
+++ b/sys/nfsclient/nfs_subs.c
@@ -788,10 +788,10 @@ nfs_clearcommit(struct mount *mp)
s = splbio();
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
if (vp->v_mount != mp) /* Paranoia */
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
if (BUF_REFCNT(bp) == 0 &&
diff --git a/sys/nfsclient/nfs_vfsops.c b/sys/nfsclient/nfs_vfsops.c
index 670a5aa..e80adad 100644
--- a/sys/nfsclient/nfs_vfsops.c
+++ b/sys/nfsclient/nfs_vfsops.c
@@ -968,7 +968,7 @@ nfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct thread *td)
*/
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist);
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
vp != NULL;
vp = vnp) {
/*
@@ -977,7 +977,7 @@ loop:
*/
if (vp->v_mount != mp)
goto loop;
- vnp = LIST_NEXT(vp, v_mntvnodes);
+ vnp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index f3975ca..23d26b8 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -110,7 +110,7 @@ struct statfs {
* array of operations and an instance record. The file systems are
* put on a doubly linked list.
*/
-LIST_HEAD(vnodelst, vnode);
+TAILQ_HEAD(vnodelst, vnode);
struct mount {
TAILQ_ENTRY(mount) mnt_list; /* mount list */
@@ -118,7 +118,7 @@ struct mount {
struct vfsconf *mnt_vfc; /* configuration info */
struct vnode *mnt_vnodecovered; /* vnode we mounted on */
struct vnode *mnt_syncer; /* syncer vnode */
- struct vnodelst mnt_vnodelist; /* list of vnodes this mount */
+ struct vnodelst mnt_nvnodelist; /* list of vnodes this mount */
struct lock mnt_lock; /* mount structure lock */
int mnt_writeopcount; /* write syscalls in progress */
int mnt_flag; /* flags shared with user */
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 37eab80..5986579 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -99,7 +99,7 @@ struct vnode {
struct mount *v_mount; /* ptr to vfs we are in */
vop_t **v_op; /* vnode operations vector */
TAILQ_ENTRY(vnode) v_freelist; /* vnode freelist */
- LIST_ENTRY(vnode) v_mntvnodes; /* vnodes for mount point */
+ TAILQ_ENTRY(vnode) v_nmntvnodes; /* vnodes for mount point */
struct buflists v_cleanblkhd; /* clean blocklist head */
struct buflists v_dirtyblkhd; /* dirty blocklist head */
LIST_ENTRY(vnode) v_synclist; /* vnodes with dirty buffers */
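
These declarations pair with the insmntque() hunk in sys/kern/vfs_subr.c above: LIST_REMOVE() needed only the element, while TAILQ_REMOVE() also takes the list head, and the insertion moves from the head of the list (LIST_INSERT_HEAD) to the tail (TAILQ_INSERT_TAIL), which a TAILQ provides in constant time. Below is a small self-contained sketch of those two maintenance operations; the stripped-down structures and the insmntque_sketch() helper are illustrative stand-ins, and the mntvnode_mtx locking of the real function is omitted.

#include <sys/queue.h>
#include <stddef.h>

struct mount;

struct vnode {
	struct mount *v_mount;			/* ptr to vfs we are in */
	TAILQ_ENTRY(vnode) v_nmntvnodes;	/* vnodes for mount point */
};

TAILQ_HEAD(vnodelst, vnode);

struct mount {
	struct vnodelst mnt_nvnodelist;		/* list of vnodes this mount */
};

/* Simplified analogue of insmntque(): move vp onto mp's vnode list. */
static void
insmntque_sketch(struct vnode *vp, struct mount *mp)
{
	/* Delete from the old mount's list; TAILQ_REMOVE needs the head. */
	if (vp->v_mount != NULL)
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
	vp->v_mount = mp;
	if (mp == NULL)
		return;
	/* Insert at the tail, as the patched insmntque() now does. */
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
}

int
main(void)
{
	struct mount m1, m2;
	struct vnode v = { NULL };

	TAILQ_INIT(&m1.mnt_nvnodelist);
	TAILQ_INIT(&m2.mnt_nvnodelist);
	insmntque_sketch(&v, &m1);	/* onto m1 */
	insmntque_sketch(&v, &m2);	/* off m1, onto m2 */
	insmntque_sketch(&v, NULL);	/* off any mount list */
	return (0);
}
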
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index 0f51888..ad4c24d 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -476,12 +476,12 @@ ffs_reload(mp, cred, td)
loop:
mtx_lock(&mntvnode_mtx);
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mp) {
mtx_unlock(&mntvnode_mtx);
goto loop;
}
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
/*
* Step 4: invalidate all inactive vnodes.
@@ -1008,14 +1008,14 @@ ffs_sync(mp, waitfor, cred, td)
}
mtx_lock(&mntvnode_mtx);
loop:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
- nvp = LIST_NEXT(vp, v_mntvnodes);
+ nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
diff --git a/sys/ufs/ufs/ufs_quota.c b/sys/ufs/ufs/ufs_quota.c
index f1a5835..7f8e6d8 100644
--- a/sys/ufs/ufs/ufs_quota.c
+++ b/sys/ufs/ufs/ufs_quota.c
@@ -439,10 +439,10 @@ quotaon(td, mp, type, fname)
*/
mtx_lock(&mntvnode_mtx);
again:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
goto again;
- nextvp = LIST_NEXT(vp, v_mntvnodes);
+ nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
@@ -460,7 +460,7 @@ again:
mtx_lock(&mntvnode_mtx);
if (error)
break;
- if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
+ if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
goto again;
}
mtx_unlock(&mntvnode_mtx);
@@ -495,10 +495,10 @@ quotaoff(td, mp, type)
*/
mtx_lock(&mntvnode_mtx);
again:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
goto again;
- nextvp = LIST_NEXT(vp, v_mntvnodes);
+ nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
@@ -517,7 +517,7 @@ again:
dqrele(vp, dq);
vput(vp);
mtx_lock(&mntvnode_mtx);
- if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
+ if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
goto again;
}
mtx_unlock(&mntvnode_mtx);
@@ -694,10 +694,10 @@ qsync(mp)
*/
mtx_lock(&mntvnode_mtx);
again:
- for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
+ for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
goto again;
- nextvp = LIST_NEXT(vp, v_mntvnodes);
+ nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
if (vp->v_type == VNON) {
@@ -719,7 +719,7 @@ again:
}
vput(vp);
mtx_lock(&mntvnode_mtx);
- if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
+ if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
goto again;
}
mtx_unlock(&mntvnode_mtx);
diff --git a/usr.sbin/pstat/pstat.c b/usr.sbin/pstat/pstat.c
index cb7e404..b0a5db9 100644
--- a/usr.sbin/pstat/pstat.c
+++ b/usr.sbin/pstat/pstat.c
@@ -693,10 +693,10 @@ kinfo_vnodes(avnodes)
for (num = 0, mp = TAILQ_FIRST(&mountlist); ; mp = mp_next) {
KGET2(mp, &mount, sizeof(mount), "mount entry");
mp_next = TAILQ_NEXT(&mount, mnt_list);
- for (vp = LIST_FIRST(&mount.mnt_vnodelist);
+ for (vp = TAILQ_FIRST(&mount.mnt_nvnodelist);
vp != NULL; vp = vp_next) {
KGET2(vp, &vnode, sizeof(vnode), "vnode");
- vp_next = LIST_NEXT(&vnode, v_mntvnodes);
+ vp_next = TAILQ_NEXT(&vnode, v_nmntvnodes);
if ((bp + VPTRSZ + VNODESZ) > evbuf)
/* XXX - should realloc */
errx(1, "no more room for vnodes");