path: root/sys/kern/vfs_syscalls.c
author	phk <phk@FreeBSD.org>	1999-11-16 16:28:58 +0000
committer	phk <phk@FreeBSD.org>	1999-11-16 16:28:58 +0000
commit	ec4e24bd526aa8ffb6e20b093113ee415769c159 (patch)
tree	b7a1ee1fe863a0610766302fc19cec04808b654f	/sys/kern/vfs_syscalls.c
parent	f70f7d4cf02e8f20b01f3ca353ec7b587ebf64a1 (diff)
Commit the remaining part of PR14914:
A lot of the code in sys/kern directly accesses the *Q_HEAD and *Q_ENTRY
structures for list operations.  This patch makes all list operations in
sys/kern use the queue(3) macros, rather than directly accessing the
*Q_{HEAD,ENTRY} structures.

Reviewed by:	phk
Submitted by:	Jake Burkholder <jake@checker.org>
PR:		14914
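For readers unfamiliar with queue(3): the macros hide the generated lh_first/le_next
fields behind LIST_FOREACH, LIST_EMPTY, CIRCLEQ_FIRST, CIRCLEQ_NEXT, and friends,
which is exactly the substitution the hunks below perform.  A minimal userland sketch
(not part of this commit; struct item and struct itemhead are made-up names) showing
the old direct-access style next to the macro style:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Hypothetical element type, standing in for struct proc or struct vnode. */
struct item {
	int		 id;
	LIST_ENTRY(item) link;		/* expands to le_next/le_prev */
};

LIST_HEAD(itemhead, item);		/* expands to lh_first */

int
main(void)
{
	struct itemhead head = LIST_HEAD_INITIALIZER(head);
	struct item *ip;
	int i;

	for (i = 0; i < 3; i++) {
		ip = malloc(sizeof(*ip));
		ip->id = i;
		LIST_INSERT_HEAD(&head, ip, link);
	}

	/* Old style: walk the generated pointer fields directly. */
	for (ip = head.lh_first; ip != NULL; ip = ip->link.le_next)
		printf("raw:   %d\n", ip->id);

	/* New style: the same traversal through the queue(3) macros. */
	LIST_FOREACH(ip, &head, link)
		printf("macro: %d\n", ip->id);

	if (!LIST_EMPTY(&head))
		printf("list is not empty\n");
	return (0);
}

The same idea applies to the circular mountlist below, where CIRCLEQ_FIRST and
CIRCLEQ_NEXT replace the cqh_first and cqe_next accesses.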
Diffstat (limited to 'sys/kern/vfs_syscalls.c')
-rw-r--r--	sys/kern/vfs_syscalls.c	20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 19c9d90..b83e502 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -369,7 +369,7 @@ checkdirs(olddp)
return;
if (VFS_ROOT(olddp->v_mountedhere, &newdp))
panic("mount: lost mount");
- for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+ LIST_FOREACH(p, &allproc, p_list) {
fdp = p->p_fd;
if (fdp->fd_cdir == olddp) {
vrele(fdp->fd_cdir);
@@ -500,7 +500,7 @@ dounmount(mp, flags, p)
vrele(coveredvp);
}
mp->mnt_vfc->vfc_refcount--;
- if (mp->mnt_vnodelist.lh_first != NULL)
+ if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
if (mp->mnt_kern_flag & MNTK_MWAIT)
@@ -533,9 +533,10 @@ sync(p, uap)
int asyncflag;
simple_lock(&mountlist_slock);
- for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
+ mp = CIRCLEQ_FIRST(&mountlist);
+ for (; mp != (void *)&mountlist; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
- nmp = mp->mnt_list.cqe_next;
+ nmp = CIRCLEQ_NEXT(mp, mnt_list);
continue;
}
if ((mp->mnt_flag & MNT_RDONLY) == 0) {
@@ -547,7 +548,7 @@ sync(p, uap)
mp->mnt_flag |= asyncflag;
}
simple_lock(&mountlist_slock);
- nmp = mp->mnt_list.cqe_next;
+ nmp = CIRCLEQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
simple_unlock(&mountlist_slock);
@@ -717,9 +718,10 @@ getfsstat(p, uap)
sfsp = (caddr_t)SCARG(uap, buf);
count = 0;
simple_lock(&mountlist_slock);
- for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
+ mp = CIRCLEQ_FIRST(&mountlist);
+ for (; mp != (void *)&mountlist; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
- nmp = mp->mnt_list.cqe_next;
+ nmp = CIRCLEQ_NEXT(mp, mnt_list);
continue;
}
if (sfsp && count < maxcount) {
@@ -733,7 +735,7 @@ getfsstat(p, uap)
(SCARG(uap, flags) & MNT_WAIT)) &&
(error = VFS_STATFS(mp, sp, p))) {
simple_lock(&mountlist_slock);
- nmp = mp->mnt_list.cqe_next;
+ nmp = CIRCLEQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
continue;
}
@@ -747,7 +749,7 @@ getfsstat(p, uap)
}
count++;
simple_lock(&mountlist_slock);
- nmp = mp->mnt_list.cqe_next;
+ nmp = CIRCLEQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
}
simple_unlock(&mountlist_slock);