diff options
-rw-r--r--	sys/kern/kern_lockf.c       | 16
-rw-r--r--	sys/kern/kern_proc.c        | 37
-rw-r--r--	sys/kern/subr_eventhandler.c | 2
-rw-r--r--	sys/kern/subr_rman.c        | 31
-rw-r--r--	sys/kern/vfs_export.c       | 37
-rw-r--r--	sys/kern/vfs_extattr.c      | 20
-rw-r--r--	sys/kern/vfs_subr.c         | 37
-rw-r--r--	sys/kern/vfs_syscalls.c     | 20
8 files changed, 99 insertions(+), 101 deletions(-)
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c index 27bb5c4..86855ea 100644 --- a/sys/kern/kern_lockf.c +++ b/sys/kern/kern_lockf.c @@ -362,8 +362,8 @@ lf_setlock(lock) overlap->lf_type == F_WRLCK) { lf_wakelock(overlap); } else { - while ((ltmp = overlap->lf_blkhd.tqh_first) != - NOLOCKF) { + while (!TAILQ_EMPTY(&overlap->lf_blkhd)) { + ltmp = TAILQ_FIRST(&overlap->lf_blkhd); TAILQ_REMOVE(&overlap->lf_blkhd, ltmp, lf_block); TAILQ_INSERT_TAIL(&lock->lf_blkhd, @@ -721,7 +721,8 @@ lf_wakelock(listhead) { register struct lockf *wakelock; - while ((wakelock = listhead->lf_blkhd.tqh_first) != NOLOCKF) { + while (!TAILQ_EMPTY(&listhead->lf_blkhd)) { + wakelock = TAILQ_FIRST(&listhead->lf_blkhd); TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block); wakelock->lf_next = NOLOCKF; #ifdef LOCKF_DEBUG @@ -756,8 +757,8 @@ lf_print(tag, lock) lock->lf_type == F_WRLCK ? "exclusive" : lock->lf_type == F_UNLCK ? "unlock" : "unknown", (long)lock->lf_start, (long)lock->lf_end); - if (lock->lf_blkhd.tqh_first) - printf(" block %p\n", (void *)lock->lf_blkhd.tqh_first); + if (!TAILQ_EMPTY(&lock->lf_blkhd)) + printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd)); else printf("\n"); } @@ -786,8 +787,7 @@ lf_printlist(tag, lock) lf->lf_type == F_WRLCK ? "exclusive" : lf->lf_type == F_UNLCK ? "unlock" : "unknown", (long)lf->lf_start, (long)lf->lf_end); - for (blk = lf->lf_blkhd.tqh_first; blk; - blk = blk->lf_block.tqe_next) { + TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) { printf("\n\t\tlock request %p for ", (void *)blk); if (blk->lf_flags & F_POSIX) printf("proc %ld", @@ -801,7 +801,7 @@ lf_printlist(tag, lock) blk->lf_type == F_UNLCK ? 
"unlock" : "unknown", (long)blk->lf_start, (long)blk->lf_end); - if (blk->lf_blkhd.tqh_first) + if (!TAILQ_EMPTY(&blk->lf_blkhd)) panic("lf_printlist: bad list"); } printf("\n"); diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index f2a4b21..d420821 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -111,7 +111,7 @@ chgproccnt(uid, diff) register struct uihashhead *uipp; uipp = UIHASH(uid); - for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) + LIST_FOREACH(uip, uipp, ui_hash) if (uip->ui_uid == uid) break; if (uip) { @@ -149,7 +149,7 @@ chgsbsize(uid, diff) register struct uihashhead *uipp; uipp = UIHASH(uid); - for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) + LIST_FOREACH(uip, uipp, ui_hash) if (uip->ui_uid == uid) break; if (diff <= 0) { @@ -197,7 +197,7 @@ pfind(pid) { register struct proc *p; - for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) + LIST_FOREACH(p, PIDHASH(pid), p_hash) if (p->p_pid == pid) return (p); return (NULL); @@ -212,8 +212,7 @@ pgfind(pgid) { register struct pgrp *pgrp; - for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; - pgrp = pgrp->pg_hash.le_next) + LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) if (pgrp->pg_id == pgid) return (pgrp); return (NULL); @@ -287,7 +286,7 @@ enterpgrp(p, pgid, mksess) fixjobc(p, p->p_pgrp, 0); LIST_REMOVE(p, p_pglist); - if (p->p_pgrp->pg_members.lh_first == 0) + if (LIST_EMPTY(&p->p_pgrp->pg_members)) pgdelete(p->p_pgrp); p->p_pgrp = pgrp; LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist); @@ -303,7 +302,7 @@ leavepgrp(p) { LIST_REMOVE(p, p_pglist); - if (p->p_pgrp->pg_members.lh_first == 0) + if (LIST_EMPTY(&p->p_pgrp->pg_members)) pgdelete(p->p_pgrp); p->p_pgrp = 0; return (0); @@ -368,7 +367,7 @@ fixjobc(p, pgrp, entering) * their process groups; if so, adjust counts for children's * process groups. 
*/ - for (p = p->p_children.lh_first; p != 0; p = p->p_sibling.le_next) + LIST_FOREACH(p, &p->p_children, p_sibling) if ((hispgrp = p->p_pgrp) != pgrp && hispgrp->pg_session == mysession && p->p_stat != SZOMB) { @@ -390,10 +389,9 @@ orphanpg(pg) { register struct proc *p; - for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) { + LIST_FOREACH(p, &pg->pg_members, p_pglist) { if (p->p_stat == SSTOP) { - for (p = pg->pg_members.lh_first; p != 0; - p = p->p_pglist.le_next) { + LIST_FOREACH(p, &pg->pg_members, p_pglist) { psignal(p, SIGHUP); psignal(p, SIGCONT); } @@ -413,17 +411,16 @@ DB_SHOW_COMMAND(pgrpdump, pgrpdump) register int i; for (i = 0; i <= pgrphash; i++) { - if ((pgrp = pgrphashtbl[i].lh_first) != NULL) { + if (!LIST_EMPTY(&pgrphashtbl[i])) { printf("\tindx %d\n", i); - for (; pgrp != 0; pgrp = pgrp->pg_hash.le_next) { + LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) { printf( "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n", (void *)pgrp, (long)pgrp->pg_id, (void *)pgrp->pg_session, pgrp->pg_session->s_count, - (void *)pgrp->pg_members.lh_first); - for (p = pgrp->pg_members.lh_first; p != 0; - p = p->p_pglist.le_next) { + (void *)LIST_FIRST(&pgrp->pg_members)); + LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { printf("\t\tpid %ld addr %p pgrp %p\n", (long)p->p_pid, (void *)p, (void *)p->p_pgrp); @@ -494,7 +491,7 @@ zpfind(pid_t pid) { struct proc *p; - for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) + LIST_FOREACH(p, &zombproc, p_list) if (p->p_pid == pid) return (p); return (NULL); @@ -557,10 +554,10 @@ sysctl_kern_proc SYSCTL_HANDLER_ARGS } for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) { if (!doingzomb) - p = allproc.lh_first; + p = LIST_FIRST(&allproc); else - p = zombproc.lh_first; - for (; p != 0; p = p->p_list.le_next) { + p = LIST_FIRST(&zombproc); + for (; p != 0; p = LIST_NEXT(p, p_list)) { /* * Skip embryonic processes. 
*/ diff --git a/sys/kern/subr_eventhandler.c b/sys/kern/subr_eventhandler.c index 997afa7..c23441d 100644 --- a/sys/kern/subr_eventhandler.c +++ b/sys/kern/subr_eventhandler.c @@ -118,7 +118,7 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag) /* remove entire list */ while (!TAILQ_EMPTY(&list->el_entries)) { ep = TAILQ_FIRST(&list->el_entries); - TAILQ_REMOVE(&list->el_entries, list->el_entries.tqh_first, ee_link); + TAILQ_REMOVE(&list->el_entries, ep, ee_link); free(ep, M_EVENTHANDLER); } } diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c index ed9a633..28b3d0f 100644 --- a/sys/kern/subr_rman.c +++ b/sys/kern/subr_rman.c @@ -128,9 +128,9 @@ rman_manage_region(struct rman *rm, u_long start, u_long end) r->r_rm = rm; simple_lock(rm->rm_slock); - for (s = rm->rm_list.cqh_first; + for (s = CIRCLEQ_FIRST(&rm->rm_list); !CIRCLEQ_TERMCOND(s, rm->rm_list) && s->r_end < r->r_start; - s = s->r_link.cqe_next) + s = CIRCLEQ_NEXT(s, r_link)) ; if (CIRCLEQ_TERMCOND(s, rm->rm_list)) { @@ -149,8 +149,7 @@ rman_fini(struct rman *rm) struct resource *r; simple_lock(rm->rm_slock); - for (r = rm->rm_list.cqh_first; !CIRCLEQ_TERMCOND(r, rm->rm_list); - r = r->r_link.cqe_next) { + CIRCLEQ_FOREACH(r, &rm->rm_list, r_link) { if (r->r_flags & RF_ALLOCATED) { simple_unlock(rm->rm_slock); return EBUSY; @@ -161,8 +160,8 @@ rman_fini(struct rman *rm) * There really should only be one of these if we are in this * state and the code is working properly, but it can't hurt. 
*/ - for (r = rm->rm_list.cqh_first; !CIRCLEQ_TERMCOND(r, rm->rm_list); - r = rm->rm_list.cqh_first) { + while (!CIRCLEQ_EMPTY(&rm->rm_list)) { + r = CIRCLEQ_FIRST(&rm->rm_list); CIRCLEQ_REMOVE(&rm->rm_list, r, r_link); free(r, M_RMAN); } @@ -195,9 +194,9 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count, simple_lock(rm->rm_slock); - for (r = rm->rm_list.cqh_first; + for (r = CIRCLEQ_FIRST(&rm->rm_list); !CIRCLEQ_TERMCOND(r, rm->rm_list) && r->r_end < start; - r = r->r_link.cqe_next) + r = CIRCLEQ_NEXT(r, r_link)) ; if (CIRCLEQ_TERMCOND(r, rm->rm_list)) { @@ -211,7 +210,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count, * First try to find an acceptable totally-unshared region. */ for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list); - s = s->r_link.cqe_next) { + s = CIRCLEQ_NEXT(s, r_link)) { #ifdef RMAN_DEBUG printf("considering [%#lx, %#lx]\n", s->r_start, s->r_end); #endif /* RMAN_DEBUG */ @@ -339,7 +338,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count, goto out; for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list); - s = s->r_link.cqe_next) { + s = CIRCLEQ_NEXT(s, r_link)) { if (s->r_start > end) break; if ((s->r_flags & flags) != flags) @@ -422,8 +421,8 @@ int_rman_activate_resource(struct rman *rm, struct resource *r, } ok = 1; - for (s = r->r_sharehead->lh_first; s && ok; - s = s->r_sharelink.le_next) { + for (s = LIST_FIRST(r->r_sharehead); s && ok; + s = LIST_NEXT(s, r_sharelink)) { if ((s->r_flags & RF_ACTIVE) != 0) { ok = 0; *whohas = s; @@ -531,7 +530,7 @@ int_rman_release_resource(struct rman *rm, struct resource *r) * If we are in the main circleq, appoint someone else. 
*/ LIST_REMOVE(r, r_sharelink); - s = r->r_sharehead->lh_first; + s = LIST_FIRST(r->r_sharehead); if (r->r_flags & RF_FIRSTSHARE) { s->r_flags |= RF_FIRSTSHARE; CIRCLEQ_INSERT_BEFORE(&rm->rm_list, r, s, r_link); @@ -542,7 +541,7 @@ int_rman_release_resource(struct rman *rm, struct resource *r) * Make sure that the sharing list goes away completely * if the resource is no longer being shared at all. */ - if (s->r_sharelink.le_next == 0) { + if (LIST_NEXT(s, r_sharelink) == 0) { free(s->r_sharehead, M_RMAN); s->r_sharehead = 0; s->r_flags &= ~RF_FIRSTSHARE; @@ -554,8 +553,8 @@ int_rman_release_resource(struct rman *rm, struct resource *r) * Look at the adjacent resources in the list and see if our * segment can be merged with any of them. */ - s = r->r_link.cqe_prev; - t = r->r_link.cqe_next; + s = CIRCLEQ_PREV(r, r_link); + t = CIRCLEQ_NEXT(r, r_link); if (s != (void *)&rm->rm_list && (s->r_flags & RF_ALLOCATED) == 0 && t != (void *)&rm->rm_list && (t->r_flags & RF_ALLOCATED) == 0) { diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c index 28af937..6d339cf 100644 --- a/sys/kern/vfs_export.c +++ b/sys/kern/vfs_export.c @@ -315,8 +315,7 @@ vfs_getvfs(fsid) register struct mount *mp; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; - mp = mp->mnt_list.cqe_next) { + CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { simple_unlock(&mountlist_slock); @@ -1543,14 +1542,14 @@ vflush(mp, skipvp, flags) simple_lock(&mntvnode_slock); loop: - for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) { /* * Make sure this vnode wasn't reclaimed in getnewvnode(). * Start over if it has (it won't be on the list anymore). */ if (vp->v_mount != mp) goto loop; - nvp = vp->v_mntvnodes.le_next; + nvp = LIST_NEXT(vp, v_mntvnodes); /* * Skip over a selected vnode. 
*/ @@ -1974,19 +1973,17 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) printf("Locked vnodes\n"); simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } - for (vp = mp->mnt_vnodelist.lh_first; - vp != NULL; - vp = vp->v_mntvnodes.le_next) { + LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { if (VOP_ISLOCKED(vp)) vprint((char *)0, vp); } simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); @@ -2094,14 +2091,15 @@ sysctl_vnode SYSCTL_HANDLER_ARGS (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + mp = CIRCLEQ_FIRST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } again: simple_lock(&mntvnode_slock); - for (vp = mp->mnt_vnodelist.lh_first; + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { /* @@ -2113,7 +2111,7 @@ again: simple_unlock(&mntvnode_slock); goto again; } - nvp = vp->v_mntvnodes.le_next; + nvp = LIST_NEXT(vp, v_mntvnodes); simple_unlock(&mntvnode_slock); if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || (error = SYSCTL_OUT(req, vp, VNODESZ))) @@ -2122,7 +2120,7 @@ again: } simple_unlock(&mntvnode_slock); simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); @@ -2172,8 +2170,9 @@ vfs_unmountall() /* * Since this only runs when rebooting, it is not interlocked. 
*/ - for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) { - nmp = mp->mnt_list.cqe_prev; + mp = CIRCLEQ_LAST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { + nmp = CIRCLEQ_PREV(mp, mnt_list); error = dounmount(mp, MNT_FORCE, p); if (error) { printf("unmount of %s failed (", @@ -2452,9 +2451,9 @@ vfs_msync(struct mount *mp, int flags) { tries = 5; loop: anyio = 0; - for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { - nvp = vp->v_mntvnodes.le_next; + nvp = LIST_NEXT(vp, v_mntvnodes); if (vp->v_mount != mp) { goto loop; diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c index 19c9d90..b83e502 100644 --- a/sys/kern/vfs_extattr.c +++ b/sys/kern/vfs_extattr.c @@ -369,7 +369,7 @@ checkdirs(olddp) return; if (VFS_ROOT(olddp->v_mountedhere, &newdp)) panic("mount: lost mount"); - for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { + LIST_FOREACH(p, &allproc, p_list) { fdp = p->p_fd; if (fdp->fd_cdir == olddp) { vrele(fdp->fd_cdir); @@ -500,7 +500,7 @@ dounmount(mp, flags, p) vrele(coveredvp); } mp->mnt_vfc->vfc_refcount--; - if (mp->mnt_vnodelist.lh_first != NULL) + if (!LIST_EMPTY(&mp->mnt_vnodelist)) panic("unmount: dangling vnode"); lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p); if (mp->mnt_kern_flag & MNTK_MWAIT) @@ -533,9 +533,10 @@ sync(p, uap) int asyncflag; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + mp = CIRCLEQ_FIRST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } if ((mp->mnt_flag & MNT_RDONLY) == 0) { @@ -547,7 +548,7 @@ sync(p, uap) mp->mnt_flag |= asyncflag; } simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); @@ 
-717,9 +718,10 @@ getfsstat(p, uap) sfsp = (caddr_t)SCARG(uap, buf); count = 0; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + mp = CIRCLEQ_FIRST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } if (sfsp && count < maxcount) { @@ -733,7 +735,7 @@ getfsstat(p, uap) (SCARG(uap, flags) & MNT_WAIT)) && (error = VFS_STATFS(mp, sp, p))) { simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); continue; } @@ -747,7 +749,7 @@ getfsstat(p, uap) } count++; simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index 28af937..6d339cf 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -315,8 +315,7 @@ vfs_getvfs(fsid) register struct mount *mp; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; - mp = mp->mnt_list.cqe_next) { + CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { simple_unlock(&mountlist_slock); @@ -1543,14 +1542,14 @@ vflush(mp, skipvp, flags) simple_lock(&mntvnode_slock); loop: - for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) { /* * Make sure this vnode wasn't reclaimed in getnewvnode(). * Start over if it has (it won't be on the list anymore). */ if (vp->v_mount != mp) goto loop; - nvp = vp->v_mntvnodes.le_next; + nvp = LIST_NEXT(vp, v_mntvnodes); /* * Skip over a selected vnode. 
*/ @@ -1974,19 +1973,17 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) printf("Locked vnodes\n"); simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } - for (vp = mp->mnt_vnodelist.lh_first; - vp != NULL; - vp = vp->v_mntvnodes.le_next) { + LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { if (VOP_ISLOCKED(vp)) vprint((char *)0, vp); } simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); @@ -2094,14 +2091,15 @@ sysctl_vnode SYSCTL_HANDLER_ARGS (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + mp = CIRCLEQ_FIRST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } again: simple_lock(&mntvnode_slock); - for (vp = mp->mnt_vnodelist.lh_first; + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { /* @@ -2113,7 +2111,7 @@ again: simple_unlock(&mntvnode_slock); goto again; } - nvp = vp->v_mntvnodes.le_next; + nvp = LIST_NEXT(vp, v_mntvnodes); simple_unlock(&mntvnode_slock); if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || (error = SYSCTL_OUT(req, vp, VNODESZ))) @@ -2122,7 +2120,7 @@ again: } simple_unlock(&mntvnode_slock); simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); @@ -2172,8 +2170,9 @@ vfs_unmountall() /* * Since this only runs when rebooting, it is not interlocked. 
*/ - for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) { - nmp = mp->mnt_list.cqe_prev; + mp = CIRCLEQ_LAST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { + nmp = CIRCLEQ_PREV(mp, mnt_list); error = dounmount(mp, MNT_FORCE, p); if (error) { printf("unmount of %s failed (", @@ -2452,9 +2451,9 @@ vfs_msync(struct mount *mp, int flags) { tries = 5; loop: anyio = 0; - for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { + for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { - nvp = vp->v_mntvnodes.le_next; + nvp = LIST_NEXT(vp, v_mntvnodes); if (vp->v_mount != mp) { goto loop; diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c index 19c9d90..b83e502 100644 --- a/sys/kern/vfs_syscalls.c +++ b/sys/kern/vfs_syscalls.c @@ -369,7 +369,7 @@ checkdirs(olddp) return; if (VFS_ROOT(olddp->v_mountedhere, &newdp)) panic("mount: lost mount"); - for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { + LIST_FOREACH(p, &allproc, p_list) { fdp = p->p_fd; if (fdp->fd_cdir == olddp) { vrele(fdp->fd_cdir); @@ -500,7 +500,7 @@ dounmount(mp, flags, p) vrele(coveredvp); } mp->mnt_vfc->vfc_refcount--; - if (mp->mnt_vnodelist.lh_first != NULL) + if (!LIST_EMPTY(&mp->mnt_vnodelist)) panic("unmount: dangling vnode"); lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p); if (mp->mnt_kern_flag & MNTK_MWAIT) @@ -533,9 +533,10 @@ sync(p, uap) int asyncflag; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + mp = CIRCLEQ_FIRST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } if ((mp->mnt_flag & MNT_RDONLY) == 0) { @@ -547,7 +548,7 @@ sync(p, uap) mp->mnt_flag |= asyncflag; } simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); 
@@ -717,9 +718,10 @@ getfsstat(p, uap) sfsp = (caddr_t)SCARG(uap, buf); count = 0; simple_lock(&mountlist_slock); - for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + mp = CIRCLEQ_FIRST(&mountlist); + for (; mp != (void *)&mountlist; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); continue; } if (sfsp && count < maxcount) { @@ -733,7 +735,7 @@ getfsstat(p, uap) (SCARG(uap, flags) & MNT_WAIT)) && (error = VFS_STATFS(mp, sp, p))) { simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); continue; } @@ -747,7 +749,7 @@ getfsstat(p, uap) } count++; simple_lock(&mountlist_slock); - nmp = mp->mnt_list.cqe_next; + nmp = CIRCLEQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } simple_unlock(&mountlist_slock); |