author:    bmilekic <bmilekic@FreeBSD.org>  2001-02-09 06:11:45 +0000
committer: bmilekic <bmilekic@FreeBSD.org>  2001-02-09 06:11:45 +0000
commit:    f364d4ac3621ae2689a3cc1b82c73eb491475a24
tree:      84444d0341ce519800ed7913d826f5f38c622d6d /sys/ufs
parent:    363bdddf694863339f6629340cfb324771b8ffe7
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
Similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
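As a minimal sketch of the conversion (the lock names `foo_mtx' and
`bar_mtx' are hypothetical, not part of this change):

    /* Before: one entry point, lock type repeated at every call site. */
    mtx_enter(&foo_mtx, MTX_DEF);
    /* ... critical section ... */
    mtx_exit(&foo_mtx, MTX_DEF);

    /* After: the lock type is encoded in the function name. */
    mtx_lock(&foo_mtx);          /* foo_mtx was mtx_init'd as MTX_DEF */
    /* ... critical section ... */
    mtx_unlock(&foo_mtx);

    mtx_lock_spin(&bar_mtx);     /* bar_mtx was mtx_init'd as MTX_SPIN */
    /* ... critical section ... */
    mtx_unlock_spin(&bar_mtx);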
We change the caller interface for the two types of locks because the
semantics are entirely different for each case; the distinct names make
this explicitly clear and, at the same time, rid us of the extra
`type' argument.
The enter->lock and exit->unlock renaming reflects the idea that we
are "locking data" rather than "entering locked code".
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
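For instance (a sketch; `foo_mtx' is again a hypothetical MTX_DEF lock),
a release that should stay quiet and not allow a context switch becomes:

    /* Before: flags were OR'd into the `type' argument. */
    mtx_exit(&foo_mtx, MTX_DEF | MTX_NOSWITCH | MTX_QUIET);

    /* After: the _flags wrapper carries them explicitly. */
    mtx_unlock_flags(&foo_mtx, MTX_NOSWITCH | MTX_QUIET);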
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
reflects the facts that we generally tend to avoid spin locks and that
the heavily used spin locks we do have (e.g. sched_lock) do recurse;
inlining the recursion case therefore reduces function call overhead on
some architectures (such as alpha).
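A simplified sketch of that spin-lock fast path (not the literal macro
from this commit; `CURTHD' and the helper names stand in for the real
internals):

    #define mtx_lock_spin(m) do {                                    \
            if (!_obtain_lock((m), CURTHD)) {                        \
                    if ((m)->mtx_lock == (uintptr_t)CURTHD)          \
                            (m)->mtx_recurse++;  /* recurse inline */ \
                    else                                             \
                            _mtx_lock_spin(m);   /* rare: really spin */ \
            }                                                        \
    } while (0)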
Create a new malloc type for the witness code and stop using the M_DEV
type. The new type is called M_WITNESS and is only declared if WITNESS
is enabled.
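A sketch of such a conditional declaration (assuming the standard
MALLOC_DEFINE macro; the description strings here are illustrative):

    #ifdef WITNESS
    MALLOC_DEFINE(M_WITNESS, "witness", "witness mutex debugging structures");
    #endif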
Begin cleaning up some machdep/mutex.h code - specifically, update the
"optimized" inlined code in alpha/mutex.h and write MTX_LOCK_SPIN and
MTX_UNLOCK_SPIN asm macros for i386/mutex.h, as we presently need those.
Finally, catch up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
Diffstat (limited to 'sys/ufs')
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c  52
-rw-r--r--  sys/ufs/ifs/ifs_vfsops.c  14
-rw-r--r--  sys/ufs/ufs/ufs_ihash.c   20
-rw-r--r--  sys/ufs/ufs/ufs_quota.c   12
-rw-r--r--  sys/ufs/ufs/ufs_vnops.c   12
5 files changed, 55 insertions, 55 deletions
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index fee14d7..2d48115 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -393,7 +393,7 @@ ffs_reload(mp, cred, p)
 	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
 		vfs_object_create(devvp, p, p->p_ucred);
-		mtx_enter(&devvp->v_interlock, MTX_DEF);
+		mtx_lock(&devvp->v_interlock);
 		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
 	}
 
@@ -454,10 +454,10 @@ ffs_reload(mp, cred, p)
 	}
 
loop:
-	mtx_enter(&mntvnode_mtx, MTX_DEF);
+	mtx_lock(&mntvnode_mtx);
 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
 		if (vp->v_mount != mp) {
-			mtx_exit(&mntvnode_mtx, MTX_DEF);
+			mtx_unlock(&mntvnode_mtx);
 			goto loop;
 		}
 		nvp = LIST_NEXT(vp, v_mntvnodes);
@@ -469,8 +469,8 @@ loop:
 		/*
 		 * Step 5: invalidate all cached file data.
 		 */
-		mtx_enter(&vp->v_interlock, MTX_DEF);
-		mtx_exit(&mntvnode_mtx, MTX_DEF);
+		mtx_lock(&vp->v_interlock);
+		mtx_unlock(&mntvnode_mtx);
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
 			goto loop;
 		}
@@ -492,9 +492,9 @@ loop:
 		ip->i_effnlink = ip->i_nlink;
 		brelse(bp);
 		vput(vp);
-		mtx_enter(&mntvnode_mtx, MTX_DEF);
+		mtx_lock(&mntvnode_mtx);
 	}
-	mtx_exit(&mntvnode_mtx, MTX_DEF);
+	mtx_unlock(&mntvnode_mtx);
 	return (0);
 }
 
@@ -551,7 +551,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
 	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
 		vfs_object_create(devvp, p, cred);
-		mtx_enter(&devvp->v_interlock, MTX_DEF);
+		mtx_lock(&devvp->v_interlock);
 		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
 	}
 
@@ -937,7 +937,7 @@ ffs_sync(mp, waitfor, cred, p)
 		wait = 1;
 		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
 	}
-	mtx_enter(&mntvnode_mtx, MTX_DEF);
+	mtx_lock(&mntvnode_mtx);
loop:
 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
 		/*
@@ -946,19 +946,19 @@ loop:
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		mtx_enter(&vp->v_interlock, MTX_DEF);
+		mtx_lock(&vp->v_interlock);
 		nvp = LIST_NEXT(vp, v_mntvnodes);
 		ip = VTOI(vp);
 		if (vp->v_type == VNON || ((ip->i_flag &
 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
 		    TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
-			mtx_exit(&vp->v_interlock, MTX_DEF);
+			mtx_unlock(&vp->v_interlock);
 			continue;
 		}
 		if (vp->v_type != VCHR) {
-			mtx_exit(&mntvnode_mtx, MTX_DEF);
+			mtx_unlock(&mntvnode_mtx);
 			if ((error = vget(vp, lockreq, p)) != 0) {
-				mtx_enter(&mntvnode_mtx, MTX_DEF);
+				mtx_lock(&mntvnode_mtx);
 				if (error == ENOENT)
 					goto loop;
 				continue;
@@ -967,15 +967,15 @@ loop:
 				allerror = error;
 			VOP_UNLOCK(vp, 0, p);
 			vrele(vp);
-			mtx_enter(&mntvnode_mtx, MTX_DEF);
+			mtx_lock(&mntvnode_mtx);
 		} else {
-			mtx_exit(&mntvnode_mtx, MTX_DEF);
-			mtx_exit(&vp->v_interlock, MTX_DEF);
+			mtx_unlock(&mntvnode_mtx);
+			mtx_unlock(&vp->v_interlock);
 			UFS_UPDATE(vp, wait);
-			mtx_enter(&mntvnode_mtx, MTX_DEF);
+			mtx_lock(&mntvnode_mtx);
 		}
 	}
-	mtx_exit(&mntvnode_mtx, MTX_DEF);
+	mtx_unlock(&mntvnode_mtx);
 	/*
 	 * Force stale file system control information to be flushed.
 	 */
@@ -984,7 +984,7 @@ loop:
 			allerror = error;
 		/* Flushed work items may create new vnodes to clean */
 		if (count) {
-			mtx_enter(&mntvnode_mtx, MTX_DEF);
+			mtx_lock(&mntvnode_mtx);
 			goto loop;
 		}
 	}
@@ -1055,17 +1055,17 @@ restart:
 	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
	 * may occur!
 	 */
-	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+	mtx_lock(&ffs_inode_hash_mtx);
 	if (ffs_inode_hash_lock) {
 		while (ffs_inode_hash_lock) {
 			ffs_inode_hash_lock = -1;
 			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
 		}
-		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+		mtx_unlock(&ffs_inode_hash_mtx);
 		goto restart;
 	}
 	ffs_inode_hash_lock = 1;
-	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+	mtx_unlock(&ffs_inode_hash_mtx);
 
 	/*
 	 * If this MALLOC() is performed after the getnewvnode()
@@ -1085,10 +1085,10 @@ restart:
 		 * otherwise the processes waken up immediately hit
 		 * themselves into the mutex.
 		 */
-		mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+		mtx_lock(&ffs_inode_hash_mtx);
 		want_wakeup = ffs_inode_hash_lock < 0;
 		ffs_inode_hash_lock = 0;
-		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+		mtx_unlock(&ffs_inode_hash_mtx);
 		if (want_wakeup)
 			wakeup(&ffs_inode_hash_lock);
 		*vpp = NULL;
@@ -1126,10 +1126,10 @@ restart:
 	 * otherwise the processes waken up immediately hit
 	 * themselves into the mutex.
 	 */
-	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
+	mtx_lock(&ffs_inode_hash_mtx);
 	want_wakeup = ffs_inode_hash_lock < 0;
 	ffs_inode_hash_lock = 0;
-	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
+	mtx_unlock(&ffs_inode_hash_mtx);
 	if (want_wakeup)
 		wakeup(&ffs_inode_hash_lock);
 
diff --git a/sys/ufs/ifs/ifs_vfsops.c b/sys/ufs/ifs/ifs_vfsops.c
index 5b72c03..f0e2e8c 100644
--- a/sys/ufs/ifs/ifs_vfsops.c
+++ b/sys/ufs/ifs/ifs_vfsops.c
@@ -176,17 +176,17 @@ restart:
 	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
 	 * may occur!
 	 */
-	mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+	mtx_lock(&ifs_inode_hash_mtx);
 	if (ifs_inode_hash_lock) {
 		while (ifs_inode_hash_lock) {
 			ifs_inode_hash_lock = -1;
 			msleep(&ifs_inode_hash_lock, &ifs_inode_hash_mtx, PVM, "ifsvgt", 0);
 		}
-		mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+		mtx_unlock(&ifs_inode_hash_mtx);
 		goto restart;
 	}
 	ifs_inode_hash_lock = 1;
-	mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+	mtx_unlock(&ifs_inode_hash_mtx);
 
 	/*
 	 * If this MALLOC() is performed after the getnewvnode()
@@ -206,10 +206,10 @@ restart:
 		 * otherwise the processes waken up immediately hit
 		 * themselves into the mutex.
 		 */
-		mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+		mtx_lock(&ifs_inode_hash_mtx);
 		want_wakeup = ifs_inode_hash_lock < 0;
 		ifs_inode_hash_lock = 0;
-		mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+		mtx_unlock(&ifs_inode_hash_mtx);
 		if (want_wakeup)
 			wakeup(&ifs_inode_hash_lock);
 		*vpp = NULL;
@@ -247,10 +247,10 @@ restart:
 	 * otherwise the processes waken up immediately hit
 	 * themselves into the mutex.
 	 */
-	mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
+	mtx_lock(&ifs_inode_hash_mtx);
 	want_wakeup = ifs_inode_hash_lock < 0;
 	ifs_inode_hash_lock = 0;
-	mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
+	mtx_unlock(&ifs_inode_hash_mtx);
 	if (want_wakeup)
 		wakeup(&ifs_inode_hash_lock);
 
diff --git a/sys/ufs/ufs/ufs_ihash.c b/sys/ufs/ufs/ufs_ihash.c
index 6866a23..1fd39e9 100644
--- a/sys/ufs/ufs/ufs_ihash.c
+++ b/sys/ufs/ufs/ufs_ihash.c
@@ -77,11 +77,11 @@ ufs_ihashlookup(dev, inum)
 {
 	struct inode *ip;
 
-	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+	mtx_lock(&ufs_ihash_mtx);
 	LIST_FOREACH(ip, INOHASH(dev, inum), i_hash)
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
-	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+	mtx_unlock(&ufs_ihash_mtx);
 
 	if (ip)
 		return (ITOV(ip));
@@ -102,18 +102,18 @@ ufs_ihashget(dev, inum)
 	struct vnode *vp;
 
loop:
-	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+	mtx_lock(&ufs_ihash_mtx);
 	LIST_FOREACH(ip, INOHASH(dev, inum), i_hash) {
 		if (inum == ip->i_number && dev == ip->i_dev) {
 			vp = ITOV(ip);
-			mtx_enter(&vp->v_interlock, MTX_DEF);
-			mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+			mtx_lock(&vp->v_interlock);
+			mtx_unlock(&ufs_ihash_mtx);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (vp);
 		}
 	}
-	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+	mtx_unlock(&ufs_ihash_mtx);
 	return (NULL);
 }
 
@@ -130,11 +130,11 @@ ufs_ihashins(ip)
 	/* lock the inode, then put it on the appropriate hash list */
 	lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
 
-	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+	mtx_lock(&ufs_ihash_mtx);
 	ipp = INOHASH(ip->i_dev, ip->i_number);
 	LIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
-	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+	mtx_unlock(&ufs_ihash_mtx);
 }
 
 /*
@@ -144,10 +144,10 @@ void
 ufs_ihashrem(ip)
 	struct inode *ip;
 {
-	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+	mtx_lock(&ufs_ihash_mtx);
 	if (ip->i_flag & IN_HASHED) {
 		ip->i_flag &= ~IN_HASHED;
 		LIST_REMOVE(ip, i_hash);
 	}
-	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
+	mtx_unlock(&ufs_ihash_mtx);
 }
diff --git a/sys/ufs/ufs/ufs_quota.c b/sys/ufs/ufs/ufs_quota.c
index 1b3c69a..f419f01 100644
--- a/sys/ufs/ufs/ufs_quota.c
+++ b/sys/ufs/ufs/ufs_quota.c
@@ -666,7 +666,7 @@ qsync(mp)
 	 * Search vnodes associated with this mount point,
 	 * synchronizing any modified dquot structures.
 	 */
-	mtx_enter(&mntvnode_mtx, MTX_DEF);
+	mtx_lock(&mntvnode_mtx);
again:
 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
@@ -674,11 +674,11 @@ again:
 		nextvp = LIST_NEXT(vp, v_mntvnodes);
 		if (vp->v_type == VNON)
 			continue;
-		mtx_enter(&vp->v_interlock, MTX_DEF);
-		mtx_exit(&mntvnode_mtx, MTX_DEF);
+		mtx_lock(&vp->v_interlock);
+		mtx_unlock(&mntvnode_mtx);
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
 		if (error) {
-			mtx_enter(&mntvnode_mtx, MTX_DEF);
+			mtx_lock(&mntvnode_mtx);
 			if (error == ENOENT)
 				goto again;
 			continue;
@@ -689,11 +689,11 @@ again:
 			dqsync(vp, dq);
 		}
 		vput(vp);
-		mtx_enter(&mntvnode_mtx, MTX_DEF);
+		mtx_lock(&mntvnode_mtx);
 		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
 			goto again;
 	}
-	mtx_exit(&mntvnode_mtx, MTX_DEF);
+	mtx_unlock(&mntvnode_mtx);
 	return (0);
 }
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 07d0dac..134e356 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -284,10 +284,10 @@ ufs_close(ap)
 {
 	register struct vnode *vp = ap->a_vp;
 
-	mtx_enter(&vp->v_interlock, MTX_DEF);
+	mtx_lock(&vp->v_interlock);
 	if (vp->v_usecount > 1)
 		ufs_itimes(vp);
-	mtx_exit(&vp->v_interlock, MTX_DEF);
+	mtx_unlock(&vp->v_interlock);
 	return (0);
 }
 
@@ -1863,10 +1863,10 @@ ufsspec_close(ap)
 {
 	struct vnode *vp = ap->a_vp;
 
-	mtx_enter(&vp->v_interlock, MTX_DEF);
+	mtx_lock(&vp->v_interlock);
 	if (vp->v_usecount > 1)
 		ufs_itimes(vp);
-	mtx_exit(&vp->v_interlock, MTX_DEF);
+	mtx_unlock(&vp->v_interlock);
 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
 }
 
@@ -1937,10 +1937,10 @@ ufsfifo_close(ap)
 {
 	struct vnode *vp = ap->a_vp;
 
-	mtx_enter(&vp->v_interlock, MTX_DEF);
+	mtx_lock(&vp->v_interlock);
 	if (vp->v_usecount > 1)
 		ufs_itimes(vp);
-	mtx_exit(&vp->v_interlock, MTX_DEF);
+	mtx_unlock(&vp->v_interlock);
 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
 }