summary | refs | log | tree | commit | diff | stats
path: root/sys/kern
diff options
context:
space:
mode:
author    jeff <jeff@FreeBSD.org>  2003-02-25 03:37:48 +0000
committer jeff <jeff@FreeBSD.org>  2003-02-25 03:37:48 +0000
commit    9e4c9a6ce908881b1e6f83cbb906a9fce08dd3ab (patch)
tree      df5eb5e550ba49b92f45eadaca861cb18128ad5d /sys/kern
parent    541937cf7373ff6a61c871266ea041503bb02233 (diff)
download  FreeBSD-src-9e4c9a6ce908881b1e6f83cbb906a9fce08dd3ab.zip
          FreeBSD-src-9e4c9a6ce908881b1e6f83cbb906a9fce08dd3ab.tar.gz
- Add an interlock argument to BUF_LOCK and BUF_TIMELOCK.
- Remove the buftimelock mutex and acquire the buf's interlock to protect
  these fields instead.
- Hold the vnode interlock while locking bufs on the clean/dirty queues.
  This reduces some cases from one BUF_LOCK with a LK_NOWAIT and another
  BUF_LOCK with a LK_TIMEFAIL to a single lock.

Reviewed by:	arch, mckusick
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_lock.c13
-rw-r--r--sys/kern/vfs_bio.c36
-rw-r--r--sys/kern/vfs_cluster.c8
-rw-r--r--sys/kern/vfs_default.c2
-rw-r--r--sys/kern/vfs_subr.c75
5 files changed, 65 insertions(+), 69 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index aaf09aa..6ee1c5c 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -232,19 +232,16 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
else
thr = td;
- if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0) {
- if ((flags & LK_INTERLOCK) == 0)
- WITNESS_SLEEP(1, NULL);
- else
- WITNESS_SLEEP(1, &interlkp->mtx_object);
- }
-
- mtx_lock(lkp->lk_interlock);
+ if ((flags & LK_INTERNAL) == 0)
+ mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK) {
mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
mtx_unlock(interlkp);
}
+ if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
+ WITNESS_SLEEP(1, &lkp->lk_interlock->mtx_object);
+
if (panicstr != NULL) {
mtx_unlock(lkp->lk_interlock);
return (0);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a1f31e1..d7f6d2f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -68,7 +68,6 @@ struct buf_ops buf_ops_bio = {
* carnal knowledge of buffers. This knowledge should be moved to vfs_bio.c.
*/
struct buf *buf; /* buffer header pool */
-struct mtx buftimelock; /* Interlock on setting prio and timo */
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
vm_offset_t to);
@@ -519,7 +518,6 @@ bufinit(void)
#ifdef USE_BUFHASH
LIST_INIT(&invalhash);
#endif
- mtx_init(&buftimelock, "buftime lock", NULL, MTX_DEF);
mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
@@ -967,7 +965,7 @@ vfs_backgroundwritedone(bp)
* queue if it currently resides there.
*/
origbp->b_flags &= ~B_LOCKED;
- if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
+ if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
bremfree(origbp);
bqrelse(origbp);
}
@@ -1630,7 +1628,7 @@ vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
return (0);
/* If the buf is busy we don't want to wait for it */
- if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
return (0);
/* Only cluster with valid clusterable delayed write buffers */
@@ -1710,7 +1708,7 @@ vfs_bio_awrite(struct buf * bp)
}
}
- BUF_LOCK(bp, LK_EXCLUSIVE);
+ BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
bremfree(bp);
bp->b_flags |= B_ASYNC;
@@ -1870,7 +1868,7 @@ restart:
* remains valid only for QUEUE_EMPTY[KVA] bp's.
*/
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
panic("getnewbuf: locked buf");
bremfreel(bp);
mtx_unlock(&bqlock);
@@ -2147,7 +2145,7 @@ flushbufqueues(void)
if ((bp->b_xflags & BX_BKGRDINPROG) != 0)
continue;
if (bp->b_flags & B_INVAL) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
panic("flushbufqueues: locked buf");
bremfreel(bp);
mtx_unlock(&bqlock);
@@ -2182,7 +2180,7 @@ flushbufqueues(void)
if ((bp->b_xflags & BX_BKGRDINPROG) != 0)
continue;
if (bp->b_flags & B_INVAL) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
panic("flushbufqueues: locked buf");
bremfreel(bp);
mtx_unlock(&bqlock);
@@ -2407,6 +2405,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
struct buf *bp;
int s;
+ int error;
#ifdef USE_BUFHASH
struct bufhashhdr *bh;
#endif
@@ -2437,19 +2436,24 @@ loop:
VI_LOCK(vp);
if ((bp = gbincore(vp, blkno))) {
- VI_UNLOCK(vp);
/*
* Buffer is in-core. If the buffer is not busy, it must
* be on a queue.
*/
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
- "getblk", slpflag, slptimeo) == ENOLCK)
- goto loop;
- splx(s);
- return (struct buf *) NULL;
- }
+ error = BUF_TIMELOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp), "getblk", slpflag, slptimeo);
+
+ /*
+ * If we slept and got the lock we have to restart in case
+ * the buffer changed identities.
+ */
+ if (error == ENOLCK)
+ goto loop;
+ /* We timed out or were interrupted. */
+ else if (error)
+ return (NULL);
/*
* The buffer is locked. B_CACHE is cleared if the buffer is
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 90c8852..96e541c 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -403,7 +403,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
if ((tbp = incore(vp, lbn + i)) != NULL &&
(tbp->b_flags & B_INVAL) == 0) {
- if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
+ if (BUF_LOCK(tbp,
+ LK_EXCLUSIVE | LK_NOWAIT, NULL))
break;
BUF_UNLOCK(tbp);
@@ -794,7 +795,7 @@ cluster_wbuild(vp, size, start_lbn, len)
*/
if (((tbp = incore(vp, start_lbn)) == NULL) ||
((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
- BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
+ BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
++start_lbn;
--len;
splx(s);
@@ -884,7 +885,8 @@ cluster_wbuild(vp, size, start_lbn, len)
(bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
(tbp->b_flags & B_LOCKED) ||
tbp->b_wcred != bp->b_wcred ||
- BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
+ BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT,
+ NULL)) {
splx(s);
break;
}
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 5caa568..e5f2342 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -745,7 +745,7 @@ loop2:
if ((bp->b_vflags & BV_SCANNED) != 0)
continue;
bp->b_vflags |= BV_SCANNED;
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
continue;
VI_UNLOCK(vp);
if ((bp->b_flags & B_DELWRI) == 0)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index dbebf26..8e556ab 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1218,17 +1218,15 @@ flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
for (found = 0, bp = blist; bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
- VI_LOCK(vp);
continue;
}
found += 1;
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- error = BUF_TIMELOCK(bp,
- LK_EXCLUSIVE | LK_SLEEPFAIL,
- "flushbuf", slpflag, slptimeo);
+ error = BUF_TIMELOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
+ "flushbuf", slpflag, slptimeo);
+ if (error) {
if (error != ENOLCK)
*errorp = error;
goto done;
@@ -1303,50 +1301,48 @@ restart:
anyfreed = 0;
for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if (bp->b_lblkno >= trunclbn) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
+ if (BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp)) == ENOLCK)
goto restart;
- } else {
- bremfree(bp);
- bp->b_flags |= (B_INVAL | B_RELBUF);
- bp->b_flags &= ~B_ASYNC;
- brelse(bp);
- anyfreed = 1;
- }
+
+ bremfree(bp);
+ bp->b_flags |= (B_INVAL | B_RELBUF);
+ bp->b_flags &= ~B_ASYNC;
+ brelse(bp);
+ anyfreed = 1;
+
if (nbp &&
(((nbp->b_xflags & BX_VNCLEAN) == 0) ||
(nbp->b_vp != vp) ||
(nbp->b_flags & B_DELWRI))) {
goto restart;
}
+ VI_LOCK(vp);
}
- VI_LOCK(vp);
}
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if (bp->b_lblkno >= trunclbn) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
+ if (BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp)) == ENOLCK)
goto restart;
- } else {
- bremfree(bp);
- bp->b_flags |= (B_INVAL | B_RELBUF);
- bp->b_flags &= ~B_ASYNC;
- brelse(bp);
- anyfreed = 1;
- }
+ bremfree(bp);
+ bp->b_flags |= (B_INVAL | B_RELBUF);
+ bp->b_flags &= ~B_ASYNC;
+ brelse(bp);
+ anyfreed = 1;
if (nbp &&
(((nbp->b_xflags & BX_VNDIRTY) == 0) ||
(nbp->b_vp != vp) ||
(nbp->b_flags & B_DELWRI) == 0)) {
goto restart;
}
+ VI_LOCK(vp);
}
- VI_LOCK(vp);
}
}
@@ -1354,24 +1350,21 @@ restart:
restartsync:
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
+ if (BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp)) == ENOLCK)
goto restart;
- } else {
- bremfree(bp);
- if (bp->b_vp == vp) {
- bp->b_flags |= B_ASYNC;
- } else {
- bp->b_flags &= ~B_ASYNC;
- }
- BUF_WRITE(bp);
- }
+ bremfree(bp);
+ if (bp->b_vp == vp)
+ bp->b_flags |= B_ASYNC;
+ else
+ bp->b_flags &= ~B_ASYNC;
+
+ BUF_WRITE(bp);
VI_LOCK(vp);
goto restartsync;
}
- VI_LOCK(vp);
}
}
OpenPOWER on IntegriCloud