| author    | kib <kib@FreeBSD.org> | 2010-08-12 08:36:23 +0000 |
|-----------|-----------------------|---------------------------|
| committer | kib <kib@FreeBSD.org> | 2010-08-12 08:36:23 +0000 |
| commit    | ade28bdd4036313b9b17b915d917a7851f02122f (patch) | |
| tree      | bea00f78c841645c41a70f3ff5730886d006bf0e | |
| parent    | 60a46e5ff9a6a8f27e212d1b48b21486e7a11f12 (diff) | |
The buffer's b_vflags field is not always properly protected by the
bufobj lock. If b_bufobj is not NULL, the bufobj lock must be held
when manipulating the flags. Failing to do so sometimes leaves
BV_BKGRDINPROG erroneously set, causing softdep's getdirtybuf() to
get stuck indefinitely in the "getbuf" sleep, waiting for the
completion of a background write that is never actually performed.

Add BO_LOCK() in the cases where it was missed (the sketch below
illustrates the rule being enforced).
In collaboration with: pho
Tested by: bz
Reviewed by: jeff
MFC after: 1 month
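
For context, here is a minimal sketch of the locking rule this change enforces, using only primitives that appear in the diff (BO_LOCK(), BO_UNLOCK(), BO_MTX(), mtx_assert()). The wrapper function buf_clear_vflag() and its name are hypothetical, for illustration only:

```c
/*
 * Illustrative sketch, not FreeBSD source: b_vflags is protected by
 * the bufobj interlock, so any read-modify-write of it must hold
 * BO_MTX(bp->b_bufobj) whenever the buffer has an associated bufobj.
 */
static void
buf_clear_vflag(struct buf *bp, int flag)
{
	struct bufobj *bo;

	bo = bp->b_bufobj;
	if (bo != NULL) {
		BO_LOCK(bo);	/* serialize all b_vflags updates */
		mtx_assert(BO_MTX(bo), MA_OWNED); /* the check the patch adds */
	}
	bp->b_vflags &= ~flag;	/* now a protected read-modify-write */
	if (bo != NULL)
		BO_UNLOCK(bo);
}
```

Without the interlock, the `&=`/`|=` read-modify-write races with concurrent updates to other bits in b_vflags, so one thread's store can resurrect a bit another thread just cleared; that is how BV_BKGRDINPROG could be left stale.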
| -rw-r--r-- | sys/kern/vfs_bio.c  | 53 |
| -rw-r--r-- | sys/kern/vfs_subr.c | 10 |

2 files changed, 59 insertions, 4 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 3bed100..5e2cca2 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -398,6 +398,8 @@ bufcountwakeup(struct buf *bp)
 	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
 	    ("buf %p already counted as free", bp));
+	if (bp->b_bufobj != NULL)
+		mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 	bp->b_vflags |= BV_INFREECNT;
 	old = atomic_fetchadd_int(&numfreebuffers, 1);
 	KASSERT(old >= 0 && old < nbuf,
@@ -714,6 +716,8 @@ bremfree(struct buf *bp)
 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
 		    ("buf %p not counted in numfreebuffers", bp));
+		if (bp->b_bufobj != NULL)
+			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 		bp->b_vflags &= ~BV_INFREECNT;
 		old = atomic_fetchadd_int(&numfreebuffers, -1);
 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
@@ -770,6 +774,8 @@ bremfreel(struct buf *bp)
 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
 		    ("buf %p not counted in numfreebuffers", bp));
+		if (bp->b_bufobj != NULL)
+			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 		bp->b_vflags &= ~BV_INFREECNT;
 		old = atomic_fetchadd_int(&numfreebuffers, -1);
 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
@@ -1412,8 +1418,16 @@ brelse(struct buf *bp)
 	/* enqueue */
 	mtx_lock(&bqlock);
 	/* Handle delayed bremfree() processing. */
-	if (bp->b_flags & B_REMFREE)
+	if (bp->b_flags & B_REMFREE) {
+		struct bufobj *bo;
+
+		bo = bp->b_bufobj;
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bremfreel(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	if (bp->b_qindex != QUEUE_NONE)
 		panic("brelse: free buffer onto another queue???");
@@ -1474,8 +1488,16 @@ brelse(struct buf *bp)
 	 * if B_INVAL is set ).
 	 */
-	if (!(bp->b_flags & B_DELWRI))
+	if (!(bp->b_flags & B_DELWRI)) {
+		struct bufobj *bo;
+
+		bo = bp->b_bufobj;
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bufcountwakeup(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	/*
 	 * Something we can maybe free or reuse
@@ -1504,6 +1526,8 @@ brelse(struct buf *bp)
 void
 bqrelse(struct buf *bp)
 {
+	struct bufobj *bo;
+
 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X",
 	    bp, bp->b_vp, bp->b_flags);
 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
@@ -1514,10 +1538,15 @@ bqrelse(struct buf *bp)
 		return;
 	}
 
+	bo = bp->b_bufobj;
 	if (bp->b_flags & B_MANAGED) {
 		if (bp->b_flags & B_REMFREE) {
 			mtx_lock(&bqlock);
+			if (bo != NULL)
+				BO_LOCK(bo);
 			bremfreel(bp);
+			if (bo != NULL)
+				BO_UNLOCK(bo);
 			mtx_unlock(&bqlock);
 		}
 		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
@@ -1527,8 +1556,13 @@ bqrelse(struct buf *bp)
 	mtx_lock(&bqlock);
 	/* Handle delayed bremfree() processing. */
-	if (bp->b_flags & B_REMFREE)
+	if (bp->b_flags & B_REMFREE) {
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bremfreel(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 	if (bp->b_qindex != QUEUE_NONE)
 		panic("bqrelse: free buffer onto another queue???");
 	/* buffers with stale but valid contents */
@@ -1563,8 +1597,13 @@ bqrelse(struct buf *bp)
 	}
 	mtx_unlock(&bqlock);
 
-	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
+	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bufcountwakeup(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	/*
 	 * Something we can maybe free or reuse.
@@ -1898,7 +1937,11 @@ restart:
 		KASSERT((bp->b_flags & B_DELWRI) == 0,
 		    ("delwri buffer %p found in queue %d", bp, qindex));
 
+		if (bp->b_bufobj != NULL)
+			BO_LOCK(bp->b_bufobj);
 		bremfreel(bp);
+		if (bp->b_bufobj != NULL)
+			BO_UNLOCK(bp->b_bufobj);
 		mtx_unlock(&bqlock);
 
 		if (qindex == QUEUE_CLEAN) {
@@ -2635,7 +2678,9 @@ loop:
 			bp->b_flags &= ~B_CACHE;
 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 			bp->b_flags |= B_CACHE;
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 
 		/*
 		 * check for size inconsistancies for non-VMIO case.
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 20982c6..985d73b 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1260,13 +1260,17 @@ flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
 	 */
 	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
 	    (flags & V_SAVE)) {
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bp->b_flags |= B_ASYNC;
 		bwrite(bp);
 		BO_LOCK(bo);
 		return (EAGAIN);	/* XXX: why not loop ? */
 	}
+	BO_LOCK(bo);
 	bremfree(bp);
+	BO_UNLOCK(bo);
 	bp->b_flags |= (B_INVAL | B_RELBUF);
 	bp->b_flags &= ~B_ASYNC;
 	brelse(bp);
@@ -1318,7 +1322,9 @@ restart:
 		    BO_MTX(bo)) == ENOLCK)
 			goto restart;
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bp->b_flags |= (B_INVAL | B_RELBUF);
 		bp->b_flags &= ~B_ASYNC;
 		brelse(bp);
@@ -1340,7 +1346,9 @@ restart:
 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 		    BO_MTX(bo)) == ENOLCK)
 			goto restart;
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bp->b_flags |= (B_INVAL | B_RELBUF);
 		bp->b_flags &= ~B_ASYNC;
 		brelse(bp);
@@ -1372,7 +1380,9 @@ restartsync:
 		VNASSERT((bp->b_flags & B_DELWRI), vp,
 		    ("buf(%p) on dirty queue without DELWRI", bp));
 
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bawrite(bp);
 		BO_LOCK(bo);
 		goto restartsync;
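
The hang itself comes from the waiter side. Below is a loose sketch of the wait loop in the style of softdep's getdirtybuf(), not the verbatim source; the wrapper function is hypothetical, while BV_BKGRDWAIT, msleep(), PRIBIO, and the "getbuf" wmesg are the kernel facilities the commit message refers to:

```c
/*
 * Loose sketch of a waiter that hangs on a stale BV_BKGRDINPROG;
 * modeled on softdep's getdirtybuf(), not copied from it.
 */
static void
wait_for_background_write(struct buf *bp)
{
	struct bufobj *bo = bp->b_bufobj;

	BO_LOCK(bo);
	while ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
		/* Ask the write-completion path for a wakeup, then sleep. */
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_MTX(bo), PRIBIO, "getbuf", 0);
		/*
		 * If BV_BKGRDINPROG was set without the interlock and no
		 * background write is actually in flight, nothing ever
		 * clears the flag or posts the wakeup, and this thread
		 * sleeps in "getbuf" forever; that is the hang fixed here.
		 */
	}
	BO_UNLOCK(bo);
}
```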