author		kib <kib@FreeBSD.org>	2010-08-12 08:36:23 +0000
committer	kib <kib@FreeBSD.org>	2010-08-12 08:36:23 +0000
commit		ade28bdd4036313b9b17b915d917a7851f02122f (patch)
tree		bea00f78c841645c41a70f3ff5730886d006bf0e /sys/kern/vfs_bio.c
parent		60a46e5ff9a6a8f27e212d1b48b21486e7a11f12 (diff)
download	FreeBSD-src-ade28bdd4036313b9b17b915d917a7851f02122f.zip
		FreeBSD-src-ade28bdd4036313b9b17b915d917a7851f02122f.tar.gz
The buffer's b_vflags field is not always properly protected by the
bufobj lock. If b_bufobj is not NULL, then the bufobj lock should be
held when manipulating the flags. Not doing so sometimes leaves
BV_BKGRDINPROG erroneously set, causing softdep's getdirtybuf() to get
stuck indefinitely in the "getbuf" sleep, waiting for a background
write to finish that is never actually performed. Add BO_LOCK() in the
cases where it was missed.

In collaboration with:	pho
Tested by:	bz
Reviewed by:	jeff
MFC after:	1 month
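A userland sketch of the invariant this change enforces may help: whenever a
buffer has an associated bufobj, that object's lock must be held while
b_vflags is manipulated. Here pthread mutexes plus an explicit owner field
stand in for the kernel's BO_LOCK()/BO_MTX() and mtx_assert(..., MA_OWNED);
the struct and flag names mirror vfs_bio.c, but the helpers themselves are
hypothetical, not kernel API.

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

#define	BV_INFREECNT	0x00000001	/* plays the role of the kernel flag */

/* Stand-in for struct bufobj; bo_owner/bo_locked emulate MA_OWNED checks. */
struct bufobj {
	pthread_mutex_t	bo_mtx;
	pthread_t	bo_owner;
	int		bo_locked;
};

struct buf {
	struct bufobj	*b_bufobj;	/* may be NULL */
	int		b_vflags;	/* protected by b_bufobj's lock */
};

static void
bo_lock(struct bufobj *bo)
{
	pthread_mutex_lock(&bo->bo_mtx);
	bo->bo_owner = pthread_self();
	bo->bo_locked = 1;
}

static void
bo_unlock(struct bufobj *bo)
{
	bo->bo_locked = 0;
	pthread_mutex_unlock(&bo->bo_mtx);
}

/* Analogue of the asserts the patch adds to bufcountwakeup()/bremfree(). */
static void
assert_bufobj_owned(struct buf *bp)
{
	struct bufobj *bo = bp->b_bufobj;

	if (bo != NULL)
		assert(bo->bo_locked &&
		    pthread_equal(bo->bo_owner, pthread_self()));
}

/* Every b_vflags update must run on a path that holds the lock. */
static void
buf_set_infreecnt(struct buf *bp)
{
	assert_bufobj_owned(bp);
	bp->b_vflags |= BV_INFREECNT;
}

int
main(void)
{
	struct bufobj bo = { .bo_mtx = PTHREAD_MUTEX_INITIALIZER };
	struct buf bp = { .b_bufobj = &bo };

	bo_lock(&bo);
	buf_set_infreecnt(&bp);		/* legal: lock held */
	bo_unlock(&bo);
	return (0);
}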
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--	sys/kern/vfs_bio.c	53
1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 3bed100..5e2cca2 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -398,6 +398,8 @@ bufcountwakeup(struct buf *bp)
 	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
 	    ("buf %p already counted as free", bp));
+	if (bp->b_bufobj != NULL)
+		mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 	bp->b_vflags |= BV_INFREECNT;
 	old = atomic_fetchadd_int(&numfreebuffers, 1);
 	KASSERT(old >= 0 && old < nbuf,
 	    ("numfreebuffers climbed to %d", old + 1));
@@ -714,6 +716,8 @@ bremfree(struct buf *bp)
 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
 		    ("buf %p not counted in numfreebuffers", bp));
+		if (bp->b_bufobj != NULL)
+			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 		bp->b_vflags &= ~BV_INFREECNT;
 		old = atomic_fetchadd_int(&numfreebuffers, -1);
 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
@@ -770,6 +774,8 @@ bremfreel(struct buf *bp)
 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
 		    ("buf %p not counted in numfreebuffers", bp));
+		if (bp->b_bufobj != NULL)
+			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 		bp->b_vflags &= ~BV_INFREECNT;
 		old = atomic_fetchadd_int(&numfreebuffers, -1);
 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
@@ -1412,8 +1418,16 @@ brelse(struct buf *bp)
 	/* enqueue */
 	mtx_lock(&bqlock);
 	/* Handle delayed bremfree() processing. */
-	if (bp->b_flags & B_REMFREE)
+	if (bp->b_flags & B_REMFREE) {
+		struct bufobj *bo;
+
+		bo = bp->b_bufobj;
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bremfreel(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	if (bp->b_qindex != QUEUE_NONE)
 		panic("brelse: free buffer onto another queue???");
@@ -1474,8 +1488,16 @@ brelse(struct buf *bp)
 	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
 	 * if B_INVAL is set ).
 	 */
-	if (!(bp->b_flags & B_DELWRI))
+	if (!(bp->b_flags & B_DELWRI)) {
+		struct bufobj *bo;
+
+		bo = bp->b_bufobj;
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bufcountwakeup(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	/*
 	 * Something we can maybe free or reuse
@@ -1504,6 +1526,8 @@ brelse(struct buf *bp)
 void
 bqrelse(struct buf *bp)
 {
+	struct bufobj *bo;
+
 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
@@ -1514,10 +1538,15 @@ bqrelse(struct buf *bp)
 		return;
 	}
 
+	bo = bp->b_bufobj;
 	if (bp->b_flags & B_MANAGED) {
 		if (bp->b_flags & B_REMFREE) {
 			mtx_lock(&bqlock);
+			if (bo != NULL)
+				BO_LOCK(bo);
 			bremfreel(bp);
+			if (bo != NULL)
+				BO_UNLOCK(bo);
 			mtx_unlock(&bqlock);
 		}
 		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
@@ -1527,8 +1556,13 @@ bqrelse(struct buf *bp)
 	mtx_lock(&bqlock);
 	/* Handle delayed bremfree() processing. */
-	if (bp->b_flags & B_REMFREE)
+	if (bp->b_flags & B_REMFREE) {
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bremfreel(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	if (bp->b_qindex != QUEUE_NONE)
 		panic("bqrelse: free buffer onto another queue???");
 	/* buffers with stale but valid contents */
@@ -1563,8 +1597,13 @@ bqrelse(struct buf *bp)
 	}
 	mtx_unlock(&bqlock);
 
-	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
+	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bufcountwakeup(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	/*
 	 * Something we can maybe free or reuse.
@@ -1898,7 +1937,11 @@ restart:
 		KASSERT((bp->b_flags & B_DELWRI) == 0,
 		    ("delwri buffer %p found in queue %d", bp, qindex));
 
+		if (bp->b_bufobj != NULL)
+			BO_LOCK(bp->b_bufobj);
 		bremfreel(bp);
+		if (bp->b_bufobj != NULL)
+			BO_UNLOCK(bp->b_bufobj);
 		mtx_unlock(&bqlock);
 
 		if (qindex == QUEUE_CLEAN) {
@@ -2635,7 +2678,9 @@ loop:
 			bp->b_flags &= ~B_CACHE;
 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 			bp->b_flags |= B_CACHE;
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 
 		/*
 		 * check for size inconsistancies for non-VMIO case.
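Note the one pattern the diff repeats at every bremfreel() and
bufcountwakeup() call site: read b_bufobj once, take its lock only if it is
non-NULL, make the call, and drop the lock the same way (getblk() locks
unconditionally because bo is known to be non-NULL there). A minimal sketch
of that conditional-wrap idiom, reusing the userland types and helpers from
the sketch after the commit message; bremfreel_model() is a hypothetical
stand-in for the kernel's bremfreel(), not its actual body.

/* Requires the bufobj lock whenever b_bufobj != NULL, like bremfreel(). */
static void
bremfreel_model(struct buf *bp)
{
	assert_bufobj_owned(bp);
	bp->b_vflags &= ~BV_INFREECNT;
}

/* The wrap the patch adds at each brelse()/bqrelse() call site. */
static void
bremfree_wrapped(struct buf *bp)
{
	struct bufobj *bo;

	bo = bp->b_bufobj;		/* read once; may be NULL */
	if (bo != NULL)
		bo_lock(bo);
	bremfreel_model(bp);
	if (bo != NULL)
		bo_unlock(bo);
}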