Diffstat:
-rw-r--r--  sys/gnu/fs/xfs/FreeBSD/xfs_buf.c |  6
-rw-r--r--  sys/gnu/fs/xfs/FreeBSD/xfs_buf.h |  3
-rw-r--r--  sys/kern/kern_shutdown.c         |  2
-rw-r--r--  sys/kern/vfs_bio.c               | 30
-rw-r--r--  sys/kern/vfs_subr.c              |  2
-rw-r--r--  sys/nfs4client/nfs4_vnops.c      |  9
-rw-r--r--  sys/nfsclient/nfs_subs.c         |  2
-rw-r--r--  sys/nfsclient/nfs_vnops.c        |  9
-rw-r--r--  sys/sys/buf.h                    | 36
-rw-r--r--  sys/sys/lockmgr.h                |  2
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c         |  2
11 files changed, 46 insertions, 57 deletions
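
The conversion is mechanical: call sites that inferred buffer-lock ownership from the lockmgr reference count now query the lock state itself. A minimal sketch of the substitution, using a hypothetical assertion helper purely to contrast the two idioms (the old form no longer compiles once BUF_REFCNT() is removed):

    /* Hypothetical helper, for illustration only. */
    static __inline void
    example_assert_locked(struct buf *bp)
    {
    	/*
    	 * Old idiom, removed by this change:
    	 *	KASSERT(BUF_REFCNT(bp) > 0, ("bp %p not locked", bp));
    	 * New idiom: ask the lock directly.
    	 */
    	KASSERT(BUF_ISLOCKED(bp), ("bp %p not locked", bp));
    }

Recursive holds, previously detected with BUF_REFCNT(bp) > 1, are now tested with BUF_LOCKRECURSED(bp), as the hunks below show.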
diff --git a/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c b/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c
index 30aafc4..65030df 100644
--- a/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c
+++ b/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c
@@ -86,7 +86,7 @@ xfs_buf_get_empty(size_t size, xfs_buftarg_t *target)
bp->b_bufsize = size;
bp->b_bcount = size;
- KASSERT(BUF_REFCNT(bp) == 1,
+ KASSERT(BUF_ISLOCKED(bp),
("xfs_buf_get_empty: bp %p not locked",bp));
xfs_buf_set_target(bp, target);
@@ -103,7 +103,7 @@ xfs_buf_get_noaddr(size_t len, xfs_buftarg_t *target)
bp = geteblk(len);
if (bp != NULL) {
- KASSERT(BUF_REFCNT(bp) == 1,
+ KASSERT(BUF_ISLOCKED(bp),
("xfs_buf_get_empty: bp %p not locked",bp));
xfs_buf_set_target(bp, target);
@@ -163,7 +163,7 @@ XFS_bwrite(xfs_buf_t *bp)
if ((bp->b_flags & B_ASYNC) == 0) {
error = bufwait(bp);
#if 0
- if (BUF_REFCNT(bp) > 1)
+ if (BUF_LOCKRECURSED(bp))
BUF_UNLOCK(bp);
else
brelse(bp);
diff --git a/sys/gnu/fs/xfs/FreeBSD/xfs_buf.h b/sys/gnu/fs/xfs/FreeBSD/xfs_buf.h
index c27a14c..34e579d 100644
--- a/sys/gnu/fs/xfs/FreeBSD/xfs_buf.h
+++ b/sys/gnu/fs/xfs/FreeBSD/xfs_buf.h
@@ -160,7 +160,6 @@ xfs_buf_get_error(struct buf *bp)
#define XFS_BUF_HOLD(x) ((void)0)
#define XFS_BUF_UNHOLD(x) ((void)0)
-#define XFS_BUF_ISHOLD(x) BUF_REFCNT(x)
#define XFS_BUF_READ(x) ((x)->b_iocmd = BIO_READ)
#define XFS_BUF_UNREAD(x) ((x)->b_iocmd = 0)
@@ -234,7 +233,7 @@ xfs_buf_offset(xfs_buf_t *bp, size_t offset)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
-#define XFS_BUF_VALUSEMA(bp) (BUF_REFCNT(bp)? 0 : 1)
+#define XFS_BUF_VALUSEMA(bp) (BUF_ISLOCKED(bp) ? 0 : 1)
#define XFS_BUF_CPSEMA(bp) \
(BUF_LOCK(bp, LK_EXCLUSIVE|LK_CANRECURSE | LK_SLEEPFAIL, NULL) == 0)
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 7bbd98f..c3de8c1f 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -255,7 +255,7 @@ static int
isbufbusy(struct buf *bp)
{
if (((bp->b_flags & (B_INVAL | B_PERSISTENT)) == 0 &&
- BUF_REFCNT(bp) > 0) ||
+ BUF_ISLOCKED(bp)) ||
((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
return (1);
return (0);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index e0f395a..8c6cd8a 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -658,7 +658,7 @@ bremfree(struct buf *bp)
{
CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
- KASSERT(BUF_REFCNT(bp), ("bremfree: buf must be locked."));
+ KASSERT(BUF_ISLOCKED(bp), ("bremfree: buf must be locked."));
KASSERT((bp->b_flags & B_REMFREE) == 0,
("bremfree: buffer %p already marked for delayed removal.", bp));
KASSERT(bp->b_qindex != QUEUE_NONE,
@@ -695,7 +695,7 @@ bremfreel(struct buf *bp)
{
CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
bp, bp->b_vp, bp->b_flags);
- KASSERT(BUF_REFCNT(bp), ("bremfreel: buffer %p not locked.", bp));
+ KASSERT(BUF_ISLOCKED(bp), ("bremfreel: buffer %p not locked.", bp));
KASSERT(bp->b_qindex != QUEUE_NONE,
("bremfreel: buffer %p not on a queue.", bp));
mtx_assert(&bqlock, MA_OWNED);
@@ -834,7 +834,7 @@ bufwrite(struct buf *bp)
oldflags = bp->b_flags;
- if (BUF_REFCNT(bp) == 0)
+ if (!BUF_ISLOCKED(bp))
panic("bufwrite: buffer is not busy???");
if (bp->b_pin_count > 0)
@@ -952,7 +952,7 @@ bdwrite(struct buf *bp)
CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
- KASSERT(BUF_REFCNT(bp) != 0, ("bdwrite: buffer is not busy"));
+ KASSERT(BUF_ISLOCKED(bp), ("bdwrite: buffer is not busy"));
if (bp->b_flags & B_INVAL) {
brelse(bp);
@@ -1047,7 +1047,7 @@ bdirty(struct buf *bp)
CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
bp, bp->b_vp, bp->b_flags);
- KASSERT(BUF_REFCNT(bp) == 1, ("bdirty: bp %p not locked",bp));
+ KASSERT(BUF_ISLOCKED(bp), ("bdirty: bp %p not locked",bp));
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
@@ -1081,7 +1081,7 @@ bundirty(struct buf *bp)
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
- KASSERT(BUF_REFCNT(bp) == 1, ("bundirty: bp %p not locked",bp));
+ KASSERT(BUF_ISLOCKED(bp), ("bundirty: bp %p not locked",bp));
if (bp->b_flags & B_DELWRI) {
bp->b_flags &= ~B_DELWRI;
@@ -1341,7 +1341,7 @@ brelse(struct buf *bp)
brelvp(bp);
}
- if (BUF_REFCNT(bp) > 1) {
+ if (BUF_LOCKRECURSED(bp)) {
/* do not release to free list */
BUF_UNLOCK(bp);
return;
@@ -1446,7 +1446,7 @@ bqrelse(struct buf *bp)
KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
- if (BUF_REFCNT(bp) > 1) {
+ if (BUF_LOCKRECURSED(bp)) {
/* do not release to free list */
BUF_UNLOCK(bp);
return;
@@ -2660,7 +2660,7 @@ loop:
bp->b_flags &= ~B_DONE;
}
CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
- KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
+ KASSERT(BUF_ISLOCKED(bp), ("getblk: bp %p not locked",bp));
KASSERT(bp->b_bufobj == bo,
("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
return (bp);
@@ -2681,7 +2681,7 @@ geteblk(int size)
continue;
allocbuf(bp, size);
bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
- KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
+ KASSERT(BUF_ISLOCKED(bp), ("geteblk: bp %p not locked",bp));
return (bp);
}
@@ -2707,7 +2707,7 @@ allocbuf(struct buf *bp, int size)
int newbsize, mbsize;
int i;
- if (BUF_REFCNT(bp) == 0)
+ if (!BUF_ISLOCKED(bp))
panic("allocbuf: buffer not busy");
if (bp->b_kvasize < size)
@@ -3150,8 +3150,7 @@ bufdone(struct buf *bp)
CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
dropobj = NULL;
- KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
- BUF_REFCNT(bp)));
+ KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
runningbufwakeup(bp);
@@ -3176,8 +3175,7 @@ bufdone(struct buf *bp)
void
bufdone_finish(struct buf *bp)
{
- KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
- BUF_REFCNT(bp)));
+ KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
if (!LIST_EMPTY(&bp->b_dep))
buf_complete(bp);
@@ -3943,7 +3941,7 @@ DB_SHOW_COMMAND(lockedbufs, lockedbufs)
for (i = 0; i < nbuf; i++) {
bp = &buf[i];
- if (lockcount(&bp->b_lock)) {
+ if (BUF_ISLOCKED(bp)) {
db_show_buffer((uintptr_t)bp, 1, 0, NULL);
db_printf("\n");
}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 953c1d8..2fe63b0 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3570,7 +3570,7 @@ vop_strategy_pre(void *ap)
if ((bp->b_flags & B_CLUSTER) != 0)
return;
- if (BUF_REFCNT(bp) < 1) {
+ if (!BUF_ISLOCKED(bp)) {
if (vfs_badlock_print)
printf(
"VOP_STRATEGY: bp is not locked but should be\n");
diff --git a/sys/nfs4client/nfs4_vnops.c b/sys/nfs4client/nfs4_vnops.c
index 7cc0fa6..6b31724 100644
--- a/sys/nfs4client/nfs4_vnops.c
+++ b/sys/nfs4client/nfs4_vnops.c
@@ -2446,8 +2446,9 @@ nfs4_strategy(struct vop_strategy_args *ap)
struct ucred *cr;
int error = 0;
- KASSERT(!(bp->b_flags & B_DONE), ("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
- KASSERT(BUF_REFCNT(bp) > 0, ("nfs4_strategy: buffer %p not locked", bp));
+ KASSERT(!(bp->b_flags & B_DONE),
+ ("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
+ KASSERT(BUF_ISLOCKED(bp), ("nfs4_strategy: buffer %p not locked", bp));
if (bp->b_iocmd == BIO_READ)
cr = bp->b_rcred;
@@ -2525,7 +2526,7 @@ again:
bveccount = 0;
VI_LOCK(vp);
TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
- if (BUF_REFCNT(bp) == 0 &&
+ if (!BUF_ISLOCKED(bp) &&
(bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
== (B_DELWRI | B_NEEDCOMMIT))
bveccount++;
@@ -2807,7 +2808,7 @@ nfs4_writebp(struct buf *bp, int force __unused, struct thread *td)
off_t off;
#endif
- if (BUF_REFCNT(bp) == 0)
+ if (!BUF_ISLOCKED(bp))
panic("bwrite: buffer is not locked???");
if (bp->b_flags & B_INVAL) {
diff --git a/sys/nfsclient/nfs_subs.c b/sys/nfsclient/nfs_subs.c
index 4bbf0b0..881b1a1 100644
--- a/sys/nfsclient/nfs_subs.c
+++ b/sys/nfsclient/nfs_subs.c
@@ -918,7 +918,7 @@ nfs_clearcommit(struct mount *mp)
}
MNT_IUNLOCK(mp);
TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
- if (BUF_REFCNT(bp) == 0 &&
+ if (!BUF_ISLOCKED(bp) &&
(bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
== (B_DELWRI | B_NEEDCOMMIT))
bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index 1a27e82..6e9e431 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -2692,8 +2692,9 @@ nfs_strategy(struct vop_strategy_args *ap)
struct buf *bp = ap->a_bp;
struct ucred *cr;
- KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
- KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
+ KASSERT(!(bp->b_flags & B_DONE),
+ ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
+ KASSERT(BUF_ISLOCKED(bp), ("nfs_strategy: buffer %p not locked", bp));
if (bp->b_iocmd == BIO_READ)
cr = bp->b_rcred;
@@ -2771,7 +2772,7 @@ again:
bveccount = 0;
VI_LOCK(vp);
TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
- if (BUF_REFCNT(bp) == 0 &&
+ if (!BUF_ISLOCKED(bp) &&
(bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
== (B_DELWRI | B_NEEDCOMMIT))
bveccount++;
@@ -3087,7 +3088,7 @@ nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
off_t off;
#endif
- if (BUF_REFCNT(bp) == 0)
+ if (!BUF_ISLOCKED(bp))
panic("bwrite: buffer is not locked???");
if (bp->b_flags & B_INVAL) {
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index a684f93..a469e19 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -320,11 +320,22 @@ BUF_UNLOCK(struct buf *bp)
}
/*
+ * Check if a buffer lock is recursed.
+ */
+#define BUF_LOCKRECURSED(bp) \
+ (lockmgr_recursed(&(bp)->b_lock))
+
+/*
+ * Check if a buffer lock is currently held.
+ */
+#define BUF_ISLOCKED(bp) \
+ (lockstatus(&(bp)->b_lock, curthread))
+/*
* Free a buffer lock.
*/
#define BUF_LOCKFREE(bp) \
do { \
- if (BUF_REFCNT(bp) > 0) \
+ if (BUF_ISLOCKED(bp)) \
panic("free locked buf"); \
lockdestroy(&(bp)->b_lock); \
} while (0)
@@ -344,29 +355,6 @@ BUF_KERNPROC(struct buf *bp)
lockmgr_disown(&bp->b_lock);
}
#endif
-/*
- * Find out the number of references to a lock.
- */
-static __inline int BUF_REFCNT(struct buf *);
-static __inline int
-BUF_REFCNT(struct buf *bp)
-{
- int s, ret;
-
- /*
- * When the system is panicing, the lock manager grants all lock
- * requests whether or not the lock is available. To avoid "unlocked
- * buffer" panics after a crash, we just claim that all buffers
- * are locked when cleaning up after a system panic.
- */
- if (panicstr != NULL)
- return (1);
- s = splbio();
- ret = lockcount(&(bp)->b_lock);
- splx(s);
- return ret;
-}
-
/*
* Find out the number of waiters on a lock.
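
Together the two new macros cover the common release pattern touched throughout this change. A minimal caller-side sketch, modeled on the brelse()/bqrelse() and XFS_bwrite() hunks above (the function itself is hypothetical; BUF_ISLOCKED() is true whenever the buffer lock is held, since lockstatus() reports lock state rather than a count):

    /* Hypothetical release path, illustrating the new macros. */
    static void
    example_release(struct buf *bp)
    {
    	KASSERT(BUF_ISLOCKED(bp), ("example_release: bp %p not locked", bp));

    	if (BUF_LOCKRECURSED(bp)) {
    		/* A recursive hold remains; drop only this level. */
    		BUF_UNLOCK(bp);
    		return;
    	}
    	brelse(bp);	/* releases the lock and requeues the buffer */
    }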
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index a305a73f8..bbfb29a 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -204,6 +204,8 @@ int lockcount(struct lock *);
int lockwaiters(struct lock *);
#define lockmgr(lock, flags, mtx, td) _lockmgr((lock), (flags), (mtx), (td), __FILE__, __LINE__)
+#define lockmgr_recursed(lkp) \
+ ((lkp)->lk_exclusivecount > 1)
#ifdef DDB
int lockmgr_chain(struct thread *td, struct thread **ownerp);
#endif
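
lockmgr_recursed() only reports true once the same lock is held exclusively more than once, which lockmgr permits when LK_CANRECURSE is passed, as the XFS_BUF_CPSEMA macro above does. A rough sketch of that behaviour, assuming the lockinit()/lockmgr() interfaces of this vintage:

    struct lock lk;

    lockinit(&lk, PRIBIO, "example", 0, 0);
    lockmgr(&lk, LK_EXCLUSIVE | LK_CANRECURSE, NULL, curthread);
    /* lockmgr_recursed(&lk) is false: a single exclusive hold. */
    lockmgr(&lk, LK_EXCLUSIVE | LK_CANRECURSE, NULL, curthread);
    /* lockmgr_recursed(&lk) is true: lk_exclusivecount is now 2. */
    lockmgr(&lk, LK_RELEASE, NULL, curthread);
    lockmgr(&lk, LK_RELEASE, NULL, curthread);
    lockdestroy(&lk);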
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index ae3fbeb..66c152e 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1718,7 +1718,7 @@ ffs_bufwrite(struct buf *bp)
oldflags = bp->b_flags;
- if (BUF_REFCNT(bp) == 0)
+ if (!BUF_ISLOCKED(bp))
panic("bufwrite: buffer is not busy???");
s = splbio();
/*