summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--sys/kern/vfs_bio.c104
-rw-r--r--sys/nfsclient/nfs_bio.c2
-rw-r--r--sys/sys/buf.h3
-rw-r--r--sys/ufs/ffs/ffs_vnops.c3
4 files changed, 72 insertions, 40 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 4c3e193..5562c9e 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -645,24 +645,49 @@ bfreekva(struct buf *bp)
/*
* bremfree:
*
- * Remove the buffer from the appropriate free list.
+ * Mark the buffer for removal from the appropriate free list in brelse.
+ *
*/
void
bremfree(struct buf *bp)
{
+ KASSERT(BUF_REFCNT(bp), ("bremfree: buf must be locked."));
+ KASSERT((bp->b_flags & B_REMFREE) == 0 && bp->b_qindex != QUEUE_NONE,
+ ("bremfree: buffer not on a queue."));
+
+ bp->b_flags |= B_REMFREE;
+ /* Fixup numfreebuffers count. */
+ if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
+ atomic_subtract_int(&numfreebuffers, 1);
+}
+
+/*
+ * bremfreef:
+ *
+ * Force an immediate removal from a free list. Used only in nfs when
+ * it abuses the b_freelist pointer.
+ */
+void
+bremfreef(struct buf *bp)
+{
mtx_lock(&bqlock);
bremfreel(bp);
mtx_unlock(&bqlock);
}
+/*
+ * bremfreel:
+ *
+ * Removes a buffer from the free list; must be called with the
+ * bqlock held.
+ */
void
bremfreel(struct buf *bp)
{
int s = splbio();
- int old_qindex = bp->b_qindex;
- GIANT_REQUIRED;
+ mtx_assert(&bqlock, MA_OWNED);
if (bp->b_qindex != QUEUE_NONE) {
KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
@@ -672,24 +697,22 @@ bremfreel(struct buf *bp)
if (BUF_REFCNT(bp) <= 1)
panic("bremfree: removing a buffer not on a queue");
}
-
/*
- * Fixup numfreebuffers count. If the buffer is invalid or not
- * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
- * the buffer was free and we must decrement numfreebuffers.
+ * If this was a delayed bremfree() we only need to remove the buffer
+ * from the queue and return; the stats are already done.
*/
- if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
- switch(old_qindex) {
- case QUEUE_DIRTY:
- case QUEUE_CLEAN:
- case QUEUE_EMPTY:
- case QUEUE_EMPTYKVA:
- atomic_subtract_int(&numfreebuffers, 1);
- break;
- default:
- break;
- }
+ if (bp->b_flags & B_REMFREE) {
+ bp->b_flags &= ~B_REMFREE;
+ splx(s);
+ return;
}
+ /*
+ * Fixup numfreebuffers count. If the buffer is invalid or not
+ * delayed-write, the buffer was free and we must decrement
+ * numfreebuffers.
+ */
+ if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
+ atomic_subtract_int(&numfreebuffers, 1);
splx(s);
}
@@ -1105,7 +1128,7 @@ bdirty(struct buf *bp)
{
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
- KASSERT(bp->b_qindex == QUEUE_NONE,
+ KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
bp->b_flags &= ~(B_RELBUF);
bp->b_iocmd = BIO_WRITE;
@@ -1135,7 +1158,7 @@ bundirty(struct buf *bp)
{
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
- KASSERT(bp->b_qindex == QUEUE_NONE,
+ KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
if (bp->b_flags & B_DELWRI) {
@@ -1398,8 +1421,6 @@ brelse(struct buf *bp)
}
- if (bp->b_qindex != QUEUE_NONE)
- panic("brelse: free buffer onto another queue???");
if (BUF_REFCNT(bp) > 1) {
/* do not release to free list */
BUF_UNLOCK(bp);
@@ -1409,6 +1430,11 @@ brelse(struct buf *bp)
/* enqueue */
mtx_lock(&bqlock);
+ /* Handle delayed bremfree() processing. */
+ if (bp->b_flags & B_REMFREE)
+ bremfreel(bp);
+ if (bp->b_qindex != QUEUE_NONE)
+ panic("brelse: free buffer onto another queue???");
/* buffers with no memory */
if (bp->b_bufsize == 0) {
@@ -1502,8 +1528,6 @@ bqrelse(struct buf *bp)
KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
- if (bp->b_qindex != QUEUE_NONE)
- panic("bqrelse: free buffer onto another queue???");
if (BUF_REFCNT(bp) > 1) {
/* do not release to free list */
BUF_UNLOCK(bp);
@@ -1511,6 +1535,11 @@ bqrelse(struct buf *bp)
return;
}
mtx_lock(&bqlock);
+ /* Handle delayed bremfree() processing. */
+ if (bp->b_flags & B_REMFREE)
+ bremfreel(bp);
+ if (bp->b_qindex != QUEUE_NONE)
+ panic("bqrelse: free buffer onto another queue???");
/* buffers with stale but valid contents */
if (bp->b_flags & B_DELWRI) {
bp->b_qindex = QUEUE_DIRTY;
@@ -1854,18 +1883,6 @@ restart:
}
/*
- * Sanity Checks
- */
- KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));
-
- /*
- * Note: we no longer distinguish between VMIO and non-VMIO
- * buffers.
- */
-
- KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
-
- /*
* If we are defragging then we need a buffer with
* b_kvasize != 0. XXX this situation should no longer
* occur, if defrag is non-zero the buffer's b_kvasize
@@ -1880,9 +1897,20 @@ restart:
* Start freeing the bp. This is somewhat involved. nbp
* remains valid only for QUEUE_EMPTY[KVA] bp's.
*/
-
if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
- panic("getnewbuf: locked buf");
+ continue;
+ /*
+ * Sanity Checks
+ */
+ KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));
+
+ /*
+ * Note: we no longer distinguish between VMIO and non-VMIO
+ * buffers.
+ */
+
+ KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
+
bremfreel(bp);
mtx_unlock(&bqlock);
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 1ae3157..56e2cb6 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -1241,6 +1241,8 @@ again:
bp->b_wcred = crhold(cred);
}
+ if (bp->b_flags & B_REMFREE)
+ bremfreef(bp);
BUF_KERNPROC(bp);
TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
nmp->nm_bufqlen++;
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index 5a1cea4..c742f31 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -219,7 +219,7 @@ struct buf {
#define B_RAM 0x10000000 /* Read ahead mark (flag) */
#define B_VMIO 0x20000000 /* VMIO flag */
#define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */
-#define B_80000000 0x80000000 /* Available flag. */
+#define B_REMFREE 0x80000000 /* Delayed bremfree */
#define PRINT_BUF_FLAGS "\20\40b31\37cluster\36vmio\35ram\34b27" \
"\33paging\32b25\31b24\30b23\27relbuf\26dirty\25b20" \
@@ -485,6 +485,7 @@ void bufinit(void);
void bwillwrite(void);
int buf_dirty_count_severe(void);
void bremfree(struct buf *);
+void bremfreef(struct buf *); /* XXX Force bremfree, only for nfs. */
int bread(struct vnode *, daddr_t, int, struct ucred *, struct buf **);
int breadn(struct vnode *, daddr_t, int, daddr_t *, int *, int,
struct ucred *, struct buf **);
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index e903b6d..3716bb0 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -208,14 +208,15 @@ loop:
continue;
if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
continue;
+ VI_UNLOCK(vp);
if (!wait && LIST_FIRST(&bp->b_dep) != NULL &&
(bp->b_flags & B_DEFERRED) == 0 &&
buf_countdeps(bp, 0)) {
bp->b_flags |= B_DEFERRED;
BUF_UNLOCK(bp);
+ VI_LOCK(vp);
continue;
}
- VI_UNLOCK(vp);
if ((bp->b_flags & B_DELWRI) == 0)
panic("ffs_fsync: not dirty");
/*
OpenPOWER on IntegriCloud