summary refs log tree commit diff stats
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/vinum/vinumrequest.c |  4
-rw-r--r--  sys/gnu/ext2fs/fs.h          |  2
-rw-r--r--  sys/gnu/fs/ext2fs/fs.h       |  2
-rw-r--r--  sys/kern/kern_lock.c         | 13
-rw-r--r--  sys/kern/vfs_bio.c           | 36
-rw-r--r--  sys/kern/vfs_cluster.c       |  8
-rw-r--r--  sys/kern/vfs_default.c       |  2
-rw-r--r--  sys/kern/vfs_subr.c          | 75
-rw-r--r--  sys/nfsclient/nfs_vnops.c    | 18
-rw-r--r--  sys/nfsserver/nfs_serv.c     | 10
-rw-r--r--  sys/sys/buf.h                | 23
-rw-r--r--  sys/sys/lockmgr.h            |  1
-rw-r--r--  sys/ufs/ffs/ffs_softdep.c    |  9
-rw-r--r--  sys/ufs/ffs/ffs_vnops.c      |  2
-rw-r--r--  sys/vm/vm_pager.c            |  2
15 files changed, 105 insertions(+), 102 deletions(-)
diff --git a/sys/dev/vinum/vinumrequest.c b/sys/dev/vinum/vinumrequest.c
index 8423ed2..6973e3f 100644
--- a/sys/dev/vinum/vinumrequest.c
+++ b/sys/dev/vinum/vinumrequest.c
@@ -814,7 +814,7 @@ build_rq_buffer(struct rqelement *rqe, struct plex *plex)
panic("build_rq_buffer: rqe already locked"); /* XXX remove this when we're sure */
#endif
BUF_LOCKINIT(bp); /* get a lock for the buffer */
- BUF_LOCK(bp, LK_EXCLUSIVE); /* and lock it */
+ BUF_LOCK(bp, LK_EXCLUSIVE, NULL); /* and lock it */
BUF_KERNPROC(bp);
rqe->flags |= XFR_BUFLOCKED;
bp->b_iodone = complete_rqe;
@@ -949,7 +949,7 @@ sdio(struct buf *bp)
sbp->b.b_blkno = bp->b_blkno + sd->driveoffset;
sbp->b.b_iodone = sdio_done; /* come here on completion */
BUF_LOCKINIT(&sbp->b); /* get a lock for the buffer */
- BUF_LOCK(&sbp->b, LK_EXCLUSIVE); /* and lock it */
+ BUF_LOCK(&sbp->b, LK_EXCLUSIVE, NULL); /* and lock it */
BUF_KERNPROC(&sbp->b);
sbp->bp = bp; /* note the address of the original header */
sbp->sdno = sd->sdno; /* note for statistics */
diff --git a/sys/gnu/ext2fs/fs.h b/sys/gnu/ext2fs/fs.h
index 50fb711..c12c708 100644
--- a/sys/gnu/ext2fs/fs.h
+++ b/sys/gnu/ext2fs/fs.h
@@ -170,7 +170,7 @@ extern u_char *fragtbl[];
s = splbio(); \
flags = (bp)->b_flags; \
(bp)->b_flags &= ~(B_DIRTY | B_LOCKED); \
- BUF_LOCK(bp, LK_EXCLUSIVE); \
+ BUF_LOCK(bp, LK_EXCLUSIVE, NULL); \
bremfree(bp); \
splx(s); \
if (flags & B_DIRTY) \
diff --git a/sys/gnu/fs/ext2fs/fs.h b/sys/gnu/fs/ext2fs/fs.h
index 50fb711..c12c708 100644
--- a/sys/gnu/fs/ext2fs/fs.h
+++ b/sys/gnu/fs/ext2fs/fs.h
@@ -170,7 +170,7 @@ extern u_char *fragtbl[];
s = splbio(); \
flags = (bp)->b_flags; \
(bp)->b_flags &= ~(B_DIRTY | B_LOCKED); \
- BUF_LOCK(bp, LK_EXCLUSIVE); \
+ BUF_LOCK(bp, LK_EXCLUSIVE, NULL); \
bremfree(bp); \
splx(s); \
if (flags & B_DIRTY) \
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index aaf09aa..6ee1c5c 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -232,19 +232,16 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
else
thr = td;
- if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0) {
- if ((flags & LK_INTERLOCK) == 0)
- WITNESS_SLEEP(1, NULL);
- else
- WITNESS_SLEEP(1, &interlkp->mtx_object);
- }
-
- mtx_lock(lkp->lk_interlock);
+ if ((flags & LK_INTERNAL) == 0)
+ mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK) {
mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
mtx_unlock(interlkp);
}
+ if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
+ WITNESS_SLEEP(1, &lkp->lk_interlock->mtx_object);
+
if (panicstr != NULL) {
mtx_unlock(lkp->lk_interlock);
return (0);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a1f31e1..d7f6d2f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -68,7 +68,6 @@ struct buf_ops buf_ops_bio = {
* carnal knowledge of buffers. This knowledge should be moved to vfs_bio.c.
*/
struct buf *buf; /* buffer header pool */
-struct mtx buftimelock; /* Interlock on setting prio and timo */
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
vm_offset_t to);
@@ -519,7 +518,6 @@ bufinit(void)
#ifdef USE_BUFHASH
LIST_INIT(&invalhash);
#endif
- mtx_init(&buftimelock, "buftime lock", NULL, MTX_DEF);
mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
@@ -967,7 +965,7 @@ vfs_backgroundwritedone(bp)
* queue if it currently resides there.
*/
origbp->b_flags &= ~B_LOCKED;
- if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
+ if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
bremfree(origbp);
bqrelse(origbp);
}
@@ -1630,7 +1628,7 @@ vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
return (0);
/* If the buf is busy we don't want to wait for it */
- if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
return (0);
/* Only cluster with valid clusterable delayed write buffers */
@@ -1710,7 +1708,7 @@ vfs_bio_awrite(struct buf * bp)
}
}
- BUF_LOCK(bp, LK_EXCLUSIVE);
+ BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
bremfree(bp);
bp->b_flags |= B_ASYNC;
@@ -1870,7 +1868,7 @@ restart:
* remains valid only for QUEUE_EMPTY[KVA] bp's.
*/
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
panic("getnewbuf: locked buf");
bremfreel(bp);
mtx_unlock(&bqlock);
@@ -2147,7 +2145,7 @@ flushbufqueues(void)
if ((bp->b_xflags & BX_BKGRDINPROG) != 0)
continue;
if (bp->b_flags & B_INVAL) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
panic("flushbufqueues: locked buf");
bremfreel(bp);
mtx_unlock(&bqlock);
@@ -2182,7 +2180,7 @@ flushbufqueues(void)
if ((bp->b_xflags & BX_BKGRDINPROG) != 0)
continue;
if (bp->b_flags & B_INVAL) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
panic("flushbufqueues: locked buf");
bremfreel(bp);
mtx_unlock(&bqlock);
@@ -2407,6 +2405,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
struct buf *bp;
int s;
+ int error;
#ifdef USE_BUFHASH
struct bufhashhdr *bh;
#endif
@@ -2437,19 +2436,24 @@ loop:
VI_LOCK(vp);
if ((bp = gbincore(vp, blkno))) {
- VI_UNLOCK(vp);
/*
* Buffer is in-core. If the buffer is not busy, it must
* be on a queue.
*/
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
- "getblk", slpflag, slptimeo) == ENOLCK)
- goto loop;
- splx(s);
- return (struct buf *) NULL;
- }
+ error = BUF_TIMELOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp), "getblk", slpflag, slptimeo);
+
+ /*
+ * If we slept and got the lock we have to restart in case
+ * the buffer changed identities.
+ */
+ if (error == ENOLCK)
+ goto loop;
+ /* We timed out or were interrupted. */
+ else if (error)
+ return (NULL);
/*
* The buffer is locked. B_CACHE is cleared if the buffer is
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 90c8852..96e541c 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -403,7 +403,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
if ((tbp = incore(vp, lbn + i)) != NULL &&
(tbp->b_flags & B_INVAL) == 0) {
- if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
+ if (BUF_LOCK(tbp,
+ LK_EXCLUSIVE | LK_NOWAIT, NULL))
break;
BUF_UNLOCK(tbp);
@@ -794,7 +795,7 @@ cluster_wbuild(vp, size, start_lbn, len)
*/
if (((tbp = incore(vp, start_lbn)) == NULL) ||
((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
- BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
+ BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
++start_lbn;
--len;
splx(s);
@@ -884,7 +885,8 @@ cluster_wbuild(vp, size, start_lbn, len)
(bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
(tbp->b_flags & B_LOCKED) ||
tbp->b_wcred != bp->b_wcred ||
- BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
+ BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT,
+ NULL)) {
splx(s);
break;
}
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 5caa568..e5f2342 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -745,7 +745,7 @@ loop2:
if ((bp->b_vflags & BV_SCANNED) != 0)
continue;
bp->b_vflags |= BV_SCANNED;
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
continue;
VI_UNLOCK(vp);
if ((bp->b_flags & B_DELWRI) == 0)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index dbebf26..8e556ab 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1218,17 +1218,15 @@ flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
for (found = 0, bp = blist; bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
- VI_LOCK(vp);
continue;
}
found += 1;
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- error = BUF_TIMELOCK(bp,
- LK_EXCLUSIVE | LK_SLEEPFAIL,
- "flushbuf", slpflag, slptimeo);
+ error = BUF_TIMELOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
+ "flushbuf", slpflag, slptimeo);
+ if (error) {
if (error != ENOLCK)
*errorp = error;
goto done;
@@ -1303,50 +1301,48 @@ restart:
anyfreed = 0;
for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if (bp->b_lblkno >= trunclbn) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
+ if (BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp)) == ENOLCK)
goto restart;
- } else {
- bremfree(bp);
- bp->b_flags |= (B_INVAL | B_RELBUF);
- bp->b_flags &= ~B_ASYNC;
- brelse(bp);
- anyfreed = 1;
- }
+
+ bremfree(bp);
+ bp->b_flags |= (B_INVAL | B_RELBUF);
+ bp->b_flags &= ~B_ASYNC;
+ brelse(bp);
+ anyfreed = 1;
+
if (nbp &&
(((nbp->b_xflags & BX_VNCLEAN) == 0) ||
(nbp->b_vp != vp) ||
(nbp->b_flags & B_DELWRI))) {
goto restart;
}
+ VI_LOCK(vp);
}
- VI_LOCK(vp);
}
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if (bp->b_lblkno >= trunclbn) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
+ if (BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp)) == ENOLCK)
goto restart;
- } else {
- bremfree(bp);
- bp->b_flags |= (B_INVAL | B_RELBUF);
- bp->b_flags &= ~B_ASYNC;
- brelse(bp);
- anyfreed = 1;
- }
+ bremfree(bp);
+ bp->b_flags |= (B_INVAL | B_RELBUF);
+ bp->b_flags &= ~B_ASYNC;
+ brelse(bp);
+ anyfreed = 1;
if (nbp &&
(((nbp->b_xflags & BX_VNDIRTY) == 0) ||
(nbp->b_vp != vp) ||
(nbp->b_flags & B_DELWRI) == 0)) {
goto restart;
}
+ VI_LOCK(vp);
}
- VI_LOCK(vp);
}
}
@@ -1354,24 +1350,21 @@ restart:
restartsync:
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
+ if (BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp)) == ENOLCK)
goto restart;
- } else {
- bremfree(bp);
- if (bp->b_vp == vp) {
- bp->b_flags |= B_ASYNC;
- } else {
- bp->b_flags &= ~B_ASYNC;
- }
- BUF_WRITE(bp);
- }
+ bremfree(bp);
+ if (bp->b_vp == vp)
+ bp->b_flags |= B_ASYNC;
+ else
+ bp->b_flags &= ~B_ASYNC;
+
+ BUF_WRITE(bp);
VI_LOCK(vp);
goto restartsync;
}
- VI_LOCK(vp);
}
}
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index 919f2a3..5d7a4d0 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -2658,10 +2658,11 @@ again:
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
if (bvecpos >= bvecsize)
break;
- VI_UNLOCK(vp);
if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
(B_DELWRI | B_NEEDCOMMIT) ||
- BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
+ BUF_LOCK(bp,
+ LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
+ VI_MTX(vp))) {
VI_LOCK(vp);
nbp = TAILQ_NEXT(bp, b_vnbufs);
continue;
@@ -2785,14 +2786,13 @@ loop:
VI_LOCK(vp);
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- if (waitfor != MNT_WAIT || passone) {
- VI_LOCK(vp);
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
+ if (waitfor != MNT_WAIT || passone)
continue;
- }
- error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
- "nfsfsync", slpflag, slptimeo);
+
+ error = BUF_TIMELOCK(bp,
+ LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+ VI_MTX(vp), "nfsfsync", slpflag, slptimeo);
splx(s);
if (error == 0)
panic("nfs_fsync: inconsistent lock");
diff --git a/sys/nfsserver/nfs_serv.c b/sys/nfsserver/nfs_serv.c
index 1d953cd..e35b4dc 100644
--- a/sys/nfsserver/nfs_serv.c
+++ b/sys/nfsserver/nfs_serv.c
@@ -3687,6 +3687,7 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
}
s = splbio();
+ VI_LOCK(vp);
while (cnt > 0) {
struct buf *bp;
@@ -3700,16 +3701,18 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
* should not be set if B_INVAL is set there could be
* a race here since we haven't locked the buffer).
*/
- if ((bp = incore(vp, lblkno)) != NULL &&
+ if ((bp = gbincore(vp, lblkno)) != NULL &&
(bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI) {
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
- BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL);
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
+ LK_INTERLOCK, VI_MTX(vp)) == ENOLCK) {
+ VI_LOCK(vp);
continue; /* retry */
}
bremfree(bp);
bp->b_flags &= ~B_ASYNC;
BUF_WRITE(bp);
++nfs_commit_miss;
+ VI_LOCK(vp);
}
++nfs_commit_blks;
if (cnt < iosize)
@@ -3717,6 +3720,7 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
cnt -= iosize;
++lblkno;
}
+ VI_UNLOCK(vp);
splx(s);
}
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index ac60adf..94c23cc 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -273,7 +273,6 @@ struct buf {
/*
* Buffer locking
*/
-extern struct mtx buftimelock; /* Interlock on setting prio and timo */
extern const char *buf_wmesg; /* Default buffer lock message */
#define BUF_WMESG "bufwait"
#include <sys/proc.h> /* XXX for curthread */
@@ -288,37 +287,39 @@ extern const char *buf_wmesg; /* Default buffer lock message */
*
* Get a lock sleeping non-interruptably until it becomes available.
*/
-static __inline int BUF_LOCK(struct buf *, int);
+static __inline int BUF_LOCK(struct buf *, int, struct mtx *);
static __inline int
-BUF_LOCK(struct buf *bp, int locktype)
+BUF_LOCK(struct buf *bp, int locktype, struct mtx *interlock)
{
int s, ret;
s = splbio();
- mtx_lock(&buftimelock);
- locktype |= LK_INTERLOCK;
+ mtx_lock(bp->b_lock.lk_interlock);
+ locktype |= LK_INTERNAL;
bp->b_lock.lk_wmesg = buf_wmesg;
bp->b_lock.lk_prio = PRIBIO + 4;
- ret = lockmgr(&(bp)->b_lock, locktype, &buftimelock, curthread);
+ ret = lockmgr(&(bp)->b_lock, locktype, interlock, curthread);
splx(s);
return ret;
}
/*
* Get a lock sleeping with specified interruptably and timeout.
*/
-static __inline int BUF_TIMELOCK(struct buf *, int, char *, int, int);
+static __inline int BUF_TIMELOCK(struct buf *, int, struct mtx *,
+ char *, int, int);
static __inline int
-BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
+BUF_TIMELOCK(struct buf *bp, int locktype, struct mtx *interlock,
+ char *wmesg, int catch, int timo)
{
int s, ret;
s = splbio();
- mtx_lock(&buftimelock);
- locktype |= LK_INTERLOCK | LK_TIMELOCK;
+ mtx_lock(bp->b_lock.lk_interlock);
+ locktype |= LK_INTERNAL | LK_TIMELOCK;
bp->b_lock.lk_wmesg = wmesg;
bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
bp->b_lock.lk_timo = timo;
- ret = lockmgr(&(bp)->b_lock, (locktype), &buftimelock, curthread);
+ ret = lockmgr(&(bp)->b_lock, (locktype), interlock, curthread);
splx(s);
return ret;
}
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index a186614..26226cd 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -149,6 +149,7 @@ struct lock {
*/
#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
#define LK_THISLAYER 0x00040000 /* vn_lock: lock/unlock only current layer */
+#define LK_INTERNAL 0x00080000/* The internal lock is already held */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 8d4b5de..c51b02a 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -327,7 +327,7 @@ interlocked_sleep(lk, op, ident, mtx, flags, wmesg, timo)
retval = msleep(ident, mtx, flags, wmesg, timo);
break;
case LOCKBUF:
- retval = BUF_LOCK((struct buf *)ident, flags);
+ retval = BUF_LOCK((struct buf *)ident, flags, NULL);
break;
default:
panic("interlocked_sleep: unknown operation");
@@ -4890,11 +4890,11 @@ softdep_fsync_mountdev(vp)
VI_LOCK(vp);
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
- VI_UNLOCK(vp);
/*
* If it is already scheduled, skip to the next buffer.
*/
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
+ VI_MTX(vp))) {
VI_LOCK(vp);
continue;
}
@@ -5807,7 +5807,8 @@ getdirtybuf(bpp, waitfor)
for (;;) {
if ((bp = *bpp) == NULL)
return (0);
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
+ /* XXX Probably needs interlock */
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
if ((bp->b_xflags & BX_BKGRDINPROG) == 0)
break;
BUF_UNLOCK(bp);
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index 66fc2fe..9b39812 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -200,7 +200,7 @@ loop:
bp->b_vflags |= BV_SCANNED;
if ((skipmeta == 1 && bp->b_lblkno < 0))
continue;
- if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
+ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
continue;
if (!wait && LIST_FIRST(&bp->b_dep) != NULL &&
(bp->b_flags & B_DEFERRED) == 0 &&
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index af671ab..342c98a 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -345,7 +345,7 @@ initpbuf(struct buf *bp)
bp->b_error = 0;
bp->b_magic = B_MAGIC_BIO;
bp->b_op = &buf_ops_bio;
- BUF_LOCK(bp, LK_EXCLUSIVE);
+ BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}
/*
OpenPOWER on IntegriCloud