summary refs log tree commit diff stats
path: root/sys/sys
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2003-02-25 03:37:48 +0000
committerjeff <jeff@FreeBSD.org>2003-02-25 03:37:48 +0000
commit9e4c9a6ce908881b1e6f83cbb906a9fce08dd3ab (patch)
treedf5eb5e550ba49b92f45eadaca861cb18128ad5d /sys/sys
parent541937cf7373ff6a61c871266ea041503bb02233 (diff)
downloadFreeBSD-src-9e4c9a6ce908881b1e6f83cbb906a9fce08dd3ab.zip
FreeBSD-src-9e4c9a6ce908881b1e6f83cbb906a9fce08dd3ab.tar.gz
- Add an interlock argument to BUF_LOCK and BUF_TIMELOCK.
- Remove the buftimelock mutex and acquire the buf's interlock to protect
  these fields instead.
- Hold the vnode interlock while locking bufs on the clean/dirty queues.
  This reduces some cases from one BUF_LOCK with a LK_NOWAIT and another
  BUF_LOCK with a LK_TIMEFAIL to a single lock.

Reviewed by:	arch, mckusick
Diffstat (limited to 'sys/sys')
-rw-r--r--sys/sys/buf.h23
-rw-r--r--sys/sys/lockmgr.h1
2 files changed, 13 insertions, 11 deletions
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index ac60adf..94c23cc 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -273,7 +273,6 @@ struct buf {
/*
* Buffer locking
*/
-extern struct mtx buftimelock; /* Interlock on setting prio and timo */
extern const char *buf_wmesg; /* Default buffer lock message */
#define BUF_WMESG "bufwait"
#include <sys/proc.h> /* XXX for curthread */
@@ -288,37 +287,39 @@ extern const char *buf_wmesg; /* Default buffer lock message */
*
* Get a lock sleeping non-interruptably until it becomes available.
*/
-static __inline int BUF_LOCK(struct buf *, int);
+static __inline int BUF_LOCK(struct buf *, int, struct mtx *);
static __inline int
-BUF_LOCK(struct buf *bp, int locktype)
+BUF_LOCK(struct buf *bp, int locktype, struct mtx *interlock)
{
int s, ret;
s = splbio();
- mtx_lock(&buftimelock);
- locktype |= LK_INTERLOCK;
+ mtx_lock(bp->b_lock.lk_interlock);
+ locktype |= LK_INTERNAL;
bp->b_lock.lk_wmesg = buf_wmesg;
bp->b_lock.lk_prio = PRIBIO + 4;
- ret = lockmgr(&(bp)->b_lock, locktype, &buftimelock, curthread);
+ ret = lockmgr(&(bp)->b_lock, locktype, interlock, curthread);
splx(s);
return ret;
}
/*
* Get a lock sleeping with specified interruptably and timeout.
*/
-static __inline int BUF_TIMELOCK(struct buf *, int, char *, int, int);
+static __inline int BUF_TIMELOCK(struct buf *, int, struct mtx *,
+ char *, int, int);
static __inline int
-BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
+BUF_TIMELOCK(struct buf *bp, int locktype, struct mtx *interlock,
+ char *wmesg, int catch, int timo)
{
int s, ret;
s = splbio();
- mtx_lock(&buftimelock);
- locktype |= LK_INTERLOCK | LK_TIMELOCK;
+ mtx_lock(bp->b_lock.lk_interlock);
+ locktype |= LK_INTERNAL | LK_TIMELOCK;
bp->b_lock.lk_wmesg = wmesg;
bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
bp->b_lock.lk_timo = timo;
- ret = lockmgr(&(bp)->b_lock, (locktype), &buftimelock, curthread);
+ ret = lockmgr(&(bp)->b_lock, (locktype), interlock, curthread);
splx(s);
return ret;
}
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index a186614..26226cd 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -149,6 +149,7 @@ struct lock {
*/
#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
#define LK_THISLAYER 0x00040000 /* vn_lock: lock/unlock only current layer */
+#define LK_INTERNAL 0x00080000/* The internal lock is already held */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
OpenPOWER on IntegriCloud