path: root/sys/kern/kern_lock.c
author		attilio <attilio@FreeBSD.org>	2008-01-08 23:48:31 +0000
committer	attilio <attilio@FreeBSD.org>	2008-01-08 23:48:31 +0000
commit		9e182da1916d63733010443cc5d45688fe47c3e7 (patch)
tree		8b4bc5a73ee6f8eec360e2bca8b5a48ec5a6bea6 /sys/kern/kern_lock.c
parent		1975c095433557ab04b0b28101fb49a9320af637 (diff)
Remove explicit calling of lockmgr() with the NULL argument.

Now the lockmgr() function can only be called passing curthread, and the
KASSERT() is upgraded accordingly.  In order to support on-the-fly owner
switching, the new function lockmgr_disown() has been introduced and is
used in BUF_KERNPROC().  The KPI has therefore changed, and the FreeBSD
version will be bumped soon.

Unlike the previous code, we assume an idle thread cannot try to acquire
the lockmgr as it cannot sleep, so the corresponding check in
BUF_KERNPROC() is dropped [1].

Tested by:	kris

[1] kib asked for a KASSERT about this condition in lockmgr_disown(),
but after thinking about it, since this is a well-known general rule, I
found it not really necessary.
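For context, a minimal sketch of what the BUF_KERNPROC() side of this
change could look like; sys/buf.h is outside this diff, so the exact
macro/inline shape and the use of the buffer's b_lock field below are
assumptions for illustration, not part of this commit:

/*
 * Hypothetical sketch (sys/buf.h is not shown on this page).  When a
 * buffer is handed off for asynchronous I/O, ownership of its lock
 * moves to the kernel so that biodone() can release it later.
 * Previously this was open-coded, including the idle-thread check
 * mentioned in [1]; after this commit it can simply call the new KPI:
 */
static __inline void
BUF_KERNPROC(struct buf *bp)
{

	lockmgr_disown(&bp->b_lock);
}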
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--	sys/kern/kern_lock.c	65
1 file changed, 42 insertions(+), 23 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 8d1a219..6e5a7b3 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -105,7 +105,7 @@ unlock_lockmgr(struct lock_object *lock)
panic("lockmgr locks do not support sleep interlocking");
}
-#define COUNT(td, x) if ((td)) (td)->td_locks += (x)
+#define COUNT(td, x) ((td)->td_locks += (x))
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
LK_SHARE_NONZERO | LK_WAIT_NONZERO)
@@ -194,24 +194,18 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
{
int error;
- struct thread *thr;
int extflags, lockflags;
int contested = 0;
uint64_t waitstart = 0;
/*
- * Lock owner can only be curthread or, at least, NULL in order to
- * have a deadlock free implementation of the primitive.
+ * Lock owner can only be curthread in order to have a deadlock
+ * free implementation of the primitive.
*/
- KASSERT(td == NULL || td == curthread,
- ("lockmgr: owner thread (%p) cannot differ from curthread or NULL",
- td));
+ KASSERT(td == curthread,
+ ("lockmgr: owner thread (%p) cannot differ from curthread", td));
error = 0;
- if (td == NULL)
- thr = LK_KERNPROC;
- else
- thr = td;
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
@@ -260,7 +254,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* lock requests or upgrade requests ( but not the exclusive
* lock itself ).
*/
- if (lkp->lk_lockholder != thr) {
+ if (lkp->lk_lockholder != td) {
lockflags = LK_HAVE_EXCL;
if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
@@ -286,10 +280,10 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
- KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
+ KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
("lockmgr: not holding exclusive lock "
"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
- lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
+ lkp->lk_lockholder, td, lkp->lk_exclusivecount));
sharelock(td, lkp, lkp->lk_exclusivecount);
COUNT(td, -lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
@@ -308,7 +302,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
- if (lkp->lk_lockholder == thr)
+ if (lkp->lk_lockholder == td)
panic("lockmgr: upgrade exclusive lock");
if (lkp->lk_sharecount <= 0)
panic("lockmgr: upgrade without shared");
@@ -342,7 +336,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = thr;
+ lkp->lk_lockholder = td;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
@@ -362,7 +356,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
/* FALLTHROUGH exclusive request */
case LK_EXCLUSIVE:
- if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
+ if (lkp->lk_lockholder == td) {
/*
* Recursive lock.
*/
@@ -400,7 +394,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
break;
}
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = thr;
+ lkp->lk_lockholder = td;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@@ -413,10 +407,10 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
- if (lkp->lk_lockholder != thr &&
+ if (lkp->lk_lockholder != td &&
lkp->lk_lockholder != LK_KERNPROC) {
panic("lockmgr: thread %p, not %s %p unlocking",
- thr, "exclusive lock holder",
+ td, "exclusive lock holder",
lkp->lk_lockholder);
}
if (lkp->lk_lockholder != LK_KERNPROC)
@@ -433,7 +427,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
shareunlock(td, lkp, 1);
else {
printf("lockmgr: thread %p unlocking unheld lock\n",
- thr);
+ td);
kdb_backtrace();
}
@@ -448,14 +442,14 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
- if (lkp->lk_lockholder == thr)
+ if (lkp->lk_lockholder == td)
panic("lockmgr: draining against myself");
error = acquiredrain(lkp, extflags);
if (error)
break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
- lkp->lk_lockholder = thr;
+ lkp->lk_lockholder = td;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
#if defined(DEBUG_LOCKS)
@@ -544,6 +538,31 @@ lockdestroy(lkp)
}
/*
+ * Disown the lockmgr.
+ */
+void
+lockmgr_disown(struct lock *lkp)
+{
+ struct thread *td;
+
+ td = curthread;
+ KASSERT(lkp->lk_exclusivecount || lkp->lk_lockholder == LK_KERNPROC,
+ ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
+ KASSERT(lkp->lk_lockholder == td || lkp->lk_lockholder == LK_KERNPROC,
+ ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
+ td));
+
+ /*
+ * Drop the lock reference and switch the owner.  This is effectively
+ * atomic, as td_locks is only accessed by curthread and lk_lockholder
+ * only needs a single write.
+ */
+ if (lkp->lk_lockholder == td)
+ td->td_locks--;
+ lkp->lk_lockholder = LK_KERNPROC;
+}
+
+/*
* Determine the status of a lock.
*/
int
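To illustrate the on-the-fly owner switching described in the commit
message, here is a minimal hedged caller sketch; start_async_io() and
its buffer handoff are invented for illustration and are not part of
this commit:

/*
 * Hypothetical caller (not part of this commit): acquire the buffer
 * lock exclusively, then disown it to LK_KERNPROC so that the I/O
 * completion path, running in another thread, may legally unlock it.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/buf.h>

static void
start_async_io(struct buf *bp)
{

	/* Under the new KPI the owner argument must be curthread. */
	lockmgr(&bp->b_lock, LK_EXCLUSIVE, NULL, curthread);

	/* Switch ownership to the kernel; biodone() unlocks later. */
	lockmgr_disown(&bp->b_lock);

	/* ... queue bp to the driver ... */
}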