path: root/sys
author     attilio <attilio@FreeBSD.org>    2008-04-06 20:08:51 +0000
committer  attilio <attilio@FreeBSD.org>    2008-04-06 20:08:51 +0000
commit     07441f19e1222522a776d90e80001fe97ae1c962 (patch)
tree       2d6f3d40588b2302cacf1c1cc30a8c323b74b5e9 /sys
parent     2f4904816fb159c02b0f2efb5956fa5d7d029368 (diff)
download   FreeBSD-src-07441f19e1222522a776d90e80001fe97ae1c962.zip
           FreeBSD-src-07441f19e1222522a776d90e80001fe97ae1c962.tar.gz
Optimize lockmgr in order to get rid of the pool mutex interlock, of the
state transitioning flags and of the msleep(9) calls. Use, instead, an
algorithm very similar to what sx(9) and rwlock(9) already do and direct
accesses to the sleepqueue(9) primitive. In order to avoid writer
starvation, a mechanism very similar to what rwlock(9) now uses is
implemented, with a corresponding per-thread counter of held shared
lockmgr locks.

This patch also adds 2 new functions to the lockmgr KPI: lockmgr_rw() and
lockmgr_args_rw(). These two are like the 2 "normal" versions, but they
both accept a rwlock as the interlock. In order to realize this, the
general lockmgr function "__lockmgr_args()" has been implemented through
the generic lock layer. It supports all the blocking primitives, but
currently only these 2 mappers exist.

The patch drops support for WITNESS for the moment, but it will probably
be added back soon. Also, there is a small race in the draining code which
is also present in the current CVS stock implementation: if some sharers,
once they wake up, are on the runqueue, they can contend the lock with the
exclusive drainer. This is hard to fix, but the committed code mitigates
the issue a lot better than the (past) CVS version. In addition, the
KA_HELD and KA_UNHELD assertions have been turned into no-ops because they
are dangerous and will no longer be supported soon.

In order to avoid namespace pollution, stack.h is split into two parts:
one which includes only the "struct stack" definition (_stack.h) and one
defining the KPI. In this way the newly added _lockmgr.h can include just
_stack.h.

The kernel ABI is heavily changed by this commit (the committed version of
"struct lock" is a lot smaller than the previous one) and the KPI is
broken by the lockmgr_rw() / lockmgr_args_rw() introduction, so manpages
and __FreeBSD_version will be updated accordingly.

Tested by:      kris, pho, jeff, danger
Reviewed by:    jeff
Sponsored by:   Google, Summer of Code program 2007
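
As a quick orientation for the new KPI described above, the following is a
minimal, hypothetical consumer sketch (not part of this commit). It assumes
that lockmgr_rw() takes the rwlock interlock as its third argument, mirroring
the traditional mutex-interlock idiom, and that LK_INTERLOCK causes the call
to release the interlock before any sleep; the "foo_*" names, the PRIBIO
priority and the flag combinations are illustrative only.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/rwlock.h>

static struct lock   foo_lock;       /* sleepable lockmgr lock */
static struct rwlock foo_interlock;  /* rwlock used as interlock */

static void
foo_init(void)
{

	rw_init(&foo_interlock, "foo interlock");
	lockinit(&foo_lock, PRIBIO, "foolk", 0, 0);
}

static void
foo_operation(void)
{

	/*
	 * Acquire the rwlock first, then pass it to lockmgr_rw() with
	 * LK_INTERLOCK so that it is dropped (and not reacquired) once the
	 * lockmgr lock has been obtained or the thread goes to sleep.
	 */
	rw_wlock(&foo_interlock);
	(void)lockmgr_rw(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &foo_interlock);

	/* ... work protected by the exclusive lockmgr lock ... */

	(void)lockmgr_rw(&foo_lock, LK_RELEASE, NULL);
}
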
Diffstat (limited to 'sys')
-rw-r--r--  sys/fs/unionfs/union_subr.c  |    9
-rw-r--r--  sys/kern/kern_lock.c         | 1428
-rw-r--r--  sys/sys/_lockmgr.h           |   49
-rw-r--r--  sys/sys/_stack.h             |   39
-rw-r--r--  sys/sys/lockmgr.h            |  323
-rw-r--r--  sys/sys/proc.h               |    3
-rw-r--r--  sys/sys/sleepqueue.h         |    1
-rw-r--r--  sys/sys/stack.h              |    7
-rw-r--r--  sys/sys/vnode.h              |    6
-rw-r--r--  sys/ufs/ffs/ffs_softdep.c    |    4
10 files changed, 1043 insertions, 826 deletions
diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c
index 2db12a5..05e9399 100644
--- a/sys/fs/unionfs/union_subr.c
+++ b/sys/fs/unionfs/union_subr.c
@@ -538,12 +538,13 @@ static void
unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
struct thread *td)
{
- int count, lockcnt;
+ unsigned count, lockrec;
struct vnode *vp;
struct vnode *lvp;
vp = UNIONFSTOV(unp);
lvp = unp->un_lowervp;
+ ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update");
/*
* lock update
@@ -551,11 +552,9 @@ unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
VI_LOCK(vp);
unp->un_uppervp = uvp;
vp->v_vnlock = uvp->v_vnlock;
- lockcnt = lvp->v_vnlock->lk_exclusivecount;
- if (lockcnt <= 0)
- panic("unionfs: no exclusive lock");
VI_UNLOCK(vp);
- for (count = 1; count < lockcnt; count++)
+ lockrec = lvp->v_vnlock->lk_recurse;
+ for (count = 0; count < lockrec; count++)
vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
}
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 987b361..93c262b 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,692 +1,900 @@
/*-
- * Copyright (c) 1995
- * The Regents of the University of California. All rights reserved.
- *
- * Copyright (C) 1997
- * John S. Dyson. All rights reserved.
- *
- * This code contains ideas from software contributed to Berkeley by
- * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
- * System project at Carnegie-Mellon University.
+ * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
+ * notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
+#include "opt_ddb.h"
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include "opt_ddb.h"
-#include "opt_global.h"
-
#include <sys/param.h>
-#include <sys/kdb.h>
-#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
+#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
-#include <sys/systm.h>
-#include <sys/lock_profile.h>
+#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
+#include <sys/systm.h>
-#define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT)
-#define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
-#define LOCKMGR_UNHELD(x) (((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
-#define LOCKMGR_NOTOWNER(td) ((td) != curthread && (td) != LK_KERNPROC)
+#include <machine/cpu.h>
-static void assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
#include <ddb/ddb.h>
-static void db_show_lockmgr(struct lock_object *lock);
#endif
-static void lock_lockmgr(struct lock_object *lock, int how);
-static int unlock_lockmgr(struct lock_object *lock);
+
+CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
+ (LK_CANRECURSE | LK_NOSHARE));
+
+#define SQ_EXCLUSIVE_QUEUE 0
+#define SQ_SHARED_QUEUE 1
+
+#ifndef INVARIANTS
+#define _lockmgr_assert(lk, what, file, line)
+#define TD_LOCKS_INC(td)
+#define TD_LOCKS_DEC(td)
+#else
+#define TD_LOCKS_INC(td) ((td)->td_locks++)
+#define TD_LOCKS_DEC(td) ((td)->td_locks--)
+#endif
+#define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
+#define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
+
+#ifndef DEBUG_LOCKS
+#define STACK_PRINT(lk)
+#define STACK_SAVE(lk)
+#define STACK_ZERO(lk)
+#else
+#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
+#define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
+#define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
+#endif
+
+#define LOCK_LOG2(lk, string, arg1, arg2) \
+ if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
+ CTR2(KTR_LOCK, (string), (arg1), (arg2))
+#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
+ if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
+ CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
+
+#define LK_TRYOP(x) \
+ ((x) & LK_NOWAIT)
+#define LK_CAN_SHARE(x) \
+ (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \
+ curthread->td_lk_slocks))
+
+#define lockmgr_disowned(lk) \
+ (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
+
+#define lockmgr_xlocked(lk) \
+ (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
+
+static void assert_lockmgr(struct lock_object *lock, int how);
+#ifdef DDB
+static void db_show_lockmgr(struct lock_object *lock);
+#endif
+static void lock_lockmgr(struct lock_object *lock, int how);
+static int unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
.lc_name = "lockmgr",
- .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
+ .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
.lc_assert = assert_lockmgr,
#ifdef DDB
.lc_ddb_show = db_show_lockmgr,
#endif
.lc_lock = lock_lockmgr,
- .lc_unlock = unlock_lockmgr,
+ .lc_unlock = unlock_lockmgr
};
-#ifndef INVARIANTS
-#define _lockmgr_assert(lkp, what, file, line)
-#endif
+static __inline struct thread *
+lockmgr_xholder(struct lock *lk)
+{
+ uintptr_t x;
+
+ x = lk->lk_lock;
+ return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
+}
/*
- * Locking primitives implementation.
- * Locks provide shared/exclusive sychronization.
+ * It assumes the sleepqueue chain lock is held and returns with it released.
+ * It also assumes the generic interlock is sane and previously checked.
+ * If LK_INTERLOCK is specified the interlock is not reacquired after the
+ * sleep.
*/
+static __inline int
+sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
+ const char *wmesg, int pri, int timo, int queue)
+{
+ struct lock_class *class;
+ int catch, error;
-void
+ class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
+ catch = (pri) ? (pri & PCATCH) : 0;
+ pri &= PRIMASK;
+ error = 0;
+
+ LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
+ (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
+
+ if (flags & LK_INTERLOCK)
+ class->lc_unlock(ilk);
+ DROP_GIANT();
+ sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
+ SLEEPQ_INTERRUPTIBLE : 0), queue);
+ if ((flags & LK_TIMELOCK) && timo)
+ sleepq_set_timeout(&lk->lock_object, timo);
+
+ /*
+ * Decisional switch for real sleeping.
+ */
+ if ((flags & LK_TIMELOCK) && timo && catch)
+ error = sleepq_timedwait_sig(&lk->lock_object, pri);
+ else if ((flags & LK_TIMELOCK) && timo)
+ error = sleepq_timedwait(&lk->lock_object, pri);
+ else if (catch)
+ error = sleepq_wait_sig(&lk->lock_object, pri);
+ else
+ sleepq_wait(&lk->lock_object, pri);
+ PICKUP_GIANT();
+ if ((flags & LK_SLEEPFAIL) && error == 0)
+ error = ENOLCK;
+
+ return (error);
+}
+
+static __inline void
+wakeupshlk(struct lock *lk, const char *file, int line)
+{
+ uintptr_t v, x;
+ int queue;
+
+ TD_LOCKS_DEC(curthread);
+ TD_SLOCKS_DEC(curthread);
+ LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
+
+ for (;;) {
+ x = lk->lk_lock;
+
+ /*
+ * If there is more than one shared lock held, just drop one
+ * and return.
+ */
+ if (LK_SHARERS(x) > 1) {
+ if (atomic_cmpset_ptr(&lk->lk_lock, x,
+ x - LK_ONE_SHARER))
+ break;
+ continue;
+ }
+
+ /*
+ * If there are no waiters on the exclusive queue, drop the
+ * lock quickly.
+ */
+ if ((x & LK_ALL_WAITERS) == 0) {
+ MPASS(x == LK_SHARERS_LOCK(1));
+ if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
+ LK_UNLOCKED))
+ break;
+ continue;
+ }
+
+ /*
+ * We should have a sharer with waiters, so enter the hard
+ * path in order to handle wakeups correctly.
+ */
+ sleepq_lock(&lk->lock_object);
+ x = lk->lk_lock & LK_ALL_WAITERS;
+ v = LK_UNLOCKED;
+
+ /*
+ * If the lock has exclusive waiters, give them preference in
+ * order to avoid deadlock with shared runners up.
+ */
+ if (x & LK_EXCLUSIVE_WAITERS) {
+ queue = SQ_EXCLUSIVE_QUEUE;
+ v |= (x & LK_SHARED_WAITERS);
+ } else {
+ MPASS(x == LK_SHARED_WAITERS);
+ queue = SQ_SHARED_QUEUE;
+ }
+
+ if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
+ v)) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+ LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
+ __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
+ "exclusive");
+ sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+ sleepq_release(&lk->lock_object);
+ break;
+ }
+
+ lock_profile_release_lock(&lk->lock_object);
+}
+
+static void
assert_lockmgr(struct lock_object *lock, int what)
{
panic("lockmgr locks do not support assertions");
}
-void
+static void
lock_lockmgr(struct lock_object *lock, int how)
{
panic("lockmgr locks do not support sleep interlocking");
}
-int
+static int
unlock_lockmgr(struct lock_object *lock)
{
panic("lockmgr locks do not support sleep interlocking");
}
-#define COUNT(td, x) ((td)->td_locks += (x))
-#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
- LK_SHARE_NONZERO | LK_WAIT_NONZERO)
+void
+lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
+{
+ int iflags;
-static int acquire(struct lock **lkpp, int extflags, int wanted,
- const char *wmesg, int prio, int timo, int *contested, uint64_t *waittime);
-static int acquiredrain(struct lock *lkp, int extflags, const char *wmesg,
- int prio, int timo);
+ MPASS((flags & ~LK_INIT_MASK) == 0);
-static __inline void
-sharelock(struct thread *td, struct lock *lkp, int incr) {
- lkp->lk_flags |= LK_SHARE_NONZERO;
- lkp->lk_sharecount += incr;
- COUNT(td, incr);
+ iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
+ if ((flags & LK_NODUP) == 0)
+ iflags |= LO_DUPOK;
+ if (flags & LK_NOPROFILE)
+ iflags |= LO_NOPROFILE;
+ if ((flags & LK_NOWITNESS) == 0)
+ iflags |= LO_WITNESS;
+ if (flags & LK_QUIET)
+ iflags |= LO_QUIET;
+ iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);
+
+ lk->lk_lock = LK_UNLOCKED;
+ lk->lk_recurse = 0;
+ lk->lk_timo = timo;
+ lk->lk_pri = pri;
+ lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
+ STACK_ZERO(lk);
}
-static __inline void
-shareunlock(struct thread *td, struct lock *lkp, int decr) {
-
- KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
+void
+lockdestroy(struct lock *lk)
+{
- COUNT(td, -decr);
- if (lkp->lk_sharecount == decr) {
- lkp->lk_flags &= ~LK_SHARE_NONZERO;
- if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
- wakeup(lkp);
- }
- lkp->lk_sharecount = 0;
- } else {
- lkp->lk_sharecount -= decr;
- }
+ KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
+ KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
+ lock_destroy(&lk->lock_object);
}
-static int
-acquire(struct lock **lkpp, int extflags, int wanted, const char *wmesg,
- int prio, int timo, int *contested, uint64_t *waittime)
+int
+__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
+ const char *wmesg, int pri, int timo, const char *file, int line)
{
- struct lock *lkp = *lkpp;
+ uint64_t waittime;
+ struct lock_class *class;
const char *iwmesg;
- int error, iprio, itimo;
-
- iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
- iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
- itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
+ uintptr_t tid, v, x;
+ u_int op;
+ int contested, error, ipri, itimo, queue;
- CTR3(KTR_LOCK,
- "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
- lkp, extflags, wanted);
-
- if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
- return EBUSY;
+ contested = 0;
error = 0;
- if ((lkp->lk_flags & wanted) != 0)
- lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
-
- while ((lkp->lk_flags & wanted) != 0) {
- CTR2(KTR_LOCK,
- "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
- lkp, lkp->lk_flags);
- lkp->lk_flags |= LK_WAIT_NONZERO;
- lkp->lk_waitcount++;
- error = msleep(lkp, lkp->lk_interlock, iprio, iwmesg,
- ((extflags & LK_TIMELOCK) ? itimo : 0));
- lkp->lk_waitcount--;
- if (lkp->lk_waitcount == 0)
- lkp->lk_flags &= ~LK_WAIT_NONZERO;
- if (error)
- break;
- if (extflags & LK_SLEEPFAIL) {
- error = ENOLCK;
- break;
- }
- if (lkp->lk_newlock != NULL) {
- mtx_lock(lkp->lk_newlock->lk_interlock);
- mtx_unlock(lkp->lk_interlock);
- if (lkp->lk_waitcount == 0)
- wakeup((void *)(&lkp->lk_newlock));
- *lkpp = lkp = lkp->lk_newlock;
- }
+ waittime = 0;
+ tid = (uintptr_t)curthread;
+ op = (flags & LK_TYPE_MASK);
+ iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
+ ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
+ itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
+
+ MPASS((flags & ~LK_TOTAL_MASK) == 0);
+ KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
+ (op != LK_DOWNGRADE && op != LK_RELEASE),
+ ("%s: Invalid flags in regard of the operation desired @ %s:%d",
+ __func__, file, line));
+ KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
+ ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
+ __func__, file, line));
+
+ class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
+ if (panicstr != NULL) {
+ if (flags & LK_INTERLOCK)
+ class->lc_unlock(ilk);
+ return (0);
}
- mtx_assert(lkp->lk_interlock, MA_OWNED);
- return (error);
-}
-/*
- * Set, change, or release a lock.
- *
- * Shared requests increment the shared count. Exclusive requests set the
- * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
- * accepted shared locks and shared-to-exclusive upgrades to go away.
- */
-int
-_lockmgr_args(struct lock *lkp, u_int flags, struct mtx *interlkp,
- const char *wmesg, int prio, int timo, char *file, int line)
+ if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
+ op = LK_EXCLUSIVE;
-{
- struct thread *td;
- int error;
- int extflags, lockflags;
- int contested = 0;
- uint64_t waitstart = 0;
+ switch (op) {
+ case LK_SHARED:
+ for (;;) {
+ x = lk->lk_lock;
- error = 0;
- td = curthread;
+ /*
+ * If no other thread has an exclusive lock, or
+ * no exclusive waiter is present, bump the count of
+ * sharers. Since we have to preserve the state of
+ * waiters, if we fail to acquire the shared lock
+ * loop back and retry.
+ */
+ if (LK_CAN_SHARE(x)) {
+ if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
+ x + LK_ONE_SHARER))
+ break;
+ continue;
+ }
+ lock_profile_obtain_lock_failed(&lk->lock_object,
+ &contested, &waittime);
-#ifdef INVARIANTS
- if (lkp->lk_flags & LK_DESTROYED) {
- if (flags & LK_INTERLOCK)
- mtx_unlock(interlkp);
- if (panicstr != NULL)
- return (0);
- panic("%s: %p lockmgr is destroyed", __func__, lkp);
- }
-#endif
- mtx_lock(lkp->lk_interlock);
- CTR6(KTR_LOCK,
- "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
- "td == %p", lkp, (wmesg != LK_WMESG_DEFAULT) ? wmesg :
- lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount, flags,
- td);
-#ifdef DEBUG_LOCKS
- {
- struct stack stack; /* XXX */
- stack_save(&stack);
- CTRSTACK(KTR_LOCK, &stack, 0, 1);
- }
-#endif
+ /*
+ * If the lock is already held by curthread in an
+ * exclusive way, avoid a deadlock.
+ */
+ if (LK_HOLDER(x) == tid) {
+ LOCK_LOG2(lk,
+ "%s: %p alredy held in exclusive mode",
+ __func__, lk);
+ error = EDEADLK;
+ break;
+ }
- if (flags & LK_INTERLOCK) {
- mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
- mtx_unlock(interlkp);
- }
+ /*
+ * If the lock is expected to not sleep just give up
+ * and return.
+ */
+ if (LK_TRYOP(flags)) {
+ LOCK_LOG2(lk, "%s: %p fails the try operation",
+ __func__, lk);
+ error = EBUSY;
+ break;
+ }
- if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
- WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
- &lkp->lk_interlock->lock_object,
- "Acquiring lockmgr lock \"%s\"",
- (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg);
+ /*
+ * Acquire the sleepqueue chain lock because we
+ * probably will need to manipulate waiters flags.
+ */
+ sleepq_lock(&lk->lock_object);
+ x = lk->lk_lock;
- if (panicstr != NULL) {
- mtx_unlock(lkp->lk_interlock);
- return (0);
- }
- if ((lkp->lk_flags & LK_NOSHARE) &&
- (flags & LK_TYPE_MASK) == LK_SHARED) {
- flags &= ~LK_TYPE_MASK;
- flags |= LK_EXCLUSIVE;
- }
- extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+ /*
+ * if the lock can be acquired in shared mode, try
+ * again.
+ */
+ if (LK_CAN_SHARE(x)) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
- switch (flags & LK_TYPE_MASK) {
+ /*
+ * Try to set the LK_SHARED_WAITERS flag. If we fail,
+ * loop back and retry.
+ */
+ if ((x & LK_SHARED_WAITERS) == 0) {
+ if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
+ x | LK_SHARED_WAITERS)) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+ LOCK_LOG2(lk, "%s: %p set shared waiters flag",
+ __func__, lk);
+ }
- case LK_SHARED:
- if (!LOCKMGR_TRYOP(extflags))
- WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file,
+ /*
+ * Since we have been unable to acquire the
+ * shared lock and the shared waiters flag is set,
+ * we will sleep.
+ */
+ error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
+ SQ_SHARED_QUEUE);
+ flags &= ~LK_INTERLOCK;
+ if (error) {
+ LOCK_LOG3(lk,
+ "%s: interrupted sleep for %p with %d",
+ __func__, lk, error);
+ break;
+ }
+ LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
+ __func__, lk);
+ }
+ if (error == 0) {
+ lock_profile_obtain_lock_success(&lk->lock_object,
+ contested, waittime, file, line);
+ LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
line);
+ TD_LOCKS_INC(curthread);
+ TD_SLOCKS_INC(curthread);
+ STACK_SAVE(lk);
+ }
+ break;
+ case LK_UPGRADE:
+ _lockmgr_assert(lk, KA_SLOCKED, file, line);
+ x = lk->lk_lock & LK_ALL_WAITERS;
+
/*
- * If we are not the exclusive lock holder, we have to block
- * while there is an exclusive lock holder or while an
- * exclusive lock request or upgrade request is in progress.
- *
- * However, if TDP_DEADLKTREAT is set, we override exclusive
- * lock requests or upgrade requests ( but not the exclusive
- * lock itself ).
+ * Try to switch from one shared lock to an exclusive one.
+ * We need to preserve waiters flags during the operation.
*/
- if (lkp->lk_lockholder != td) {
- lockflags = LK_HAVE_EXCL;
- if (!(td->td_pflags & TDP_DEADLKTREAT))
- lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
- error = acquire(&lkp, extflags, lockflags, wmesg,
- prio, timo, &contested, &waitstart);
- if (error)
- break;
- sharelock(td, lkp, 1);
- if (lkp->lk_sharecount == 1)
- lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
- WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags),
- file, line);
-
-#if defined(DEBUG_LOCKS)
- stack_save(&lkp->lk_stack);
-#endif
+ if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
+ tid | x)) {
+ LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
+ line);
+ TD_SLOCKS_DEC(curthread);
break;
}
+
/*
- * We hold an exclusive lock, so downgrade it to shared.
- * An alternative would be to fail with EDEADLK.
+ * We have been unable to succeed in upgrading, so just
+ * give up the shared lock.
*/
- /* FALLTHROUGH downgrade */
+ wakeupshlk(lk, file, line);
- case LK_DOWNGRADE:
- _lockmgr_assert(lkp, KA_XLOCKED, file, line);
- sharelock(td, lkp, lkp->lk_exclusivecount);
- WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
- COUNT(td, -lkp->lk_exclusivecount);
- lkp->lk_exclusivecount = 0;
- lkp->lk_flags &= ~LK_HAVE_EXCL;
- lkp->lk_lockholder = LK_NOPROC;
- if (lkp->lk_waitcount)
- wakeup((void *)lkp);
- break;
+ /* FALLTHROUGH */
+ case LK_EXCLUSIVE:
- case LK_UPGRADE:
- /*
- * Upgrade a shared lock to an exclusive one. If another
- * shared lock has already requested an upgrade to an
- * exclusive lock, our shared lock is released and an
- * exclusive lock is requested (which will be granted
- * after the upgrade). If we return an error, the file
- * will always be unlocked.
- */
- _lockmgr_assert(lkp, KA_SLOCKED, file, line);
- shareunlock(td, lkp, 1);
- if (lkp->lk_sharecount == 0)
- lock_profile_release_lock(&lkp->lk_object);
/*
- * If we are just polling, check to see if we will block.
+ * If curthread already holds the lock and this one is
+ * allowed to recurse, simply recurse on it.
*/
- if ((extflags & LK_NOWAIT) &&
- ((lkp->lk_flags & LK_WANT_UPGRADE) ||
- lkp->lk_sharecount > 1)) {
- error = EBUSY;
- WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
+ if (lockmgr_xlocked(lk)) {
+ if ((flags & LK_CANRECURSE) == 0 &&
+ (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {
+
+ /*
+ * If the lock is expected to not panic just
+ * give up and return.
+ */
+ if (LK_TRYOP(flags)) {
+ LOCK_LOG2(lk,
+ "%s: %p fails the try operation",
+ __func__, lk);
+ error = EBUSY;
+ break;
+ }
+ if (flags & LK_INTERLOCK)
+ class->lc_unlock(ilk);
+ panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
+ __func__, iwmesg, file, line);
+ }
+ lk->lk_recurse++;
+ LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
+ LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
+ lk->lk_recurse, file, line);
+ TD_LOCKS_INC(curthread);
break;
}
- if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
+
+ while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
+ tid)) {
+ lock_profile_obtain_lock_failed(&lk->lock_object,
+ &contested, &waittime);
+
/*
- * We are first shared lock to request an upgrade, so
- * request upgrade and wait for the shared count to
- * drop to zero, then take exclusive lock.
+ * If the lock is expected to not sleep just give up
+ * and return.
*/
- lkp->lk_flags |= LK_WANT_UPGRADE;
- error = acquire(&lkp, extflags, LK_SHARE_NONZERO, wmesg,
- prio, timo, &contested, &waitstart);
- lkp->lk_flags &= ~LK_WANT_UPGRADE;
+ if (LK_TRYOP(flags)) {
+ LOCK_LOG2(lk, "%s: %p fails the try operation",
+ __func__, lk);
+ error = EBUSY;
+ break;
+ }
- if (error) {
- if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
- wakeup((void *)lkp);
- WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
- break;
+ /*
+ * Acquire the sleepqueue chain lock because we
+ * probably will need to manipulate waiters flags.
+ */
+ sleepq_lock(&lk->lock_object);
+ x = lk->lk_lock;
+ v = x & LK_ALL_WAITERS;
+
+ /*
+ * if the lock has been released while we spun on
+ * the sleepqueue chain lock just try again.
+ */
+ if (x == LK_UNLOCKED) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+
+ /*
+ * The lock can be in the state where there is a
+ * pending queue of waiters, but still no owner.
+ * This happens when the lock is contested and an
+ * owner is going to claim the lock.
+ * If curthread is the one successfully acquiring it
+ * claim lock ownership and return, preserving waiters
+ * flags.
+ */
+ if (x == (LK_UNLOCKED | v)) {
+ if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
+ tid | v)) {
+ sleepq_release(&lk->lock_object);
+ LOCK_LOG2(lk,
+ "%s: %p claimed by a new writer",
+ __func__, lk);
+ break;
+ }
+ sleepq_release(&lk->lock_object);
+ continue;
}
- if (lkp->lk_exclusivecount != 0)
- panic("lockmgr: non-zero exclusive count");
- lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = td;
- lkp->lk_exclusivecount = 1;
- WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE |
- LOP_TRYLOCK, file, line);
- COUNT(td, 1);
- lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
-#if defined(DEBUG_LOCKS)
- stack_save(&lkp->lk_stack);
-#endif
- break;
- }
- /*
- * Someone else has requested upgrade. Release our shared
- * lock, awaken upgrade requestor if we are the last shared
- * lock, then request an exclusive lock.
- */
- WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
- if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
- LK_WAIT_NONZERO)
- wakeup((void *)lkp);
- /* FALLTHROUGH exclusive request */
- case LK_EXCLUSIVE:
- if (!LOCKMGR_TRYOP(extflags))
- WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
- LOP_EXCLUSIVE, file, line);
- if (lkp->lk_lockholder == td) {
/*
- * Recursive lock.
+ * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
+ * fail, loop back and retry.
*/
- if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
- panic("lockmgr: locking against myself");
- if ((extflags & LK_CANRECURSE) != 0) {
- lkp->lk_exclusivecount++;
- WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
- LOCKMGR_TRYW(extflags), file, line);
- COUNT(td, 1);
+ if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
+ if (!atomic_cmpset_ptr(&lk->lk_lock, x,
+ x | LK_EXCLUSIVE_WAITERS)) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+ LOCK_LOG2(lk, "%s: %p set excl waiters flag",
+ __func__, lk);
+ }
+
+ /*
+ * Since we have been unable to acquire the
+ * exclusive lock and the exclusive waiters flag
+ * is set, we will sleep.
+ */
+ error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
+ SQ_EXCLUSIVE_QUEUE);
+ flags &= ~LK_INTERLOCK;
+ if (error) {
+ LOCK_LOG3(lk,
+ "%s: interrupted sleep for %p with %d",
+ __func__, lk, error);
break;
}
+ LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
+ __func__, lk);
}
- /*
- * If we are just polling, check to see if we will sleep.
- */
- if ((extflags & LK_NOWAIT) &&
- (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
- error = EBUSY;
- break;
+ if (error == 0) {
+ lock_profile_obtain_lock_success(&lk->lock_object,
+ contested, waittime, file, line);
+ LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
+ lk->lk_recurse, file, line);
+ TD_LOCKS_INC(curthread);
+ STACK_SAVE(lk);
}
+ break;
+ case LK_DOWNGRADE:
+ _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
+
/*
- * Try to acquire the want_exclusive flag.
- */
- error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
- wmesg, prio, timo, &contested, &waitstart);
- if (error)
- break;
- lkp->lk_flags |= LK_WANT_EXCL;
- /*
- * Wait for shared locks and upgrades to finish.
+ * In order to preserve waiters flags, just spin.
*/
- error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE |
- LK_SHARE_NONZERO, wmesg, prio, timo,
- &contested, &waitstart);
- lkp->lk_flags &= ~LK_WANT_EXCL;
- if (error) {
- if (lkp->lk_flags & LK_WAIT_NONZERO)
- wakeup((void *)lkp);
- break;
- }
- lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = td;
- if (lkp->lk_exclusivecount != 0)
- panic("lockmgr: non-zero exclusive count");
- lkp->lk_exclusivecount = 1;
- WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
- LOCKMGR_TRYW(extflags), file, line);
- COUNT(td, 1);
- lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
-#if defined(DEBUG_LOCKS)
- stack_save(&lkp->lk_stack);
-#endif
+ for (;;) {
+ x = lk->lk_lock & LK_ALL_WAITERS;
+ if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
+ LK_SHARERS_LOCK(1) | x)) {
+ LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object,
+ 0, 0, file, line);
+ TD_SLOCKS_INC(curthread);
+ break;
+ }
+ cpu_spinwait();
+ }
break;
-
case LK_RELEASE:
- _lockmgr_assert(lkp, KA_LOCKED, file, line);
- if (lkp->lk_exclusivecount != 0) {
- if (lkp->lk_lockholder != LK_KERNPROC) {
- WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
- file, line);
- COUNT(td, -1);
+ _lockmgr_assert(lk, KA_LOCKED, file, line);
+ x = lk->lk_lock;
+
+ if ((x & LK_SHARE) == 0) {
+
+ /*
+ * As a first option, treat the lock as if it has no
+ * waiters.
+ * Fix-up the tid var if the lock has been disowned.
+ */
+ if (LK_HOLDER(x) == LK_KERNPROC)
+ tid = LK_KERNPROC;
+ else
+ TD_LOCKS_DEC(curthread);
+ LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
+ lk->lk_recurse, file, line);
+
+ /*
+ * The lock is held in exclusive mode.
+ * If the lock is recursed also, then unrecurse it.
+ */
+ if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
+ LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
+ lk);
+ lk->lk_recurse--;
+ break;
}
- if (lkp->lk_exclusivecount-- == 1) {
- lkp->lk_flags &= ~LK_HAVE_EXCL;
- lkp->lk_lockholder = LK_NOPROC;
- lock_profile_release_lock(&lkp->lk_object);
+ lock_profile_release_lock(&lk->lock_object);
+
+ if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
+ LK_UNLOCKED))
+ break;
+
+ sleepq_lock(&lk->lock_object);
+ x = lk->lk_lock & LK_ALL_WAITERS;
+ v = LK_UNLOCKED;
+
+ /*
+ * If the lock has exclusive waiters, give them
+ * preference in order to avoid deadlock with
+ * shared runners up.
+ */
+ if (x & LK_EXCLUSIVE_WAITERS) {
+ queue = SQ_EXCLUSIVE_QUEUE;
+ v |= (x & LK_SHARED_WAITERS);
+ } else {
+ MPASS(x == LK_SHARED_WAITERS);
+ queue = SQ_SHARED_QUEUE;
}
- } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
- WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
- shareunlock(td, lkp, 1);
- }
- if (lkp->lk_flags & LK_WAIT_NONZERO)
- wakeup((void *)lkp);
+ LOCK_LOG3(lk,
+ "%s: %p waking up threads on the %s queue",
+ __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
+ "exclusive");
+ atomic_store_rel_ptr(&lk->lk_lock, v);
+ sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+ sleepq_release(&lk->lock_object);
+ break;
+ } else
+ wakeupshlk(lk, file, line);
break;
-
case LK_DRAIN:
+
/*
- * Check that we do not already hold the lock, as it can
- * never drain if we do. Unfortunately, we have no way to
- * check for holding a shared lock, but at least we can
- * check for an exclusive one.
+ * Trying to drain a lock we already own will result in a
+ * deadlock.
*/
- if (!LOCKMGR_TRYOP(extflags))
- WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
- LOP_EXCLUSIVE, file, line);
- if (lkp->lk_lockholder == td)
- panic("lockmgr: draining against myself");
-
- error = acquiredrain(lkp, extflags, wmesg, prio, timo);
- if (error)
- break;
- lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
- lkp->lk_lockholder = td;
- lkp->lk_exclusivecount = 1;
- WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
- LOCKMGR_TRYW(extflags), file, line);
- COUNT(td, 1);
-#if defined(DEBUG_LOCKS)
- stack_save(&lkp->lk_stack);
-#endif
- break;
+ if (lockmgr_xlocked(lk)) {
+ if (flags & LK_INTERLOCK)
+ class->lc_unlock(ilk);
+ panic("%s: draining %s with the lock held @ %s:%d\n",
+ __func__, iwmesg, file, line);
+ }
- default:
- mtx_unlock(lkp->lk_interlock);
- panic("lockmgr: unknown locktype request %d",
- flags & LK_TYPE_MASK);
- /* NOTREACHED */
- }
- if ((lkp->lk_flags & LK_WAITDRAIN) &&
- (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
- LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
- lkp->lk_flags &= ~LK_WAITDRAIN;
- wakeup((void *)&lkp->lk_flags);
- }
- mtx_unlock(lkp->lk_interlock);
- return (error);
-}
+ while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+ lock_profile_obtain_lock_failed(&lk->lock_object,
+ &contested, &waittime);
-static int
-acquiredrain(struct lock *lkp, int extflags, const char *wmesg, int prio,
- int timo)
-{
- const char *iwmesg;
- int error, iprio, itimo;
+ /*
+ * If the lock is expected to not sleep just give up
+ * and return.
+ */
+ if (LK_TRYOP(flags)) {
+ LOCK_LOG2(lk, "%s: %p fails the try operation",
+ __func__, lk);
+ error = EBUSY;
+ break;
+ }
- iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
- iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
- itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
+ /*
+ * Acquire the sleepqueue chain lock because we
+ * probably will need to manipulate waiters flags.
+ */
+ sleepq_lock(&lk->lock_object);
+ x = lk->lk_lock;
+ v = x & LK_ALL_WAITERS;
- if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
- return EBUSY;
- }
- while (lkp->lk_flags & LK_ALL) {
- lkp->lk_flags |= LK_WAITDRAIN;
- error = msleep(&lkp->lk_flags, lkp->lk_interlock, iprio, iwmesg,
- ((extflags & LK_TIMELOCK) ? itimo : 0));
- if (error)
- return error;
- if (extflags & LK_SLEEPFAIL) {
- return ENOLCK;
+ /*
+ * if the lock has been released while we spun on
+ * the sleepqueue chain lock just try again.
+ */
+ if (x == LK_UNLOCKED) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+
+ if (x == (LK_UNLOCKED | v)) {
+ v = x;
+ if (v & LK_EXCLUSIVE_WAITERS) {
+ queue = SQ_EXCLUSIVE_QUEUE;
+ v &= ~LK_EXCLUSIVE_WAITERS;
+ } else {
+ MPASS(v & LK_SHARED_WAITERS);
+ queue = SQ_SHARED_QUEUE;
+ v &= ~LK_SHARED_WAITERS;
+ }
+ if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+ LOCK_LOG3(lk,
+ "%s: %p waking up all threads on the %s queue",
+ __func__, lk, queue == SQ_SHARED_QUEUE ?
+ "shared" : "exclusive");
+ sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
+ 0, queue);
+
+ /*
+ * If shared waiters have been woken up we need
+ * to wait for one of them to acquire the lock
+ * before setting the exclusive waiters flag in
+ * order to avoid a deadlock.
+ */
+ if (queue == SQ_SHARED_QUEUE) {
+ for (v = lk->lk_lock;
+ (v & LK_SHARE) && !LK_SHARERS(v);
+ v = lk->lk_lock)
+ cpu_spinwait();
+ }
+ }
+
+ /*
+ * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
+ * fail, loop back and retry.
+ */
+ if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
+ if (!atomic_cmpset_ptr(&lk->lk_lock, x,
+ x | LK_EXCLUSIVE_WAITERS)) {
+ sleepq_release(&lk->lock_object);
+ continue;
+ }
+ LOCK_LOG2(lk, "%s: %p set drain waiters flag",
+ __func__, lk);
+ }
+
+ /*
+ * Since we have been unable to acquire the
+ * exclusive lock and the exclusive waiters flag
+ * is set, we will sleep.
+ */
+ if (flags & LK_INTERLOCK) {
+ class->lc_unlock(ilk);
+ flags &= ~LK_INTERLOCK;
+ }
+ DROP_GIANT();
+ sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
+ SQ_EXCLUSIVE_QUEUE);
+ sleepq_wait(&lk->lock_object, ipri & PRIMASK);
+ PICKUP_GIANT();
+ LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
+ __func__, lk);
+ }
+
+ if (error == 0) {
+ lock_profile_obtain_lock_success(&lk->lock_object,
+ contested, waittime, file, line);
+ LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
+ lk->lk_recurse, file, line);
+ TD_LOCKS_INC(curthread);
+ STACK_SAVE(lk);
}
+ break;
+ default:
+ if (flags & LK_INTERLOCK)
+ class->lc_unlock(ilk);
+ panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
}
- return 0;
-}
-/*
- * Initialize a lock; required before use.
- */
-void
-lockinit(lkp, prio, wmesg, timo, flags)
- struct lock *lkp;
- int prio;
- const char *wmesg;
- int timo;
- int flags;
-{
- int iflags;
+ /*
+ * We could have exited from the switch without reacquiring the
+ * interlock, so we need to check for the interlock ownership.
+ */
+ if (flags & LK_INTERLOCK)
+ class->lc_unlock(ilk);
- KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0,
- ("%s: Invalid flags passed with mask 0x%x", __func__,
- flags & LK_EXTFLG_MASK));
- CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
- "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
-
- lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
- lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_FUNC_MASK);
- lkp->lk_sharecount = 0;
- lkp->lk_waitcount = 0;
- lkp->lk_exclusivecount = 0;
- lkp->lk_prio = prio;
- lkp->lk_timo = timo;
- lkp->lk_lockholder = LK_NOPROC;
- lkp->lk_newlock = NULL;
- iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
- if (!(flags & LK_NODUP))
- iflags |= LO_DUPOK;
- if (flags & LK_NOPROFILE)
- iflags |= LO_NOPROFILE;
- if (!(flags & LK_NOWITNESS))
- iflags |= LO_WITNESS;
- if (flags & LK_QUIET)
- iflags |= LO_QUIET;
-#ifdef DEBUG_LOCKS
- stack_zero(&lkp->lk_stack);
-#endif
- lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags);
+ return (error);
}
-/*
- * Destroy a lock.
- */
void
-lockdestroy(lkp)
- struct lock *lkp;
+_lockmgr_disown(struct lock *lk, const char *file, int line)
{
+ uintptr_t tid, x;
+
+ tid = (uintptr_t)curthread;
+ _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
+
+ /*
+ * If the owner is already LK_KERNPROC, just skip the whole operation.
+ */
+ if (LK_HOLDER(lk->lk_lock) != tid)
+ return;
- CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
- lkp, lkp->lk_wmesg);
- KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
- ("lockmgr still held"));
- KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
- lkp->lk_flags = LK_DESTROYED;
- lock_destroy(&lkp->lk_object);
+ /*
+ * In order to preserve waiters flags, just spin.
+ */
+ for (;;) {
+ x = lk->lk_lock & LK_ALL_WAITERS;
+ if (atomic_cmpset_ptr(&lk->lk_lock, tid | x,
+ LK_KERNPROC | x)) {
+ LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file,
+ line);
+ TD_LOCKS_DEC(curthread);
+ return;
+ }
+ cpu_spinwait();
+ }
}
-/*
- * Disown the lockmgr.
- */
void
-_lockmgr_disown(struct lock *lkp, const char *file, int line)
+lockmgr_printinfo(struct lock *lk)
{
struct thread *td;
+ uintptr_t x;
+
+ if (lk->lk_lock == LK_UNLOCKED)
+ printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
+ else if (lk->lk_lock & LK_SHARE)
+ printf(" lock type %s: SHARED (count %ju)\n",
+ lk->lock_object.lo_name,
+ (uintmax_t)LK_SHARERS(lk->lk_lock));
+ else {
+ td = lockmgr_xholder(lk);
+ printf(" lock type %s: EXCL by thread %p (pid %d)\n",
+ lk->lock_object.lo_name, td, td->td_proc->p_pid);
+ }
- td = curthread;
- KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
- ("%s: %p lockmgr is destroyed", __func__, lkp));
- _lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line);
+ x = lk->lk_lock;
+ if (x & LK_EXCLUSIVE_WAITERS)
+ printf(" with exclusive waiters pending\n");
+ if (x & LK_SHARED_WAITERS)
+ printf(" with shared waiters pending\n");
- /*
- * Drop the lock reference and switch the owner. This will result
- * in an atomic operation like td_lock is only accessed by curthread
- * and lk_lockholder only needs one write. Note also that the lock
- * owner can be alredy KERNPROC, so in that case just skip the
- * decrement.
- */
- if (lkp->lk_lockholder == td) {
- WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line);
- td->td_locks--;
- }
- lkp->lk_lockholder = LK_KERNPROC;
+ STACK_PRINT(lk);
}
-/*
- * Determine the status of a lock.
- */
int
-lockstatus(lkp)
- struct lock *lkp;
+lockstatus(struct lock *lk)
{
- int lock_type = 0;
- int interlocked;
-
- KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
- ("%s: %p lockmgr is destroyed", __func__, lkp));
-
- if (!kdb_active) {
- interlocked = 1;
- mtx_lock(lkp->lk_interlock);
- } else
- interlocked = 0;
- if (lkp->lk_exclusivecount != 0) {
- if (lkp->lk_lockholder == curthread)
- lock_type = LK_EXCLUSIVE;
- else
- lock_type = LK_EXCLOTHER;
- } else if (lkp->lk_sharecount != 0)
- lock_type = LK_SHARED;
- if (interlocked)
- mtx_unlock(lkp->lk_interlock);
- return (lock_type);
-}
+ uintptr_t v, x;
+ int ret;
-/*
- * Print out information about state of a lock. Used by VOP_PRINT
- * routines to display status about contained locks.
- */
-void
-lockmgr_printinfo(lkp)
- struct lock *lkp;
-{
+ ret = LK_SHARED;
+ x = lk->lk_lock;
+ v = LK_HOLDER(x);
- if (lkp->lk_sharecount)
- printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
- lkp->lk_sharecount);
- else if (lkp->lk_flags & LK_HAVE_EXCL)
- printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
- lkp->lk_wmesg, lkp->lk_exclusivecount,
- lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
- if (lkp->lk_waitcount > 0)
- printf(" with %d pending", lkp->lk_waitcount);
-#ifdef DEBUG_LOCKS
- stack_print_ddb(&lkp->lk_stack);
-#endif
+ if ((x & LK_SHARE) == 0) {
+ if (v == (uintptr_t)curthread || v == LK_KERNPROC)
+ ret = LK_EXCLUSIVE;
+ else
+ ret = LK_EXCLOTHER;
+ } else if (x == LK_UNLOCKED)
+ ret = 0;
+
+ return (ret);
}
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
-#undef _lockmgr_assert
+#undef _lockmgr_assert
#endif
void
-_lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
+_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
- struct thread *td;
- u_int x;
int slocked = 0;
- x = lkp->lk_flags;
- td = lkp->lk_lockholder;
if (panicstr != NULL)
return;
switch (what) {
@@ -697,133 +905,107 @@ _lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
case KA_LOCKED:
case KA_LOCKED | KA_NOTRECURSED:
case KA_LOCKED | KA_RECURSED:
-#ifdef WITNESS
- /*
- * We cannot trust WITNESS if the lock is held in
- * exclusive mode and a call to lockmgr_disown() happened.
- * Workaround this skipping the check if the lock is
- * held in exclusive mode even for the KA_LOCKED case.
- */
- if (slocked || (x & LK_HAVE_EXCL) == 0) {
- witness_assert(&lkp->lk_object, what, file, line);
- break;
- }
-#endif
- if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
- (slocked || LOCKMGR_NOTOWNER(td))))
+ if (lk->lk_lock == LK_UNLOCKED ||
+ ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
+ (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
panic("Lock %s not %slocked @ %s:%d\n",
- lkp->lk_object.lo_name, slocked ? "share " : "",
+ lk->lock_object.lo_name, slocked ? "share" : "",
file, line);
- if ((x & LK_SHARE_NONZERO) == 0) {
- if (lockmgr_recursed(lkp)) {
+
+ if ((lk->lk_lock & LK_SHARE) == 0) {
+ if (lockmgr_recursed(lk)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
+ lk->lock_object.lo_name, file,
+ line);
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
+ lk->lock_object.lo_name, file, line);
}
break;
case KA_XLOCKED:
case KA_XLOCKED | KA_NOTRECURSED:
case KA_XLOCKED | KA_RECURSED:
- if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
+ if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
panic("Lock %s not exclusively locked @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
- if (lockmgr_recursed(lkp)) {
+ lk->lock_object.lo_name, file, line);
+ if (lockmgr_recursed(lk)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
+ lk->lock_object.lo_name, file, line);
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
+ lk->lock_object.lo_name, file, line);
break;
case KA_UNLOCKED:
- if (td == curthread || td == LK_KERNPROC)
+ if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
panic("Lock %s exclusively locked @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
- break;
- case KA_HELD:
- case KA_UNHELD:
- if (LOCKMGR_UNHELD(x)) {
- if (what & KA_HELD)
- panic("Lock %s not locked by anyone @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
- } else if (what & KA_UNHELD)
- panic("Lock %s locked by someone @ %s:%d\n",
- lkp->lk_object.lo_name, file, line);
+ lk->lock_object.lo_name, file, line);
break;
default:
- panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
- file, line);
+ panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
+ line);
}
}
-#endif /* INVARIANT_SUPPORT */
+#endif
#ifdef DDB
-/*
- * Check to see if a thread that is blocked on a sleep queue is actually
- * blocked on a 'struct lock'. If so, output some details and return true.
- * If the lock has an exclusive owner, return that in *ownerp.
- */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
- struct lock *lkp;
+ struct lock *lk;
- lkp = td->td_wchan;
+ lk = td->td_wchan;
- /* Simple test to see if wchan points to a lockmgr lock. */
- if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
- lkp->lk_wmesg == td->td_wmesg)
- goto ok;
+ if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
+ return (0);
+ db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
+ if (lk->lk_lock & LK_SHARE)
+ db_printf("SHARED (count %ju)\n",
+ (uintmax_t)LK_SHARERS(lk->lk_lock));
+ else
+ db_printf("EXCL\n");
+ *ownerp = lockmgr_xholder(lk);
- /*
- * If this thread is doing a DRAIN, then it would be asleep on
- * &lkp->lk_flags rather than lkp.
- */
- lkp = (struct lock *)((char *)td->td_wchan -
- offsetof(struct lock, lk_flags));
- if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
- lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
- goto ok;
-
- /* Doen't seem to be a lockmgr lock. */
- return (0);
-
-ok:
- /* Ok, we think we have a lockmgr lock, so output some details. */
- db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
- if (lkp->lk_sharecount) {
- db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
- *ownerp = NULL;
- } else {
- db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
- *ownerp = lkp->lk_lockholder;
- }
return (1);
}
-void
+static void
db_show_lockmgr(struct lock_object *lock)
{
struct thread *td;
- struct lock *lkp;
+ struct lock *lk;
- lkp = (struct lock *)lock;
+ lk = (struct lock *)lock;
- db_printf(" lock type: %s\n", lkp->lk_wmesg);
db_printf(" state: ");
- if (lkp->lk_sharecount)
- db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
- else if (lkp->lk_flags & LK_HAVE_EXCL) {
- td = lkp->lk_lockholder;
- db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
- db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
- td->td_proc->p_pid, td->td_name);
- } else
+ if (lk->lk_lock == LK_UNLOCKED)
db_printf("UNLOCKED\n");
- if (lkp->lk_waitcount > 0)
- db_printf(" waiters: %d\n", lkp->lk_waitcount);
+ else if (lk->lk_lock & LK_SHARE)
+ db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
+ else {
+ td = lockmgr_xholder(lk);
+ if (td == (struct thread *)LK_KERNPROC)
+ db_printf("XLOCK: LK_KERNPROC\n");
+ else
+ db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
+ td->td_tid, td->td_proc->p_pid,
+ td->td_proc->p_comm);
+ if (lockmgr_recursed(lk))
+ db_printf(" recursed: %d\n", lk->lk_recurse);
+ }
+ db_printf(" waiters: ");
+ switch (lk->lk_lock & LK_ALL_WAITERS) {
+ case LK_SHARED_WAITERS:
+ db_printf("shared\n");
+ case LK_EXCLUSIVE_WAITERS:
+ db_printf("exclusive\n");
+ break;
+ case LK_ALL_WAITERS:
+ db_printf("shared and exclusive\n");
+ break;
+ default:
+ db_printf("none\n");
+ }
}
#endif
diff --git a/sys/sys/_lockmgr.h b/sys/sys/_lockmgr.h
new file mode 100644
index 0000000..11ddac6
--- /dev/null
+++ b/sys/sys/_lockmgr.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__LOCKMGR_H_
+#define _SYS__LOCKMGR_H_
+
+#ifdef DEBUG_LOCKS
+#include <sys/_stack.h>
+#endif
+
+struct lock {
+ struct lock_object lock_object;
+ volatile uintptr_t lk_lock;
+ volatile unsigned lk_recurse;
+ int lk_timo;
+ int lk_pri;
+#ifdef DEBUG_LOCKS
+ struct stack lk_stack;
+#endif
+};
+
+#endif
diff --git a/sys/sys/_stack.h b/sys/sys/_stack.h
new file mode 100644
index 0000000..96273d3
--- /dev/null
+++ b/sys/sys/_stack.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2005 Antoine Brodin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__STACK_H_
+#define _SYS__STACK_H_
+
+#define STACK_MAX 18 /* Don't change, stack_ktr relies on this. */
+
+struct stack {
+ int depth;
+ vm_offset_t pcs[STACK_MAX];
+};
+
+#endif
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 9e2fe59..5251f31 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -1,240 +1,189 @@
/*-
- * Copyright (c) 1995
- * The Regents of the University of California. All rights reserved.
- *
- * This code contains ideas from software contributed to Berkeley by
- * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
- * System project at Carnegie-Mellon University.
+ * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
+ * notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*
- * @(#)lock.h 8.12 (Berkeley) 5/19/95
* $FreeBSD$
*/
#ifndef _SYS_LOCKMGR_H_
#define _SYS_LOCKMGR_H_
-#ifdef DEBUG_LOCKS
-#include <sys/stack.h> /* XXX */
-#endif
-#include <sys/queue.h>
#include <sys/_lock.h>
+#include <sys/_lockmgr.h>
+#include <sys/_mutex.h>
+#include <sys/_rwlock.h>
+
+#define LK_SHARE 0x01
+#define LK_SHARED_WAITERS 0x02
+#define LK_EXCLUSIVE_WAITERS 0x04
+#define LK_ALL_WAITERS \
+ (LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
+#define LK_FLAGMASK \
+ (LK_SHARE | LK_ALL_WAITERS)
+
+#define LK_HOLDER(x) ((x) & ~LK_FLAGMASK)
+#define LK_SHARERS_SHIFT 3
+#define LK_SHARERS(x) (LK_HOLDER(x) >> LK_SHARERS_SHIFT)
+#define LK_SHARERS_LOCK(x) ((x) << LK_SHARERS_SHIFT | LK_SHARE)
+#define LK_ONE_SHARER (1 << LK_SHARERS_SHIFT)
+#define LK_UNLOCKED LK_SHARERS_LOCK(0)
+#define LK_KERNPROC ((uintptr_t)(-1) & ~LK_FLAGMASK)
+
+#ifdef _KERNEL
-struct mtx;
+#if !defined(LOCK_FILE) || !defined(LOCK_LINE)
+#error "LOCK_FILE and LOCK_LINE not defined, include <sys/lock.h> before <sys/lockmgr.h>"
+#endif
+
+struct thread;
/*
- * The general lock structure. Provides for multiple shared locks,
- * upgrading from shared to exclusive, and sleeping until the lock
- * can be gained.
+ * Function prototypes. Routines that start with an underscore are not part
+ * of the public interface and might be wrapped with a macro.
*/
-struct lock {
- struct lock_object lk_object; /* common lock properties */
- struct mtx *lk_interlock; /* lock on remaining fields */
- u_int lk_flags; /* see below */
- int lk_sharecount; /* # of accepted shared locks */
- int lk_waitcount; /* # of processes sleeping for lock */
- short lk_exclusivecount; /* # of recursive exclusive locks */
- short lk_prio; /* priority at which to sleep */
- int lk_timo; /* maximum sleep time (for tsleep) */
- struct thread *lk_lockholder; /* thread of exclusive lock holder */
- struct lock *lk_newlock; /* lock taking over this lock */
-
-#ifdef DEBUG_LOCKS
- struct stack lk_stack;
+int __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
+ const char *wmesg, int prio, int timo, const char *file, int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void _lockmgr_assert(struct lock *lk, int what, const char *file, int line);
#endif
-};
+void _lockmgr_disown(struct lock *lk, const char *file, int line);
-#define lk_wmesg lk_object.lo_name
-
-#ifdef _KERNEL
+void lockdestroy(struct lock *lk);
+void lockinit(struct lock *lk, int prio, const char *wmesg, int timo,
+ int flags);
+#ifdef DDB
+int lockmgr_chain(struct thread *td, struct thread **ownerp);
+#endif
+void lockmgr_printinfo(struct lock *lk);
+int lockstatus(struct lock *lk);
/*
- * Lock request types:
- * LK_SHARED - get one of many possible shared locks. If a process
- * holding an exclusive lock requests a shared lock, the exclusive
- * lock(s) will be downgraded to shared locks.
- * LK_EXCLUSIVE - stop further shared locks, when they are cleared,
- * grant a pending upgrade if it exists, then grant an exclusive
- * lock. Only one exclusive lock may exist at a time, except that
- * a process holding an exclusive lock may get additional exclusive
- * locks if it explicitly sets the LK_CANRECURSE flag in the lock
- * request, or if the LK_CANRECUSE flag was set when the lock was
- * initialized.
- * LK_UPGRADE - the process must hold a shared lock that it wants to
- * have upgraded to an exclusive lock. Other processes may get
- * exclusive access to the resource between the time that the upgrade
- * is requested and the time that it is granted.
- * LK_DOWNGRADE - the process must hold an exclusive lock that it wants
- * to have downgraded to a shared lock. If the process holds multiple
- * (recursive) exclusive locks, they will all be downgraded to shared
- * locks.
- * LK_RELEASE - release one instance of a lock.
- * LK_DRAIN - wait for all activity on the lock to end, then mark it
- * decommissioned. This feature is used before freeing a lock that
- * is part of a piece of memory that is about to be freed.
- * LK_EXCLOTHER - return for lockstatus(). Used when another process
- * holds the lock exclusively.
- *
- * These are flags that are passed to the lockmgr routine.
+ * Since the ilk can be a static NULL pointer, these functions need a
+ * strict prototype in order to safely use the lock_object member.
*/
-#define LK_TYPE_MASK 0x0000000f /* type of lock sought */
-#define LK_SHARED 0x00000001 /* shared lock */
-#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
-#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
-#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
-#define LK_RELEASE 0x00000006 /* release any type of lock */
-#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
-#define LK_EXCLOTHER 0x00000008 /* other process holds lock */
+static __inline int
+_lockmgr_args(struct lock *lk, u_int flags, struct mtx *ilk, const char *wmesg,
+ int prio, int timo, const char *file, int line)
+{
+
+ return (__lockmgr_args(lk, flags, (ilk != NULL) ? &ilk->lock_object :
+ NULL, wmesg, prio, timo, file, line));
+}
+
+static __inline int
+_lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
+ const char *wmesg, int prio, int timo, const char *file, int line)
+{
+
+ return (__lockmgr_args(lk, flags, (ilk != NULL) ? &ilk->lock_object :
+ NULL, wmesg, prio, timo, file, line));
+}
+
/*
- * External lock flags.
- *
- * These may be set in lock_init to set their mode permanently,
- * or passed in as arguments to the lock manager.
+ * Define aliases in order to complete the lockmgr KPI.
*/
-#define LK_EXTFLG_MASK 0x0000fff0 /* mask of external flags */
-#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
-#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
-#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */
-#define LK_NOSHARE 0x00000080 /* Only allow exclusive locks */
-#define LK_TIMELOCK 0x00000100 /* use lk_timo, else no timeout */
-#define LK_NOWITNESS 0x00000200 /* disable WITNESS */
-#define LK_NODUP 0x00000400 /* enable duplication logging */
-#define LK_NOPROFILE 0x00000800 /* disable lock profiling */
-#define LK_QUIET 0x00001000 /* disable lock operations tracking */
-#define LK_FUNC_MASK (LK_NODUP | LK_NOPROFILE | LK_NOWITNESS | LK_QUIET)
+#define lockmgr(lk, flags, ilk) \
+ _lockmgr_args((lk), (flags), (ilk), LK_WMESG_DEFAULT, \
+ LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
+#define lockmgr_args(lk, flags, ilk, wmesg, prio, timo) \
+ _lockmgr_args((lk), (flags), (ilk), (wmesg), (prio), (timo), \
+ LOCK_FILE, LOCK_LINE)
+#define lockmgr_args_rw(lk, flags, ilk, wmesg, prio, timo) \
+ _lockmgr_args_rw((lk), (flags), (ilk), (wmesg), (prio), (timo), \
+ LOCK_FILE, LOCK_LINE)
+#define lockmgr_disown(lk) \
+ _lockmgr_disown((lk), LOCK_FILE, LOCK_LINE)
+#define lockmgr_recursed(lk) \
+ ((lk)->lk_recurse != 0)
+#define lockmgr_rw(lk, flags, ilk) \
+ _lockmgr_args_rw((lk), (flags), (ilk), LK_WMESG_DEFAULT, \
+ LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
+#define lockmgr_waiters(lk) \
+ ((lk)->lk_lock & LK_ALL_WAITERS)
+#ifdef INVARIANTS
+#define lockmgr_assert(lk, what) \
+ _lockmgr_assert((lk), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define lockmgr_assert(lk, what)
+#endif
+
/*
- * Nonpersistent external flags.
+ * Flags for lockinit().
*/
-#define LK_RETRY 0x00010000 /* vn_lock: retry until locked */
-#define LK_INTERLOCK 0x00020000 /*
- * unlock passed mutex after getting
- * lk_interlock
- */
+#define LK_INIT_MASK 0x000FF
+#define LK_CANRECURSE 0x00001
+#define LK_NODUP 0x00002
+#define LK_NOPROFILE 0x00004
+#define LK_NOSHARE 0x00008
+#define LK_NOWITNESS 0x00010
+#define LK_QUIET 0x00020
/*
- * Default values for lockmgr_args().
+ * Additional attributes to be used in lockmgr().
*/
-#define LK_WMESG_DEFAULT (NULL)
-#define LK_PRIO_DEFAULT (-1)
-#define LK_TIMO_DEFAULT (0)
+#define LK_EATTR_MASK 0x0FF00
+#define LK_INTERLOCK 0x00100
+#define LK_NOWAIT 0x00200
+#define LK_RETRY 0x00400
+#define LK_SLEEPFAIL 0x00800
+#define LK_TIMELOCK 0x01000
/*
- * Internal lock flags.
- *
- * These flags are used internally to the lock manager.
+ * Operations for lockmgr().
*/
-#define LK_WANT_UPGRADE 0x00100000 /* waiting for share-to-excl upgrade */
-#define LK_WANT_EXCL 0x00200000 /* exclusive lock sought */
-#define LK_HAVE_EXCL 0x00400000 /* exclusive lock obtained */
-#define LK_WAITDRAIN 0x00800000 /* process waiting for lock to drain */
-#define LK_DRAINING 0x01000000 /* lock is being drained */
-#define LK_DESTROYED 0x02000000 /* lock is destroyed */
+#define LK_TYPE_MASK 0xF0000
+#define LK_DOWNGRADE 0x10000
+#define LK_DRAIN 0x20000
+#define LK_EXCLOTHER 0x30000
+#define LK_EXCLUSIVE 0x40000
+#define LK_RELEASE 0x50000
+#define LK_SHARED 0x60000
+#define LK_UPGRADE 0x70000
+
+#define LK_TOTAL_MASK (LK_INIT_MASK | LK_EATTR_MASK | LK_TYPE_MASK)
+
/*
- * Internal state flags corresponding to lk_sharecount, and lk_waitcount
+ * Default values for lockmgr_args().
*/
-#define LK_SHARE_NONZERO 0x10000000
-#define LK_WAIT_NONZERO 0x20000000
-
-#ifndef LOCK_FILE
-#error "LOCK_FILE not defined, include <sys/lock.h> before <sys/lockmgr.h>"
-#endif
+#define LK_WMESG_DEFAULT (NULL)
+#define LK_PRIO_DEFAULT (0)
+#define LK_TIMO_DEFAULT (0)
/*
* Assertion flags.
*/
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-#define KA_BASE (LA_MASKASSERT + 1)
#define KA_LOCKED LA_LOCKED
#define KA_SLOCKED LA_SLOCKED
#define KA_XLOCKED LA_XLOCKED
#define KA_UNLOCKED LA_UNLOCKED
#define KA_RECURSED LA_RECURSED
#define KA_NOTRECURSED LA_NOTRECURSED
-#define KA_HELD (KA_BASE << 0x00)
-#define KA_UNHELD (KA_BASE << 0x01)
-#endif
-
-/*
- * Lock return status.
- *
- * Successfully obtained locks return 0. Locks will always succeed
- * unless one of the following is true:
- * LK_FORCEUPGRADE is requested and some other process has already
- * requested a lock upgrade (returns EBUSY).
- * LK_WAIT is set and a sleep would be required (returns EBUSY).
- * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
- * PCATCH is set in lock priority and a signal arrives (returns
- * either EINTR or ERESTART if system calls is to be restarted).
- * Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
- * A failed lock attempt always returns a non-zero error value. No lock
- * is held after an error return (in particular, a failed LK_UPGRADE
- * or LK_FORCEUPGRADE will have released its shared access lock).
- */
-
-/*
- * Indicator that no process holds exclusive lock
- */
-#define LK_KERNPROC ((struct thread *)-2)
-#define LK_NOPROC ((struct thread *) -1)
-
-struct thread;
-
-void lockinit(struct lock *, int prio, const char *wmesg,
- int timo, int flags);
-void lockdestroy(struct lock *);
-
-int _lockmgr_args(struct lock *, u_int flags, struct mtx *,
- const char *wmesg, int prio, int timo, char *file, int line);
-#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void _lockmgr_assert(struct lock *, int what, const char *, int);
-#endif
-void _lockmgr_disown(struct lock *, const char *, int);
-void lockmgr_printinfo(struct lock *);
-int lockstatus(struct lock *);
-
-#define lockmgr(lock, flags, mtx) \
- _lockmgr_args((lock), (flags), (mtx), LK_WMESG_DEFAULT, \
- LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
-#define lockmgr_disown(lock) \
- _lockmgr_disown((lock), LOCK_FILE, LOCK_LINE)
-#define lockmgr_args(lock, flags, mtx, wmesg, prio, timo) \
- _lockmgr_args((lock), (flags), (mtx), (wmesg), (prio), (timo), \
- LOCK_FILE, LOCK_LINE)
-#define lockmgr_recursed(lkp) \
- ((lkp)->lk_exclusivecount > 1)
-#define lockmgr_waiters(lkp) \
- ((lkp)->lk_waitcount != 0)
-#ifdef INVARIANTS
-#define lockmgr_assert(lkp, what) \
- _lockmgr_assert((lkp), (what), LOCK_FILE, LOCK_LINE)
-#else
-#define lockmgr_assert(lkp, what)
-#endif
-#ifdef DDB
-int lockmgr_chain(struct thread *td, struct thread **ownerp);
+#define KA_HELD
+#define KA_UNHELD
#endif
#endif /* _KERNEL */
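
The LK_* bits and macros added above encode the whole lock state in a single
word: the three low bits carry LK_SHARE and the two waiter flags, while the
remaining bits hold either the count of shared holders or the owning thread
pointer. The following user-space sketch is not part of the commit; it merely
copies those macros to show how sharer counts are packed into and decoded from
the word, with illustrative values.

#include <stdint.h>
#include <stdio.h>

#define	LK_SHARE		0x01
#define	LK_SHARED_WAITERS	0x02
#define	LK_EXCLUSIVE_WAITERS	0x04
#define	LK_ALL_WAITERS		(LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
#define	LK_FLAGMASK		(LK_SHARE | LK_ALL_WAITERS)
#define	LK_HOLDER(x)		((x) & ~LK_FLAGMASK)
#define	LK_SHARERS_SHIFT	3
#define	LK_SHARERS(x)		(LK_HOLDER(x) >> LK_SHARERS_SHIFT)
#define	LK_SHARERS_LOCK(x)	((x) << LK_SHARERS_SHIFT | LK_SHARE)
#define	LK_ONE_SHARER		(1 << LK_SHARERS_SHIFT)
#define	LK_UNLOCKED		LK_SHARERS_LOCK(0)

int
main(void)
{
	uintptr_t x;

	/* Unlocked: LK_SHARE set, zero sharers, no waiter bits. */
	x = LK_UNLOCKED;
	printf("unlocked: %#jx, sharers %ju\n", (uintmax_t)x,
	    (uintmax_t)LK_SHARERS(x));

	/* Two shared acquisitions each add LK_ONE_SHARER to the word. */
	x += 2 * LK_ONE_SHARER;
	printf("shared:   %#jx, sharers %ju\n", (uintmax_t)x,
	    (uintmax_t)LK_SHARERS(x));

	/* An exclusive waiter sets its flag; the sharer count is unchanged. */
	x |= LK_EXCLUSIVE_WAITERS;
	printf("waiters:  %#jx, sharers %ju\n", (uintmax_t)x,
	    (uintmax_t)LK_SHARERS(x));

	return (0);
}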
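
The lockmgr_rw() and lockmgr_args_rw() wrappers declared above accept a rwlock
rather than a mutex as interlock. Below is a hedged kernel-code sketch of how a
consumer might use them; the ex_lk/ex_rw names, the PVFS priority and the
surrounding logic are hypothetical and not taken from this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/rwlock.h>

static struct lock	ex_lk;		/* hypothetical lockmgr lock */
static struct rwlock	ex_rw;		/* hypothetical rwlock interlock */

static void
ex_init(void)
{

	lockinit(&ex_lk, PVFS, "exlk", LK_TIMO_DEFAULT, 0);
	rw_init(&ex_rw, "exrw");
}

static void
ex_shared_op(void)
{

	/*
	 * Examine state under the rwlock, then acquire the lockmgr lock
	 * shared.  LK_INTERLOCK tells lockmgr that ex_rw is held and must
	 * be released before lockmgr_rw() returns, just as with the
	 * mutex-based interlock.
	 */
	rw_wlock(&ex_rw);
	lockmgr_rw(&ex_lk, LK_SHARED | LK_INTERLOCK, &ex_rw);

	/* ... access whatever ex_lk protects ... */

	lockmgr_rw(&ex_lk, LK_RELEASE, NULL);
}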
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index a68fbef..a3e055f 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -200,7 +200,8 @@ struct thread {
volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
u_char td_tsqueue; /* (t) Turnstile queue blocked on. */
short td_locks; /* (k) Count of non-spin locks. */
- short td_rw_rlocks; /* (k) count of rwlock read locks. */
+ short td_rw_rlocks; /* (k) Count of rwlock read locks. */
+ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */
struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */
const char *td_lockname; /* (t) Name of lock blocked on. */
LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
diff --git a/sys/sys/sleepqueue.h b/sys/sys/sleepqueue.h
index 170d858..57b2c43 100644
--- a/sys/sys/sleepqueue.h
+++ b/sys/sys/sleepqueue.h
@@ -87,6 +87,7 @@ struct thread;
#define SLEEPQ_CONDVAR 0x01 /* Used for a cv. */
#define SLEEPQ_PAUSE 0x02 /* Used by pause. */
#define SLEEPQ_SX 0x03 /* Used by an sx lock. */
+#define SLEEPQ_LK 0x04 /* Used by a lockmgr. */
#define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
void init_sleepqueues(void);
diff --git a/sys/sys/stack.h b/sys/sys/stack.h
index 6bd52ab..0ebdd22 100644
--- a/sys/sys/stack.h
+++ b/sys/sys/stack.h
@@ -29,15 +29,10 @@
#ifndef _SYS_STACK_H_
#define _SYS_STACK_H_
-#define STACK_MAX 18 /* Don't change, stack_ktr relies on this. */
+#include <sys/_stack.h>
struct sbuf;
-struct stack {
- int depth;
- vm_offset_t pcs[STACK_MAX];
-};
-
/* MI Routines. */
struct stack *stack_create(void);
void stack_destroy(struct stack *);
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index b82ab13..6705b3c 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -395,8 +395,10 @@ extern void (*lease_updatetime)(int deltat);
#define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
#define VI_MTX(vp) (&(vp)->v_interlock)
-#define VN_LOCK_AREC(vp) ((vp)->v_vnlock->lk_flags |= LK_CANRECURSE)
-#define VN_LOCK_ASHARE(vp) ((vp)->v_vnlock->lk_flags &= ~LK_NOSHARE)
+#define VN_LOCK_AREC(vp) \
+ ((vp)->v_vnlock->lock_object.lo_flags |= LK_CANRECURSE)
+#define VN_LOCK_ASHARE(vp) \
+ ((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
#endif /* _KERNEL */
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 255723c..e4c0def 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -553,8 +553,8 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
#define ACQUIRE_LOCK(lk) mtx_lock(lk)
#define FREE_LOCK(lk) mtx_unlock(lk)
-#define BUF_AREC(bp) ((bp)->b_lock.lk_flags |= LK_CANRECURSE)
-#define BUF_NOREC(bp) ((bp)->b_lock.lk_flags &= ~LK_CANRECURSE)
+#define BUF_AREC(bp) ((bp)->b_lock.lock_object.lo_flags |= LK_CANRECURSE)
+#define BUF_NOREC(bp) ((bp)->b_lock.lock_object.lo_flags &= ~LK_CANRECURSE)
/*
* Worklist queue management.