author	attilio <attilio@FreeBSD.org>	2012-10-31 13:38:56 +0000
committer	attilio <attilio@FreeBSD.org>	2012-10-31 13:38:56 +0000
commit	b9e5ac8e1ce9f8dbec2f8ab42e0c36e0eb7c0244 (patch)
tree	ad03c11ad8da0f2c0561fe4449cfa1b4faea70e8 /sys/kern/kern_mutex.c
parent	245c75ddd9d9843e079e3280fcd1c008b66baf3e (diff)
Give mtx(9) the ability to handle different types of structures, with the
only constraint that they have a lock cookie named mtx_lock. This name then
becomes reserved: a struct that wants to use the mtx(9) KPI must call its
cookie mtx_lock, and other locking primitives cannot reuse that name for
their members.

Namely, such structs are the current struct mtx and the new struct
mtx_padalign. The new structure defines an object with the same layout as a
struct mtx, but allocated in areas aligned to the cache line size and as
big as a cache line. This is expected to give higher performance for highly
contended mutexes, both spin and sleep (the latter because of adaptive
spinning), where cache line contention results in too much traffic on the
system bus.

A struct mtx_padalign can be used in a completely transparent way with the
mtx(9) KPI.

At the moment, an MFC of this patch should be carefully evaluated, because
the patch breaks the low-level KPI (though not its representation).

Discussed with:	jhb
Reviewed by:	jeff, andre
Reviewed by:	mdf (earlier version)
Tested by:	jimharris
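To make the "completely transparent" claim concrete, below is a minimal
sketch of a consumer, assuming the sys/sys/mutex.h half of this patch (not
shown in this file's diff); struct pktq, its members, and the function
names are hypothetical:

	/*
	 * Hypothetical consumer: a struct mtx_padalign is declared and
	 * used exactly like a struct mtx under the mtx(9) KPI.
	 */
	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	struct pktq {
		struct mtx_padalign	pq_mtx;	/* cache-line sized/aligned */
		int			pq_len;
	};

	static struct pktq pq;

	static void
	pktq_init(void)
	{
		mtx_init(&pq.pq_mtx, "pktq lock", NULL, MTX_DEF);
	}

	static void
	pktq_enqueue_one(void)
	{
		mtx_lock(&pq.pq_mtx);	/* same KPI as for struct mtx */
		pq.pq_len++;
		mtx_unlock(&pq.pq_mtx);
	}

The only difference from a plain struct mtx is the declared type of the
member; the padding pays off for hot, contended locks and costs a cache
line per mutex everywhere else.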
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--	sys/kern/kern_mutex.c | 81
1 file changed, 64 insertions, 17 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 2ab7a09..2f13863 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -83,6 +83,12 @@ PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
+ * Return the mutex address when the lock cookie address is provided.
+ * This functionality assumes that struct mtx* have a member named mtx_lock.
+ */
+#define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock))
+
+/*
* Internal utility macros.
*/
#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
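The mtxlock2mtx() conversion above is the heart of the patch: each KPI
entry point now takes a pointer to the mtx_lock cookie and recovers the
enclosing mutex from it. The kernel's __containerof() performs type-checked
pointer arithmetic; the user-space sketch below shows only the underlying
offset math and is not the kernel definition:

	#include <stddef.h>	/* offsetof */
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's __containerof(). */
	#define containerof(x, s, m) \
		((s *)(void *)((char *)(x) - offsetof(s, m)))

	struct mtx_demo {
		int		filler;		/* members before the cookie */
		uintptr_t	mtx_lock;	/* the reserved lock cookie */
	};

	int
	main(void)
	{
		struct mtx_demo m;
		uintptr_t *c = &m.mtx_lock;	/* what the KPI passes around */
		struct mtx_demo *back;

		/* Subtracting offsetof() recovers the mutex address. */
		back = containerof(c, struct mtx_demo, mtx_lock);
		printf("round trip ok: %d\n", back == &m);
		return (0);
	}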
@@ -195,11 +201,15 @@ owner_mtx(const struct lock_object *lock, struct thread **owner)
* modules and can also be called from assembly language if needed.
*/
void
-_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
+ struct mtx *m;
if (SCHEDULER_STOPPED())
return;
+
+ m = mtxlock2mtx(c);
+
KASSERT(!TD_IS_IDLETHREAD(curthread),
("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
curthread, m->lock_object.lo_name, file, line));
@@ -219,11 +229,15 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
}
void
-_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
+ struct mtx *m;
if (SCHEDULER_STOPPED())
return;
+
+ m = mtxlock2mtx(c);
+
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
@@ -241,11 +255,16 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
}
void
-_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+ int line)
{
+ struct mtx *m;
if (SCHEDULER_STOPPED())
return;
+
+ m = mtxlock2mtx(c);
+
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -264,11 +283,16 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
}
void
-_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+ int line)
{
+ struct mtx *m;
if (SCHEDULER_STOPPED())
return;
+
+ m = mtxlock2mtx(c);
+
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -288,8 +312,9 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
* is already owned, it will recursively acquire the lock.
*/
int
-mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
+_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
+ struct mtx *m;
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
int contested = 0;
@@ -299,6 +324,8 @@ mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
if (SCHEDULER_STOPPED())
return (1);
+ m = mtxlock2mtx(c);
+
KASSERT(!TD_IS_IDLETHREAD(curthread),
("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
curthread, m->lock_object.lo_name, file, line));
@@ -330,15 +357,16 @@ mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
}
/*
- * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
*
* We call this if the lock is either contested (i.e. we need to go to
* sleep waiting for it), or if we need to recurse on it.
*/
void
-_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
- int line)
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+ const char *file, int line)
{
+ struct mtx *m;
struct turnstile *ts;
uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
@@ -360,6 +388,8 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
if (SCHEDULER_STOPPED())
return;
+ m = mtxlock2mtx(c);
+
if (mtx_owned(m)) {
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -518,15 +548,16 @@ _mtx_lock_spin_failed(struct mtx *m)
#ifdef SMP
/*
- * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
*
* This is only called if we need to actually spin for the lock. Recursion
* is handled inline.
*/
void
-_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
- int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
+ const char *file, int line)
{
+ struct mtx *m;
int i = 0;
#ifdef LOCK_PROFILING
int contested = 0;
@@ -536,6 +567,8 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
if (SCHEDULER_STOPPED())
return;
+ m = mtxlock2mtx(c);
+
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
@@ -684,19 +717,22 @@ thread_lock_set(struct thread *td, struct mtx *new)
}
/*
- * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
*
* We are only called here if the lock is recursed or contested (i.e. we
* need to wake up a blocked thread).
*/
void
-_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
+ struct mtx *m;
struct turnstile *ts;
if (SCHEDULER_STOPPED())
return;
+ m = mtxlock2mtx(c);
+
if (mtx_recursed(m)) {
if (--(m->mtx_recurse) == 0)
atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
@@ -735,11 +771,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
*/
#ifdef INVARIANT_SUPPORT
void
-_mtx_assert(const struct mtx *m, int what, const char *file, int line)
+__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
+ const struct mtx *m;
if (panicstr != NULL || dumping)
return;
+
+ m = mtxlock2mtx(c);
+
switch (what) {
case MA_OWNED:
case MA_OWNED | MA_RECURSED:
@@ -806,7 +846,8 @@ mtx_sysinit(void *arg)
{
struct mtx_args *margs = arg;
- mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
+ mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
+ margs->ma_opts);
}
/*
@@ -816,11 +857,14 @@ mtx_sysinit(void *arg)
* witness.
*/
void
-mtx_init(struct mtx *m, const char *name, const char *type, int opts)
+_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
+ struct mtx *m;
struct lock_class *class;
int flags;
+ m = mtxlock2mtx(c);
+
MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
@@ -863,8 +907,11 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
* flags.
*/
void
-mtx_destroy(struct mtx *m)
+_mtx_destroy(volatile uintptr_t *c)
{
+ struct mtx *m;
+
+ m = mtxlock2mtx(c);
if (!mtx_owned(m))
MPASS(mtx_unowned(m));
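None of this works without the companion sys/sys/mutex.h change, which is
outside this file's diff. A hedged reconstruction of that glue, inferred
from the new function signatures above rather than copied from the header
(the real macros may differ in detail):

	/*
	 * Hypothetical reconstruction of the mutex.h shims implied by the
	 * renamed functions; not taken from the actual header.
	 */
	#define	mtx_init(m, name, type, opts)				\
		_mtx_init(&(m)->mtx_lock, name, type, opts)
	#define	mtx_destroy(m)						\
		_mtx_destroy(&(m)->mtx_lock)
	#define	mtx_lock_flags_(m, opts, file, line)			\
		__mtx_lock_flags(&(m)->mtx_lock, opts, file, line)
	#define	mtx_unlock_flags_(m, opts, file, line)			\
		__mtx_unlock_flags(&(m)->mtx_lock, opts, file, line)

	/*
	 * Because the macros dereference only (m)->mtx_lock, any structure
	 * with a member of that name -- struct mtx or struct mtx_padalign --
	 * can be passed, which is why the KPI stays source-compatible.
	 */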