author	jhb <jhb@FreeBSD.org>	2005-01-05 21:13:27 +0000
committer	jhb <jhb@FreeBSD.org>	2005-01-05 21:13:27 +0000
commit	65d4d80800169bac83dae081a87e3d9e14719043 (patch)
tree	08a77e52d9c917cc5536ec8885e05c9cb42f5cd1 /sys
parent	90af8e5e9ad80d28a3fa66629927c8b4848f0bd5 (diff)
Rework the optimization for spinlocks on UP to be slightly less drastic and
turn it back on. Specifically, the actual changes are now less intrusive
in that the _get_spin_lock() and _rel_spin_lock() macros now have their
contents changed for UP vs SMP kernels which centralizes the changes.
Also, UP kernels do not use _mtx_lock_spin() and no longer include it.
The UP versions of the spin lock functions do not use any atomic
operations, but simple compares and stores which allow mtx_owned() to
still work for spin locks while removing the overhead of atomic
operations.

Tested on:	i386, alpha
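As a hedged aside, a minimal standalone sketch of the two acquire
strategies the reworked _get_spin_lock() macro selects between. The
struct, constants, and helpers below are simplified stand-ins for
illustration, not the definitions from sys/sys/mutex.h:

#include <stdint.h>

struct thread;				/* opaque stand-in for the kernel's */

struct mtx_sketch {
	volatile uintptr_t lock;	/* owning thread, or UNOWNED_SKETCH */
	unsigned int recurse;		/* recursion count */
};

#define	UNOWNED_SKETCH	((uintptr_t)0)	/* stand-in; the kernel's MTX_UNOWNED differs */

static void
critical_enter_sketch(void)
{
	/* Stand-in: the real critical_enter() blocks preemption. */
}

/*
 * SMP path: another CPU can race for the lock, so ownership must be
 * claimed atomically (atomic_cmpset in the kernel; a GCC builtin stands
 * in here).  On failure the kernel drops into the _mtx_lock_spin() hard
 * path, which also handles recursion; that is omitted for brevity.
 */
static void
get_spin_lock_smp(struct mtx_sketch *mp, struct thread *td)
{
	critical_enter_sketch();
	while (!__sync_bool_compare_and_swap(&mp->lock, UNOWNED_SKETCH,
	    (uintptr_t)td))
		;	/* spin until the owner releases the lock */
}

/*
 * UP path: with preemption blocked nothing can race, so a plain compare
 * and store suffices.  Crucially, mp->lock still records the owner,
 * which is why mtx_owned() keeps working without atomic operations.
 */
static void
get_spin_lock_up(struct mtx_sketch *mp, struct thread *td)
{
	critical_enter_sketch();
	if (mp->lock == (uintptr_t)td)
		mp->recurse++;		/* recursive acquisition */
	else
		mp->lock = (uintptr_t)td;
}

int
main(void)
{
	struct mtx_sketch m = { UNOWNED_SKETCH, 0 };
	struct thread *td = (struct thread *)&m;	/* any non-zero token */

	(void)get_spin_lock_smp;	/* unused in this demo */
	get_spin_lock_up(&m, td);
	get_spin_lock_up(&m, td);	/* recurses: recurse becomes 1 */
	return (m.lock == (uintptr_t)td && m.recurse == 1) ? 0 : 1;
}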
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_mutex.c	10
-rw-r--r--	sys/sys/mutex.h	31
2 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index af14067..ed8f7ec 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -374,11 +374,7 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
-#if defined(SMP) || LOCK_DEBUG > 0 || 1
_get_spin_lock(m, curthread, opts, file, line);
-#else
- critical_enter();
-#endif
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
@@ -396,11 +392,7 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
-#if defined(SMP) || LOCK_DEBUG > 0 || 1
_rel_spin_lock(m);
-#else
- critical_exit();
-#endif
}
/*
@@ -573,6 +565,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
return;
}
+#ifdef SMP
/*
* _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
*
@@ -620,6 +613,7 @@ _mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
return;
}
+#endif /* SMP */
/*
* _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 2275685..50c9617 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -103,8 +103,10 @@ void mutex_init(void);
void _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts,
const char *file, int line);
void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
+#ifdef SMP
void _mtx_lock_spin(struct mtx *m, struct thread *td, int opts,
const char *file, int line);
+#endif
void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int _mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
@@ -161,6 +163,7 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* a deal.
*/
#ifndef _get_spin_lock
+#ifdef SMP
#define _get_spin_lock(mp, tid, opts, file, line) do { \
struct thread *_tid = (tid); \
\
@@ -172,6 +175,19 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
} \
} while (0)
+#else /* SMP */
+#define _get_spin_lock(mp, tid, opts, file, line) do { \
+ struct thread *_tid = (tid); \
+ \
+ critical_enter(); \
+ if ((mp)->mtx_lock == (uintptr_t)_tid) \
+ (mp)->mtx_recurse++; \
+ else { \
+ KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
+ (mp)->mtx_lock = (uintptr_t)_tid; \
+ } \
+} while (0)
+#endif /* SMP */
#endif
/*
@@ -196,6 +212,7 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* releasing a spin lock. This includes the recursion cases.
*/
#ifndef _rel_spin_lock
+#ifdef SMP
#define _rel_spin_lock(mp) do { \
if (mtx_recursed((mp))) \
(mp)->mtx_recurse--; \
@@ -203,6 +220,15 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
_release_lock_quick((mp)); \
critical_exit(); \
} while (0)
+#else /* SMP */
+#define _rel_spin_lock(mp) do { \
+ if (mtx_recursed((mp))) \
+ (mp)->mtx_recurse--; \
+ else \
+ (mp)->mtx_lock = MTX_UNOWNED; \
+ critical_exit(); \
+} while (0)
+#endif /* SMP */
#endif
/*
@@ -283,15 +309,10 @@ extern struct mtx_pool *mtxpool_sleep;
_get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_flags(m, opts) \
_rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
-#ifndef SMPnotyet
#define mtx_lock_spin_flags(m, opts) \
_get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts) \
_rel_spin_lock((m))
-#else /* SMP */
-#define mtx_lock_spin_flags(m, opts) critical_enter()
-#define mtx_unlock_spin_flags(m, opts) critical_exit()
-#endif /* SMP */
#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */
#define mtx_trylock_flags(m, opts) \
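A hedged usage sketch of the property the commit preserves: because the
UP macros still store curthread in mtx_lock, mtx_owned() and
mtx_assert(m, MA_OWNED) behave the same on UP and SMP kernels. The lock
name and functions below are hypothetical; the calls follow the
mutex(9) API of this vintage:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_spin_lock;		/* hypothetical lock */

static void
example_init(void)
{
	mtx_init(&example_spin_lock, "example", NULL, MTX_SPIN);
}

static void
example_work(void)
{
	mtx_lock_spin(&example_spin_lock);
	/*
	 * mtx_assert() checks ownership via mtx_lock against curthread;
	 * the new UP fast path still records curthread there, so this
	 * assertion holds on both UP and SMP kernels.
	 */
	mtx_assert(&example_spin_lock, MA_OWNED);
	mtx_unlock_spin(&example_spin_lock);
}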