author    jhb <jhb@FreeBSD.org>    2005-01-05 21:13:27 +0000
committer jhb <jhb@FreeBSD.org>    2005-01-05 21:13:27 +0000
commit    65d4d80800169bac83dae081a87e3d9e14719043
tree      08a77e52d9c917cc5536ec8885e05c9cb42f5cd1 /sys/kern
parent    90af8e5e9ad80d28a3fa66629927c8b4848f0bd5
Rework the optimization for spinlocks on UP to be slightly less drastic and
turn it back on.

Specifically, the actual changes are now less intrusive in that the _get_spin_lock() and _rel_spin_lock() macros now have their contents changed for UP vs SMP kernels, which centralizes the changes. Also, UP kernels do not use _mtx_lock_spin() and no longer include it. The UP versions of the spin lock functions do not use any atomic operations, but simple compares and stores, which allow mtx_owned() to still work for spin locks while removing the overhead of atomic operations.

Tested on:	i386, alpha
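For context, below is a minimal user-space sketch of the idea behind the UP vs SMP variants of these macros. It is illustrative only: the struct, the spin_lock_smp()/spin_lock_up()/spin_unlock_up() names, and the use of the GCC/Clang __sync_bool_compare_and_swap builtin are stand-ins and are not the actual sys/sys/mutex.h definitions.

/*
 * Illustrative model only: how a spin lock acquire can use an atomic
 * compare-and-swap on SMP but a plain compare and store on UP, while
 * still recording the owner so an mtx_owned()-style check works.
 */
#include <stdint.h>

struct mtx_model {
	volatile uintptr_t mtx_lock;	/* owning thread id, or 0 if unowned */
	unsigned int	   mtx_recurse;	/* recursion count */
};

/* SMP-style acquire: another CPU may race, so a CAS is required. */
static int
spin_lock_smp(struct mtx_model *m, uintptr_t tid)
{
	/* critical_enter() would block preemption/interrupts here. */
	if (__sync_bool_compare_and_swap(&m->mtx_lock, 0, tid))
		return (1);
	if (m->mtx_lock == tid) {	/* recursive acquire by the owner */
		m->mtx_recurse++;
		return (1);
	}
	return (0);	/* contested: the real code spins in _mtx_lock_spin() */
}

/*
 * UP-style acquire: with preemption and interrupts blocked there is no
 * other context that can touch the lock, so a plain compare and store
 * are sufficient and no atomic instructions are needed.
 */
static void
spin_lock_up(struct mtx_model *m, uintptr_t tid)
{
	/* critical_enter() would block preemption/interrupts here. */
	if (m->mtx_lock == tid)
		m->mtx_recurse++;	/* recursive acquire */
	else
		m->mtx_lock = tid;	/* simple store, no atomics */
}

/* UP-style release: the mirror image, again with plain stores. */
static void
spin_unlock_up(struct mtx_model *m)
{
	if (m->mtx_recurse != 0)
		m->mtx_recurse--;
	else
		m->mtx_lock = 0;
	/* critical_exit() would re-enable preemption here. */
}

Because the UP acquire can never fail once preemption is blocked, the contested-lock path is unreachable on UP kernels, which is why the hunks below wrap _mtx_lock_spin() in #ifdef SMP.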
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_mutex.c | 10
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index af14067..ed8f7ec 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -374,11 +374,7 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
-#if defined(SMP) || LOCK_DEBUG > 0 || 1
_get_spin_lock(m, curthread, opts, file, line);
-#else
- critical_enter();
-#endif
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
@@ -396,11 +392,7 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
-#if defined(SMP) || LOCK_DEBUG > 0 || 1
_rel_spin_lock(m);
-#else
- critical_exit();
-#endif
}
/*
@@ -573,6 +565,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
return;
}
+#ifdef SMP
/*
* _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
*
@@ -620,6 +613,7 @@ _mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
return;
}
+#endif /* SMP */
/*
* _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.