path: root/lib/libpthread/thread/thr_mutex.c
author		deischen <deischen@FreeBSD.org>	2003-07-18 02:46:30 +0000
committer	deischen <deischen@FreeBSD.org>	2003-07-18 02:46:30 +0000
commit		875c5215cc8eb12453d2815a29a56842237d4327 (patch)
tree		b47c0a599d105c7a35e226f6b60c3c2afc7eb204 /lib/libpthread/thread/thr_mutex.c
parent		1d77fe8eb48f2bc045975da4f4e8914083417a6c (diff)
Add a preemption point when a mutex or condition variable is
handed-off/signaled to a higher priority thread.

Note that when there are idle KSEs that could run the higher priority
thread, we still add the preemption point, because it seems to take the
kernel a while to schedule an idle KSE.  The drawbacks are that threads
will be swapped more often between CPUs (KSEs) and that there will be an
extra userland context switch (the idle KSE is still woken and will
probably resume the preempted thread).  We'll revisit this if and when
idle CPU/KSE wakeup times improve.

Inspired by:	Petri Helenius <pete@he.iki.fi>
Reviewed by:	davidxu
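For readers who want the logic of the change without walking the full diff, the sketch below restates the preemption-point check added in mutex_handoff() as a standalone function. It is a minimal sketch, not the actual libpthread code: struct thread, struct kse_group, and maybe_add_preemption_point() are simplified stand-ins for the real struct pthread and KSE-group internals; only the kseg, active_priority, and critical_yield fields mirror names used by the patch.

/*
 * Minimal sketch of the preemption-point decision added by this commit.
 * The types and the function name are simplified stand-ins, not the real
 * libpthread definitions; only the field names mirror the patch.
 */
struct kse_group;			/* stand-in for the real KSE group */

struct thread {
	struct kse_group *kseg;		/* KSE group this thread runs in */
	int		  active_priority;
	int		  critical_yield;	/* yield at the next safe point */
};

static void
maybe_add_preemption_point(struct thread *curthread, struct thread *woken)
{
	/*
	 * Request a yield when the newly runnable thread shares our KSE
	 * group and outranks us.  Per the commit message, this is done even
	 * when an idle KSE could run the thread, because waking an idle KSE
	 * currently takes a while.
	 */
	if (curthread->kseg == woken->kseg &&
	    woken->active_priority > curthread->active_priority)
		curthread->critical_yield = 1;
}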
Diffstat (limited to 'lib/libpthread/thread/thr_mutex.c')
-rw-r--r--	lib/libpthread/thread/thr_mutex.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index 8d0d76c..c2c28e0 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -859,8 +859,8 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
- struct pthread *curthread = _get_curthread();
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
if (m == NULL || *m == NULL)
ret = EINVAL;
@@ -997,7 +997,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
*/
THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*m)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
@@ -1006,7 +1006,6 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* This thread now owns one less priority mutex.
*/
curthread->priority_mutex_count--;
- THR_SCHED_UNLOCK(curthread, curthread);
/* Remove the mutex from the threads queue. */
MUTEX_ASSERT_IS_OWNED(*m);
@@ -1464,7 +1463,7 @@ _mutex_lock_backout(struct pthread *curthread)
static void
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
- struct pthread *pthread;
+ struct pthread *pthread;
/* Keep dequeueing until we find a valid thread: */
mutex->m_owner = NULL;
@@ -1566,8 +1565,13 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
/* Make the thread runnable and unlock the scheduling queue: */
_thr_setrunnable_unlocked(pthread);
- THR_SCHED_UNLOCK(curthread, pthread);
+ /* Add a preemption point. */
+ if ((curthread->kseg == pthread->kseg) &&
+ (pthread->active_priority > curthread->active_priority))
+ curthread->critical_yield = 1;
+
+ THR_SCHED_UNLOCK(curthread, pthread);
if (mutex->m_owner == pthread)
/* We're done; a valid owner was found. */
break;
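As a rough application-level picture of what the hand-off preemption point is for (this program is not part of the commit, and it uses only standard POSIX calls, not libpthread internals): a lower-priority thread unlocks a mutex on which a higher-priority thread is blocked, and with the preemption point the waiter should run promptly instead of waiting for the unlocking thread's next involuntary switch. Creating SCHED_FIFO threads typically requires appropriate privileges, and the observable timing depends on the scheduler and KSE configuration.

/*
 * Illustrative only: a low-priority holder (main) and a high-priority
 * waiter contending on one mutex.  With the hand-off preemption point,
 * pthread_mutex_unlock() in main should let the waiter run right away.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *
waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* blocks until main unlocks */
	printf("high-priority waiter acquired the mutex\n");
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_attr_t attr;
	struct sched_param param;
	pthread_t high;

	pthread_mutex_lock(&lock);	/* main is the low-priority holder */

	pthread_attr_init(&attr);
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
	param.sched_priority = sched_get_priority_max(SCHED_FIFO);
	pthread_attr_setschedparam(&attr, &param);

	if (pthread_create(&high, &attr, waiter, NULL) != 0) {
		fprintf(stderr, "pthread_create failed (privileges?)\n");
		return (1);
	}

	sleep(1);			/* let the waiter block on the mutex */
	pthread_mutex_unlock(&lock);	/* hand-off point exercised here */

	pthread_join(high, NULL);
	return (0);
}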