author     deischen <deischen@FreeBSD.org>  2003-04-28 23:56:12 +0000
committer  deischen <deischen@FreeBSD.org>  2003-04-28 23:56:12 +0000
commit     6deabb439b7870759f16db0c9a3968e3755bf61a (patch)
tree       90f4ce3660289c401aab5d0a2c4f105e5aa3e877 /lib/libpthread/thread/thr_mutex.c
parent     0fbdf3450d750b66e54d688d645fc4bafd8739a1 (diff)
download   FreeBSD-src-6deabb439b7870759f16db0c9a3968e3755bf61a.zip
           FreeBSD-src-6deabb439b7870759f16db0c9a3968e3755bf61a.tar.gz
o Don't add a scope system thread's KSE to the list of available
  KSEs when its thread exits; allow the GC handler to do that.
o Make spinlock/spinlock critical regions.

The following were submitted by davidxu:

o Allow thr_switch() to take a null mailbox argument.
o Better protect cancellation checks.
o Don't set KSE specific data when creating new KSEs; rely on the
  first upcall of the KSE to set it.
o Add the ability to set the maximum concurrency level and do this
  automatically.  We should have a way to enable/disable this with
  some sort of tunable because some applications may not want this
  to be the default.
o Hold the scheduling lock across thread switch calls.
o If scheduling of a thread fails, make sure to remove it from the
  list of active threads.
o Better protect accesses to a joining thread when the target thread
  has exited and is detached.
o Remove some macro definitions that are now provided by <sys/kse.h>.
o Don't leave the library in threaded mode if creation of the initial
  KSE fails.
o Wake up idle KSEs when there are threads ready to run.
o Maintain the number of threads active in the priority queue.
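
Several hunks in the diff below share one pattern: the thread's scheduling
lock is now taken before the priority-mutex bookkeeping and released after
it, so priority_mutex_count and the m_prio/m_saved_prio fields are never
visible half-updated to the scheduler.  A minimal, self-contained sketch of
that ordering follows; only the macro names THR_SCHED_LOCK/THR_SCHED_UNLOCK
and the field names come from the diff, while the trimmed-down types, the
stub macros, and the function lock_priority_mutex() are invented here for
illustration.

    #include <sys/queue.h>

    struct pthread_mutex;

    struct pthread {
            int     active_priority;
            int     inherited_priority;
            int     priority_mutex_count;
            int     sched_lock;             /* stand-in for the real KSE lock */
            TAILQ_HEAD(, pthread_mutex) mutexq;
    };

    struct pthread_mutex {
            struct pthread  *m_owner;
            int             m_prio;
            int             m_saved_prio;
            TAILQ_ENTRY(pthread_mutex) m_qe;
    };

    /* Stubs standing in for the library's scheduling-lock macros: */
    #define THR_SCHED_LOCK(cur, thr)        ((thr)->sched_lock = 1)
    #define THR_SCHED_UNLOCK(cur, thr)      ((thr)->sched_lock = 0)

    /* Sketch only -- not the actual libpthread source. */
    static void
    lock_priority_mutex(struct pthread *curthread, struct pthread_mutex *m)
    {
            /* Lock the mutex for the running thread: */
            m->m_owner = curthread;

            THR_SCHED_LOCK(curthread, curthread);
            /* Track number of priority mutexes owned: */
            curthread->priority_mutex_count++;

            /* Record priorities while the scheduling lock is held: */
            m->m_prio = curthread->active_priority;
            m->m_saved_prio = curthread->inherited_priority;
            THR_SCHED_UNLOCK(curthread, curthread);

            /* The owned-mutex queue is protected by the mutex lock, not
             * the scheduling lock, so the insert happens afterwards: */
            TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
    }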
Diffstat (limited to 'lib/libpthread/thread/thr_mutex.c')
-rw-r--r--  lib/libpthread/thread/thr_mutex.c | 35
1 file changed, 26 insertions(+), 9 deletions(-)
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index 1ae12ea9..bb97db1 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -325,6 +325,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = curthread;
+ THR_SCHED_LOCK(curthread, curthread);
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -335,6 +336,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
(*mutex)->m_prio = curthread->active_priority;
(*mutex)->m_saved_prio =
curthread->inherited_priority;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
MUTEX_ASSERT_NOT_OWNED(*mutex);
@@ -358,6 +360,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = curthread;
+ THR_SCHED_LOCK(curthread, curthread);
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -371,7 +374,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
curthread->inherited_priority;
curthread->inherited_priority =
(*mutex)->m_prio;
-
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
@@ -503,6 +506,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
* region (holding the mutex lock); we should
* be able to safely set the state.
*/
+ THR_LOCK_SWITCH(curthread);
THR_SET_STATE(curthread, PS_MUTEX_WAIT);
/* Unlock the mutex structure: */
@@ -510,6 +514,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
/* Schedule the next thread: */
_thr_sched_switch(curthread);
+ THR_UNLOCK_SWITCH(curthread);
}
break;
@@ -520,6 +525,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
/* Lock the mutex for this thread: */
(*m)->m_owner = curthread;
+ THR_SCHED_LOCK(curthread, curthread);
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -529,7 +535,6 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
* Make sure the thread's scheduling lock is
* held while priorities are adjusted.
*/
- THR_SCHED_LOCK(curthread, curthread);
(*m)->m_prio = curthread->active_priority;
(*m)->m_saved_prio =
curthread->inherited_priority;
@@ -561,17 +566,18 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
* region (holding the mutex lock); we should
* be able to safely set the state.
*/
- THR_SET_STATE(curthread, PS_MUTEX_WAIT);
-
if (curthread->active_priority > (*m)->m_prio)
/* Adjust priorities: */
mutex_priority_adjust(curthread, *m);
+ THR_LOCK_SWITCH(curthread);
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
/* Schedule the next thread: */
_thr_sched_switch(curthread);
+ THR_UNLOCK_SWITCH(curthread);
}
break;
@@ -591,6 +597,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
*/
(*m)->m_owner = curthread;
+ THR_SCHED_LOCK(curthread, curthread);
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -601,7 +608,6 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
* scheduling lock is held while priorities
* are adjusted.
*/
- THR_SCHED_LOCK(curthread, curthread);
curthread->active_priority = (*m)->m_prio;
(*m)->m_saved_prio =
curthread->inherited_priority;
@@ -636,6 +642,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
* region (holding the mutex lock); we should
* be able to safely set the state.
*/
+
+ THR_LOCK_SWITCH(curthread);
THR_SET_STATE(curthread, PS_MUTEX_WAIT);
/* Unlock the mutex structure: */
@@ -643,7 +651,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
/* Schedule the next thread: */
_thr_sched_switch(curthread);
-
+ THR_UNLOCK_SWITCH(curthread);
/*
* The threads priority may have changed while
* waiting for the mutex causing a ceiling
@@ -749,9 +757,15 @@ _mutex_cv_unlock(pthread_mutex_t *m)
int
_mutex_cv_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret;
- if ((ret = _pthread_mutex_lock(m)) == 0)
+
+ curthread = _get_curthread();
+ if ((ret = _pthread_mutex_lock(m)) == 0) {
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
(*m)->m_refcount--;
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ }
return (ret);
}
@@ -807,6 +821,8 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
* What SS2 define as a 'normal' mutex. Intentionally
* deadlock on attempts to get a lock you already own.
*/
+
+ THR_LOCK_SWITCH(curthread);
THR_SET_STATE(curthread, PS_DEADLOCK);
/* Unlock the mutex structure: */
@@ -814,6 +830,7 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
/* Schedule the next thread: */
_thr_sched_switch(curthread);
+ THR_UNLOCK_SWITCH(curthread);
break;
case PTHREAD_MUTEX_RECURSIVE:
@@ -917,12 +934,12 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
- THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
*/
curthread->priority_mutex_count--;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Remove the mutex from the threads queue. */
MUTEX_ASSERT_IS_OWNED(*m);
@@ -974,12 +991,12 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
- THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
*/
curthread->priority_mutex_count--;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Remove the mutex from the threads queue. */
MUTEX_ASSERT_IS_OWNED(*m);
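
For reference, the blocking sequence that the mutex_lock_common() and
mutex_self_lock() hunks converge on can be distilled as below.  This is a
hedged sketch, not the library's code: the stub types, the printf-based
scheduler, and the helper name block_on_mutex() are hypothetical; the macro
names and their ordering are taken from the diff.

    #include <stdio.h>

    enum thread_state { PS_RUNNING, PS_MUTEX_WAIT, PS_DEADLOCK };

    struct pthread_mutex { int m_lock; };
    struct pthread { enum thread_state state; int switch_lock; };

    /* Stubs standing in for the libpthread macros named in the diff: */
    #define THR_LOCK_SWITCH(cur)            ((cur)->switch_lock = 1)
    #define THR_UNLOCK_SWITCH(cur)          ((cur)->switch_lock = 0)
    #define THR_SET_STATE(cur, s)           ((cur)->state = (s))
    #define THR_LOCK_RELEASE(cur, lk)       (*(lk) = 0)

    static void
    _thr_sched_switch(struct pthread *curthread)
    {
            /* The real scheduler runs here; this stub just reports it. */
            printf("switching away in state %d\n", curthread->state);
    }

    static void
    block_on_mutex(struct pthread *curthread, struct pthread_mutex *m)
    {
            /*
             * Take the scheduling lock before the state change and hold
             * it across the switch, so a wakeup cannot slip in between
             * setting PS_MUTEX_WAIT and entering the scheduler.
             */
            THR_LOCK_SWITCH(curthread);
            THR_SET_STATE(curthread, PS_MUTEX_WAIT);

            /* Unlock the mutex structure: */
            THR_LOCK_RELEASE(curthread, &m->m_lock);

            /* Schedule the next thread: */
            _thr_sched_switch(curthread);
            THR_UNLOCK_SWITCH(curthread);
    }

The _mutex_cv_lock() hunk applies the same idea at a smaller scale: the
m_refcount decrement, previously done without any lock, is now bracketed by
THR_LOCK_ACQUIRE/THR_LOCK_RELEASE on the mutex's own low-level lock.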