summaryrefslogtreecommitdiffstats
path: root/lib/libpthread/thread/thr_spinlock.c
diff options
context:
space:
mode:
authordeischen <deischen@FreeBSD.org>2003-04-28 23:56:12 +0000
committerdeischen <deischen@FreeBSD.org>2003-04-28 23:56:12 +0000
commit6deabb439b7870759f16db0c9a3968e3755bf61a (patch)
tree90f4ce3660289c401aab5d0a2c4f105e5aa3e877 /lib/libpthread/thread/thr_spinlock.c
parent0fbdf3450d750b66e54d688d645fc4bafd8739a1 (diff)
downloadFreeBSD-src-6deabb439b7870759f16db0c9a3968e3755bf61a.zip
FreeBSD-src-6deabb439b7870759f16db0c9a3968e3755bf61a.tar.gz
o Don't add a scope system thread's KSE to the list of available
KSEs when its thread exits; allow the GC handler to do that. o Make spinlock/spinlock critical regions. The following were submitted by davidxu o Allow thr_switch() to take a null mailbox argument. o Better protect cancellation checks. o Don't set KSE specific data when creating new KSEs; rely on the first upcall of the KSE to set it. o Add the ability to set the maximum concurrency level and do this automatically. We should have a way to enable/disable this with some sort of tunable because some applications may not want this to be the default. o Hold the scheduling lock across thread switch calls. o If scheduling of a thread fails, make sure to remove it from the list of active threads. o Better protect accesses to a joining threads when the target thread is exited and detached. o Remove some macro definitions that are now provided by <sys/kse.h>. o Don't leave the library in threaded mode if creation of the initial KSE fails. o Wakeup idle KSEs when there are threads ready to run. o Maintain the number of threads active in the priority queue.
Diffstat (limited to 'lib/libpthread/thread/thr_spinlock.c')
-rw-r--r--lib/libpthread/thread/thr_spinlock.c31
1 files changed, 16 insertions, 15 deletions
diff --git a/lib/libpthread/thread/thr_spinlock.c b/lib/libpthread/thread/thr_spinlock.c
index cb71a46..56691dd 100644
--- a/lib/libpthread/thread/thr_spinlock.c
+++ b/lib/libpthread/thread/thr_spinlock.c
@@ -33,12 +33,8 @@
*
*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <sched.h>
-#include <pthread.h>
-#include <unistd.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
#include <libc_private.h>
#include "spinlock.h"
@@ -52,7 +48,12 @@
void
_spinunlock(spinlock_t *lck)
{
- lck->access_lock = 0;
+ kse_critical_t crit;
+
+ crit = (kse_critical_t)lck->fname;
+ atomic_store_rel_long(&lck->access_lock, 0);
+ if (crit != NULL)
+ _kse_critical_leave(crit);
}
@@ -65,14 +66,21 @@ _spinunlock(spinlock_t *lck)
void
_spinlock(spinlock_t *lck)
{
+ kse_critical_t crit;
+
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
+ if (_kse_isthreaded())
+ crit = _kse_critical_enter();
+ else
+ crit = NULL;
while(_atomic_lock(&lck->access_lock)) {
while (lck->access_lock)
;
}
+ lck->fname = (char *)crit;
}
/*
@@ -88,12 +96,5 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
- /*
- * Try to grab the lock and loop if another thread grabs
- * it before we do.
- */
- while(_atomic_lock(&lck->access_lock)) {
- while (lck->access_lock)
- ;
- }
+ _spinlock(lck);
}
OpenPOWER on IntegriCloud