author	deischen <deischen@FreeBSD.org>	2003-11-16 15:01:26 +0000
committer	deischen <deischen@FreeBSD.org>	2003-11-16 15:01:26 +0000
commit	b1926e392ed0eee6eba473c1a39c291a5f097794 (patch)
tree	3c07e7581ea50d438774ec53ca25f47131d0e64f /lib/libpthread
parent	5fdb15dc0a4f04757eba1f6a4c3c6b9eab3b0d0d (diff)
Back out last change and go back to using KSE locks instead of thread
locks until we know why this breaks ia64.

Reported by:	marcel
Diffstat (limited to 'lib/libpthread')
-rw-r--r--	lib/libpthread/thread/thr_spinlock.c	33
1 file changed, 17 insertions(+), 16 deletions(-)
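The diff below reverts _spinlock()/_spinunlock() from per-thread locks (THR_LOCK_ACQUIRE/THR_LOCK_RELEASE, with the owner tracked as a struct pthread *) back to low-level KSE locks: the lock path enters a KSE critical region first, stores the returned token in the lock's spinlock_extra record, and the unlock path reads the token back before dropping the lock, then leaves the critical region. A minimal sketch of that ordering, using C11 atomics in place of the libpthread lock primitives; kse_critical_enter()/kse_critical_leave() here are hypothetical stand-ins for _kse_critical_enter()/_kse_critical_leave():

/*
 * Sketch only, not FreeBSD code: models the lock/unlock ordering the
 * commit restores.  The real _kse_critical_enter() pins the current
 * KSE so the userland scheduler cannot preempt it while the low-level
 * lock is held; here the token is a dummy int.
 */
#include <stdatomic.h>

typedef int kse_critical_t;		/* stand-in for the opaque token */

struct spinlock_extra {
	atomic_flag lock;		/* stand-in for struct lock */
	kse_critical_t crit;		/* saved by lock, consumed by unlock */
};

static kse_critical_t
kse_critical_enter(void)
{
	return (1);			/* real code disables upcalls */
}

static void
kse_critical_leave(kse_critical_t crit)
{
	(void)crit;			/* real code re-enables upcalls */
}

static void
sketch_spinlock(struct spinlock_extra *extra)
{
	kse_critical_t crit;

	/* Enter the critical region before touching the lock... */
	crit = kse_critical_enter();
	while (atomic_flag_test_and_set_explicit(&extra->lock,
	    memory_order_acquire))
		;			/* spin until the flag clears */
	/* ...and stash the token so unlock can leave the region. */
	extra->crit = crit;
}

static void
sketch_spinunlock(struct spinlock_extra *extra)
{
	kse_critical_t crit;

	/*
	 * Read the token before releasing: once the lock drops,
	 * another thread may overwrite extra->crit.
	 */
	crit = extra->crit;
	atomic_flag_clear_explicit(&extra->lock, memory_order_release);
	kse_critical_leave(crit);
}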
diff --git a/lib/libpthread/thread/thr_spinlock.c b/lib/libpthread/thread/thr_spinlock.c
index 1c689d3..6bdab5f 100644
--- a/lib/libpthread/thread/thr_spinlock.c
+++ b/lib/libpthread/thread/thr_spinlock.c
@@ -45,7 +45,7 @@
 struct spinlock_extra {
 	spinlock_t *owner;
 	struct lock lock;
-	struct pthread *crit;
+	kse_critical_t crit;
 };

 static void init_spinlock(spinlock_t *lck);
@@ -64,11 +64,12 @@ void
 _spinunlock(spinlock_t *lck)
 {
 	struct spinlock_extra *extra;
-	struct pthread *curthr = _get_curthread();
+	kse_critical_t crit;

 	extra = (struct spinlock_extra *)lck->fname;
-	THR_ASSERT(extra->crit == curthr, "_spinunlock called without owned.");
-	THR_LOCK_RELEASE(curthr, &extra->lock);
+	crit = extra->crit;
+	KSE_LOCK_RELEASE(_get_curkse(), &extra->lock);
+	_kse_critical_leave(crit);
 }
@@ -81,8 +82,8 @@ _spinunlock(spinlock_t *lck)
 void
 _spinlock(spinlock_t *lck)
 {
-	struct spinlock_extra *extra;
-	struct pthread *curthr;
+	struct spinlock_extra *extra;
+	kse_critical_t crit;

 	THR_ASSERT(__isthreaded != 0, "Spinlock called when not threaded.");
 	THR_ASSERT(initialized != 0, "Spinlocks not initialized.");
@@ -90,12 +91,12 @@ _spinlock(spinlock_t *lck)
 	 * Try to grab the lock and loop if another thread grabs
 	 * it before we do.
 	 */
-	curthr = _get_curthread();
+	crit = _kse_critical_enter();
 	if (lck->fname == NULL)
 		init_spinlock(lck);
 	extra = (struct spinlock_extra *)lck->fname;
-	THR_LOCK_ACQUIRE(curthr, &extra->lock);
-	extra->crit = curthr;
+	KSE_LOCK_ACQUIRE(_get_curkse(), &extra->lock);
+	extra->crit = crit;
 }

 /*
@@ -117,15 +118,15 @@ _spinlock_debug(spinlock_t *lck, char *fname, int lineno)
 static void
 init_spinlock(spinlock_t *lck)
 {
-	struct pthread *curthr = _get_curthread();
+	struct kse *curkse = _get_curkse();

-	THR_LOCK_ACQUIRE(curthr, &spinlock_static_lock);
+	KSE_LOCK_ACQUIRE(curkse, &spinlock_static_lock);
 	if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
 		lck->fname = (char *)&extra[spinlock_count];
 		extra[spinlock_count].owner = lck;
 		spinlock_count++;
 	}
-	THR_LOCK_RELEASE(curthr, &spinlock_static_lock);
+	KSE_LOCK_RELEASE(curkse, &spinlock_static_lock);
 	THR_ASSERT(lck->fname != NULL, "Exceeded max spinlocks");
 }
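init_spinlock() above hands out spinlock_extra records from a fixed, statically allocated pool the first time a spinlock is used, re-checking lck->fname under the static lock so two racing threads cannot claim separate slots for the same spinlock. A self-contained sketch of that lazy-allocation pattern; pool, pool_lock, init_slot(), and the pool size are illustrative names, not the real symbols:

#include <pthread.h>
#include <stddef.h>

#define MAX_SLOTS	32		/* made-up bound, not the real MAX_SPINLOCKS */

struct slot { void *owner; };

static struct slot pool[MAX_SLOTS];
static size_t pool_count;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Bind *slotp to a pool entry if it does not have one yet. */
static void
init_slot(void **slotp, void *owner)
{
	pthread_mutex_lock(&pool_lock);
	/* Re-check under the lock: another thread may have won the race. */
	if (*slotp == NULL && pool_count < MAX_SLOTS) {
		pool[pool_count].owner = owner;
		*slotp = &pool[pool_count];
		pool_count++;
	}
	pthread_mutex_unlock(&pool_lock);
	/* Real code asserts afterward that a slot was actually assigned. */
}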
@@ -136,19 +137,19 @@ _thr_spinlock_init(void)
 	if (initialized != 0) {
 		_lock_reinit(&spinlock_static_lock, LCK_ADAPTIVE,
-		    _thr_lock_wait, _thr_lock_wakeup);
+		    _kse_lock_wait, _kse_lock_wakeup);
 		for (i = 0; i < spinlock_count; i++) {
 			_lock_reinit(&extra[i].lock, LCK_ADAPTIVE,
-			    _thr_lock_wait, _thr_lock_wakeup);
+			    _kse_lock_wait, _kse_lock_wakeup);
 		}
 		spinlock_count = 0;
 	} else {
 		if (_lock_init(&spinlock_static_lock, LCK_ADAPTIVE,
-		    _thr_lock_wait, _thr_lock_wakeup) != 0)
+		    _kse_lock_wait, _kse_lock_wakeup) != 0)
			PANIC("Cannot initialize spinlock_static_lock");
 		for (i = 0; i < MAX_SPINLOCKS; i++) {
 			if (_lock_init(&extra[i].lock, LCK_ADAPTIVE,
-			    _thr_lock_wait, _thr_lock_wakeup) != 0)
+			    _kse_lock_wait, _kse_lock_wakeup) != 0)
				PANIC("Cannot initialize spinlock extra");
 		}
 		initialized = 1;
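The last hunk swaps the wait/wakeup callbacks handed to _lock_init()/_lock_reinit() from the _thr_* handlers back to the _kse_* ones. Structurally, _thr_spinlock_init() takes one of two paths: on a repeat call (presumably when the library must reset its lock state, such as in a child after fork()) it reinitializes the existing locks in place and zeroes the in-use count, while on the first call it initializes the whole pool and sets the initialized flag. A sketch of that split; lock_init()/lock_reinit() are hypothetical stand-ins for the real locking primitives:

#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS	32		/* made-up pool size */

struct lock { int state; };

static struct lock locks[MAX_SLOTS];
static int initialized;

static int
lock_init(struct lock *lck)
{
	lck->state = 0;			/* real code also wires up wait/wakeup handlers */
	return (0);
}

static void
lock_reinit(struct lock *lck)
{
	lck->state = 0;			/* force back to a known-unlocked state */
}

static void
sketch_spinlock_init(void)
{
	int i;

	if (initialized != 0) {
		/* Repeat call: reset the existing locks in place. */
		for (i = 0; i < MAX_SLOTS; i++)
			lock_reinit(&locks[i]);
	} else {
		/* First call: build the pool once. */
		for (i = 0; i < MAX_SLOTS; i++) {
			if (lock_init(&locks[i]) != 0) {
				fprintf(stderr, "cannot init lock %d\n", i);
				abort();
			}
		}
		initialized = 1;
	}
}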