author     deischen <deischen@FreeBSD.org>    2003-04-28 23:56:12 +0000
committer  deischen <deischen@FreeBSD.org>    2003-04-28 23:56:12 +0000
commit     6deabb439b7870759f16db0c9a3968e3755bf61a (patch)
tree       90f4ce3660289c401aab5d0a2c4f105e5aa3e877 /lib
parent     0fbdf3450d750b66e54d688d645fc4bafd8739a1 (diff)
o Don't add a scope system thread's KSE to the list of available
KSEs when its thread exits; allow the GC handler to do that.
o Make spinlock/spinunlock critical regions.
The following were submitted by davidxu:
o Allow _thread_switch() to take a null mailbox argument.
o Better protect cancellation checks.
o Don't set KSE-specific data when creating new KSEs; rely on the
first upcall of the KSE to set it.
o Add the ability to set the maximum concurrency level and do this
automatically (a minimal sketch of the new sysctl-driven sizing follows
this list). We should have a way to enable/disable this with some sort
of tunable because some applications may not want this to be the
default.
o Hold the scheduling lock across thread switch calls (a condensed
fragment of the new pattern appears just before the diff).
o If scheduling of a thread fails, make sure to remove it from the list
of active threads.
o Better protect accesses to a joining thread when the target thread
has exited and is detached.
o Remove some macro definitions that are now provided by <sys/kse.h>.
o Don't leave the library in threaded mode if creation of the initial
KSE fails.
o Wake up idle KSEs when there are threads ready to run.
o Maintain the number of threads active in the priority queue.
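
The automatic concurrency sizing mentioned above is driven by the
kern.threads.virtual_cpu sysctl queried by the new _thr_setmaxconcurrency()
(see the thr_concurrency.c hunks below). A minimal standalone sketch of that
decision, assuming a kernel that exports the sysctl, could look like this;
it only reports what the library would do instead of creating KSEs:

/*
 * Sketch only: read the virtual-CPU count the same way the new
 * _thr_setmaxconcurrency() does, and report the concurrency level that
 * would be handed to _thr_setconcurrency().
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int vcpu;
	size_t len = sizeof(vcpu);

	if (sysctlbyname("kern.threads.virtual_cpu", &vcpu, &len, NULL, 0) == 0 &&
	    vcpu > 0)
		printf("would set concurrency to %d KSEs\n", vcpu);
	else
		printf("kern.threads.virtual_cpu unavailable; keeping the default\n");
	return (0);
}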
Diffstat (limited to 'lib')
35 files changed, 640 insertions, 316 deletions
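
Most of the blocking paths in this diff (nanosleep, sigsuspend, sigwait,
sched_yield, and the condition-variable and mutex waits) are reshaped into one
pattern: take the scheduler lock with THR_LOCK_SWITCH(), set the wait state,
call _thr_sched_switch() with the lock still held, and release it with
THR_UNLOCK_SWITCH() only after the thread resumes. The fragment below
condenses the thr_nanosleep.c hunk to show that shape; it uses libkse-internal
macros (defined in the thr_private.h hunk) and is illustrative rather than
standalone:

	/*
	 * Condensed from the thr_nanosleep.c hunk: the scheduler lock is
	 * taken before the state change and kept across the context switch.
	 */
	THR_LOCK_SWITCH(curthread);   /* _kse_critical_enter() + KSE_SCHED_LOCK() */
	curthread->interrupted = 0;
	THR_SET_STATE(curthread, PS_SLEEP_WAIT);

	/* Reschedule the current thread to sleep, lock still held: */
	_thr_sched_switch(curthread);

	/* Resumed: drop the scheduler lock and leave the critical region. */
	THR_UNLOCK_SWITCH(curthread);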
diff --git a/lib/libkse/thread/thr_cancel.c b/lib/libkse/thread/thr_cancel.c index 23e0dfa..fc05b55 100644 --- a/lib/libkse/thread/thr_cancel.c +++ b/lib/libkse/thread/thr_cancel.c @@ -217,7 +217,6 @@ static void testcancel(struct pthread *curthread) { /* Take the scheduling lock while fiddling with the state: */ - THR_SCHED_LOCK(curthread, curthread); if (checkcancel(curthread) != 0) { /* Unlock before exiting: */ @@ -227,8 +226,6 @@ testcancel(struct pthread *curthread) pthread_exit(PTHREAD_CANCELED); PANIC("cancel"); } - - THR_SCHED_UNLOCK(curthread, curthread); } void @@ -236,23 +233,29 @@ _pthread_testcancel(void) { struct pthread *curthread = _get_curthread(); + THR_SCHED_LOCK(curthread, curthread); testcancel(curthread); + THR_SCHED_UNLOCK(curthread, curthread); } void _thr_enter_cancellation_point(struct pthread *thread) { /* Look for a cancellation before we block: */ + THR_SCHED_LOCK(thread, thread); testcancel(thread); thread->cancelflags |= THR_AT_CANCEL_POINT; + THR_SCHED_UNLOCK(thread, thread); } void _thr_leave_cancellation_point(struct pthread *thread) { + THR_SCHED_LOCK(thread, thread); thread->cancelflags &= ~THR_AT_CANCEL_POINT; /* Look for a cancellation after we unblock: */ testcancel(thread); + THR_SCHED_UNLOCK(thread, thread); } static void @@ -263,9 +266,12 @@ finish_cancellation(void *arg) curthread->continuation = NULL; curthread->interrupted = 0; + THR_SCHED_LOCK(curthread, curthread); if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) { curthread->cancelflags &= ~THR_CANCEL_NEEDED; + THR_SCHED_UNLOCK(curthread, curthread); _thr_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); } + THR_SCHED_UNLOCK(curthread, curthread); } diff --git a/lib/libkse/thread/thr_concurrency.c b/lib/libkse/thread/thr_concurrency.c index a4b055f..db84121 100644 --- a/lib/libkse/thread/thr_concurrency.c +++ b/lib/libkse/thread/thr_concurrency.c @@ -28,6 +28,8 @@ */ #include <errno.h> #include <pthread.h> +#include <sys/types.h> +#include <sys/sysctl.h> #include "thr_private.h" @@ -52,14 +54,8 @@ _pthread_getconcurrency(void) int _pthread_setconcurrency(int new_level) { - struct pthread *curthread; - struct kse *newkse; - kse_critical_t crit; - int kse_count; - int i; int ret; - if (new_level < 0) ret = EINVAL; else if (new_level == level) @@ -71,50 +67,72 @@ _pthread_setconcurrency(int new_level) DBG_MSG("Can't enable threading.\n"); ret = EAGAIN; } else { - ret = 0; - curthread = _get_curthread(); - /* Race condition, but so what. */ - kse_count = _kse_initial->k_kseg->kg_ksecount; - for (i = kse_count; i < new_level; i++) { - newkse = _kse_alloc(curthread); - if (newkse == NULL) { - DBG_MSG("Can't alloc new KSE.\n"); - ret = EAGAIN; - break; - } - newkse->k_kseg = _kse_initial->k_kseg; - newkse->k_schedq = _kse_initial->k_schedq; - newkse->k_curthread = NULL; - crit = _kse_critical_enter(); + ret = _thr_setconcurrency(new_level); + if (ret == 0) + level = new_level; + } + return (ret); +} + +int +_thr_setconcurrency(int new_level) +{ + struct pthread *curthread; + struct kse *newkse; + kse_critical_t crit; + int kse_count; + int i; + int ret; + + ret = 0; + curthread = _get_curthread(); + /* Race condition, but so what. 
*/ + kse_count = _kse_initial->k_kseg->kg_ksecount; + for (i = kse_count; i < new_level; i++) { + newkse = _kse_alloc(curthread); + if (newkse == NULL) { + DBG_MSG("Can't alloc new KSE.\n"); + ret = EAGAIN; + break; + } + newkse->k_kseg = _kse_initial->k_kseg; + newkse->k_schedq = _kse_initial->k_schedq; + newkse->k_curthread = NULL; + crit = _kse_critical_enter(); + KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); + TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq, + newkse, k_kgqe); + newkse->k_kseg->kg_ksecount++; + KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); + if (kse_create(&newkse->k_mbx, 0) != 0) { KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); - TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq, + TAILQ_REMOVE(&newkse->k_kseg->kg_kseq, newkse, k_kgqe); - newkse->k_kseg->kg_ksecount++; + newkse->k_kseg->kg_ksecount--; KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); - if (_ksd_setprivate(&_kse_initial->k_ksd) != 0) { - /* This should never happen. */ - PANIC("pthread_setconcurrency: Unable to " - "set KSE specific data"); - } - newkse->k_flags |= KF_INITIALIZED; - if (kse_create(&newkse->k_mbx, 0) != 0) { - _ksd_setprivate(&curthread->kse->k_ksd); - KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); - TAILQ_REMOVE(&newkse->k_kseg->kg_kseq, - newkse, k_kgqe); - newkse->k_kseg->kg_ksecount--; - KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); - _kse_critical_leave(crit); - _kse_free(curthread, newkse); - DBG_MSG("kse_create syscall failed.\n"); - ret = EAGAIN; - break; - } - _ksd_setprivate(&curthread->kse->k_ksd); + _kse_critical_leave(crit); + _kse_free(curthread, newkse); + DBG_MSG("kse_create syscall failed.\n"); + ret = EAGAIN; + break; + } else { _kse_critical_leave(crit); } - if (ret == 0) - level = new_level; } return (ret); } + +int +_thr_setmaxconcurrency(void) +{ + int vcpu; + int len; + int ret; + + len = sizeof(vcpu); + ret = sysctlbyname("kern.threads.virtual_cpu", &vcpu, &len, NULL, NULL); + if (ret == 0 && vcpu > 0) + ret = _thr_setconcurrency(vcpu); + return (ret); +} + diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c index 537c264..32a152d 100644 --- a/lib/libkse/thread/thr_cond.c +++ b/lib/libkse/thread/thr_cond.c @@ -267,6 +267,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) * lock); we should be able to safely * set the state. */ + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_COND_WAIT); /* Remember the CV: */ @@ -280,6 +281,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) _thr_sched_switch(curthread); curthread->data.cond = NULL; + THR_UNLOCK_SWITCH(curthread); /* * XXX - This really isn't a good check @@ -324,8 +326,10 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) * that the mutex be reaquired prior to * cancellation. */ - if (done != 0) + if (done != 0) { rval = _mutex_cv_lock(mutex); + unlock_mutex = 1; + } } } break; @@ -475,6 +479,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, * lock); we should be able to safely * set the state. 
*/ + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_COND_WAIT); /* Remember the CV: */ @@ -488,6 +493,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, _thr_sched_switch(curthread); curthread->data.cond = NULL; + THR_UNLOCK_SWITCH(curthread); /* * XXX - This really isn't a good check diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c index deb26de..4c65d3c 100644 --- a/lib/libkse/thread/thr_create.c +++ b/lib/libkse/thread/thr_create.c @@ -297,6 +297,13 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, * pair if necessary. */ ret = _thr_schedule_add(curthread, new_thread); + if (ret != 0) { + KSE_LOCK_ACQUIRE(curthread->kse, + &_thread_list_lock); + THR_LIST_REMOVE(new_thread); + KSE_LOCK_RELEASE(curthread->kse, + &_thread_list_lock); + } _kse_critical_leave(crit); if (ret != 0) free_thread(curthread, new_thread); diff --git a/lib/libkse/thread/thr_detach.c b/lib/libkse/thread/thr_detach.c index 30b7ce5..66db912 100644 --- a/lib/libkse/thread/thr_detach.c +++ b/lib/libkse/thread/thr_detach.c @@ -44,8 +44,6 @@ _pthread_detach(pthread_t pthread) { struct pthread *curthread = _get_curthread(); struct pthread *joiner; - kse_critical_t crit; - int dead; int rval = 0; /* Check for invalid calling parameters: */ @@ -56,7 +54,6 @@ _pthread_detach(pthread_t pthread) else if ((rval = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) { /* Return an error: */ - _thr_leave_cancellation_point(curthread); } /* Check if the thread is already detached: */ @@ -91,20 +88,11 @@ _pthread_detach(pthread_t pthread) } joiner = NULL; } - dead = (pthread->flags & THR_FLAGS_GC_SAFE) != 0; THR_SCHED_UNLOCK(curthread, pthread); - - if (dead != 0) { - crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); - THR_GCLIST_ADD(pthread); - KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); - _kse_critical_leave(crit); - } _thr_ref_delete(curthread, pthread); /* See if there is a thread waiting in pthread_join(): */ - if (joiner != NULL) { + if (joiner != NULL && _thr_ref_add(curthread, joiner, 0) == 0) { /* Lock the joiner before fiddling with it. */ THR_SCHED_LOCK(curthread, joiner); if (joiner->join_status.thread == pthread) { @@ -118,6 +106,7 @@ _pthread_detach(pthread_t pthread) _thr_setrunnable_unlocked(joiner); } THR_SCHED_UNLOCK(curthread, joiner); + _thr_ref_delete(curthread, joiner); } } diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c index 4a82b12..8435f43 100644 --- a/lib/libkse/thread/thr_exit.c +++ b/lib/libkse/thread/thr_exit.c @@ -104,7 +104,9 @@ _pthread_exit(void *status) * Flag this thread as exiting. Threads should now be prevented * from joining to this thread. */ + THR_SCHED_LOCK(curthread, curthread); curthread->flags |= THR_FLAGS_EXITING; + THR_SCHED_UNLOCK(curthread, curthread); /* Save the return value: */ curthread->ret = status; @@ -121,10 +123,11 @@ _pthread_exit(void *status) } /* This thread will never be re-scheduled. */ - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_DEAD); - THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); + /* Never reach! */ + THR_UNLOCK_SWITCH(curthread); /* This point should not be reached. 
*/ PANIC("Dead thread has resumed"); diff --git a/lib/libkse/thread/thr_find_thread.c b/lib/libkse/thread/thr_find_thread.c index b5cae66..0c813db 100644 --- a/lib/libkse/thread/thr_find_thread.c +++ b/lib/libkse/thread/thr_find_thread.c @@ -85,6 +85,9 @@ _thr_ref_delete(struct pthread *curthread, struct pthread *thread) KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); thread->refcount--; curthread->critical_count--; + if ((thread->refcount == 0) && + (thread->flags & THR_FLAGS_GC_SAFE) != 0) + THR_GCLIST_ADD(thread); KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } diff --git a/lib/libkse/thread/thr_join.c b/lib/libkse/thread/thr_join.c index 77e3a8c..7cbc192 100644 --- a/lib/libkse/thread/thr_join.c +++ b/lib/libkse/thread/thr_join.c @@ -70,16 +70,16 @@ _pthread_join(pthread_t pthread, void **thread_return) return (ESRCH); } + THR_SCHED_LOCK(curthread, pthread); /* Check if this thread has been detached: */ if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) { + THR_SCHED_UNLOCK(curthread, pthread); /* Remove the reference and return an error: */ _thr_ref_delete(curthread, pthread); ret = ESRCH; } else { /* Lock the target thread while checking its state. */ - THR_SCHED_LOCK(curthread, pthread); - if ((pthread->state == PS_DEAD) || - ((pthread->flags & THR_FLAGS_EXITING) != 0)) { + if (pthread->state == PS_DEAD) { if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; @@ -123,15 +123,13 @@ _pthread_join(pthread_t pthread, void **thread_return) THR_SCHED_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); - THR_SCHED_LOCK(curthread, curthread); - if (curthread->join_status.thread == pthread) - THR_SET_STATE(curthread, PS_JOIN); - THR_SCHED_UNLOCK(curthread, curthread); - + THR_LOCK_SWITCH(curthread); while (curthread->join_status.thread == pthread) { + THR_SET_STATE(curthread, PS_JOIN); /* Schedule the next thread: */ _thr_sched_switch(curthread); } + THR_UNLOCK_SWITCH(curthread); /* * The thread return value and error are set by the diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c index 63283f7..5762219 100644 --- a/lib/libkse/thread/thr_kern.c +++ b/lib/libkse/thread/thr_kern.c @@ -95,14 +95,7 @@ __FBSDID("$FreeBSD$"); _pq_remove(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq) -/* - * XXX - Remove when David commits kernel changes to support these. 
- */ -#ifndef KMF_NOUPCALL -#define KMF_NOUPCALL 0x01 -#define KMF_NOCOMPLETED 0x02 -#endif - +#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads) /* * We've got to keep track of everything that is allocated, not only @@ -140,11 +133,11 @@ static void kseg_free_unlocked(struct kse_group *kseg); static void kseg_init(struct kse_group *kseg); static void kseg_reinit(struct kse_group *kseg); static void kse_waitq_insert(struct pthread *thread); +static void kse_wakeup_multi(struct kse *curkse); +static void kse_wakeup_one(struct pthread *thread); static void thr_cleanup(struct kse *kse, struct pthread *curthread); -#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp); -#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, struct pthread_sigframe *psf); static int thr_timedout(struct pthread *thread, struct timespec *curtime); @@ -368,11 +361,13 @@ _kse_setthreaded(int threaded) _kse_initial->k_flags |= KF_STARTED; if (kse_create(&_kse_initial->k_mbx, 0) != 0) { _kse_initial->k_flags &= ~KF_STARTED; + __isthreaded = 0; /* may abort() */ DBG_MSG("kse_create failed\n"); return (-1); } KSE_SET_MBOX(_kse_initial, _thr_initial); + _thr_setmaxconcurrency(); } return (0); } @@ -466,16 +461,14 @@ _thr_lock_wait(struct lock *lock, struct lockuser *lu) while (_LCK_BUSY(lu) && count < 300) count++; while (_LCK_BUSY(lu)) { - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); if (_LCK_BUSY(lu)) { /* Wait for the lock: */ atomic_store_rel_int(&curthread->need_wakeup, 1); THR_SET_STATE(curthread, PS_LOCKWAIT); - THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); } - else - THR_SCHED_UNLOCK(curthread, curthread); + THR_UNLOCK_SWITCH(curthread); } } @@ -484,14 +477,26 @@ _thr_lock_wakeup(struct lock *lock, struct lockuser *lu) { struct pthread *thread; struct pthread *curthread; + int unlock; curthread = _get_curthread(); thread = (struct pthread *)_LCK_GET_PRIVATE(lu); - THR_SCHED_LOCK(curthread, thread); + unlock = 0; + if (curthread->kseg == thread->kseg) { + /* Not already locked */ + if (curthread->lock_switch == 0) { + THR_SCHED_LOCK(curthread, thread); + unlock = 1; + } + } else { + THR_SCHED_LOCK(curthread, thread); + unlock = 1; + } _thr_setrunnable_unlocked(thread); atomic_store_rel_int(&thread->need_wakeup, 0); - THR_SCHED_UNLOCK(curthread, thread); + if (unlock) + THR_SCHED_UNLOCK(curthread, thread); } kse_critical_t @@ -541,12 +546,12 @@ void _thr_sched_switch(struct pthread *curthread) { struct pthread_sigframe psf; - kse_critical_t crit; struct kse *curkse; volatile int once = 0; /* We're in the scheduler, 5 by 5: */ - crit = _kse_critical_enter(); + THR_ASSERT(curthread->lock_switch, "lock_switch"); + THR_ASSERT(_kse_in_critical(), "not in critical region"); curkse = _get_curkse(); curthread->need_switchout = 1; /* The thread yielded on its own. */ @@ -568,7 +573,11 @@ _thr_sched_switch(struct pthread *curthread) */ if ((once == 0) && (!THR_IN_CRITICAL(curthread))) { once = 1; + curthread->critical_count++; + THR_UNLOCK_SWITCH(curthread); + curthread->critical_count--; thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf); + THR_LOCK_SWITCH(curthread); } } @@ -734,14 +743,20 @@ kse_sched_multi(struct kse *curkse) KSE_CLEAR_WAIT(curkse); } - /* Lock the scheduling lock. 
*/ - KSE_SCHED_LOCK(curkse, curkse->k_kseg); + curthread = curkse->k_curthread; + if (curthread == NULL || curthread->lock_switch == 0) { + /* + * curthread was preempted by upcall, it is not a volunteer + * context switch. Lock the scheduling lock. + */ + KSE_SCHED_LOCK(curkse, curkse->k_kseg); + } /* * If the current thread was completed in another KSE, then * it will be in the run queue. Don't mark it as being blocked. */ - if (((curthread = curkse->k_curthread) != NULL) && + if ((curthread != NULL) && ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) && (curthread->need_switchout == 0)) { /* @@ -774,17 +789,22 @@ kse_sched_multi(struct kse *curkse) * Resume the thread and tell it to yield when * it leaves the critical region. */ - curthread->critical_yield = 0; + curthread->critical_yield = 1; curthread->active = 1; if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0) KSE_RUNQ_REMOVE(curkse, curthread); curkse->k_curthread = curthread; curthread->kse = curkse; - KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); DBG_MSG("Continuing thread %p in critical region\n", curthread); - ret = _thread_switch(&curthread->tmbx, - &curkse->k_mbx.km_curthread); + if (curthread->lock_switch) { + KSE_SCHED_LOCK(curkse, curkse->k_kseg); + ret = _thread_switch(&curthread->tmbx, 0); + } else { + KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); + ret = _thread_switch(&curthread->tmbx, + &curkse->k_mbx.km_curthread); + } if (ret != 0) PANIC("Can't resume thread in critical region\n"); } @@ -792,6 +812,8 @@ kse_sched_multi(struct kse *curkse) kse_switchout_thread(curkse, curthread); curkse->k_curthread = NULL; + kse_wakeup_multi(curkse); + /* This has to be done without the scheduling lock held. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_check_signals(curkse); @@ -868,6 +890,8 @@ kse_sched_multi(struct kse *curkse) curframe = curthread->curframe; curthread->curframe = NULL; + kse_wakeup_multi(curkse); + /* Unlock the scheduling queue: */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); @@ -881,10 +905,13 @@ kse_sched_multi(struct kse *curkse) #ifdef NOT_YET if ((curframe == NULL) && ((curthread->check_pending != 0) || (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && - ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) { + ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) + signalcontext(&curthread->tmbx.tm_context, 0, + (__sighandler_t *)thr_resume_wrapper); +#else + if ((curframe == NULL) && (curthread->check_pending != 0)) signalcontext(&curthread->tmbx.tm_context, 0, (__sighandler_t *)thr_resume_wrapper); - } #endif #ifdef GS_HACK /* XXX - The kernel sometimes forgets to restore %gs properly. 
*/ @@ -893,7 +920,13 @@ kse_sched_multi(struct kse *curkse) /* * Continue the thread at its current frame: */ - ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); + if (curthread->lock_switch) { + KSE_SCHED_LOCK(curkse, curkse->k_kseg); + ret = _thread_switch(&curthread->tmbx, 0); + } else { + ret = _thread_switch(&curthread->tmbx, + &curkse->k_mbx.km_curthread); + } if (ret != 0) PANIC("Thread has returned from _thread_switch"); @@ -932,7 +965,6 @@ kse_check_signals(struct kse *curkse) } } -#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp) { @@ -940,7 +972,6 @@ thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp) thr_resume_check(curthread, ucp, NULL); } -#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, @@ -960,9 +991,11 @@ thr_resume_check(struct pthread *curthread, ucontext_t *ucp, _thr_sig_rundown(curthread, ucp, psf); } +#ifdef NOT_YET if (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); +#endif } /* @@ -1071,6 +1104,7 @@ _thr_gc(struct pthread *curthread) * referenced. It is safe to remove all * remnants of the thread. */ + THR_LIST_REMOVE(td); TAILQ_INSERT_HEAD(&worklist, td, gcle); } } @@ -1172,11 +1206,11 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) if (need_start != 0) kse_create(&newthread->kse->k_mbx, 0); else if ((newthread->state == PS_RUNNING) && - KSE_WAITING(newthread->kse)) { + KSE_IS_IDLE(newthread->kse)) { /* * The thread is being scheduled on another KSEG. */ - KSE_WAKEUP(newthread->kse); + kse_wakeup_one(newthread); } ret = 0; } @@ -1326,6 +1360,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread) */ DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state); if (thread->blocked != 0) { + thread->active = 0; + thread->need_switchout = 0; /* This thread must have blocked in the kernel. */ /* thread->slice_usec = -1;*/ /* restart timeslice */ /* @@ -1346,6 +1382,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread) * stack. It is safe to do garbage collecting * here. */ + thread->active = 0; + thread->need_switchout = 0; thr_cleanup(kse, thread); return; break; @@ -1456,14 +1494,18 @@ kse_wait(struct kse *kse, struct pthread *td_wait) } /* Don't sleep for negative times. */ if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) { - KSE_SET_WAIT(kse); + KSE_SET_IDLE(kse); + kse->k_kseg->kg_idle_kses++; KSE_SCHED_UNLOCK(kse, kse->k_kseg); saved_flags = kse->k_mbx.km_flags; kse->k_mbx.km_flags |= KMF_NOUPCALL; kse_release(&ts_sleep); kse->k_mbx.km_flags = saved_flags; - KSE_CLEAR_WAIT(kse); KSE_SCHED_LOCK(kse, kse->k_kseg); + if (KSE_IS_IDLE(kse)) { + KSE_CLEAR_IDLE(kse); + kse->k_kseg->kg_idle_kses--; + } } } @@ -1495,11 +1537,13 @@ kse_fini(struct kse *kse) * Add this KSE to the list of free KSEs along with * the KSEG if is now orphaned. */ +#ifdef NOT_YET KSE_LOCK_ACQUIRE(kse, &kse_lock); if (free_kseg != NULL) kseg_free_unlocked(free_kseg); kse_free_unlocked(kse); KSE_LOCK_RELEASE(kse, &kse_lock); +#endif kse_exit(); /* Never returns. */ } else { @@ -1620,7 +1664,7 @@ _thr_setrunnable_unlocked(struct pthread *thread) if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0) /* No silly queues for these threads. 
*/ THR_SET_STATE(thread, PS_RUNNING); - else { + else if (thread->state != PS_RUNNING) { if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0) KSE_WAITQ_REMOVE(thread->kse, thread); THR_SET_STATE(thread, PS_RUNNING); @@ -1641,8 +1685,47 @@ _thr_setrunnable_unlocked(struct pthread *thread) * (the KSE). If the KSE wakes up and doesn't find any more * work it will again go back to waiting so no harm is done. */ - if (KSE_WAITING(thread->kse)) + kse_wakeup_one(thread); +} + +static void +kse_wakeup_one(struct pthread *thread) +{ + struct kse *ke; + + if (KSE_IS_IDLE(thread->kse)) { + KSE_CLEAR_IDLE(thread->kse); + thread->kseg->kg_idle_kses--; KSE_WAKEUP(thread->kse); + } else { + TAILQ_FOREACH(ke, &thread->kseg->kg_kseq, k_kgqe) { + if (KSE_IS_IDLE(ke)) { + KSE_CLEAR_IDLE(ke); + ke->k_kseg->kg_idle_kses--; + KSE_WAKEUP(ke); + return; + } + } + } +} + +static void +kse_wakeup_multi(struct kse *curkse) +{ + struct kse *ke; + int tmp; + + if ((tmp = KSE_RUNQ_THREADS(curkse)) && curkse->k_kseg->kg_idle_kses) { + TAILQ_FOREACH(ke, &curkse->k_kseg->kg_kseq, k_kgqe) { + if (KSE_IS_IDLE(ke)) { + KSE_CLEAR_IDLE(ke); + ke->k_kseg->kg_idle_kses--; + KSE_WAKEUP(ke); + if (--tmp == 0) + break; + } + } + } } struct pthread * @@ -1876,7 +1959,7 @@ kse_reinit(struct kse *kse) sigemptyset(&kse->k_sigmask); bzero(&kse->k_sigq, sizeof(kse->k_sigq)); kse->k_check_sigq = 0; - kse->k_flags = KF_INITIALIZED; + kse->k_flags = 0; kse->k_waiting = 0; kse->k_error = 0; kse->k_cpu = 0; @@ -1948,6 +2031,7 @@ _thr_alloc(struct pthread *curthread) free_thread_count--; } KSE_LOCK_RELEASE(curthread->kse, &thread_lock); + _kse_critical_leave(crit); } } if (thread == NULL) @@ -1971,9 +2055,6 @@ _thr_free(struct pthread *curthread, struct pthread *thread) } else { crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); - THR_LIST_REMOVE(thread); - KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); TAILQ_INSERT_HEAD(&free_threadq, thread, tle); free_thread_count++; diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c index 1ae12ea9..bb97db1 100644 --- a/lib/libkse/thread/thr_mutex.c +++ b/lib/libkse/thread/thr_mutex.c @@ -325,6 +325,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -335,6 +336,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; + THR_SCHED_UNLOCK(curthread, curthread); /* Add to the list of owned mutexes: */ MUTEX_ASSERT_NOT_OWNED(*mutex); @@ -358,6 +360,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -371,7 +374,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; - + THR_SCHED_UNLOCK(curthread, curthread); /* Add to the list of owned mutexes: */ MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, @@ -503,6 +506,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * region (holding the mutex lock); we 
should * be able to safely set the state. */ + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_MUTEX_WAIT); /* Unlock the mutex structure: */ @@ -510,6 +514,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) /* Schedule the next thread: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); } break; @@ -520,6 +525,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) /* Lock the mutex for this thread: */ (*m)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -529,7 +535,6 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * Make sure the thread's scheduling lock is * held while priorities are adjusted. */ - THR_SCHED_LOCK(curthread, curthread); (*m)->m_prio = curthread->active_priority; (*m)->m_saved_prio = curthread->inherited_priority; @@ -561,17 +566,18 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * region (holding the mutex lock); we should * be able to safely set the state. */ - THR_SET_STATE(curthread, PS_MUTEX_WAIT); - if (curthread->active_priority > (*m)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(curthread, *m); + THR_LOCK_SWITCH(curthread); + THR_SET_STATE(curthread, PS_MUTEX_WAIT); /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*m)->m_lock); /* Schedule the next thread: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); } break; @@ -591,6 +597,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) */ (*m)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -601,7 +608,6 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * scheduling lock is held while priorities * are adjusted. */ - THR_SCHED_LOCK(curthread, curthread); curthread->active_priority = (*m)->m_prio; (*m)->m_saved_prio = curthread->inherited_priority; @@ -636,6 +642,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * region (holding the mutex lock); we should * be able to safely set the state. */ + + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_MUTEX_WAIT); /* Unlock the mutex structure: */ @@ -643,7 +651,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) /* Schedule the next thread: */ _thr_sched_switch(curthread); - + THR_UNLOCK_SWITCH(curthread); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling @@ -749,9 +757,15 @@ _mutex_cv_unlock(pthread_mutex_t *m) int _mutex_cv_lock(pthread_mutex_t *m) { + struct pthread *curthread; int ret; - if ((ret = _pthread_mutex_lock(m)) == 0) + + curthread = _get_curthread(); + if ((ret = _pthread_mutex_lock(m)) == 0) { + THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); (*m)->m_refcount--; + THR_LOCK_RELEASE(curthread, &(*m)->m_lock); + } return (ret); } @@ -807,6 +821,8 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m) * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. 
*/ + + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_DEADLOCK); /* Unlock the mutex structure: */ @@ -814,6 +830,7 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m) /* Schedule the next thread: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); break; case PTHREAD_MUTEX_RECURSIVE: @@ -917,12 +934,12 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference) curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); - THR_SCHED_UNLOCK(curthread, curthread); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; + THR_SCHED_UNLOCK(curthread, curthread); /* Remove the mutex from the threads queue. */ MUTEX_ASSERT_IS_OWNED(*m); @@ -974,12 +991,12 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference) curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); - THR_SCHED_UNLOCK(curthread, curthread); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; + THR_SCHED_UNLOCK(curthread, curthread); /* Remove the mutex from the threads queue. */ MUTEX_ASSERT_IS_OWNED(*m); diff --git a/lib/libkse/thread/thr_nanosleep.c b/lib/libkse/thread/thr_nanosleep.c index bec3b66..e83f4f4 100644 --- a/lib/libkse/thread/thr_nanosleep.c +++ b/lib/libkse/thread/thr_nanosleep.c @@ -60,14 +60,14 @@ _nanosleep(const struct timespec *time_to_sleep, /* Calculate the time for the current thread to wake up: */ TIMESPEC_ADD(&curthread->wakeup_time, &ts, time_to_sleep); - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); curthread->interrupted = 0; THR_SET_STATE(curthread, PS_SLEEP_WAIT); - THR_SCHED_UNLOCK(curthread, curthread); /* Reschedule the current thread to sleep: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); /* Calculate the remaining time to sleep: */ KSE_GET_TOD(curthread->kse, &ts1); diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c index 3f261f8..2822aa8 100644 --- a/lib/libkse/thread/thr_priority_queue.c +++ b/lib/libkse/thread/thr_priority_queue.c @@ -126,6 +126,7 @@ _pq_init(pq_queue_t *pq) /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); pq->pq_flags = 0; + pq->pq_threads = 0; } return (ret); } @@ -151,10 +152,10 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread) * from the priority queue when _pq_first is called. */ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); - + pq->pq_threads--; /* This thread is now longer in the priority queue. */ pthread->flags &= ~THR_FLAGS_IN_RUNQ; - + PQ_CLEAR_ACTIVE(pq); } @@ -177,7 +178,7 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread) if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); - + pq->pq_threads++; /* Mark this thread as being in the priority queue. */ pthread->flags |= THR_FLAGS_IN_RUNQ; @@ -203,7 +204,7 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); - + pq->pq_threads++; /* Mark this thread as being in the priority queue. 
*/ pthread->flags |= THR_FLAGS_IN_RUNQ; diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h index fc2e4ee..4d43242 100644 --- a/lib/libkse/thread/thr_private.h +++ b/lib/libkse/thread/thr_private.h @@ -143,6 +143,7 @@ typedef struct pq_queue { int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; + int pq_threads; } pq_queue_t; /* @@ -197,6 +198,7 @@ struct kse { #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ int k_waiting; + int k_idle; /* kse is idle */ int k_error; /* syscall errno in critical */ int k_cpu; /* CPU ID when bound */ int k_done; /* this KSE is done */ @@ -294,11 +296,15 @@ do { \ #define KSE_SET_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 1) -#define KSE_CLEAR_WAIT(kse) atomic_set_acq_int(&(kse)->k_waiting, 0) +#define KSE_CLEAR_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 0) #define KSE_WAITING(kse) (kse)->k_waiting != 0 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) +#define KSE_SET_IDLE(kse) ((kse)->k_idle = 1) +#define KSE_CLEAR_IDLE(kse) ((kse)->k_idle = 0) +#define KSE_IS_IDLE(kse) ((kse)->k_idle != 0) + /* * TailQ initialization values. */ @@ -658,6 +664,7 @@ struct pthread { /* Thread state: */ enum pthread_state state; + int lock_switch; /* * Number of microseconds accumulated by this thread when @@ -803,8 +810,11 @@ struct pthread { #define THR_YIELD_CHECK(thrd) \ do { \ if (((thrd)->critical_yield != 0) && \ - !(THR_IN_CRITICAL(thrd))) \ + !(THR_IN_CRITICAL(thrd))) { \ + THR_LOCK_SWITCH(thrd); \ _thr_sched_switch(thrd); \ + THR_UNLOCK_SWITCH(thrd); \ + } \ else if (((thrd)->check_pending != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sig_check_pending(thrd); \ @@ -828,15 +838,31 @@ do { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ - if ((thrd)->locklevel != 0) \ + if ((thrd)->lock_switch) \ ; \ - else if ((thrd)->critical_yield != 0) \ - _thr_sched_switch(thrd); \ - else if ((thrd)->check_pending != 0) \ - _thr_sig_check_pending(thrd); \ + else { \ + THR_YIELD_CHECK(thrd); \ + } \ } \ } while (0) +#define THR_LOCK_SWITCH(thrd) \ +do { \ + THR_ASSERT(!(thrd)->lock_switch, "context switch locked"); \ + _kse_critical_enter(); \ + KSE_SCHED_LOCK((thrd)->kse, (thrd)->kseg); \ + (thrd)->lock_switch = 1; \ +} while (0) + +#define THR_UNLOCK_SWITCH(thrd) \ +do { \ + THR_ASSERT((thrd)->lock_switch, "context switch not locked"); \ + THR_ASSERT(_kse_in_critical(), "Er,not in critical region"); \ + (thrd)->lock_switch = 0; \ + KSE_SCHED_UNLOCK((thrd)->kse, (thrd)->kseg); \ + _kse_critical_leave(&thrd->tmbx); \ +} while (0) + /* * For now, threads will have their own lock separate from their * KSE scheduling lock. 
@@ -907,8 +933,6 @@ do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ - if ((curthr)->locklevel == 0) \ - THR_YIELD_CHECK(curthr); \ } while (0) #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ @@ -917,7 +941,9 @@ do { \ if (((thr)->critical_yield != 0) && \ ((thr)->critical_count == 0)) { \ (thr)->critical_yield = 0; \ + THR_LOCK_SWITCH(thr); \ _thr_sched_switch(thr); \ + THR_UNLOCK_SWITCH(thr); \ } \ } while (0) @@ -1092,6 +1118,8 @@ void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf) void _thr_seterrno(struct pthread *, int); void _thr_enter_cancellation_point(struct pthread *); void _thr_leave_cancellation_point(struct pthread *); +int _thr_setconcurrency(int new_level); +int _thr_setmaxconcurrency(void); /* XXX - Stuff that goes away when my sources get more up to date. */ /* #include <sys/kse.h> */ diff --git a/lib/libkse/thread/thr_sigsuspend.c b/lib/libkse/thread/thr_sigsuspend.c index 7ce027a..9ada1b2 100644 --- a/lib/libkse/thread/thr_sigsuspend.c +++ b/lib/libkse/thread/thr_sigsuspend.c @@ -53,13 +53,14 @@ _sigsuspend(const sigset_t *set) memcpy(&curthread->tmbx.tm_context.uc_sigmask, set, sizeof(sigset_t)); + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_SIGSUSPEND); - THR_SCHED_UNLOCK(curthread, curthread); - /* Wait for a signal: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); + /* Always return an interrupted error: */ errno = EINTR; diff --git a/lib/libkse/thread/thr_sigwait.c b/lib/libkse/thread/thr_sigwait.c index b955251..7b5a31f 100644 --- a/lib/libkse/thread/thr_sigwait.c +++ b/lib/libkse/thread/thr_sigwait.c @@ -134,11 +134,10 @@ _sigwait(const sigset_t *set, int *sig) curthread->data.sigwait = &waitset; /* Wait for a signal: */ - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_SIGWAIT); - THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); - + THR_UNLOCK_SWITCH(curthread); /* Return the signal number to the caller: */ *sig = curthread->signo; diff --git a/lib/libkse/thread/thr_spinlock.c b/lib/libkse/thread/thr_spinlock.c index cb71a46..56691dd 100644 --- a/lib/libkse/thread/thr_spinlock.c +++ b/lib/libkse/thread/thr_spinlock.c @@ -33,12 +33,8 @@ * */ -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <sched.h> -#include <pthread.h> -#include <unistd.h> +#include <sys/types.h> +#include <machine/atomic.h> #include <libc_private.h> #include "spinlock.h" @@ -52,7 +48,12 @@ void _spinunlock(spinlock_t *lck) { - lck->access_lock = 0; + kse_critical_t crit; + + crit = (kse_critical_t)lck->fname; + atomic_store_rel_long(&lck->access_lock, 0); + if (crit != NULL) + _kse_critical_leave(crit); } @@ -65,14 +66,21 @@ _spinunlock(spinlock_t *lck) void _spinlock(spinlock_t *lck) { + kse_critical_t crit; + /* * Try to grab the lock and loop if another thread grabs * it before we do. */ + if (_kse_isthreaded()) + crit = _kse_critical_enter(); + else + crit = NULL; while(_atomic_lock(&lck->access_lock)) { while (lck->access_lock) ; } + lck->fname = (char *)crit; } /* @@ -88,12 +96,5 @@ _spinlock(spinlock_t *lck) void _spinlock_debug(spinlock_t *lck, char *fname, int lineno) { - /* - * Try to grab the lock and loop if another thread grabs - * it before we do. 
- */ - while(_atomic_lock(&lck->access_lock)) { - while (lck->access_lock) - ; - } + _spinlock(lck); } diff --git a/lib/libkse/thread/thr_yield.c b/lib/libkse/thread/thr_yield.c index acaa3c5..dfe7278 100644 --- a/lib/libkse/thread/thr_yield.c +++ b/lib/libkse/thread/thr_yield.c @@ -46,8 +46,9 @@ _sched_yield(void) curthread->slice_usec = -1; /* Schedule the next thread: */ + THR_LOCK_SWITCH(curthread); _thr_sched_switch(curthread); - + THR_UNLOCK_SWITCH(curthread); /* Always return no error. */ return(0); } @@ -62,5 +63,7 @@ _pthread_yield(void) curthread->slice_usec = -1; /* Schedule the next thread: */ + THR_LOCK_SWITCH(curthread); _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); } diff --git a/lib/libpthread/arch/i386/i386/thr_switch.S b/lib/libpthread/arch/i386/i386/thr_switch.S index 4438b90..f8da6b6 100644 --- a/lib/libpthread/arch/i386/i386/thr_switch.S +++ b/lib/libpthread/arch/i386/i386/thr_switch.S @@ -103,9 +103,11 @@ ENTRY(_thread_switch) * all registers are now moved out of mailbox, * it's safe to set current thread pointer */ + cmpl $0, %ecx + je 8f movl %ebx,(%ecx) - popl %ecx /* %ecx off stack */ +8: popl %ecx /* %ecx off stack */ popl %ebx /* %ebx off stack */ popfl /* flags off stack */ -5: ret /* %eip off stack */ +9: ret /* %eip off stack */ diff --git a/lib/libpthread/thread/thr_cancel.c b/lib/libpthread/thread/thr_cancel.c index 23e0dfa..fc05b55 100644 --- a/lib/libpthread/thread/thr_cancel.c +++ b/lib/libpthread/thread/thr_cancel.c @@ -217,7 +217,6 @@ static void testcancel(struct pthread *curthread) { /* Take the scheduling lock while fiddling with the state: */ - THR_SCHED_LOCK(curthread, curthread); if (checkcancel(curthread) != 0) { /* Unlock before exiting: */ @@ -227,8 +226,6 @@ testcancel(struct pthread *curthread) pthread_exit(PTHREAD_CANCELED); PANIC("cancel"); } - - THR_SCHED_UNLOCK(curthread, curthread); } void @@ -236,23 +233,29 @@ _pthread_testcancel(void) { struct pthread *curthread = _get_curthread(); + THR_SCHED_LOCK(curthread, curthread); testcancel(curthread); + THR_SCHED_UNLOCK(curthread, curthread); } void _thr_enter_cancellation_point(struct pthread *thread) { /* Look for a cancellation before we block: */ + THR_SCHED_LOCK(thread, thread); testcancel(thread); thread->cancelflags |= THR_AT_CANCEL_POINT; + THR_SCHED_UNLOCK(thread, thread); } void _thr_leave_cancellation_point(struct pthread *thread) { + THR_SCHED_LOCK(thread, thread); thread->cancelflags &= ~THR_AT_CANCEL_POINT; /* Look for a cancellation after we unblock: */ testcancel(thread); + THR_SCHED_UNLOCK(thread, thread); } static void @@ -263,9 +266,12 @@ finish_cancellation(void *arg) curthread->continuation = NULL; curthread->interrupted = 0; + THR_SCHED_LOCK(curthread, curthread); if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) { curthread->cancelflags &= ~THR_CANCEL_NEEDED; + THR_SCHED_UNLOCK(curthread, curthread); _thr_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); } + THR_SCHED_UNLOCK(curthread, curthread); } diff --git a/lib/libpthread/thread/thr_concurrency.c b/lib/libpthread/thread/thr_concurrency.c index a4b055f..db84121 100644 --- a/lib/libpthread/thread/thr_concurrency.c +++ b/lib/libpthread/thread/thr_concurrency.c @@ -28,6 +28,8 @@ */ #include <errno.h> #include <pthread.h> +#include <sys/types.h> +#include <sys/sysctl.h> #include "thr_private.h" @@ -52,14 +54,8 @@ _pthread_getconcurrency(void) int _pthread_setconcurrency(int new_level) { - struct pthread *curthread; - struct kse *newkse; - kse_critical_t crit; - int kse_count; - int i; int ret; - if 
(new_level < 0) ret = EINVAL; else if (new_level == level) @@ -71,50 +67,72 @@ _pthread_setconcurrency(int new_level) DBG_MSG("Can't enable threading.\n"); ret = EAGAIN; } else { - ret = 0; - curthread = _get_curthread(); - /* Race condition, but so what. */ - kse_count = _kse_initial->k_kseg->kg_ksecount; - for (i = kse_count; i < new_level; i++) { - newkse = _kse_alloc(curthread); - if (newkse == NULL) { - DBG_MSG("Can't alloc new KSE.\n"); - ret = EAGAIN; - break; - } - newkse->k_kseg = _kse_initial->k_kseg; - newkse->k_schedq = _kse_initial->k_schedq; - newkse->k_curthread = NULL; - crit = _kse_critical_enter(); + ret = _thr_setconcurrency(new_level); + if (ret == 0) + level = new_level; + } + return (ret); +} + +int +_thr_setconcurrency(int new_level) +{ + struct pthread *curthread; + struct kse *newkse; + kse_critical_t crit; + int kse_count; + int i; + int ret; + + ret = 0; + curthread = _get_curthread(); + /* Race condition, but so what. */ + kse_count = _kse_initial->k_kseg->kg_ksecount; + for (i = kse_count; i < new_level; i++) { + newkse = _kse_alloc(curthread); + if (newkse == NULL) { + DBG_MSG("Can't alloc new KSE.\n"); + ret = EAGAIN; + break; + } + newkse->k_kseg = _kse_initial->k_kseg; + newkse->k_schedq = _kse_initial->k_schedq; + newkse->k_curthread = NULL; + crit = _kse_critical_enter(); + KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); + TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq, + newkse, k_kgqe); + newkse->k_kseg->kg_ksecount++; + KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); + if (kse_create(&newkse->k_mbx, 0) != 0) { KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); - TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq, + TAILQ_REMOVE(&newkse->k_kseg->kg_kseq, newkse, k_kgqe); - newkse->k_kseg->kg_ksecount++; + newkse->k_kseg->kg_ksecount--; KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); - if (_ksd_setprivate(&_kse_initial->k_ksd) != 0) { - /* This should never happen. */ - PANIC("pthread_setconcurrency: Unable to " - "set KSE specific data"); - } - newkse->k_flags |= KF_INITIALIZED; - if (kse_create(&newkse->k_mbx, 0) != 0) { - _ksd_setprivate(&curthread->kse->k_ksd); - KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); - TAILQ_REMOVE(&newkse->k_kseg->kg_kseq, - newkse, k_kgqe); - newkse->k_kseg->kg_ksecount--; - KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); - _kse_critical_leave(crit); - _kse_free(curthread, newkse); - DBG_MSG("kse_create syscall failed.\n"); - ret = EAGAIN; - break; - } - _ksd_setprivate(&curthread->kse->k_ksd); + _kse_critical_leave(crit); + _kse_free(curthread, newkse); + DBG_MSG("kse_create syscall failed.\n"); + ret = EAGAIN; + break; + } else { _kse_critical_leave(crit); } - if (ret == 0) - level = new_level; } return (ret); } + +int +_thr_setmaxconcurrency(void) +{ + int vcpu; + int len; + int ret; + + len = sizeof(vcpu); + ret = sysctlbyname("kern.threads.virtual_cpu", &vcpu, &len, NULL, NULL); + if (ret == 0 && vcpu > 0) + ret = _thr_setconcurrency(vcpu); + return (ret); +} + diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c index 537c264..32a152d 100644 --- a/lib/libpthread/thread/thr_cond.c +++ b/lib/libpthread/thread/thr_cond.c @@ -267,6 +267,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) * lock); we should be able to safely * set the state. 
*/ + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_COND_WAIT); /* Remember the CV: */ @@ -280,6 +281,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) _thr_sched_switch(curthread); curthread->data.cond = NULL; + THR_UNLOCK_SWITCH(curthread); /* * XXX - This really isn't a good check @@ -324,8 +326,10 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) * that the mutex be reaquired prior to * cancellation. */ - if (done != 0) + if (done != 0) { rval = _mutex_cv_lock(mutex); + unlock_mutex = 1; + } } } break; @@ -475,6 +479,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, * lock); we should be able to safely * set the state. */ + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_COND_WAIT); /* Remember the CV: */ @@ -488,6 +493,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, _thr_sched_switch(curthread); curthread->data.cond = NULL; + THR_UNLOCK_SWITCH(curthread); /* * XXX - This really isn't a good check diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c index deb26de..4c65d3c 100644 --- a/lib/libpthread/thread/thr_create.c +++ b/lib/libpthread/thread/thr_create.c @@ -297,6 +297,13 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, * pair if necessary. */ ret = _thr_schedule_add(curthread, new_thread); + if (ret != 0) { + KSE_LOCK_ACQUIRE(curthread->kse, + &_thread_list_lock); + THR_LIST_REMOVE(new_thread); + KSE_LOCK_RELEASE(curthread->kse, + &_thread_list_lock); + } _kse_critical_leave(crit); if (ret != 0) free_thread(curthread, new_thread); diff --git a/lib/libpthread/thread/thr_detach.c b/lib/libpthread/thread/thr_detach.c index 30b7ce5..66db912 100644 --- a/lib/libpthread/thread/thr_detach.c +++ b/lib/libpthread/thread/thr_detach.c @@ -44,8 +44,6 @@ _pthread_detach(pthread_t pthread) { struct pthread *curthread = _get_curthread(); struct pthread *joiner; - kse_critical_t crit; - int dead; int rval = 0; /* Check for invalid calling parameters: */ @@ -56,7 +54,6 @@ _pthread_detach(pthread_t pthread) else if ((rval = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) { /* Return an error: */ - _thr_leave_cancellation_point(curthread); } /* Check if the thread is already detached: */ @@ -91,20 +88,11 @@ _pthread_detach(pthread_t pthread) } joiner = NULL; } - dead = (pthread->flags & THR_FLAGS_GC_SAFE) != 0; THR_SCHED_UNLOCK(curthread, pthread); - - if (dead != 0) { - crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); - THR_GCLIST_ADD(pthread); - KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); - _kse_critical_leave(crit); - } _thr_ref_delete(curthread, pthread); /* See if there is a thread waiting in pthread_join(): */ - if (joiner != NULL) { + if (joiner != NULL && _thr_ref_add(curthread, joiner, 0) == 0) { /* Lock the joiner before fiddling with it. */ THR_SCHED_LOCK(curthread, joiner); if (joiner->join_status.thread == pthread) { @@ -118,6 +106,7 @@ _pthread_detach(pthread_t pthread) _thr_setrunnable_unlocked(joiner); } THR_SCHED_UNLOCK(curthread, joiner); + _thr_ref_delete(curthread, joiner); } } diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c index 4a82b12..8435f43 100644 --- a/lib/libpthread/thread/thr_exit.c +++ b/lib/libpthread/thread/thr_exit.c @@ -104,7 +104,9 @@ _pthread_exit(void *status) * Flag this thread as exiting. Threads should now be prevented * from joining to this thread. 
*/ + THR_SCHED_LOCK(curthread, curthread); curthread->flags |= THR_FLAGS_EXITING; + THR_SCHED_UNLOCK(curthread, curthread); /* Save the return value: */ curthread->ret = status; @@ -121,10 +123,11 @@ _pthread_exit(void *status) } /* This thread will never be re-scheduled. */ - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_DEAD); - THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); + /* Never reach! */ + THR_UNLOCK_SWITCH(curthread); /* This point should not be reached. */ PANIC("Dead thread has resumed"); diff --git a/lib/libpthread/thread/thr_find_thread.c b/lib/libpthread/thread/thr_find_thread.c index b5cae66..0c813db 100644 --- a/lib/libpthread/thread/thr_find_thread.c +++ b/lib/libpthread/thread/thr_find_thread.c @@ -85,6 +85,9 @@ _thr_ref_delete(struct pthread *curthread, struct pthread *thread) KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); thread->refcount--; curthread->critical_count--; + if ((thread->refcount == 0) && + (thread->flags & THR_FLAGS_GC_SAFE) != 0) + THR_GCLIST_ADD(thread); KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } diff --git a/lib/libpthread/thread/thr_join.c b/lib/libpthread/thread/thr_join.c index 77e3a8c..7cbc192 100644 --- a/lib/libpthread/thread/thr_join.c +++ b/lib/libpthread/thread/thr_join.c @@ -70,16 +70,16 @@ _pthread_join(pthread_t pthread, void **thread_return) return (ESRCH); } + THR_SCHED_LOCK(curthread, pthread); /* Check if this thread has been detached: */ if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) { + THR_SCHED_UNLOCK(curthread, pthread); /* Remove the reference and return an error: */ _thr_ref_delete(curthread, pthread); ret = ESRCH; } else { /* Lock the target thread while checking its state. */ - THR_SCHED_LOCK(curthread, pthread); - if ((pthread->state == PS_DEAD) || - ((pthread->flags & THR_FLAGS_EXITING) != 0)) { + if (pthread->state == PS_DEAD) { if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; @@ -123,15 +123,13 @@ _pthread_join(pthread_t pthread, void **thread_return) THR_SCHED_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); - THR_SCHED_LOCK(curthread, curthread); - if (curthread->join_status.thread == pthread) - THR_SET_STATE(curthread, PS_JOIN); - THR_SCHED_UNLOCK(curthread, curthread); - + THR_LOCK_SWITCH(curthread); while (curthread->join_status.thread == pthread) { + THR_SET_STATE(curthread, PS_JOIN); /* Schedule the next thread: */ _thr_sched_switch(curthread); } + THR_UNLOCK_SWITCH(curthread); /* * The thread return value and error are set by the diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c index 63283f7..5762219 100644 --- a/lib/libpthread/thread/thr_kern.c +++ b/lib/libpthread/thread/thr_kern.c @@ -95,14 +95,7 @@ __FBSDID("$FreeBSD$"); _pq_remove(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq) -/* - * XXX - Remove when David commits kernel changes to support these. 
- */ -#ifndef KMF_NOUPCALL -#define KMF_NOUPCALL 0x01 -#define KMF_NOCOMPLETED 0x02 -#endif - +#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads) /* * We've got to keep track of everything that is allocated, not only @@ -140,11 +133,11 @@ static void kseg_free_unlocked(struct kse_group *kseg); static void kseg_init(struct kse_group *kseg); static void kseg_reinit(struct kse_group *kseg); static void kse_waitq_insert(struct pthread *thread); +static void kse_wakeup_multi(struct kse *curkse); +static void kse_wakeup_one(struct pthread *thread); static void thr_cleanup(struct kse *kse, struct pthread *curthread); -#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp); -#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, struct pthread_sigframe *psf); static int thr_timedout(struct pthread *thread, struct timespec *curtime); @@ -368,11 +361,13 @@ _kse_setthreaded(int threaded) _kse_initial->k_flags |= KF_STARTED; if (kse_create(&_kse_initial->k_mbx, 0) != 0) { _kse_initial->k_flags &= ~KF_STARTED; + __isthreaded = 0; /* may abort() */ DBG_MSG("kse_create failed\n"); return (-1); } KSE_SET_MBOX(_kse_initial, _thr_initial); + _thr_setmaxconcurrency(); } return (0); } @@ -466,16 +461,14 @@ _thr_lock_wait(struct lock *lock, struct lockuser *lu) while (_LCK_BUSY(lu) && count < 300) count++; while (_LCK_BUSY(lu)) { - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); if (_LCK_BUSY(lu)) { /* Wait for the lock: */ atomic_store_rel_int(&curthread->need_wakeup, 1); THR_SET_STATE(curthread, PS_LOCKWAIT); - THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); } - else - THR_SCHED_UNLOCK(curthread, curthread); + THR_UNLOCK_SWITCH(curthread); } } @@ -484,14 +477,26 @@ _thr_lock_wakeup(struct lock *lock, struct lockuser *lu) { struct pthread *thread; struct pthread *curthread; + int unlock; curthread = _get_curthread(); thread = (struct pthread *)_LCK_GET_PRIVATE(lu); - THR_SCHED_LOCK(curthread, thread); + unlock = 0; + if (curthread->kseg == thread->kseg) { + /* Not already locked */ + if (curthread->lock_switch == 0) { + THR_SCHED_LOCK(curthread, thread); + unlock = 1; + } + } else { + THR_SCHED_LOCK(curthread, thread); + unlock = 1; + } _thr_setrunnable_unlocked(thread); atomic_store_rel_int(&thread->need_wakeup, 0); - THR_SCHED_UNLOCK(curthread, thread); + if (unlock) + THR_SCHED_UNLOCK(curthread, thread); } kse_critical_t @@ -541,12 +546,12 @@ void _thr_sched_switch(struct pthread *curthread) { struct pthread_sigframe psf; - kse_critical_t crit; struct kse *curkse; volatile int once = 0; /* We're in the scheduler, 5 by 5: */ - crit = _kse_critical_enter(); + THR_ASSERT(curthread->lock_switch, "lock_switch"); + THR_ASSERT(_kse_in_critical(), "not in critical region"); curkse = _get_curkse(); curthread->need_switchout = 1; /* The thread yielded on its own. */ @@ -568,7 +573,11 @@ _thr_sched_switch(struct pthread *curthread) */ if ((once == 0) && (!THR_IN_CRITICAL(curthread))) { once = 1; + curthread->critical_count++; + THR_UNLOCK_SWITCH(curthread); + curthread->critical_count--; thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf); + THR_LOCK_SWITCH(curthread); } } @@ -734,14 +743,20 @@ kse_sched_multi(struct kse *curkse) KSE_CLEAR_WAIT(curkse); } - /* Lock the scheduling lock. 
*/ - KSE_SCHED_LOCK(curkse, curkse->k_kseg); + curthread = curkse->k_curthread; + if (curthread == NULL || curthread->lock_switch == 0) { + /* + * curthread was preempted by upcall, it is not a volunteer + * context switch. Lock the scheduling lock. + */ + KSE_SCHED_LOCK(curkse, curkse->k_kseg); + } /* * If the current thread was completed in another KSE, then * it will be in the run queue. Don't mark it as being blocked. */ - if (((curthread = curkse->k_curthread) != NULL) && + if ((curthread != NULL) && ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) && (curthread->need_switchout == 0)) { /* @@ -774,17 +789,22 @@ kse_sched_multi(struct kse *curkse) * Resume the thread and tell it to yield when * it leaves the critical region. */ - curthread->critical_yield = 0; + curthread->critical_yield = 1; curthread->active = 1; if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0) KSE_RUNQ_REMOVE(curkse, curthread); curkse->k_curthread = curthread; curthread->kse = curkse; - KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); DBG_MSG("Continuing thread %p in critical region\n", curthread); - ret = _thread_switch(&curthread->tmbx, - &curkse->k_mbx.km_curthread); + if (curthread->lock_switch) { + KSE_SCHED_LOCK(curkse, curkse->k_kseg); + ret = _thread_switch(&curthread->tmbx, 0); + } else { + KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); + ret = _thread_switch(&curthread->tmbx, + &curkse->k_mbx.km_curthread); + } if (ret != 0) PANIC("Can't resume thread in critical region\n"); } @@ -792,6 +812,8 @@ kse_sched_multi(struct kse *curkse) kse_switchout_thread(curkse, curthread); curkse->k_curthread = NULL; + kse_wakeup_multi(curkse); + /* This has to be done without the scheduling lock held. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_check_signals(curkse); @@ -868,6 +890,8 @@ kse_sched_multi(struct kse *curkse) curframe = curthread->curframe; curthread->curframe = NULL; + kse_wakeup_multi(curkse); + /* Unlock the scheduling queue: */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); @@ -881,10 +905,13 @@ kse_sched_multi(struct kse *curkse) #ifdef NOT_YET if ((curframe == NULL) && ((curthread->check_pending != 0) || (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && - ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) { + ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) + signalcontext(&curthread->tmbx.tm_context, 0, + (__sighandler_t *)thr_resume_wrapper); +#else + if ((curframe == NULL) && (curthread->check_pending != 0)) signalcontext(&curthread->tmbx.tm_context, 0, (__sighandler_t *)thr_resume_wrapper); - } #endif #ifdef GS_HACK /* XXX - The kernel sometimes forgets to restore %gs properly. 
*/ @@ -893,7 +920,13 @@ kse_sched_multi(struct kse *curkse) /* * Continue the thread at its current frame: */ - ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); + if (curthread->lock_switch) { + KSE_SCHED_LOCK(curkse, curkse->k_kseg); + ret = _thread_switch(&curthread->tmbx, 0); + } else { + ret = _thread_switch(&curthread->tmbx, + &curkse->k_mbx.km_curthread); + } if (ret != 0) PANIC("Thread has returned from _thread_switch"); @@ -932,7 +965,6 @@ kse_check_signals(struct kse *curkse) } } -#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp) { @@ -940,7 +972,6 @@ thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp) thr_resume_check(curthread, ucp, NULL); } -#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, @@ -960,9 +991,11 @@ thr_resume_check(struct pthread *curthread, ucontext_t *ucp, _thr_sig_rundown(curthread, ucp, psf); } +#ifdef NOT_YET if (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); +#endif } /* @@ -1071,6 +1104,7 @@ _thr_gc(struct pthread *curthread) * referenced. It is safe to remove all * remnants of the thread. */ + THR_LIST_REMOVE(td); TAILQ_INSERT_HEAD(&worklist, td, gcle); } } @@ -1172,11 +1206,11 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) if (need_start != 0) kse_create(&newthread->kse->k_mbx, 0); else if ((newthread->state == PS_RUNNING) && - KSE_WAITING(newthread->kse)) { + KSE_IS_IDLE(newthread->kse)) { /* * The thread is being scheduled on another KSEG. */ - KSE_WAKEUP(newthread->kse); + kse_wakeup_one(newthread); } ret = 0; } @@ -1326,6 +1360,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread) */ DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state); if (thread->blocked != 0) { + thread->active = 0; + thread->need_switchout = 0; /* This thread must have blocked in the kernel. */ /* thread->slice_usec = -1;*/ /* restart timeslice */ /* @@ -1346,6 +1382,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread) * stack. It is safe to do garbage collecting * here. */ + thread->active = 0; + thread->need_switchout = 0; thr_cleanup(kse, thread); return; break; @@ -1456,14 +1494,18 @@ kse_wait(struct kse *kse, struct pthread *td_wait) } /* Don't sleep for negative times. */ if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) { - KSE_SET_WAIT(kse); + KSE_SET_IDLE(kse); + kse->k_kseg->kg_idle_kses++; KSE_SCHED_UNLOCK(kse, kse->k_kseg); saved_flags = kse->k_mbx.km_flags; kse->k_mbx.km_flags |= KMF_NOUPCALL; kse_release(&ts_sleep); kse->k_mbx.km_flags = saved_flags; - KSE_CLEAR_WAIT(kse); KSE_SCHED_LOCK(kse, kse->k_kseg); + if (KSE_IS_IDLE(kse)) { + KSE_CLEAR_IDLE(kse); + kse->k_kseg->kg_idle_kses--; + } } } @@ -1495,11 +1537,13 @@ kse_fini(struct kse *kse) * Add this KSE to the list of free KSEs along with * the KSEG if is now orphaned. */ +#ifdef NOT_YET KSE_LOCK_ACQUIRE(kse, &kse_lock); if (free_kseg != NULL) kseg_free_unlocked(free_kseg); kse_free_unlocked(kse); KSE_LOCK_RELEASE(kse, &kse_lock); +#endif kse_exit(); /* Never returns. */ } else { @@ -1620,7 +1664,7 @@ _thr_setrunnable_unlocked(struct pthread *thread) if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0) /* No silly queues for these threads. 
*/ THR_SET_STATE(thread, PS_RUNNING); - else { + else if (thread->state != PS_RUNNING) { if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0) KSE_WAITQ_REMOVE(thread->kse, thread); THR_SET_STATE(thread, PS_RUNNING); @@ -1641,8 +1685,47 @@ _thr_setrunnable_unlocked(struct pthread *thread) * (the KSE). If the KSE wakes up and doesn't find any more * work it will again go back to waiting so no harm is done. */ - if (KSE_WAITING(thread->kse)) + kse_wakeup_one(thread); +} + +static void +kse_wakeup_one(struct pthread *thread) +{ + struct kse *ke; + + if (KSE_IS_IDLE(thread->kse)) { + KSE_CLEAR_IDLE(thread->kse); + thread->kseg->kg_idle_kses--; KSE_WAKEUP(thread->kse); + } else { + TAILQ_FOREACH(ke, &thread->kseg->kg_kseq, k_kgqe) { + if (KSE_IS_IDLE(ke)) { + KSE_CLEAR_IDLE(ke); + ke->k_kseg->kg_idle_kses--; + KSE_WAKEUP(ke); + return; + } + } + } +} + +static void +kse_wakeup_multi(struct kse *curkse) +{ + struct kse *ke; + int tmp; + + if ((tmp = KSE_RUNQ_THREADS(curkse)) && curkse->k_kseg->kg_idle_kses) { + TAILQ_FOREACH(ke, &curkse->k_kseg->kg_kseq, k_kgqe) { + if (KSE_IS_IDLE(ke)) { + KSE_CLEAR_IDLE(ke); + ke->k_kseg->kg_idle_kses--; + KSE_WAKEUP(ke); + if (--tmp == 0) + break; + } + } + } } struct pthread * @@ -1876,7 +1959,7 @@ kse_reinit(struct kse *kse) sigemptyset(&kse->k_sigmask); bzero(&kse->k_sigq, sizeof(kse->k_sigq)); kse->k_check_sigq = 0; - kse->k_flags = KF_INITIALIZED; + kse->k_flags = 0; kse->k_waiting = 0; kse->k_error = 0; kse->k_cpu = 0; @@ -1948,6 +2031,7 @@ _thr_alloc(struct pthread *curthread) free_thread_count--; } KSE_LOCK_RELEASE(curthread->kse, &thread_lock); + _kse_critical_leave(crit); } } if (thread == NULL) @@ -1971,9 +2055,6 @@ _thr_free(struct pthread *curthread, struct pthread *thread) } else { crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); - THR_LIST_REMOVE(thread); - KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); TAILQ_INSERT_HEAD(&free_threadq, thread, tle); free_thread_count++; diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c index 1ae12ea9..bb97db1 100644 --- a/lib/libpthread/thread/thr_mutex.c +++ b/lib/libpthread/thread/thr_mutex.c @@ -325,6 +325,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -335,6 +336,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; + THR_SCHED_UNLOCK(curthread, curthread); /* Add to the list of owned mutexes: */ MUTEX_ASSERT_NOT_OWNED(*mutex); @@ -358,6 +360,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -371,7 +374,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; - + THR_SCHED_UNLOCK(curthread, curthread); /* Add to the list of owned mutexes: */ MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, @@ -503,6 +506,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * region (holding the mutex 
lock); we should * be able to safely set the state. */ + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_MUTEX_WAIT); /* Unlock the mutex structure: */ @@ -510,6 +514,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) /* Schedule the next thread: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); } break; @@ -520,6 +525,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) /* Lock the mutex for this thread: */ (*m)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -529,7 +535,6 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * Make sure the thread's scheduling lock is * held while priorities are adjusted. */ - THR_SCHED_LOCK(curthread, curthread); (*m)->m_prio = curthread->active_priority; (*m)->m_saved_prio = curthread->inherited_priority; @@ -561,17 +566,18 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * region (holding the mutex lock); we should * be able to safely set the state. */ - THR_SET_STATE(curthread, PS_MUTEX_WAIT); - if (curthread->active_priority > (*m)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(curthread, *m); + THR_LOCK_SWITCH(curthread); + THR_SET_STATE(curthread, PS_MUTEX_WAIT); /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*m)->m_lock); /* Schedule the next thread: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); } break; @@ -591,6 +597,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) */ (*m)->m_owner = curthread; + THR_SCHED_LOCK(curthread, curthread); /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -601,7 +608,6 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * scheduling lock is held while priorities * are adjusted. */ - THR_SCHED_LOCK(curthread, curthread); curthread->active_priority = (*m)->m_prio; (*m)->m_saved_prio = curthread->inherited_priority; @@ -636,6 +642,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) * region (holding the mutex lock); we should * be able to safely set the state. */ + + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_MUTEX_WAIT); /* Unlock the mutex structure: */ @@ -643,7 +651,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m) /* Schedule the next thread: */ _thr_sched_switch(curthread); - + THR_UNLOCK_SWITCH(curthread); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling @@ -749,9 +757,15 @@ _mutex_cv_unlock(pthread_mutex_t *m) int _mutex_cv_lock(pthread_mutex_t *m) { + struct pthread *curthread; int ret; - if ((ret = _pthread_mutex_lock(m)) == 0) + + curthread = _get_curthread(); + if ((ret = _pthread_mutex_lock(m)) == 0) { + THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); (*m)->m_refcount--; + THR_LOCK_RELEASE(curthread, &(*m)->m_lock); + } return (ret); } @@ -807,6 +821,8 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m) * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. 
*/ + + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_DEADLOCK); /* Unlock the mutex structure: */ @@ -814,6 +830,7 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m) /* Schedule the next thread: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); break; case PTHREAD_MUTEX_RECURSIVE: @@ -917,12 +934,12 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference) curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); - THR_SCHED_UNLOCK(curthread, curthread); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; + THR_SCHED_UNLOCK(curthread, curthread); /* Remove the mutex from the threads queue. */ MUTEX_ASSERT_IS_OWNED(*m); @@ -974,12 +991,12 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference) curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); - THR_SCHED_UNLOCK(curthread, curthread); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; + THR_SCHED_UNLOCK(curthread, curthread); /* Remove the mutex from the threads queue. */ MUTEX_ASSERT_IS_OWNED(*m); diff --git a/lib/libpthread/thread/thr_nanosleep.c b/lib/libpthread/thread/thr_nanosleep.c index bec3b66..e83f4f4 100644 --- a/lib/libpthread/thread/thr_nanosleep.c +++ b/lib/libpthread/thread/thr_nanosleep.c @@ -60,14 +60,14 @@ _nanosleep(const struct timespec *time_to_sleep, /* Calculate the time for the current thread to wake up: */ TIMESPEC_ADD(&curthread->wakeup_time, &ts, time_to_sleep); - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); curthread->interrupted = 0; THR_SET_STATE(curthread, PS_SLEEP_WAIT); - THR_SCHED_UNLOCK(curthread, curthread); /* Reschedule the current thread to sleep: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); /* Calculate the remaining time to sleep: */ KSE_GET_TOD(curthread->kse, &ts1); diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c index 3f261f8..2822aa8 100644 --- a/lib/libpthread/thread/thr_priority_queue.c +++ b/lib/libpthread/thread/thr_priority_queue.c @@ -126,6 +126,7 @@ _pq_init(pq_queue_t *pq) /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); pq->pq_flags = 0; + pq->pq_threads = 0; } return (ret); } @@ -151,10 +152,10 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread) * from the priority queue when _pq_first is called. */ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); - + pq->pq_threads--; /* This thread is now longer in the priority queue. */ pthread->flags &= ~THR_FLAGS_IN_RUNQ; - + PQ_CLEAR_ACTIVE(pq); } @@ -177,7 +178,7 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread) if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); - + pq->pq_threads++; /* Mark this thread as being in the priority queue. */ pthread->flags |= THR_FLAGS_IN_RUNQ; @@ -203,7 +204,7 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); - + pq->pq_threads++; /* Mark this thread as being in the priority queue. 
*/ pthread->flags |= THR_FLAGS_IN_RUNQ; diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h index fc2e4ee..4d43242 100644 --- a/lib/libpthread/thread/thr_private.h +++ b/lib/libpthread/thread/thr_private.h @@ -143,6 +143,7 @@ typedef struct pq_queue { int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; + int pq_threads; } pq_queue_t; /* @@ -197,6 +198,7 @@ struct kse { #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ int k_waiting; + int k_idle; /* kse is idle */ int k_error; /* syscall errno in critical */ int k_cpu; /* CPU ID when bound */ int k_done; /* this KSE is done */ @@ -294,11 +296,15 @@ do { \ #define KSE_SET_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 1) -#define KSE_CLEAR_WAIT(kse) atomic_set_acq_int(&(kse)->k_waiting, 0) +#define KSE_CLEAR_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 0) #define KSE_WAITING(kse) (kse)->k_waiting != 0 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) +#define KSE_SET_IDLE(kse) ((kse)->k_idle = 1) +#define KSE_CLEAR_IDLE(kse) ((kse)->k_idle = 0) +#define KSE_IS_IDLE(kse) ((kse)->k_idle != 0) + /* * TailQ initialization values. */ @@ -658,6 +664,7 @@ struct pthread { /* Thread state: */ enum pthread_state state; + int lock_switch; /* * Number of microseconds accumulated by this thread when @@ -803,8 +810,11 @@ struct pthread { #define THR_YIELD_CHECK(thrd) \ do { \ if (((thrd)->critical_yield != 0) && \ - !(THR_IN_CRITICAL(thrd))) \ + !(THR_IN_CRITICAL(thrd))) { \ + THR_LOCK_SWITCH(thrd); \ _thr_sched_switch(thrd); \ + THR_UNLOCK_SWITCH(thrd); \ + } \ else if (((thrd)->check_pending != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sig_check_pending(thrd); \ @@ -828,15 +838,31 @@ do { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ - if ((thrd)->locklevel != 0) \ + if ((thrd)->lock_switch) \ ; \ - else if ((thrd)->critical_yield != 0) \ - _thr_sched_switch(thrd); \ - else if ((thrd)->check_pending != 0) \ - _thr_sig_check_pending(thrd); \ + else { \ + THR_YIELD_CHECK(thrd); \ + } \ } \ } while (0) +#define THR_LOCK_SWITCH(thrd) \ +do { \ + THR_ASSERT(!(thrd)->lock_switch, "context switch locked"); \ + _kse_critical_enter(); \ + KSE_SCHED_LOCK((thrd)->kse, (thrd)->kseg); \ + (thrd)->lock_switch = 1; \ +} while (0) + +#define THR_UNLOCK_SWITCH(thrd) \ +do { \ + THR_ASSERT((thrd)->lock_switch, "context switch not locked"); \ + THR_ASSERT(_kse_in_critical(), "Er,not in critical region"); \ + (thrd)->lock_switch = 0; \ + KSE_SCHED_UNLOCK((thrd)->kse, (thrd)->kseg); \ + _kse_critical_leave(&thrd->tmbx); \ +} while (0) + /* * For now, threads will have their own lock separate from their * KSE scheduling lock. 
@@ -907,8 +933,6 @@ do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ - if ((curthr)->locklevel == 0) \ - THR_YIELD_CHECK(curthr); \ } while (0) #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ @@ -917,7 +941,9 @@ do { \ if (((thr)->critical_yield != 0) && \ ((thr)->critical_count == 0)) { \ (thr)->critical_yield = 0; \ + THR_LOCK_SWITCH(thr); \ _thr_sched_switch(thr); \ + THR_UNLOCK_SWITCH(thr); \ } \ } while (0) @@ -1092,6 +1118,8 @@ void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf) void _thr_seterrno(struct pthread *, int); void _thr_enter_cancellation_point(struct pthread *); void _thr_leave_cancellation_point(struct pthread *); +int _thr_setconcurrency(int new_level); +int _thr_setmaxconcurrency(void); /* XXX - Stuff that goes away when my sources get more up to date. */ /* #include <sys/kse.h> */ diff --git a/lib/libpthread/thread/thr_sigsuspend.c b/lib/libpthread/thread/thr_sigsuspend.c index 7ce027a..9ada1b2 100644 --- a/lib/libpthread/thread/thr_sigsuspend.c +++ b/lib/libpthread/thread/thr_sigsuspend.c @@ -53,13 +53,14 @@ _sigsuspend(const sigset_t *set) memcpy(&curthread->tmbx.tm_context.uc_sigmask, set, sizeof(sigset_t)); + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_SIGSUSPEND); - THR_SCHED_UNLOCK(curthread, curthread); - /* Wait for a signal: */ _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); + /* Always return an interrupted error: */ errno = EINTR; diff --git a/lib/libpthread/thread/thr_sigwait.c b/lib/libpthread/thread/thr_sigwait.c index b955251..7b5a31f 100644 --- a/lib/libpthread/thread/thr_sigwait.c +++ b/lib/libpthread/thread/thr_sigwait.c @@ -134,11 +134,10 @@ _sigwait(const sigset_t *set, int *sig) curthread->data.sigwait = &waitset; /* Wait for a signal: */ - THR_SCHED_LOCK(curthread, curthread); + THR_LOCK_SWITCH(curthread); THR_SET_STATE(curthread, PS_SIGWAIT); - THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); - + THR_UNLOCK_SWITCH(curthread); /* Return the signal number to the caller: */ *sig = curthread->signo; diff --git a/lib/libpthread/thread/thr_spinlock.c b/lib/libpthread/thread/thr_spinlock.c index cb71a46..56691dd 100644 --- a/lib/libpthread/thread/thr_spinlock.c +++ b/lib/libpthread/thread/thr_spinlock.c @@ -33,12 +33,8 @@ * */ -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <sched.h> -#include <pthread.h> -#include <unistd.h> +#include <sys/types.h> +#include <machine/atomic.h> #include <libc_private.h> #include "spinlock.h" @@ -52,7 +48,12 @@ void _spinunlock(spinlock_t *lck) { - lck->access_lock = 0; + kse_critical_t crit; + + crit = (kse_critical_t)lck->fname; + atomic_store_rel_long(&lck->access_lock, 0); + if (crit != NULL) + _kse_critical_leave(crit); } @@ -65,14 +66,21 @@ _spinunlock(spinlock_t *lck) void _spinlock(spinlock_t *lck) { + kse_critical_t crit; + /* * Try to grab the lock and loop if another thread grabs * it before we do. */ + if (_kse_isthreaded()) + crit = _kse_critical_enter(); + else + crit = NULL; while(_atomic_lock(&lck->access_lock)) { while (lck->access_lock) ; } + lck->fname = (char *)crit; } /* @@ -88,12 +96,5 @@ _spinlock(spinlock_t *lck) void _spinlock_debug(spinlock_t *lck, char *fname, int lineno) { - /* - * Try to grab the lock and loop if another thread grabs - * it before we do. 
- */ - while(_atomic_lock(&lck->access_lock)) { - while (lck->access_lock) - ; - } + _spinlock(lck); } diff --git a/lib/libpthread/thread/thr_yield.c b/lib/libpthread/thread/thr_yield.c index acaa3c5..dfe7278 100644 --- a/lib/libpthread/thread/thr_yield.c +++ b/lib/libpthread/thread/thr_yield.c @@ -46,8 +46,9 @@ _sched_yield(void) curthread->slice_usec = -1; /* Schedule the next thread: */ + THR_LOCK_SWITCH(curthread); _thr_sched_switch(curthread); - + THR_UNLOCK_SWITCH(curthread); /* Always return no error. */ return(0); } @@ -62,5 +63,7 @@ _pthread_yield(void) curthread->slice_usec = -1; /* Schedule the next thread: */ + THR_LOCK_SWITCH(curthread); _thr_sched_switch(curthread); + THR_UNLOCK_SWITCH(curthread); }
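The pattern repeated across thr_mutex.c, thr_nanosleep.c, thr_sigsuspend.c, thr_sigwait.c and thr_yield.c above is that a blocking thread now brackets its state change and the call to _thr_sched_switch() with THR_LOCK_SWITCH()/THR_UNLOCK_SWITCH(), rather than a plain THR_SCHED_LOCK()/THR_SCHED_UNLOCK() pair, so the scheduling lock is held across the switch. A minimal sketch of that calling sequence, using the macros as defined in the thr_private.h hunk; the wrapper function and its state argument are hypothetical:

static void
block_current_thread(struct pthread *curthread, enum pthread_state state)
{
	/* Enter a KSE critical region and take the scheduling lock. */
	THR_LOCK_SWITCH(curthread);

	/* The lock is held, so the state change cannot race an upcall. */
	THR_SET_STATE(curthread, state);

	/* Switch away; kse_sched_multi() sees lock_switch set and keeps the lock. */
	_thr_sched_switch(curthread);

	/* Back on the CPU: drop the lock and leave the critical region. */
	THR_UNLOCK_SWITCH(curthread);
}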
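The new pq_threads counter maintained in _pq_insert_head(), _pq_insert_tail() and _pq_remove(), together with the k_idle flag and the per-group kg_idle_kses count, is what lets the scheduler decide whether idle KSEs need waking without walking the run queue. The helper below is illustrative only (it is not part of the patch) and assumes the fields and the KSE_RUNQ_THREADS() macro introduced above:

/* Illustrative: how many idle KSEs could usefully be woken for this group? */
static int
idle_kses_to_wake(struct kse *curkse)
{
	int nthreads = KSE_RUNQ_THREADS(curkse);     /* threads sitting in the run queue */
	int nidle = curkse->k_kseg->kg_idle_kses;    /* KSEs parked in kse_release() */

	/* kse_wakeup_multi() wakes at most one idle KSE per runnable thread. */
	return (nthreads < nidle ? nthreads : nidle);
}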
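After the thr_spinlock.c change, _spinlock() enters a KSE critical region when the library is threaded and stashes the kse_critical_t token in the lock's fname field (otherwise unused outside the debug path); _spinunlock() clears access_lock with a release store and then leaves that critical region. A usage sketch under those semantics; the lock variable and caller are hypothetical, and _SPINLOCK_INITIALIZER is assumed to be the usual libc spinlock initializer:

#include "spinlock.h"

static spinlock_t example_lock = _SPINLOCK_INITIALIZER;

static void
touch_shared_state(void)
{
	_spinlock(&example_lock);     /* may also enter a KSE critical region */
	/* ... update data that upcalls and signals must never see half-done ... */
	_spinunlock(&example_lock);   /* release store, then _kse_critical_leave() */
}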
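_thr_setmaxconcurrency() is prototyped in the thr_private.h hunk and called from _kse_setthreaded() once the initial KSE is up. For readers skimming only the header changes, a plausible shape for such a hook is to read the CPU count and hand it to _thr_setconcurrency(); the sysctl name and the error handling below are assumptions, not necessarily the committed code:

#include <sys/types.h>
#include <sys/sysctl.h>

int
_thr_setmaxconcurrency(void)
{
	int vcpu;
	size_t len = sizeof(vcpu);
	int ret;

	/* Assumption: size the default concurrency to the number of CPUs. */
	ret = sysctlbyname("hw.ncpu", &vcpu, &len, NULL, 0);
	if (ret == 0 && vcpu > 0)
		ret = _thr_setconcurrency(vcpu);
	return (ret);
}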