author | deischen <deischen@FreeBSD.org> | 2003-04-28 23:56:12 +0000 |
---|---|---|
committer | deischen <deischen@FreeBSD.org> | 2003-04-28 23:56:12 +0000 |
commit | 6deabb439b7870759f16db0c9a3968e3755bf61a (patch) | |
tree | 90f4ce3660289c401aab5d0a2c4f105e5aa3e877 /lib/libpthread/thread/thr_cancel.c | |
parent | 0fbdf3450d750b66e54d688d645fc4bafd8739a1 (diff) | |
o Don't add a scope system thread's KSE to the list of available
KSEs when its thread exits; allow the GC handler to do that.
o Make spinlock/spinunlock critical regions.
The following were submitted by davidxu
o Allow thr_switch() to take a null mailbox argument.
o Better protect cancellation checks (see the sketch after this list).
o Don't set KSE specific data when creating new KSEs; rely on the
first upcall of the KSE to set it.
o Add the ability to set the maximum concurrency level and do this
automatically. We should have a way to enable/disable this with
some sort of tunable because some applications may not want this
to be the default.
o Hold the scheduling lock across thread switch calls.
o If scheduling of a thread fails, make sure to remove it from the list
of active threads.
o Better protect accesses to a joining thread when the target thread has
exited and been detached.
o Remove some macro definitions that are now provided by <sys/kse.h>.
o Don't leave the library in threaded mode if creation of the initial
KSE fails.
o Wake up idle KSEs when there are threads ready to run.
o Maintain the number of threads active in the priority queue.
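
Several of the cancellation items above come down to one locking rule, which the diff below applies to thr_cancel.c: testcancel() no longer acquires the per-thread scheduling lock itself; its callers take THR_SCHED_LOCK first, so the cancellation check and any flag updates happen under a single lock acquisition. Below is a minimal stand-alone sketch of that discipline, not the libpthread sources: a pthread_mutex_t stands in for the KSE scheduling lock, and struct fake_thread, the FLAG_* constants, and the helper names are hypothetical.

```c
/*
 * Minimal sketch of the locking discipline, not the libpthread code:
 * a pthread_mutex_t stands in for the per-thread scheduling lock
 * (THR_SCHED_LOCK/THR_SCHED_UNLOCK); the struct, flags, and helpers
 * below are illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_CANCEL_PENDING	0x01	/* a cancel request has arrived */
#define FLAG_AT_CANCEL_POINT	0x02	/* thread is inside a cancellation point */

struct fake_thread {
	pthread_mutex_t	sched_lock;	/* stand-in for the scheduling lock */
	int		cancelflags;
};

/* The check itself no longer locks; the caller must hold td->sched_lock. */
static int
check_cancel_locked(struct fake_thread *td)
{
	return ((td->cancelflags & FLAG_CANCEL_PENDING) != 0);
}

/* Mirrors the shape of _thr_enter_cancellation_point(): one lock
 * acquisition covers both the cancellation test and the flag update. */
static void
enter_cancellation_point(struct fake_thread *td)
{
	pthread_mutex_lock(&td->sched_lock);
	if (check_cancel_locked(td))
		printf("cancel pending: would unlock and exit here\n");
	td->cancelflags |= FLAG_AT_CANCEL_POINT;
	pthread_mutex_unlock(&td->sched_lock);
}

static void
leave_cancellation_point(struct fake_thread *td)
{
	pthread_mutex_lock(&td->sched_lock);
	td->cancelflags &= ~FLAG_AT_CANCEL_POINT;
	if (check_cancel_locked(td))
		printf("cancel pending: would unlock and exit here\n");
	pthread_mutex_unlock(&td->sched_lock);
}

int
main(void)
{
	struct fake_thread td = { PTHREAD_MUTEX_INITIALIZER, 0 };

	enter_cancellation_point(&td);
	td.cancelflags |= FLAG_CANCEL_PENDING;	/* simulate pthread_cancel() */
	leave_cancellation_point(&td);
	return (0);
}
```

Keeping the lock held across both the check and the flag update closes the window in which a cancellation request arriving between the two could be observed with inconsistent state.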
Diffstat (limited to 'lib/libpthread/thread/thr_cancel.c')
-rw-r--r-- | lib/libpthread/thread/thr_cancel.c | 12 |
1 file changed, 9 insertions, 3 deletions
```diff
diff --git a/lib/libpthread/thread/thr_cancel.c b/lib/libpthread/thread/thr_cancel.c
index 23e0dfa..fc05b55 100644
--- a/lib/libpthread/thread/thr_cancel.c
+++ b/lib/libpthread/thread/thr_cancel.c
@@ -217,7 +217,6 @@ static void
 testcancel(struct pthread *curthread)
 {
 	/* Take the scheduling lock while fiddling with the state: */
-	THR_SCHED_LOCK(curthread, curthread);
 
 	if (checkcancel(curthread) != 0) {
 		/* Unlock before exiting: */
@@ -227,8 +226,6 @@ testcancel(struct pthread *curthread)
 		pthread_exit(PTHREAD_CANCELED);
 		PANIC("cancel");
 	}
-
-	THR_SCHED_UNLOCK(curthread, curthread);
 }
 
 void
@@ -236,23 +233,29 @@ _pthread_testcancel(void)
 {
 	struct pthread	*curthread = _get_curthread();
 
+	THR_SCHED_LOCK(curthread, curthread);
 	testcancel(curthread);
+	THR_SCHED_UNLOCK(curthread, curthread);
 }
 
 void
 _thr_enter_cancellation_point(struct pthread *thread)
 {
 	/* Look for a cancellation before we block: */
+	THR_SCHED_LOCK(thread, thread);
 	testcancel(thread);
 	thread->cancelflags |= THR_AT_CANCEL_POINT;
+	THR_SCHED_UNLOCK(thread, thread);
 }
 
 void
 _thr_leave_cancellation_point(struct pthread *thread)
 {
+	THR_SCHED_LOCK(thread, thread);
 	thread->cancelflags &= ~THR_AT_CANCEL_POINT;
 	/* Look for a cancellation after we unblock: */
 	testcancel(thread);
+	THR_SCHED_UNLOCK(thread, thread);
 }
 
 static void
@@ -263,9 +266,12 @@ finish_cancellation(void *arg)
 	curthread->continuation = NULL;
 	curthread->interrupted = 0;
 
+	THR_SCHED_LOCK(curthread, curthread);
 	if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) {
 		curthread->cancelflags &= ~THR_CANCEL_NEEDED;
+		THR_SCHED_UNLOCK(curthread, curthread);
 		_thr_exit_cleanup();
 		pthread_exit(PTHREAD_CANCELED);
 	}
+	THR_SCHED_UNLOCK(curthread, curthread);
 }
```