28 files changed, 686 insertions, 628 deletions
diff --git a/lib/libkse/sys/lock.c b/lib/libkse/sys/lock.c
index 12ce1a0..d005baa 100644
--- a/lib/libkse/sys/lock.c
+++ b/lib/libkse/sys/lock.c
@@ -65,7 +65,7 @@ _lock_init(struct lock *lck, enum lock_type ltype,
         lck->l_head->lr_watcher = NULL;
         lck->l_head->lr_owner = NULL;
         lck->l_head->lr_waiting = 0;
-        lck->l_head->lr_handshake = 0;
+        lck->l_head->lr_active = 1;
         lck->l_tail = lck->l_head;
     }
     return (0);
@@ -85,7 +85,7 @@ _lockuser_init(struct lockuser *lu, void *priv)
     lu->lu_myreq->lr_watcher = NULL;
     lu->lu_myreq->lr_owner = lu;
     lu->lu_myreq->lr_waiting = 0;
-    lu->lu_myreq->lr_handshake = 0;
+    lu->lu_myreq->lr_active = 0;
     lu->lu_watchreq = NULL;
     lu->lu_priority = 0;
     lu->lu_private = priv;
@@ -166,19 +166,16 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
         for (i = 0; i < MAX_SPINS; i++) {
             if (lu->lu_watchreq->lr_locked == 0)
                 return;
+            if (lu->lu_watchreq->lr_active == 0)
+                break;
         }
         atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
         while (lu->lu_watchreq->lr_locked != 0)
             lck->l_wait(lck, lu);
         atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
-        /*
-         * Wait for original owner to stop accessing the
-         * lockreq object.
-         */
-        while (lu->lu_watchreq->lr_handshake)
-            ;
         }
     }
+    lu->lu_myreq->lr_active = 1;
 }

 /*
@@ -240,24 +237,21 @@ _lock_release(struct lock *lck, struct lockuser *lu)
             }
         }
         if (lu_h != NULL) {
-            lu_h->lu_watchreq->lr_handshake = 1;
             /* Give the lock to the highest priority user. */
-            atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
             if ((lu_h->lu_watchreq->lr_waiting != 0) &&
                 (lck->l_wakeup != NULL))
                 /* Notify the sleeper */
                 lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
-            atomic_store_rel_long(&lu_h->lu_watchreq->lr_handshake,
-                0);
+            else
+                atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
         } else {
-            myreq->lr_handshake = 1;
-            /* Give the lock to the previous request. */
-            atomic_store_rel_long(&myreq->lr_locked, 0);
             if ((myreq->lr_waiting != 0) &&
                 (lck->l_wakeup != NULL))
                 /* Notify the sleeper */
                 lck->l_wakeup(lck, myreq->lr_watcher);
-            atomic_store_rel_long(&myreq->lr_handshake, 0);
+            else
+                /* Give the lock to the previous request. */
+                atomic_store_rel_long(&myreq->lr_locked, 0);
         }
     } else {
         /*
@@ -270,19 +264,25 @@ _lock_release(struct lock *lck, struct lockuser *lu)
         lu->lu_watchreq = NULL;
         lu->lu_myreq->lr_locked = 1;
         lu->lu_myreq->lr_waiting = 0;
-        if (lck->l_wakeup) {
-            /* Start wakeup */
-            myreq->lr_handshake = 1;
+        if (myreq->lr_waiting != 0 && lck->l_wakeup)
+            /* Notify the sleeper */
+            lck->l_wakeup(lck, myreq->lr_watcher);
+        else
             /* Give the lock to the previous request. */
             atomic_store_rel_long(&myreq->lr_locked, 0);
-        if (myreq->lr_waiting != 0) {
-            /* Notify the sleeper */
-            lck->l_wakeup(lck, myreq->lr_watcher);
-        }
-        /* Stop wakeup */
-        atomic_store_rel_long(&myreq->lr_handshake, 0);
-    } else {
-        atomic_store_rel_long(&myreq->lr_locked, 0);
-    }
     }
+    lu->lu_myreq->lr_active = 0;
 }
+
+void
+_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
+{
+    atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
+}
+
+void
+_lockuser_setactive(struct lockuser *lu, int active)
+{
+    lu->lu_myreq->lr_active = active;
+}
+
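The handoff protocol above is the core of this change, so it is worth seeing without the queue-scanning noise. A minimal sketch under the declarations in sys/lock.h; release_sketch and acquire_sketch are hypothetical names, not functions in the tree:

/*
 * Sketch only.  The releaser either clears lr_locked itself (fast
 * path, nobody sleeping) or defers that store to the wakeup handler,
 * which performs it via _lock_grant().  The old lr_handshake spin is
 * gone because the previous owner never touches the lockreq again
 * once the waiter can run.
 */
static void
release_sketch(struct lock *lck, struct lockreq *myreq)
{
    if (myreq->lr_waiting != 0 && lck->l_wakeup != NULL)
        lck->l_wakeup(lck, myreq->lr_watcher);       /* waker grants */
    else
        atomic_store_rel_long(&myreq->lr_locked, 0); /* direct grant */
}

static void
acquire_sketch(struct lock *lck, struct lockuser *lu)
{
    int i;

    for (i = 0; i < MAX_SPINS; i++) {
        if (lu->lu_watchreq->lr_locked == 0)
            return;                 /* granted while spinning */
        if (lu->lu_watchreq->lr_active == 0)
            break;                  /* holder is off a CPU; sleep */
    }
    atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
    while (lu->lu_watchreq->lr_locked != 0)
        lck->l_wait(lck, lu);       /* wakeup side calls _lock_grant() */
    atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
}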
diff --git a/lib/libkse/sys/lock.h b/lib/libkse/sys/lock.h
index e397111..6fa23e2 100644
--- a/lib/libkse/sys/lock.h
+++ b/lib/libkse/sys/lock.h
@@ -55,7 +55,7 @@ struct lockreq {
     struct lockuser *lr_watcher;    /* only used for priority locks */
     struct lockuser *lr_owner;      /* only used for priority locks */
     long            lr_waiting;     /* non-zero when wakeup needed */
-    volatile long   lr_handshake;   /* non-zero when wakeup in progress */
+    volatile int    lr_active;      /* non-zero if the lock is last lock for thread */
 };

 struct lockuser {
@@ -72,6 +72,7 @@ struct lockuser {
 #define _LCK_REQUEST_INITIALIZER    { 0, NULL, NULL, 0 }

 #define _LCK_BUSY(lu)       ((lu)->lu_watchreq->lr_locked != 0)
+#define _LCK_ACTIVE(lu)     ((lu)->lu_watchreq->lr_active != 0)
 #define _LCK_GRANTED(lu)    ((lu)->lu_watchreq->lr_locked == 0)

 #define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p)
@@ -84,7 +85,9 @@ int _lock_init(struct lock *, enum lock_type,
         lock_handler_t *, lock_handler_t *);
 int  _lockuser_init(struct lockuser *lu, void *priv);
 void _lockuser_destroy(struct lockuser *lu);
+void _lockuser_setactive(struct lockuser *lu, int active);
 void _lock_acquire(struct lock *, struct lockuser *, int);
 void _lock_release(struct lock *, struct lockuser *);
+void _lock_grant(struct lock *, struct lockuser *);

 #endif
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index 12c14db..1b0325f 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -267,11 +267,12 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
                  * lock); we should be able to safely
                  * set the state.
                  */
-                THR_LOCK_SWITCH(curthread);
+                THR_SCHED_LOCK(curthread, curthread);
                 THR_SET_STATE(curthread, PS_COND_WAIT);

                 /* Remember the CV: */
                 curthread->data.cond = *cond;
+                THR_SCHED_UNLOCK(curthread, curthread);

                 /* Unlock the CV structure: */
                 THR_LOCK_RELEASE(curthread,
@@ -281,7 +282,6 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
                 _thr_sched_switch(curthread);

                 curthread->data.cond = NULL;
-                THR_UNLOCK_SWITCH(curthread);

                 /*
                  * XXX - This really isn't a good check
@@ -479,11 +479,12 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                  * lock); we should be able to safely
                  * set the state.
                  */
-                THR_LOCK_SWITCH(curthread);
+                THR_SCHED_LOCK(curthread, curthread);
                 THR_SET_STATE(curthread, PS_COND_WAIT);

                 /* Remember the CV: */
                 curthread->data.cond = *cond;
+                THR_SCHED_UNLOCK(curthread, curthread);

                 /* Unlock the CV structure: */
                 THR_LOCK_RELEASE(curthread,
@@ -493,7 +494,6 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                 _thr_sched_switch(curthread);

                 curthread->data.cond = NULL;
-                THR_UNLOCK_SWITCH(curthread);

                 /*
                  * XXX - This really isn't a good check
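Every blocking path converted by this commit (the condition-variable waits above, and the mutex, join, sigsuspend, and sigwait paths further down) now has one shape: publish the new state under the thread's own scheduling lock, drop that lock, release the object's low-level lock, and only then call the scheduler, which retakes the scheduling lock itself. A condensed sketch assuming the thr_private.h macros; block_on_object and obj_lock are illustrative names only:

static void
block_on_object(struct pthread *curthread, struct lock *obj_lock,
    enum pthread_state state)
{
    /* The state word is protected by the scheduling lock alone. */
    THR_SCHED_LOCK(curthread, curthread);
    THR_SET_STATE(curthread, state);
    THR_SCHED_UNLOCK(curthread, curthread);

    /* Drop the object's low-level lock before switching... */
    THR_LOCK_RELEASE(curthread, obj_lock);

    /* ...and let _thr_sched_switch() take the scheduling lock. */
    _thr_sched_switch(curthread);
}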
diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c
index 8435f43..22f187b 100644
--- a/lib/libkse/thread/thr_exit.c
+++ b/lib/libkse/thread/thr_exit.c
@@ -125,9 +125,8 @@ _pthread_exit(void *status)
     /* This thread will never be re-scheduled. */
     THR_LOCK_SWITCH(curthread);
     THR_SET_STATE(curthread, PS_DEAD);
-    _thr_sched_switch(curthread);
+    _thr_sched_switch_unlocked(curthread);
     /* Never reach! */
-    THR_UNLOCK_SWITCH(curthread);

     /* This point should not be reached. */
     PANIC("Dead thread has resumed");
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index f06df6c..5bfb3c7 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -72,6 +72,7 @@
 int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
 int __pthread_mutex_lock(pthread_mutex_t *);
 int __pthread_mutex_trylock(pthread_mutex_t *);
+void _thread_init_hack(void);

 static void init_private(void);
 static void init_main_thread(struct pthread *thread);
@@ -131,6 +132,7 @@ static void *references[] = {
     &_sigsuspend,
     &_socket,
     &_socketpair,
+    &_thread_init_hack,
     &_wait4,
     &_write,
     &_writev
diff --git a/lib/libkse/thread/thr_join.c b/lib/libkse/thread/thr_join.c
index 7cbc192..dd69bbb 100644
--- a/lib/libkse/thread/thr_join.c
+++ b/lib/libkse/thread/thr_join.c
@@ -123,13 +123,15 @@ _pthread_join(pthread_t pthread, void **thread_return)
     THR_SCHED_UNLOCK(curthread, pthread);
     _thr_ref_delete(curthread, pthread);

-    THR_LOCK_SWITCH(curthread);
+    THR_SCHED_LOCK(curthread, curthread);
     while (curthread->join_status.thread == pthread) {
         THR_SET_STATE(curthread, PS_JOIN);
+        THR_SCHED_UNLOCK(curthread, curthread);
         /* Schedule the next thread: */
         _thr_sched_switch(curthread);
+        THR_SCHED_LOCK(curthread, curthread);
     }
-    THR_UNLOCK_SWITCH(curthread);
+    THR_SCHED_UNLOCK(curthread, curthread);

     /*
      * The thread return value and error are set by the
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index db47878..2df1634 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -398,7 +398,6 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu)
      */
     ts.tv_sec = 0;
     ts.tv_nsec = 1000000;  /* 1 sec */
-    KSE_SET_WAIT(curkse);
     while (_LCK_BUSY(lu)) {
         /*
          * Yield the kse and wait to be notified when the lock
@@ -408,14 +407,7 @@
          */
         curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
         kse_release(&ts);
         curkse->k_mbx.km_flags = saved_flags;
-
-        /*
-         * Make sure that the wait flag is set again in case
-         * we wokeup without the lock being granted.
-         */
-        KSE_SET_WAIT(curkse);
     }
-    KSE_CLEAR_WAIT(curkse);
 }

 void
@@ -423,17 +415,23 @@ _kse_lock_wakeup(struct lock *lock, struct lockuser *lu)
 {
     struct kse *curkse;
     struct kse *kse;
+    struct kse_mailbox *mbx;

     curkse = _get_curkse();
     kse = (struct kse *)_LCK_GET_PRIVATE(lu);

     if (kse == curkse)
         PANIC("KSE trying to wake itself up in lock");
-    else if (KSE_WAITING(kse)) {
+    else {
+        mbx = &kse->k_mbx;
+        _lock_grant(lock, lu);
         /*
          * Notify the owning kse that it has the lock.
+         * It is safe to pass invalid address to kse_wakeup
+         * even if the mailbox is not in kernel at all,
+         * and waking up a wrong kse is also harmless.
          */
-        KSE_WAKEUP(kse);
+        kse_wakeup(mbx);
     }
 }
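At the KSE level the same grant-then-wake idea applies, but the sleep primitive is kse_release() and the wakeup is kse_wakeup(). A sketch of the two sides as reworked above, with hypothetical helper names; note that the retry timespec is really one millisecond per iteration, whatever the inherited "/* 1 sec */" comment says:

/*
 * Sketch: a KSE that loses the race sleeps in the kernel with
 * upcalls suppressed; the releasing side stores the grant with
 * _lock_grant() and then kicks the sleeper's mailbox.  Because
 * kse_wakeup() tolerates a mailbox that is not currently asleep,
 * the old KSE_SET_WAIT/KSE_CLEAR_WAIT flagging is unnecessary.
 */
static void
kse_wait_side(struct kse *curkse, struct lockuser *lu)
{
    struct timespec ts = { 0, 1000000 };    /* 1 ms per retry */
    int saved_flags;

    while (_LCK_BUSY(lu)) {
        saved_flags = curkse->k_mbx.km_flags;
        curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
        kse_release(&ts);
        curkse->k_mbx.km_flags = saved_flags;
    }
}

static void
kse_wake_side(struct lock *lock, struct lockuser *lu)
{
    struct kse *kse = (struct kse *)_LCK_GET_PRIVATE(lu);

    _lock_grant(lock, lu);      /* lr_locked = 0, release semantics */
    kse_wakeup(&kse->k_mbx);    /* harmless if it is not sleeping */
}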
@@ -446,30 +444,13 @@ void
 _thr_lock_wait(struct lock *lock, struct lockuser *lu)
 {
     struct pthread *curthread = (struct pthread *)lu->lu_private;
-    int count;

-    /*
-     * Spin for a bit.
-     *
-     * XXX - We probably want to make this a bit smarter.  It
-     *       doesn't make sense to spin unless there is more
-     *       than 1 CPU.  A thread that is holding one of these
-     *       locks is prevented from being swapped out for another
-     *       thread within the same scheduling entity.
-     */
-    count = 0;
-    while (_LCK_BUSY(lu) && count < 300)
-        count++;
-    while (_LCK_BUSY(lu)) {
-        THR_LOCK_SWITCH(curthread);
-        if (_LCK_BUSY(lu)) {
-            /* Wait for the lock: */
-            atomic_store_rel_int(&curthread->need_wakeup, 1);
-            THR_SET_STATE(curthread, PS_LOCKWAIT);
-            _thr_sched_switch(curthread);
-        }
-        THR_UNLOCK_SWITCH(curthread);
-    }
+    do {
+        THR_SCHED_LOCK(curthread, curthread);
+        THR_SET_STATE(curthread, PS_LOCKWAIT);
+        THR_SCHED_UNLOCK(curthread, curthread);
+        _thr_sched_switch(curthread);
+    } while (_LCK_BUSY(lu));
 }

 void
@@ -477,26 +458,14 @@ _thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
 {
     struct pthread *thread;
     struct pthread *curthread;
-    int unlock;

     curthread = _get_curthread();
     thread = (struct pthread *)_LCK_GET_PRIVATE(lu);

-    unlock = 0;
-    if (curthread->kseg == thread->kseg) {
-        /* Not already locked */
-        if (curthread->lock_switch == 0) {
-            THR_SCHED_LOCK(curthread, thread);
-            unlock = 1;
-        }
-    } else {
-        THR_SCHED_LOCK(curthread, thread);
-        unlock = 1;
-    }
+    THR_SCHED_LOCK(curthread, thread);
+    _lock_grant(lock, lu);
     _thr_setrunnable_unlocked(thread);
-    atomic_store_rel_int(&thread->need_wakeup, 0);
-    if (unlock)
-        THR_SCHED_UNLOCK(curthread, thread);
+    THR_SCHED_UNLOCK(curthread, thread);
 }

 kse_critical_t
@@ -537,27 +506,42 @@ _thr_critical_leave(struct pthread *thread)
     THR_YIELD_CHECK(thread);
 }

+void
+_thr_sched_switch(struct pthread *curthread)
+{
+    struct kse *curkse;
+
+    (void)_kse_critical_enter();
+    curkse = _get_curkse();
+    KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+    _thr_sched_switch_unlocked(curthread);
+}
+
 /*
  * XXX - We may need to take the scheduling lock before calling
  *       this, or perhaps take the lock within here before
  *       doing anything else.
  */
 void
-_thr_sched_switch(struct pthread *curthread)
+_thr_sched_switch_unlocked(struct pthread *curthread)
 {
+    struct pthread *td;
     struct pthread_sigframe psf;
     struct kse *curkse;
-    volatile int once = 0;
+    int ret;
+    volatile int uts_once;
+    volatile int resume_once = 0;

     /* We're in the scheduler, 5 by 5: */
-    THR_ASSERT(curthread->lock_switch, "lock_switch");
-    THR_ASSERT(_kse_in_critical(), "not in critical region");
     curkse = _get_curkse();

     curthread->need_switchout = 1;  /* The thread yielded on its own. */
     curthread->critical_yield = 0;  /* No need to yield anymore. */
     curthread->slice_usec = -1;     /* Restart the time slice. */

+    /* Thread can unlock the scheduler lock. */
+    curthread->lock_switch = 1;
+
     /*
      * The signal frame is allocated off the stack because
      * a thread can be interrupted by other signals while
@@ -566,19 +550,95 @@ _thr_sched_switch(struct pthread *curthread)
     sigemptyset(&psf.psf_sigset);
     curthread->curframe = &psf;

-    _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
+    /*
+     * Enter the scheduler if any one of the following is true:
+     *
+     *   o The current thread is dead; its stack needs to be
+     *     cleaned up and it can't be done while operating on
+     *     it.
+     *   o There are no runnable threads.
+     *   o The next thread to run won't unlock the scheduler
+     *     lock.  A side note: the current thread may be run
+     *     instead of the next thread in the run queue, but
+     *     we don't bother checking for that.
+     */
+    if ((curthread->state == PS_DEAD) ||
+        (((td = KSE_RUNQ_FIRST(curkse)) == NULL) &&
+        (curthread->state != PS_RUNNING)) ||
+        ((td != NULL) && (td->lock_switch == 0)))
+        _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
+    else {
+        uts_once = 0;
+        THR_GETCONTEXT(&curthread->tmbx.tm_context);
+        if (uts_once == 0) {
+            uts_once = 1;
+
+            /* Switchout the current thread. */
+            kse_switchout_thread(curkse, curthread);
+
+            /* Choose another thread to run. */
+            td = KSE_RUNQ_FIRST(curkse);
+            KSE_RUNQ_REMOVE(curkse, td);
+            curkse->k_curthread = td;
+
+            /*
+             * Make sure the current thread's kse points to
+             * this kse.
+             */
+            td->kse = curkse;
+
+            /*
+             * Reset accounting.
+             */
+            td->tmbx.tm_uticks = 0;
+            td->tmbx.tm_sticks = 0;
+
+            /*
+             * Reset the time slice if this thread is running
+             * for the first time or running again after using
+             * its full time slice allocation.
+             */
+            if (td->slice_usec == -1)
+                td->slice_usec = 0;
+
+            /* Mark the thread active. */
+            td->active = 1;
+
+            /* Remove the frame reference. */
+            td->curframe = NULL;
+
+            /*
+             * Continue the thread at its current frame:
+             */
+            ret = _thread_switch(&td->tmbx, NULL);
+            /* This point should not be reached. */
+            if (ret != 0)
+                PANIC("Bad return from _thread_switch");
+            PANIC("Thread has returned from _thread_switch");
+        }
+    }
+
+    if (curthread->lock_switch != 0) {
+        /*
+         * Unlock the scheduling queue and leave the
+         * critical region.
+         */
+        /* Don't trust this after a switch! */
+        curkse = _get_curkse();
+
+        curthread->lock_switch = 0;
+        KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+        _kse_critical_leave(&curthread->tmbx);
+    }

     /*
      * This thread is being resumed; check for cancellations.
      */
-    if ((once == 0) && (!THR_IN_CRITICAL(curthread))) {
-        once = 1;
-        curthread->critical_count++;
-        THR_UNLOCK_SWITCH(curthread);
-        curthread->critical_count--;
+    if ((resume_once == 0) && (!THR_IN_CRITICAL(curthread))) {
+        resume_once = 1;
         thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf);
-        THR_LOCK_SWITCH(curthread);
     }
+
+    THR_ACTIVATE_LAST_LOCK(curthread);
 }
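The in-line switch above leans on a classic getcontext trick: THR_GETCONTEXT effectively returns twice, and the volatile uts_once flag tells the first return (switch away now) from the second (we were resumed). Reduced to a skeleton under the same in-tree declarations, error handling dropped; switch_inline_sketch is a hypothetical name:

/* Skeleton of the uts_once idiom; sketch only. */
static void
switch_inline_sketch(struct kse *curkse, struct pthread *curthread)
{
    struct pthread *td;
    volatile int uts_once = 0;

    /* First return falls into the body; second return skips it. */
    THR_GETCONTEXT(&curthread->tmbx.tm_context);
    if (uts_once == 0) {
        uts_once = 1;                   /* seen again after resume */
        kse_switchout_thread(curkse, curthread);
        td = KSE_RUNQ_FIRST(curkse);    /* non-NULL on this path */
        KSE_RUNQ_REMOVE(curkse, td);
        curkse->k_curthread = td;
        _thread_switch(&td->tmbx, NULL);    /* does not return */
    }
    /* Resumed here, still holding the scheduler lock. */
}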

 /*
@@ -743,12 +803,10 @@ kse_sched_multi(struct kse *curkse)
         KSE_CLEAR_WAIT(curkse);
     }

+    /* Lock the scheduling lock. */
     curthread = curkse->k_curthread;
-    if (curthread == NULL || curthread->lock_switch == 0) {
-        /*
-         * curthread was preempted by upcall, it is not a volunteer
-         * context switch. Lock the scheduling lock.
-         */
+    if ((curthread == NULL) || (curthread->need_switchout == 0)) {
+        /* This is an upcall; take the scheduler lock. */
         KSE_SCHED_LOCK(curkse, curkse->k_kseg);
     }

@@ -798,14 +856,9 @@ kse_sched_multi(struct kse *curkse)
         DBG_MSG("Continuing thread %p in critical region\n",
             curthread);
         kse_wakeup_multi(curkse);
-        if (curthread->lock_switch) {
-            KSE_SCHED_LOCK(curkse, curkse->k_kseg);
-            ret = _thread_switch(&curthread->tmbx, 0);
-        } else {
-            KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-            ret = _thread_switch(&curthread->tmbx,
-                &curkse->k_mbx.km_curthread);
-        }
+        KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+        ret = _thread_switch(&curthread->tmbx,
+            &curkse->k_mbx.km_curthread);
         if (ret != 0)
             PANIC("Can't resume thread in critical region\n");
     }

@@ -895,9 +948,6 @@ kse_sched_multi(struct kse *curkse)

     kse_wakeup_multi(curkse);

-    /* Unlock the scheduling queue: */
-    KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-
     /*
      * The thread's current signal frame will only be NULL if it
      * is being resumed after being blocked in the kernel.  In
     *
@@ -906,25 +956,30 @@ kse_sched_multi(struct kse *curkse)
      * signal frame to the thread's context.
      */
 #ifdef NOT_YET
-    if ((curframe == NULL) && ((curthread->check_pending != 0) ||
+    if ((curframe == NULL) && ((curthread->have_signals != 0) ||
         (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
         ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))))
         signalcontext(&curthread->tmbx.tm_context, 0,
             (__sighandler_t *)thr_resume_wrapper);
 #else
-    if ((curframe == NULL) && (curthread->check_pending != 0))
+    if ((curframe == NULL) && (curthread->have_signals != 0))
         signalcontext(&curthread->tmbx.tm_context, 0,
             (__sighandler_t *)thr_resume_wrapper);
 #endif
     /*
      * Continue the thread at its current frame:
      */
-    if (curthread->lock_switch) {
-        KSE_SCHED_LOCK(curkse, curkse->k_kseg);
-        ret = _thread_switch(&curthread->tmbx, 0);
+    if (curthread->lock_switch != 0) {
+        /*
+         * This thread came from a scheduler switch; it will
+         * unlock the scheduler lock and set the mailbox.
+         */
+        ret = _thread_switch(&curthread->tmbx, NULL);
     } else {
+        /* This thread won't unlock the scheduler lock. */
+        KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
         ret = _thread_switch(&curthread->tmbx,
-            &curkse->k_mbx.km_curthread);
+            &curkse->k_mbx.km_curthread);
     }
     if (ret != 0)
         PANIC("Thread has returned from _thread_switch");

@@ -977,9 +1032,9 @@ thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
     struct pthread_sigframe *psf)
 {
     /* Check signals before cancellations. */
-    while (curthread->check_pending != 0) {
+    while (curthread->have_signals != 0) {
         /* Clear the pending flag. */
-        curthread->check_pending = 0;
+        curthread->have_signals = 0;

         /*
          * It's perfectly valid, though not portable, for

@@ -1262,6 +1317,11 @@ kse_check_completed(struct kse *kse)
                 THR_SET_STATE(thread, PS_SUSPENDED);
             else
                 KSE_RUNQ_INSERT_TAIL(kse, thread);
+            if ((thread->kse != kse) &&
+                (thread->kse->k_curthread == thread)) {
+                thread->kse->k_curthread = NULL;
+                thread->active = 0;
+            }
         }
         completed = completed->tm_next;
     }

@@ -1360,12 +1420,15 @@ static void
 kse_switchout_thread(struct kse *kse, struct pthread *thread)
 {
     int level;
+    int i;

     /*
      * Place the currently running thread into the
      * appropriate queue(s).
      */
     DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state);
+
+    THR_DEACTIVATE_LAST_LOCK(thread);
     if (thread->blocked != 0) {
         thread->active = 0;
         thread->need_switchout = 0;

@@ -1473,6 +1536,15 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
     }
     thread->active = 0;
     thread->need_switchout = 0;
+    if (thread->check_pending != 0) {
+        /* Install pending signals into the frame. */
+        thread->check_pending = 0;
+        for (i = 0; i < _SIG_MAXSIG; i++) {
+            if (sigismember(&thread->sigpend, i) &&
+                !sigismember(&thread->tmbx.tm_context.uc_sigmask, i))
+                _thr_sig_add(thread, i, &thread->siginfo[i]);
+        }
+    }
 }

 /*
@@ -1584,37 +1656,6 @@ kse_fini(struct kse *kse)
 }

 void
-_thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp)
-{
-    struct kse *curkse;
-
-    curkse = _get_curkse();
-
-    KSE_SCHED_LOCK(curkse, thread->kseg);
-    /*
-     * A threads assigned KSE can't change out from under us
-     * when we hold the scheduler lock.
-     */
-    if (THR_IS_ACTIVE(thread)) {
-        /* Thread is active.  Can't install the signal for it. */
-        /* Make a note in the thread that it has a signal. */
-        sigaddset(&thread->sigpend, sig);
-        thread->check_pending = 1;
-    }
-    else {
-        /* Make a note in the thread that it has a signal. */
-        sigaddset(&thread->sigpend, sig);
-        thread->check_pending = 1;
-
-        if (thread->blocked != 0) {
-            /* Tell the kernel to interrupt the thread. */
-            kse_thr_interrupt(&thread->tmbx);
-        }
-    }
-    KSE_SCHED_UNLOCK(curkse, thread->kseg);
-}
-
-void
 _thr_set_timeout(const struct timespec *timeout)
 {
     struct pthread *curthread = _get_curthread();

@@ -1675,14 +1716,14 @@ _thr_setrunnable_unlocked(struct pthread *thread)
             THR_SET_STATE(thread, PS_SUSPENDED);
         else
             THR_SET_STATE(thread, PS_RUNNING);
-    }else if (thread->state != PS_RUNNING) {
+    } else if (thread->state != PS_RUNNING) {
         if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0)
             KSE_WAITQ_REMOVE(thread->kse, thread);
         if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
             THR_SET_STATE(thread, PS_SUSPENDED);
         else {
             THR_SET_STATE(thread, PS_RUNNING);
-            if ((thread->blocked == 0) &&
+            if ((thread->blocked == 0) && (thread->active == 0) &&
                 (thread->flags & THR_FLAGS_IN_RUNQ) == 0)
                 THR_RUNQ_INSERT_TAIL(thread);
         }
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index bb97db1..4e3e79b 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -500,21 +500,20 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
              */
             mutex_queue_enq(*m, curthread);
             curthread->data.mutex = *m;
-
             /*
              * This thread is active and is in a critical
              * region (holding the mutex lock); we should
              * be able to safely set the state.
              */
-            THR_LOCK_SWITCH(curthread);
+            THR_SCHED_LOCK(curthread, curthread);
             THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+            THR_SCHED_UNLOCK(curthread, curthread);

             /* Unlock the mutex structure: */
             THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

             /* Schedule the next thread: */
             _thr_sched_switch(curthread);
-            THR_UNLOCK_SWITCH(curthread);
         }
         break;

@@ -570,14 +569,15 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
             /* Adjust priorities: */
             mutex_priority_adjust(curthread, *m);

-            THR_LOCK_SWITCH(curthread);
+            THR_SCHED_LOCK(curthread, curthread);
             THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+            THR_SCHED_UNLOCK(curthread, curthread);
+
             /* Unlock the mutex structure: */
             THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

             /* Schedule the next thread: */
             _thr_sched_switch(curthread);
-            THR_UNLOCK_SWITCH(curthread);
         }
         break;

@@ -643,15 +643,15 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
              * be able to safely set the state.
              */

-            THR_LOCK_SWITCH(curthread);
+            THR_SCHED_LOCK(curthread, curthread);
             THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+            THR_SCHED_UNLOCK(curthread, curthread);

             /* Unlock the mutex structure: */
             THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

             /* Schedule the next thread: */
             _thr_sched_switch(curthread);
-            THR_UNLOCK_SWITCH(curthread);
             /*
              * The threads priority may have changed while
              * waiting for the mutex causing a ceiling

@@ -822,15 +822,15 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
          * deadlock on attempts to get a lock you already own.
          */

-        THR_LOCK_SWITCH(curthread);
+        THR_SCHED_LOCK(curthread, curthread);
         THR_SET_STATE(curthread, PS_DEADLOCK);
+        THR_SCHED_UNLOCK(curthread, curthread);

         /* Unlock the mutex structure: */
         THR_LOCK_RELEASE(curthread, &m->m_lock);

         /* Schedule the next thread: */
         _thr_sched_switch(curthread);
-        THR_UNLOCK_SWITCH(curthread);
         break;

     case PTHREAD_MUTEX_RECURSIVE:
diff --git a/lib/libkse/thread/thr_nanosleep.c b/lib/libkse/thread/thr_nanosleep.c
index e83f4f4..7266286 100644
--- a/lib/libkse/thread/thr_nanosleep.c
+++ b/lib/libkse/thread/thr_nanosleep.c
@@ -62,12 +62,10 @@ _nanosleep(const struct timespec *time_to_sleep,

         THR_LOCK_SWITCH(curthread);
         curthread->interrupted = 0;
-
         THR_SET_STATE(curthread, PS_SLEEP_WAIT);

         /* Reschedule the current thread to sleep: */
-        _thr_sched_switch(curthread);
-        THR_UNLOCK_SWITCH(curthread);
+        _thr_sched_switch_unlocked(curthread);

         /* Calculate the remaining time to sleep: */
         KSE_GET_TOD(curthread->kse, &ts1);
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h
index f1b7fd9..a6e8653 100644
--- a/lib/libkse/thread/thr_private.h
+++ b/lib/libkse/thread/thr_private.h
@@ -662,11 +662,12 @@ struct pthread {
     sigset_t            sigpend;
     int                 sigmask_seqno;
     int                 check_pending;
+    int                 have_signals;
     int                 refcount;

     /* Thread state: */
     enum pthread_state  state;
-    int                 lock_switch;
+    volatile int        lock_switch;

     /*
      * Number of microseconds accumulated by this thread when
@@ -812,11 +813,8 @@ struct pthread {
 #define THR_YIELD_CHECK(thrd)                       \
 do {                                                \
     if (((thrd)->critical_yield != 0) &&            \
-        !(THR_IN_CRITICAL(thrd))) {                 \
-        THR_LOCK_SWITCH(thrd);                      \
+        !(THR_IN_CRITICAL(thrd)))                   \
         _thr_sched_switch(thrd);                    \
-        THR_UNLOCK_SWITCH(thrd);                    \
-    }                                               \
     else if (((thrd)->check_pending != 0) &&        \
         !(THR_IN_CRITICAL(thrd)))                   \
         _thr_sig_check_pending(thrd);               \
@@ -827,6 +825,7 @@ do {                                                \
     if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL)     \
         PANIC("Exceeded maximum lock level");       \
     else {                                          \
+        THR_DEACTIVATE_LAST_LOCK(thrd);             \
         (thrd)->locklevel++;                        \
         _lock_acquire((lck),                        \
             &(thrd)->lockusers[(thrd)->locklevel - 1], \
@@ -840,29 +839,24 @@ do {                                               \
     _lock_release((lck),                            \
         &(thrd)->lockusers[(thrd)->locklevel - 1]); \
     (thrd)->locklevel--;                            \
-    if ((thrd)->lock_switch)                        \
-        ;                                           \
-    else {                                          \
+    THR_ACTIVATE_LAST_LOCK(thrd);                   \
+    if ((thrd)->locklevel == 0)                     \
         THR_YIELD_CHECK(thrd);                      \
-    }                                               \
     }                                               \
 } while (0)

-#define THR_LOCK_SWITCH(thrd)                       \
+#define THR_ACTIVATE_LAST_LOCK(thrd)                \
 do {                                                \
-    THR_ASSERT(!(thrd)->lock_switch, "context switch locked"); \
-    _kse_critical_enter();                          \
-    KSE_SCHED_LOCK((thrd)->kse, (thrd)->kseg);      \
-    (thrd)->lock_switch = 1;                        \
+    if ((thrd)->locklevel > 0)                      \
+        _lockuser_setactive(                        \
+            &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \
 } while (0)

-#define THR_UNLOCK_SWITCH(thrd)                     \
+#define THR_DEACTIVATE_LAST_LOCK(thrd)              \
 do {                                                \
-    THR_ASSERT((thrd)->lock_switch, "context switch not locked"); \
-    THR_ASSERT(_kse_in_critical(), "Er,not in critical region"); \
-    (thrd)->lock_switch = 0;                        \
-    KSE_SCHED_UNLOCK((thrd)->kse, (thrd)->kseg);    \
-    _kse_critical_leave(&thrd->tmbx);               \
+    if ((thrd)->locklevel > 0)                      \
+        _lockuser_setactive(                        \
+            &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \
 } while (0)

 /*
@@ -937,15 +931,19 @@ do {                                               \
     _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
 } while (0)

+/* Take the scheduling lock with the intent to call the scheduler. */
+#define THR_LOCK_SWITCH(curthr) do {                \
+    (void)_kse_critical_enter();                    \
+    KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg);  \
+} while (0)
+
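THR_ACTIVATE_LAST_LOCK and THR_DEACTIVATE_LAST_LOCK maintain the lr_active flag that _lock_acquire() polls: only the lockuser on top of a thread's lock stack is ever marked active, and it is marked inactive while its owner is off a CPU, so spinners give up and sleep as soon as _LCK_ACTIVE() goes false. Condensed semantics of the macros above; the push/pop helper names are hypothetical:

/* Sketch of the lock-stack bookkeeping, not functions in the tree. */
static void
locklevel_push_sketch(struct pthread *thrd, struct lock *lck)
{
    THR_DEACTIVATE_LAST_LOCK(thrd);     /* old top goes inactive */
    thrd->locklevel++;
    _lock_acquire(lck, &thrd->lockusers[thrd->locklevel - 1],
        thrd->active_priority);         /* sets its lr_active = 1 */
}

static void
locklevel_pop_sketch(struct pthread *thrd, struct lock *lck)
{
    /* _lock_release() clears lr_active for the dropped lock. */
    _lock_release(lck, &thrd->lockusers[thrd->locklevel - 1]);
    thrd->locklevel--;
    THR_ACTIVATE_LAST_LOCK(thrd);       /* new top goes active */
}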
 #define THR_CRITICAL_ENTER(thr)    (thr)->critical_count++
 #define THR_CRITICAL_LEAVE(thr)    do {    \
     (thr)->critical_count--;               \
     if (((thr)->critical_yield != 0) &&    \
         ((thr)->critical_count == 0)) {    \
         (thr)->critical_yield = 0;         \
-        THR_LOCK_SWITCH(thr);              \
         _thr_sched_switch(thr);            \
-        THR_UNLOCK_SWITCH(thr);            \
     }                                      \
 } while (0)

@@ -1101,7 +1099,7 @@ int  _thr_schedule_add(struct pthread *, struct pthread *);
 void _thr_schedule_remove(struct pthread *, struct pthread *);
 void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
 void _thr_setrunnable_unlocked(struct pthread *thread);
-void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *);
+void _thr_sig_add(struct pthread *, int, siginfo_t *);
 void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
 int  _thr_stack_alloc(struct pthread_attr *);
 void _thr_stack_free(struct pthread_attr *);
@@ -1114,6 +1112,7 @@ void _thread_dump_info(void);
 void _thread_printf(int, const char *, ...);
 void _thr_sched_frame(struct pthread_sigframe *);
 void _thr_sched_switch(struct pthread *);
+void _thr_sched_switch_unlocked(struct pthread *);
 void _thr_set_timeout(const struct timespec *);
 void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
 void _thr_sig_check_pending(struct pthread *);
diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c
index ba31073..3ec0eca 100644
--- a/lib/libkse/thread/thr_sig.c
+++ b/lib/libkse/thread/thr_sig.c
@@ -45,7 +45,7 @@
 /* Prototypes: */
 static void build_siginfo(siginfo_t *info, int signo);
-static void thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info);
+/* static void thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info); */
 static void thr_sig_check_state(struct pthread *pthread, int sig);
 static struct pthread *thr_sig_find(struct kse *curkse, int sig,
     siginfo_t *info);
@@ -158,7 +158,7 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
      */
     DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread);
     KSE_SCHED_LOCK(curkse, thread->kseg);
-    thr_sig_add(thread, sig, info);
+    _thr_sig_add(thread, sig, info);
     KSE_SCHED_UNLOCK(curkse, thread->kseg);
     }
 }

@@ -571,146 +571,138 @@ handle_special_signals(struct kse *curkse, int sig)
  *
  * This must be called with the thread's scheduling lock held.
  */
-static void
-thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
+void
+_thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
 {
     int restart;
     int suppress_handler = 0;

-    restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+    if (pthread->curframe == NULL) {
+        /*
+         * This thread is active.  Just add it to the
+         * thread's pending set.
+         */
+        sigaddset(&pthread->sigpend, sig);
+        pthread->check_pending = 1;
+        if (info == NULL)
+            build_siginfo(&pthread->siginfo[sig], sig);
+        else if (info != &pthread->siginfo[sig])
+            memcpy(&pthread->siginfo[sig], info,
+                sizeof(*info));
+        if ((pthread->blocked != 0) && !THR_IN_CRITICAL(pthread))
+            kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
+    }
+    else {
+        restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;

-    /* Make sure this signal isn't still in the pending set: */
-    sigdelset(&pthread->sigpend, sig);
+        /* Make sure this signal isn't still in the pending set: */
+        sigdelset(&pthread->sigpend, sig);

-    /*
-     * Process according to thread state:
-     */
-    switch (pthread->state) {
-    /*
-     * States which do not change when a signal is trapped:
-     */
-    case PS_DEAD:
-    case PS_DEADLOCK:
-    case PS_LOCKWAIT:
-    case PS_SUSPENDED:
-    case PS_STATE_MAX:
         /*
-         * You can't call a signal handler for threads in these
-         * states.
+         * Process according to thread state:
         */
-        suppress_handler = 1;
-        break;
-
-    /*
-     * States which do not need any cleanup handling when signals
-     * occur:
-     */
-    case PS_RUNNING:
+        switch (pthread->state) {
         /*
-         * Remove the thread from the queue before changing its
-         * priority:
+         * States which do not change when a signal is trapped:
         */
-        if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
-            THR_RUNQ_REMOVE(pthread);
-        else {
+        case PS_DEAD:
+        case PS_DEADLOCK:
+        case PS_LOCKWAIT:
+        case PS_SUSPENDED:
+        case PS_STATE_MAX:
             /*
-             * This thread is active; add the signal to the
-             * pending set and mark it as having pending
-             * signals.
+             * You can't call a signal handler for threads in these
+             * states.
             */
             suppress_handler = 1;
-            sigaddset(&pthread->sigpend, sig);
-            build_siginfo(&pthread->siginfo[sig], sig);
-            pthread->check_pending = 1;
-            if ((pthread->blocked != 0) &&
-                !THR_IN_CRITICAL(pthread))
-                kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
-        }
-        break;
+            break;

-    /*
-     * States which cannot be interrupted but still require the
-     * signal handler to run:
-     */
-    case PS_COND_WAIT:
-    case PS_MUTEX_WAIT:
         /*
-         * Remove the thread from the wait queue.  It will
-         * be added back to the wait queue once all signal
-         * handlers have been invoked.
+         * States which do not need any cleanup handling when signals
+         * occur:
         */
-        KSE_WAITQ_REMOVE(pthread->kse, pthread);
-        break;
+        case PS_RUNNING:
+            /*
+             * Remove the thread from the queue before changing its
+             * priority:
+             */
+            if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
+                THR_RUNQ_REMOVE(pthread);
+            break;

-    case PS_SLEEP_WAIT:
         /*
-         * Unmasked signals always cause sleep to terminate early,
-         * regardless of SA_RESTART:
+         * States which cannot be interrupted but still require the
+         * signal handler to run:
         */
-        pthread->interrupted = 1;
-        KSE_WAITQ_REMOVE(pthread->kse, pthread);
-        THR_SET_STATE(pthread, PS_RUNNING);
-        break;
-
-    case PS_JOIN:
-    case PS_SIGSUSPEND:
-        KSE_WAITQ_REMOVE(pthread->kse, pthread);
-        THR_SET_STATE(pthread, PS_RUNNING);
-        break;
+        case PS_COND_WAIT:
+        case PS_MUTEX_WAIT:
+            /*
+             * Remove the thread from the wait queue.  It will
+             * be added back to the wait queue once all signal
+             * handlers have been invoked.
+             */
+            KSE_WAITQ_REMOVE(pthread->kse, pthread);
+            break;

-    case PS_SIGWAIT:
-        /* The signal handler is not called for threads in SIGWAIT. */
-        suppress_handler = 1;
-        /* Wake up the thread if the signal is blocked. */
-        if (sigismember(pthread->data.sigwait, sig)) {
-            /* Return the signal number: */
-            pthread->signo = sig;
+        case PS_SLEEP_WAIT:
+            /*
+             * Unmasked signals always cause sleep to terminate
+             * early regardless of SA_RESTART:
+             */
+            pthread->interrupted = 1;
+            KSE_WAITQ_REMOVE(pthread->kse, pthread);
+            THR_SET_STATE(pthread, PS_RUNNING);
+            break;

-            /* Make the thread runnable: */
-            _thr_setrunnable_unlocked(pthread);
-        } else
-            /* Increment the pending signal count. */
-            sigaddset(&pthread->sigpend, sig);
-        break;
-    }
+        case PS_JOIN:
+        case PS_SIGSUSPEND:
+            KSE_WAITQ_REMOVE(pthread->kse, pthread);
+            THR_SET_STATE(pthread, PS_RUNNING);
+            break;

-    if (suppress_handler == 0) {
-        if (pthread->curframe == NULL) {
+        case PS_SIGWAIT:
             /*
-             * This thread is active.  Just add it to the
-             * thread's pending set.
+             * The signal handler is not called for threads in
+             * SIGWAIT.
             */
-            sigaddset(&pthread->sigpend, sig);
-            pthread->check_pending = 1;
-            if (info == NULL)
-                build_siginfo(&pthread->siginfo[sig], sig);
-            else
-                memcpy(&pthread->siginfo[sig], info,
-                    sizeof(*info));
-        } else {
+            suppress_handler = 1;
+            /* Wake up the thread if the signal is blocked. */
+            if (sigismember(pthread->data.sigwait, sig)) {
+                /* Return the signal number: */
+                pthread->signo = sig;
+
+                /* Make the thread runnable: */
+                _thr_setrunnable_unlocked(pthread);
+            } else
+                /* Increment the pending signal count. */
+                sigaddset(&pthread->sigpend, sig);
+            break;
+        }
+
+        if (suppress_handler == 0) {
             /*
              * Setup a signal frame and save the current threads
              * state:
              */
             thr_sigframe_add(pthread, sig, info);
-        }

         if (pthread->state != PS_RUNNING)
             THR_SET_STATE(pthread, PS_RUNNING);

-    /*
-     * The thread should be removed from all scheduling
-     * queues at this point.  Raise the priority and
-     * place the thread in the run queue.  It is also
-     * possible for a signal to be sent to a suspended
-     * thread, mostly via pthread_kill().  If a thread
-     * is suspended, don't insert it into the priority
-     * queue; just set its state to suspended and it
-     * will run the signal handler when it is resumed.
-     */
-    pthread->active_priority |= THR_SIGNAL_PRIORITY;
-    if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
-        THR_RUNQ_INSERT_TAIL(pthread);
+            /*
+             * The thread should be removed from all scheduling
+             * queues at this point.  Raise the priority and
+             * place the thread in the run queue.  It is also
+             * possible for a signal to be sent to a suspended
+             * thread, mostly via pthread_kill().  If a thread
+             * is suspended, don't insert it into the priority
+             * queue; just set its state to suspended and it
+             * will run the signal handler when it is resumed.
+             */
+            pthread->active_priority |= THR_SIGNAL_PRIORITY;
+            if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
+                THR_RUNQ_INSERT_TAIL(pthread);
+        }
     }
 }

@@ -834,7 +826,7 @@ _thr_sig_send(struct pthread *pthread, int sig)
          * Perform any state changes due to signal
          * arrival:
          */
-        thr_sig_add(pthread, sig, NULL);
+        _thr_sig_add(pthread, sig, NULL);
         THR_SCHED_UNLOCK(curthread, pthread);
     }
 }

@@ -846,20 +838,20 @@ thr_sigframe_add(struct pthread *thread, int sig, siginfo_t *info)
     if (thread->curframe == NULL)
         PANIC("Thread doesn't have signal frame ");

-    if (thread->check_pending == 0) {
+    if (thread->have_signals == 0) {
         /*
          * Multiple signals can be added to the same signal
          * frame.  Only save the thread's state the first time.
          */
         thr_sigframe_save(thread, thread->curframe);
-        thread->check_pending = 1;
+        thread->have_signals = 1;
         thread->flags &= THR_FLAGS_PRIVATE;
     }
     sigaddset(&thread->curframe->psf_sigset, sig);
-    if (info != NULL)
-        memcpy(&thread->siginfo[sig], info, sizeof(*info));
-    else
+    if (info == NULL)
         build_siginfo(&thread->siginfo[sig], sig);
+    else if (info != &thread->siginfo[sig])
+        memcpy(&thread->siginfo[sig], info, sizeof(*info));

     /* Setup the new signal mask. */
     SIGSETOR(thread->tmbx.tm_context.uc_sigmask,
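The rewritten _thr_sig_add() above branches on curframe before anything else: a thread that is current on some KSE (curframe == NULL) only gets the signal queued as pending, and kse_switchout_thread() installs it at the next switchout, while a switched-out thread gets the signal pushed onto its signal frame immediately. The control flow in outline; sig_add_outline is a hypothetical name and the full per-state switch is elided:

/* Outline only; the complete state handling is in the diff above. */
void
sig_add_outline(struct pthread *pthread, int sig, siginfo_t *info)
{
    if (pthread->curframe == NULL) {
        /* Running now: defer; kse_switchout_thread() installs it. */
        sigaddset(&pthread->sigpend, sig);
        pthread->check_pending = 1;
        if (pthread->blocked != 0 && !THR_IN_CRITICAL(pthread))
            kse_thr_interrupt(&pthread->tmbx);
    } else {
        /* Switched out: push onto its signal frame right away. */
        thr_sigframe_add(pthread, sig, info);
        if (pthread->state != PS_RUNNING)
            THR_SET_STATE(pthread, PS_RUNNING);
        pthread->active_priority |= THR_SIGNAL_PRIORITY;
        if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
            THR_RUNQ_INSERT_TAIL(pthread);
    }
}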
diff --git a/lib/libkse/thread/thr_sigsuspend.c b/lib/libkse/thread/thr_sigsuspend.c
index 9ada1b2..5916156 100644
--- a/lib/libkse/thread/thr_sigsuspend.c
+++ b/lib/libkse/thread/thr_sigsuspend.c
@@ -57,9 +57,7 @@ _sigsuspend(const sigset_t *set)
         THR_SET_STATE(curthread, PS_SIGSUSPEND);

         /* Wait for a signal: */
-        _thr_sched_switch(curthread);
-
-        THR_UNLOCK_SWITCH(curthread);
+        _thr_sched_switch_unlocked(curthread);

         /* Always return an interrupted error: */
         errno = EINTR;
diff --git a/lib/libkse/thread/thr_sigwait.c b/lib/libkse/thread/thr_sigwait.c
index 9bb4285..c8c7762 100644
--- a/lib/libkse/thread/thr_sigwait.c
+++ b/lib/libkse/thread/thr_sigwait.c
@@ -136,8 +136,7 @@ _sigwait(const sigset_t *set, int *sig)
     /* Wait for a signal: */
     THR_LOCK_SWITCH(curthread);
     THR_SET_STATE(curthread, PS_SIGWAIT);
-    _thr_sched_switch(curthread);
-    THR_UNLOCK_SWITCH(curthread);
+    _thr_sched_switch_unlocked(curthread);

     /* Return the signal number to the caller: */
     *sig = curthread->signo;
diff --git a/lib/libkse/thread/thr_yield.c b/lib/libkse/thread/thr_yield.c
index dfe7278..b41072f 100644
--- a/lib/libkse/thread/thr_yield.c
+++ b/lib/libkse/thread/thr_yield.c
@@ -46,9 +46,7 @@ _sched_yield(void)
     curthread->slice_usec = -1;

     /* Schedule the next thread: */
-    THR_LOCK_SWITCH(curthread);
     _thr_sched_switch(curthread);
-    THR_UNLOCK_SWITCH(curthread);

     /* Always return no error. */
     return(0);
 }
@@ -63,7 +61,5 @@ _pthread_yield(void)
     curthread->slice_usec = -1;

     /* Schedule the next thread: */
-    THR_LOCK_SWITCH(curthread);
     _thr_sched_switch(curthread);
-    THR_UNLOCK_SWITCH(curthread);
 }
diff --git a/lib/libpthread/sys/lock.c b/lib/libpthread/sys/lock.c
index 12ce1a0..d005baa 100644
--- a/lib/libpthread/sys/lock.c
+++ b/lib/libpthread/sys/lock.c
@@ -65,7 +65,7 @@ _lock_init(struct lock *lck, enum lock_type ltype,
         lck->l_head->lr_watcher = NULL;
         lck->l_head->lr_owner = NULL;
         lck->l_head->lr_waiting = 0;
-        lck->l_head->lr_handshake = 0;
+        lck->l_head->lr_active = 1;
         lck->l_tail = lck->l_head;
     }
     return (0);
@@ -85,7 +85,7 @@ _lockuser_init(struct lockuser *lu, void *priv)
     lu->lu_myreq->lr_watcher = NULL;
     lu->lu_myreq->lr_owner = lu;
     lu->lu_myreq->lr_waiting = 0;
-    lu->lu_myreq->lr_handshake = 0;
+    lu->lu_myreq->lr_active = 0;
     lu->lu_watchreq = NULL;
     lu->lu_priority = 0;
     lu->lu_private = priv;
@@ -166,19 +166,16 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
         for (i = 0; i < MAX_SPINS; i++) {
             if (lu->lu_watchreq->lr_locked == 0)
                 return;
+            if (lu->lu_watchreq->lr_active == 0)
+                break;
         }
         atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
         while (lu->lu_watchreq->lr_locked != 0)
             lck->l_wait(lck, lu);
         atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
-        /*
-         * Wait for original owner to stop accessing the
-         * lockreq object.
-         */
-        while (lu->lu_watchreq->lr_handshake)
-            ;
         }
     }
+    lu->lu_myreq->lr_active = 1;
 }

 /*
@@ -240,24 +237,21 @@ _lock_release(struct lock *lck, struct lockuser *lu)
             }
         }
         if (lu_h != NULL) {
-            lu_h->lu_watchreq->lr_handshake = 1;
             /* Give the lock to the highest priority user. */
-            atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
             if ((lu_h->lu_watchreq->lr_waiting != 0) &&
                 (lck->l_wakeup != NULL))
                 /* Notify the sleeper */
                 lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
-            atomic_store_rel_long(&lu_h->lu_watchreq->lr_handshake,
-                0);
+            else
+                atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
         } else {
-            myreq->lr_handshake = 1;
-            /* Give the lock to the previous request. */
-            atomic_store_rel_long(&myreq->lr_locked, 0);
             if ((myreq->lr_waiting != 0) &&
                 (lck->l_wakeup != NULL))
                 /* Notify the sleeper */
                 lck->l_wakeup(lck, myreq->lr_watcher);
-            atomic_store_rel_long(&myreq->lr_handshake, 0);
+            else
+                /* Give the lock to the previous request. */
+                atomic_store_rel_long(&myreq->lr_locked, 0);
         }
     } else {
         /*
@@ -270,19 +264,25 @@ _lock_release(struct lock *lck, struct lockuser *lu)
         lu->lu_watchreq = NULL;
         lu->lu_myreq->lr_locked = 1;
         lu->lu_myreq->lr_waiting = 0;
-        if (lck->l_wakeup) {
-            /* Start wakeup */
-            myreq->lr_handshake = 1;
+        if (myreq->lr_waiting != 0 && lck->l_wakeup)
+            /* Notify the sleeper */
+            lck->l_wakeup(lck, myreq->lr_watcher);
+        else
             /* Give the lock to the previous request. */
             atomic_store_rel_long(&myreq->lr_locked, 0);
-        if (myreq->lr_waiting != 0) {
-            /* Notify the sleeper */
-            lck->l_wakeup(lck, myreq->lr_watcher);
-        }
-        /* Stop wakeup */
-        atomic_store_rel_long(&myreq->lr_handshake, 0);
-    } else {
-        atomic_store_rel_long(&myreq->lr_locked, 0);
-    }
     }
+    lu->lu_myreq->lr_active = 0;
 }
+
+void
+_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
+{
+    atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
+}
+
+void
+_lockuser_setactive(struct lockuser *lu, int active)
+{
+    lu->lu_myreq->lr_active = active;
+}
+
diff --git a/lib/libpthread/sys/lock.h b/lib/libpthread/sys/lock.h
index e397111..6fa23e2 100644
--- a/lib/libpthread/sys/lock.h
+++ b/lib/libpthread/sys/lock.h
@@ -55,7 +55,7 @@ struct lockreq {
     struct lockuser *lr_watcher;    /* only used for priority locks */
     struct lockuser *lr_owner;      /* only used for priority locks */
     long            lr_waiting;     /* non-zero when wakeup needed */
-    volatile long   lr_handshake;   /* non-zero when wakeup in progress */
+    volatile int    lr_active;      /* non-zero if the lock is last lock for thread */
 };

 struct lockuser {
@@ -72,6 +72,7 @@ struct lockuser {
 #define _LCK_REQUEST_INITIALIZER    { 0, NULL, NULL, 0 }

 #define _LCK_BUSY(lu)       ((lu)->lu_watchreq->lr_locked != 0)
+#define _LCK_ACTIVE(lu)     ((lu)->lu_watchreq->lr_active != 0)
 #define _LCK_GRANTED(lu)    ((lu)->lu_watchreq->lr_locked == 0)

 #define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p)
@@ -84,7 +85,9 @@ int _lock_init(struct lock *, enum lock_type,
         lock_handler_t *, lock_handler_t *);
 int  _lockuser_init(struct lockuser *lu, void *priv);
 void _lockuser_destroy(struct lockuser *lu);
+void _lockuser_setactive(struct lockuser *lu, int active);
 void _lock_acquire(struct lock *, struct lockuser *, int);
 void _lock_release(struct lock *, struct lockuser *);
+void _lock_grant(struct lock *, struct lockuser *);

 #endif
diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index 12c14db..1b0325f 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -267,11 +267,12 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
                  * lock); we should be able to safely
                  * set the state.
                  */
-                THR_LOCK_SWITCH(curthread);
+                THR_SCHED_LOCK(curthread, curthread);
                 THR_SET_STATE(curthread, PS_COND_WAIT);

                 /* Remember the CV: */
                 curthread->data.cond = *cond;
+                THR_SCHED_UNLOCK(curthread, curthread);

                 /* Unlock the CV structure: */
                 THR_LOCK_RELEASE(curthread,
@@ -281,7 +282,6 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
                 _thr_sched_switch(curthread);

                 curthread->data.cond = NULL;
-                THR_UNLOCK_SWITCH(curthread);

                 /*
                  * XXX - This really isn't a good check
@@ -479,11 +479,12 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                  * lock); we should be able to safely
                  * set the state.
                  */
-                THR_LOCK_SWITCH(curthread);
+                THR_SCHED_LOCK(curthread, curthread);
                 THR_SET_STATE(curthread, PS_COND_WAIT);

                 /* Remember the CV: */
                 curthread->data.cond = *cond;
+                THR_SCHED_UNLOCK(curthread, curthread);

                 /* Unlock the CV structure: */
                 THR_LOCK_RELEASE(curthread,
@@ -493,7 +494,6 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                 _thr_sched_switch(curthread);

                 curthread->data.cond = NULL;
-                THR_UNLOCK_SWITCH(curthread);

                 /*
                  * XXX - This really isn't a good check
diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c
index 8435f43..22f187b 100644
--- a/lib/libpthread/thread/thr_exit.c
+++ b/lib/libpthread/thread/thr_exit.c
@@ -125,9 +125,8 @@ _pthread_exit(void *status)
     /* This thread will never be re-scheduled. */
     THR_LOCK_SWITCH(curthread);
     THR_SET_STATE(curthread, PS_DEAD);
-    _thr_sched_switch(curthread);
+    _thr_sched_switch_unlocked(curthread);
     /* Never reach! */
-    THR_UNLOCK_SWITCH(curthread);

     /* This point should not be reached. */
     PANIC("Dead thread has resumed");
diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c
index f06df6c..5bfb3c7 100644
--- a/lib/libpthread/thread/thr_init.c
+++ b/lib/libpthread/thread/thr_init.c
@@ -72,6 +72,7 @@
 int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
 int __pthread_mutex_lock(pthread_mutex_t *);
 int __pthread_mutex_trylock(pthread_mutex_t *);
+void _thread_init_hack(void);

 static void init_private(void);
 static void init_main_thread(struct pthread *thread);
@@ -131,6 +132,7 @@ static void *references[] = {
     &_sigsuspend,
     &_socket,
     &_socketpair,
+    &_thread_init_hack,
     &_wait4,
     &_write,
     &_writev
diff --git a/lib/libpthread/thread/thr_join.c b/lib/libpthread/thread/thr_join.c
index 7cbc192..dd69bbb 100644
--- a/lib/libpthread/thread/thr_join.c
+++ b/lib/libpthread/thread/thr_join.c
@@ -123,13 +123,15 @@ _pthread_join(pthread_t pthread, void **thread_return)
     THR_SCHED_UNLOCK(curthread, pthread);
     _thr_ref_delete(curthread, pthread);

-    THR_LOCK_SWITCH(curthread);
+    THR_SCHED_LOCK(curthread, curthread);
     while (curthread->join_status.thread == pthread) {
         THR_SET_STATE(curthread, PS_JOIN);
+        THR_SCHED_UNLOCK(curthread, curthread);
         /* Schedule the next thread: */
         _thr_sched_switch(curthread);
+        THR_SCHED_LOCK(curthread, curthread);
     }
-    THR_UNLOCK_SWITCH(curthread);
+    THR_SCHED_UNLOCK(curthread, curthread);

     /*
      * The thread return value and error are set by the
diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c
index db47878..2df1634 100644
--- a/lib/libpthread/thread/thr_kern.c
+++ b/lib/libpthread/thread/thr_kern.c
@@ -398,7 +398,6 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu)
      */
     ts.tv_sec = 0;
     ts.tv_nsec = 1000000;  /* 1 sec */
-    KSE_SET_WAIT(curkse);
     while (_LCK_BUSY(lu)) {
         /*
          * Yield the kse and wait to be notified when the lock
@@ -408,14 +407,7 @@
          */
         curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
         kse_release(&ts);
         curkse->k_mbx.km_flags = saved_flags;
-
-        /*
-         * Make sure that the wait flag is set again in case
-         * we wokeup without the lock being granted.
-         */
-        KSE_SET_WAIT(curkse);
     }
-    KSE_CLEAR_WAIT(curkse);
 }

 void
@@ -423,17 +415,23 @@ _kse_lock_wakeup(struct lock *lock, struct lockuser *lu)
 {
     struct kse *curkse;
     struct kse *kse;
+    struct kse_mailbox *mbx;

     curkse = _get_curkse();
     kse = (struct kse *)_LCK_GET_PRIVATE(lu);

     if (kse == curkse)
         PANIC("KSE trying to wake itself up in lock");
-    else if (KSE_WAITING(kse)) {
+    else {
+        mbx = &kse->k_mbx;
+        _lock_grant(lock, lu);
         /*
          * Notify the owning kse that it has the lock.
+         * It is safe to pass invalid address to kse_wakeup
+         * even if the mailbox is not in kernel at all,
+         * and waking up a wrong kse is also harmless.
          */
-        KSE_WAKEUP(kse);
+        kse_wakeup(mbx);
     }
 }

@@ -446,30 +444,13 @@ void
 _thr_lock_wait(struct lock *lock, struct lockuser *lu)
 {
     struct pthread *curthread = (struct pthread *)lu->lu_private;
-    int count;

-    /*
-     * Spin for a bit.
-     *
-     * XXX - We probably want to make this a bit smarter.  It
-     *       doesn't make sense to spin unless there is more
-     *       than 1 CPU.  A thread that is holding one of these
-     *       locks is prevented from being swapped out for another
-     *       thread within the same scheduling entity.
-     */
-    count = 0;
-    while (_LCK_BUSY(lu) && count < 300)
-        count++;
-    while (_LCK_BUSY(lu)) {
-        THR_LOCK_SWITCH(curthread);
-        if (_LCK_BUSY(lu)) {
-            /* Wait for the lock: */
-            atomic_store_rel_int(&curthread->need_wakeup, 1);
-            THR_SET_STATE(curthread, PS_LOCKWAIT);
-            _thr_sched_switch(curthread);
-        }
-        THR_UNLOCK_SWITCH(curthread);
-    }
+    do {
+        THR_SCHED_LOCK(curthread, curthread);
+        THR_SET_STATE(curthread, PS_LOCKWAIT);
+        THR_SCHED_UNLOCK(curthread, curthread);
+        _thr_sched_switch(curthread);
+    } while (_LCK_BUSY(lu));
 }

 void
@@ -477,26 +458,14 @@ _thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
 {
     struct pthread *thread;
     struct pthread *curthread;
-    int unlock;

     curthread = _get_curthread();
     thread = (struct pthread *)_LCK_GET_PRIVATE(lu);

-    unlock = 0;
-    if (curthread->kseg == thread->kseg) {
-        /* Not already locked */
-        if (curthread->lock_switch == 0) {
-            THR_SCHED_LOCK(curthread, thread);
-            unlock = 1;
-        }
-    } else {
-        THR_SCHED_LOCK(curthread, thread);
-        unlock = 1;
-    }
+    THR_SCHED_LOCK(curthread, thread);
+    _lock_grant(lock, lu);
     _thr_setrunnable_unlocked(thread);
-    atomic_store_rel_int(&thread->need_wakeup, 0);
-    if (unlock)
-        THR_SCHED_UNLOCK(curthread, thread);
+    THR_SCHED_UNLOCK(curthread, thread);
 }

 kse_critical_t
@@ -537,27 +506,42 @@ _thr_critical_leave(struct pthread *thread)
     THR_YIELD_CHECK(thread);
 }

+void
+_thr_sched_switch(struct pthread *curthread)
+{
+    struct kse *curkse;
+
+    (void)_kse_critical_enter();
+    curkse = _get_curkse();
+    KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+    _thr_sched_switch_unlocked(curthread);
+}
+
 /*
  * XXX - We may need to take the scheduling lock before calling
  *       this, or perhaps take the lock within here before
  *       doing anything else.
  */
 void
-_thr_sched_switch(struct pthread *curthread)
+_thr_sched_switch_unlocked(struct pthread *curthread)
 {
+    struct pthread *td;
     struct pthread_sigframe psf;
     struct kse *curkse;
-    volatile int once = 0;
+    int ret;
+    volatile int uts_once;
+    volatile int resume_once = 0;

     /* We're in the scheduler, 5 by 5: */
-    THR_ASSERT(curthread->lock_switch, "lock_switch");
-    THR_ASSERT(_kse_in_critical(), "not in critical region");
     curkse = _get_curkse();

     curthread->need_switchout = 1;  /* The thread yielded on its own. */
     curthread->critical_yield = 0;  /* No need to yield anymore. */
     curthread->slice_usec = -1;     /* Restart the time slice. */

+    /* Thread can unlock the scheduler lock. */
+    curthread->lock_switch = 1;
+
     /*
      * The signal frame is allocated off the stack because
      * a thread can be interrupted by other signals while
@@ -566,19 +550,95 @@ _thr_sched_switch(struct pthread *curthread)
     sigemptyset(&psf.psf_sigset);
     curthread->curframe = &psf;

-    _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
+    /*
+     * Enter the scheduler if any one of the following is true:
+     *
+     *   o The current thread is dead; its stack needs to be
+     *     cleaned up and it can't be done while operating on
+     *     it.
+     *   o There are no runnable threads.
+     *   o The next thread to run won't unlock the scheduler
+     *     lock.  A side note: the current thread may be run
+     *     instead of the next thread in the run queue, but
+     *     we don't bother checking for that.
+     */
+    if ((curthread->state == PS_DEAD) ||
+        (((td = KSE_RUNQ_FIRST(curkse)) == NULL) &&
+        (curthread->state != PS_RUNNING)) ||
+        ((td != NULL) && (td->lock_switch == 0)))
+        _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
+    else {
+        uts_once = 0;
+        THR_GETCONTEXT(&curthread->tmbx.tm_context);
+        if (uts_once == 0) {
+            uts_once = 1;
+
+            /* Switchout the current thread. */
+            kse_switchout_thread(curkse, curthread);
+
+            /* Choose another thread to run. */
+            td = KSE_RUNQ_FIRST(curkse);
+            KSE_RUNQ_REMOVE(curkse, td);
+            curkse->k_curthread = td;
+
+            /*
+             * Make sure the current thread's kse points to
+             * this kse.
+             */
+            td->kse = curkse;
+
+            /*
+             * Reset accounting.
+             */
+            td->tmbx.tm_uticks = 0;
+            td->tmbx.tm_sticks = 0;
+
+            /*
+             * Reset the time slice if this thread is running
+             * for the first time or running again after using
+             * its full time slice allocation.
+             */
+            if (td->slice_usec == -1)
+                td->slice_usec = 0;
+
+            /* Mark the thread active. */
+            td->active = 1;
+
+            /* Remove the frame reference. */
+            td->curframe = NULL;
+
+            /*
+             * Continue the thread at its current frame:
+             */
+            ret = _thread_switch(&td->tmbx, NULL);
+            /* This point should not be reached. */
+            if (ret != 0)
+                PANIC("Bad return from _thread_switch");
+            PANIC("Thread has returned from _thread_switch");
+        }
+    }
+
+    if (curthread->lock_switch != 0) {
+        /*
+         * Unlock the scheduling queue and leave the
+         * critical region.
+         */
+        /* Don't trust this after a switch! */
+        curkse = _get_curkse();
+
+        curthread->lock_switch = 0;
+        KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+        _kse_critical_leave(&curthread->tmbx);
+    }

     /*
      * This thread is being resumed; check for cancellations.
      */
-    if ((once == 0) && (!THR_IN_CRITICAL(curthread))) {
-        once = 1;
-        curthread->critical_count++;
-        THR_UNLOCK_SWITCH(curthread);
-        curthread->critical_count--;
+    if ((resume_once == 0) && (!THR_IN_CRITICAL(curthread))) {
+        resume_once = 1;
         thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf);
-        THR_LOCK_SWITCH(curthread);
     }
+
+    THR_ACTIVATE_LAST_LOCK(curthread);
 }

 /*
@@ -743,12 +803,10 @@ kse_sched_multi(struct kse *curkse)
         KSE_CLEAR_WAIT(curkse);
     }

+    /* Lock the scheduling lock. */
     curthread = curkse->k_curthread;
-    if (curthread == NULL || curthread->lock_switch == 0) {
-        /*
-         * curthread was preempted by upcall, it is not a volunteer
-         * context switch. Lock the scheduling lock.
-         */
+    if ((curthread == NULL) || (curthread->need_switchout == 0)) {
+        /* This is an upcall; take the scheduler lock. */
         KSE_SCHED_LOCK(curkse, curkse->k_kseg);
     }

@@ -798,14 +856,9 @@ kse_sched_multi(struct kse *curkse)
         DBG_MSG("Continuing thread %p in critical region\n",
             curthread);
         kse_wakeup_multi(curkse);
-        if (curthread->lock_switch) {
-            KSE_SCHED_LOCK(curkse, curkse->k_kseg);
-            ret = _thread_switch(&curthread->tmbx, 0);
-        } else {
-            KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-            ret = _thread_switch(&curthread->tmbx,
-                &curkse->k_mbx.km_curthread);
-        }
+        KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+        ret = _thread_switch(&curthread->tmbx,
+            &curkse->k_mbx.km_curthread);
         if (ret != 0)
             PANIC("Can't resume thread in critical region\n");
     }

@@ -895,9 +948,6 @@ kse_sched_multi(struct kse *curkse)

     kse_wakeup_multi(curkse);

-    /* Unlock the scheduling queue: */
-    KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-
     /*
      * The thread's current signal frame will only be NULL if it
      * is being resumed after being blocked in the kernel.  In
     *
@@ -906,25 +956,30 @@ kse_sched_multi(struct kse *curkse)
      * signal frame to the thread's context.
      */
 #ifdef NOT_YET
-    if ((curframe == NULL) && ((curthread->check_pending != 0) ||
+    if ((curframe == NULL) && ((curthread->have_signals != 0) ||
         (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
         ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))))
         signalcontext(&curthread->tmbx.tm_context, 0,
             (__sighandler_t *)thr_resume_wrapper);
 #else
-    if ((curframe == NULL) && (curthread->check_pending != 0))
+    if ((curframe == NULL) && (curthread->have_signals != 0))
         signalcontext(&curthread->tmbx.tm_context, 0,
             (__sighandler_t *)thr_resume_wrapper);
 #endif
     /*
      * Continue the thread at its current frame:
      */
-    if (curthread->lock_switch) {
-        KSE_SCHED_LOCK(curkse, curkse->k_kseg);
-        ret = _thread_switch(&curthread->tmbx, 0);
+    if (curthread->lock_switch != 0) {
+        /*
+         * This thread came from a scheduler switch; it will
+         * unlock the scheduler lock and set the mailbox.
+         */
+        ret = _thread_switch(&curthread->tmbx, NULL);
     } else {
+        /* This thread won't unlock the scheduler lock. */
+        KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
         ret = _thread_switch(&curthread->tmbx,
-            &curkse->k_mbx.km_curthread);
+            &curkse->k_mbx.km_curthread);
     }
     if (ret != 0)
         PANIC("Thread has returned from _thread_switch");

@@ -977,9 +1032,9 @@ thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
     struct pthread_sigframe *psf)
 {
     /* Check signals before cancellations. */
-    while (curthread->check_pending != 0) {
+    while (curthread->have_signals != 0) {
         /* Clear the pending flag. */
-        curthread->check_pending = 0;
+        curthread->have_signals = 0;

         /*
          * It's perfectly valid, though not portable, for

@@ -1262,6 +1317,11 @@ kse_check_completed(struct kse *kse)
                 THR_SET_STATE(thread, PS_SUSPENDED);
             else
                 KSE_RUNQ_INSERT_TAIL(kse, thread);
+            if ((thread->kse != kse) &&
+                (thread->kse->k_curthread == thread)) {
+                thread->kse->k_curthread = NULL;
+                thread->active = 0;
+            }
         }
         completed = completed->tm_next;
     }

@@ -1360,12 +1420,15 @@ static void
 kse_switchout_thread(struct kse *kse, struct pthread *thread)
 {
     int level;
+    int i;

     /*
      * Place the currently running thread into the
      * appropriate queue(s).
      */
     DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state);
+
+    THR_DEACTIVATE_LAST_LOCK(thread);
     if (thread->blocked != 0) {
         thread->active = 0;
         thread->need_switchout = 0;

@@ -1473,6 +1536,15 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
     }
     thread->active = 0;
     thread->need_switchout = 0;
+    if (thread->check_pending != 0) {
+        /* Install pending signals into the frame. */
+        thread->check_pending = 0;
+        for (i = 0; i < _SIG_MAXSIG; i++) {
+            if (sigismember(&thread->sigpend, i) &&
+                !sigismember(&thread->tmbx.tm_context.uc_sigmask, i))
+                _thr_sig_add(thread, i, &thread->siginfo[i]);
+        }
+    }
 }

 /*
@@ -1584,37 +1656,6 @@ kse_fini(struct kse *kse)
 }

 void
-_thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp)
-{
-    struct kse *curkse;
-
-    curkse = _get_curkse();
-
-    KSE_SCHED_LOCK(curkse, thread->kseg);
-    /*
-     * A threads assigned KSE can't change out from under us
-     * when we hold the scheduler lock.
-     */
-    if (THR_IS_ACTIVE(thread)) {
-        /* Thread is active.  Can't install the signal for it. */
-        /* Make a note in the thread that it has a signal. */
-        sigaddset(&thread->sigpend, sig);
-        thread->check_pending = 1;
-    }
-    else {
-        /* Make a note in the thread that it has a signal. */
-        sigaddset(&thread->sigpend, sig);
-        thread->check_pending = 1;
-
-        if (thread->blocked != 0) {
-            /* Tell the kernel to interrupt the thread. */
-            kse_thr_interrupt(&thread->tmbx);
-        }
-    }
-    KSE_SCHED_UNLOCK(curkse, thread->kseg);
-}
-
-void
 _thr_set_timeout(const struct timespec *timeout)
 {
     struct pthread *curthread = _get_curthread();

@@ -1675,14 +1716,14 @@ _thr_setrunnable_unlocked(struct pthread *thread)
             THR_SET_STATE(thread, PS_SUSPENDED);
         else
             THR_SET_STATE(thread, PS_RUNNING);
-    }else if (thread->state != PS_RUNNING) {
+    } else if (thread->state != PS_RUNNING) {
         if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0)
             KSE_WAITQ_REMOVE(thread->kse, thread);
         if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
             THR_SET_STATE(thread, PS_SUSPENDED);
         else {
             THR_SET_STATE(thread, PS_RUNNING);
-            if ((thread->blocked == 0) &&
+            if ((thread->blocked == 0) && (thread->active == 0) &&
                 (thread->flags & THR_FLAGS_IN_RUNQ) == 0)
                 THR_RUNQ_INSERT_TAIL(thread);
         }
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index bb97db1..4e3e79b 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -500,21 +500,20 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
              */
             mutex_queue_enq(*m, curthread);
             curthread->data.mutex = *m;
-
             /*
              * This thread is active and is in a critical
              * region (holding the mutex lock); we should
              * be able to safely set the state.
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index bb97db1..4e3e79b 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -500,21 +500,20 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
 			 */
 			mutex_queue_enq(*m, curthread);
 			curthread->data.mutex = *m;
-
 			/*
 			 * This thread is active and is in a critical
 			 * region (holding the mutex lock); we should
 			 * be able to safely set the state.
 			 */
-			THR_LOCK_SWITCH(curthread);
+			THR_SCHED_LOCK(curthread, curthread);
 			THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+			THR_SCHED_UNLOCK(curthread, curthread);
 
 			/* Unlock the mutex structure: */
 			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
 
 			/* Schedule the next thread: */
 			_thr_sched_switch(curthread);
-			THR_UNLOCK_SWITCH(curthread);
 		}
 		break;
 
@@ -570,14 +569,15 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
 			/* Adjust priorities: */
 			mutex_priority_adjust(curthread, *m);
 
-			THR_LOCK_SWITCH(curthread);
+			THR_SCHED_LOCK(curthread, curthread);
 			THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+			THR_SCHED_UNLOCK(curthread, curthread);
+
 			/* Unlock the mutex structure: */
 			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
 
 			/* Schedule the next thread: */
 			_thr_sched_switch(curthread);
-			THR_UNLOCK_SWITCH(curthread);
 		}
 		break;
 
@@ -643,15 +643,15 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
 			 * be able to safely set the state.
 			 */
-			THR_LOCK_SWITCH(curthread);
+			THR_SCHED_LOCK(curthread, curthread);
 			THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+			THR_SCHED_UNLOCK(curthread, curthread);
 
 			/* Unlock the mutex structure: */
 			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
 
 			/* Schedule the next thread: */
 			_thr_sched_switch(curthread);
-			THR_UNLOCK_SWITCH(curthread);
 
 			/*
 			 * The threads priority may have changed while
 			 * waiting for the mutex causing a ceiling
@@ -822,15 +822,15 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
 		 * deadlock on attempts to get a lock you already own.
 		 */
-		THR_LOCK_SWITCH(curthread);
+		THR_SCHED_LOCK(curthread, curthread);
 		THR_SET_STATE(curthread, PS_DEADLOCK);
+		THR_SCHED_UNLOCK(curthread, curthread);
 
 		/* Unlock the mutex structure: */
 		THR_LOCK_RELEASE(curthread, &m->m_lock);
 
 		/* Schedule the next thread: */
 		_thr_sched_switch(curthread);
-		THR_UNLOCK_SWITCH(curthread);
 		break;
 
 	case PTHREAD_MUTEX_RECURSIVE:
diff --git a/lib/libpthread/thread/thr_nanosleep.c b/lib/libpthread/thread/thr_nanosleep.c
index e83f4f4..7266286 100644
--- a/lib/libpthread/thread/thr_nanosleep.c
+++ b/lib/libpthread/thread/thr_nanosleep.c
@@ -62,12 +62,10 @@ _nanosleep(const struct timespec *time_to_sleep,
 		THR_LOCK_SWITCH(curthread);
 		curthread->interrupted = 0;
-
 		THR_SET_STATE(curthread, PS_SLEEP_WAIT);
 
 		/* Reschedule the current thread to sleep: */
-		_thr_sched_switch(curthread);
-		THR_UNLOCK_SWITCH(curthread);
+		_thr_sched_switch_unlocked(curthread);
 
 		/* Calculate the remaining time to sleep: */
 		KSE_GET_TOD(curthread->kse, &ts1);
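Note: all of the blocking paths above now follow one sequence: take the thread's own scheduling lock just long enough to set the wait state, drop it, release the synchronization object's low-level lock, and only then call the scheduler. A compilable miniature of that ordering follows; sched_lock, obj_lock and block_on_object() are illustrative names, not the libpthread ones.

#include <pthread.h>

enum thread_state { RUNNING, MUTEX_WAIT };

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static enum thread_state state = RUNNING;

static void
sched_switch(void)
{
	/* placeholder for _thr_sched_switch(): run the next thread */
}

static void
block_on_object(void)
{
	pthread_mutex_lock(&obj_lock);
	/* ...enqueue ourselves on the object's wait queue... */

	pthread_mutex_lock(&sched_lock);	/* THR_SCHED_LOCK */
	state = MUTEX_WAIT;			/* THR_SET_STATE */
	pthread_mutex_unlock(&sched_lock);	/* THR_SCHED_UNLOCK */

	pthread_mutex_unlock(&obj_lock);	/* THR_LOCK_RELEASE */
	sched_switch();				/* _thr_sched_switch */
}

int
main(void)
{
	block_on_object();
	return (state == MUTEX_WAIT ? 0 : 1);
}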
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index f1b7fd9..a6e8653 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -662,11 +662,12 @@ struct pthread {
 	sigset_t		sigpend;
 	int			sigmask_seqno;
 	int			check_pending;
+	int			have_signals;
 	int			refcount;
 
 	/* Thread state: */
 	enum pthread_state	state;
-	int			lock_switch;
+	volatile int		lock_switch;
 
 	/*
 	 * Number of microseconds accumulated by this thread when
@@ -812,11 +813,8 @@ struct pthread {
 #define	THR_YIELD_CHECK(thrd) \
 do { \
 	if (((thrd)->critical_yield != 0) && \
-	    !(THR_IN_CRITICAL(thrd))) { \
-		THR_LOCK_SWITCH(thrd); \
+	    !(THR_IN_CRITICAL(thrd))) \
 		_thr_sched_switch(thrd); \
-		THR_UNLOCK_SWITCH(thrd); \
-	} \
 	else if (((thrd)->check_pending != 0) && \
 	    !(THR_IN_CRITICAL(thrd))) \
 		_thr_sig_check_pending(thrd); \
@@ -827,6 +825,7 @@ do { \
 	if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL) \
 		PANIC("Exceeded maximum lock level"); \
 	else { \
+		THR_DEACTIVATE_LAST_LOCK(thrd); \
 		(thrd)->locklevel++; \
 		_lock_acquire((lck), \
 		    &(thrd)->lockusers[(thrd)->locklevel - 1], \
@@ -840,29 +839,24 @@ do { \
 		_lock_release((lck), \
 		    &(thrd)->lockusers[(thrd)->locklevel - 1]); \
 		(thrd)->locklevel--; \
-		if ((thrd)->lock_switch) \
-			; \
-		else { \
+		THR_ACTIVATE_LAST_LOCK(thrd); \
+		if ((thrd)->locklevel == 0) \
 			THR_YIELD_CHECK(thrd); \
-		} \
 	} \
 } while (0)
 
-#define	THR_LOCK_SWITCH(thrd) \
+#define	THR_ACTIVATE_LAST_LOCK(thrd) \
 do { \
-	THR_ASSERT(!(thrd)->lock_switch, "context switch locked"); \
-	_kse_critical_enter(); \
-	KSE_SCHED_LOCK((thrd)->kse, (thrd)->kseg); \
-	(thrd)->lock_switch = 1; \
+	if ((thrd)->locklevel > 0) \
+		_lockuser_setactive( \
+		    &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \
 } while (0)
 
-#define	THR_UNLOCK_SWITCH(thrd) \
+#define	THR_DEACTIVATE_LAST_LOCK(thrd) \
 do { \
-	THR_ASSERT((thrd)->lock_switch, "context switch not locked"); \
-	THR_ASSERT(_kse_in_critical(), "Er,not in critical region"); \
-	(thrd)->lock_switch = 0; \
-	KSE_SCHED_UNLOCK((thrd)->kse, (thrd)->kseg); \
-	_kse_critical_leave(&thrd->tmbx); \
+	if ((thrd)->locklevel > 0) \
+		_lockuser_setactive( \
+		    &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \
 } while (0)
 
 /*
@@ -937,15 +931,19 @@ do { \
 	_kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
 } while (0)
 
+/* Take the scheduling lock with the intent to call the scheduler. */
+#define	THR_LOCK_SWITCH(curthr) do { \
+	(void)_kse_critical_enter(); \
+	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \
+} while (0)
+
 #define	THR_CRITICAL_ENTER(thr)	(thr)->critical_count++
 #define	THR_CRITICAL_LEAVE(thr)	do { \
 		(thr)->critical_count--; \
 		if (((thr)->critical_yield != 0) && \
 		    ((thr)->critical_count == 0)) { \
 			(thr)->critical_yield = 0; \
-			THR_LOCK_SWITCH(thr); \
 			_thr_sched_switch(thr); \
-			THR_UNLOCK_SWITCH(thr); \
 		} \
 } while (0)
 
@@ -1101,7 +1099,7 @@ int _thr_schedule_add(struct pthread *, struct pthread *);
 void _thr_schedule_remove(struct pthread *, struct pthread *);
 void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
 void _thr_setrunnable_unlocked(struct pthread *thread);
-void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *);
+void _thr_sig_add(struct pthread *, int, siginfo_t *);
 void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
 int _thr_stack_alloc(struct pthread_attr *);
 void _thr_stack_free(struct pthread_attr *);
@@ -1114,6 +1112,7 @@ void _thread_dump_info(void);
 void _thread_printf(int, const char *, ...);
 void _thr_sched_frame(struct pthread_sigframe *);
 void _thr_sched_switch(struct pthread *);
+void _thr_sched_switch_unlocked(struct pthread *);
 void _thr_set_timeout(const struct timespec *);
 void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
 void _thr_sig_check_pending(struct pthread *);
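Note: the new THR_DEACTIVATE_LAST_LOCK/THR_ACTIVATE_LAST_LOCK pair keeps exactly one lockuser per thread marked active: the top of the per-thread lockusers stack, with the previous top deactivated while a nested lock is held (_lock_acquire() and _lock_release() themselves set and clear lr_active for the new top). A toy model of that bookkeeping over a plain array, with illustrative names rather than the lock.h definitions:

#include <assert.h>

#define MAXLEVEL	8

struct lockuser { int active; };	/* models lr_active */

static struct lockuser lockusers[MAXLEVEL];
static int locklevel;

static void
lock_enter(void)
{
	if (locklevel > 0)			/* THR_DEACTIVATE_LAST_LOCK */
		lockusers[locklevel - 1].active = 0;
	locklevel++;
	lockusers[locklevel - 1].active = 1;	/* _lock_acquire() */
}

static void
lock_leave(void)
{
	lockusers[locklevel - 1].active = 0;	/* _lock_release() */
	locklevel--;
	if (locklevel > 0)			/* THR_ACTIVATE_LAST_LOCK */
		lockusers[locklevel - 1].active = 1;
}

int
main(void)
{
	lock_enter();
	lock_enter();	/* nested: the outer lockuser goes inactive */
	assert(lockusers[0].active == 0 && lockusers[1].active == 1);
	lock_leave();
	assert(lockusers[0].active == 1);
	lock_leave();
	assert(locklevel == 0);
	return (0);
}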
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index ba31073..3ec0eca 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -45,7 +45,7 @@
 /* Prototypes: */
 static void	build_siginfo(siginfo_t *info, int signo);
-static void	thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info);
+/* static void	thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info); */
 static void	thr_sig_check_state(struct pthread *pthread, int sig);
 static struct pthread *thr_sig_find(struct kse *curkse, int sig,
 		    siginfo_t *info);
@@ -158,7 +158,7 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
 		 */
 		DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread);
 		KSE_SCHED_LOCK(curkse, thread->kseg);
-		thr_sig_add(thread, sig, info);
+		_thr_sig_add(thread, sig, info);
 		KSE_SCHED_UNLOCK(curkse, thread->kseg);
 	}
 }
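Note: in the rewritten _thr_sig_add() that follows, the caller may pass a pointer into the thread's own siginfo[] slot (kse_switchout_thread() does exactly that), so the copy is now guarded: build a siginfo when none is supplied, and memcpy only when source and destination differ, since memcpy on overlapping objects is undefined. A minimal standalone version of that guard; siginfo_tab and save_siginfo() are hypothetical names.

#include <signal.h>
#include <string.h>

#ifndef NSIG
#define NSIG	64
#endif

static siginfo_t siginfo_tab[NSIG];	/* models thread->siginfo[] */

static void
build_siginfo(siginfo_t *info, int signo)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = signo;
}

static void
save_siginfo(int sig, const siginfo_t *info)
{
	if (info == NULL)
		build_siginfo(&siginfo_tab[sig], sig);
	else if (info != &siginfo_tab[sig])
		memcpy(&siginfo_tab[sig], info, sizeof(*info));
	/* else: the caller passed our own slot; nothing to copy */
}

int
main(void)
{
	siginfo_t si;

	build_siginfo(&si, SIGUSR1);
	save_siginfo(SIGUSR1, &si);			/* copies */
	save_siginfo(SIGUSR2, NULL);			/* builds in place */
	save_siginfo(SIGUSR1, &siginfo_tab[SIGUSR1]);	/* self: skipped */
	return (0);
}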
@@ -571,146 +571,138 @@ handle_special_signals(struct kse *curkse, int sig)
 *
 * This must be called with the thread's scheduling lock held.
 */
-static void
-thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
+void
+_thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
 {
 	int	restart;
 	int	suppress_handler = 0;
 
-	restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+	if (pthread->curframe == NULL) {
+		/*
+		 * This thread is active.  Just add it to the
+		 * thread's pending set.
+		 */
+		sigaddset(&pthread->sigpend, sig);
+		pthread->check_pending = 1;
+		if (info == NULL)
+			build_siginfo(&pthread->siginfo[sig], sig);
+		else if (info != &pthread->siginfo[sig])
+			memcpy(&pthread->siginfo[sig], info,
+			    sizeof(*info));
+		if ((pthread->blocked != 0) && !THR_IN_CRITICAL(pthread))
+			kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
+	}
+	else {
+		restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
 
-	/* Make sure this signal isn't still in the pending set: */
-	sigdelset(&pthread->sigpend, sig);
+		/* Make sure this signal isn't still in the pending set: */
+		sigdelset(&pthread->sigpend, sig);
 
-	/*
-	 * Process according to thread state:
-	 */
-	switch (pthread->state) {
-	/*
-	 * States which do not change when a signal is trapped:
-	 */
-	case PS_DEAD:
-	case PS_DEADLOCK:
-	case PS_LOCKWAIT:
-	case PS_SUSPENDED:
-	case PS_STATE_MAX:
 		/*
-		 * You can't call a signal handler for threads in these
-		 * states.
+		 * Process according to thread state:
 		 */
-		suppress_handler = 1;
-		break;
-
-	/*
-	 * States which do not need any cleanup handling when signals
-	 * occur:
-	 */
-	case PS_RUNNING:
+		switch (pthread->state) {
 		/*
-		 * Remove the thread from the queue before changing its
-		 * priority:
+		 * States which do not change when a signal is trapped:
 		 */
-		if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
-			THR_RUNQ_REMOVE(pthread);
-		else {
+		case PS_DEAD:
+		case PS_DEADLOCK:
+		case PS_LOCKWAIT:
+		case PS_SUSPENDED:
+		case PS_STATE_MAX:
 			/*
-			 * This thread is active; add the signal to the
-			 * pending set and mark it as having pending
-			 * signals.
+			 * You can't call a signal handler for threads in these
+			 * states.
 			 */
 			suppress_handler = 1;
-			sigaddset(&pthread->sigpend, sig);
-			build_siginfo(&pthread->siginfo[sig], sig);
-			pthread->check_pending = 1;
-			if ((pthread->blocked != 0) &&
-			    !THR_IN_CRITICAL(pthread))
-				kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
-		}
-		break;
+			break;
 
-	/*
-	 * States which cannot be interrupted but still require the
-	 * signal handler to run:
-	 */
-	case PS_COND_WAIT:
-	case PS_MUTEX_WAIT:
 		/*
-		 * Remove the thread from the wait queue.  It will
-		 * be added back to the wait queue once all signal
-		 * handlers have been invoked.
+		 * States which do not need any cleanup handling when signals
+		 * occur:
 		 */
-		KSE_WAITQ_REMOVE(pthread->kse, pthread);
-		break;
+		case PS_RUNNING:
+			/*
+			 * Remove the thread from the queue before changing its
+			 * priority:
+			 */
+			if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
+				THR_RUNQ_REMOVE(pthread);
+			break;
 
-	case PS_SLEEP_WAIT:
 		/*
-		 * Unmasked signals always cause sleep to terminate early,
-		 * regardless of SA_RESTART:
+		 * States which cannot be interrupted but still require the
+		 * signal handler to run:
 		 */
-		pthread->interrupted = 1;
-		KSE_WAITQ_REMOVE(pthread->kse, pthread);
-		THR_SET_STATE(pthread, PS_RUNNING);
-		break;
-
-	case PS_JOIN:
-	case PS_SIGSUSPEND:
-		KSE_WAITQ_REMOVE(pthread->kse, pthread);
-		THR_SET_STATE(pthread, PS_RUNNING);
-		break;
+		case PS_COND_WAIT:
+		case PS_MUTEX_WAIT:
+			/*
+			 * Remove the thread from the wait queue.  It will
+			 * be added back to the wait queue once all signal
+			 * handlers have been invoked.
+			 */
+			KSE_WAITQ_REMOVE(pthread->kse, pthread);
+			break;
 
-	case PS_SIGWAIT:
-		/* The signal handler is not called for threads in SIGWAIT. */
-		suppress_handler = 1;
-		/* Wake up the thread if the signal is blocked. */
-		if (sigismember(pthread->data.sigwait, sig)) {
-			/* Return the signal number: */
-			pthread->signo = sig;
+		case PS_SLEEP_WAIT:
+			/*
+			 * Unmasked signals always cause sleep to terminate
+			 * early regardless of SA_RESTART:
+			 */
+			pthread->interrupted = 1;
+			KSE_WAITQ_REMOVE(pthread->kse, pthread);
+			THR_SET_STATE(pthread, PS_RUNNING);
+			break;
 
-			/* Make the thread runnable: */
-			_thr_setrunnable_unlocked(pthread);
-		} else
-			/* Increment the pending signal count. */
-			sigaddset(&pthread->sigpend, sig);
-		break;
-	}
+		case PS_JOIN:
+		case PS_SIGSUSPEND:
+			KSE_WAITQ_REMOVE(pthread->kse, pthread);
+			THR_SET_STATE(pthread, PS_RUNNING);
+			break;
 
-	if (suppress_handler == 0) {
-		if (pthread->curframe == NULL) {
+		case PS_SIGWAIT:
 			/*
-			 * This thread is active.  Just add it to the
-			 * thread's pending set.
+			 * The signal handler is not called for threads in
+			 * SIGWAIT.
 			 */
-			sigaddset(&pthread->sigpend, sig);
-			pthread->check_pending = 1;
-			if (info == NULL)
-				build_siginfo(&pthread->siginfo[sig], sig);
-			else
-				memcpy(&pthread->siginfo[sig], info,
-				    sizeof(*info));
-		} else {
+			suppress_handler = 1;
+			/* Wake up the thread if the signal is blocked. */
+			if (sigismember(pthread->data.sigwait, sig)) {
+				/* Return the signal number: */
+				pthread->signo = sig;
+
+				/* Make the thread runnable: */
+				_thr_setrunnable_unlocked(pthread);
+			} else
+				/* Increment the pending signal count. */
+				sigaddset(&pthread->sigpend, sig);
+			break;
+		}
+
+		if (suppress_handler == 0) {
 			/*
 			 * Setup a signal frame and save the current threads
 			 * state:
 			 */
 			thr_sigframe_add(pthread, sig, info);
-		}
 
-		if (pthread->state != PS_RUNNING)
-			THR_SET_STATE(pthread, PS_RUNNING);
+			if (pthread->state != PS_RUNNING)
+				THR_SET_STATE(pthread, PS_RUNNING);
 
-		/*
-		 * The thread should be removed from all scheduling
-		 * queues at this point.  Raise the priority and
-		 * place the thread in the run queue.  It is also
-		 * possible for a signal to be sent to a suspended
-		 * thread, mostly via pthread_kill().  If a thread
-		 * is suspended, don't insert it into the priority
-		 * queue; just set its state to suspended and it
-		 * will run the signal handler when it is resumed.
-		 */
-		pthread->active_priority |= THR_SIGNAL_PRIORITY;
-		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
-			THR_RUNQ_INSERT_TAIL(pthread);
+			/*
+			 * The thread should be removed from all scheduling
+			 * queues at this point.  Raise the priority and
+			 * place the thread in the run queue.  It is also
+			 * possible for a signal to be sent to a suspended
+			 * thread, mostly via pthread_kill().  If a thread
+			 * is suspended, don't insert it into the priority
+			 * queue; just set its state to suspended and it
+			 * will run the signal handler when it is resumed.
+			 */
+			pthread->active_priority |= THR_SIGNAL_PRIORITY;
+			if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
+				THR_RUNQ_INSERT_TAIL(pthread);
+		}
 	}
 }
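Note: the switch in the hunk above decides only whether the handler frame may be pushed. Reduced to a predicate, the suppressed states are the ones below; PS_SIGWAIT is special-cased in the hunk because sigwait() consumes the signal instead of running a handler. The enum mirrors the pthread_state values used above, but the function is an illustration, not library code.

#include <stdio.h>

enum pthread_state {
	PS_RUNNING, PS_COND_WAIT, PS_MUTEX_WAIT, PS_SLEEP_WAIT,
	PS_JOIN, PS_SIGSUSPEND, PS_SIGWAIT, PS_DEAD, PS_DEADLOCK,
	PS_LOCKWAIT, PS_SUSPENDED, PS_STATE_MAX
};

static int
suppress_handler(enum pthread_state s)
{
	switch (s) {
	case PS_DEAD:
	case PS_DEADLOCK:
	case PS_LOCKWAIT:
	case PS_SUSPENDED:
	case PS_STATE_MAX:
	case PS_SIGWAIT:	/* sigwait() consumes the signal itself */
		return (1);
	default:		/* all other states run the handler */
		return (0);
	}
}

int
main(void)
{
	printf("PS_SLEEP_WAIT suppressed? %d\n", suppress_handler(PS_SLEEP_WAIT));
	printf("PS_SIGWAIT suppressed?    %d\n", suppress_handler(PS_SIGWAIT));
	return (0);
}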
@@ -834,7 +826,7 @@ _thr_sig_send(struct pthread *pthread, int sig)
 			 * Perform any state changes due to signal
 			 * arrival:
 			 */
-			thr_sig_add(pthread, sig, NULL);
+			_thr_sig_add(pthread, sig, NULL);
 			THR_SCHED_UNLOCK(curthread, pthread);
 		}
 	}
@@ -846,20 +838,20 @@ thr_sigframe_add(struct pthread *thread, int sig, siginfo_t *info)
 	if (thread->curframe == NULL)
 		PANIC("Thread doesn't have signal frame ");
 
-	if (thread->check_pending == 0) {
+	if (thread->have_signals == 0) {
 		/*
 		 * Multiple signals can be added to the same signal
 		 * frame.  Only save the thread's state the first time.
 		 */
 		thr_sigframe_save(thread, thread->curframe);
-		thread->check_pending = 1;
+		thread->have_signals = 1;
 		thread->flags &= THR_FLAGS_PRIVATE;
 	}
 	sigaddset(&thread->curframe->psf_sigset, sig);
-	if (info != NULL)
-		memcpy(&thread->siginfo[sig], info, sizeof(*info));
-	else
+	if (info == NULL)
 		build_siginfo(&thread->siginfo[sig], sig);
+	else if (info != &thread->siginfo[sig])
+		memcpy(&thread->siginfo[sig], info, sizeof(*info));
 
 	/* Setup the new signal mask. */
 	SIGSETOR(thread->tmbx.tm_context.uc_sigmask,
diff --git a/lib/libpthread/thread/thr_sigsuspend.c b/lib/libpthread/thread/thr_sigsuspend.c
index 9ada1b2..5916156 100644
--- a/lib/libpthread/thread/thr_sigsuspend.c
+++ b/lib/libpthread/thread/thr_sigsuspend.c
@@ -57,9 +57,7 @@ _sigsuspend(const sigset_t *set)
 		THR_SET_STATE(curthread, PS_SIGSUSPEND);
 
 		/* Wait for a signal: */
-		_thr_sched_switch(curthread);
-
-		THR_UNLOCK_SWITCH(curthread);
+		_thr_sched_switch_unlocked(curthread);
 
 		/* Always return an interrupted error: */
 		errno = EINTR;
diff --git a/lib/libpthread/thread/thr_sigwait.c b/lib/libpthread/thread/thr_sigwait.c
index 9bb4285..c8c7762 100644
--- a/lib/libpthread/thread/thr_sigwait.c
+++ b/lib/libpthread/thread/thr_sigwait.c
@@ -136,8 +136,7 @@ _sigwait(const sigset_t *set, int *sig)
 		/* Wait for a signal: */
 		THR_LOCK_SWITCH(curthread);
 		THR_SET_STATE(curthread, PS_SIGWAIT);
-		_thr_sched_switch(curthread);
-		THR_UNLOCK_SWITCH(curthread);
+		_thr_sched_switch_unlocked(curthread);
 
 		/* Return the signal number to the caller: */
 		*sig = curthread->signo;
diff --git a/lib/libpthread/thread/thr_yield.c b/lib/libpthread/thread/thr_yield.c
index dfe7278..b41072f 100644
--- a/lib/libpthread/thread/thr_yield.c
+++ b/lib/libpthread/thread/thr_yield.c
@@ -46,9 +46,7 @@ _sched_yield(void)
 	curthread->slice_usec = -1;
 
 	/* Schedule the next thread: */
-	THR_LOCK_SWITCH(curthread);
 	_thr_sched_switch(curthread);
-	THR_UNLOCK_SWITCH(curthread);
 
 	/* Always return no error. */
 	return(0);
@@ -63,7 +61,5 @@ _pthread_yield(void)
 	curthread->slice_usec = -1;
 
 	/* Schedule the next thread: */
-	THR_LOCK_SWITCH(curthread);
 	_thr_sched_switch(curthread);
-	THR_UNLOCK_SWITCH(curthread);
 }
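Note: after this change the library consistently pairs a locking entry point with an _unlocked one. sched_yield()-style callers let _thr_sched_switch() take the scheduling lock itself, while sigsuspend()/sigwait()/nanosleep() set their wait state under THR_LOCK_SWITCH and then call _thr_sched_switch_unlocked(). The shape of that convention in a self-contained sketch; all names here are illustrative, not the library's implementation.

#include <pthread.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static int wait_state;

static void
sched_switch_unlocked(void)
{
	/* Entered with sched_lock held; the switch path drops it. */
	pthread_mutex_unlock(&sched_lock);
}

static void
sched_switch(void)
{
	pthread_mutex_lock(&sched_lock);	/* THR_LOCK_SWITCH */
	sched_switch_unlocked();
}

static void
sigwait_style_caller(void)
{
	pthread_mutex_lock(&sched_lock);	/* THR_LOCK_SWITCH */
	wait_state = 1;		/* THR_SET_STATE(..., PS_SIGWAIT) */
	sched_switch_unlocked();
}

int
main(void)
{
	sched_switch();			/* sched_yield()-style caller */
	sigwait_style_caller();		/* sigwait()-style caller */
	return (0);
}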