Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c     |   1
-rw-r--r--  sys/kern/kern_condvar.c  |  20
-rw-r--r--  sys/kern/kern_fork.c     |   1
-rw-r--r--  sys/kern/kern_kse.c      | 514
-rw-r--r--  sys/kern/kern_sig.c      |   5
-rw-r--r--  sys/kern/kern_switch.c   | 243
-rw-r--r--  sys/kern/kern_synch.c    |  17
-rw-r--r--  sys/kern/kern_thread.c   | 514
-rw-r--r--  sys/kern/sched_4bsd.c    |   2
-rw-r--r--  sys/kern/subr_trap.c     |   1
10 files changed, 699 insertions, 619 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 10b3a4b..e49ce08 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -380,6 +380,7 @@ proc0_init(void *dummy __unused)
ke->ke_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
+ ke->ke_owner = td;
p->p_peers = 0;
p->p_leader = p;
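Note: this hunk, together with the kern_fork.c and kse_link() hunks below, introduces ke_owner: every KSE now records the thread that owns it, replacing the old ke_bound field. A minimal sketch of the invariant these initialization paths establish; kse_assert_owner() is a hypothetical helper, not part of the change:

	static __inline void
	kse_assert_owner(struct kse *ke)
	{
		/*
		 * Right after proc0_init() or fork1() wire things up, the
		 * KSE's owner and its currently attached thread coincide.
		 */
		KASSERT(ke->ke_owner == ke->ke_thread,
		    ("kse_assert_owner: owner/thread mismatch"));
	}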
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index e2bb544..d36b0c1 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -116,26 +116,6 @@ cv_destroy(struct cv *cvp)
static __inline void
cv_switch(struct thread *td)
{
-
- /*
- * If we are capable of async syscalls and there isn't already
- * another one ready to return, start a new thread
- * and queue it as ready to run. Note that there is danger here
- * because we need to make sure that we don't sleep allocating
- * the thread (recursion here might be bad).
- * Hence the TDF_INMSLEEP flag.
- */
- if ((td->td_flags & (TDF_UNBOUND|TDF_INMSLEEP)) == TDF_UNBOUND) {
- /*
- * We don't need to upcall now, just queue it.
- * The upcall will happen when other n-kernel work
- * in this SKEGRP has completed.
- * Don't recurse here!
- */
- td->td_flags |= TDF_INMSLEEP;
- thread_schedule_upcall(td, td->td_kse);
- td->td_flags &= ~TDF_INMSLEEP;
- }
TD_SET_SLEEPING(td);
td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
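The block removed above is the old guard-flag idiom: cv_switch() itself readied an upcall, with TDF_INMSLEEP preventing recursion while the upcall setup might sleep. After this change that responsibility moves to kse_reassign(). A rough sketch of the guard-flag pattern in generic C; WANTS_UPCALL, IN_UPCALL_SETUP and schedule_upcall() are illustrative names only, not kernel symbols:

	/*
	 * Set a marker bit around a step that could otherwise re-enter this
	 * same path, e.g. because setting up the upcall may allocate and
	 * therefore sleep.
	 */
	if ((flags & (WANTS_UPCALL | IN_UPCALL_SETUP)) == WANTS_UPCALL) {
		flags |= IN_UPCALL_SETUP;	/* block recursion */
		schedule_upcall();
		flags &= ~IN_UPCALL_SETUP;	/* allow it again */
	}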
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index c63a839..1a8e4d2 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -492,6 +492,7 @@ again:
/* Set up the thread as an active thread (as if runnable). */
ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
+ ke2->ke_owner = td2;
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 22f173e..8434c68 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -197,6 +197,7 @@ kse_link(struct kse *ke, struct ksegrp *kg)
ke->ke_state = KES_UNQUEUED;
ke->ke_proc = p;
ke->ke_ksegrp = kg;
+ ke->ke_owner = NULL;
ke->ke_thread = NULL;
ke->ke_oncpu = NOCPU;
}
@@ -208,10 +209,6 @@ kse_unlink(struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
- if (ke->ke_state == KES_IDLE) {
- kg->kg_idle_kses--;
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- }
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
if (--kg->kg_kses == 0) {
@@ -231,14 +228,12 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_iq); /* idle kses in ksegrp */
TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
kg->kg_proc = p;
/* the following counters are in the -zero- section and may not need clearing */
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
kg->kg_kses = 0;
- kg->kg_idle_kses = 0;
kg->kg_loan_kses = 0;
kg->kg_runq_kses = 0; /* XXXKSE change name */
/* link it in now that it's consistent */
@@ -351,7 +346,8 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
}
/*
- * Either returns as an upcall or exits
+ * Either becomes an upcall or waits for an awakening event and
+ * THEN becomes an upcall. Only error cases return.
*/
int
kse_release(struct thread * td, struct kse_release_args * uap)
@@ -369,30 +365,24 @@ kse_release(struct thread * td, struct kse_release_args * uap)
(td->td_flags & TDF_UNBOUND) ||
(td->td_kse->ke_mailbox == NULL))
return (EINVAL);
+
PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
+ /* Change OURSELF to become an upcall. */
+ td->td_flags = TDF_UPCALLING; /* BOUND */
if (kg->kg_completed == NULL) {
-#if 1 /* temp until signals make new threads */
- if (p->p_numthreads == 1) {
- /* change OURSELF to become an upcall */
- td->td_flags = TDF_UPCALLING;
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- /*
- * msleep will not call thread_sched_upcall
- * because thread is not UNBOUND.
- */
- msleep(p->p_sigacts, NULL,
- PPAUSE | PCATCH, "ksepause", 0);
- return (0);
- }
-#endif /* end temp */
- thread_exit();
+ /* XXXKSE also look for waiting signals etc. */
+ /*
+ * The KSE will however be lendable.
+ */
+ mtx_lock_spin(&sched_lock);
+ TD_SET_IDLE(td);
+ PROC_UNLOCK(p);
+ p->p_stats->p_ru.ru_nvcsw++;
+ mi_switch();
+ mtx_unlock_spin(&sched_lock);
+ } else {
+ PROC_UNLOCK(p);
}
- /* change OURSELF to become an upcall */
- td->td_flags = TDF_UPCALLING;
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
return (0);
}
@@ -403,45 +393,62 @@ int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
struct proc *p;
- struct kse *ke, *ke2;
+ struct kse *ke;
struct ksegrp *kg;
+ struct thread *td2;
p = td->td_proc;
+ td2 = NULL;
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
return EINVAL;
- ke = NULL;
- mtx_lock_spin(&sched_lock);
+ PROC_LOCK(p);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_KSE_IN_GROUP(kg, ke2) {
- if (ke2->ke_mailbox != uap->mbx)
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ if (ke->ke_mailbox != uap->mbx)
continue;
- if (ke2->ke_state == KES_IDLE) {
- ke = ke2;
- goto found;
- } else {
- mtx_unlock_spin(&sched_lock);
- td->td_retval[0] = 0;
- td->td_retval[1] = 0;
+ td2 = ke->ke_owner ;
+ KASSERT((td2 != NULL),("KSE with no owner"));
+ if (!TD_IS_IDLE(td2)) {
+ /* Return silently if no longer idle */
+ PROC_UNLOCK(p);
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
return (0);
}
+ break;
}
+ if (td2) {
+ break;
+ }
}
} else {
+ /*
+ * look for any idle KSE to resurrect.
+ */
kg = td->td_ksegrp;
- ke = TAILQ_FIRST(&kg->kg_iq);
+ mtx_lock_spin(&sched_lock);
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ td2 = ke->ke_owner;
+ KASSERT((td2 != NULL),("KSE with no owner2"));
+ if (TD_IS_IDLE(td2))
+ break;
+ }
}
- if (ke == NULL) {
+ if (td2) {
+ mtx_lock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ TD_CLR_IDLE(td2);
+ setrunnable(td2);
mtx_unlock_spin(&sched_lock);
- return (ESRCH);
- }
-found:
- thread_schedule_upcall(td, ke);
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+ return (0);
+ }
mtx_unlock_spin(&sched_lock);
- td->td_retval[0] = 0;
- td->td_retval[1] = 0;
- return (0);
+ PROC_UNLOCK(p);
+ return (ESRCH);
}
/*
@@ -810,17 +817,14 @@ thread_export_context(struct thread *td)
addr = (void *)(&td->td_mailbox->tm_context);
#endif
error = copyin(addr, &uc, sizeof(ucontext_t));
- if (error == 0) {
- thread_getcontext(td, &uc);
- error = copyout(&uc, addr, sizeof(ucontext_t));
+ if (error)
+ goto bad;
+
+ thread_getcontext(td, &uc);
+ error = copyout(&uc, addr, sizeof(ucontext_t));
+ if (error)
+ goto bad;
- }
- if (error) {
- PROC_LOCK(p);
- psignal(p, SIGSEGV);
- PROC_UNLOCK(p);
- return (error);
- }
/* get address in latest mbox of list pointer */
#if 0
addr = (caddr_t)td->td_mailbox
@@ -835,6 +839,7 @@ thread_export_context(struct thread *td)
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
if (suword(addr, mbx)) {
+ error = EFAULT;
goto bad;
}
PROC_LOCK(p);
@@ -856,7 +861,7 @@ bad:
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
- return (EFAULT);
+ return (error);
}
/*
@@ -930,8 +935,6 @@ thread_update_uticks(void)
caddr_t addr;
uint uticks, sticks;
- KASSERT(!(td->td_flags & TDF_UNBOUND), ("thread not bound."));
-
if (ke->ke_mailbox == NULL)
return 0;
@@ -939,8 +942,12 @@ thread_update_uticks(void)
ke->ke_uuticks = 0;
sticks = ke->ke_usticks;
ke->ke_usticks = 0;
+#if 0
tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_curthread));
+ + offsetof(struct kse_mailbox, km_curthread));
+#else /* if user pointer arithmetic is ok in the kernel */
+ tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
+#endif
if ((tmbx == NULL) || (tmbx == (void *)-1))
return 0;
if (uticks) {
@@ -1028,18 +1035,21 @@ thread_exit(void)
}
/* Reassign this thread's KSE. */
- ke->ke_thread = NULL;
- td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
- KASSERT((ke->ke_bound != td),
- ("thread_exit: entered with ke_bound set"));
/*
- * decide what to do with the KSE attached to this thread.
+ * Decide what to do with the KSE attached to this thread.
+ * XXX Possibly kse_reassign should do both cases as it already
+ * does some of this.
*/
if (ke->ke_flags & KEF_EXIT) {
+ KASSERT((ke->ke_owner == td),
+ ("thread_exit: KSE exiting with non-owner thread"));
+ ke->ke_thread = NULL;
+ td->td_kse = NULL;
kse_unlink(ke);
} else {
+ TD_SET_EXITING(td); /* definitely not runnable */
kse_reassign(ke);
}
PROC_UNLOCK(p);
@@ -1107,19 +1117,13 @@ thread_link(struct thread *td, struct ksegrp *kg)
void
kse_purge(struct proc *p, struct thread *td)
{
- struct kse *ke;
+ /* XXXKSE think about this..
+ may need to wake up threads on loan queue. */
struct ksegrp *kg;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
mtx_lock_spin(&sched_lock);
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
- while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- kg->kg_kses--;
- kse_stash(ke);
- }
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
@@ -1137,38 +1141,28 @@ kse_purge(struct proc *p, struct thread *td)
/*
* Create a thread and schedule it for upcall on the KSE given.
+ * Use our thread's standin so that we don't have to allocate one.
*/
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
struct thread *td2;
- struct ksegrp *kg;
int newkse;
mtx_assert(&sched_lock, MA_OWNED);
newkse = (ke != td->td_kse);
/*
- * If the kse is already owned by another thread then we can't
- * schedule an upcall because the other thread must be BOUND
- * which means it is not in a position to take an upcall.
- * We must be borrowing the KSE to allow us to complete some in-kernel
- * work. When we complete, the Bound thread will have teh chance to
+ * If the owner and kse are BOUND then that thread is planning to
+ * go to userland and upcalls are not expected. So don't make one.
+ * If it is not bound then make it so with the spare thread
+ and then borrow back the KSE to allow us to complete some in-kernel
+ * work. When we complete, the Bound thread will have the chance to
* complete. This thread will sleep as planned. Hopefully there will
* eventually be un unbound thread that can be converted to an
* upcall to report the completion of this thread.
*/
- if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
- return (NULL);
- }
- KASSERT((ke->ke_bound == NULL), ("kse already bound"));
- if (ke->ke_state == KES_IDLE) {
- kg = ke->ke_ksegrp;
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- ke->ke_state = KES_UNQUEUED;
- }
if ((td2 = td->td_standin) != NULL) {
td->td_standin = NULL;
} else {
@@ -1206,8 +1200,9 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
td2->td_kse = ke;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
+ ke->ke_owner = td2;
/*
- * If called from msleep(), we are working on the current
+ * If called from kse_reassign(), we are working on the current
* KSE so fake that we borrowed it. If called from
* kse_create(), don't, as we have a new kse too.
*/
@@ -1220,10 +1215,8 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
* from msleep() this is going to be "very soon" in nearly
* all cases.
*/
- ke->ke_bound = td2;
TD_SET_LOAN(td2);
} else {
- ke->ke_bound = NULL;
ke->ke_thread = td2;
ke->ke_state = KES_THREAD;
setrunqueue(td2);
@@ -1292,10 +1285,11 @@ thread_user_enter(struct proc *p, struct thread *td)
/*
* If we are doing a syscall in a KSE environment,
* note where our mailbox is. There is always the
- * possibility that we could do this lazily (in sleep()),
+ * possibility that we could do this lazily (in kse_reassign()),
* but for now do it every time.
*/
ke = td->td_kse;
+ td->td_flags &= ~TDF_UNBOUND;
if (ke->ke_mailbox != NULL) {
#if 0
td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
@@ -1308,7 +1302,7 @@ thread_user_enter(struct proc *p, struct thread *td)
(td->td_mailbox == (void *)-1)) {
td->td_mailbox = NULL; /* single thread it.. */
mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_UNBOUND;
+ td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
mtx_unlock_spin(&sched_lock);
} else {
/*
@@ -1324,8 +1318,11 @@ thread_user_enter(struct proc *p, struct thread *td)
td->td_standin = thread_alloc();
}
mtx_lock_spin(&sched_lock);
- td->td_flags |= TDF_UNBOUND;
+ td->td_flags |= TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
+ KASSERT((ke->ke_owner == td),
+ ("thread_user_enter: No starting owner "));
+ ke->ke_owner = td;
td->td_usticks = 0;
}
}
@@ -1350,187 +1347,215 @@ thread_userret(struct thread *td, struct trapframe *frame)
int unbound;
struct kse *ke;
struct ksegrp *kg;
- struct thread *td2;
+ struct thread *worktodo;
struct proc *p;
struct timespec ts;
- error = 0;
+ KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
+ ("thread_userret: bad thread/kse pointers"));
+ KASSERT((td == curthread),
+ ("thread_userret: bad thread argument"));
- unbound = td->td_flags & TDF_UNBOUND;
kg = td->td_ksegrp;
p = td->td_proc;
+ error = 0;
+ unbound = TD_IS_UNBOUND(td);
+
+ mtx_lock_spin(&sched_lock);
+ if ((worktodo = kg->kg_last_assigned))
+ worktodo = TAILQ_NEXT(worktodo, td_runq);
+ else
+ worktodo = TAILQ_FIRST(&kg->kg_runq);
/*
- * Originally bound threads never upcall but they may
+ * Permanently bound threads never upcall but they may
* loan out their KSE at this point.
* Upcalls imply bound.. They also may want to do some Philantropy.
- * Unbound threads on the other hand either yield to other work
- * or transform into an upcall.
- * (having saved their context to user space in both cases)
+ * Temporarily bound threads on the other hand either yield
+ * to other work and transform into an upcall, or proceed back to
+ * userland.
*/
- if (unbound) {
- /*
- * We are an unbound thread, looking to return to
- * user space.
- * THere are several possibilities:
- * 1) we are using a borrowed KSE. save state and exit.
- * kse_reassign() will recycle the kse as needed,
- * 2) we are not.. save state, and then convert ourself
- * to be an upcall, bound to the KSE.
- * if there are others that need the kse,
- * give them a chance by doing an mi_switch().
- * Because we are bound, control will eventually return
- * to us here.
- * ***
- * Save the thread's context, and link it
- * into the KSEGRP's list of completed threads.
- */
+
+ if (TD_CAN_UNBIND(td)) {
+ td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
+ if (!worktodo && (kg->kg_completed == NULL)) {
+ /*
+ * This thread has not started any upcall.
+ * If there is no work to report other than
+ * ourself, then it can return direct to userland.
+ */
+justreturn:
+ mtx_unlock_spin(&sched_lock);
+ thread_update_uticks();
+ td->td_mailbox = NULL;
+ return (0);
+ }
+ mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
- td->td_mailbox = NULL;
td->td_usticks = 0;
if (error) {
/*
- * If we are not running on a borrowed KSE, then
+ * As we are not running on a borrowed KSE,
* failing to do the KSE operation just defaults
* back to synchonous operation, so just return from
- * the syscall. If it IS borrowed, there is nothing
- * we can do. We just lose that context. We
- * probably should note this somewhere and send
- * the process a signal.
+ * the syscall.
*/
- PROC_LOCK(td->td_proc);
- psignal(td->td_proc, SIGSEGV);
- mtx_lock_spin(&sched_lock);
- if (td->td_kse->ke_bound == NULL) {
- td->td_flags &= ~TDF_UNBOUND;
- PROC_UNLOCK(td->td_proc);
- mtx_unlock_spin(&sched_lock);
- thread_update_uticks();
- return (error); /* go sync */
- }
- thread_exit();
+ goto justreturn;
}
-
- /*
- * if the KSE is owned and we are borrowing it,
- * don't make an upcall, just exit so that the owner
- * can get its KSE if it wants it.
- * Our context is already safely stored for later
- * use by the UTS.
- */
- PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
- if (td->td_kse->ke_bound) {
- thread_exit();
- }
- PROC_UNLOCK(p);
-
/*
* Turn ourself into a bound upcall.
* We will rely on kse_reassign()
* to make us run at a later time.
- * We should look just like a sheduled upcall
- * from msleep() or cv_wait().
*/
- td->td_flags &= ~TDF_UNBOUND;
td->td_flags |= TDF_UPCALLING;
- /* Only get here if we have become an upcall */
- } else {
- mtx_lock_spin(&sched_lock);
+ /* there may be more work since we re-locked schedlock */
+ if ((worktodo = kg->kg_last_assigned))
+ worktodo = TAILQ_NEXT(worktodo, td_runq);
+ else
+ worktodo = TAILQ_FIRST(&kg->kg_runq);
+ } else if (unbound) {
+ /*
+ * We are an unbound thread, looking to
+ * return to user space. There must be another owner
+ * of this KSE.
+ * We are using a borrowed KSE. save state and exit.
+ * kse_reassign() will recycle the kse as needed,
+ */
+ mtx_unlock_spin(&sched_lock);
+ error = thread_export_context(td);
+ td->td_usticks = 0;
+ if (error) {
+ /*
+ * There is nothing we can do.
+ * We just lose that context. We
+ * probably should note this somewhere and send
+ * the process a signal.
+ */
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGSEGV);
+ mtx_lock_spin(&sched_lock);
+ ke = td->td_kse;
+ /* possibly upcall with error? */
+ } else {
+ /*
+ * Don't make an upcall, just exit so that the owner
+ * can get its KSE if it wants it.
+ * Our context is already safely stored for later
+ * use by the UTS.
+ */
+ PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
+ ke = td->td_kse;
+ }
+ /*
+ * If the owner is idling, we now have something for it
+ * to report, so make it runnable.
+ * If the owner is not an upcall, make an attempt to
+ * ensure that at least one of any IDLED upcalls can
+ * wake up.
+ */
+ if (ke->ke_owner->td_flags & TDF_UPCALLING) {
+ TD_CLR_IDLE(ke->ke_owner);
+ } else {
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ if (TD_IS_IDLE(ke->ke_owner)) {
+ TD_CLR_IDLE(ke->ke_owner);
+ }
+ }
+ }
+ thread_exit();
}
/*
* We ARE going back to userland with this KSE.
- * Check for threads that need to borrow it.
- * Optimisation: don't call mi_switch if no-one wants the KSE.
+ * We are permanently bound. We may be an upcall.
+ * If an upcall, check for threads that need to borrow the KSE.
* Any other thread that comes ready after this missed the boat.
*/
ke = td->td_kse;
- if ((td2 = kg->kg_last_assigned))
- td2 = TAILQ_NEXT(td2, td_runq);
- else
- td2 = TAILQ_FIRST(&kg->kg_runq);
- if (td2) {
- /*
- * force a switch to more urgent 'in kernel'
- * work. Control will return to this thread
- * when there is no more work to do.
- * kse_reassign() will do tha for us.
- */
- TD_SET_LOAN(td);
- ke->ke_bound = td;
- ke->ke_thread = NULL;
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch(); /* kse_reassign() will (re)find td2 */
- }
- mtx_unlock_spin(&sched_lock);
/*
- * Optimisation:
- * Ensure that we have a spare thread available,
- * for when we re-enter the kernel.
+ * If not upcalling, go back to userspace.
+ * If we are, get the upcall set up.
*/
- if (td->td_standin == NULL) {
- td->td_standin = thread_alloc();
- }
-
- thread_update_uticks();
- /*
- * To get here, we know there is no other need for our
- * KSE so we can proceed. If not upcalling, go back to
- * userspace. If we are, get the upcall set up.
- */
- if ((td->td_flags & TDF_UPCALLING) == 0)
- return (0);
+ if (td->td_flags & TDF_UPCALLING) {
+ if (worktodo) {
+ /*
+ * force a switch to more urgent 'in kernel'
+ * work. Control will return to this thread
+ * when there is no more work to do.
+ * kse_reassign() will do that for us.
+ */
+ TD_SET_LOAN(td); /* XXXKSE may not be needed */
+ p->p_stats->p_ru.ru_nvcsw++;
+ mi_switch(); /* kse_reassign() will (re)find worktodo */
+ }
+ td->td_flags &= ~TDF_UPCALLING;
+ mtx_unlock_spin(&sched_lock);
- /*
- * We must be an upcall to get this far.
- * There is no more work to do and we are going to ride
- * this thead/KSE up to userland as an upcall.
- * Do the last parts of the setup needed for the upcall.
- */
- CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
- td, td->td_proc->p_pid, td->td_proc->p_comm);
+ /*
+ * There is no more work to do and we are going to ride
+ * this thread/KSE up to userland as an upcall.
+ * Do the last parts of the setup needed for the upcall.
+ */
+ CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
+ td, td->td_proc->p_pid, td->td_proc->p_comm);
- /*
- * Set user context to the UTS.
- * Will use Giant in cpu_thread_clean() because it uses
- * kmem_free(kernel_map, ...)
- */
- cpu_set_upcall_kse(td, ke);
+ /*
+ * Set user context to the UTS.
+ * Will use Giant in cpu_thread_clean() because it uses
+ * kmem_free(kernel_map, ...)
+ */
+ cpu_set_upcall_kse(td, ke);
- /*
- * Put any completed mailboxes on this KSE's list.
- */
- error = thread_link_mboxes(kg, ke);
- if (error)
- goto bad;
+ /*
+ * Unhook the list of completed threads.
+ * anything that completes after this gets to
+ * come in next time.
+ * Put the list of completed thread mailboxes on
+ * this KSE's mailbox.
+ */
+ error = thread_link_mboxes(kg, ke);
+ if (error)
+ goto bad;
- /*
- * Set state and mailbox.
- * From now on we are just a bound outgoing process.
- * **Problem** userret is often called several times.
- * it would be nice if this all happenned only on the first time
- * through. (the scan for extra work etc.)
- */
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_UPCALLING;
- mtx_unlock_spin(&sched_lock);
+ /*
+ * Set state and clear the thread mailbox pointer.
+ * From now on we are just a bound outgoing process.
+ * **Problem** userret is often called several times.
+ * It would be nice if this all happened only on the first
+ * time through. (the scan for extra work etc.)
+ */
#if 0
- error = suword((caddr_t)ke->ke_mailbox +
- offsetof(struct kse_mailbox, km_curthread), 0);
+ error = suword((caddr_t)ke->ke_mailbox +
+ offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
- error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
+ error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
- ke->ke_uuticks = ke->ke_usticks = 0;
- if (!error) {
+ ke->ke_uuticks = ke->ke_usticks = 0;
+ if (error)
+ goto bad;
nanotime(&ts);
- if (copyout(&ts, (caddr_t)&ke->ke_mailbox->km_timeofday,
- sizeof(ts))) {
+ if (copyout(&ts,
+ (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
goto bad;
}
+ } else {
+ mtx_unlock_spin(&sched_lock);
}
+ /*
+ * Optimisation:
+ * Ensure that we have a spare thread available,
+ * for when we re-enter the kernel.
+ */
+ if (td->td_standin == NULL) {
+ td->td_standin = thread_alloc();
+ }
+
+ thread_update_uticks();
+ td->td_mailbox = NULL;
return (0);
bad:
@@ -1541,6 +1566,7 @@ bad:
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
+ td->td_mailbox = NULL;
return (error); /* go sync */
}
@@ -1577,9 +1603,10 @@ thread_single(int force_exit)
if (p->p_singlethread)
return (1);
- if (force_exit == SINGLE_EXIT)
+ if (force_exit == SINGLE_EXIT) {
p->p_flag |= P_SINGLE_EXIT;
- else
+ td->td_flags &= ~TDF_UNBOUND;
+ } else
p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
p->p_singlethread = td;
@@ -1601,11 +1628,17 @@ thread_single(int force_exit)
else
abortsleep(td2);
}
+ if (TD_IS_IDLE(td2)) {
+ TD_CLR_IDLE(td2);
+ }
} else {
if (TD_IS_SUSPENDED(td2))
continue;
/* maybe other inhibitted states too? */
- if (TD_IS_SLEEPING(td2))
+ if (td2->td_inhibitors &
+ (TDI_SLEEPING | TDI_SWAPPED |
+ TDI_LOAN | TDI_IDLE |
+ TDI_EXITING))
thread_suspend_one(td2);
}
}
@@ -1707,15 +1740,14 @@ thread_suspend_check(int return_instead)
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
/*
- * free extra kses and ksegrps, we needn't worry
- * about if current thread is in same ksegrp as
- * p_singlethread and last kse in the group
- * could be killed, this is protected by kg_numthreads,
- * in this case, we deduce that kg_numthreads must > 1.
+ * All threads should be exiting
+ * unless they are the active "singlethread".
+ * Destroy unneeded KSEs as we go;
+ * KSEGRPs may implode too as #kses -> 0.
*/
ke = td->td_kse;
- if (ke->ke_bound == NULL &&
- ((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
+ if (ke->ke_owner == td &&
+ (kg->kg_kses >= kg->kg_numthreads ))
ke->ke_flags |= KEF_EXIT;
thread_exit();
}
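Two recurring shapes in the kern_kse.c changes above: thread_export_context() now funnels every failed user copy through a single "bad:" label that posts SIGSEGV and returns the real errno, and kse_release() parks an idle upcall thread with TD_SET_IDLE() plus mi_switch() instead of exiting it. A condensed sketch of the resulting export path, simplified from the hunks above with locking and the completed-list walk omitted:

	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		goto bad;
	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		goto bad;
	/* ... link the completed-thread list into the mailbox ... */
	return (0);
bad:
	/* Any failed user copy is treated as a fault in the UTS mailbox. */
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (error);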
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index a6ecbf8..3329b87 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1525,6 +1525,11 @@ psignal(p, sig)
if (TD_IS_SLEEPING(td) &&
(td->td_flags & TDF_SINTR))
thread_suspend_one(td);
+ else if (TD_IS_IDLE(td)) {
+ thread_suspend_one(td);
+ }
+
+
}
if (p->p_suspcount == p->p_numthreads) {
mtx_unlock_spin(&sched_lock);
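The kern_sig.c hunk makes single-threading on a fatal signal also suspend threads sitting in the new IDLE state (a kse_release() sleeper), not just interruptible sleepers. An equivalent, slightly tighter form of the new check, shown as an illustrative rewrite only and not what the patch commits:

	if ((TD_IS_SLEEPING(td) && (td->td_flags & TDF_SINTR)) ||
	    TD_IS_IDLE(td))
		thread_suspend_one(td);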
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 13497f9..d1d7070 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -110,7 +110,6 @@ static void runq_readjust(struct runq *rq, struct kse *ke);
/************************************************************************
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
-
/*
* Select the KSE that will be run next. From that find the thread, and x
* remove it from the KSEGRP's run queue. If there is thread clustering,
@@ -128,27 +127,12 @@ retry:
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
- if (td->td_flags & TDF_UNBOUND) {
+ if (TD_IS_UNBOUND(td)) {
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
if (kg->kg_last_assigned == td) {
- if (TAILQ_PREV(td, threadqueue, td_runq)
- != NULL)
- printf("Yo MAMA!\n");
kg->kg_last_assigned = TAILQ_PREV(td,
threadqueue, td_runq);
}
- /*
- * If we have started running an upcall,
- * Then TDF_UNBOUND WAS set because the thread was
- * created without a KSE. Now that we have one,
- * and it is our time to run, we make sure
- * that BOUND semantics apply for the rest of
- * the journey to userland, and into the UTS.
- */
-#ifdef NOTYET
- if (td->td_flags & TDF_UPCALLING)
- tdf->td_flags &= ~TDF_UNBOUND;
-#endif
}
kg->kg_runnable--;
CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
@@ -160,18 +144,26 @@ retry:
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
}
ke->ke_flags |= KEF_DIDRUN;
+
+ /*
+ * Only allow non system threads to run in panic
+ * if they are the one we are tracing. (I think.. [JRE])
+ */
if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
(td->td_flags & TDF_INPANIC) == 0))
goto retry;
+
TD_SET_RUNNING(td);
return (td);
}
/*
* Given a KSE (now surplus or at least loanable), either assign a new
- * runable thread to it
- * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
- * Or aybe give it back to its owner if it's been loaned.
+ * runable thread to it (and put it in the run queue) or put it in
+ * the ksegrp's idle KSE list.
+ * Or maybe give it back to its owner if it's been loaned.
+ * Assumes that the original thread is either not runnable or
+ * already on the run queue
*/
void
kse_reassign(struct kse *ke)
@@ -180,16 +172,79 @@ kse_reassign(struct kse *ke)
struct thread *td;
struct thread *owner;
struct thread *original;
+ int loaned;
+ KASSERT((ke->ke_owner), ("reassigning KSE with no owner"));
+ KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)),
+ ("reassigning KSE with no or runnable thread"));
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
- owner = ke->ke_bound;
+ owner = ke->ke_owner;
+ loaned = TD_LENDER(owner);
original = ke->ke_thread;
- KASSERT(!(owner && ((owner->td_kse != ke) ||
- (owner->td_flags & TDF_UNBOUND))),
- ("kse_reassign: bad thread bound state"));
+
+ if (TD_CAN_UNBIND(original) && (original->td_standin)) {
+ KASSERT((owner == original),
+ ("Early thread borrowing?"));
+ /*
+ * The outgoing thread is "threaded" and has never
+ * scheduled an upcall.
+ * decide whether this is a short or long term event
+ * and thus whether or not to schedule an upcall.
+ * if it is a short term event, just suspend it in
+ * a way that takes its KSE with it.
+ * Select the events for which we want to schedule upcalls.
+ * For now it's just sleep.
+ * Other threads that still have not fired an upcall
+ * are held to their KSE using the temporary binding.
+ */
+ if (TD_ON_SLEEPQ(original)) {
+ /*
+ * A bound thread that can still unbind itself
+ * has been scheduled out.
+ * If it is sleeping, then we need to schedule an
+ * upcall.
+ * XXXKSE eventually almost any inhibition could do.
+ */
+ original->td_flags &= ~TDF_CAN_UNBIND;
+ original->td_flags |= TDF_UNBOUND;
+ thread_schedule_upcall(original, ke);
+ owner = ke->ke_owner;
+ loaned = 1;
+ }
+ }
+
+ /*
+ * If the current thread was borrowing, then make things consistent
+ * by giving it back to the owner for the moment. The original thread
+ * must be unbound and have already used its chance for
+ * firing off an upcall. Threads that have not yet made an upcall
+ * can not borrow KSEs.
+ */
+ if (loaned) {
+ KASSERT((original->td_standin == NULL),
+ ("kse_reassign: borrower still has standin thread"));
+ TD_CLR_LOAN(owner);
+ ke->ke_thread = owner;
+ original->td_kse = NULL; /* give it amnesia */
+ /*
+ * Upcalling threads have lower priority than all
+ * in-kernel threads, However threads that have loaned out
+ * their KSE and are NOT upcalling have the priority that
+ * they have. In other words, only look for other work if
+ * the owner is not runnable, OR is upcalling.
+ */
+ if (TD_CAN_RUN(owner) &&
+ ((owner->td_flags & TDF_UPCALLING) == 0)) {
+ setrunnable(owner);
+ CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
+ ke, owner);
+ return;
+ }
+ }
/*
+ * Either the owner is not runnable, or is an upcall.
* Find the first unassigned thread
* If there is a 'last assigned' then see what's next.
* otherwise look at what is first.
@@ -204,77 +259,67 @@ kse_reassign(struct kse *ke)
* If we found one assign it the kse, otherwise idle the kse.
*/
if (td) {
- /*
- * If the original is bound to us we can only be lent out so
- * make a loan, otherwise we just drop the
- * original thread.
+ /*
+ * Assign the new thread to the KSE.
+ * and make the KSE runnable again,
*/
- if (original) {
- if (((original->td_flags & TDF_UNBOUND) == 0)) {
- /*
- * Put the owner on the side
- */
- ke->ke_bound = original;
- TD_SET_LOAN(original);
- } else {
- original->td_kse = NULL;
- }
+ if (TD_IS_BOUND(owner)) {
+ /*
+ * If there is a reason to keep the previous
+ * owner, do so.
+ */
+ TD_SET_LOAN(owner);
+ } else {
+ /* otherwise, cut it free */
+ ke->ke_owner = td;
+ owner->td_kse = NULL;
}
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
sched_add(ke);
- /*
- * if we have already borrowed this,
- * just pass it to the new thread,
- * otherwise, enact the loan.
- */
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
return;
}
- if (owner) { /* already loaned out */
- /* effectivly unloan it */
- TD_CLR_LOAN(owner);
- ke->ke_thread = owner;
- ke->ke_bound = NULL;
- if (original)
- original->td_kse = NULL;
- original = owner;
- if (TD_CAN_RUN(owner)) {
- /*
- * If the owner thread is now runnable, run it..
- * Let it have its KSE back.
- */
- setrunqueue(owner);
- CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
- ke, owner);
- return;
- }
- }
/*
- * Presetly NOT loaned out.
- * If we are bound, we go on the loanable queue
- * otherwise onto the free queue.
+ * Now handle any waiting upcall.
+ * Since we didn't make them runnable before.
*/
- if (original) {
- if (((original->td_flags & TDF_UNBOUND) == 0)) {
- ke->ke_state = KES_THREAD;
- ke->ke_flags |= KEF_ONLOANQ;
- ke->ke_bound = NULL;
- TAILQ_INSERT_HEAD(&kg->kg_lq, ke, ke_kgrlist);
- kg->kg_loan_kses++;
- CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
- return;
- } else {
- original->td_kse = NULL;
- }
+ if (TD_CAN_RUN(owner)) {
+ setrunnable(owner);
+ CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
+ ke, owner);
+ return;
+ }
+
+ /*
+ * It is possible that this is the last thread in the group
+ * because the KSE is being shut down or the process
+ * is exiting.
+ */
+ if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) {
+ ke->ke_thread = NULL;
+ owner->td_kse = NULL;
+ kse_unlink(ke);
+ return;
}
- ke->ke_state = KES_IDLE;
- ke->ke_thread = NULL;
- TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses++;
- CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
+
+ /*
+ * At this stage all we know is that the owner
+ * is the same as the 'active' thread in the KSE
+ * and that it is
+ * Presently NOT loaned out.
+ * Put it on the loanable queue. Make it fifo
+ * so that long term sleepers donate their KSE's first.
+ */
+ KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender"));
+ ke->ke_state = KES_THREAD;
+ ke->ke_flags |= KEF_ONLOANQ;
+ TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist);
+ kg->kg_loan_kses++;
+ CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
+ return;
}
#if 0
@@ -302,7 +347,7 @@ remrunqueue(struct thread *td)
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
- if ((td->td_flags & TDF_UNBOUND) == 0) {
+ if (TD_IS_BOUND(td)) {
/* Bring its kse with it, leave the thread attached */
sched_rem(ke);
ke->ke_state = KES_THREAD;
@@ -317,12 +362,12 @@ remrunqueue(struct thread *td)
* KSE to the next available thread. Then, we should
* see if we need to move the KSE in the run queues.
*/
+ sched_rem(ke);
+ ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value "));
if (td2 == td)
kg->kg_last_assigned = td3;
- td->td_kse = NULL;
- ke->ke_thread = NULL;
kse_reassign(ke);
}
}
@@ -345,7 +390,7 @@ adjustrunqueue( struct thread *td, int newpri)
*/
ke = td->td_kse;
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
- if ((td->td_flags & TDF_UNBOUND) == 0) {
+ if (TD_IS_BOUND(td)) {
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@@ -396,11 +441,16 @@ setrunqueue(struct thread *td)
sched_add(td->td_kse);
return;
}
- if ((td->td_flags & TDF_UNBOUND) == 0) {
+ /*
+ * If the process is threaded but the thread is bound then
+ * there is still a little extra to do re. KSE loaning.
+ */
+ if (TD_IS_BOUND(td)) {
KASSERT((td->td_kse != NULL),
("queueing BAD thread to run queue"));
ke = td->td_kse;
- ke->ke_bound = NULL;
+ KASSERT((ke->ke_owner == ke->ke_thread),
+ ("setrunqueue: Hey KSE loaned out"));
if (ke->ke_flags & KEF_ONLOANQ) {
ke->ke_flags &= ~KEF_ONLOANQ;
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
@@ -422,15 +472,7 @@ setrunqueue(struct thread *td)
* If we can't get one, our priority is not high enough..
* that's ok..
*/
- if (kg->kg_idle_kses) {
- /*
- * There is a free one so it's ours for the asking..
- */
- ke = TAILQ_FIRST(&kg->kg_iq);
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- ke->ke_state = KES_THREAD;
- kg->kg_idle_kses--;
- } else if (kg->kg_loan_kses) {
+ if (kg->kg_loan_kses) {
/*
* Failing that see if we can borrow one.
*/
@@ -438,8 +480,7 @@ setrunqueue(struct thread *td)
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
ke->ke_flags &= ~KEF_ONLOANQ;
ke->ke_state = KES_THREAD;
- TD_SET_LOAN(ke->ke_thread);
- ke->ke_bound = ke->ke_thread;
+ TD_SET_LOAN(ke->ke_owner);
ke->ke_thread = NULL;
kg->kg_loan_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
@@ -456,7 +497,11 @@ setrunqueue(struct thread *td)
} else {
/*
* Temporarily disassociate so it looks like the other cases.
+ * If the owner wasn't lending before, then it is now..
*/
+ if (!TD_LENDER(ke->ke_owner)) {
+ TD_SET_LOAN(ke->ke_owner);
+ }
ke->ke_thread = NULL;
td->td_kse = NULL;
}
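kern_switch.c leans heavily on the kg_last_assigned cursor: it marks the last run-queued thread already paired with a KSE, so the next thread needing a KSE is the one after it, or the head of the run queue if nothing has been assigned yet. The same lookup appears in kse_reassign() above and in thread_userret(); a minimal sketch of the idiom:

	struct thread *td;

	/* Next thread on the ksegrp run queue that still needs a KSE. */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);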
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 3f5b01d..3847c8e 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -172,23 +172,8 @@ msleep(ident, mtx, priority, wmesg, timo)
td->td_flags &= ~TDF_INTERRUPT;
return (EINTR);
}
- mtx_lock_spin(&sched_lock);
- if ((td->td_flags & (TDF_UNBOUND|TDF_INMSLEEP)) ==
- TDF_UNBOUND) {
- /*
- * Arrange for an upcall to be readied.
- * it will not actually happen until all
- * pending in-kernel work for this KSEGRP
- * has been done.
- */
- /* Don't recurse here! */
- td->td_flags |= TDF_INMSLEEP;
- thread_schedule_upcall(td, td->td_kse);
- td->td_flags &= ~TDF_INMSLEEP;
- }
- } else {
- mtx_lock_spin(&sched_lock);
}
+ mtx_lock_spin(&sched_lock);
if (cold ) {
/*
* During autoconfiguration, just give interrupts
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 22f173e..8434c68 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -197,6 +197,7 @@ kse_link(struct kse *ke, struct ksegrp *kg)
ke->ke_state = KES_UNQUEUED;
ke->ke_proc = p;
ke->ke_ksegrp = kg;
+ ke->ke_owner = NULL;
ke->ke_thread = NULL;
ke->ke_oncpu = NOCPU;
}
@@ -208,10 +209,6 @@ kse_unlink(struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
- if (ke->ke_state == KES_IDLE) {
- kg->kg_idle_kses--;
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- }
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
if (--kg->kg_kses == 0) {
@@ -231,14 +228,12 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_iq); /* idle kses in ksegrp */
TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
kg->kg_proc = p;
/* the following counters are in the -zero- section and may not need clearing */
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
kg->kg_kses = 0;
- kg->kg_idle_kses = 0;
kg->kg_loan_kses = 0;
kg->kg_runq_kses = 0; /* XXXKSE change name */
/* link it in now that it's consistent */
@@ -351,7 +346,8 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
}
/*
- * Either returns as an upcall or exits
+ * Either becomes an upcall or waits for an awakening event and
+ * THEN becomes an upcall. Only error cases return.
*/
int
kse_release(struct thread * td, struct kse_release_args * uap)
@@ -369,30 +365,24 @@ kse_release(struct thread * td, struct kse_release_args * uap)
(td->td_flags & TDF_UNBOUND) ||
(td->td_kse->ke_mailbox == NULL))
return (EINVAL);
+
PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
+ /* Change OURSELF to become an upcall. */
+ td->td_flags = TDF_UPCALLING; /* BOUND */
if (kg->kg_completed == NULL) {
-#if 1 /* temp until signals make new threads */
- if (p->p_numthreads == 1) {
- /* change OURSELF to become an upcall */
- td->td_flags = TDF_UPCALLING;
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- /*
- * msleep will not call thread_sched_upcall
- * because thread is not UNBOUND.
- */
- msleep(p->p_sigacts, NULL,
- PPAUSE | PCATCH, "ksepause", 0);
- return (0);
- }
-#endif /* end temp */
- thread_exit();
+ /* XXXKSE also look for waiting signals etc. */
+ /*
+ * The KSE will however be lendable.
+ */
+ mtx_lock_spin(&sched_lock);
+ TD_SET_IDLE(td);
+ PROC_UNLOCK(p);
+ p->p_stats->p_ru.ru_nvcsw++;
+ mi_switch();
+ mtx_unlock_spin(&sched_lock);
+ } else {
+ PROC_UNLOCK(p);
}
- /* change OURSELF to become an upcall */
- td->td_flags = TDF_UPCALLING;
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
return (0);
}
@@ -403,45 +393,62 @@ int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
struct proc *p;
- struct kse *ke, *ke2;
+ struct kse *ke;
struct ksegrp *kg;
+ struct thread *td2;
p = td->td_proc;
+ td2 = NULL;
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
return EINVAL;
- ke = NULL;
- mtx_lock_spin(&sched_lock);
+ PROC_LOCK(p);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_KSE_IN_GROUP(kg, ke2) {
- if (ke2->ke_mailbox != uap->mbx)
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ if (ke->ke_mailbox != uap->mbx)
continue;
- if (ke2->ke_state == KES_IDLE) {
- ke = ke2;
- goto found;
- } else {
- mtx_unlock_spin(&sched_lock);
- td->td_retval[0] = 0;
- td->td_retval[1] = 0;
+ td2 = ke->ke_owner ;
+ KASSERT((td2 != NULL),("KSE with no owner"));
+ if (!TD_IS_IDLE(td2)) {
+ /* Return silently if no longer idle */
+ PROC_UNLOCK(p);
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
return (0);
}
+ break;
}
+ if (td2) {
+ break;
+ }
}
} else {
+ /*
+ * look for any idle KSE to resurrect.
+ */
kg = td->td_ksegrp;
- ke = TAILQ_FIRST(&kg->kg_iq);
+ mtx_lock_spin(&sched_lock);
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ td2 = ke->ke_owner;
+ KASSERT((td2 != NULL),("KSE with no owner2"));
+ if (TD_IS_IDLE(td2))
+ break;
+ }
}
- if (ke == NULL) {
+ if (td2) {
+ mtx_lock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ TD_CLR_IDLE(td2);
+ setrunnable(td2);
mtx_unlock_spin(&sched_lock);
- return (ESRCH);
- }
-found:
- thread_schedule_upcall(td, ke);
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+ return (0);
+ }
mtx_unlock_spin(&sched_lock);
- td->td_retval[0] = 0;
- td->td_retval[1] = 0;
- return (0);
+ PROC_UNLOCK(p);
+ return (ESRCH);
}
/*
@@ -810,17 +817,14 @@ thread_export_context(struct thread *td)
addr = (void *)(&td->td_mailbox->tm_context);
#endif
error = copyin(addr, &uc, sizeof(ucontext_t));
- if (error == 0) {
- thread_getcontext(td, &uc);
- error = copyout(&uc, addr, sizeof(ucontext_t));
+ if (error)
+ goto bad;
+
+ thread_getcontext(td, &uc);
+ error = copyout(&uc, addr, sizeof(ucontext_t));
+ if (error)
+ goto bad;
- }
- if (error) {
- PROC_LOCK(p);
- psignal(p, SIGSEGV);
- PROC_UNLOCK(p);
- return (error);
- }
/* get address in latest mbox of list pointer */
#if 0
addr = (caddr_t)td->td_mailbox
@@ -835,6 +839,7 @@ thread_export_context(struct thread *td)
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
if (suword(addr, mbx)) {
+ error = EFAULT;
goto bad;
}
PROC_LOCK(p);
@@ -856,7 +861,7 @@ bad:
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
- return (EFAULT);
+ return (error);
}
/*
@@ -930,8 +935,6 @@ thread_update_uticks(void)
caddr_t addr;
uint uticks, sticks;
- KASSERT(!(td->td_flags & TDF_UNBOUND), ("thread not bound."));
-
if (ke->ke_mailbox == NULL)
return 0;
@@ -939,8 +942,12 @@ thread_update_uticks(void)
ke->ke_uuticks = 0;
sticks = ke->ke_usticks;
ke->ke_usticks = 0;
+#if 0
tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_curthread));
+ + offsetof(struct kse_mailbox, km_curthread));
+#else /* if user pointer arithmetic is ok in the kernel */
+ tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
+#endif
if ((tmbx == NULL) || (tmbx == (void *)-1))
return 0;
if (uticks) {
@@ -1028,18 +1035,21 @@ thread_exit(void)
}
/* Reassign this thread's KSE. */
- ke->ke_thread = NULL;
- td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
- KASSERT((ke->ke_bound != td),
- ("thread_exit: entered with ke_bound set"));
/*
- * decide what to do with the KSE attached to this thread.
+ * Decide what to do with the KSE attached to this thread.
+ * XXX Possibly kse_reassign should do both cases as it already
+ * does some of this.
*/
if (ke->ke_flags & KEF_EXIT) {
+ KASSERT((ke->ke_owner == td),
+ ("thread_exit: KSE exiting with non-owner thread"));
+ ke->ke_thread = NULL;
+ td->td_kse = NULL;
kse_unlink(ke);
} else {
+ TD_SET_EXITING(td); /* definitely not runnable */
kse_reassign(ke);
}
PROC_UNLOCK(p);
@@ -1107,19 +1117,13 @@ thread_link(struct thread *td, struct ksegrp *kg)
void
kse_purge(struct proc *p, struct thread *td)
{
- struct kse *ke;
+ /* XXXKSE think about this..
+ may need to wake up threads on loan queue. */
struct ksegrp *kg;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
mtx_lock_spin(&sched_lock);
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
- while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- kg->kg_kses--;
- kse_stash(ke);
- }
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
@@ -1137,38 +1141,28 @@ kse_purge(struct proc *p, struct thread *td)
/*
* Create a thread and schedule it for upcall on the KSE given.
+ * Use our thread's standin so that we don't have to allocate one.
*/
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
struct thread *td2;
- struct ksegrp *kg;
int newkse;
mtx_assert(&sched_lock, MA_OWNED);
newkse = (ke != td->td_kse);
/*
- * If the kse is already owned by another thread then we can't
- * schedule an upcall because the other thread must be BOUND
- * which means it is not in a position to take an upcall.
- * We must be borrowing the KSE to allow us to complete some in-kernel
- * work. When we complete, the Bound thread will have teh chance to
+ * If the owner and kse are BOUND then that thread is planning to
+ * go to userland and upcalls are not expected. So don't make one.
+ * If it is not bound then make it so with the spare thread
+ and then borrow back the KSE to allow us to complete some in-kernel
+ * work. When we complete, the Bound thread will have the chance to
* complete. This thread will sleep as planned. Hopefully there will
* eventually be un unbound thread that can be converted to an
* upcall to report the completion of this thread.
*/
- if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
- return (NULL);
- }
- KASSERT((ke->ke_bound == NULL), ("kse already bound"));
- if (ke->ke_state == KES_IDLE) {
- kg = ke->ke_ksegrp;
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- ke->ke_state = KES_UNQUEUED;
- }
if ((td2 = td->td_standin) != NULL) {
td->td_standin = NULL;
} else {
@@ -1206,8 +1200,9 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
td2->td_kse = ke;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
+ ke->ke_owner = td2;
/*
- * If called from msleep(), we are working on the current
+ * If called from kse_reassign(), we are working on the current
* KSE so fake that we borrowed it. If called from
* kse_create(), don't, as we have a new kse too.
*/
@@ -1220,10 +1215,8 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
* from msleep() this is going to be "very soon" in nearly
* all cases.
*/
- ke->ke_bound = td2;
TD_SET_LOAN(td2);
} else {
- ke->ke_bound = NULL;
ke->ke_thread = td2;
ke->ke_state = KES_THREAD;
setrunqueue(td2);
@@ -1292,10 +1285,11 @@ thread_user_enter(struct proc *p, struct thread *td)
/*
* If we are doing a syscall in a KSE environment,
* note where our mailbox is. There is always the
- * possibility that we could do this lazily (in sleep()),
+ * possibility that we could do this lazily (in kse_reassign()),
* but for now do it every time.
*/
ke = td->td_kse;
+ td->td_flags &= ~TDF_UNBOUND;
if (ke->ke_mailbox != NULL) {
#if 0
td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
@@ -1308,7 +1302,7 @@ thread_user_enter(struct proc *p, struct thread *td)
(td->td_mailbox == (void *)-1)) {
td->td_mailbox = NULL; /* single thread it.. */
mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_UNBOUND;
+ td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
mtx_unlock_spin(&sched_lock);
} else {
/*
@@ -1324,8 +1318,11 @@ thread_user_enter(struct proc *p, struct thread *td)
td->td_standin = thread_alloc();
}
mtx_lock_spin(&sched_lock);
- td->td_flags |= TDF_UNBOUND;
+ td->td_flags |= TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
+ KASSERT((ke->ke_owner == td),
+ ("thread_user_enter: No starting owner "));
+ ke->ke_owner = td;
td->td_usticks = 0;
}
}
@@ -1350,187 +1347,215 @@ thread_userret(struct thread *td, struct trapframe *frame)
int unbound;
struct kse *ke;
struct ksegrp *kg;
- struct thread *td2;
+ struct thread *worktodo;
struct proc *p;
struct timespec ts;
- error = 0;
+ KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
+ ("thread_userret: bad thread/kse pointers"));
+ KASSERT((td == curthread),
+ ("thread_userret: bad thread argument"));
- unbound = td->td_flags & TDF_UNBOUND;
kg = td->td_ksegrp;
p = td->td_proc;
+ error = 0;
+ unbound = TD_IS_UNBOUND(td);
+
+ mtx_lock_spin(&sched_lock);
+ if ((worktodo = kg->kg_last_assigned))
+ worktodo = TAILQ_NEXT(worktodo, td_runq);
+ else
+ worktodo = TAILQ_FIRST(&kg->kg_runq);
/*
- * Originally bound threads never upcall but they may
+ * Permanently bound threads never upcall but they may
* loan out their KSE at this point.
* Upcalls imply bound.. They also may want to do some Philantropy.
- * Unbound threads on the other hand either yield to other work
- * or transform into an upcall.
- * (having saved their context to user space in both cases)
+ * Temporarily bound threads on the other hand either yield
+ * to other work and transform into an upcall, or proceed back to
+ * userland.
*/
- if (unbound) {
- /*
- * We are an unbound thread, looking to return to
- * user space.
- * THere are several possibilities:
- * 1) we are using a borrowed KSE. save state and exit.
- * kse_reassign() will recycle the kse as needed,
- * 2) we are not.. save state, and then convert ourself
- * to be an upcall, bound to the KSE.
- * if there are others that need the kse,
- * give them a chance by doing an mi_switch().
- * Because we are bound, control will eventually return
- * to us here.
- * ***
- * Save the thread's context, and link it
- * into the KSEGRP's list of completed threads.
- */
+
+ if (TD_CAN_UNBIND(td)) {
+ td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
+ if (!worktodo && (kg->kg_completed == NULL)) {
+ /*
+ * This thread has not started any upcall.
+ * If there is no work to report other than
+ * ourself, then it can return direct to userland.
+ */
+justreturn:
+ mtx_unlock_spin(&sched_lock);
+ thread_update_uticks();
+ td->td_mailbox = NULL;
+ return (0);
+ }
+ mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
- td->td_mailbox = NULL;
td->td_usticks = 0;
if (error) {
/*
- * If we are not running on a borrowed KSE, then
+ * As we are not running on a borrowed KSE,
* failing to do the KSE operation just defaults
* back to synchonous operation, so just return from
- * the syscall. If it IS borrowed, there is nothing
- * we can do. We just lose that context. We
- * probably should note this somewhere and send
- * the process a signal.
+ * the syscall.
*/
- PROC_LOCK(td->td_proc);
- psignal(td->td_proc, SIGSEGV);
- mtx_lock_spin(&sched_lock);
- if (td->td_kse->ke_bound == NULL) {
- td->td_flags &= ~TDF_UNBOUND;
- PROC_UNLOCK(td->td_proc);
- mtx_unlock_spin(&sched_lock);
- thread_update_uticks();
- return (error); /* go sync */
- }
- thread_exit();
+ goto justreturn;
}
-
- /*
- * if the KSE is owned and we are borrowing it,
- * don't make an upcall, just exit so that the owner
- * can get its KSE if it wants it.
- * Our context is already safely stored for later
- * use by the UTS.
- */
- PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
- if (td->td_kse->ke_bound) {
- thread_exit();
- }
- PROC_UNLOCK(p);
-
/*
* Turn ourself into a bound upcall.
* We will rely on kse_reassign()
* to make us run at a later time.
- * We should look just like a sheduled upcall
- * from msleep() or cv_wait().
*/
- td->td_flags &= ~TDF_UNBOUND;
td->td_flags |= TDF_UPCALLING;
- /* Only get here if we have become an upcall */
- } else {
- mtx_lock_spin(&sched_lock);
+ /* there may be more work since we re-locked schedlock */
+ if ((worktodo = kg->kg_last_assigned))
+ worktodo = TAILQ_NEXT(worktodo, td_runq);
+ else
+ worktodo = TAILQ_FIRST(&kg->kg_runq);
+ } else if (unbound) {
+ /*
+ * We are an unbound thread, looking to
+ * return to user space. There must be another owner
+ * of this KSE.
+ * We are using a borrowed KSE. save state and exit.
+ * kse_reassign() will recycle the kse as needed,
+ */
+ mtx_unlock_spin(&sched_lock);
+ error = thread_export_context(td);
+ td->td_usticks = 0;
+ if (error) {
+ /*
+ * There is nothing we can do.
+ * We just lose that context. We
+ * probably should note this somewhere and send
+ * the process a signal.
+ */
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGSEGV);
+ mtx_lock_spin(&sched_lock);
+ ke = td->td_kse;
+ /* possibly upcall with error? */
+ } else {
+ /*
+ * Don't make an upcall, just exit so that the owner
+ * can get its KSE if it wants it.
+ * Our context is already safely stored for later
+ * use by the UTS.
+ */
+ PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
+ ke = td->td_kse;
+ }
+ /*
+ * If the owner is idling, we now have something for it
+ * to report, so make it runnable.
+ * If the owner is not an upcall, make an attempt to
+ * ensure that at least one of any IDLED upcalls can
+ * wake up.
+ */
+ if (ke->ke_owner->td_flags & TDF_UPCALLING) {
+ TD_CLR_IDLE(ke->ke_owner);
+ } else {
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ if (TD_IS_IDLE(ke->ke_owner)) {
+ TD_CLR_IDLE(ke->ke_owner);
+ }
+ }
+ }
+ thread_exit();
}
/*
* We ARE going back to userland with this KSE.
- * Check for threads that need to borrow it.
- * Optimisation: don't call mi_switch if no-one wants the KSE.
+ * We are permanently bound. We may be an upcall.
+ * If an upcall, check for threads that need to borrow the KSE.
* Any other thread that comes ready after this missed the boat.
*/
ke = td->td_kse;
- if ((td2 = kg->kg_last_assigned))
- td2 = TAILQ_NEXT(td2, td_runq);
- else
- td2 = TAILQ_FIRST(&kg->kg_runq);
- if (td2) {
- /*
- * force a switch to more urgent 'in kernel'
- * work. Control will return to this thread
- * when there is no more work to do.
- * kse_reassign() will do tha for us.
- */
- TD_SET_LOAN(td);
- ke->ke_bound = td;
- ke->ke_thread = NULL;
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch(); /* kse_reassign() will (re)find td2 */
- }
- mtx_unlock_spin(&sched_lock);
/*
- * Optimisation:
- * Ensure that we have a spare thread available,
- * for when we re-enter the kernel.
+ * If not upcalling, go back to userspace.
+ * If we are, get the upcall set up.
*/
- if (td->td_standin == NULL) {
- td->td_standin = thread_alloc();
- }
-
- thread_update_uticks();
- /*
- * To get here, we know there is no other need for our
- * KSE so we can proceed. If not upcalling, go back to
- * userspace. If we are, get the upcall set up.
- */
- if ((td->td_flags & TDF_UPCALLING) == 0)
- return (0);
+ if (td->td_flags & TDF_UPCALLING) {
+ if (worktodo) {
+ /*
+ * force a switch to more urgent 'in kernel'
+ * work. Control will return to this thread
+ * when there is no more work to do.
+ * kse_reassign() will do that for us.
+ */
+ TD_SET_LOAN(td); /* XXXKSE may not be needed */
+ p->p_stats->p_ru.ru_nvcsw++;
+ mi_switch(); /* kse_reassign() will (re)find worktodo */
+ }
+ td->td_flags &= ~TDF_UPCALLING;
+ mtx_unlock_spin(&sched_lock);
- /*
- * We must be an upcall to get this far.
- * There is no more work to do and we are going to ride
- * this thead/KSE up to userland as an upcall.
- * Do the last parts of the setup needed for the upcall.
- */
- CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
- td, td->td_proc->p_pid, td->td_proc->p_comm);
+ /*
+ * There is no more work to do and we are going to ride
+ * this thread/KSE up to userland as an upcall.
+ * Do the last parts of the setup needed for the upcall.
+ */
+ CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
+ td, td->td_proc->p_pid, td->td_proc->p_comm);
- /*
- * Set user context to the UTS.
- * Will use Giant in cpu_thread_clean() because it uses
- * kmem_free(kernel_map, ...)
- */
- cpu_set_upcall_kse(td, ke);
+ /*
+ * Set user context to the UTS.
+ * Will use Giant in cpu_thread_clean() because it uses
+ * kmem_free(kernel_map, ...)
+ */
+ cpu_set_upcall_kse(td, ke);
- /*
- * Put any completed mailboxes on this KSE's list.
- */
- error = thread_link_mboxes(kg, ke);
- if (error)
- goto bad;
+ /*
+ * Unhook the list of completed threads.
+ * anything that completes after this gets to
+ * come in next time.
+ * Put the list of completed thread mailboxes on
+ * this KSE's mailbox.
+ */
+ error = thread_link_mboxes(kg, ke);
+ if (error)
+ goto bad;
- /*
- * Set state and mailbox.
- * From now on we are just a bound outgoing process.
- * **Problem** userret is often called several times.
- * it would be nice if this all happenned only on the first time
- * through. (the scan for extra work etc.)
- */
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_UPCALLING;
- mtx_unlock_spin(&sched_lock);
+ /*
+ * Set state and clear the thread mailbox pointer.
+ * From now on we are just a bound outgoing process.
+ * **Problem** userret is often called several times.
+ * It would be nice if this all happened only on the first
+ * time through. (the scan for extra work etc.)
+ */
#if 0
- error = suword((caddr_t)ke->ke_mailbox +
- offsetof(struct kse_mailbox, km_curthread), 0);
+ error = suword((caddr_t)ke->ke_mailbox +
+ offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
- error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
+ error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
- ke->ke_uuticks = ke->ke_usticks = 0;
- if (!error) {
+ ke->ke_uuticks = ke->ke_usticks = 0;
+ if (error)
+ goto bad;
nanotime(&ts);
- if (copyout(&ts, (caddr_t)&ke->ke_mailbox->km_timeofday,
- sizeof(ts))) {
+ if (copyout(&ts,
+ (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
goto bad;
}
+ } else {
+ mtx_unlock_spin(&sched_lock);
}
+ /*
+ * Optimisation:
+ * Ensure that we have a spare thread available,
+ * for when we re-enter the kernel.
+ */
+ if (td->td_standin == NULL) {
+ td->td_standin = thread_alloc();
+ }
+
+ thread_update_uticks();
+ td->td_mailbox = NULL;
return (0);
bad:
@@ -1541,6 +1566,7 @@ bad:
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
+ td->td_mailbox = NULL;
return (error); /* go sync */
}
@@ -1577,9 +1603,10 @@ thread_single(int force_exit)
if (p->p_singlethread)
return (1);
- if (force_exit == SINGLE_EXIT)
+ if (force_exit == SINGLE_EXIT) {
p->p_flag |= P_SINGLE_EXIT;
- else
+ td->td_flags &= ~TDF_UNBOUND;
+ } else
p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
p->p_singlethread = td;
@@ -1601,11 +1628,17 @@ thread_single(int force_exit)
else
abortsleep(td2);
}
+ if (TD_IS_IDLE(td2)) {
+ TD_CLR_IDLE(td2);
+ }
} else {
if (TD_IS_SUSPENDED(td2))
continue;
/* maybe other inhibitted states too? */
- if (TD_IS_SLEEPING(td2))
+ if (td2->td_inhibitors &
+ (TDI_SLEEPING | TDI_SWAPPED |
+ TDI_LOAN | TDI_IDLE |
+ TDI_EXITING))
thread_suspend_one(td2);
}
}
@@ -1707,15 +1740,14 @@ thread_suspend_check(int return_instead)
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
/*
- * free extra kses and ksegrps, we needn't worry
- * about if current thread is in same ksegrp as
- * p_singlethread and last kse in the group
- * could be killed, this is protected by kg_numthreads,
- * in this case, we deduce that kg_numthreads must > 1.
+ * All threads should be exiting
+ * unless they are the active "singlethread".
+ * Destroy unneeded KSEs as we go;
+ * KSEGRPs may implode too as #kses -> 0.
*/
ke = td->td_kse;
- if (ke->ke_bound == NULL &&
- ((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
+ if (ke->ke_owner == td &&
+ (kg->kg_kses >= kg->kg_numthreads ))
ke->ke_flags |= KEF_EXIT;
thread_exit();
}
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 7c4aa80..48020aa 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -116,7 +116,7 @@ maybe_resched(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- if (td->td_priority < curthread->td_priority)
+ if (td->td_priority < curthread->td_priority && curthread->td_kse)
curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 61052d3..eec2ae6 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -112,7 +112,6 @@ userret(td, frame, oticks)
*/
if (p->p_flag & P_KSES) {
thread_userret(td, frame);
- /* printf("KSE thread returned"); */
}
/*