diff options
author | julian <julian@FreeBSD.org> | 2002-09-29 23:04:34 +0000 |
---|---|---|
committer | julian <julian@FreeBSD.org> | 2002-09-29 23:04:34 +0000 |
commit | d91c37553eed3082a93a6edbef2fce6c7629aaf7 (patch) | |
tree | 511ef31f5693fdea87543a17c345986c726d9da3 /sys/kern | |
parent | 054b6a1aae2b1225e80402c333f0e68b8fafe944 (diff) | |
download | FreeBSD-src-d91c37553eed3082a93a6edbef2fce6c7629aaf7.zip FreeBSD-src-d91c37553eed3082a93a6edbef2fce6c7629aaf7.tar.gz |
Implement basic KSE loaning. This stops a thread that is blocked in BOUND mode
from stopping another thread from completing a syscall, and this allows it to
release its resources etc. Probably more related commits to follow (at least
one I know of)
Initial concept by: julian, dillon
Submitted by: davidxu
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_kse.c | 9 | ||||
-rw-r--r-- | sys/kern/kern_mutex.c | 3 | ||||
-rw-r--r-- | sys/kern/kern_proc.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_switch.c | 74 | ||||
-rw-r--r-- | sys/kern/kern_synch.c | 5 | ||||
-rw-r--r-- | sys/kern/kern_thread.c | 9 | ||||
-rw-r--r-- | sys/kern/subr_turnstile.c | 3 |
7 files changed, 98 insertions, 9 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index ede053c..907eba7 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -466,6 +466,8 @@ thread_exit(void) ke->ke_thread = NULL; td->td_kse = NULL; ke->ke_state = KES_UNQUEUED; + if (ke->ke_bound == td) + ke->ke_bound = NULL; kse_reassign(ke); /* Unlink this thread from its proc. and the kseg */ @@ -707,6 +709,13 @@ thread_userret(struct thread *td, struct trapframe *frame) int unbound; struct kse *ke; + if (td->td_kse->ke_bound) { + thread_export_context(td); + PROC_LOCK(td->td_proc); + mtx_lock_spin(&sched_lock); + thread_exit(); + } + /* Make the thread bound from now on, but remember what it was. */ unbound = td->td_flags & TDF_UNBOUND; td->td_flags &= ~TDF_UNBOUND; diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index 34cc6d5..a5053fe 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -762,7 +762,8 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) td1->td_blocked = NULL; TD_CLR_MUTEX(td1); - setrunqueue(td1); + if (TD_CAN_RUN(td1)) + setrunqueue(td1); if (td->td_critnest == 1 && td1->td_priority < pri) { #ifdef notyet diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index a3ddae0..633c66e 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -249,13 +249,15 @@ ksegrp_link(struct ksegrp *kg, struct proc *p) TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */ - TAILQ_INIT(&kg->kg_iq); /* all kses in ksegrp */ + TAILQ_INIT(&kg->kg_iq); /* idle kses in ksegrp */ + TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */ kg->kg_proc = p; /* the following counters are in the -zero- section and may not need clearing */ kg->kg_numthreads = 0; kg->kg_runnable = 0; kg->kg_kses = 0; kg->kg_idle_kses = 0; + kg->kg_loan_kses = 0; kg->kg_runq_kses = 0; /* XXXKSE change name */ /* link it in now that it's consistent */ p->p_numksegrps++; diff --git 
a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c index 0139a8b..c6c667d 100644 --- a/sys/kern/kern_switch.c +++ b/sys/kern/kern_switch.c @@ -178,9 +178,24 @@ kse_reassign(struct kse *ke) { struct ksegrp *kg; struct thread *td; + struct thread *owner; mtx_assert(&sched_lock, MA_OWNED); kg = ke->ke_ksegrp; + owner = ke->ke_bound; + KASSERT(!(owner && ((owner->td_kse != ke) || + (owner->td_flags & TDF_UNBOUND))), + ("kse_reassign: bad thread bound state")); + if (owner && (owner->td_inhibitors == TDI_LOAN)) { + TD_CLR_LOAN(owner); + ke->ke_bound = NULL; + ke->ke_thread = owner; + owner->td_kse = ke; + setrunqueue(owner); + CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)", + ke, owner); + return; + } /* * Find the first unassigned thread @@ -201,13 +216,24 @@ kse_reassign(struct kse *ke) td->td_kse = ke; ke->ke_thread = td; runq_add(&runq, ke); + if (owner) + TD_SET_LOAN(owner); CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td); - } else { + } else if (!owner) { ke->ke_state = KES_IDLE; ke->ke_thread = NULL; TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist); kg->kg_idle_kses++; CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke); + } else { + TD_CLR_LOAN(owner); + ke->ke_state = KES_THREAD; + ke->ke_thread = owner; + owner->td_kse = ke; + ke->ke_flags |= KEF_ONLOANQ; + TAILQ_INSERT_HEAD(&kg->kg_lq, ke, ke_kgrlist); + kg->kg_loan_kses++; + CTR1(KTR_RUNQ, "kse_reassign: ke%p is on loan queue", ke); } } @@ -226,7 +252,7 @@ kserunnable(void) void remrunqueue(struct thread *td) { - struct thread *td2, *td3; + struct thread *td2, *td3, *owner; struct ksegrp *kg; struct kse *ke; @@ -282,10 +308,33 @@ remrunqueue(struct thread *td) runq_remove(&runq, ke); KASSERT((ke->ke_state != KES_IDLE), ("kse already idle")); - ke->ke_state = KES_IDLE; - ke->ke_thread = NULL; - TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses++; + if (ke->ke_bound) { + owner = ke->ke_bound; + if (owner->td_inhibitors == TDI_LOAN) { + TD_CLR_LOAN(owner); + ke->ke_bound = NULL; + 
ke->ke_thread = owner; + owner->td_kse = ke; + setrunqueue(owner); + CTR2(KTR_RUNQ, + "remrunqueue: ke%p -> td%p (give back)", + ke, owner); + } else { + TD_CLR_LOAN(owner); + ke->ke_state = KES_THREAD; + ke->ke_thread = owner; + owner->td_kse = ke; + ke->ke_flags |= KEF_ONLOANQ; + TAILQ_INSERT_HEAD(&kg->kg_lq, ke, + ke_kgrlist); + kg->kg_loan_kses++; + } + } else { + ke->ke_state = KES_IDLE; + ke->ke_thread = NULL; + TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist); + kg->kg_idle_kses++; + } } } TAILQ_REMOVE(&kg->kg_runq, td, td_runq); @@ -309,6 +358,12 @@ setrunqueue(struct thread *td) if ((td->td_flags & TDF_UNBOUND) == 0) { KASSERT((td->td_kse != NULL), ("queueing BAD thread to run queue")); + ke = td->td_kse; + if (ke->ke_flags & KEF_ONLOANQ) { + ke->ke_flags &= ~KEF_ONLOANQ; + TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist); + kg->kg_loan_kses--; + } /* * Common path optimisation: Only one of everything * and the KSE is always already attached. @@ -337,6 +392,13 @@ setrunqueue(struct thread *td) TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); ke->ke_state = KES_THREAD; kg->kg_idle_kses--; + } else if (kg->kg_loan_kses) { + ke = TAILQ_FIRST(&kg->kg_lq); + TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist); + ke->ke_flags &= ~KEF_ONLOANQ; + ke->ke_state = KES_THREAD; + TD_SET_LOAN(ke->ke_bound); + kg->kg_loan_kses--; } else if (tda && (tda->td_priority > td->td_priority)) { /* * None free, but there is one we can commandeer. 
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 74ef9ed..445936b 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -849,6 +849,11 @@ mi_switch(void) */ td->td_kse = NULL; kse_reassign(ke); + } else if (p->p_flag & P_KSES) { + KASSERT(((ke->ke_bound == NULL) || (ke->ke_bound == td)), + ("mi_switch: bad bound state")); + ke->ke_bound = td; + kse_reassign(ke); } cpu_switch(); /* SHAZAM!!*/ diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index ede053c..907eba7 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -466,6 +466,8 @@ thread_exit(void) ke->ke_thread = NULL; td->td_kse = NULL; ke->ke_state = KES_UNQUEUED; + if (ke->ke_bound == td) + ke->ke_bound = NULL; kse_reassign(ke); /* Unlink this thread from its proc. and the kseg */ @@ -707,6 +709,13 @@ thread_userret(struct thread *td, struct trapframe *frame) int unbound; struct kse *ke; + if (td->td_kse->ke_bound) { + thread_export_context(td); + PROC_LOCK(td->td_proc); + mtx_lock_spin(&sched_lock); + thread_exit(); + } + /* Make the thread bound from now on, but remember what it was. */ unbound = td->td_flags & TDF_UNBOUND; td->td_flags &= ~TDF_UNBOUND; diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index 34cc6d5..a5053fe 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -762,7 +762,8 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) td1->td_blocked = NULL; TD_CLR_MUTEX(td1); - setrunqueue(td1); + if (TD_CAN_RUN(td1)) + setrunqueue(td1); if (td->td_critnest == 1 && td1->td_priority < pri) { #ifdef notyet |