summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjulian <julian@FreeBSD.org>2002-09-29 23:04:34 +0000
committerjulian <julian@FreeBSD.org>2002-09-29 23:04:34 +0000
commitd91c37553eed3082a93a6edbef2fce6c7629aaf7 (patch)
tree511ef31f5693fdea87543a17c345986c726d9da3 /sys
parent054b6a1aae2b1225e80402c333f0e68b8fafe944 (diff)
downloadFreeBSD-src-d91c37553eed3082a93a6edbef2fce6c7629aaf7.zip
FreeBSD-src-d91c37553eed3082a93a6edbef2fce6c7629aaf7.tar.gz
Implement basic KSE loaning. This stops a thread that is blocked in BOUND mode
from stopping another thread from completing a syscall, and this allows it to release its resources etc. Probably more related commits to follow (at least one I know of) Initial concept by: julian, dillon Submitted by: davidxu
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_kse.c9
-rw-r--r--sys/kern/kern_mutex.c3
-rw-r--r--sys/kern/kern_proc.c4
-rw-r--r--sys/kern/kern_switch.c74
-rw-r--r--sys/kern/kern_synch.c5
-rw-r--r--sys/kern/kern_thread.c9
-rw-r--r--sys/kern/subr_turnstile.c3
-rw-r--r--sys/sys/proc.h6
8 files changed, 104 insertions, 9 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index ede053c..907eba7 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -466,6 +466,8 @@ thread_exit(void)
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
+ if (ke->ke_bound == td)
+ ke->ke_bound = NULL;
kse_reassign(ke);
/* Unlink this thread from its proc. and the kseg */
@@ -707,6 +709,13 @@ thread_userret(struct thread *td, struct trapframe *frame)
int unbound;
struct kse *ke;
+ if (td->td_kse->ke_bound) {
+ thread_export_context(td);
+ PROC_LOCK(td->td_proc);
+ mtx_lock_spin(&sched_lock);
+ thread_exit();
+ }
+
/* Make the thread bound from now on, but remember what it was. */
unbound = td->td_flags & TDF_UNBOUND;
td->td_flags &= ~TDF_UNBOUND;
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 34cc6d5..a5053fe 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -762,7 +762,8 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
td1->td_blocked = NULL;
TD_CLR_MUTEX(td1);
- setrunqueue(td1);
+ if (TD_CAN_RUN(td1))
+ setrunqueue(td1);
if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index a3ddae0..633c66e 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -249,13 +249,15 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_iq); /* all kses in ksegrp */
+ TAILQ_INIT(&kg->kg_iq); /* idle kses in ksegrp */
+ TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
kg->kg_proc = p;
/* the following counters are in the -zero- section and may not need clearing */
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
kg->kg_kses = 0;
kg->kg_idle_kses = 0;
+ kg->kg_loan_kses = 0;
kg->kg_runq_kses = 0; /* XXXKSE change name */
/* link it in now that it's consistent */
p->p_numksegrps++;
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 0139a8b..c6c667d 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -178,9 +178,24 @@ kse_reassign(struct kse *ke)
{
struct ksegrp *kg;
struct thread *td;
+ struct thread *owner;
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
+ owner = ke->ke_bound;
+ KASSERT(!(owner && ((owner->td_kse != ke) ||
+ (owner->td_flags & TDF_UNBOUND))),
+ ("kse_reassign: bad thread bound state"));
+ if (owner && (owner->td_inhibitors == TDI_LOAN)) {
+ TD_CLR_LOAN(owner);
+ ke->ke_bound = NULL;
+ ke->ke_thread = owner;
+ owner->td_kse = ke;
+ setrunqueue(owner);
+ CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
+ ke, owner);
+ return;
+ }
/*
* Find the first unassigned thread
@@ -201,13 +216,24 @@ kse_reassign(struct kse *ke)
td->td_kse = ke;
ke->ke_thread = td;
runq_add(&runq, ke);
+ if (owner)
+ TD_SET_LOAN(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
- } else {
+ } else if (!owner) {
ke->ke_state = KES_IDLE;
ke->ke_thread = NULL;
TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
+ } else {
+ TD_CLR_LOAN(owner);
+ ke->ke_state = KES_THREAD;
+ ke->ke_thread = owner;
+ owner->td_kse = ke;
+ ke->ke_flags |= KEF_ONLOANQ;
+ TAILQ_INSERT_HEAD(&kg->kg_lq, ke, ke_kgrlist);
+ kg->kg_loan_kses++;
+ CTR1(KTR_RUNQ, "kse_reassign: ke%p is on loan queue", ke);
}
}
@@ -226,7 +252,7 @@ kserunnable(void)
void
remrunqueue(struct thread *td)
{
- struct thread *td2, *td3;
+ struct thread *td2, *td3, *owner;
struct ksegrp *kg;
struct kse *ke;
@@ -282,10 +308,33 @@ remrunqueue(struct thread *td)
runq_remove(&runq, ke);
KASSERT((ke->ke_state != KES_IDLE),
("kse already idle"));
- ke->ke_state = KES_IDLE;
- ke->ke_thread = NULL;
- TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses++;
+ if (ke->ke_bound) {
+ owner = ke->ke_bound;
+ if (owner->td_inhibitors == TDI_LOAN) {
+ TD_CLR_LOAN(owner);
+ ke->ke_bound = NULL;
+ ke->ke_thread = owner;
+ owner->td_kse = ke;
+ setrunqueue(owner);
+ CTR2(KTR_RUNQ,
+ "remrunqueue: ke%p -> td%p (give back)",
+ ke, owner);
+ } else {
+ TD_CLR_LOAN(owner);
+ ke->ke_state = KES_THREAD;
+ ke->ke_thread = owner;
+ owner->td_kse = ke;
+ ke->ke_flags |= KEF_ONLOANQ;
+ TAILQ_INSERT_HEAD(&kg->kg_lq, ke,
+ ke_kgrlist);
+ kg->kg_loan_kses++;
+ }
+ } else {
+ ke->ke_state = KES_IDLE;
+ ke->ke_thread = NULL;
+ TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
+ kg->kg_idle_kses++;
+ }
}
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
@@ -309,6 +358,12 @@ setrunqueue(struct thread *td)
if ((td->td_flags & TDF_UNBOUND) == 0) {
KASSERT((td->td_kse != NULL),
("queueing BAD thread to run queue"));
+ ke = td->td_kse;
+ if (ke->ke_flags & KEF_ONLOANQ) {
+ ke->ke_flags &= ~KEF_ONLOANQ;
+ TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
+ kg->kg_loan_kses--;
+ }
/*
* Common path optimisation: Only one of everything
* and the KSE is always already attached.
@@ -337,6 +392,13 @@ setrunqueue(struct thread *td)
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_THREAD;
kg->kg_idle_kses--;
+ } else if (kg->kg_loan_kses) {
+ ke = TAILQ_FIRST(&kg->kg_lq);
+ TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
+ ke->ke_flags &= ~KEF_ONLOANQ;
+ ke->ke_state = KES_THREAD;
+ TD_SET_LOAN(ke->ke_bound);
+ kg->kg_loan_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
/*
* None free, but there is one we can commandeer.
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 74ef9ed..445936b 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -849,6 +849,11 @@ mi_switch(void)
*/
td->td_kse = NULL;
kse_reassign(ke);
+ } else if (p->p_flag & P_KSES) {
+ KASSERT(((ke->ke_bound == NULL) || (ke->ke_bound == td)),
+ ("mi_switch: bad bound state"));
+ ke->ke_bound = td;
+ kse_reassign(ke);
}
cpu_switch(); /* SHAZAM!!*/
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index ede053c..907eba7 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -466,6 +466,8 @@ thread_exit(void)
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
+ if (ke->ke_bound == td)
+ ke->ke_bound = NULL;
kse_reassign(ke);
/* Unlink this thread from its proc. and the kseg */
@@ -707,6 +709,13 @@ thread_userret(struct thread *td, struct trapframe *frame)
int unbound;
struct kse *ke;
+ if (td->td_kse->ke_bound) {
+ thread_export_context(td);
+ PROC_LOCK(td->td_proc);
+ mtx_lock_spin(&sched_lock);
+ thread_exit();
+ }
+
/* Make the thread bound from now on, but remember what it was. */
unbound = td->td_flags & TDF_UNBOUND;
td->td_flags &= ~TDF_UNBOUND;
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 34cc6d5..a5053fe 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -762,7 +762,8 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
td1->td_blocked = NULL;
TD_CLR_MUTEX(td1);
- setrunqueue(td1);
+ if (TD_CAN_RUN(td1))
+ setrunqueue(td1);
if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index f42971f..fe6a068 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -333,6 +333,7 @@ struct thread {
#define TDI_SWAPPED 0x04 /* Stack not in mem.. bad juju if run. */
#define TDI_MUTEX 0x08 /* Stopped on a mutex. */
#define TDI_IWAIT 0x10 /* Awaiting interrupt. */
+#define TDI_LOAN 0x20 /* bound thread's KSE is lent */
#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
@@ -361,12 +362,14 @@ struct thread {
#define TD_SET_MUTEX(td) TD_SET_INHIB((td), TDI_MUTEX)
#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
+#define TD_SET_LOAN(td) TD_SET_INHIB((td), TDI_LOAN)
#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
#define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED)
#define TD_CLR_MUTEX(td) TD_CLR_INHIB((td), TDI_MUTEX)
#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
+#define TD_CLR_LOAN(td) TD_CLR_INHIB((td), TDI_LOAN)
#define TD_SET_RUNNING(td) do {(td)->td_state = TDS_RUNNING; } while (0)
#define TD_SET_RUNQ(td) do {(td)->td_state = TDS_RUNQ; } while (0)
@@ -437,6 +440,7 @@ struct kse {
#define KEF_USER 0x00200 /* Process is not officially in the kernel */
#define KEF_ASTPENDING 0x00400 /* KSE has a pending ast. */
#define KEF_NEEDRESCHED 0x00800 /* Process needs to yield. */
+#define KEF_ONLOANQ 0x01000 /* KSE is on loan queue */
#define KEF_DIDRUN 0x02000 /* KSE actually ran. */
/*
@@ -459,6 +463,7 @@ struct ksegrp {
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* Queue of KSEGs in kg_proc. */
TAILQ_HEAD(, kse) kg_kseq; /* (ke_kglist) All KSEs. */
TAILQ_HEAD(, kse) kg_iq; /* (ke_kgrlist) Idle KSEs. */
+ TAILQ_HEAD(, kse) kg_lq; /* (ke_kgrlist) Loan KSEs. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
TAILQ_HEAD(, thread) kg_slpq; /* (td_runq) NONRUNNABLE threads. */
@@ -469,6 +474,7 @@ struct ksegrp {
struct thread *kg_last_assigned; /* Last thread assigned to a KSE */
int kg_runnable; /* Num runnable threads on queue. */
int kg_runq_kses; /* Num KSEs on runq. */
+ int kg_loan_kses; /* Num KSEs on loan queue. */
struct kse_thr_mailbox *kg_completed; /* (c) completed thread mboxes */
#define kg_endzero kg_pri_class
OpenPOWER on IntegriCloud