summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjulian <julian@FreeBSD.org>2004-09-10 21:04:38 +0000
committerjulian <julian@FreeBSD.org>2004-09-10 21:04:38 +0000
commit9993c65718d2dabc9ad6b0381cb3bec1afcbb3db (patch)
tree552ced22e442075d306e4fedf42503cc58d70e99 /sys
parent76cce8b362d77cd72af8e5deae525359f32deebe (diff)
downloadFreeBSD-src-9993c65718d2dabc9ad6b0381cb3bec1afcbb3db.zip
FreeBSD-src-9993c65718d2dabc9ad6b0381cb3bec1afcbb3db.tar.gz
Add some code to allow threads to nominate a sibling to run if they are going to sleep.
MFC after: 1 week
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_switch.c10
-rw-r--r--sys/kern/kern_synch.c2
-rw-r--r--sys/kern/sched_4bsd.c48
-rw-r--r--sys/kern/sched_ule.c2
-rw-r--r--sys/sys/sched.h2
5 files changed, 55 insertions, 9 deletions
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index aba4d45..9841f2a 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -192,7 +192,7 @@ retry:
* sched_thread_exit() (local)
* sched_switch() (local)
* sched_thread_exit() (local)
- * remrunqueue() (local) (commented out)
+ * remrunqueue() (local)
*/
static void
slot_fill(struct ksegrp *kg)
@@ -224,7 +224,7 @@ slot_fill(struct ksegrp *kg)
}
}
-#if 0
+#ifdef SCHED_4BSD
/*
* Remove a thread from its KSEGRP's run queue.
* This in turn may remove it from a KSE if it was already assigned
@@ -248,7 +248,7 @@ remrunqueue(struct thread *td)
* If it is not a threaded process, take the shortcut.
*/
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
- /* Bring its kse with it, leave the thread attached */
+ /* remve from sys run queue and free up a slot */
sched_rem(td);
kg->kg_avail_opennings++;
ke->ke_state = KES_THREAD;
@@ -259,7 +259,7 @@ remrunqueue(struct thread *td)
kg->kg_runnable--;
if (ke->ke_state == KES_ONRUNQ) {
/*
- * This thread has been assigned to a KSE.
+ * This thread has been assigned to the system run queue.
* We need to dissociate it and try assign the
* KSE to the next available thread. Then, we should
* see if we need to move the KSE in the run queues.
@@ -271,7 +271,7 @@ remrunqueue(struct thread *td)
KASSERT((td2 != NULL), ("last assigned has wrong value"));
if (td2 == td)
kg->kg_last_assigned = td3;
- slot_fill(kg);
+ /* slot_fill(kg); */ /* will replace it with another */
}
}
#endif
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2c97a5b..7534e21 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -337,7 +337,7 @@ mi_switch(int flags, struct thread *newtd)
(void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
if (td->td_proc->p_flag & P_SA)
newtd = thread_switchout(td, flags, newtd);
- sched_switch(td, newtd);
+ sched_switch(td, newtd, flags);
CTR4(KTR_PROC, "mi_switch: new thread %p (kse %p, pid %ld, %s)",
(void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 60b13ce..9a96344 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -247,7 +247,22 @@ static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
&forward_wakeup_use_htt, 0,
"account for htt");
+
#endif
+static int sched_followon = 0;
+SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
+ &sched_followon, 0,
+ "allow threads to share a quantum");
+
+static int sched_pfollowons = 0;
+SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
+ &sched_pfollowons, 0,
+ "number of followons done to a different ksegrp");
+
+static int sched_kgfollowons = 0;
+SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
+ &sched_kgfollowons, 0,
+ "number of followons done in a ksegrp");
/*
* Arrange to reschedule if necessary, taking the priorities and
@@ -733,10 +748,13 @@ sched_sleep(struct thread *td)
td->td_base_pri = td->td_priority;
}
+static void remrunqueue(struct thread *td);
+
void
-sched_switch(struct thread *td, struct thread *newtd)
+sched_switch(struct thread *td, struct thread *newtd, int flags)
{
struct kse *ke;
+ struct ksegrp *kg;
struct proc *p;
ke = td->td_kse;
@@ -746,6 +764,33 @@ sched_switch(struct thread *td, struct thread *newtd)
if ((p->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
+
+ /*
+ * We are volunteering to switch out so we get to nominate
+ * a successor for the rest of our quantum
+ * First try another thread in our ksegrp, and then look for
+ * other ksegrps in our process.
+ */
+ if (sched_followon &&
+ (p->p_flag & P_HADTHREADS) &&
+ (flags & SW_VOL) &&
+ newtd == NULL) {
+ /* lets schedule another thread from this process */
+ kg = td->td_ksegrp;
+ if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
+ remrunqueue(newtd);
+ sched_kgfollowons++;
+ } else {
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
+ sched_pfollowons++;
+ remrunqueue(newtd);
+ break;
+ }
+ }
+ }
+ }
+
/*
* The thread we are about to run needs to be counted as if it had been
* added to the run queue and selected.
@@ -757,6 +802,7 @@ sched_switch(struct thread *td, struct thread *newtd)
if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
}
+
td->td_lastcpu = td->td_oncpu;
td->td_flags &= ~TDF_NEEDRESCHED;
td->td_pflags &= ~TDP_OWEPREEMPT;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 52349c5..03904ef 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1225,7 +1225,7 @@ sched_prio(struct thread *td, u_char prio)
}
void
-sched_switch(struct thread *td, struct thread *newtd)
+sched_switch(struct thread *td, struct thread *newtd, int flags)
{
struct kse *ke;
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 472af52..6912390 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -66,7 +66,7 @@ void sched_fork_thread(struct thread *td, struct thread *child);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td);
-void sched_switch(struct thread *td, struct thread *newtd);
+void sched_switch(struct thread *td, struct thread *newtd, int flags);
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
OpenPOWER on IntegriCloud