-rw-r--r--  sys/amd64/amd64/machdep.c | 4
-rw-r--r--  sys/arm/at91/kb920x_machdep.c | 4
-rw-r--r--  sys/arm/sa11x0/assabet_machdep.c | 4
-rw-r--r--  sys/arm/xscale/i80321/ep80219_machdep.c | 4
-rw-r--r--  sys/arm/xscale/i80321/iq31244_machdep.c | 4
-rw-r--r--  sys/arm/xscale/ixp425/avila_machdep.c | 4
-rw-r--r--  sys/ddb/db_ps.c | 5
-rw-r--r--  sys/fs/procfs/procfs_status.c | 13
-rw-r--r--  sys/i386/i386/machdep.c | 6
-rw-r--r--  sys/ia64/ia64/machdep.c | 8
-rw-r--r--  sys/kern/init_main.c | 32
-rw-r--r--  sys/kern/kern_clock.c | 20
-rw-r--r--  sys/kern/kern_fork.c | 18
-rw-r--r--  sys/kern/kern_idle.c | 4
-rw-r--r--  sys/kern/kern_intr.c | 6
-rw-r--r--  sys/kern/kern_kse.c | 329
-rw-r--r--  sys/kern/kern_poll.c | 4
-rw-r--r--  sys/kern/kern_proc.c | 39
-rw-r--r--  sys/kern/kern_resource.c | 89
-rw-r--r--  sys/kern/kern_subr.c | 4
-rw-r--r--  sys/kern/kern_switch.c | 635
-rw-r--r--  sys/kern/kern_thr.c | 36
-rw-r--r--  sys/kern/kern_thread.c | 254
-rw-r--r--  sys/kern/kern_umtx.c | 6
-rw-r--r--  sys/kern/ksched.c | 45
-rw-r--r--  sys/kern/sched_4bsd.c | 764
-rw-r--r--  sys/kern/sched_ule.c | 1131
-rw-r--r--  sys/kern/subr_trap.c | 12
-rw-r--r--  sys/kern/tty.c | 27
-rw-r--r--  sys/pc98/pc98/machdep.c | 4
-rw-r--r--  sys/powerpc/aim/machdep.c | 4
-rw-r--r--  sys/powerpc/powerpc/machdep.c | 4
-rw-r--r--  sys/sparc64/sparc64/machdep.c | 4
-rw-r--r--  sys/sun4v/sun4v/machdep.c | 4
-rw-r--r--  sys/sys/proc.h | 234
-rw-r--r--  sys/sys/rtprio.h | 6
-rw-r--r--  sys/sys/runq.h | 10
-rw-r--r--  sys/sys/sched.h | 27
-rw-r--r--  sys/vm/vm_glue.c | 42
-rw-r--r--  sys/vm/vm_zeroidle.c | 4
40 files changed, 944 insertions, 2910 deletions
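
The hunks below, kern_switch.c and the two schedulers in particular, replace the per-KSE/per-ksegrp run-queue bookkeeping with a single per-thread scheduler record, struct td_sched, queued directly by thread priority. The stand-alone sketch that follows models that insertion path for illustration only; it is not code from this commit, RQ_NQS/RQ_PPQ and the structure layouts are simplified assumptions, and the real runq_add() reads ts->ts_thread->td_priority and also sets a status bit via runq_setbit() rather than keeping the priority on the stand-in structure as done here.

/*
 * Illustrative sketch (not FreeBSD kernel code) of the per-priority
 * TAILQ insertion performed by the reworked runq_add() in kern_switch.c:
 * each runnable thread contributes one struct td_sched, and its bucket
 * is chosen as priority / RQ_PPQ.
 */
#include <stdio.h>
#include <sys/queue.h>

#define RQ_NQS 64			/* number of run queues (assumed) */
#define RQ_PPQ 4			/* priorities per queue (assumed) */

struct td_sched {
	int ts_priority;		/* stand-in for ts_thread->td_priority */
	int ts_rqindex;			/* bucket index set at insertion time */
	TAILQ_ENTRY(td_sched) ts_procq;	/* run-queue linkage */
};

TAILQ_HEAD(rqhead, td_sched);

struct runq {
	struct rqhead rq_queues[RQ_NQS];
};

/* Mirrors the shape of the new runq_add(): pick a bucket, append or push. */
static void
runq_add_sketch(struct runq *rq, struct td_sched *ts, int preempted)
{
	int pri = ts->ts_priority / RQ_PPQ;

	ts->ts_rqindex = pri;
	if (preempted)
		TAILQ_INSERT_HEAD(&rq->rq_queues[pri], ts, ts_procq);
	else
		TAILQ_INSERT_TAIL(&rq->rq_queues[pri], ts, ts_procq);
}

int
main(void)
{
	struct runq rq;
	struct td_sched a = { .ts_priority = 120 };
	struct td_sched b = { .ts_priority = 122 };
	struct td_sched *ts;
	int i;

	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq.rq_queues[i]);
	runq_add_sketch(&rq, &a, 0);
	runq_add_sketch(&rq, &b, 0);	/* 120/4 == 122/4 == 30: same bucket */
	TAILQ_FOREACH(ts, &rq.rq_queues[30], ts_procq)
		printf("priority %d queued at rqindex %d\n",
		    ts->ts_priority, ts->ts_rqindex);
	return (0);
}
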
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index f0a6239..348bfef 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1121,11 +1121,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
preload_bootstrap_relocate(KERNBASE);
diff --git a/sys/arm/at91/kb920x_machdep.c b/sys/arm/at91/kb920x_machdep.c
index 9bff888..7cb5af5 100644
--- a/sys/arm/at91/kb920x_machdep.c
+++ b/sys/arm/at91/kb920x_machdep.c
@@ -455,11 +455,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
diff --git a/sys/arm/sa11x0/assabet_machdep.c b/sys/arm/sa11x0/assabet_machdep.c
index b8f2879..482f3a5 100644
--- a/sys/arm/sa11x0/assabet_machdep.c
+++ b/sys/arm/sa11x0/assabet_machdep.c
@@ -422,11 +422,7 @@ initarm(void *arg, void *arg2)
/* Set stack for exception handlers */
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
diff --git a/sys/arm/xscale/i80321/ep80219_machdep.c b/sys/arm/xscale/i80321/ep80219_machdep.c
index 75e9e19..9b787a6 100644
--- a/sys/arm/xscale/i80321/ep80219_machdep.c
+++ b/sys/arm/xscale/i80321/ep80219_machdep.c
@@ -429,11 +429,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
diff --git a/sys/arm/xscale/i80321/iq31244_machdep.c b/sys/arm/xscale/i80321/iq31244_machdep.c
index 2a52c04..1af0063 100644
--- a/sys/arm/xscale/i80321/iq31244_machdep.c
+++ b/sys/arm/xscale/i80321/iq31244_machdep.c
@@ -427,11 +427,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
diff --git a/sys/arm/xscale/ixp425/avila_machdep.c b/sys/arm/xscale/ixp425/avila_machdep.c
index b6efecf..29c3d68 100644
--- a/sys/arm/xscale/ixp425/avila_machdep.c
+++ b/sys/arm/xscale/ixp425/avila_machdep.c
@@ -493,11 +493,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 853675a..61e36b9 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -292,12 +292,7 @@ DB_SHOW_COMMAND(thread, db_show_thread)
td = kdb_thread;
db_printf("Thread %d at %p:\n", td->td_tid, td);
-#ifdef KSE
- db_printf(" proc (pid %d): %p ", td->td_proc->p_pid, td->td_proc);
- db_printf(" ksegrp: %p\n", td->td_ksegrp);
-#else
db_printf(" proc (pid %d): %p\n", td->td_proc->p_pid, td->td_proc);
-#endif
if (td->td_name[0] != '\0')
db_printf(" name: %s\n", td->td_name);
db_printf(" flags: %#x ", td->td_flags);
diff --git a/sys/fs/procfs/procfs_status.c b/sys/fs/procfs/procfs_status.c
index 60aa270..6382775 100644
--- a/sys/fs/procfs/procfs_status.c
+++ b/sys/fs/procfs/procfs_status.c
@@ -116,7 +116,9 @@ procfs_doprocstatus(PFS_FILL_ARGS)
#ifdef KSE
if (p->p_flag & P_SA)
wmesg = "-kse- ";
- else {
+ else
+#endif
+ {
tdfirst = FIRST_THREAD_IN_PROC(p);
if (tdfirst->td_wchan != NULL) {
KASSERT(tdfirst->td_wmesg != NULL,
@@ -125,15 +127,6 @@ procfs_doprocstatus(PFS_FILL_ARGS)
} else
wmesg = "nochan";
}
-#else
- tdfirst = FIRST_THREAD_IN_PROC(p);
- if (tdfirst->td_wchan != NULL) {
- KASSERT(tdfirst->td_wmesg != NULL,
- ("wchan %p has no wmesg", tdfirst->td_wchan));
- wmesg = tdfirst->td_wmesg;
- } else
- wmesg = "nochan";
-#endif
mtx_unlock_spin(&sched_lock);
if (p->p_sflag & PS_INMEM) {
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index c416c38..2dc54ec 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -2058,11 +2058,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
metadata_missing = 0;
if (bootinfo.bi_modulep) {
@@ -2297,7 +2293,7 @@ init386(first)
_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
/* setup proc 0's pcb */
- thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
+ thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index a57fc4f..7c2e1f5 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -776,12 +776,10 @@ ia64_init(void)
msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
msgbufinit(msgbufp, MSGBUF_SIZE);
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
-
+ /*
+ * Init mapping for kernel stack for proc 0
+ */
proc0kstack = (vm_offset_t)kstack;
thread0.td_kstack = proc0kstack;
thread0.td_kstack_pages = KSTACK_PAGES;
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index d002a99..a911e11 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -95,9 +95,6 @@ static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0 __aligned(8);
-#ifdef KSE
-struct ksegrp ksegrp0;
-#endif
struct vmspace vmspace0;
struct proc *initproc;
@@ -366,34 +363,16 @@ proc0_init(void *dummy __unused)
struct proc *p;
unsigned i;
struct thread *td;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
GIANT_REQUIRED;
p = &proc0;
td = &thread0;
-#ifdef KSE
- kg = &ksegrp0;
-#endif
/*
* Initialize magic number.
*/
p->p_magic = P_MAGIC;
-#ifdef KSE
- /*
- * Initialize thread, process and ksegrp structures.
- */
- procinit(); /* set up proc zone */
- threadinit(); /* set up thead, upcall and KSEGRP zones */
-
- /*
- * Initialise scheduler resources.
- * Add scheduler specific parts to proc, ksegrp, thread as needed.
- */
-#else
/*
* Initialize thread and process structures.
*/
@@ -404,7 +383,6 @@ proc0_init(void *dummy __unused)
* Initialise scheduler resources.
* Add scheduler specific parts to proc, thread as needed.
*/
-#endif
schedinit(); /* scheduler gets its house in order */
/*
* Initialize sleep queue hash table
@@ -440,15 +418,9 @@ proc0_init(void *dummy __unused)
STAILQ_INIT(&p->p_ktr);
p->p_nice = NZERO;
td->td_state = TDS_RUNNING;
-#ifdef KSE
- kg->kg_pri_class = PRI_TIMESHARE;
- kg->kg_user_pri = PUSER;
- kg->kg_base_user_pri = PUSER;
-#else
td->td_pri_class = PRI_TIMESHARE;
td->td_user_pri = PUSER;
td->td_base_user_pri = PUSER;
-#endif
td->td_priority = PVM;
td->td_base_pri = PUSER;
td->td_oncpu = 0;
@@ -758,11 +730,7 @@ kick_init(const void *udata __unused)
td = FIRST_THREAD_IN_PROC(initproc);
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td);
-#ifdef KSE
- setrunqueue(td, SRQ_BORING); /* XXXKSE */
-#else
setrunqueue(td, SRQ_BORING);
-#endif
mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index cd98bdd..a3732a8 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -203,23 +203,12 @@ hardclock_cpu(int usermode)
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
sched_tick();
#ifdef KSE
+#if 0 /* for now do nothing */
if (p->p_flag & P_SA) {
- /* XXXKSE What to do? */
- } else {
- pstats = p->p_stats;
- if (usermode &&
- timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- td->td_flags |= TDF_ASTPENDING;
- }
- if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- td->td_flags |= TDF_ASTPENDING;
- }
+ /* XXXKSE What to do? Should do more. */
}
-#else
+#endif
+#endif
pstats = p->p_stats;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
@@ -232,7 +221,6 @@ hardclock_cpu(int usermode)
p->p_sflag |= PS_PROFPEND;
td->td_flags |= TDF_ASTPENDING;
}
-#endif
mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
#ifdef HWPMC_HOOKS
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 9c5597e..a12ed64 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -205,9 +205,6 @@ fork1(td, flags, pages, procp)
struct filedesc *fd;
struct filedesc_to_leader *fdtol;
struct thread *td2;
-#ifdef KSE
- struct ksegrp *kg2;
-#endif
struct sigacts *newsigacts;
int error;
@@ -477,9 +474,6 @@ again:
* then copy the section that is copied directly from the parent.
*/
td2 = FIRST_THREAD_IN_PROC(p2);
-#ifdef KSE
- kg2 = FIRST_KSEGRP_IN_PROC(p2);
-#endif
/* Allocate and switch to an alternate kstack if specified. */
if (pages != 0)
@@ -492,19 +486,11 @@ again:
__rangeof(struct proc, p_startzero, p_endzero));
bzero(&td2->td_startzero,
__rangeof(struct thread, td_startzero, td_endzero));
-#ifdef KSE
- bzero(&kg2->kg_startzero,
- __rangeof(struct ksegrp, kg_startzero, kg_endzero));
-#endif
bcopy(&p1->p_startcopy, &p2->p_startcopy,
__rangeof(struct proc, p_startcopy, p_endcopy));
bcopy(&td->td_startcopy, &td2->td_startcopy,
__rangeof(struct thread, td_startcopy, td_endcopy));
-#ifdef KSE
- bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
- __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
-#endif
td2->td_sigstk = td->td_sigstk;
td2->td_sigmask = td->td_sigmask;
@@ -526,11 +512,7 @@ again:
mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);
-#ifdef KSE
- td2->td_ucred = crhold(p2->p_ucred); /* XXXKSE */
-#else
td2->td_ucred = crhold(p2->p_ucred);
-#endif
#ifdef AUDIT
audit_proc_fork(p1, p2);
#endif
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index f66056a..9096d985 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -79,11 +79,7 @@ idle_setup(void *dummy)
td = FIRST_THREAD_IN_PROC(p);
TD_SET_CAN_RUN(td);
td->td_flags |= TDF_IDLETD;
-#ifdef KSE
- sched_class(td->td_ksegrp, PRI_IDLE);
-#else
sched_class(td, PRI_IDLE);
-#endif
sched_prio(td, PRI_MAX_IDLE);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 2aa4c47..53cbc4b 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -296,11 +296,7 @@ ithread_create(const char *name)
panic("kthread_create() failed with %d", error);
td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- td->td_ksegrp->kg_pri_class = PRI_ITHD;
-#else
- td->td_pri_class = PRI_ITHD;
-#endif
+ sched_class(td, PRI_ITHD);
TD_SET_IWAIT(td);
mtx_unlock_spin(&sched_lock);
td->td_pflags |= TDP_ITHREAD;
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index fadaa44..4d27d71 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -48,9 +48,6 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>
#ifdef KSE
-/*
- * KSEGRP related storage.
- */
static uma_zone_t upcall_zone;
/* DEBUG ONLY */
@@ -86,24 +83,24 @@ upcall_free(struct kse_upcall *ku)
}
void
-upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
+upcall_link(struct kse_upcall *ku, struct proc *p)
{
mtx_assert(&sched_lock, MA_OWNED);
- TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
- ku->ku_ksegrp = kg;
- kg->kg_numupcalls++;
+ TAILQ_INSERT_TAIL(&p->p_upcalls, ku, ku_link);
+ ku->ku_proc = p;
+ p->p_numupcalls++;
}
void
upcall_unlink(struct kse_upcall *ku)
{
- struct ksegrp *kg = ku->ku_ksegrp;
+ struct proc *p = ku->ku_proc;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
- TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
- kg->kg_numupcalls--;
+ TAILQ_REMOVE(&p->p_upcalls, ku, ku_link);
+ p->p_numupcalls--;
upcall_stash(ku);
}
@@ -305,7 +302,6 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
{
#ifdef KSE
struct proc *p;
- struct ksegrp *kg;
struct kse_upcall *ku, *ku2;
int error, count;
@@ -316,11 +312,10 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
return (EINVAL);
- kg = td->td_ksegrp;
count = 0;
/*
- * Calculate the existing non-exiting upcalls in this ksegroup.
+ * Calculate the existing non-exiting upcalls in this process.
* If we are the last upcall but there are still other threads,
* then do not exit. We need the other threads to be able to
* complete whatever they are doing.
@@ -330,12 +325,12 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
*/
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
- FOREACH_UPCALL_IN_GROUP(kg, ku2) {
+ FOREACH_UPCALL_IN_PROC(p, ku2) {
if (ku2->ku_flags & KUF_EXITING)
count++;
}
- if ((kg->kg_numupcalls - count) == 1 &&
- (kg->kg_numthreads > 1)) {
+ if ((p->p_numupcalls - count) == 1 &&
+ (p->p_numthreads > 1)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return (EDEADLK);
@@ -360,20 +355,12 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
mtx_lock_spin(&sched_lock);
upcall_remove(td);
if (p->p_numthreads != 1) {
- /*
- * If we are not the last thread, but we are the last
- * thread in this ksegrp, then by definition this is not
- * the last group and we need to clean it up as well.
- * thread_exit will clean up the kseg as needed.
- */
thread_stopped(p);
thread_exit();
/* NOTREACHED */
}
/*
* This is the last thread. Just return to the user.
- * We know that there is only one ksegrp too, as any others
- * would have been discarded in previous calls to thread_exit().
* Effectively we have left threading mode..
* The only real thing left to do is ensure that the
* scheduler sets out concurrency back to 1 as that may be a
@@ -409,7 +396,6 @@ kse_release(struct thread *td, struct kse_release_args *uap)
{
#ifdef KSE
struct proc *p;
- struct ksegrp *kg;
struct kse_upcall *ku;
struct timespec timeout;
struct timeval tv;
@@ -417,7 +403,6 @@ kse_release(struct thread *td, struct kse_release_args *uap)
int error;
p = td->td_proc;
- kg = td->td_ksegrp;
if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
return (EINVAL);
if (uap->timeout != NULL) {
@@ -452,14 +437,14 @@ kse_release(struct thread *td, struct kse_release_args *uap)
} else {
if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
((ku->ku_mflags & KMF_NOCOMPLETED) ||
- (kg->kg_completed == NULL))) {
- kg->kg_upsleeps++;
+ (p->p_completed == NULL))) {
+ p->p_upsleeps++;
td->td_kflags |= TDK_KSEREL;
- error = msleep(&kg->kg_completed, &p->p_mtx,
+ error = msleep(&p->p_completed, &p->p_mtx,
PPAUSE|PCATCH, "kserel",
(uap->timeout ? tvtohz(&tv) : 0));
td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
- kg->kg_upsleeps--;
+ p->p_upsleeps--;
}
PROC_UNLOCK(p);
}
@@ -482,7 +467,6 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
#ifdef KSE
struct proc *p;
- struct ksegrp *kg;
struct kse_upcall *ku;
struct thread *td2;
@@ -495,23 +479,18 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (uap->mbx) {
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_UPCALL_IN_GROUP(kg, ku) {
- if (ku->ku_mailbox == uap->mbx)
- break;
- }
- if (ku)
+ FOREACH_UPCALL_IN_PROC(p, ku) {
+ if (ku->ku_mailbox == uap->mbx)
break;
}
} else {
- kg = td->td_ksegrp;
- if (kg->kg_upsleeps) {
+ if (p->p_upsleeps) {
mtx_unlock_spin(&sched_lock);
- wakeup(&kg->kg_completed);
+ wakeup(&p->p_completed);
PROC_UNLOCK(p);
return (0);
}
- ku = TAILQ_FIRST(&kg->kg_upcalls);
+ ku = TAILQ_FIRST(&p->p_upcalls);
}
if (ku == NULL) {
mtx_unlock_spin(&sched_lock);
@@ -526,7 +505,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
if (!(td2->td_kflags & TDK_WAKEUP)) {
td2->td_kflags |= TDK_WAKEUP;
if (td2->td_kflags & TDK_KSEREL)
- sleepq_remove(td2, &kg->kg_completed);
+ sleepq_remove(td2, &p->p_completed);
else
sleepq_remove(td2, &p->p_siglist);
}
@@ -542,11 +521,11 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
}
/*
- * No new KSEG: first call: use current KSE, don't schedule an upcall
+ * newgroup == 0: first call: use current KSE, don't schedule an upcall
* All other situations, do allocate max new KSEs and schedule an upcall.
*
* XXX should be changed so that 'first' behaviour lasts for as long
- * as you have not made a kse in this ksegrp. i.e. as long as we do not have
+ * as you have not made a thread in this proc. i.e. as long as we do not have
* a mailbox..
*/
/* struct kse_create_args {
@@ -557,8 +536,6 @@ int
kse_create(struct thread *td, struct kse_create_args *uap)
{
#ifdef KSE
- struct ksegrp *newkg;
- struct ksegrp *kg;
struct proc *p;
struct kse_mailbox mbx;
struct kse_upcall *newku;
@@ -566,135 +543,66 @@ kse_create(struct thread *td, struct kse_create_args *uap)
struct thread *newtd;
p = td->td_proc;
- kg = td->td_ksegrp;
- if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
- return (err);
- ncpus = mp_ncpus;
- if (virtual_cpu != 0)
- ncpus = virtual_cpu;
- /*
- * If the new UTS mailbox says that this
- * will be a BOUND lwp, then it had better
- * have its thread mailbox already there.
- * In addition, this ksegrp will be limited to
- * a concurrency of 1. There is more on this later.
- */
- if (mbx.km_flags & KMF_BOUND) {
- if (mbx.km_curthread == NULL)
- return (EINVAL);
- ncpus = 1;
- } else {
- sa = TDP_SA;
- }
-
- PROC_LOCK(p);
/*
* Processes using the other threading model can't
* suddenly start calling this one
+ * XXX maybe...
*/
if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
PROC_UNLOCK(p);
return (EINVAL);
}
-
- /*
- * Limit it to NCPU upcall contexts per ksegrp in any case.
- * There is a small race here as we don't hold proclock
- * until we inc the ksegrp count, but it's not really a big problem
- * if we get one too many, but we save a proc lock.
- */
- if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) {
- PROC_UNLOCK(p);
- return (EPROCLIM);
- }
-
if (!(p->p_flag & P_SA)) {
first = 1;
p->p_flag |= P_SA|P_HADTHREADS;
}
- PROC_UNLOCK(p);
+ if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
+ return (err);
+
+ ncpus = mp_ncpus;
+ if (virtual_cpu != 0)
+ ncpus = virtual_cpu;
/*
- * Now pay attention!
- * If we are going to be bound, then we need to be either
- * a new group, or the first call ever. In either
- * case we will be creating (or be) the only thread in a group.
- * and the concurrency will be set to 1.
- * This is not quite right, as we may still make ourself
- * bound after making other ksegrps but it will do for now.
- * The library will only try do this much.
+ * If the new UTS mailbox says that this
+ * will be a BOUND lwp, then it had better
+ * have its thread mailbox already there.
*/
- if (!sa && !(uap->newgroup || first))
- return (EINVAL);
-
- if (uap->newgroup) {
- newkg = ksegrp_alloc();
- bzero(&newkg->kg_startzero,
- __rangeof(struct ksegrp, kg_startzero, kg_endzero));
- bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
- __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
- sched_init_concurrency(newkg);
+ if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) {
+ if (mbx.km_curthread == NULL)
+ return (EINVAL);
+ ncpus = 1;
+ if (!(uap->newgroup || first))
+ return (EINVAL);
+ } else {
+ sa = TDP_SA;
PROC_LOCK(p);
- if (p->p_numksegrps >= max_groups_per_proc) {
+ /*
+ * Limit it to NCPU upcall contexts per proc in any case.
+ */
+ if (p->p_numupcalls >= ncpus) {
PROC_UNLOCK(p);
- ksegrp_free(newkg);
return (EPROCLIM);
}
- ksegrp_link(newkg, p);
- mtx_lock_spin(&sched_lock);
- sched_fork_ksegrp(td, newkg);
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- } else {
/*
- * We want to make a thread in our own ksegrp.
+ * We want to make a thread (bound or unbound).
* If we are just the first call, either kind
* is ok, but if not then either we must be
* already an upcallable thread to make another,
* or a bound thread to make one of those.
* Once again, not quite right but good enough for now.. XXXKSE
+ * XXX bogus
*/
+ PROC_UNLOCK(p);
if (!first && ((td->td_pflags & TDP_SA) != sa))
return (EINVAL);
-
- newkg = kg;
+ if (p->p_numupcalls == 0) {
+ sched_set_concurrency(p, ncpus);
+ }
}
/*
- * This test is a bit "indirect".
- * It might simplify things if we made a direct way of testing
- * if a ksegrp has been worked on before.
- * In the case of a bound request and the concurrency being set to
- * one, the concurrency will already be 1 so it's just inefficient
- * but not dangerous to call this again. XXX
- */
- if (newkg->kg_numupcalls == 0) {
- /*
- * Initialize KSE group with the appropriate
- * concurrency.
- *
- * For a multiplexed group, create as as much concurrency
- * as the number of physical cpus.
- * This increases concurrency in the kernel even if the
- * userland is not MP safe and can only run on a single CPU.
- * In an ideal world, every physical cpu should execute a
- * thread. If there is enough concurrency, threads in the
- * kernel can be executed parallel on different cpus at
- * full speed without being restricted by the number of
- * upcalls the userland provides.
- * Adding more upcall structures only increases concurrency
- * in userland.
- *
- * For a bound thread group, because there is only one thread
- * in the group, we only set the concurrency for the group
- * to 1. A thread in this kind of group will never schedule
- * an upcall when blocked. This simulates pthread system
- * scope thread behaviour.
- */
- sched_set_concurrency(newkg, ncpus);
- }
- /*
* Even bound LWPs get a mailbox and an upcall to hold it.
*/
newku = upcall_alloc();
@@ -711,33 +619,38 @@ kse_create(struct thread *td, struct kse_create_args *uap)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
- if (newkg->kg_numupcalls >= ncpus) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- upcall_free(newku);
- return (EPROCLIM);
- }
+ if (sa) {
+ if( p->p_numupcalls >= ncpus) {
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ upcall_free(newku);
+ return (EPROCLIM);
+ }
- /*
- * If we are the first time, and a normal thread,
- * then transfer all the signals back to the 'process'.
- * SA threading will make a special thread to handle them.
- */
- if (first && sa) {
- sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
- &td->td_sigqueue.sq_signals);
- SIGFILLSET(td->td_sigmask);
- SIG_CANTMASK(td->td_sigmask);
+ /*
+ * If we are the first time, and a normal thread,
+ * then transfer all the signals back to the 'process'.
+ * SA threading will make a special thread to handle them.
+ */
+ if (first) {
+ sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
+ &td->td_sigqueue.sq_signals);
+ SIGFILLSET(td->td_sigmask);
+ SIG_CANTMASK(td->td_sigmask);
+ }
+ } else {
+ /* should subtract from process count (later) */
}
/*
- * Make the new upcall available to the ksegrp.
+ * Make the new upcall available to the process.
* It may or may not use it, but it's available.
*/
- upcall_link(newku, newkg);
+ upcall_link(newku, p);
PROC_UNLOCK(p);
if (mbx.km_quantum)
- newkg->kg_upquantum = max(1, mbx.km_quantum / tick);
+/* XXX should this be in the thread? */
+ p->p_upquantum = max(1, mbx.km_quantum / tick);
/*
* Each upcall structure has an owner thread, find which
@@ -745,8 +658,11 @@ kse_create(struct thread *td, struct kse_create_args *uap)
*/
if (uap->newgroup) {
/*
- * Because the new ksegrp hasn't a thread,
- * create an initial upcall thread to own it.
+ * The newgroup parameter now means
+ * "bound, non SA, system scope"
+ * It is only used for the interrupt thread at the
+ * moment I think
+ * We'll rename it later.
*/
newtd = thread_schedule_upcall(td, newku);
} else {
@@ -771,6 +687,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
/*
* Let the UTS instance know its LWPID.
* It doesn't really care. But the debugger will.
+ * XXX warning.. remember that this moves.
*/
suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);
@@ -785,6 +702,14 @@ kse_create(struct thread *td, struct kse_create_args *uap)
if (sa) {
newtd->td_pflags |= TDP_SA;
+ /*
+ * If we are starting a new thread, kick it off.
+ */
+ if (newtd != td) {
+ mtx_lock_spin(&sched_lock);
+ setrunqueue(newtd, SRQ_BORING);
+ mtx_unlock_spin(&sched_lock);
+ }
} else {
newtd->td_pflags &= ~TDP_SA;
@@ -816,17 +741,11 @@ kse_create(struct thread *td, struct kse_create_args *uap)
_PRELE(p);
}
PROC_UNLOCK(p);
+ mtx_lock_spin(&sched_lock);
+ setrunqueue(newtd, SRQ_BORING);
+ mtx_unlock_spin(&sched_lock);
}
}
-
- /*
- * If we are starting a new thread, kick it off.
- */
- if (newtd != td) {
- mtx_lock_spin(&sched_lock);
- setrunqueue(newtd, SRQ_BORING);
- mtx_unlock_spin(&sched_lock);
- }
return (0);
#else /* !KSE */
return (EOPNOTSUPP);
@@ -886,20 +805,18 @@ kse_GC(void)
/*
* Store the thread context in the UTS's mailbox.
* then add the mailbox at the head of a list we are building in user space.
- * The list is anchored in the ksegrp structure.
+ * The list is anchored in the proc structure.
*/
int
thread_export_context(struct thread *td, int willexit)
{
struct proc *p;
- struct ksegrp *kg;
uintptr_t mbx;
void *addr;
int error = 0, sig;
mcontext_t mc;
p = td->td_proc;
- kg = td->td_ksegrp;
/*
* Post sync signal, or process SIGKILL and SIGSTOP.
@@ -940,14 +857,14 @@ thread_export_context(struct thread *td, int willexit)
* entry into this one
*/
for (;;) {
- mbx = (uintptr_t)kg->kg_completed;
+ mbx = (uintptr_t)p->p_completed;
if (suword(addr, mbx)) {
error = EFAULT;
goto bad;
}
PROC_LOCK(p);
- if (mbx == (uintptr_t)kg->kg_completed) {
- kg->kg_completed = td->td_mailbox;
+ if (mbx == (uintptr_t)p->p_completed) {
+ p->p_completed = td->td_mailbox;
/*
* The thread context may be taken away by
* other upcall threads when we unlock
@@ -970,19 +887,18 @@ bad:
}
/*
- * Take the list of completed mailboxes for this KSEGRP and put them on this
+ * Take the list of completed mailboxes for this Process and put them on this
* upcall's mailbox as it's the next one going up.
*/
static int
-thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
+thread_link_mboxes(struct proc *p, struct kse_upcall *ku)
{
- struct proc *p = kg->kg_proc;
void *addr;
uintptr_t mbx;
addr = (void *)(&ku->ku_mailbox->km_completed);
for (;;) {
- mbx = (uintptr_t)kg->kg_completed;
+ mbx = (uintptr_t)p->p_completed;
if (suword(addr, mbx)) {
PROC_LOCK(p);
psignal(p, SIGSEGV);
@@ -990,8 +906,8 @@ thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
return (EFAULT);
}
PROC_LOCK(p);
- if (mbx == (uintptr_t)kg->kg_completed) {
- kg->kg_completed = NULL;
+ if (mbx == (uintptr_t)p->p_completed) {
+ p->p_completed = NULL;
PROC_UNLOCK(p);
break;
}
@@ -1109,7 +1025,7 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
*/
bcopy(&td->td_startcopy, &td2->td_startcopy,
__rangeof(struct thread, td_startcopy, td_endcopy));
- thread_link(td2, ku->ku_ksegrp);
+ thread_link(td2, ku->ku_proc);
/* inherit parts of blocked thread's context as a good template */
cpu_set_upcall(td2, td);
/* Let the new thread become owner of the upcall */
@@ -1210,7 +1126,6 @@ void
thread_user_enter(struct thread *td)
{
struct proc *p = td->td_proc;
- struct ksegrp *kg;
struct kse_upcall *ku;
struct kse_thr_mailbox *tmbx;
uint32_t flags;
@@ -1233,7 +1148,6 @@ thread_user_enter(struct thread *td)
* note where our mailbox is.
*/
- kg = td->td_ksegrp;
ku = td->td_upcall;
KASSERT(ku != NULL, ("no upcall owned"));
@@ -1291,10 +1205,9 @@ int
thread_userret(struct thread *td, struct trapframe *frame)
{
struct kse_upcall *ku;
- struct ksegrp *kg, *kg2;
struct proc *p;
struct timespec ts;
- int error = 0, upcalls, uts_crit;
+ int error = 0, uts_crit;
/* Nothing to do with bound thread */
if (!(td->td_pflags & TDP_SA))
@@ -1311,7 +1224,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
}
p = td->td_proc;
- kg = td->td_ksegrp;
ku = td->td_upcall;
/*
@@ -1323,9 +1235,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
if (TD_CAN_UNBIND(td)) {
td->td_pflags &= ~TDP_CAN_UNBIND;
if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
- (kg->kg_completed == NULL) &&
+ (p->p_completed == NULL) &&
(ku->ku_flags & KUF_DOUPCALL) == 0 &&
- (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
+ (p->p_upquantum && ticks < p->p_nextupcall)) {
nanotime(&ts);
error = copyout(&ts,
(caddr_t)&ku->ku_mailbox->km_timeofday,
@@ -1346,8 +1258,8 @@ thread_userret(struct thread *td, struct trapframe *frame)
} else if (td->td_mailbox && (ku == NULL)) {
thread_export_context(td, 1);
PROC_LOCK(p);
- if (kg->kg_upsleeps)
- wakeup(&kg->kg_completed);
+ if (p->p_upsleeps)
+ wakeup(&p->p_completed);
WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
"thread exiting in userret");
sigqueue_flush(&td->td_sigqueue);
@@ -1366,14 +1278,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
mtx_lock_spin(&sched_lock);
p->p_maxthrwaits++;
while (p->p_numthreads > max_threads_per_proc) {
- upcalls = 0;
- FOREACH_KSEGRP_IN_PROC(p, kg2) {
- if (kg2->kg_numupcalls == 0)
- upcalls++;
- else
- upcalls += kg2->kg_numupcalls;
- }
- if (upcalls >= max_threads_per_proc)
+ if (p->p_numupcalls >= max_threads_per_proc)
break;
mtx_unlock_spin(&sched_lock);
if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
@@ -1391,7 +1296,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
if (td->td_pflags & TDP_UPCALLING) {
uts_crit = 0;
- kg->kg_nextupcall = ticks + kg->kg_upquantum;
+ p->p_nextupcall = ticks + p->p_upquantum;
/*
* There is no more work to do and we are going to ride
* this thread up to userland as an upcall.
@@ -1436,7 +1341,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
* this KSE's mailbox.
*/
if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
- (error = thread_link_mboxes(kg, ku)) != 0)
+ (error = thread_link_mboxes(p, ku)) != 0)
goto out;
}
if (!uts_crit) {
@@ -1479,7 +1384,6 @@ out:
void
thread_continued(struct proc *p)
{
- struct ksegrp *kg;
struct kse_upcall *ku;
struct thread *td;
@@ -1490,18 +1394,13 @@ thread_continued(struct proc *p)
return;
if (p->p_flag & P_TRACED) {
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- td = TAILQ_FIRST(&kg->kg_threads);
- if (td == NULL)
- continue;
- /* not a SA group, nothing to do */
- if (!(td->td_pflags & TDP_SA))
- continue;
- FOREACH_UPCALL_IN_GROUP(kg, ku) {
+ td = TAILQ_FIRST(&p->p_threads);
+ if (td && (td->td_pflags & TDP_SA)) {
+ FOREACH_UPCALL_IN_PROC(p, ku) {
mtx_lock_spin(&sched_lock);
ku->ku_flags |= KUF_DOUPCALL;
mtx_unlock_spin(&sched_lock);
- wakeup(&kg->kg_completed);
+ wakeup(&p->p_completed);
}
}
}
diff --git a/sys/kern/kern_poll.c b/sys/kern/kern_poll.c
index 67338b0..8e87607 100644
--- a/sys/kern/kern_poll.c
+++ b/sys/kern/kern_poll.c
@@ -581,11 +581,7 @@ poll_idle(void)
rtp.prio = RTP_PRIO_MAX; /* lowest priority */
rtp.type = RTP_PRIO_IDLE;
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- rtp_to_pri(&rtp, td->td_ksegrp);
-#else
rtp_to_pri(&rtp, td);
-#endif
mtx_unlock_spin(&sched_lock);
for (;;) {
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 2401b6b..0eb4407 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -141,9 +141,6 @@ proc_dtor(void *mem, int size, void *arg)
{
struct proc *p;
struct thread *td;
-#if defined(INVARIANTS) && defined(KSE)
- struct ksegrp *kg;
-#endif
/* INVARIANTS checks go here */
p = (struct proc *)mem;
@@ -151,14 +148,7 @@ proc_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
-#ifdef KSE
- KASSERT((p->p_numksegrps == 1), ("free proc with > 1 ksegrp"));
-#endif
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
-#ifdef KSE
- kg = FIRST_KSEGRP_IN_PROC(p);
- KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
-#endif
KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
@@ -181,25 +171,15 @@ proc_init(void *mem, int size, int flags)
{
struct proc *p;
struct thread *td;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
td = thread_alloc();
-#ifdef KSE
- kg = ksegrp_alloc();
-#endif
bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
p->p_stats = pstats_alloc();
-#ifdef KSE
- proc_linkup(p, kg, td);
- sched_newproc(p, kg, td);
-#else
proc_linkup(p, td);
-#endif
+ sched_newproc(p, td);
return (0);
}
@@ -215,9 +195,6 @@ proc_fini(void *mem, int size)
p = (struct proc *)mem;
pstats_free(p->p_stats);
-#ifdef KSE
- ksegrp_free(FIRST_KSEGRP_IN_PROC(p));
-#endif
thread_free(FIRST_THREAD_IN_PROC(p));
mtx_destroy(&p->p_mtx);
if (p->p_ksi != NULL)
@@ -782,9 +759,6 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
{
-#ifdef KSE
- struct ksegrp *kg;
-#endif
struct proc *p;
p = td->td_proc;
@@ -824,15 +798,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_stat = SIDL;
}
-#ifdef KSE
- kg = td->td_ksegrp;
-
- /* things in the KSE GROUP */
- kp->ki_estcpu = kg->kg_estcpu;
- kp->ki_slptime = kg->kg_slptime;
- kp->ki_pri.pri_user = kg->kg_user_pri;
- kp->ki_pri.pri_class = kg->kg_pri_class;
-#endif
/* Things in the thread */
kp->ki_wchan = td->td_wchan;
kp->ki_pri.pri_level = td->td_priority;
@@ -845,12 +810,10 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
kp->ki_pctcpu = sched_pctcpu(td);
-#ifndef KSE
kp->ki_estcpu = td->td_estcpu;
kp->ki_slptime = td->td_slptime;
kp->ki_pri.pri_class = td->td_pri_class;
kp->ki_pri.pri_user = td->td_user_pri;
-#endif
/* We can't get this anymore but ps etc never used it anyway. */
kp->ki_rqindex = 0;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index f885953..e351d18 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -319,11 +319,7 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
else
td1 = thread_find(p, uap->lwpid);
if (td1 != NULL)
-#ifdef KSE
- pri_to_rtp(td1->td_ksegrp, &rtp);
-#else
pri_to_rtp(td1, &rtp);
-#endif
else
error = ESRCH;
mtx_unlock_spin(&sched_lock);
@@ -359,11 +355,7 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
else
td1 = thread_find(p, uap->lwpid);
if (td1 != NULL)
-#ifdef KSE
- error = rtp_to_pri(&rtp, td1->td_ksegrp);
-#else
error = rtp_to_pri(&rtp, td1);
-#endif
else
error = ESRCH;
mtx_unlock_spin(&sched_lock);
@@ -396,11 +388,7 @@ rtprio(td, uap)
{
struct proc *curp;
struct proc *p;
-#ifdef KSE
- struct ksegrp *kg;
-#else
struct thread *tdp;
-#endif
struct rtprio rtp;
int cierror, error;
@@ -436,23 +424,14 @@ rtprio(td, uap)
* as leaving it zero.
*/
if (uap->pid == 0) {
-#ifdef KSE
- pri_to_rtp(td->td_ksegrp, &rtp);
-#else
pri_to_rtp(td, &rtp);
-#endif
} else {
struct rtprio rtp2;
rtp.type = RTP_PRIO_IDLE;
rtp.prio = RTP_PRIO_MAX;
-#ifdef KSE
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- pri_to_rtp(kg, &rtp2);
-#else
FOREACH_THREAD_IN_PROC(p, tdp) {
pri_to_rtp(tdp, &rtp2);
-#endif
if (rtp2.type < rtp.type ||
(rtp2.type == rtp.type &&
rtp2.prio < rtp.prio)) {
@@ -493,39 +472,19 @@ rtprio(td, uap)
}
}
-#ifdef KSE
- /*
- * If we are setting our own priority, set just our
- * KSEGRP but if we are doing another process,
- * do all the groups on that process. If we
- * specify our own pid we do the latter.
- */
-#else
/*
* If we are setting our own priority, set just our
* thread but if we are doing another process,
* do all the threads on that process. If we
* specify our own pid we do the latter.
*/
-#endif
mtx_lock_spin(&sched_lock);
if (uap->pid == 0) {
-#ifdef KSE
- error = rtp_to_pri(&rtp, td->td_ksegrp);
-#else
error = rtp_to_pri(&rtp, td);
-#endif
} else {
-#ifdef KSE
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- if ((error = rtp_to_pri(&rtp, kg)) != 0) {
- break;
- }
-#else
FOREACH_THREAD_IN_PROC(p, td) {
if ((error = rtp_to_pri(&rtp, td)) != 0)
break;
-#endif
}
}
mtx_unlock_spin(&sched_lock);
@@ -539,11 +498,7 @@ rtprio(td, uap)
}
int
-#ifdef KSE
-rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
-#else
rtp_to_pri(struct rtprio *rtp, struct thread *td)
-#endif
{
u_char newpri;
@@ -552,87 +507,43 @@ rtp_to_pri(struct rtprio *rtp, struct thread *td)
return (EINVAL);
switch (RTP_PRIO_BASE(rtp->type)) {
case RTP_PRIO_REALTIME:
-#ifdef KSE
newpri = PRI_MIN_REALTIME + rtp->prio;
-#else
- newpri = PRI_MIN_REALTIME + rtp->prio;
-#endif
break;
case RTP_PRIO_NORMAL:
-#ifdef KSE
newpri = PRI_MIN_TIMESHARE + rtp->prio;
-#else
- newpri = PRI_MIN_TIMESHARE + rtp->prio;
-#endif
break;
case RTP_PRIO_IDLE:
-#ifdef KSE
newpri = PRI_MIN_IDLE + rtp->prio;
-#else
- newpri = PRI_MIN_IDLE + rtp->prio;
-#endif
break;
default:
return (EINVAL);
}
-#ifdef KSE
- sched_class(kg, rtp->type);
- sched_user_prio(kg, newpri);
- if (curthread->td_ksegrp == kg) {
- sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
- }
-#else
sched_class(td, rtp->type); /* XXX fix */
sched_user_prio(td, newpri);
if (curthread == td)
sched_prio(curthread, td->td_user_pri); /* XXX dubious */
-#endif
return (0);
}
void
-#ifdef KSE
-pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
-#else
pri_to_rtp(struct thread *td, struct rtprio *rtp)
-#endif
{
mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
- switch (PRI_BASE(kg->kg_pri_class)) {
-#else
switch (PRI_BASE(td->td_pri_class)) {
-#endif
case PRI_REALTIME:
-#ifdef KSE
- rtp->prio = kg->kg_base_user_pri - PRI_MIN_REALTIME;
-#else
rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
-#endif
break;
case PRI_TIMESHARE:
-#ifdef KSE
- rtp->prio = kg->kg_base_user_pri - PRI_MIN_TIMESHARE;
-#else
rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
-#endif
break;
case PRI_IDLE:
-#ifdef KSE
- rtp->prio = kg->kg_base_user_pri - PRI_MIN_IDLE;
-#else
rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
-#endif
break;
default:
break;
}
-#ifdef KSE
- rtp->type = kg->kg_pri_class;
-#else
rtp->type = td->td_pri_class;
-#endif
}
#if defined(COMPAT_43)
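
With the kern_resource.c changes above, rtp_to_pri() and pri_to_rtp() now convert directly between struct rtprio and per-thread priority fields, with no ksegrp layer in between. The sketch below is a self-contained model of that conversion shape, not the kernel code: the thread structure is a stub, and the RTP_PRIO_* / PRI_MIN_* constants are local placeholder values (the real ones come from sys/rtprio.h and sys/priority.h and are not reproduced here).

/*
 * Illustrative round-trip through the now thread-based rtprio conversion.
 */
#include <stdio.h>

#define RTP_PRIO_REALTIME	2	/* placeholder value for this sketch */
#define RTP_PRIO_NORMAL		3	/* placeholder value for this sketch */
#define RTP_PRIO_IDLE		4	/* placeholder value for this sketch */

#define PRI_MIN_REALTIME	128	/* placeholder base */
#define PRI_MIN_TIMESHARE	160	/* placeholder base */
#define PRI_MIN_IDLE		224	/* placeholder base */

struct rtprio {
	unsigned short type;
	unsigned short prio;
};

/* Minimal stand-in for the thread fields the new code reads and writes. */
struct thread_stub {
	int td_pri_class;
	int td_base_user_pri;
};

/* rtprio -> thread priority, mirroring the shape of the new rtp_to_pri(). */
static int
rtp_to_pri_sketch(const struct rtprio *rtp, struct thread_stub *td)
{
	int newpri;

	switch (rtp->type) {
	case RTP_PRIO_REALTIME:
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (-1);		/* EINVAL in the kernel version */
	}
	td->td_pri_class = rtp->type;	/* sched_class(td, ...) in the kernel */
	td->td_base_user_pri = newpri;	/* sched_user_prio(td, ...) in the kernel */
	return (0);
}

/* thread priority -> rtprio, mirroring the shape of the new pri_to_rtp(). */
static void
pri_to_rtp_sketch(const struct thread_stub *td, struct rtprio *rtp)
{
	switch (td->td_pri_class) {
	case RTP_PRIO_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case RTP_PRIO_NORMAL:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case RTP_PRIO_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	}
	rtp->type = td->td_pri_class;
}

int
main(void)
{
	struct thread_stub td = { 0, 0 };
	struct rtprio in = { RTP_PRIO_REALTIME, 5 };
	struct rtprio out = { 0, 0 };

	rtp_to_pri_sketch(&in, &td);
	pri_to_rtp_sketch(&td, &out);
	printf("type %d prio %d round-trips through td_base_user_pri %d\n",
	    out.type, out.prio, td.td_base_user_pri);
	return (0);
}
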
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index 631dfa2..ba288ac 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -430,11 +430,7 @@ uio_yield(void)
td = curthread;
mtx_lock_spin(&sched_lock);
DROP_GIANT();
-#ifdef KSE
- sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */
-#else
sched_prio(td, td->td_user_pri);
-#endif
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 1521143..039c781 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -24,68 +24,6 @@
* SUCH DAMAGE.
*/
-#ifdef KSE
-/***
-Here is the logic..
-
-If there are N processors, then there are at most N KSEs (kernel
-schedulable entities) working to process threads that belong to a
-KSEGROUP (kg). If there are X of these KSEs actually running at the
-moment in question, then there are at most M (N-X) of these KSEs on
-the run queue, as running KSEs are not on the queue.
-
-Runnable threads are queued off the KSEGROUP in priority order.
-If there are M or more threads runnable, the top M threads
-(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
-their priority from those threads and are put on the run queue.
-
-The last thread that had a priority high enough to have a KSE associated
-with it, AND IS ON THE RUN QUEUE is pointed to by
-kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
-assigned as all the available KSEs are activly running, or because there
-are no threads queued, that pointer is NULL.
-
-When a KSE is removed from the run queue to become runnable, we know
-it was associated with the highest priority thread in the queue (at the head
-of the queue). If it is also the last assigned we know M was 1 and must
-now be 0. Since the thread is no longer queued that pointer must be
-removed from it. Since we know there were no more KSEs available,
-(M was 1 and is now 0) and since we are not FREEING our KSE
-but using it, we know there are STILL no more KSEs available, we can prove
-that the next thread in the ksegrp list will not have a KSE to assign to
-it, so we can show that the pointer must be made 'invalid' (NULL).
-
-The pointer exists so that when a new thread is made runnable, it can
-have its priority compared with the last assigned thread to see if
-it should 'steal' its KSE or not.. i.e. is it 'earlier'
-on the list than that thread or later.. If it's earlier, then the KSE is
-removed from the last assigned (which is now not assigned a KSE)
-and reassigned to the new thread, which is placed earlier in the list.
-The pointer is then backed up to the previous thread (which may or may not
-be the new thread).
-
-When a thread sleeps or is removed, the KSE becomes available and if there
-are queued threads that are not assigned KSEs, the highest priority one of
-them is assigned the KSE, which is then placed back on the run queue at
-the approipriate place, and the kg->kg_last_assigned pointer is adjusted down
-to point to it.
-
-The following diagram shows 2 KSEs and 3 threads from a single process.
-
- RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads)
- \ \____
- \ \
- KSEGROUP---thread--thread--thread (queued in priority order)
- \ /
- \_______________/
- (last_assigned)
-
-The result of this scheme is that the M available KSEs are always
-queued at the priorities they have inherrited from the M highest priority
-threads for that KSEGROUP. If this situation changes, the KSEs are
-reassigned to keep this true.
-***/
-#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -126,8 +64,6 @@ __FBSDID("$FreeBSD$");
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
-#define td_kse td_sched
-
/*
* kern.sched.preemption allows user space to determine if preemption support
* is compiled in or not. It is not currently a boot or runtime flag that
@@ -144,79 +80,40 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
/************************************************************************
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
-#ifdef KSE
-/*
- * Select the KSE that will be run next. From that find the thread, and
- * remove it from the KSEGRP's run queue. If there is thread clustering,
- * this will be what does it.
- */
-#else
/*
* Select the thread that will be run next.
*/
-#endif
struct thread *
choosethread(void)
{
-#ifdef KSE
- struct kse *ke;
-#endif
+ struct td_sched *ts;
struct thread *td;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
/* Shutting down, run idlethread on AP's */
td = PCPU_GET(idlethread);
-#ifdef KSE
- ke = td->td_kse;
-#endif
+ ts = td->td_sched;
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
-#ifdef KSE
- ke->ke_flags |= KEF_DIDRUN;
-#else
- td->td_kse->ke_flags |= KEF_DIDRUN;
-#endif
+ ts->ts_flags |= TSF_DIDRUN;
TD_SET_RUNNING(td);
return (td);
}
#endif
retry:
-#ifdef KSE
- ke = sched_choose();
- if (ke) {
- td = ke->ke_thread;
- KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
- kg = ke->ke_ksegrp;
- if (td->td_proc->p_flag & P_HADTHREADS) {
- if (kg->kg_last_assigned == td) {
- kg->kg_last_assigned = TAILQ_PREV(td,
- threadqueue, td_runq);
- }
- TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
- }
-#else
- td = sched_choose();
- if (td) {
-#endif
+ ts = sched_choose();
+ if (ts) {
+ td = ts->ts_thread;
CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
td, td->td_priority);
} else {
/* Simulate runq_choose() having returned the idle thread */
td = PCPU_GET(idlethread);
-#ifdef KSE
- ke = td->td_kse;
-#endif
+ ts = td->td_sched;
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
}
-#ifdef KSE
- ke->ke_flags |= KEF_DIDRUN;
-#else
- td->td_kse->ke_flags |= KEF_DIDRUN;
-#endif
+ ts->ts_flags |= TSF_DIDRUN;
/*
* If we are in panic, only allow system threads,
@@ -233,91 +130,24 @@ retry:
return (td);
}
-#ifdef KSE
-/*
- * Given a surplus system slot, try assign a new runnable thread to it.
- * Called from:
- * sched_thread_exit() (local)
- * sched_switch() (local)
- * sched_thread_exit() (local)
- * remrunqueue() (local) (not at the moment)
- */
-static void
-slot_fill(struct ksegrp *kg)
-{
- struct thread *td;
-
- mtx_assert(&sched_lock, MA_OWNED);
- while (kg->kg_avail_opennings > 0) {
- /*
- * Find the first unassigned thread
- */
- if ((td = kg->kg_last_assigned) != NULL)
- td = TAILQ_NEXT(td, td_runq);
- else
- td = TAILQ_FIRST(&kg->kg_runq);
-
- /*
- * If we found one, send it to the system scheduler.
- */
- if (td) {
- kg->kg_last_assigned = td;
- sched_add(td, SRQ_YIELDING);
- CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
- } else {
- /* no threads to use up the slots. quit now */
- break;
- }
- }
-}
-#ifdef SCHED_4BSD
+#if 0
/*
- * Remove a thread from its KSEGRP's run queue.
- * This in turn may remove it from a KSE if it was already assigned
- * to one, possibly causing a new thread to be assigned to the KSE
- * and the KSE getting a new priority.
+ * currently not used.. threads remove themselves from the
+ * run queue by running.
*/
static void
remrunqueue(struct thread *td)
{
- struct thread *td2, *td3;
- struct ksegrp *kg;
- struct kse *ke;
-
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
- kg = td->td_ksegrp;
- ke = td->td_kse;
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
TD_SET_CAN_RUN(td);
- /*
- * If it is not a threaded process, take the shortcut.
- */
- if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
- /* remve from sys run queue and free up a slot */
- sched_rem(td);
- return;
- }
- td3 = TAILQ_PREV(td, threadqueue, td_runq);
- TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
- if (ke->ke_state == KES_ONRUNQ) {
- /*
- * This thread has been assigned to the system run queue.
- * We need to dissociate it and try assign the
- * KSE to the next available thread. Then, we should
- * see if we need to move the KSE in the run queues.
- */
- sched_rem(td);
- td2 = kg->kg_last_assigned;
- KASSERT((td2 != NULL), ("last assigned has wrong value"));
- if (td2 == td)
- kg->kg_last_assigned = td3;
- /* slot_fill(kg); */ /* will replace it with another */
- }
+ /* remove from sys run queue */
+ sched_rem(td);
+ return;
}
#endif
-#endif
/*
* Change the priority of a thread that is on the run queue.
@@ -325,229 +155,32 @@ remrunqueue(struct thread *td)
void
adjustrunqueue( struct thread *td, int newpri)
{
-#ifdef KSE
- struct ksegrp *kg;
-#endif
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
- ke = td->td_kse;
+ ts = td->td_sched;
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
-#ifdef KSE
- /*
- * If it is not a threaded process, take the shortcut.
- */
- if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
- /* We only care about the kse in the run queue. */
- td->td_priority = newpri;
-#ifndef SCHED_CORE
- if (ke->ke_rqindex != (newpri / RQ_PPQ))
-#else
- if (ke->ke_rqindex != newpri)
-#endif
- {
- sched_rem(td);
- sched_add(td, SRQ_BORING);
- }
- return;
- }
-
- /* It is a threaded process */
- kg = td->td_ksegrp;
- if (ke->ke_state == KES_ONRUNQ
-#ifdef SCHED_ULE
- || ((ke->ke_flags & KEF_ASSIGNED) != 0 &&
- (ke->ke_flags & KEF_REMOVED) == 0)
-#endif
- ) {
- if (kg->kg_last_assigned == td) {
- kg->kg_last_assigned =
- TAILQ_PREV(td, threadqueue, td_runq);
- }
- sched_rem(td);
- }
- TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
- TD_SET_CAN_RUN(td);
- td->td_priority = newpri;
- setrunqueue(td, SRQ_BORING);
-#else
- /* We only care about the kse in the run queue. */
+ /* We only care about the td_sched in the run queue. */
td->td_priority = newpri;
#ifndef SCHED_CORE
- if (ke->ke_rqindex != (newpri / RQ_PPQ))
+ if (ts->ts_rqindex != (newpri / RQ_PPQ))
#else
- if (ke->ke_rqindex != newpri)
+ if (ts->ts_rqindex != newpri)
#endif
{
sched_rem(td);
sched_add(td, SRQ_BORING);
}
-#endif
-}
-
-#ifdef KSE
-/*
- * This function is called when a thread is about to be put on a
- * ksegrp run queue because it has been made runnable or its
- * priority has been adjusted and the ksegrp does not have a
- * free kse slot. It determines if a thread from the same ksegrp
- * should be preempted. If so, it tries to switch threads
- * if the thread is on the same cpu or notifies another cpu that
- * it should switch threads.
- */
-
-static void
-maybe_preempt_in_ksegrp(struct thread *td)
-#if !defined(SMP)
-{
- struct thread *running_thread;
-
- mtx_assert(&sched_lock, MA_OWNED);
- running_thread = curthread;
-
- if (running_thread->td_ksegrp != td->td_ksegrp)
- return;
-
- if (td->td_priority >= running_thread->td_priority)
- return;
-#ifdef PREEMPTION
-#ifndef FULL_PREEMPTION
- if (td->td_priority > PRI_MAX_ITHD) {
- running_thread->td_flags |= TDF_NEEDRESCHED;
- return;
- }
-#endif /* FULL_PREEMPTION */
-
- if (running_thread->td_critnest > 1)
- running_thread->td_owepreempt = 1;
- else
- mi_switch(SW_INVOL, NULL);
-
-#else /* PREEMPTION */
- running_thread->td_flags |= TDF_NEEDRESCHED;
-#endif /* PREEMPTION */
- return;
}
-#else /* SMP */
-{
- struct thread *running_thread;
- int worst_pri;
- struct ksegrp *kg;
- cpumask_t cpumask,dontuse;
- struct pcpu *pc;
- struct pcpu *best_pcpu;
- struct thread *cputhread;
-
- mtx_assert(&sched_lock, MA_OWNED);
-
- running_thread = curthread;
-
-#if !defined(KSEG_PEEMPT_BEST_CPU)
- if (running_thread->td_ksegrp != td->td_ksegrp) {
-#endif
- kg = td->td_ksegrp;
-
- /* if someone is ahead of this thread, wait our turn */
- if (td != TAILQ_FIRST(&kg->kg_runq))
- return;
-
- worst_pri = td->td_priority;
- best_pcpu = NULL;
- dontuse = stopped_cpus | idle_cpus_mask;
-
- /*
- * Find a cpu with the worst priority that runs at thread from
- * the same ksegrp - if multiple exist give first the last run
- * cpu and then the current cpu priority
- */
-
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
- cpumask = pc->pc_cpumask;
- cputhread = pc->pc_curthread;
-
- if ((cpumask & dontuse) ||
- cputhread->td_ksegrp != kg)
- continue;
-
- if (cputhread->td_priority > worst_pri) {
- worst_pri = cputhread->td_priority;
- best_pcpu = pc;
- continue;
- }
-
- if (cputhread->td_priority == worst_pri &&
- best_pcpu != NULL &&
- (td->td_lastcpu == pc->pc_cpuid ||
- (PCPU_GET(cpumask) == cpumask &&
- td->td_lastcpu != best_pcpu->pc_cpuid)))
- best_pcpu = pc;
- }
-
- /* Check if we need to preempt someone */
- if (best_pcpu == NULL)
- return;
-
-#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
-#if !defined(FULL_PREEMPTION)
- if (td->td_priority <= PRI_MAX_ITHD)
-#endif /* ! FULL_PREEMPTION */
- {
- ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
- return;
- }
-#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
-
- if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
- best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
- ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
- return;
- }
-#if !defined(KSEG_PEEMPT_BEST_CPU)
- }
-#endif
-
- if (td->td_priority >= running_thread->td_priority)
- return;
-#ifdef PREEMPTION
-
-#if !defined(FULL_PREEMPTION)
- if (td->td_priority > PRI_MAX_ITHD) {
- running_thread->td_flags |= TDF_NEEDRESCHED;
- }
-#endif /* ! FULL_PREEMPTION */
-
- if (running_thread->td_critnest > 1)
- running_thread->td_owepreempt = 1;
- else
- mi_switch(SW_INVOL, NULL);
-
-#else /* PREEMPTION */
- running_thread->td_flags |= TDF_NEEDRESCHED;
-#endif /* PREEMPTION */
- return;
-}
-#endif /* !SMP */
-
-
-int limitcount;
-#endif
void
setrunqueue(struct thread *td, int flags)
{
-#ifdef KSE
- struct ksegrp *kg;
- struct thread *td2;
- struct thread *tda;
- CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
- td, td->td_ksegrp, td->td_proc->p_pid);
-#else
CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
td, td->td_proc->p_pid);
-#endif
CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
@@ -557,101 +190,7 @@ setrunqueue(struct thread *td, int flags)
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("setrunqueue: bad thread state"));
TD_SET_RUNQ(td);
-#ifdef KSE
- kg = td->td_ksegrp;
- if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
- /*
- * Common path optimisation: Only one of everything
- * and the KSE is always already attached.
- * Totally ignore the ksegrp run queue.
- */
- if (kg->kg_avail_opennings != 1) {
- if (limitcount < 1) {
- limitcount++;
- printf("pid %d: corrected slot count (%d->1)\n",
- td->td_proc->p_pid, kg->kg_avail_opennings);
-
- }
- kg->kg_avail_opennings = 1;
- }
- sched_add(td, flags);
- return;
- }
-
- /*
- * If the concurrency has reduced, and we would go in the
- * assigned section, then keep removing entries from the
- * system run queue, until we are not in that section
- * or there is room for us to be put in that section.
- * What we MUST avoid is the case where there are threads of less
- * priority than the new one scheduled, but it can not
- * be scheduled itself. That would lead to a non contiguous set
- * of scheduled threads, and everything would break.
- */
- tda = kg->kg_last_assigned;
- while ((kg->kg_avail_opennings <= 0) &&
- (tda && (tda->td_priority > td->td_priority))) {
- /*
- * None free, but there is one we can commandeer.
- */
- CTR2(KTR_RUNQ,
- "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
- sched_rem(tda);
- tda = kg->kg_last_assigned =
- TAILQ_PREV(tda, threadqueue, td_runq);
- }
-
- /*
- * Add the thread to the ksegrp's run queue at
- * the appropriate place.
- */
- TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
- if (td2->td_priority > td->td_priority) {
- TAILQ_INSERT_BEFORE(td2, td, td_runq);
- break;
- }
- }
- if (td2 == NULL) {
- /* We ran off the end of the TAILQ or it was empty. */
- TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
- }
-
- /*
- * If we have a slot to use, then put the thread on the system
- * run queue and if needed, readjust the last_assigned pointer.
- * it may be that we need to schedule something anyhow
- * even if the availabel slots are -ve so that
- * all the items < last_assigned are scheduled.
- */
- if (kg->kg_avail_opennings > 0) {
- if (tda == NULL) {
- /*
- * No pre-existing last assigned so whoever is first
- * gets the slot.. (maybe us)
- */
- td2 = TAILQ_FIRST(&kg->kg_runq);
- kg->kg_last_assigned = td2;
- } else if (tda->td_priority > td->td_priority) {
- td2 = td;
- } else {
- /*
- * We are past last_assigned, so
- * give the next slot to whatever is next,
- * which may or may not be us.
- */
- td2 = TAILQ_NEXT(tda, td_runq);
- kg->kg_last_assigned = td2;
- }
- sched_add(td2, flags);
- } else {
- CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
- td, td->td_ksegrp, td->td_proc->p_pid);
- if ((flags & SRQ_YIELDING) == 0)
- maybe_preempt_in_ksegrp(td);
- }
-#else
sched_add(td, flags);
-#endif
}
/*
@@ -737,14 +276,14 @@ maybe_preempt(struct thread *td)
* to the new thread.
*/
ctd = curthread;
- KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
+ KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
("thread has no (or wrong) sched-private part."));
KASSERT((td->td_inhibitors == 0),
("maybe_preempt: trying to run inhibitted thread"));
pri = td->td_priority;
cpri = ctd->td_priority;
if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
- TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
+ TD_IS_INHIBITED(ctd) || td->td_sched->ts_state != TSS_THREAD)
return (0);
#ifndef FULL_PREEMPTION
if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
@@ -762,25 +301,7 @@ maybe_preempt(struct thread *td)
* Thread is runnable but not yet put on system run queue.
*/
MPASS(TD_ON_RUNQ(td));
- MPASS(td->td_sched->ke_state != KES_ONRUNQ);
-#ifdef KSE
- if (td->td_proc->p_flag & P_HADTHREADS) {
- /*
- * If this is a threaded process we actually ARE on the
- * ksegrp run queue so take it off that first.
- * Also undo any damage done to the last_assigned pointer.
- * XXX Fix setrunqueue so this isn't needed
- */
- struct ksegrp *kg;
-
- kg = td->td_ksegrp;
- if (kg->kg_last_assigned == td)
- kg->kg_last_assigned =
- TAILQ_PREV(td, threadqueue, td_runq);
- TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
- }
-
-#endif
+ MPASS(td->td_sched->ts_state != TSS_ONRUNQ);
TD_SET_RUNNING(td);
CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
td->td_proc->p_pid, td->td_proc->p_comm);
@@ -880,25 +401,25 @@ runq_setbit(struct runq *rq, int pri)
}
/*
- * Add the KSE to the queue specified by its priority, and set the
+ * Add the thread to the queue specified by its priority, and set the
* corresponding status bit.
*/
void
-runq_add(struct runq *rq, struct kse *ke, int flags)
+runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
struct rqhead *rqh;
int pri;
- pri = ke->ke_thread->td_priority / RQ_PPQ;
- ke->ke_rqindex = pri;
+ pri = ts->ts_thread->td_priority / RQ_PPQ;
+ ts->ts_rqindex = pri;
runq_setbit(rq, pri);
rqh = &rq->rq_queues[pri];
- CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
- ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
+ CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
+ ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
if (flags & SRQ_PREEMPTED) {
- TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
+ TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
} else {
- TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+ TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
}
}
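
[Editor's note] As an aside for readers new to the run-queue code touched above: runq_add()/runq_choose() index a bucket by priority/RQ_PPQ and keep a per-bucket ready bit. Below is a rough stand-alone C sketch of that scheme; the bucket count, word width, and the toy_* types are assumptions of the sketch, not the kernel's real struct runq.

/*
 * Minimal userland sketch of a bitmap-indexed run queue; sizes and the
 * item type are assumptions for illustration, not the kernel's layout.
 */
#include <stdio.h>
#include <strings.h>            /* ffs() */
#include <sys/queue.h>

#define TOY_RQ_NQS      32      /* number of buckets (fits one status word) */
#define TOY_RQ_PPQ      4       /* priorities collapsed into each bucket */

struct toy_item {
        int                     prio;
        TAILQ_ENTRY(toy_item)   link;
};
TAILQ_HEAD(toy_bucket, toy_item);

struct toy_runq {
        unsigned int            status;         /* one ready bit per bucket */
        struct toy_bucket       q[TOY_RQ_NQS];
};

static void
toy_runq_add(struct toy_runq *rq, struct toy_item *it, int preempted)
{
        int pri = it->prio / TOY_RQ_PPQ;

        rq->status |= 1u << pri;                /* runq_setbit() equivalent */
        if (preempted)                          /* SRQ_PREEMPTED: go to head */
                TAILQ_INSERT_HEAD(&rq->q[pri], it, link);
        else
                TAILQ_INSERT_TAIL(&rq->q[pri], it, link);
}

static struct toy_item *
toy_runq_choose(struct toy_runq *rq)
{
        int pri;

        if (rq->status == 0)
                return (NULL);
        pri = ffs(rq->status) - 1;              /* lowest set bit = best bucket */
        return (TAILQ_FIRST(&rq->q[pri]));
}

int
main(void)
{
        struct toy_runq rq = { .status = 0 };
        struct toy_item a = { .prio = 20 }, b = { .prio = 4 };
        int i;

        for (i = 0; i < TOY_RQ_NQS; i++)
                TAILQ_INIT(&rq.q[i]);
        toy_runq_add(&rq, &a, 0);
        toy_runq_add(&rq, &b, 0);
        printf("best prio: %d\n", toy_runq_choose(&rq)->prio);  /* prints 4 */
        return (0);
}
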
@@ -933,11 +454,11 @@ SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
/*
* Find the highest priority process on the run queue.
*/
-struct kse *
+struct td_sched *
runq_choose(struct runq *rq)
{
struct rqhead *rqh;
- struct kse *ke;
+ struct td_sched *ts;
int pri;
mtx_assert(&sched_lock, MA_OWNED);
@@ -952,23 +473,23 @@ runq_choose(struct runq *rq)
*/
int count = runq_fuzz;
int cpu = PCPU_GET(cpuid);
- struct kse *ke2;
- ke2 = ke = TAILQ_FIRST(rqh);
+ struct td_sched *ts2;
+ ts2 = ts = TAILQ_FIRST(rqh);
- while (count-- && ke2) {
- if (ke->ke_thread->td_lastcpu == cpu) {
- ke = ke2;
+ while (count-- && ts2) {
+ if (ts->ts_thread->td_lastcpu == cpu) {
+ ts = ts2;
break;
}
- ke2 = TAILQ_NEXT(ke2, ke_procq);
+ ts2 = TAILQ_NEXT(ts2, ts_procq);
}
} else
#endif
- ke = TAILQ_FIRST(rqh);
- KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
+ ts = TAILQ_FIRST(rqh);
+ KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
CTR3(KTR_RUNQ,
- "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
- return (ke);
+ "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
+ return (ts);
}
CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
@@ -976,28 +497,24 @@ runq_choose(struct runq *rq)
}
/*
- * Remove the KSE from the queue specified by its priority, and clear the
+ * Remove the thread from the queue specified by its priority, and clear the
* corresponding status bit if the queue becomes empty.
- * Caller must set ke->ke_state afterwards.
+ * Caller must set ts->ts_state afterwards.
*/
void
-runq_remove(struct runq *rq, struct kse *ke)
+runq_remove(struct runq *rq, struct td_sched *ts)
{
struct rqhead *rqh;
int pri;
-#ifdef KSE
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
- KASSERT(ke->ke_thread->td_proc->p_sflag & PS_INMEM,
-#endif
+ KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
("runq_remove: process swapped out"));
- pri = ke->ke_rqindex;
+ pri = ts->ts_rqindex;
rqh = &rq->rq_queues[pri];
- CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
- ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
- KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
- TAILQ_REMOVE(rqh, ke, ke_procq);
+ CTR5(KTR_RUNQ, "runq_remove: td=%p, ts=%p pri=%d %d rqh=%p",
+ ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
+ KASSERT(ts != NULL, ("runq_remove: no proc on busy queue"));
+ TAILQ_REMOVE(rqh, ts, ts_procq);
if (TAILQ_EMPTY(rqh)) {
CTR0(KTR_RUNQ, "runq_remove: empty");
runq_clrbit(rq, pri);
@@ -1008,23 +525,17 @@ runq_remove(struct runq *rq, struct kse *ke)
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;
-#ifdef KSE
/*
* Allocate scheduler specific per-process resources.
- * The thread and ksegrp have already been linked in.
- * In this case just set the default concurrency value.
+ * The thread and proc have already been linked in.
*
* Called from:
* proc_init() (UMA init method)
*/
void
-sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
+sched_newproc(struct proc *p, struct thread *td)
{
-
- /* This can go in sched_fork */
- sched_init_concurrency(kg);
}
-#endif
/*
* thread is being either created or recycled.
@@ -1037,37 +548,27 @@ sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
void
sched_newthread(struct thread *td)
{
- struct td_sched *ke;
+ struct td_sched *ts;
- ke = (struct td_sched *) (td + 1);
- bzero(ke, sizeof(*ke));
- td->td_sched = ke;
- ke->ke_thread = td;
- ke->ke_state = KES_THREAD;
+ ts = (struct td_sched *) (td + 1);
+ bzero(ts, sizeof(*ts));
+ td->td_sched = ts;
+ ts->ts_thread = td;
+ ts->ts_state = TSS_THREAD;
}
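
[Editor's note] sched_newthread() relies on the scheduler-private td_sched being allocated in the same block, directly behind the thread structure (compare sched_sizeof_thread() later in this diff). A minimal sketch of that trailing-struct idiom, using invented toy_* types rather than the real kernel ones:

#include <stdlib.h>
#include <string.h>

struct toy_thread {                     /* stand-in for struct thread */
        int     td_priority;
        void    *td_sched;              /* points at the trailing private part */
};

struct toy_td_sched {                   /* stand-in for struct td_sched */
        struct toy_thread *ts_thread;
        int     ts_state;
};

/* Allocate both structures in one block, private part right after the thread. */
static struct toy_thread *
toy_thread_alloc(void)
{
        struct toy_thread *td;
        struct toy_td_sched *ts;

        td = malloc(sizeof(struct toy_thread) + sizeof(struct toy_td_sched));
        if (td == NULL)
                return (NULL);
        ts = (struct toy_td_sched *)(td + 1);   /* same idiom as sched_newthread() */
        memset(ts, 0, sizeof(*ts));
        td->td_sched = ts;
        ts->ts_thread = td;
        return (td);
}
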
-#ifdef KSE
/*
- * Set up an initial concurrency of 1
- * and set the given thread (if given) to be using that
- * concurrency slot.
- * May be used "offline"..before the ksegrp is attached to the world
- * and thus wouldn't need schedlock in that case.
* Called from:
* thr_create()
* proc_init() (UMA) via sched_newproc()
*/
void
-sched_init_concurrency(struct ksegrp *kg)
+sched_init_concurrency(struct proc *p)
{
-
- CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
- kg->kg_concurrency = 1;
- kg->kg_avail_opennings = 1;
}
/*
- * Change the concurrency of an existing ksegrp to N
+ * Change the concurrency of an existing proc to N
* Called from:
* kse_create()
* kse_exit()
@@ -1075,16 +576,8 @@ sched_init_concurrency(struct ksegrp *kg)
* thread_single()
*/
void
-sched_set_concurrency(struct ksegrp *kg, int concurrency)
+sched_set_concurrency(struct proc *p, int concurrency)
{
-
- CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
- kg,
- concurrency,
- kg->kg_avail_opennings,
- kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
- kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
- kg->kg_concurrency = concurrency;
}
/*
@@ -1099,10 +592,6 @@ sched_set_concurrency(struct ksegrp *kg, int concurrency)
void
sched_thread_exit(struct thread *td)
{
-
- SLOT_RELEASE(td->td_ksegrp);
- slot_fill(td->td_ksegrp);
}
-#endif
#endif /* KERN_SWITCH_INCLUDE */
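
[Editor's note] For context on the maybe_preempt() hunk earlier in this file: stripped of the KSE bookkeeping, the decision reduces to a pair of priority comparisons. Below is a hedged restatement as a stand-alone predicate; the priority constants are placeholders, not the values from <sys/priority.h>.

/* Sketch of the maybe_preempt() decision, with placeholder priority bounds. */
#define TOY_PRI_MAX_ITHD        47      /* placeholder for illustration only */
#define TOY_PRI_MIN_IDLE        224     /* placeholder for illustration only */

static int
toy_should_preempt(int newpri, int curpri, int full_preemption)
{
        /* Never preempt for an equal or worse (numerically higher) priority. */
        if (newpri >= curpri)
                return (0);
        /*
         * Without FULL_PREEMPTION, only interrupt-class threads preempt
         * anything, and idle threads are always fair game.
         */
        if (!full_preemption &&
            newpri > TOY_PRI_MAX_ITHD && curpri < TOY_PRI_MIN_IDLE)
                return (0);
        return (1);
}
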
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 37e3df2..2769f45 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -142,18 +142,12 @@ create_thread(struct thread *td, mcontext_t *ctx,
{
stack_t stack;
struct thread *newtd;
-#ifdef KSE
- struct ksegrp *kg, *newkg;
-#endif
struct proc *p;
long id;
int error;
error = 0;
p = td->td_proc;
-#ifdef KSE
- kg = td->td_ksegrp;
-#endif
/* Have race condition but it is cheap. */
if (p->p_numthreads >= max_threads_per_proc)
@@ -177,7 +171,7 @@ create_thread(struct thread *td, mcontext_t *ctx,
}
}
- /* Initialize our td and new ksegrp.. */
+ /* Initialize our td */
newtd = thread_alloc();
/*
@@ -229,50 +223,22 @@ create_thread(struct thread *td, mcontext_t *ctx,
}
}
-#ifdef KSE
- newkg = ksegrp_alloc();
- bzero(&newkg->kg_startzero,
- __rangeof(struct ksegrp, kg_startzero, kg_endzero));
- bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
- __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
- sched_init_concurrency(newkg);
PROC_LOCK(td->td_proc);
td->td_proc->p_flag |= P_HADTHREADS;
newtd->td_sigmask = td->td_sigmask;
mtx_lock_spin(&sched_lock);
- ksegrp_link(newkg, p);
- thread_link(newtd, newkg);
- PROC_UNLOCK(p);
-#else
- PROC_LOCK(td->td_proc);
- td->td_proc->p_flag |= P_HADTHREADS;
- newtd->td_sigmask = td->td_sigmask;
- mtx_lock_spin(&sched_lock);
thread_link(newtd, p);
PROC_UNLOCK(p);
-#endif
-#ifdef KSE
/* let the scheduler know about these things. */
- sched_fork_ksegrp(td, newkg);
sched_fork_thread(td, newtd);
if (rtp != NULL) {
- if (!(kg->kg_pri_class == PRI_TIMESHARE &&
- rtp->type == RTP_PRIO_NORMAL)) {
- rtp_to_pri(rtp, newkg);
- sched_prio(newtd, newkg->kg_user_pri);
- } /* ignore timesharing class */
- }
-#else
- sched_fork(td, newtd);
- if (rtp != NULL) {
if (!(td->td_pri_class == PRI_TIMESHARE &&
rtp->type == RTP_PRIO_NORMAL)) {
rtp_to_pri(rtp, newtd);
sched_prio(newtd, newtd->td_user_pri);
} /* ignore timesharing class */
}
-#endif
TD_SET_CAN_RUN(newtd);
/* if ((flags & THR_SUSPENDED) == 0) */
setrunqueue(newtd, SRQ_BORING);
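
[Editor's note] The rtp handling kept above applies an explicit scheduling request to the new thread only when it is not the default timeshare/normal case. A small sketch of that guard, with invented toy_* names and placeholder constants:

/*
 * Sketch of the guard above: an explicit rtprio request is applied to the
 * new thread only when it is not the default timeshare/normal request.
 */
#define TOY_PRI_TIMESHARE       3       /* placeholder */
#define TOY_RTP_PRIO_NORMAL     2       /* placeholder */

struct toy_rtprio {
        unsigned short  type;           /* RTP_PRIO_NORMAL, RTP_PRIO_REALTIME, ... */
        unsigned short  prio;
};

static int
toy_rtprio_should_apply(int pri_class, const struct toy_rtprio *rtp)
{
        if (rtp == NULL)
                return (0);
        /* Ignore the request when it only restates the default class. */
        if (pri_class == TOY_PRI_TIMESHARE && rtp->type == TOY_RTP_PRIO_NORMAL)
                return (0);
        return (1);             /* caller runs rtp_to_pri() + sched_prio() */
}
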
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 74af901..1f9e80f 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -50,16 +50,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>
-#ifdef KSE
-/*
- * KSEGRP related storage.
- */
-static uma_zone_t ksegrp_zone;
-#else
/*
* thread related storage.
*/
-#endif
static uma_zone_t thread_zone;
/* DEBUG ONLY */
@@ -85,9 +78,6 @@ int virtual_cpu;
#endif
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
-#ifdef KSE
-TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
-#endif
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
@@ -228,59 +218,6 @@ thread_fini(void *mem, int size)
vm_thread_dispose(td);
}
-#ifdef KSE
-/*
- * Initialize type-stable parts of a ksegrp (when newly created).
- */
-static int
-ksegrp_ctor(void *mem, int size, void *arg, int flags)
-{
- struct ksegrp *kg;
-
- kg = (struct ksegrp *)mem;
- bzero(mem, size);
- kg->kg_sched = (struct kg_sched *)&kg[1];
- return (0);
-}
-
-void
-ksegrp_link(struct ksegrp *kg, struct proc *p)
-{
-
- TAILQ_INIT(&kg->kg_threads);
- TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
- TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
- kg->kg_proc = p;
- /*
- * the following counters are in the -zero- section
- * and may not need clearing
- */
- kg->kg_numthreads = 0;
- kg->kg_numupcalls = 0;
- /* link it in now that it's consistent */
- p->p_numksegrps++;
- TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
-}
-
-/*
- * Called from:
- * thread-exit()
- */
-void
-ksegrp_unlink(struct ksegrp *kg)
-{
- struct proc *p;
-
- mtx_assert(&sched_lock, MA_OWNED);
- KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
- KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
-
- p = kg->kg_proc;
- TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
- p->p_numksegrps--;
-}
-#endif
-
/*
* For a newly created process,
* link up all the structures and its initial threads etc.
@@ -290,18 +227,10 @@ ksegrp_unlink(struct ksegrp *kg)
* proc_init()
*/
void
-#ifdef KSE
-proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
-#else
proc_linkup(struct proc *p, struct thread *td)
-#endif
{
-
-#ifdef KSE
- TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
-#endif
TAILQ_INIT(&p->p_threads); /* all threads in proc */
- TAILQ_INIT(&p->p_suspended); /* Threads suspended */
+ TAILQ_INIT(&p->p_upcalls); /* upcall list */
sigqueue_init(&p->p_sigqueue, p);
p->p_ksi = ksiginfo_alloc(1);
if (p->p_ksi != NULL) {
@@ -309,17 +238,8 @@ proc_linkup(struct proc *p, struct thread *td)
p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
}
LIST_INIT(&p->p_mqnotifier);
-#ifdef KSE
- p->p_numksegrps = 0;
-#endif
p->p_numthreads = 0;
-
-#ifdef KSE
- ksegrp_link(kg, p);
- thread_link(td, kg);
-#else
thread_link(td, p);
-#endif
}
/*
@@ -336,37 +256,22 @@ threadinit(void)
thread_ctor, thread_dtor, thread_init, thread_fini,
UMA_ALIGN_CACHE, 0);
#ifdef KSE
- ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
- ksegrp_ctor, NULL, NULL, NULL,
- UMA_ALIGN_CACHE, 0);
kseinit(); /* set up kse specific stuff e.g. upcall zone*/
#endif
}
/*
* Stash an embarrassingly extra thread into the zombie thread queue.
+ * Use the slpq as that must be unused by now.
*/
void
thread_stash(struct thread *td)
{
mtx_lock_spin(&kse_zombie_lock);
- TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
+ TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
mtx_unlock_spin(&kse_zombie_lock);
}
-#ifdef KSE
-/*
- * Stash an embarasingly extra ksegrp into the zombie ksegrp queue.
- */
-void
-ksegrp_stash(struct ksegrp *kg)
-{
- mtx_lock_spin(&kse_zombie_lock);
- TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
- mtx_unlock_spin(&kse_zombie_lock);
-}
-#endif
-
/*
* Reap zombie kse resource.
*/
@@ -374,65 +279,27 @@ void
thread_reap(void)
{
struct thread *td_first, *td_next;
-#ifdef KSE
- struct ksegrp *kg_first, * kg_next;
-#endif
/*
* Don't even bother to lock if none at this instant,
* we really don't care about the next instant..
*/
-#ifdef KSE
- if ((!TAILQ_EMPTY(&zombie_threads))
- || (!TAILQ_EMPTY(&zombie_ksegrps))) {
-#else
if (!TAILQ_EMPTY(&zombie_threads)) {
-#endif
mtx_lock_spin(&kse_zombie_lock);
td_first = TAILQ_FIRST(&zombie_threads);
-#ifdef KSE
- kg_first = TAILQ_FIRST(&zombie_ksegrps);
-#endif
if (td_first)
TAILQ_INIT(&zombie_threads);
-#ifdef KSE
- if (kg_first)
- TAILQ_INIT(&zombie_ksegrps);
-#endif
mtx_unlock_spin(&kse_zombie_lock);
while (td_first) {
- td_next = TAILQ_NEXT(td_first, td_runq);
+ td_next = TAILQ_NEXT(td_first, td_slpq);
if (td_first->td_ucred)
crfree(td_first->td_ucred);
thread_free(td_first);
td_first = td_next;
}
-#ifdef KSE
- while (kg_first) {
- kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
- ksegrp_free(kg_first);
- kg_first = kg_next;
- }
- /*
- * there will always be a thread on the list if one of these
- * is there.
- */
- kse_GC();
-#endif
}
}
-#ifdef KSE
-/*
- * Allocate a ksegrp.
- */
-struct ksegrp *
-ksegrp_alloc(void)
-{
- return (uma_zalloc(ksegrp_zone, M_WAITOK));
-}
-#endif
-
/*
* Allocate a thread.
*/
@@ -444,16 +311,6 @@ thread_alloc(void)
return (uma_zalloc(thread_zone, M_WAITOK));
}
-#ifdef KSE
-/*
- * Deallocate a ksegrp.
- */
-void
-ksegrp_free(struct ksegrp *td)
-{
- uma_zfree(ksegrp_zone, td);
-}
-#endif
/*
* Deallocate a thread.
@@ -503,23 +360,14 @@ thread_exit(void)
uint64_t new_switchtime;
struct thread *td;
struct proc *p;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
td = curthread;
-#ifdef KSE
- kg = td->td_ksegrp;
-#endif
p = td->td_proc;
mtx_assert(&sched_lock, MA_OWNED);
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT(p != NULL, ("thread exiting without a process"));
-#ifdef KSE
- KASSERT(kg != NULL, ("thread exiting without a kse group"));
-#endif
CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
(long)p->p_pid, p->p_comm);
KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
@@ -583,13 +431,8 @@ thread_exit(void)
if (p->p_flag & P_HADTHREADS) {
if (p->p_numthreads > 1) {
thread_unlink(td);
-#ifdef KSE
- /* XXX first arg not used in 4BSD or ULE */
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
-#else
- sched_exit(p, td);
-#endif
/*
* The test below is NOT true if we are the
@@ -614,38 +457,9 @@ thread_exit(void)
* there somehow.
*/
upcall_remove(td);
-
- /*
- * If the thread we unlinked above was the last one,
- * then this ksegrp should go away too.
- */
- if (kg->kg_numthreads == 0) {
- /*
- * let the scheduler know about this in case
- * it needs to recover stats or resources.
- * Theoretically we could let
- * sched_exit_ksegrp() do the equivalent of
- * setting the concurrency to 0
- * but don't do it yet to avoid changing
- * the existing scheduler code until we
- * are ready.
- * We supply a random other ksegrp
- * as the recipient of any built up
- * cpu usage etc. (If the scheduler wants it).
- * XXXKSE
- * This is probably not fair so think of
- * a better answer.
- */
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
- sched_set_concurrency(kg, 0); /* XXX TEMP */
- ksegrp_unlink(kg);
- ksegrp_stash(kg);
- }
#endif
+
PROC_UNLOCK(p);
-#ifdef KSE
- td->td_ksegrp = NULL;
-#endif
PCPU_SET(deadthread, td);
} else {
/*
@@ -689,9 +503,6 @@ thread_wait(struct proc *p)
mtx_assert(&Giant, MA_NOTOWNED);
KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
-#ifdef KSE
- KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
-#endif
FOREACH_THREAD_IN_PROC(p, td) {
#ifdef KSE
if (td->td_standin != NULL) {
@@ -718,46 +529,22 @@ thread_wait(struct proc *p)
* The thread is linked as if running but no KSE assigned.
* Called from:
* proc_linkup()
- * ifdef KSE
* thread_schedule_upcall()
- * endif
* thr_create()
*/
void
-#ifdef KSE
-thread_link(struct thread *td, struct ksegrp *kg)
-#else
thread_link(struct thread *td, struct proc *p)
-#endif
{
-#ifdef KSE
- struct proc *p;
-#endif
-#ifdef KSE
- p = kg->kg_proc;
-#endif
td->td_state = TDS_INACTIVE;
td->td_proc = p;
-#ifdef KSE
- td->td_ksegrp = kg;
-#endif
td->td_flags = 0;
-#ifdef KSE
- td->td_kflags = 0;
-#endif
LIST_INIT(&td->td_contested);
sigqueue_init(&td->td_sigqueue, p);
callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
-#ifdef KSE
- TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
-#endif
p->p_numthreads++;
-#ifdef KSE
- kg->kg_numthreads++;
-#endif
}
/*
@@ -781,7 +568,7 @@ thread_unthread(struct thread *td)
thread_stash(td->td_standin);
td->td_standin = NULL;
}
- sched_set_concurrency(td->td_ksegrp, 1);
+ sched_set_concurrency(p, 1);
#else
p->p_flag &= ~P_HADTHREADS;
#endif
@@ -795,23 +582,12 @@ void
thread_unlink(struct thread *td)
{
struct proc *p = td->td_proc;
-#ifdef KSE
- struct ksegrp *kg = td->td_ksegrp;
-#endif
mtx_assert(&sched_lock, MA_OWNED);
TAILQ_REMOVE(&p->p_threads, td, td_plist);
p->p_numthreads--;
-#ifdef KSE
- TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
- kg->kg_numthreads--;
-#endif
/* could clear a few other things here */
-#ifdef KSE
- /* Must NOT clear links to proc and ksegrp! */
-#else
/* Must NOT clear links to proc! */
-#endif
}
/*
@@ -1040,8 +816,7 @@ thread_suspend_check(int return_instead)
/*
* When a thread suspends, it just
- * moves to the processes's suspend queue
- * and stays there.
+ * gets taken off all queues.
*/
thread_suspend_one(td);
if (return_instead == 0) {
@@ -1074,7 +849,6 @@ thread_suspend_one(struct thread *td)
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
p->p_suspcount++;
TD_SET_SUSPENDED(td);
- TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}
void
@@ -1084,7 +858,7 @@ thread_unsuspend_one(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
- TAILQ_REMOVE(&p->p_suspended, td, td_runq);
+ KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
p->p_suspcount--;
setrunnable(td);
@@ -1101,8 +875,10 @@ thread_unsuspend(struct proc *p)
mtx_assert(&sched_lock, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
if (!P_SHOULDSTOP(p)) {
- while ((td = TAILQ_FIRST(&p->p_suspended))) {
- thread_unsuspend_one(td);
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (TD_IS_SUSPENDED(td)) {
+ thread_unsuspend_one(td);
+ }
}
} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
(p->p_numthreads == p->p_suspcount)) {
@@ -1137,8 +913,10 @@ thread_single_end(void)
* to continue however as this is a bad place to stop.
*/
if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
- while ((td = TAILQ_FIRST(&p->p_suspended))) {
- thread_unsuspend_one(td);
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (TD_IS_SUSPENDED(td)) {
+ thread_unsuspend_one(td);
+ }
}
}
mtx_unlock_spin(&sched_lock);
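
[Editor's note] With the per-process suspend queue gone, resuming threads is now a walk of the whole thread list with a TD_IS_SUSPENDED() test, as in thread_unsuspend() and thread_single_end() above. A stand-alone sketch of the pattern with toy types:

#include <sys/queue.h>

struct toy_thread {
        int                      suspended;
        TAILQ_ENTRY(toy_thread)  plist;
};
TAILQ_HEAD(toy_threadlist, toy_thread);

/* Wake every suspended thread; mirrors the FOREACH_THREAD_IN_PROC loop. */
static int
toy_unsuspend_all(struct toy_threadlist *threads)
{
        struct toy_thread *td;
        int resumed = 0;

        TAILQ_FOREACH(td, threads, plist) {
                if (td->suspended) {
                        td->suspended = 0;      /* TD_CLR_SUSPENDED() + setrunnable() */
                        resumed++;
                }
        }
        return (resumed);
}
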
diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
index 328fb57..645f02d 100644
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -167,15 +167,9 @@ struct umtxq_chain {
* if it is using 100%CPU, this is unfair to other processes.
*/
-#ifdef KSE
-#define UPRI(td) (((td)->td_ksegrp->kg_user_pri >= PRI_MIN_TIMESHARE &&\
- (td)->td_ksegrp->kg_user_pri <= PRI_MAX_TIMESHARE) ?\
- PRI_MAX_TIMESHARE : (td)->td_ksegrp->kg_user_pri)
-#else
#define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
(td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
PRI_MAX_TIMESHARE : (td)->td_user_pri)
-#endif
#define GOLDEN_RATIO_PRIME 2654404609U
#define UMTX_CHAINS 128
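
[Editor's note] The UPRI() macro retained above clamps every timesharing user priority to the worst timesharing value so a CPU-hungry umtx owner cannot outrank its waiters. Rewritten as a plain function for clarity; the bounds used here are placeholders, not the kernel's definitions.

/* Sketch of the UPRI() clamp with placeholder priority bounds. */
#define TOY_PRI_MIN_TIMESHARE   160     /* placeholder for the sketch */
#define TOY_PRI_MAX_TIMESHARE   223     /* placeholder for the sketch */

static int
toy_umtx_effective_pri(int user_pri)
{
        if (user_pri >= TOY_PRI_MIN_TIMESHARE &&
            user_pri <= TOY_PRI_MAX_TIMESHARE)
                return (TOY_PRI_MAX_TIMESHARE); /* treat all timeshare as equal */
        return (user_pri);                      /* realtime/idle pass through */
}
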
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index 4127367..785c04c 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -105,11 +105,7 @@ getscheduler(struct ksched *ksched, struct thread *td, int *policy)
int e = 0;
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- pri_to_rtp(td->td_ksegrp, &rtp);
-#else
pri_to_rtp(td, &rtp);
-#endif
mtx_unlock_spin(&sched_lock);
switch (rtp.type)
{
@@ -156,11 +152,7 @@ ksched_getparam(struct ksched *ksched,
struct rtprio rtp;
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- pri_to_rtp(td->td_ksegrp, &rtp);
-#else
pri_to_rtp(td, &rtp);
-#endif
mtx_unlock_spin(&sched_lock);
if (RTP_PRIO_IS_REALTIME(rtp.type))
param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -181,9 +173,6 @@ ksched_setscheduler(struct ksched *ksched,
{
int e = 0;
struct rtprio rtp;
-#ifdef KSE
- struct ksegrp *kg = td->td_ksegrp;
-#endif
switch(policy)
{
@@ -198,20 +187,7 @@ ksched_setscheduler(struct ksched *ksched,
? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- rtp_to_pri(&rtp, kg);
- FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
- if (TD_IS_RUNNING(td)) {
- td->td_flags |= TDF_NEEDRESCHED;
- } else if (TD_ON_RUNQ(td)) {
- if (td->td_priority > kg->kg_user_pri) {
- sched_prio(td, kg->kg_user_pri);
- }
- }
- }
-#else
rtp_to_pri(&rtp, td);
-#endif
mtx_unlock_spin(&sched_lock);
}
else
@@ -225,28 +201,7 @@ ksched_setscheduler(struct ksched *ksched,
rtp.type = RTP_PRIO_NORMAL;
rtp.prio = p4prio_to_rtpprio(param->sched_priority);
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- rtp_to_pri(&rtp, kg);
-
- /* XXX Simply revert to whatever we had for last
- * normal scheduler priorities.
- * This puts a requirement
- * on the scheduling code: You must leave the
- * scheduling info alone.
- */
- FOREACH_THREAD_IN_GROUP(kg, td) {
- if (TD_IS_RUNNING(td)) {
- td->td_flags |= TDF_NEEDRESCHED;
- } else if (TD_ON_RUNQ(td)) {
- if (td->td_priority > kg->kg_user_pri) {
- sched_prio(td, kg->kg_user_pri);
- }
- }
-
- }
-#else
rtp_to_pri(&rtp, td);
-#endif
mtx_unlock_spin(&sched_lock);
}
break;
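
[Editor's note] For background on the ksched_setscheduler() paths above: the POSIX interface builds a struct rtprio from the requested policy and priority and feeds it to rtp_to_pri() under sched_lock. The sketch below shows that translation step; the types, constants, and the priority inversion are assumptions of the sketch, not quoted from ksched.c.

struct toy_rtprio {
        unsigned short  type;
        unsigned short  prio;
};

enum { TOY_RTP_PRIO_REALTIME, TOY_RTP_PRIO_FIFO, TOY_RTP_PRIO_NORMAL };
enum { TOY_SCHED_FIFO, TOY_SCHED_RR, TOY_SCHED_OTHER };

#define TOY_RTP_PRIO_MAX        31      /* placeholder bound */

static int
toy_posix_to_rtprio(int policy, int posix_prio, struct toy_rtprio *rtp)
{
        switch (policy) {
        case TOY_SCHED_FIFO:
        case TOY_SCHED_RR:
                rtp->type = (policy == TOY_SCHED_FIFO) ?
                    TOY_RTP_PRIO_FIFO : TOY_RTP_PRIO_REALTIME;
                break;
        case TOY_SCHED_OTHER:
                rtp->type = TOY_RTP_PRIO_NORMAL;
                break;
        default:
                return (-1);
        }
        /* Assumed inversion: larger POSIX priority = better (smaller) rtprio. */
        rtp->prio = (unsigned short)(TOY_RTP_PRIO_MAX - posix_prio);
        return (0);
}
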
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index dc9d288..3fe2682 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -37,8 +37,6 @@ __FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
-#define kse td_sched
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -75,99 +73,38 @@ __FBSDID("$FreeBSD$");
#endif
#define NICE_WEIGHT 1 /* Priorities per nice level. */
-#ifdef KSE
-/*
- * The schedulable entity that can be given a context to run.
- * A process may have several of these. Probably one per processor
- * but possibly a few more. In this universe they are grouped
- * with a KSEG that contains the priority and niceness
- * for the group.
- */
-#else
/*
* The schedulable entity that runs a context.
- * A process may have several of these. Probably one per processor
- * but posibly a few more.
+ * This is an extension to the thread structure and is tailored to
+ * the requirements of this scheduler.
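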
*/
-#endif
-struct kse {
- TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
- struct thread *ke_thread; /* (*) Active associated thread. */
- fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
- u_char ke_rqindex; /* (j) Run queue index. */
+struct td_sched {
+ TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
+ struct thread *ts_thread; /* (*) Active associated thread. */
+ fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
+ u_char ts_rqindex; /* (j) Run queue index. */
enum {
- KES_THREAD = 0x0, /* slaved to thread state */
- KES_ONRUNQ
- } ke_state; /* (j) KSE status. */
- int ke_cpticks; /* (j) Ticks of cpu time. */
- struct runq *ke_runq; /* runq the kse is currently on */
+ TSS_THREAD = 0x0, /* slaved to thread state */
+ TSS_ONRUNQ
+ } ts_state; /* (j) TD_STAT in scheduler status. */
+ int ts_cpticks; /* (j) Ticks of cpu time. */
+ struct runq *ts_runq; /* runq the thread is currently on */
};
-#ifdef KSE
-#define ke_proc ke_thread->td_proc
-#define ke_ksegrp ke_thread->td_ksegrp
-#endif
-
-#define td_kse td_sched
-
/* flags kept in td_flags */
-#define TDF_DIDRUN TDF_SCHED0 /* KSE actually ran. */
-#define TDF_EXIT TDF_SCHED1 /* KSE is being killed. */
+#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
+#define TDF_EXIT TDF_SCHED1 /* thread is being killed. */
#define TDF_BOUND TDF_SCHED2
-#define ke_flags ke_thread->td_flags
-#define KEF_DIDRUN TDF_DIDRUN /* KSE actually ran. */
-#define KEF_EXIT TDF_EXIT /* KSE is being killed. */
-#define KEF_BOUND TDF_BOUND /* stuck to one CPU */
-
-#define SKE_RUNQ_PCPU(ke) \
- ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
-
-#ifdef KSE
-struct kg_sched {
- struct thread *skg_last_assigned; /* (j) Last thread assigned to */
- /* the system scheduler. */
- int skg_avail_opennings; /* (j) Num KSEs requested in group. */
- int skg_concurrency; /* (j) Num KSEs requested in group. */
-};
-#define kg_last_assigned kg_sched->skg_last_assigned
-#define kg_avail_opennings kg_sched->skg_avail_opennings
-#define kg_concurrency kg_sched->skg_concurrency
-
-#define SLOT_RELEASE(kg) \
-do { \
- kg->kg_avail_opennings++; \
- CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)", \
- kg, \
- kg->kg_concurrency, \
- kg->kg_avail_opennings); \
-/* KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency), \
- ("slots out of whack"));*/ \
-} while (0)
-
-#define SLOT_USE(kg) \
-do { \
- kg->kg_avail_opennings--; \
- CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)", \
- kg, \
- kg->kg_concurrency, \
- kg->kg_avail_opennings); \
-/* KASSERT((kg->kg_avail_opennings >= 0), \
- ("slots out of whack"));*/ \
-} while (0)
-#endif
+#define ts_flags ts_thread->td_flags
+#define TSF_DIDRUN TDF_DIDRUN /* thread actually ran. */
+#define TSF_EXIT TDF_EXIT /* thread is being killed. */
+#define TSF_BOUND TDF_BOUND /* stuck to one CPU */
-/*
- * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
- * cpus.
- */
-#define KSE_CAN_MIGRATE(ke) \
- ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+#define SKE_RUNQ_PCPU(ts) \
+ ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
-static struct kse kse0;
-#ifdef KSE
-static struct kg_sched kg_sched0;
-#endif
+static struct td_sched td_sched0;
static int sched_tdcnt; /* Total runnable threads in the system. */
static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
@@ -175,12 +112,7 @@ static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
static struct callout roundrobin_callout;
-#ifdef KSE
-static void slot_fill(struct ksegrp *kg);
-static struct kse *sched_choose(void); /* XXX Should be thread * */
-#else
-static struct thread *sched_choose(void);
-#endif
+static struct td_sched *sched_choose(void);
static void setup_runqs(void);
static void roundrobin(void *arg);
@@ -189,15 +121,9 @@ static void schedcpu_thread(void);
static void sched_priority(struct thread *td, u_char prio);
static void sched_setup(void *dummy);
static void maybe_resched(struct thread *td);
-#ifdef KSE
-static void updatepri(struct ksegrp *kg);
-static void resetpriority(struct ksegrp *kg);
-static void resetpriority_thread(struct thread *td, struct ksegrp *kg);
-#else
static void updatepri(struct thread *td);
static void resetpriority(struct thread *td);
static void resetpriority_thread(struct thread *td);
-#endif
#ifdef SMP
static int forward_wakeup(int cpunum);
#endif
@@ -300,21 +226,11 @@ SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
"account for htt");
#endif
-#ifdef KSE
+#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
&sched_followon, 0,
"allow threads to share a quantum");
-
-static int sched_pfollowons = 0;
-SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
- &sched_pfollowons, 0,
- "number of followons done to a different ksegrp");
-
-static int sched_kgfollowons = 0;
-SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
- &sched_kgfollowons, 0,
- "number of followons done in a ksegrp");
#endif
static __inline void
@@ -366,40 +282,20 @@ roundrobin(void *arg)
/*
* Constants for digital decay and forget:
- * ifdef KSE
- * 90% of (kg_estcpu) usage in 5 * loadav time
- * else
* 90% of (td_estcpu) usage in 5 * loadav time
- * endif
- * 95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
+ * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
* Note that, as ps(1) mentions, this can let percentages
* total over 100% (I've seen 137.9% for 3 processes).
*
- * ifdef KSE
- * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
- * else
* Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
- * endif
*
- * ifdef KSE
- * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
- * else
* We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
- * endif
* That is, the system wants to compute a value of decay such
* that the following for loop:
* for (i = 0; i < (5 * loadavg); i++)
- * ifdef KSE
- * kg_estcpu *= decay;
- * else
* td_estcpu *= decay;
- * endif
* will compute
- * ifdef KSE
- * kg_estcpu *= 0.1;
- * else
* td_estcpu *= 0.1;
- * endif
* for all values of loadavg:
*
* Mathematically this loop can be expressed by saying:
@@ -452,7 +348,7 @@ roundrobin(void *arg)
#define loadfactor(loadav) (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
-/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
+/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
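
[Editor's note] The two decay rules described in the comment block above can be checked numerically with a few lines of fixed-point arithmetic. The sketch below assumes the conventional FSHIFT of 11; treat the constants as illustrative, not authoritative.

#include <stdio.h>

#define FSHIFT  11                      /* assumed fixed-point shift */
#define FSCALE  (1 << FSHIFT)

typedef unsigned int fixpt_t;

/* decay_cpu(): estcpu *= (2*load) / (2*load + 1), in fixed point. */
static unsigned int
toy_decay_cpu(fixpt_t loadfac, unsigned int estcpu)
{
        return ((loadfac * estcpu) / (loadfac + FSCALE));
}

int
main(void)
{
        fixpt_t ccpu = (fixpt_t)(0.95122942450071400909 * FSCALE); /* exp(-1/20) */
        fixpt_t loadfac = 2 * (1 * FSCALE);     /* loadfactor() with loadavg = 1 */
        unsigned int estcpu = 255;
        fixpt_t pctcpu = FSCALE;                /* pretend 100% cpu */
        int i;

        for (i = 0; i < 5; i++)                 /* ~5 * loadavg seconds of decay */
                estcpu = toy_decay_cpu(loadfac, estcpu);
        for (i = 0; i < 60; i++)                /* 60 one-second ccpu decays */
                pctcpu = (pctcpu * ccpu) >> FSHIFT;
        printf("estcpu %u -> pctcpu %.3f\n", estcpu, (double)pctcpu / FSCALE);
        return (0);
}
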
@@ -481,10 +377,7 @@ schedcpu(void)
register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
struct thread *td;
struct proc *p;
- struct kse *ke;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
+ struct td_sched *ts;
int awake, realstathz;
realstathz = stathz ? stathz : hz;
@@ -499,126 +392,63 @@ schedcpu(void)
* 16-bit int's (remember them?) overflow takes 45 days.
*/
p->p_swtime++;
-#ifdef KSE
- FOREACH_KSEGRP_IN_PROC(p, kg) {
-#else
FOREACH_THREAD_IN_PROC(p, td) {
-#endif
awake = 0;
-#ifdef KSE
- FOREACH_THREAD_IN_GROUP(kg, td) {
- ke = td->td_kse;
- /*
- * Increment sleep time (if sleeping). We
- * ignore overflow, as above.
- */
- /*
- * The kse slptimes are not touched in wakeup
- * because the thread may not HAVE a KSE.
- */
- if (ke->ke_state == KES_ONRUNQ) {
- awake = 1;
- ke->ke_flags &= ~KEF_DIDRUN;
- } else if ((ke->ke_state == KES_THREAD) &&
- (TD_IS_RUNNING(td))) {
- awake = 1;
- /* Do not clear KEF_DIDRUN */
- } else if (ke->ke_flags & KEF_DIDRUN) {
- awake = 1;
- ke->ke_flags &= ~KEF_DIDRUN;
- }
-
- /*
- * ke_pctcpu is only for ps and ttyinfo().
- * Do it per kse, and add them up at the end?
- * XXXKSE
- */
- ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
- FSHIFT;
- /*
- * If the kse has been idle the entire second,
- * stop recalculating its priority until
- * it wakes up.
- */
- if (ke->ke_cpticks == 0)
- continue;
-#if (FSHIFT >= CCPU_SHIFT)
- ke->ke_pctcpu += (realstathz == 100)
- ? ((fixpt_t) ke->ke_cpticks) <<
- (FSHIFT - CCPU_SHIFT) :
- 100 * (((fixpt_t) ke->ke_cpticks)
- << (FSHIFT - CCPU_SHIFT)) / realstathz;
-#else
- ke->ke_pctcpu += ((FSCALE - ccpu) *
- (ke->ke_cpticks *
- FSCALE / realstathz)) >> FSHIFT;
-#endif
- ke->ke_cpticks = 0;
- } /* end of kse loop */
-#else
- ke = td->td_kse;
+ ts = td->td_sched;
/*
* Increment sleep time (if sleeping). We
* ignore overflow, as above.
*/
/*
- * The kse slptimes are not touched in wakeup
- * because the thread may not HAVE a KSE.
+ * The td_sched slptimes are not touched in wakeup
+ * because the thread may not HAVE everything in
+ * memory? XXX I think this is out of date.
*/
- if (ke->ke_state == KES_ONRUNQ) {
+ if (ts->ts_state == TSS_ONRUNQ) {
awake = 1;
- ke->ke_flags &= ~KEF_DIDRUN;
- } else if ((ke->ke_state == KES_THREAD) &&
+ ts->ts_flags &= ~TSF_DIDRUN;
+ } else if ((ts->ts_state == TSS_THREAD) &&
(TD_IS_RUNNING(td))) {
awake = 1;
- /* Do not clear KEF_DIDRUN */
- } else if (ke->ke_flags & KEF_DIDRUN) {
+ /* Do not clear TSF_DIDRUN */
+ } else if (ts->ts_flags & TSF_DIDRUN) {
awake = 1;
- ke->ke_flags &= ~KEF_DIDRUN;
+ ts->ts_flags &= ~TSF_DIDRUN;
}
/*
- * ke_pctcpu is only for ps and ttyinfo().
- * Do it per kse, and add them up at the end?
+ * ts_pctcpu is only for ps and ttyinfo().
+ * Do it per td_sched, and add them up at the end?
* XXXKSE
*/
- ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
- FSHIFT;
+ ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
/*
- * If the kse has been idle the entire second,
+ * If the td_sched has been idle the entire second,
* stop recalculating its priority until
* it wakes up.
*/
- if (ke->ke_cpticks != 0) {
+ if (ts->ts_cpticks != 0) {
#if (FSHIFT >= CCPU_SHIFT)
- ke->ke_pctcpu += (realstathz == 100)
- ? ((fixpt_t) ke->ke_cpticks) <<
- (FSHIFT - CCPU_SHIFT) :
- 100 * (((fixpt_t) ke->ke_cpticks)
- << (FSHIFT - CCPU_SHIFT)) / realstathz;
+ ts->ts_pctcpu += (realstathz == 100)
+ ? ((fixpt_t) ts->ts_cpticks) <<
+ (FSHIFT - CCPU_SHIFT) :
+ 100 * (((fixpt_t) ts->ts_cpticks)
+ << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
- ke->ke_pctcpu += ((FSCALE - ccpu) *
- (ke->ke_cpticks *
- FSCALE / realstathz)) >> FSHIFT;
+ ts->ts_pctcpu += ((FSCALE - ccpu) *
+ (ts->ts_cpticks *
+ FSCALE / realstathz)) >> FSHIFT;
#endif
- ke->ke_cpticks = 0;
+ ts->ts_cpticks = 0;
}
-#endif
-
/*
- * ifdef KSE
- * If there are ANY running threads in this KSEGRP,
- * else
* If there are ANY running threads in this process,
- * endif
* then don't count it as sleeping.
+XXX this is broken
+
*/
if (awake) {
-#ifdef KSE
- if (kg->kg_slptime > 1) {
-#else
- if (td->td_slptime > 1) {
-#endif
+ if (p->p_slptime > 1) {
/*
* In an ideal world, this should not
* happen, because whoever woke us
@@ -628,21 +458,6 @@ schedcpu(void)
* priority. Should KASSERT at some
* point when all the cases are fixed.
*/
-#ifdef KSE
- updatepri(kg);
- }
- kg->kg_slptime = 0;
- } else
- kg->kg_slptime++;
- if (kg->kg_slptime > 1)
- continue;
- kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
- resetpriority(kg);
- FOREACH_THREAD_IN_GROUP(kg, td) {
- resetpriority_thread(td, kg);
- }
- } /* end of ksegrp loop */
-#else
updatepri(td);
}
td->td_slptime = 0;
@@ -654,7 +469,6 @@ schedcpu(void)
resetpriority(td);
resetpriority_thread(td);
} /* end of thread loop */
-#endif
mtx_unlock_spin(&sched_lock);
} /* end of process loop */
sx_sunlock(&allproc_lock);
@@ -676,48 +490,24 @@ schedcpu_thread(void)
/*
* Recalculate the priority of a process after it has slept for a while.
- * ifdef KSE
- * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
- * least six times the loadfactor will decay kg_estcpu to zero.
- * else
* For all load averages >= 1 and max td_estcpu of 255, sleeping for at
* least six times the loadfactor will decay td_estcpu to zero.
- * endif
*/
static void
-#ifdef KSE
-updatepri(struct ksegrp *kg)
-#else
updatepri(struct thread *td)
-#endif
{
register fixpt_t loadfac;
register unsigned int newcpu;
loadfac = loadfactor(averunnable.ldavg[0]);
-#ifdef KSE
- if (kg->kg_slptime > 5 * loadfac)
- kg->kg_estcpu = 0;
-#else
if (td->td_slptime > 5 * loadfac)
td->td_estcpu = 0;
-#endif
else {
-#ifdef KSE
- newcpu = kg->kg_estcpu;
- kg->kg_slptime--; /* was incremented in schedcpu() */
- while (newcpu && --kg->kg_slptime)
-#else
newcpu = td->td_estcpu;
td->td_slptime--; /* was incremented in schedcpu() */
while (newcpu && --td->td_slptime)
-#endif
newcpu = decay_cpu(loadfac, newcpu);
-#ifdef KSE
- kg->kg_estcpu = newcpu;
-#else
td->td_estcpu = newcpu;
-#endif
}
}
@@ -727,43 +517,25 @@ updatepri(struct thread *td)
* than that of the current process.
*/
static void
-#ifdef KSE
-resetpriority(struct ksegrp *kg)
-#else
resetpriority(struct thread *td)
-#endif
{
register unsigned int newpriority;
-#ifdef KSE
- if (kg->kg_pri_class == PRI_TIMESHARE) {
- newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
- NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
-#else
if (td->td_pri_class == PRI_TIMESHARE) {
newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
-#endif
newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
PRI_MAX_TIMESHARE);
-#ifdef KSE
- sched_user_prio(kg, newpriority);
-#else
sched_user_prio(td, newpriority);
-#endif
}
}
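
[Editor's note] resetpriority() above is the classic 4BSD formula: a base user priority plus a CPU-usage term plus a nice term, clamped to the timeshare range. Written out as a small helper with placeholder constants (only NICE_WEIGHT matches the #define earlier in this file):

/* Sketch of the 4BSD user-priority calculation with placeholder constants. */
#define TOY_PUSER                       160     /* placeholder */
#define TOY_PRI_MIN_TIMESHARE           160     /* placeholder */
#define TOY_PRI_MAX_TIMESHARE           223     /* placeholder */
#define TOY_INVERSE_ESTCPU_WEIGHT       8       /* placeholder */
#define TOY_NICE_WEIGHT                 1       /* as defined above */
#define TOY_PRIO_MIN                    (-20)

static int
toy_resetpriority(int estcpu, int nice)
{
        int newpriority;

        newpriority = TOY_PUSER + estcpu / TOY_INVERSE_ESTCPU_WEIGHT +
            TOY_NICE_WEIGHT * (nice - TOY_PRIO_MIN);
        if (newpriority < TOY_PRI_MIN_TIMESHARE)
                newpriority = TOY_PRI_MIN_TIMESHARE;
        if (newpriority > TOY_PRI_MAX_TIMESHARE)
                newpriority = TOY_PRI_MAX_TIMESHARE;
        return (newpriority);
}
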
/*
- * Update the thread's priority when the associated ksegroup's user
+ * Update the thread's priority when the associated process's user
* priority changes.
*/
static void
-#ifdef KSE
-resetpriority_thread(struct thread *td, struct ksegrp *kg)
-#else
resetpriority_thread(struct thread *td)
-#endif
{
/* Only change threads with a time sharing user priority. */
@@ -774,11 +546,7 @@ resetpriority_thread(struct thread *td)
/* XXX the whole needresched thing is broken, but not silly. */
maybe_resched(td);
-#ifdef KSE
- sched_prio(td, kg->kg_user_pri);
-#else
sched_prio(td, td->td_user_pri);
-#endif
}
/* ARGSUSED */
@@ -814,16 +582,9 @@ schedinit(void)
* Set up the scheduler specific parts of proc0.
*/
proc0.p_sched = NULL; /* XXX */
-#ifdef KSE
- ksegrp0.kg_sched = &kg_sched0;
-#endif
- thread0.td_sched = &kse0;
- kse0.ke_thread = &thread0;
- kse0.ke_state = KES_THREAD;
-#ifdef KSE
- kg_sched0.skg_concurrency = 1;
- kg_sched0.skg_avail_opennings = 0; /* we are already running */
-#endif
+ thread0.td_sched = &td_sched0;
+ td_sched0.ts_thread = &thread0;
+ td_sched0.ts_state = TSS_THREAD;
}
int
@@ -847,13 +608,8 @@ sched_rr_interval(void)
/*
* We adjust the priority of the current process. The priority of
* a process gets worse as it accumulates CPU time. The cpu usage
- * ifdef KSE
- * estimator (kg_estcpu) is increased here. resetpriority() will
- * compute a different priority each time kg_estcpu increases by
- * else
* estimator (td_estcpu) is increased here. resetpriority() will
* compute a different priority each time td_estcpu increases by
- * endif
* INVERSE_ESTCPU_WEIGHT
* (until MAXPRI is reached). The cpu usage estimator ramps up
* quite quickly when the process is running (linearly), and decays
@@ -866,163 +622,83 @@ sched_rr_interval(void)
void
sched_clock(struct thread *td)
{
-#ifdef KSE
- struct ksegrp *kg;
-#endif
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
- kg = td->td_ksegrp;
-#endif
- ke = td->td_kse;
-
- ke->ke_cpticks++;
-#ifdef KSE
- kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
- if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
- resetpriority(kg);
- resetpriority_thread(td, kg);
-#else
+ ts = td->td_sched;
+
+ ts->ts_cpticks++;
td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
resetpriority(td);
resetpriority_thread(td);
-#endif
}
}
-#ifdef KSE
-/*
- * charge childs scheduling cpu usage to parent.
- *
- * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
- * Charge it to the ksegrp that did the wait since process estcpu is sum of
- * all ksegrps, this is strictly as expected. Assume that the child process
- * aggregated all the estcpu into the 'built-in' ksegrp.
- */
-#else
/*
* charge childs scheduling cpu usage to parent.
*/
-#endif
void
sched_exit(struct proc *p, struct thread *td)
{
-#ifdef KSE
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
- sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
-#else
- struct thread *parent = FIRST_THREAD_IN_PROC(p);
CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
td, td->td_proc->p_comm, td->td_priority);
- parent->td_estcpu = ESTCPULIM(parent->td_estcpu + td->td_estcpu);
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
- sched_load_rem();
-#endif
-}
-
-#ifdef KSE
-void
-sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
-{
-
- mtx_assert(&sched_lock, MA_OWNED);
- kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
+ sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}
void
sched_exit_thread(struct thread *td, struct thread *child)
{
+ struct proc *childproc = child->td_proc;
+
CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
- child, child->td_proc->p_comm, child->td_priority);
+ child, childproc->p_comm, child->td_priority);
+ td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
+ childproc->p_estcpu = ESTCPULIM(childproc->p_estcpu +
+ child->td_estcpu);
if ((child->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_rem();
}
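
[Editor's note] sched_exit_thread() now charges the exiting child's estcpu both to the reaping thread and to the process aggregate. The accumulation itself is a saturating add, roughly as below (the clamp value is a placeholder for whatever ESTCPULIM() actually bounds against):

#define TOY_ESTCPU_MAX  255     /* placeholder bound */

static unsigned int
toy_estcpu_add(unsigned int parent_estcpu, unsigned int child_estcpu)
{
        unsigned int sum = parent_estcpu + child_estcpu;

        return (sum > TOY_ESTCPU_MAX ? TOY_ESTCPU_MAX : sum);
}
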
-#endif
void
sched_fork(struct thread *td, struct thread *childtd)
{
-#ifdef KSE
- sched_fork_ksegrp(td, childtd->td_ksegrp);
sched_fork_thread(td, childtd);
-#else
- childtd->td_estcpu = td->td_estcpu;
- sched_newthread(childtd);
-#endif
-}
-
-#ifdef KSE
-void
-sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
-{
- mtx_assert(&sched_lock, MA_OWNED);
- child->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
+ childtd->td_estcpu = td->td_estcpu;
sched_newthread(childtd);
}
-#endif
void
sched_nice(struct proc *p, int nice)
{
-#ifdef KSE
- struct ksegrp *kg;
-#endif
struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_assert(&sched_lock, MA_OWNED);
p->p_nice = nice;
-#ifdef KSE
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- resetpriority(kg);
- FOREACH_THREAD_IN_GROUP(kg, td) {
- resetpriority_thread(td, kg);
- }
- }
-#else
FOREACH_THREAD_IN_PROC(p, td) {
resetpriority(td);
resetpriority_thread(td);
}
-#endif
}
void
-#ifdef KSE
-sched_class(struct ksegrp *kg, int class)
-#else
sched_class(struct thread *td, int class)
-#endif
{
mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
- kg->kg_pri_class = class;
-#else
td->td_pri_class = class;
-#endif
}
-#ifdef KSE
-/*
- * Adjust the priority of a thread.
- * This may include moving the thread within the KSEGRP,
- * changing the assignment of a kse to the thread,
- * and moving a KSE in the system run queue.
- */
-#else
/*
* Adjust the priority of a thread.
*/
-#endif
static void
sched_priority(struct thread *td, u_char prio)
{
@@ -1067,11 +743,7 @@ sched_unlend_prio(struct thread *td, u_char prio)
if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
td->td_base_pri <= PRI_MAX_TIMESHARE)
-#ifdef KSE
- base_pri = td->td_ksegrp->kg_user_pri;
-#else
base_pri = td->td_user_pri;
-#endif
else
base_pri = td->td_base_pri;
if (prio >= base_pri) {
@@ -1109,40 +781,15 @@ sched_prio(struct thread *td, u_char prio)
}
void
-#ifdef KSE
-sched_user_prio(struct ksegrp *kg, u_char prio)
-#else
sched_user_prio(struct thread *td, u_char prio)
-#endif
{
-#ifdef KSE
- struct thread *td;
-#endif
u_char oldprio;
-#ifdef KSE
- kg->kg_base_user_pri = prio;
-
- /* XXXKSE only for 1:1 */
-
- td = TAILQ_FIRST(&kg->kg_threads);
- if (td == NULL) {
- kg->kg_user_pri = prio;
- return;
- }
-
- if (td->td_flags & TDF_UBORROWING && kg->kg_user_pri <= prio)
- return;
-
- oldprio = kg->kg_user_pri;
- kg->kg_user_pri = prio;
-#else
td->td_base_user_pri = prio;
if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
return;
oldprio = td->td_user_pri;
td->td_user_pri = prio;
-#endif
if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -1155,13 +802,8 @@ sched_lend_user_prio(struct thread *td, u_char prio)
td->td_flags |= TDF_UBORROWING;
-#ifdef KSE
- oldprio = td->td_ksegrp->kg_user_pri;
- td->td_ksegrp->kg_user_pri = prio;
-#else
oldprio = td->td_user_pri;
td->td_user_pri = prio;
-#endif
if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -1170,23 +812,12 @@ sched_lend_user_prio(struct thread *td, u_char prio)
void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
-#ifdef KSE
- struct ksegrp *kg = td->td_ksegrp;
-#endif
u_char base_pri;
-#ifdef KSE
- base_pri = kg->kg_base_user_pri;
-#else
base_pri = td->td_base_user_pri;
-#endif
if (prio >= base_pri) {
td->td_flags &= ~TDF_UBORROWING;
-#ifdef KSE
- sched_user_prio(kg, base_pri);
-#else
sched_user_prio(td, base_pri);
-#endif
} else
sched_lend_user_prio(td, prio);
}
@@ -1196,59 +827,37 @@ sched_sleep(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
- td->td_ksegrp->kg_slptime = 0;
-#else
td->td_slptime = 0;
-#endif
}
-#ifdef KSE
-static void remrunqueue(struct thread *td);
-#endif
-
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
- struct kse *ke;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
+ struct td_sched *ts;
struct proc *p;
- ke = td->td_kse;
+ ts = td->td_sched;
p = td->td_proc;
mtx_assert(&sched_lock, MA_OWNED);
if ((p->p_flag & P_NOLOAD) == 0)
sched_load_rem();
-#ifdef KSE
+#if 0
/*
* We are volunteering to switch out so we get to nominate
* a successor for the rest of our quantum
- * First try another thread in our ksegrp, and then look for
- * other ksegrps in our process.
+ * First try another thread in our process
+ *
+ * this is too expensive to do without per process run queues
+ * so skip it for now.
+ * XXX keep this comment as a marker.
*/
if (sched_followon &&
(p->p_flag & P_HADTHREADS) &&
(flags & SW_VOL) &&
- newtd == NULL) {
- /* lets schedule another thread from this process */
- kg = td->td_ksegrp;
- if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
- remrunqueue(newtd);
- sched_kgfollowons++;
- } else {
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
- sched_pfollowons++;
- remrunqueue(newtd);
- break;
- }
- }
- }
- }
+ newtd == NULL)
+ newtd = mumble();
#endif
if (newtd)
@@ -1267,25 +876,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td == PCPU_GET(idlethread))
TD_SET_CAN_RUN(td);
else {
-#ifdef KSE
- SLOT_RELEASE(td->td_ksegrp);
-#endif
if (TD_IS_RUNNING(td)) {
- /* Put us back on the run queue (kse and all). */
+ /* Put us back on the run queue. */
setrunqueue(td, (flags & SW_PREEMPT) ?
SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
SRQ_OURSELF|SRQ_YIELDING);
-#ifdef KSE
- } else if (p->p_flag & P_HADTHREADS) {
- /*
- * We will not be on the run queue. So we must be
- * sleeping or similar. As it's available,
- * someone else can use the KSE if they need it.
- * It's NOT available if we are about to need it
- */
- if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
- slot_fill(td->td_ksegrp);
-#endif
}
}
if (newtd) {
@@ -1294,17 +889,12 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
* as if it had been added to the run queue and selected.
* It came from:
* * A preemption
- * ifdef KSE
* * An upcall
- * endif
* * A followon
*/
KASSERT((newtd->td_inhibitors == 0),
("trying to run inhibitted thread"));
-#ifdef KSE
- SLOT_USE(newtd->td_ksegrp);
-#endif
- newtd->td_kse->ke_flags |= KEF_DIDRUN;
+ newtd->td_sched->ts_flags |= TSF_DIDRUN;
TD_SET_RUNNING(newtd);
if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
@@ -1332,25 +922,12 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
void
sched_wakeup(struct thread *td)
{
-#ifdef KSE
- struct ksegrp *kg;
-#endif
-
mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
- kg = td->td_ksegrp;
- if (kg->kg_slptime > 1) {
- updatepri(kg);
- resetpriority(kg);
- }
- kg->kg_slptime = 0;
-#else
if (td->td_slptime > 1) {
updatepri(td);
resetpriority(td);
}
td->td_slptime = 0;
-#endif
setrunqueue(td, SRQ_BORING);
}
@@ -1481,22 +1058,17 @@ void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
- struct kse *ke;
+ struct td_sched *ts;
int forwarded = 0;
int cpu;
int single_cpu = 0;
- ke = td->td_kse;
+ ts = td->td_sched;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT(ke->ke_state != KES_ONRUNQ,
- ("sched_add: kse %p (%s) already in run queue", ke,
-#ifdef KSE
- ke->ke_proc->p_comm));
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
+ KASSERT(ts->ts_state != TSS_ONRUNQ,
+ ("sched_add: td_sched %p (%s) already in run queue", ts,
td->td_proc->p_comm));
KASSERT(td->td_proc->p_sflag & PS_INMEM,
-#endif
("sched_add: process swapped out"));
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
@@ -1505,22 +1077,22 @@ sched_add(struct thread *td, int flags)
if (td->td_pinned != 0) {
cpu = td->td_lastcpu;
- ke->ke_runq = &runq_pcpu[cpu];
+ ts->ts_runq = &runq_pcpu[cpu];
single_cpu = 1;
CTR3(KTR_RUNQ,
- "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
- } else if ((ke)->ke_flags & KEF_BOUND) {
+ "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+ } else if ((ts)->ts_flags & TSF_BOUND) {
/* Find CPU from bound runq */
- KASSERT(SKE_RUNQ_PCPU(ke),("sched_add: bound kse not on cpu runq"));
- cpu = ke->ke_runq - &runq_pcpu[0];
+ KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
+ cpu = ts->ts_runq - &runq_pcpu[0];
single_cpu = 1;
CTR3(KTR_RUNQ,
- "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
+ "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
} else {
CTR2(KTR_RUNQ,
- "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
+ "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
cpu = NOCPU;
- ke->ke_runq = &runq;
+ ts->ts_runq = &runq;
}
if (single_cpu && (cpu != PCPU_GET(cpuid))) {
@@ -1546,32 +1118,24 @@ sched_add(struct thread *td, int flags)
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
-#ifdef KSE
- SLOT_USE(td->td_ksegrp);
-#endif
- runq_add(ke->ke_runq, ke, flags);
- ke->ke_state = KES_ONRUNQ;
+ runq_add(ts->ts_runq, ts, flags);
+ ts->ts_state = TSS_ONRUNQ;
}
#else /* SMP */
{
- struct kse *ke;
- ke = td->td_kse;
+ struct td_sched *ts;
+ ts = td->td_sched;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT(ke->ke_state != KES_ONRUNQ,
- ("sched_add: kse %p (%s) already in run queue", ke,
-#ifdef KSE
- ke->ke_proc->p_comm));
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
+ KASSERT(ts->ts_state != TSS_ONRUNQ,
+ ("sched_add: td_sched %p (%s) already in run queue", ts,
td->td_proc->p_comm));
KASSERT(td->td_proc->p_sflag & PS_INMEM,
-#endif
("sched_add: process swapped out"));
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
- CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
- ke->ke_runq = &runq;
+ CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
+ ts->ts_runq = &runq;
/*
* If we are yielding (on the way out anyhow)
@@ -1590,11 +1154,8 @@ sched_add(struct thread *td, int flags)
}
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
-#ifdef KSE
- SLOT_USE(td->td_ksegrp);
-#endif
- runq_add(ke->ke_runq, ke, flags);
- ke->ke_state = KES_ONRUNQ;
+ runq_add(ts->ts_runq, ts, flags);
+ ts->ts_state = TSS_ONRUNQ;
maybe_resched(td);
}
#endif /* SMP */
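
[Editor's note] The SMP version of sched_add() above chooses among three run queues: the per-CPU queue of the last CPU for pinned threads, the per-CPU queue a bound thread already points at, or the global queue. A compact restatement with toy types follows; the real code derives the bound CPU from the ts_runq pointer rather than passing it in.

/*
 * Sketch of the SMP run-queue choice in sched_add(): pinned and bound
 * threads go to a per-CPU queue, everything else to the global one.
 */
#define TOY_MAXCPU      32
#define TOY_NOCPU       (-1)

struct toy_runq { int dummy; };

static struct toy_runq toy_runq_global;
static struct toy_runq toy_runq_pcpu[TOY_MAXCPU];

static struct toy_runq *
toy_pick_runq(int pinned, int lastcpu, int bound, int boundcpu, int *cpup)
{
        if (pinned) {                   /* td_pinned != 0 */
                *cpup = lastcpu;
                return (&toy_runq_pcpu[lastcpu]);
        }
        if (bound) {                    /* TSF_BOUND */
                *cpup = boundcpu;
                return (&toy_runq_pcpu[boundcpu]);
        }
        *cpup = TOY_NOCPU;
        return (&toy_runq_global);
}
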
@@ -1602,17 +1163,13 @@ sched_add(struct thread *td, int flags)
void
sched_rem(struct thread *td)
{
- struct kse *ke;
+ struct td_sched *ts;
- ke = td->td_kse;
-#ifdef KSE
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
+ ts = td->td_sched;
KASSERT(td->td_proc->p_sflag & PS_INMEM,
-#endif
("sched_rem: process swapped out"));
- KASSERT((ke->ke_state == KES_ONRUNQ),
- ("sched_rem: KSE not on run queue"));
+ KASSERT((ts->ts_state == TSS_ONRUNQ),
+ ("sched_rem: thread not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
@@ -1620,81 +1177,57 @@ sched_rem(struct thread *td)
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_rem();
-#ifdef KSE
- SLOT_RELEASE(td->td_ksegrp);
-#endif
- runq_remove(ke->ke_runq, ke);
+ runq_remove(ts->ts_runq, ts);
- ke->ke_state = KES_THREAD;
+ ts->ts_state = TSS_THREAD;
}
/*
* Select threads to run.
* Notice that the running threads still consume a slot.
*/
-#ifdef KSE
-struct kse *
-#else
-struct thread *
-#endif
+struct td_sched *
sched_choose(void)
{
- struct kse *ke;
+ struct td_sched *ts;
struct runq *rq;
#ifdef SMP
- struct kse *kecpu;
+ struct td_sched *kecpu;
rq = &runq;
- ke = runq_choose(&runq);
+ ts = runq_choose(&runq);
kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
- if (ke == NULL ||
+ if (ts == NULL ||
(kecpu != NULL &&
- kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
- CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
+ kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
+ CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
PCPU_GET(cpuid));
- ke = kecpu;
+ ts = kecpu;
rq = &runq_pcpu[PCPU_GET(cpuid)];
} else {
- CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
+ CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
}
#else
rq = &runq;
- ke = runq_choose(&runq);
+ ts = runq_choose(&runq);
#endif
-#ifdef KSE
- if (ke != NULL) {
-#else
- if (ke) {
-#endif
- runq_remove(rq, ke);
- ke->ke_state = KES_THREAD;
+ if (ts) {
+ runq_remove(rq, ts);
+ ts->ts_state = TSS_THREAD;
-#ifdef KSE
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
("sched_choose: process swapped out"));
-#else
- KASSERT(ke->ke_thread->td_proc->p_sflag & PS_INMEM,
- ("sched_choose: process swapped out"));
- return (ke->ke_thread);
-#endif
}
-#ifdef KSE
- return (ke);
-#else
- return (NULL);
-#endif
+ return (ts);
}
void
sched_userret(struct thread *td)
{
-#ifdef KSE
- struct ksegrp *kg;
-#endif
/*
* XXX we cheat slightly on the locking here to avoid locking in
* the usual case. Setting td_priority here is essentially an
@@ -1706,42 +1239,32 @@ sched_userret(struct thread *td)
*/
KASSERT((td->td_flags & TDF_BORROWING) == 0,
("thread with borrowed priority returning to userland"));
-#ifdef KSE
- kg = td->td_ksegrp;
- if (td->td_priority != kg->kg_user_pri) {
- mtx_lock_spin(&sched_lock);
- td->td_priority = kg->kg_user_pri;
- td->td_base_pri = kg->kg_user_pri;
- mtx_unlock_spin(&sched_lock);
- }
-#else
if (td->td_priority != td->td_user_pri) {
mtx_lock_spin(&sched_lock);
td->td_priority = td->td_user_pri;
td->td_base_pri = td->td_user_pri;
mtx_unlock_spin(&sched_lock);
}
-#endif
}
void
sched_bind(struct thread *td, int cpu)
{
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT(TD_IS_RUNNING(td),
("sched_bind: cannot bind non-running thread"));
- ke = td->td_kse;
+ ts = td->td_sched;
- ke->ke_flags |= KEF_BOUND;
+ ts->ts_flags |= TSF_BOUND;
#ifdef SMP
- ke->ke_runq = &runq_pcpu[cpu];
+ ts->ts_runq = &runq_pcpu[cpu];
if (PCPU_GET(cpuid) == cpu)
return;
- ke->ke_state = KES_THREAD;
+ ts->ts_state = TSS_THREAD;
mi_switch(SW_VOL, NULL);
#endif
@@ -1751,30 +1274,21 @@ void
sched_unbind(struct thread* td)
{
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_flags &= ~KEF_BOUND;
+ td->td_sched->ts_flags &= ~TSF_BOUND;
}
int
sched_is_bound(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- return (td->td_kse->ke_flags & KEF_BOUND);
+ return (td->td_sched->ts_flags & TSF_BOUND);
}
void
sched_relinquish(struct thread *td)
{
-#ifdef KSE
- struct ksegrp *kg;
-
- kg = td->td_ksegrp;
-#endif
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- if (kg->kg_pri_class == PRI_TIMESHARE)
-#else
if (td->td_pri_class == PRI_TIMESHARE)
-#endif
sched_prio(td, PRI_MAX_TIMESHARE);
mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
@@ -1786,14 +1300,6 @@ sched_load(void)
return (sched_tdcnt);
}
-#ifdef KSE
-int
-sched_sizeof_ksegrp(void)
-{
- return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
-}
-#endif
-
int
sched_sizeof_proc(void)
{
@@ -1803,16 +1309,16 @@ sched_sizeof_proc(void)
int
sched_sizeof_thread(void)
{
- return (sizeof(struct thread) + sizeof(struct kse));
+ return (sizeof(struct thread) + sizeof(struct td_sched));
}
fixpt_t
sched_pctcpu(struct thread *td)
{
- struct kse *ke;
+ struct td_sched *ts;
- ke = td->td_kse;
- return (ke->ke_pctcpu);
+ ts = td->td_sched;
+ return (ts->ts_pctcpu);
}
void
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 7afe0fb..98b867b 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -30,8 +30,6 @@ __FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
-#define kse td_sched
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
@@ -92,46 +90,45 @@ int tickincr = 1 << 10;
* but are scheduler specific.
*/
/*
- * The schedulable entity that can be given a context to run. A process may
- * have several of these.
+ * Thread scheduler specific section.
+ * Fields in the thread structure that are specific to this scheduler.
*/
-struct td_sched { /* really kse */
- TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
- int ke_flags; /* (j) KEF_* flags. */
- struct thread *ke_thread; /* (*) Active associated thread. */
- fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
- u_char ke_rqindex; /* (j) Run queue index. */
+struct td_sched {
+ TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
+ int ts_flags; /* (j) TSF_* flags. */
+ struct thread *ts_thread; /* (*) Active associated thread. */
+ fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
+ u_char ts_rqindex; /* (j) Run queue index. */
enum {
- KES_THREAD = 0x0, /* slaved to thread state */
- KES_ONRUNQ
- } ke_state; /* (j) thread sched specific status. */
- int ke_slptime;
- int ke_slice;
- struct runq *ke_runq;
- u_char ke_cpu; /* CPU that we have affinity for. */
+ TSS_THREAD = 0x0, /* slaved to thread state */
+ TSS_ONRUNQ
+ } ts_state; /* (j) thread sched specific status. */
+ int ts_slptime;
+ int ts_slice;
+ struct runq *ts_runq;
+ u_char ts_cpu; /* CPU that we have affinity for. */
/* The following variables are only used for pctcpu calculation */
- int ke_ltick; /* Last tick that we were running on */
- int ke_ftick; /* First tick that we were running on */
- int ke_ticks; /* Tick count */
+ int ts_ltick; /* Last tick that we were running on */
+ int ts_ftick; /* First tick that we were running on */
+ int ts_ticks; /* Tick count */
/* originally from kg_sched */
int skg_slptime; /* Number of ticks we vol. slept */
int skg_runtime; /* Number of ticks we were running */
};
-#define td_kse td_sched
-#define ke_assign ke_procq.tqe_next
-/* flags kept in ke_flags */
-#define KEF_ASSIGNED 0x0001 /* Thread is being migrated. */
-#define KEF_BOUND 0x0002 /* Thread can not migrate. */
-#define KEF_XFERABLE 0x0004 /* Thread was added as transferable. */
-#define KEF_HOLD 0x0008 /* Thread is temporarily bound. */
-#define KEF_REMOVED 0x0010 /* Thread was removed while ASSIGNED */
-#define KEF_INTERNAL 0x0020 /* Thread added due to migration. */
-#define KEF_PREEMPTED 0x0040 /* Thread was preempted */
-#define KEF_DIDRUN 0x02000 /* Thread actually ran. */
-#define KEF_EXIT 0x04000 /* Thread is being killed. */
-
-static struct kse kse0;
+#define ts_assign ts_procq.tqe_next
+/* flags kept in ts_flags */
+#define TSF_ASSIGNED 0x0001 /* Thread is being migrated. */
+#define TSF_BOUND 0x0002 /* Thread can not migrate. */
+#define TSF_XFERABLE 0x0004 /* Thread was added as transferable. */
+#define TSF_HOLD 0x0008 /* Thread is temporarily bound. */
+#define TSF_REMOVED 0x0010 /* Thread was removed while ASSIGNED */
+#define TSF_INTERNAL 0x0020 /* Thread added due to migration. */
+#define TSF_PREEMPTED 0x0040 /* Thread was preempted */
+#define TSF_DIDRUN 0x02000 /* Thread actually ran. */
+#define TSF_EXIT 0x04000 /* Thread is being killed. */
+
+static struct td_sched td_sched0;
/*
* The priority is primarily determined by the interactivity score. Thus, we
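For readers tracking the rename, the invariant the new code leans on is a strict 1:1 pairing: each thread owns one td_sched and the td_sched points back at its thread (schedinit() below wires thread0 and td_sched0 together the same way). The following is a minimal userland sketch of that linkage, using cut-down stand-in structs of my own rather than the real kernel definitions:

    #include <stdio.h>

    /* Simplified stand-ins; only the 1:1 linkage is shown, not the kernel layout. */
    struct thread;

    struct td_sched {
            struct thread   *ts_thread;     /* back-pointer to the owning thread */
            int              ts_flags;      /* TSF_*-style flags */
            int              ts_slice;      /* remaining slice, in ticks */
    };

    struct thread {
            struct td_sched *td_sched;      /* scheduler-private data, one per thread */
            int              td_priority;
    };

    int
    main(void)
    {
            static struct td_sched ts0;     /* plays the role of td_sched0 */
            static struct thread td0;       /* plays the role of thread0 */

            td0.td_sched = &ts0;            /* cf. thread0.td_sched = &td_sched0 */
            ts0.ts_thread = &td0;           /* cf. td_sched0.ts_thread = &thread0 */

            printf("thread %p <-> td_sched %p\n",
                (void *)ts0.ts_thread, (void *)td0.td_sched);
            return (0);
    }

The back-pointer is what lets run queue code hand back a td_sched and still let callers recover the thread through ts->ts_thread.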
@@ -191,9 +188,9 @@ static struct kse kse0;
*/
#define SCHED_INTERACTIVE(td) \
(sched_interact_score(td) < SCHED_INTERACT_THRESH)
-#define SCHED_CURR(td, ke) \
- ((ke->ke_thread->td_flags & TDF_BORROWING) || \
- (ke->ke_flags & KEF_PREEMPTED) || SCHED_INTERACTIVE(td))
+#define SCHED_CURR(td, ts) \
+ ((ts->ts_thread->td_flags & TDF_BORROWING) || \
+ (ts->ts_flags & TSF_PREEMPTED) || SCHED_INTERACTIVE(td))
/*
* Cpu percentage computation macros and defines.
@@ -206,22 +203,22 @@ static struct kse kse0;
#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME)
/*
- * kseq - per processor runqs and statistics.
+ * tdq - per processor runqs and statistics.
*/
-struct kseq {
+struct tdq {
struct runq ksq_idle; /* Queue of IDLE threads. */
struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */
struct runq *ksq_next; /* Next timeshare queue. */
struct runq *ksq_curr; /* Current queue. */
int ksq_load_timeshare; /* Load for timeshare. */
int ksq_load; /* Aggregate load. */
- short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
+ short ksq_nice[SCHED_PRI_NRESV]; /* threads in each nice bin. */
short ksq_nicemin; /* Least nice. */
#ifdef SMP
int ksq_transferable;
- LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */
- struct kseq_group *ksq_group; /* Our processor group. */
- volatile struct kse *ksq_assigned; /* assigned by another CPU. */
+ LIST_ENTRY(tdq) ksq_siblings; /* Next in tdq group. */
+ struct tdq_group *ksq_group; /* Our processor group. */
+ volatile struct td_sched *ksq_assigned; /* assigned by another CPU. */
#else
int ksq_sysload; /* For loadavg, !ITHD load. */
#endif
@@ -229,21 +226,21 @@ struct kseq {
#ifdef SMP
/*
- * kseq groups are groups of processors which can cheaply share threads. When
+ * tdq groups are groups of processors which can cheaply share threads. When
* one processor in the group goes idle it will check the runqs of the other
* processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
* In a numa environment we'd want an idle bitmap per group and a two tiered
* load balancer.
*/
-struct kseq_group {
- int ksg_cpus; /* Count of CPUs in this kseq group. */
+struct tdq_group {
+ int ksg_cpus; /* Count of CPUs in this tdq group. */
cpumask_t ksg_cpumask; /* Mask of cpus in this group. */
cpumask_t ksg_idlemask; /* Idle cpus in this group. */
cpumask_t ksg_mask; /* Bit mask for first cpu. */
int ksg_load; /* Total load of this group. */
int ksg_transferable; /* Transferable load of this group. */
- LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */
+ LIST_HEAD(, tdq) ksg_members; /* Linked list of all members. */
};
#endif
@@ -251,185 +248,185 @@ struct kseq_group {
* One kse queue per processor.
*/
#ifdef SMP
-static cpumask_t kseq_idle;
+static cpumask_t tdq_idle;
static int ksg_maxid;
-static struct kseq kseq_cpu[MAXCPU];
-static struct kseq_group kseq_groups[MAXCPU];
+static struct tdq tdq_cpu[MAXCPU];
+static struct tdq_group tdq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;
static int balance_groups;
-#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)])
-#define KSEQ_CPU(x) (&kseq_cpu[(x)])
-#define KSEQ_ID(x) ((x) - kseq_cpu)
-#define KSEQ_GROUP(x) (&kseq_groups[(x)])
+#define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)])
+#define TDQ_CPU(x) (&tdq_cpu[(x)])
+#define TDQ_ID(x) ((x) - tdq_cpu)
+#define TDQ_GROUP(x) (&tdq_groups[(x)])
#else /* !SMP */
-static struct kseq kseq_cpu;
+static struct tdq tdq_cpu;
-#define KSEQ_SELF() (&kseq_cpu)
-#define KSEQ_CPU(x) (&kseq_cpu)
+#define TDQ_SELF() (&tdq_cpu)
+#define TDQ_CPU(x) (&tdq_cpu)
#endif
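TDQ_SELF(), TDQ_CPU() and TDQ_ID() are thin wrappers over a static per-CPU array on SMP builds (and collapse to a single queue otherwise). A hedged sketch of the same pattern, with a plain variable standing in for PCPU_GET(cpuid):

    #include <stdio.h>

    #define MAXCPU  4

    struct tdq {
            int     ksq_load;               /* aggregate load on this queue */
    };

    static struct tdq tdq_cpu[MAXCPU];
    static int curcpu = 2;                  /* stand-in for PCPU_GET(cpuid) */

    #define TDQ_SELF()      (&tdq_cpu[curcpu])
    #define TDQ_CPU(x)      (&tdq_cpu[(x)])
    #define TDQ_ID(x)       ((int)((x) - tdq_cpu))

    int
    main(void)
    {
            TDQ_CPU(1)->ksq_load = 3;
            TDQ_SELF()->ksq_load = 5;
            printf("self is cpu %d (load %d), cpu1 load %d\n",
                TDQ_ID(TDQ_SELF()), TDQ_SELF()->ksq_load, TDQ_CPU(1)->ksq_load);
            return (0);
    }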
-static struct kse *sched_choose(void); /* XXX Should be thread * */
-static void sched_slice(struct kse *);
+static struct td_sched *sched_choose(void); /* XXX Should be thread * */
+static void sched_slice(struct td_sched *);
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
-static void sched_pctcpu_update(struct kse *);
+static void sched_pctcpu_update(struct td_sched *);
/* Operations on per processor queues */
-static struct kse * kseq_choose(struct kseq *);
-static void kseq_setup(struct kseq *);
-static void kseq_load_add(struct kseq *, struct kse *);
-static void kseq_load_rem(struct kseq *, struct kse *);
-static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
-static __inline void kseq_runq_rem(struct kseq *, struct kse *);
-static void kseq_nice_add(struct kseq *, int);
-static void kseq_nice_rem(struct kseq *, int);
-void kseq_print(int cpu);
+static struct td_sched * tdq_choose(struct tdq *);
+static void tdq_setup(struct tdq *);
+static void tdq_load_add(struct tdq *, struct td_sched *);
+static void tdq_load_rem(struct tdq *, struct td_sched *);
+static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
+static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
+static void tdq_nice_add(struct tdq *, int);
+static void tdq_nice_rem(struct tdq *, int);
+void tdq_print(int cpu);
#ifdef SMP
-static int kseq_transfer(struct kseq *, struct kse *, int);
-static struct kse *runq_steal(struct runq *);
+static int tdq_transfer(struct tdq *, struct td_sched *, int);
+static struct td_sched *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
-static void sched_balance_group(struct kseq_group *);
-static void sched_balance_pair(struct kseq *, struct kseq *);
-static void kseq_move(struct kseq *, int);
-static int kseq_idled(struct kseq *);
-static void kseq_notify(struct kse *, int);
-static void kseq_assign(struct kseq *);
-static struct kse *kseq_steal(struct kseq *, int);
-#define KSE_CAN_MIGRATE(ke) \
- ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+static void sched_balance_group(struct tdq_group *);
+static void sched_balance_pair(struct tdq *, struct tdq *);
+static void tdq_move(struct tdq *, int);
+static int tdq_idled(struct tdq *);
+static void tdq_notify(struct td_sched *, int);
+static void tdq_assign(struct tdq *);
+static struct td_sched *tdq_steal(struct tdq *, int);
+#define THREAD_CAN_MIGRATE(ts) \
+ ((ts)->ts_thread->td_pinned == 0 && ((ts)->ts_flags & TSF_BOUND) == 0)
#endif
void
-kseq_print(int cpu)
+tdq_print(int cpu)
{
- struct kseq *kseq;
+ struct tdq *tdq;
int i;
- kseq = KSEQ_CPU(cpu);
+ tdq = TDQ_CPU(cpu);
- printf("kseq:\n");
- printf("\tload: %d\n", kseq->ksq_load);
- printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
+ printf("tdq:\n");
+ printf("\tload: %d\n", tdq->ksq_load);
+ printf("\tload TIMESHARE: %d\n", tdq->ksq_load_timeshare);
#ifdef SMP
- printf("\tload transferable: %d\n", kseq->ksq_transferable);
+ printf("\tload transferable: %d\n", tdq->ksq_transferable);
#endif
- printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
+ printf("\tnicemin:\t%d\n", tdq->ksq_nicemin);
printf("\tnice counts:\n");
for (i = 0; i < SCHED_PRI_NRESV; i++)
- if (kseq->ksq_nice[i])
+ if (tdq->ksq_nice[i])
printf("\t\t%d = %d\n",
- i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
+ i - SCHED_PRI_NHALF, tdq->ksq_nice[i]);
}
static __inline void
-kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
+tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
#ifdef SMP
- if (KSE_CAN_MIGRATE(ke)) {
- kseq->ksq_transferable++;
- kseq->ksq_group->ksg_transferable++;
- ke->ke_flags |= KEF_XFERABLE;
+ if (THREAD_CAN_MIGRATE(ts)) {
+ tdq->ksq_transferable++;
+ tdq->ksq_group->ksg_transferable++;
+ ts->ts_flags |= TSF_XFERABLE;
}
#endif
- if (ke->ke_flags & KEF_PREEMPTED)
+ if (ts->ts_flags & TSF_PREEMPTED)
flags |= SRQ_PREEMPTED;
- runq_add(ke->ke_runq, ke, flags);
+ runq_add(ts->ts_runq, ts, flags);
}
static __inline void
-kseq_runq_rem(struct kseq *kseq, struct kse *ke)
+tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
#ifdef SMP
- if (ke->ke_flags & KEF_XFERABLE) {
- kseq->ksq_transferable--;
- kseq->ksq_group->ksg_transferable--;
- ke->ke_flags &= ~KEF_XFERABLE;
+ if (ts->ts_flags & TSF_XFERABLE) {
+ tdq->ksq_transferable--;
+ tdq->ksq_group->ksg_transferable--;
+ ts->ts_flags &= ~TSF_XFERABLE;
}
#endif
- runq_remove(ke->ke_runq, ke);
+ runq_remove(ts->ts_runq, ts);
}
static void
-kseq_load_add(struct kseq *kseq, struct kse *ke)
+tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
int class;
mtx_assert(&sched_lock, MA_OWNED);
- class = PRI_BASE(ke->ke_thread->td_pri_class);
+ class = PRI_BASE(ts->ts_thread->td_pri_class);
if (class == PRI_TIMESHARE)
- kseq->ksq_load_timeshare++;
- kseq->ksq_load++;
- CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
- if (class != PRI_ITHD && (ke->ke_thread->td_proc->p_flag & P_NOLOAD) == 0)
+ tdq->ksq_load_timeshare++;
+ tdq->ksq_load++;
+ CTR1(KTR_SCHED, "load: %d", tdq->ksq_load);
+ if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
- kseq->ksq_group->ksg_load++;
+ tdq->ksq_group->ksg_load++;
#else
- kseq->ksq_sysload++;
+ tdq->ksq_sysload++;
#endif
- if (ke->ke_thread->td_pri_class == PRI_TIMESHARE)
- kseq_nice_add(kseq, ke->ke_thread->td_proc->p_nice);
+ if (ts->ts_thread->td_pri_class == PRI_TIMESHARE)
+ tdq_nice_add(tdq, ts->ts_thread->td_proc->p_nice);
}
static void
-kseq_load_rem(struct kseq *kseq, struct kse *ke)
+tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
int class;
mtx_assert(&sched_lock, MA_OWNED);
- class = PRI_BASE(ke->ke_thread->td_pri_class);
+ class = PRI_BASE(ts->ts_thread->td_pri_class);
if (class == PRI_TIMESHARE)
- kseq->ksq_load_timeshare--;
- if (class != PRI_ITHD && (ke->ke_thread->td_proc->p_flag & P_NOLOAD) == 0)
+ tdq->ksq_load_timeshare--;
+ if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
- kseq->ksq_group->ksg_load--;
+ tdq->ksq_group->ksg_load--;
#else
- kseq->ksq_sysload--;
+ tdq->ksq_sysload--;
#endif
- kseq->ksq_load--;
- CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
- ke->ke_runq = NULL;
- if (ke->ke_thread->td_pri_class == PRI_TIMESHARE)
- kseq_nice_rem(kseq, ke->ke_thread->td_proc->p_nice);
+ tdq->ksq_load--;
+ CTR1(KTR_SCHED, "load: %d", tdq->ksq_load);
+ ts->ts_runq = NULL;
+ if (ts->ts_thread->td_pri_class == PRI_TIMESHARE)
+ tdq_nice_rem(tdq, ts->ts_thread->td_proc->p_nice);
}
static void
-kseq_nice_add(struct kseq *kseq, int nice)
+tdq_nice_add(struct tdq *tdq, int nice)
{
mtx_assert(&sched_lock, MA_OWNED);
/* Normalize to zero. */
- kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
- if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
- kseq->ksq_nicemin = nice;
+ tdq->ksq_nice[nice + SCHED_PRI_NHALF]++;
+ if (nice < tdq->ksq_nicemin || tdq->ksq_load_timeshare == 1)
+ tdq->ksq_nicemin = nice;
}
static void
-kseq_nice_rem(struct kseq *kseq, int nice)
+tdq_nice_rem(struct tdq *tdq, int nice)
{
int n;
mtx_assert(&sched_lock, MA_OWNED);
/* Normalize to zero. */
n = nice + SCHED_PRI_NHALF;
- kseq->ksq_nice[n]--;
- KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
+ tdq->ksq_nice[n]--;
+ KASSERT(tdq->ksq_nice[n] >= 0, ("Negative nice count."));
/*
* If this wasn't the smallest nice value or there are more in
* this bucket we can just return. Otherwise we have to recalculate
* the smallest nice.
*/
- if (nice != kseq->ksq_nicemin ||
- kseq->ksq_nice[n] != 0 ||
- kseq->ksq_load_timeshare == 0)
+ if (nice != tdq->ksq_nicemin ||
+ tdq->ksq_nice[n] != 0 ||
+ tdq->ksq_load_timeshare == 0)
return;
for (; n < SCHED_PRI_NRESV; n++)
- if (kseq->ksq_nice[n]) {
- kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
+ if (tdq->ksq_nice[n]) {
+ tdq->ksq_nicemin = n - SCHED_PRI_NHALF;
return;
}
}
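tdq_nice_add()/tdq_nice_rem() above keep a count of timeshare threads per nice value, indexed after shifting nice into a non-negative range, and cache the smallest nice currently queued so slices can be computed relative to it. A standalone sketch of that bookkeeping, with illustrative NHALF/NRESV values rather than the kernel's SCHED_PRI_* macros:

    #include <assert.h>
    #include <stdio.h>

    #define NHALF   20                      /* stand-in for SCHED_PRI_NHALF */
    #define NRESV   (2 * NHALF + 1)         /* stand-in for SCHED_PRI_NRESV */

    static short    nice_bin[NRESV];        /* timeshare thread count per nice value */
    static short    nicemin;                /* least nice value currently queued */
    static int      load_timeshare;         /* number of timeshare threads queued */

    static void
    nice_add(int nice)
    {
            nice_bin[nice + NHALF]++;       /* shift -20..20 into 0..40 */
            if (nice < nicemin || load_timeshare == 1)
                    nicemin = nice;
    }

    static void
    nice_rem(int nice)
    {
            int n = nice + NHALF;

            nice_bin[n]--;
            assert(nice_bin[n] >= 0);
            /* Only rescan when the last thread at the old minimum went away. */
            if (nice != nicemin || nice_bin[n] != 0 || load_timeshare == 0)
                    return;
            for (; n < NRESV; n++)
                    if (nice_bin[n]) {
                            nicemin = n - NHALF;
                            return;
                    }
    }

    int
    main(void)
    {
            load_timeshare = 1; nice_add(5);
            load_timeshare = 2; nice_add(-3);
            printf("nicemin %d\n", nicemin);        /* -3 */
            load_timeshare = 1; nice_rem(-3);
            printf("nicemin %d\n", nicemin);        /* back to 5 */
            return (0);
    }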
@@ -454,9 +451,9 @@ kseq_nice_rem(struct kseq *kseq, int nice)
static void
sched_balance(void)
{
- struct kseq_group *high;
- struct kseq_group *low;
- struct kseq_group *ksg;
+ struct tdq_group *high;
+ struct tdq_group *low;
+ struct tdq_group *ksg;
int cnt;
int i;
@@ -466,7 +463,7 @@ sched_balance(void)
low = high = NULL;
i = random() % (ksg_maxid + 1);
for (cnt = 0; cnt <= ksg_maxid; cnt++) {
- ksg = KSEQ_GROUP(i);
+ ksg = TDQ_GROUP(i);
/*
* Find the CPU with the highest load that has some
* threads to transfer.
@@ -493,34 +490,34 @@ sched_balance_groups(void)
mtx_assert(&sched_lock, MA_OWNED);
if (smp_started)
for (i = 0; i <= ksg_maxid; i++)
- sched_balance_group(KSEQ_GROUP(i));
+ sched_balance_group(TDQ_GROUP(i));
}
static void
-sched_balance_group(struct kseq_group *ksg)
+sched_balance_group(struct tdq_group *ksg)
{
- struct kseq *kseq;
- struct kseq *high;
- struct kseq *low;
+ struct tdq *tdq;
+ struct tdq *high;
+ struct tdq *low;
int load;
if (ksg->ksg_transferable == 0)
return;
low = NULL;
high = NULL;
- LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
- load = kseq->ksq_load;
+ LIST_FOREACH(tdq, &ksg->ksg_members, ksq_siblings) {
+ load = tdq->ksq_load;
if (high == NULL || load > high->ksq_load)
- high = kseq;
+ high = tdq;
if (low == NULL || load < low->ksq_load)
- low = kseq;
+ low = tdq;
}
if (high != NULL && low != NULL && high != low)
sched_balance_pair(high, low);
}
static void
-sched_balance_pair(struct kseq *high, struct kseq *low)
+sched_balance_pair(struct tdq *high, struct tdq *low)
{
int transferable;
int high_load;
@@ -531,7 +528,7 @@ sched_balance_pair(struct kseq *high, struct kseq *low)
/*
* If we're transfering within a group we have to use this specific
- * kseq's transferable count, otherwise we can steal from other members
+ * tdq's transferable count, otherwise we can steal from other members
* of the group.
*/
if (high->ksq_group == low->ksq_group) {
@@ -555,135 +552,135 @@ sched_balance_pair(struct kseq *high, struct kseq *low)
move++;
move = min(move, transferable);
for (i = 0; i < move; i++)
- kseq_move(high, KSEQ_ID(low));
+ tdq_move(high, TDQ_ID(low));
return;
}
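The start of sched_balance_pair() falls outside this hunk, but the visible tail suggests the shape: move roughly half of the load difference from the busier queue, rounded up, and never more than the transferable count. A hedged sketch of that arithmetic (the exact kernel computation may differ):

    #include <stdio.h>

    /* Illustrative only: the rounding and bounds are a guess at the intent. */
    static int
    balance_move_count(int high_load, int low_load, int transferable)
    {
            int diff, move;

            diff = high_load - low_load;
            move = diff / 2;
            if (diff & 0x1)                 /* round up on an odd difference */
                    move++;
            if (move > transferable)
                    move = transferable;
            return (move);
    }

    int
    main(void)
    {
            printf("%d\n", balance_move_count(7, 2, 4));    /* 3 */
            printf("%d\n", balance_move_count(7, 2, 2));    /* capped at 2 */
            return (0);
    }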
static void
-kseq_move(struct kseq *from, int cpu)
+tdq_move(struct tdq *from, int cpu)
{
- struct kseq *kseq;
- struct kseq *to;
- struct kse *ke;
-
- kseq = from;
- to = KSEQ_CPU(cpu);
- ke = kseq_steal(kseq, 1);
- if (ke == NULL) {
- struct kseq_group *ksg;
-
- ksg = kseq->ksq_group;
- LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
- if (kseq == from || kseq->ksq_transferable == 0)
+ struct tdq *tdq;
+ struct tdq *to;
+ struct td_sched *ts;
+
+ tdq = from;
+ to = TDQ_CPU(cpu);
+ ts = tdq_steal(tdq, 1);
+ if (ts == NULL) {
+ struct tdq_group *ksg;
+
+ ksg = tdq->ksq_group;
+ LIST_FOREACH(tdq, &ksg->ksg_members, ksq_siblings) {
+ if (tdq == from || tdq->ksq_transferable == 0)
continue;
- ke = kseq_steal(kseq, 1);
+ ts = tdq_steal(tdq, 1);
break;
}
- if (ke == NULL)
- panic("kseq_move: No KSEs available with a "
+ if (ts == NULL)
+ panic("tdq_move: No threads available with a "
"transferable count of %d\n",
ksg->ksg_transferable);
}
- if (kseq == to)
+ if (tdq == to)
return;
- ke->ke_state = KES_THREAD;
- kseq_runq_rem(kseq, ke);
- kseq_load_rem(kseq, ke);
- kseq_notify(ke, cpu);
+ ts->ts_state = TSS_THREAD;
+ tdq_runq_rem(tdq, ts);
+ tdq_load_rem(tdq, ts);
+ tdq_notify(ts, cpu);
}
static int
-kseq_idled(struct kseq *kseq)
+tdq_idled(struct tdq *tdq)
{
- struct kseq_group *ksg;
- struct kseq *steal;
- struct kse *ke;
+ struct tdq_group *ksg;
+ struct tdq *steal;
+ struct td_sched *ts;
- ksg = kseq->ksq_group;
+ ksg = tdq->ksq_group;
/*
* If we're in a cpu group, try and steal kses from another cpu in
* the group before idling.
*/
if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
- if (steal == kseq || steal->ksq_transferable == 0)
+ if (steal == tdq || steal->ksq_transferable == 0)
continue;
- ke = kseq_steal(steal, 0);
- if (ke == NULL)
+ ts = tdq_steal(steal, 0);
+ if (ts == NULL)
continue;
- ke->ke_state = KES_THREAD;
- kseq_runq_rem(steal, ke);
- kseq_load_rem(steal, ke);
- ke->ke_cpu = PCPU_GET(cpuid);
- ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
- sched_add(ke->ke_thread, SRQ_YIELDING);
+ ts->ts_state = TSS_THREAD;
+ tdq_runq_rem(steal, ts);
+ tdq_load_rem(steal, ts);
+ ts->ts_cpu = PCPU_GET(cpuid);
+ ts->ts_flags |= TSF_INTERNAL | TSF_HOLD;
+ sched_add(ts->ts_thread, SRQ_YIELDING);
return (0);
}
}
/*
* We only set the idled bit when all of the cpus in the group are
- * idle. Otherwise we could get into a situation where a KSE bounces
+ * idle. Otherwise we could get into a situation where a thread bounces
 * back and forth between two idle cores on separate physical CPUs.
*/
ksg->ksg_idlemask |= PCPU_GET(cpumask);
if (ksg->ksg_idlemask != ksg->ksg_cpumask)
return (1);
- atomic_set_int(&kseq_idle, ksg->ksg_mask);
+ atomic_set_int(&tdq_idle, ksg->ksg_mask);
return (1);
}
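tdq_idled() only advertises a group in the global idle mask once every sibling CPU in the group is idle, exactly to stop a thread from ping-ponging between two idle cores of one package. A cut-down sketch of that mask rule, with simplified stand-in names of my own:

    #include <stdio.h>

    typedef unsigned int cpumask_t;

    static cpumask_t tdq_idle_mask;         /* global: one bit per group */

    struct group {
            cpumask_t g_cpumask;            /* every cpu in the group */
            cpumask_t g_idlemask;           /* cpus in the group that are idle */
            cpumask_t g_mask;               /* this group's bit in the global mask */
    };

    /* Called by a cpu in 'g' that found nothing to run and nothing to steal. */
    static void
    mark_idle(struct group *g, cpumask_t self)
    {
            g->g_idlemask |= self;
            /* Advertise the group only once every sibling is idle. */
            if (g->g_idlemask == g->g_cpumask)
                    tdq_idle_mask |= g->g_mask;
    }

    int
    main(void)
    {
            struct group g = { .g_cpumask = 0x3, .g_mask = 0x1 };   /* cpus 0, 1 */

            mark_idle(&g, 0x1);
            printf("after cpu0: %#x\n", tdq_idle_mask);     /* still 0 */
            mark_idle(&g, 0x2);
            printf("after cpu1: %#x\n", tdq_idle_mask);     /* now 0x1 */
            return (0);
    }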
static void
-kseq_assign(struct kseq *kseq)
+tdq_assign(struct tdq *tdq)
{
- struct kse *nke;
- struct kse *ke;
+ struct td_sched *nts;
+ struct td_sched *ts;
do {
- *(volatile struct kse **)&ke = kseq->ksq_assigned;
- } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
- (uintptr_t)ke, (uintptr_t)NULL));
- for (; ke != NULL; ke = nke) {
- nke = ke->ke_assign;
- kseq->ksq_group->ksg_load--;
- kseq->ksq_load--;
- ke->ke_flags &= ~KEF_ASSIGNED;
- if (ke->ke_flags & KEF_REMOVED) {
- ke->ke_flags &= ~KEF_REMOVED;
+ *(volatile struct td_sched **)&ts = tdq->ksq_assigned;
+ } while(!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->ksq_assigned,
+ (uintptr_t)ts, (uintptr_t)NULL));
+ for (; ts != NULL; ts = nts) {
+ nts = ts->ts_assign;
+ tdq->ksq_group->ksg_load--;
+ tdq->ksq_load--;
+ ts->ts_flags &= ~TSF_ASSIGNED;
+ if (ts->ts_flags & TSF_REMOVED) {
+ ts->ts_flags &= ~TSF_REMOVED;
continue;
}
- ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
- sched_add(ke->ke_thread, SRQ_YIELDING);
+ ts->ts_flags |= TSF_INTERNAL | TSF_HOLD;
+ sched_add(ts->ts_thread, SRQ_YIELDING);
}
}
static void
-kseq_notify(struct kse *ke, int cpu)
+tdq_notify(struct td_sched *ts, int cpu)
{
- struct kseq *kseq;
+ struct tdq *tdq;
struct thread *td;
struct pcpu *pcpu;
int class;
int prio;
- kseq = KSEQ_CPU(cpu);
+ tdq = TDQ_CPU(cpu);
/* XXX */
- class = PRI_BASE(ke->ke_thread->td_pri_class);
+ class = PRI_BASE(ts->ts_thread->td_pri_class);
if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
- (kseq_idle & kseq->ksq_group->ksg_mask))
- atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
- kseq->ksq_group->ksg_load++;
- kseq->ksq_load++;
- ke->ke_cpu = cpu;
- ke->ke_flags |= KEF_ASSIGNED;
- prio = ke->ke_thread->td_priority;
+ (tdq_idle & tdq->ksq_group->ksg_mask))
+ atomic_clear_int(&tdq_idle, tdq->ksq_group->ksg_mask);
+ tdq->ksq_group->ksg_load++;
+ tdq->ksq_load++;
+ ts->ts_cpu = cpu;
+ ts->ts_flags |= TSF_ASSIGNED;
+ prio = ts->ts_thread->td_priority;
/*
- * Place a KSE on another cpu's queue and force a resched.
+ * Place a thread on another cpu's queue and force a resched.
*/
do {
- *(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
- } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
- (uintptr_t)ke->ke_assign, (uintptr_t)ke));
+ *(volatile struct td_sched **)&ts->ts_assign = tdq->ksq_assigned;
+ } while(!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->ksq_assigned,
+ (uintptr_t)ts->ts_assign, (uintptr_t)ts));
/*
* Without sched_lock we could lose a race where we set NEEDRESCHED
* on a thread that is switched out before the IPI is delivered. This
@@ -692,19 +689,19 @@ kseq_notify(struct kse *ke, int cpu)
*/
pcpu = pcpu_find(cpu);
td = pcpu->pc_curthread;
- if (ke->ke_thread->td_priority < td->td_priority ||
+ if (ts->ts_thread->td_priority < td->td_priority ||
td == pcpu->pc_idlethread) {
td->td_flags |= TDF_NEEDRESCHED;
ipi_selected(1 << cpu, IPI_AST);
}
}
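tdq_notify() and tdq_assign() together implement a lock-free hand-off: the sender pushes a td_sched onto ksq_assigned with a compare-and-swap loop, and the target CPU later takes the whole list in one swap with NULL. Below is a userland sketch of the same pattern using C11 atomics in place of atomic_cmpset_ptr(); the struct and function names are mine:

    #include <stdatomic.h>
    #include <stdio.h>

    struct node {
            struct node     *next;          /* plays the role of ts_assign */
            int              id;
    };

    static _Atomic(struct node *) assigned; /* plays the role of ksq_assigned */

    /* Producer side (tdq_notify): push one node with a CAS loop. */
    static void
    push(struct node *n)
    {
            struct node *head;

            do {
                    head = atomic_load(&assigned);
                    n->next = head;
            } while (!atomic_compare_exchange_weak(&assigned, &head, n));
    }

    /* Consumer side (tdq_assign): take the whole list by swapping in NULL. */
    static struct node *
    take_all(void)
    {
            struct node *head;

            do {
                    head = atomic_load(&assigned);
            } while (!atomic_compare_exchange_weak(&assigned, &head, NULL));
            return (head);
    }

    int
    main(void)
    {
            struct node a = { .id = 1 }, b = { .id = 2 };

            push(&a);
            push(&b);
            for (struct node *n = take_all(); n != NULL; n = n->next)
                    printf("drained %d\n", n->id);  /* 2 then 1: LIFO order */
            return (0);
    }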
-static struct kse *
+static struct td_sched *
runq_steal(struct runq *rq)
{
struct rqhead *rqh;
struct rqbits *rqb;
- struct kse *ke;
+ struct td_sched *ts;
int word;
int bit;
@@ -717,39 +714,39 @@ runq_steal(struct runq *rq)
if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
continue;
rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
- TAILQ_FOREACH(ke, rqh, ke_procq) {
- if (KSE_CAN_MIGRATE(ke))
- return (ke);
+ TAILQ_FOREACH(ts, rqh, ts_procq) {
+ if (THREAD_CAN_MIGRATE(ts))
+ return (ts);
}
}
}
return (NULL);
}
-static struct kse *
-kseq_steal(struct kseq *kseq, int stealidle)
+static struct td_sched *
+tdq_steal(struct tdq *tdq, int stealidle)
{
- struct kse *ke;
+ struct td_sched *ts;
/*
* Steal from next first to try to get a non-interactive task that
* may not have run for a while.
*/
- if ((ke = runq_steal(kseq->ksq_next)) != NULL)
- return (ke);
- if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
- return (ke);
+ if ((ts = runq_steal(tdq->ksq_next)) != NULL)
+ return (ts);
+ if ((ts = runq_steal(tdq->ksq_curr)) != NULL)
+ return (ts);
if (stealidle)
- return (runq_steal(&kseq->ksq_idle));
+ return (runq_steal(&tdq->ksq_idle));
return (NULL);
}
int
-kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
+tdq_transfer(struct tdq *tdq, struct td_sched *ts, int class)
{
- struct kseq_group *nksg;
- struct kseq_group *ksg;
- struct kseq *old;
+ struct tdq_group *nksg;
+ struct tdq_group *ksg;
+ struct tdq *old;
int cpu;
int idx;
@@ -767,16 +764,16 @@ kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
* some CPUs may idle. Too low and there will be excess migration
* and context switches.
*/
- old = KSEQ_CPU(ke->ke_cpu);
+ old = TDQ_CPU(ts->ts_cpu);
nksg = old->ksq_group;
- ksg = kseq->ksq_group;
- if (kseq_idle) {
- if (kseq_idle & nksg->ksg_mask) {
+ ksg = tdq->ksq_group;
+ if (tdq_idle) {
+ if (tdq_idle & nksg->ksg_mask) {
cpu = ffs(nksg->ksg_idlemask);
if (cpu) {
CTR2(KTR_SCHED,
- "kseq_transfer: %p found old cpu %X "
- "in idlemask.", ke, cpu);
+ "tdq_transfer: %p found old cpu %X "
+ "in idlemask.", ts, cpu);
goto migrate;
}
}
@@ -784,30 +781,30 @@ kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
* Multiple cpus could find this bit simultaneously
* but the race shouldn't be terrible.
*/
- cpu = ffs(kseq_idle);
+ cpu = ffs(tdq_idle);
if (cpu) {
- CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
- "in idlemask.", ke, cpu);
+ CTR2(KTR_SCHED, "tdq_transfer: %p found %X "
+ "in idlemask.", ts, cpu);
goto migrate;
}
}
idx = 0;
#if 0
- if (old->ksq_load < kseq->ksq_load) {
- cpu = ke->ke_cpu + 1;
- CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
- "load less than ours.", ke, cpu);
+ if (old->ksq_load < tdq->ksq_load) {
+ cpu = ts->ts_cpu + 1;
+ CTR2(KTR_SCHED, "tdq_transfer: %p old cpu %X "
+ "load less than ours.", ts, cpu);
goto migrate;
}
/*
* No new CPU was found, look for one with less load.
*/
for (idx = 0; idx <= ksg_maxid; idx++) {
- nksg = KSEQ_GROUP(idx);
+ nksg = TDQ_GROUP(idx);
if (nksg->ksg_load /*+ (nksg->ksg_cpus * 2)*/ < ksg->ksg_load) {
cpu = ffs(nksg->ksg_cpumask);
- CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
- "than ours.", ke, cpu);
+ CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X load less "
+ "than ours.", ts, cpu);
goto migrate;
}
}
@@ -819,8 +816,8 @@ kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
if (ksg->ksg_idlemask) {
cpu = ffs(ksg->ksg_idlemask);
if (cpu) {
- CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
- "group.", ke, cpu);
+ CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X idle in "
+ "group.", ts, cpu);
goto migrate;
}
}
@@ -830,8 +827,8 @@ migrate:
* Now that we've found an idle CPU, migrate the thread.
*/
cpu--;
- ke->ke_runq = NULL;
- kseq_notify(ke, cpu);
+ ts->ts_runq = NULL;
+ tdq_notify(ts, cpu);
return (1);
}
@@ -842,61 +839,61 @@ migrate:
* Pick the highest priority task we have and return it.
*/
-static struct kse *
-kseq_choose(struct kseq *kseq)
+static struct td_sched *
+tdq_choose(struct tdq *tdq)
{
struct runq *swap;
- struct kse *ke;
+ struct td_sched *ts;
int nice;
mtx_assert(&sched_lock, MA_OWNED);
swap = NULL;
for (;;) {
- ke = runq_choose(kseq->ksq_curr);
- if (ke == NULL) {
+ ts = runq_choose(tdq->ksq_curr);
+ if (ts == NULL) {
/*
* We already swapped once and didn't get anywhere.
*/
if (swap)
break;
- swap = kseq->ksq_curr;
- kseq->ksq_curr = kseq->ksq_next;
- kseq->ksq_next = swap;
+ swap = tdq->ksq_curr;
+ tdq->ksq_curr = tdq->ksq_next;
+ tdq->ksq_next = swap;
continue;
}
/*
- * If we encounter a slice of 0 the kse is in a
- * TIMESHARE kse group and its nice was too far out
+ * If we encounter a slice of 0 the td_sched is in a
+ * TIMESHARE td_sched group and its nice was too far out
* of the range that receives slices.
*/
- nice = ke->ke_thread->td_proc->p_nice + (0 - kseq->ksq_nicemin);
+ nice = ts->ts_thread->td_proc->p_nice + (0 - tdq->ksq_nicemin);
#if 0
- if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
- ke->ke_thread->td_proc->p_nice != 0)) {
- runq_remove(ke->ke_runq, ke);
- sched_slice(ke);
- ke->ke_runq = kseq->ksq_next;
- runq_add(ke->ke_runq, ke, 0);
+ if (ts->ts_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
+ ts->ts_thread->td_proc->p_nice != 0)) {
+ runq_remove(ts->ts_runq, ts);
+ sched_slice(ts);
+ ts->ts_runq = tdq->ksq_next;
+ runq_add(ts->ts_runq, ts, 0);
continue;
}
#endif
- return (ke);
+ return (ts);
}
- return (runq_choose(&kseq->ksq_idle));
+ return (runq_choose(&tdq->ksq_idle));
}
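tdq_choose() drains the "current" timeshare queue and, when it runs dry, swaps it with the "next" queue where expired threads were parked; a second empty pass means there is nothing left but idle work. A toy sketch of that rotation, with int arrays standing in for the real run queues:

    #include <stdio.h>

    #define QLEN    8

    struct q {
            int     item[QLEN];
            int     n;
    };

    static struct q timeshare[2];
    static struct q *curr = &timeshare[0];  /* plays the role of ksq_curr */
    static struct q *nextq = &timeshare[1]; /* plays the role of ksq_next */

    static int
    choose(void)
    {
            struct q *swap = NULL;

            for (;;) {
                    if (curr->n == 0) {
                            if (swap != NULL)       /* already swapped once: empty */
                                    return (-1);
                            swap = curr;            /* rotate curr and next */
                            curr = nextq;
                            nextq = swap;
                            continue;
                    }
                    return (curr->item[--curr->n]);
            }
    }

    int
    main(void)
    {
            nextq->item[nextq->n++] = 42;   /* a thread whose slice expired */
            printf("%d\n", choose());       /* 42, found after the swap */
            printf("%d\n", choose());       /* -1, both queues empty */
            return (0);
    }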
static void
-kseq_setup(struct kseq *kseq)
+tdq_setup(struct tdq *tdq)
{
- runq_init(&kseq->ksq_timeshare[0]);
- runq_init(&kseq->ksq_timeshare[1]);
- runq_init(&kseq->ksq_idle);
- kseq->ksq_curr = &kseq->ksq_timeshare[0];
- kseq->ksq_next = &kseq->ksq_timeshare[1];
- kseq->ksq_load = 0;
- kseq->ksq_load_timeshare = 0;
+ runq_init(&tdq->ksq_timeshare[0]);
+ runq_init(&tdq->ksq_timeshare[1]);
+ runq_init(&tdq->ksq_idle);
+ tdq->ksq_curr = &tdq->ksq_timeshare[0];
+ tdq->ksq_next = &tdq->ksq_timeshare[1];
+ tdq->ksq_load = 0;
+ tdq->ksq_load_timeshare = 0;
}
static void
@@ -917,27 +914,27 @@ sched_setup(void *dummy)
#ifdef SMP
balance_groups = 0;
/*
- * Initialize the kseqs.
+ * Initialize the tdqs.
*/
for (i = 0; i < MAXCPU; i++) {
- struct kseq *ksq;
+ struct tdq *ksq;
- ksq = &kseq_cpu[i];
+ ksq = &tdq_cpu[i];
ksq->ksq_assigned = NULL;
- kseq_setup(&kseq_cpu[i]);
+ tdq_setup(&tdq_cpu[i]);
}
if (smp_topology == NULL) {
- struct kseq_group *ksg;
- struct kseq *ksq;
+ struct tdq_group *ksg;
+ struct tdq *ksq;
int cpus;
for (cpus = 0, i = 0; i < MAXCPU; i++) {
if (CPU_ABSENT(i))
continue;
- ksq = &kseq_cpu[i];
- ksg = &kseq_groups[cpus];
+ ksq = &tdq_cpu[i];
+ ksg = &tdq_groups[cpus];
/*
- * Setup a kseq group with one member.
+ * Setup a tdq group with one member.
*/
ksq->ksq_transferable = 0;
ksq->ksq_group = ksg;
@@ -952,13 +949,13 @@ sched_setup(void *dummy)
}
ksg_maxid = cpus - 1;
} else {
- struct kseq_group *ksg;
+ struct tdq_group *ksg;
struct cpu_group *cg;
int j;
for (i = 0; i < smp_topology->ct_count; i++) {
cg = &smp_topology->ct_group[i];
- ksg = &kseq_groups[i];
+ ksg = &tdq_groups[i];
/*
* Initialize the group.
*/
@@ -975,10 +972,10 @@ sched_setup(void *dummy)
if ((cg->cg_mask & (1 << j)) != 0) {
if (ksg->ksg_mask == 0)
ksg->ksg_mask = 1 << j;
- kseq_cpu[j].ksq_transferable = 0;
- kseq_cpu[j].ksq_group = ksg;
+ tdq_cpu[j].ksq_transferable = 0;
+ tdq_cpu[j].ksq_group = ksg;
LIST_INSERT_HEAD(&ksg->ksg_members,
- &kseq_cpu[j], ksq_siblings);
+ &tdq_cpu[j], ksq_siblings);
}
}
if (ksg->ksg_cpus > 1)
@@ -994,10 +991,10 @@ sched_setup(void *dummy)
if (balance_groups)
gbal_tick = ticks + (hz / 2);
#else
- kseq_setup(KSEQ_SELF());
+ tdq_setup(TDQ_SELF());
#endif
mtx_lock_spin(&sched_lock);
- kseq_load_add(KSEQ_SELF(), &kse0);
+ tdq_load_add(TDQ_SELF(), &td_sched0);
mtx_unlock_spin(&sched_lock);
}
@@ -1042,71 +1039,67 @@ sched_priority(struct thread *td)
else if (pri < PRI_MIN_TIMESHARE)
pri = PRI_MIN_TIMESHARE;
-#ifdef KSE
- sched_user_prio(kg, pri);
-#else
sched_user_prio(td, pri);
-#endif
return;
}
/*
- * Calculate a time slice based on the properties of the kseg and the runq
- * that we're on. This is only for PRI_TIMESHARE threads.
+ * Calculate a time slice based on the properties of the process
+ * and the runq that we're on. This is only for PRI_TIMESHARE threads.
*/
static void
-sched_slice(struct kse *ke)
+sched_slice(struct td_sched *ts)
{
- struct kseq *kseq;
+ struct tdq *tdq;
struct thread *td;
- td = ke->ke_thread;
- kseq = KSEQ_CPU(ke->ke_cpu);
+ td = ts->ts_thread;
+ tdq = TDQ_CPU(ts->ts_cpu);
if (td->td_flags & TDF_BORROWING) {
- ke->ke_slice = SCHED_SLICE_MIN;
+ ts->ts_slice = SCHED_SLICE_MIN;
return;
}
/*
* Rationale:
- * KSEs in interactive ksegs get a minimal slice so that we
+ * Threads in interactive procs get a minimal slice so that we
* quickly notice if it abuses its advantage.
*
- * KSEs in non-interactive ksegs are assigned a slice that is
- * based on the ksegs nice value relative to the least nice kseg
+ * Threads in non-interactive procs are assigned a slice that is
+ * based on the proc's nice value relative to the least nice proc
* on the run queue for this cpu.
*
- * If the KSE is less nice than all others it gets the maximum
- * slice and other KSEs will adjust their slice relative to
+ * If the thread is less nice than all others it gets the maximum
+ * slice and other threads will adjust their slice relative to
* this when they first expire.
*
* There is 20 point window that starts relative to the least
- * nice kse on the run queue. Slice size is determined by
- * the kse distance from the last nice thread.
+ * nice td_sched on the run queue. Slice size is determined by
+ * the td_sched distance from the least nice thread.
*
- * If the kse is outside of the window it will get no slice
+ * If the td_sched is outside of the window it will get no slice
* and will be reevaluated each time it is selected on the
- * run queue. The exception to this is nice 0 ksegs when
+ * run queue. The exception to this is nice 0 procs when
* a nice -20 is running. They are always granted a minimum
* slice.
*/
if (!SCHED_INTERACTIVE(td)) {
int nice;
- nice = td->td_proc->p_nice + (0 - kseq->ksq_nicemin);
- if (kseq->ksq_load_timeshare == 0 ||
- td->td_proc->p_nice < kseq->ksq_nicemin)
- ke->ke_slice = SCHED_SLICE_MAX;
+ nice = td->td_proc->p_nice + (0 - tdq->ksq_nicemin);
+ if (tdq->ksq_load_timeshare == 0 ||
+ td->td_proc->p_nice < tdq->ksq_nicemin)
+ ts->ts_slice = SCHED_SLICE_MAX;
else if (nice <= SCHED_SLICE_NTHRESH)
- ke->ke_slice = SCHED_SLICE_NICE(nice);
+ ts->ts_slice = SCHED_SLICE_NICE(nice);
else if (td->td_proc->p_nice == 0)
- ke->ke_slice = SCHED_SLICE_MIN;
+ ts->ts_slice = SCHED_SLICE_MIN;
else
- ke->ke_slice = SCHED_SLICE_MIN; /* 0 */
+ ts->ts_slice = SCHED_SLICE_MIN; /* 0 */
} else
- ke->ke_slice = SCHED_SLICE_INTERACTIVE;
+ ts->ts_slice = SCHED_SLICE_INTERACTIVE;
return;
}
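The slice policy spelled out in the comment above boils down to: interactive threads get the minimum slice, the least-nice thread gets the maximum, and everyone else is scaled by nice distance within the 20 point window, with essentially nothing outside it. A hedged sketch of that shape, with made-up SLICE_* constants rather than the kernel's SCHED_SLICE_* macros:

    #include <stdio.h>

    #define SLICE_MIN       1
    #define SLICE_MAX       12
    #define NICE_WINDOW     20

    static int
    slice(int interactive, int my_nice, int queue_nicemin, int load_timeshare)
    {
            int dist;

            if (interactive)
                    return (SLICE_MIN);     /* short slice: re-check it often */
            if (load_timeshare == 0 || my_nice < queue_nicemin)
                    return (SLICE_MAX);     /* least nice gets the most cpu */
            dist = my_nice - queue_nicemin;
            if (dist <= NICE_WINDOW)        /* scale within the 20 point window */
                    return (SLICE_MAX -
                        (SLICE_MAX - SLICE_MIN) * dist / NICE_WINDOW);
            return (SLICE_MIN);             /* outside the window: minimal slice */
    }

    int
    main(void)
    {
            printf("%d %d %d\n",
                slice(0, -5, -5, 3),        /* least nice: 12 */
                slice(0, 5, -5, 3),         /* mid window: scaled down */
                slice(0, 20, -5, 3));       /* outside the window: 1 */
            return (0);
    }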
@@ -1187,9 +1180,9 @@ schedinit(void)
* Set up the scheduler specific parts of proc0.
*/
proc0.p_sched = NULL; /* XXX */
- thread0.td_sched = &kse0;
- kse0.ke_thread = &thread0;
- kse0.ke_state = KES_THREAD;
+ thread0.td_sched = &td_sched0;
+ td_sched0.ts_thread = &thread0;
+ td_sched0.ts_state = TSS_THREAD;
}
/*
@@ -1204,35 +1197,35 @@ sched_rr_interval(void)
}
static void
-sched_pctcpu_update(struct kse *ke)
+sched_pctcpu_update(struct td_sched *ts)
{
/*
* Adjust counters and watermark for pctcpu calc.
*/
- if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
+ if (ts->ts_ltick > ticks - SCHED_CPU_TICKS) {
/*
* Shift the tick count out so that the divide doesn't
* round away our results.
*/
- ke->ke_ticks <<= 10;
- ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
+ ts->ts_ticks <<= 10;
+ ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
SCHED_CPU_TICKS;
- ke->ke_ticks >>= 10;
+ ts->ts_ticks >>= 10;
} else
- ke->ke_ticks = 0;
- ke->ke_ltick = ticks;
- ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
+ ts->ts_ticks = 0;
+ ts->ts_ltick = ticks;
+ ts->ts_ftick = ts->ts_ltick - SCHED_CPU_TICKS;
}
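sched_pctcpu_update() re-expresses the accumulated tick count over a fixed trailing window, shifting left by 10 around the divide so integer truncation doesn't wipe out the result, and then resets the window endpoints. A standalone sketch with assumed hz and window values:

    #include <stdio.h>

    #define HZ              1000            /* assumed tick rate */
    #define CPU_TIME        10              /* seconds of history kept */
    #define CPU_TICKS       (HZ * CPU_TIME)

    struct stats {
            int     ticks;                  /* ticks charged inside the window */
            int     ltick;                  /* last tick we were running */
            int     ftick;                  /* first tick of the window */
    };

    static void
    pctcpu_update(struct stats *s, int now)
    {
            if (s->ltick > now - CPU_TICKS) {
                    s->ticks <<= 10;        /* keep precision across the divide */
                    s->ticks = (s->ticks / (now - s->ftick)) * CPU_TICKS;
                    s->ticks >>= 10;
            } else
                    s->ticks = 0;           /* idle for the whole window */
            s->ltick = now;
            s->ftick = s->ltick - CPU_TICKS;
    }

    int
    main(void)
    {
            struct stats s = { .ticks = 3000, .ltick = 14000, .ftick = 5000 };

            pctcpu_update(&s, 15000);
            /* 3000 ticks over a 10000 tick span stays ~3000 in the new window. */
            printf("ticks %d, window [%d, %d]\n", s.ticks, s.ftick, s.ltick);
            return (0);
    }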
void
sched_thread_priority(struct thread *td, u_char prio)
{
- struct kse *ke;
+ struct td_sched *ts;
CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, prio, curthread,
curthread->td_proc->p_comm);
- ke = td->td_kse;
+ ts = td->td_sched;
mtx_assert(&sched_lock, MA_OWNED);
if (td->td_priority == prio)
return;
@@ -1243,21 +1236,21 @@ sched_thread_priority(struct thread *td, u_char prio)
* queue. We still call adjustrunqueue below in case kse
* needs to fix things up.
*/
- if (prio < td->td_priority && ke->ke_runq != NULL &&
- (ke->ke_flags & KEF_ASSIGNED) == 0 &&
- ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
- runq_remove(ke->ke_runq, ke);
- ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
- runq_add(ke->ke_runq, ke, 0);
+ if (prio < td->td_priority && ts->ts_runq != NULL &&
+ (ts->ts_flags & TSF_ASSIGNED) == 0 &&
+ ts->ts_runq != TDQ_CPU(ts->ts_cpu)->ksq_curr) {
+ runq_remove(ts->ts_runq, ts);
+ ts->ts_runq = TDQ_CPU(ts->ts_cpu)->ksq_curr;
+ runq_add(ts->ts_runq, ts, 0);
}
/*
- * Hold this kse on this cpu so that sched_prio() doesn't
+ * Hold this td_sched on this cpu so that sched_prio() doesn't
* cause excessive migration. We only want migration to
* happen as the result of a wakeup.
*/
- ke->ke_flags |= KEF_HOLD;
+ ts->ts_flags |= TSF_HOLD;
adjustrunqueue(td, prio);
- ke->ke_flags &= ~KEF_HOLD;
+ ts->ts_flags &= ~TSF_HOLD;
} else
td->td_priority = prio;
}
@@ -1327,39 +1320,14 @@ sched_prio(struct thread *td, u_char prio)
}
void
-#ifdef KSE
-sched_user_prio(struct ksegrp *kg, u_char prio)
-#else
sched_user_prio(struct thread *td, u_char prio)
-#endif
{
-#ifdef KSE
- struct thread *td;
-#endif
u_char oldprio;
-#ifdef KSE
- kg->kg_base_user_pri = prio;
-
- /* XXXKSE only for 1:1 */
-
- td = TAILQ_FIRST(&kg->kg_threads);
- if (td == NULL) {
- kg->kg_user_pri = prio;
- return;
- }
-
- if (td->td_flags & TDF_UBORROWING && kg->kg_user_pri <= prio)
- return;
-
- oldprio = kg->kg_user_pri;
- kg->kg_user_pri = prio;
-#else
td->td_base_user_pri = prio;
oldprio = td->td_user_pri;
td->td_user_pri = prio;
-#endif
if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -1372,13 +1340,8 @@ sched_lend_user_prio(struct thread *td, u_char prio)
td->td_flags |= TDF_UBORROWING;
-#ifdef KSE
- oldprio = td->td_ksegrp->kg_user_pri;
- td->td_ksegrp->kg_user_pri = prio;
-#else
oldprio = td->td_user_pri;
td->td_user_pri = prio;
-#endif
if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -1387,23 +1350,12 @@ sched_lend_user_prio(struct thread *td, u_char prio)
void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
-#ifdef KSE
- struct ksegrp *kg = td->td_ksegrp;
-#endif
u_char base_pri;
-#ifdef KSE
- base_pri = kg->kg_base_user_pri;
-#else
base_pri = td->td_base_user_pri;
-#endif
if (prio >= base_pri) {
td->td_flags &= ~TDF_UBORROWING;
-#ifdef KSE
- sched_user_prio(kg, base_pri);
-#else
sched_user_prio(td, base_pri);
-#endif
} else
sched_lend_user_prio(td, prio);
}
@@ -1411,13 +1363,13 @@ sched_unlend_user_prio(struct thread *td, u_char prio)
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
- struct kseq *ksq;
- struct kse *ke;
+ struct tdq *ksq;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- ksq = KSEQ_SELF();
+ ts = td->td_sched;
+ ksq = TDQ_SELF();
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
@@ -1425,24 +1377,24 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
td->td_owepreempt = 0;
/*
- * If the KSE has been assigned it may be in the process of switching
+ * If the thread has been assigned it may be in the process of switching
* to the new cpu. This is the case in sched_bind().
*/
if (td == PCPU_GET(idlethread)) {
TD_SET_CAN_RUN(td);
- } else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
+ } else if ((ts->ts_flags & TSF_ASSIGNED) == 0) {
/* We are ending our run so make our slot available again */
- kseq_load_rem(ksq, ke);
+ tdq_load_rem(ksq, ts);
if (TD_IS_RUNNING(td)) {
/*
* Don't allow the thread to migrate
* from a preemption.
*/
- ke->ke_flags |= KEF_HOLD;
+ ts->ts_flags |= TSF_HOLD;
setrunqueue(td, (flags & SW_PREEMPT) ?
SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
SRQ_OURSELF|SRQ_YIELDING);
- ke->ke_flags &= ~KEF_HOLD;
+ ts->ts_flags &= ~TSF_HOLD;
}
}
if (newtd != NULL) {
@@ -1450,10 +1402,10 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
* If we bring in a thread account for it as if it had been
* added to the run queue and then chosen.
*/
- newtd->td_kse->ke_flags |= KEF_DIDRUN;
- newtd->td_kse->ke_runq = ksq->ksq_curr;
+ newtd->td_sched->ts_flags |= TSF_DIDRUN;
+ newtd->td_sched->ts_runq = ksq->ksq_curr;
TD_SET_RUNNING(newtd);
- kseq_load_add(KSEQ_SELF(), newtd->td_kse);
+ tdq_load_add(TDQ_SELF(), newtd->td_sched);
} else
newtd = choosethread();
if (td != newtd) {
@@ -1477,23 +1429,23 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
void
sched_nice(struct proc *p, int nice)
{
- struct kse *ke;
+ struct td_sched *ts;
struct thread *td;
- struct kseq *kseq;
+ struct tdq *tdq;
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_assert(&sched_lock, MA_OWNED);
/*
- * We need to adjust the nice counts for running KSEs.
+ * We need to adjust the nice counts for running threads.
*/
FOREACH_THREAD_IN_PROC(p, td) {
if (td->td_pri_class == PRI_TIMESHARE) {
- ke = td->td_kse;
- if (ke->ke_runq == NULL)
+ ts = td->td_sched;
+ if (ts->ts_runq == NULL)
continue;
- kseq = KSEQ_CPU(ke->ke_cpu);
- kseq_nice_rem(kseq, p->p_nice);
- kseq_nice_add(kseq, nice);
+ tdq = TDQ_CPU(ts->ts_cpu);
+ tdq_nice_rem(tdq, p->p_nice);
+ tdq_nice_add(tdq, nice);
}
}
p->p_nice = nice;
@@ -1508,7 +1460,7 @@ sched_sleep(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_slptime = ticks;
+ td->td_sched->ts_slptime = ticks;
}
void
@@ -1517,13 +1469,13 @@ sched_wakeup(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
/*
- * Let the kseg know how long we slept for. This is because process
- * interactivity behavior is modeled in the kseg.
+ * Let the thread know how long we slept for. This is because
+ * interactivity behavior is now modeled per thread.
*/
- if (td->td_kse->ke_slptime) {
+ if (td->td_sched->ts_slptime) {
int hzticks;
- hzticks = (ticks - td->td_kse->ke_slptime) << 10;
+ hzticks = (ticks - td->td_sched->ts_slptime) << 10;
if (hzticks >= SCHED_SLP_RUN_MAX) {
td->td_sched->skg_slptime = SCHED_SLP_RUN_MAX;
td->td_sched->skg_runtime = 1;
@@ -1532,8 +1484,8 @@ sched_wakeup(struct thread *td)
sched_interact_update(td);
}
sched_priority(td);
- sched_slice(td->td_kse);
- td->td_kse->ke_slptime = 0;
+ sched_slice(td->td_sched);
+ td->td_sched->ts_slptime = 0;
}
setrunqueue(td, SRQ_BORING);
}
@@ -1545,10 +1497,15 @@ sched_wakeup(struct thread *td)
void
sched_fork(struct thread *td, struct thread *child)
{
- struct kse *ke;
- struct kse *ke2;
-
mtx_assert(&sched_lock, MA_OWNED);
+ sched_fork_thread(td, child);
+}
+
+void
+sched_fork_thread(struct thread *td, struct thread *child)
+{
+ struct td_sched *ts;
+ struct td_sched *ts2;
child->td_sched->skg_slptime = td->td_sched->skg_slptime;
child->td_sched->skg_runtime = td->td_sched->skg_runtime;
@@ -1560,23 +1517,23 @@ sched_fork(struct thread *td, struct thread *child)
sched_newthread(child);
- ke = td->td_kse;
- ke2 = child->td_kse;
- ke2->ke_slice = 1; /* Attempt to quickly learn interactivity. */
- ke2->ke_cpu = ke->ke_cpu;
- ke2->ke_runq = NULL;
+ ts = td->td_sched;
+ ts2 = child->td_sched;
+ ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
+ ts2->ts_cpu = ts->ts_cpu;
+ ts2->ts_runq = NULL;
/* Grab our parents cpu estimation information. */
- ke2->ke_ticks = ke->ke_ticks;
- ke2->ke_ltick = ke->ke_ltick;
- ke2->ke_ftick = ke->ke_ftick;
+ ts2->ts_ticks = ts->ts_ticks;
+ ts2->ts_ltick = ts->ts_ltick;
+ ts2->ts_ftick = ts->ts_ftick;
}
void
sched_class(struct thread *td, int class)
{
- struct kseq *kseq;
- struct kse *ke;
+ struct tdq *tdq;
+ struct td_sched *ts;
int nclass;
int oclass;
@@ -1586,36 +1543,36 @@ sched_class(struct thread *td, int class)
nclass = PRI_BASE(class);
oclass = PRI_BASE(td->td_pri_class);
- ke = td->td_kse;
- if ((ke->ke_state != KES_ONRUNQ &&
- ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
- continue;
- kseq = KSEQ_CPU(ke->ke_cpu);
+ ts = td->td_sched;
+ if (!((ts->ts_state != TSS_ONRUNQ &&
+ ts->ts_state != TSS_THREAD) || ts->ts_runq == NULL)) {
+ tdq = TDQ_CPU(ts->ts_cpu);
#ifdef SMP
- /*
- * On SMP if we're on the RUNQ we must adjust the transferable
- * count because could be changing to or from an interrupt
- * class.
- */
- if (ke->ke_state == KES_ONRUNQ) {
- if (KSE_CAN_MIGRATE(ke)) {
- kseq->ksq_transferable--;
- kseq->ksq_group->ksg_transferable--;
- }
- if (KSE_CAN_MIGRATE(ke)) {
- kseq->ksq_transferable++;
- kseq->ksq_group->ksg_transferable++;
+ /*
+ * On SMP if we're on the RUNQ we must adjust the transferable
+ * count because could be changing to or from an interrupt
+ * class.
+ */
+ if (ts->ts_state == TSS_ONRUNQ) {
+ if (THREAD_CAN_MIGRATE(ts)) {
+ tdq->ksq_transferable--;
+ tdq->ksq_group->ksg_transferable--;
+ }
+ if (THREAD_CAN_MIGRATE(ts)) {
+ tdq->ksq_transferable++;
+ tdq->ksq_group->ksg_transferable++;
+ }
}
- }
#endif
- if (oclass == PRI_TIMESHARE) {
- kseq->ksq_load_timeshare--;
- kseq_nice_rem(kseq, td->td_proc->p_nice);
- }
- if (nclass == PRI_TIMESHARE) {
- kseq->ksq_load_timeshare++;
- kseq_nice_add(kseq, td->td_proc->p_nice);
+ if (oclass == PRI_TIMESHARE) {
+ tdq->ksq_load_timeshare--;
+ tdq_nice_rem(tdq, td->td_proc->p_nice);
+ }
+ if (nclass == PRI_TIMESHARE) {
+ tdq->ksq_load_timeshare++;
+ tdq_nice_add(tdq, td->td_proc->p_nice);
+ }
}
td->td_pri_class = class;
@@ -1637,17 +1594,44 @@ sched_exit(struct proc *p, struct thread *childtd)
parent->td_sched->skg_runtime += childtd->td_sched->skg_runtime;
sched_interact_update(parent);
- kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
+ tdq_load_rem(TDQ_CPU(childtd->td_sched->ts_cpu), childtd->td_sched);
+}
+
+void
+sched_exit_thread(struct thread *td, struct thread *childtd)
+{
+}
+
+void
+sched_userret(struct thread *td)
+{
+ /*
+ * XXX we cheat slightly on the locking here to avoid locking in
+ * the usual case. Setting td_priority here is essentially an
+ * incomplete workaround for not setting it properly elsewhere.
+ * Now that some interrupt handlers are threads, not setting it
+ * properly elsewhere can clobber it in the window between setting
+ * it here and returning to user mode, so don't waste time setting
+ * it perfectly here.
+ */
+ KASSERT((td->td_flags & TDF_BORROWING) == 0,
+ ("thread with borrowed priority returning to userland"));
+ if (td->td_priority != td->td_user_pri) {
+ mtx_lock_spin(&sched_lock);
+ td->td_priority = td->td_user_pri;
+ td->td_base_pri = td->td_user_pri;
+ mtx_unlock_spin(&sched_lock);
+ }
}
void
sched_clock(struct thread *td)
{
- struct kseq *kseq;
- struct kse *ke;
+ struct tdq *tdq;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- kseq = KSEQ_SELF();
+ tdq = TDQ_SELF();
#ifdef SMP
if (ticks >= bal_tick)
sched_balance();
@@ -1657,18 +1641,18 @@ sched_clock(struct thread *td)
* We could have been assigned a non real-time thread without an
* IPI.
*/
- if (kseq->ksq_assigned)
- kseq_assign(kseq); /* Potentially sets NEEDRESCHED */
+ if (tdq->ksq_assigned)
+ tdq_assign(tdq); /* Potentially sets NEEDRESCHED */
#endif
- ke = td->td_kse;
+ ts = td->td_sched;
/* Adjust ticks for pctcpu */
- ke->ke_ticks++;
- ke->ke_ltick = ticks;
+ ts->ts_ticks++;
+ ts->ts_ltick = ticks;
/* Go up to one second beyond our max and then trim back down */
- if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
- sched_pctcpu_update(ke);
+ if (ts->ts_ftick + SCHED_CPU_TICKS + hz < ts->ts_ltick)
+ sched_pctcpu_update(ts);
if (td->td_flags & TDF_IDLETD)
return;
@@ -1687,76 +1671,76 @@ sched_clock(struct thread *td)
/*
* We used up one time slice.
*/
- if (--ke->ke_slice > 0)
+ if (--ts->ts_slice > 0)
return;
/*
* We're out of time, recompute priorities and requeue.
*/
- kseq_load_rem(kseq, ke);
+ tdq_load_rem(tdq, ts);
sched_priority(td);
- sched_slice(ke);
- if (SCHED_CURR(td, ke))
- ke->ke_runq = kseq->ksq_curr;
+ sched_slice(ts);
+ if (SCHED_CURR(td, ts))
+ ts->ts_runq = tdq->ksq_curr;
else
- ke->ke_runq = kseq->ksq_next;
- kseq_load_add(kseq, ke);
+ ts->ts_runq = tdq->ksq_next;
+ tdq_load_add(tdq, ts);
td->td_flags |= TDF_NEEDRESCHED;
}
int
sched_runnable(void)
{
- struct kseq *kseq;
+ struct tdq *tdq;
int load;
load = 1;
- kseq = KSEQ_SELF();
+ tdq = TDQ_SELF();
#ifdef SMP
- if (kseq->ksq_assigned) {
+ if (tdq->ksq_assigned) {
mtx_lock_spin(&sched_lock);
- kseq_assign(kseq);
+ tdq_assign(tdq);
mtx_unlock_spin(&sched_lock);
}
#endif
if ((curthread->td_flags & TDF_IDLETD) != 0) {
- if (kseq->ksq_load > 0)
+ if (tdq->ksq_load > 0)
goto out;
} else
- if (kseq->ksq_load - 1 > 0)
+ if (tdq->ksq_load - 1 > 0)
goto out;
load = 0;
out:
return (load);
}
-struct kse *
+struct td_sched *
sched_choose(void)
{
- struct kseq *kseq;
- struct kse *ke;
+ struct tdq *tdq;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- kseq = KSEQ_SELF();
+ tdq = TDQ_SELF();
#ifdef SMP
restart:
- if (kseq->ksq_assigned)
- kseq_assign(kseq);
+ if (tdq->ksq_assigned)
+ tdq_assign(tdq);
#endif
- ke = kseq_choose(kseq);
- if (ke) {
+ ts = tdq_choose(tdq);
+ if (ts) {
#ifdef SMP
- if (ke->ke_thread->td_pri_class == PRI_IDLE)
- if (kseq_idled(kseq) == 0)
+ if (ts->ts_thread->td_pri_class == PRI_IDLE)
+ if (tdq_idled(tdq) == 0)
goto restart;
#endif
- kseq_runq_rem(kseq, ke);
- ke->ke_state = KES_THREAD;
- ke->ke_flags &= ~KEF_PREEMPTED;
- return (ke);
+ tdq_runq_rem(tdq, ts);
+ ts->ts_state = TSS_THREAD;
+ ts->ts_flags &= ~TSF_PREEMPTED;
+ return (ts);
}
#ifdef SMP
- if (kseq_idled(kseq) == 0)
+ if (tdq_idled(tdq) == 0)
goto restart;
#endif
return (NULL);
@@ -1765,8 +1749,8 @@ restart:
void
sched_add(struct thread *td, int flags)
{
- struct kseq *kseq;
- struct kse *ke;
+ struct tdq *tdq;
+ struct td_sched *ts;
int preemptive;
int canmigrate;
int class;
@@ -1775,60 +1759,60 @@ sched_add(struct thread *td, int flags)
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
+ ts = td->td_sched;
canmigrate = 1;
preemptive = !(flags & SRQ_YIELDING);
class = PRI_BASE(td->td_pri_class);
- kseq = KSEQ_SELF();
- ke->ke_flags &= ~KEF_INTERNAL;
+ tdq = TDQ_SELF();
+ ts->ts_flags &= ~TSF_INTERNAL;
#ifdef SMP
- if (ke->ke_flags & KEF_ASSIGNED) {
- if (ke->ke_flags & KEF_REMOVED)
- ke->ke_flags &= ~KEF_REMOVED;
+ if (ts->ts_flags & TSF_ASSIGNED) {
+ if (ts->ts_flags & TSF_REMOVED)
+ ts->ts_flags &= ~TSF_REMOVED;
return;
}
- canmigrate = KSE_CAN_MIGRATE(ke);
+ canmigrate = THREAD_CAN_MIGRATE(ts);
/*
* Don't migrate running threads here. Force the long term balancer
* to do it.
*/
- if (ke->ke_flags & KEF_HOLD) {
- ke->ke_flags &= ~KEF_HOLD;
+ if (ts->ts_flags & TSF_HOLD) {
+ ts->ts_flags &= ~TSF_HOLD;
canmigrate = 0;
}
#endif
- KASSERT(ke->ke_state != KES_ONRUNQ,
- ("sched_add: kse %p (%s) already in run queue", ke,
+ KASSERT(ts->ts_state != TSS_ONRUNQ,
+ ("sched_add: thread %p (%s) already in run queue", td,
td->td_proc->p_comm));
KASSERT(td->td_proc->p_sflag & PS_INMEM,
("sched_add: process swapped out"));
- KASSERT(ke->ke_runq == NULL,
- ("sched_add: KSE %p is still assigned to a run queue", ke));
+ KASSERT(ts->ts_runq == NULL,
+ ("sched_add: thread %p is still assigned to a run queue", td));
if (flags & SRQ_PREEMPTED)
- ke->ke_flags |= KEF_PREEMPTED;
+ ts->ts_flags |= TSF_PREEMPTED;
switch (class) {
case PRI_ITHD:
case PRI_REALTIME:
- ke->ke_runq = kseq->ksq_curr;
- ke->ke_slice = SCHED_SLICE_MAX;
+ ts->ts_runq = tdq->ksq_curr;
+ ts->ts_slice = SCHED_SLICE_MAX;
if (canmigrate)
- ke->ke_cpu = PCPU_GET(cpuid);
+ ts->ts_cpu = PCPU_GET(cpuid);
break;
case PRI_TIMESHARE:
- if (SCHED_CURR(td, ke))
- ke->ke_runq = kseq->ksq_curr;
+ if (SCHED_CURR(td, ts))
+ ts->ts_runq = tdq->ksq_curr;
else
- ke->ke_runq = kseq->ksq_next;
+ ts->ts_runq = tdq->ksq_next;
break;
case PRI_IDLE:
/*
* This is for priority prop.
*/
- if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
- ke->ke_runq = kseq->ksq_curr;
+ if (ts->ts_thread->td_priority < PRI_MIN_IDLE)
+ ts->ts_runq = tdq->ksq_curr;
else
- ke->ke_runq = &kseq->ksq_idle;
- ke->ke_slice = SCHED_SLICE_MIN;
+ ts->ts_runq = &tdq->ksq_idle;
+ ts->ts_slice = SCHED_SLICE_MIN;
break;
default:
panic("Unknown pri class.");
@@ -1838,9 +1822,9 @@ sched_add(struct thread *td, int flags)
/*
* If this thread is pinned or bound, notify the target cpu.
*/
- if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid) ) {
- ke->ke_runq = NULL;
- kseq_notify(ke, ke->ke_cpu);
+ if (!canmigrate && ts->ts_cpu != PCPU_GET(cpuid) ) {
+ ts->ts_runq = NULL;
+ tdq_notify(ts, ts->ts_cpu);
return;
}
/*
@@ -1848,72 +1832,72 @@ sched_add(struct thread *td, int flags)
* the global bitmap. If not, see if we should transfer this thread.
*/
if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
- (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
+ (tdq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
/*
* Check to see if our group is unidling, and if so, remove it
* from the global idle mask.
*/
- if (kseq->ksq_group->ksg_idlemask ==
- kseq->ksq_group->ksg_cpumask)
- atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
+ if (tdq->ksq_group->ksg_idlemask ==
+ tdq->ksq_group->ksg_cpumask)
+ atomic_clear_int(&tdq_idle, tdq->ksq_group->ksg_mask);
/*
* Now remove ourselves from the group specific idle mask.
*/
- kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
- } else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
- if (kseq_transfer(kseq, ke, class))
+ tdq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
+ } else if (canmigrate && tdq->ksq_load > 1 && class != PRI_ITHD)
+ if (tdq_transfer(tdq, ts, class))
return;
- ke->ke_cpu = PCPU_GET(cpuid);
+ ts->ts_cpu = PCPU_GET(cpuid);
#endif
if (td->td_priority < curthread->td_priority &&
- ke->ke_runq == kseq->ksq_curr)
+ ts->ts_runq == tdq->ksq_curr)
curthread->td_flags |= TDF_NEEDRESCHED;
if (preemptive && maybe_preempt(td))
return;
- ke->ke_state = KES_ONRUNQ;
+ ts->ts_state = TSS_ONRUNQ;
- kseq_runq_add(kseq, ke, flags);
- kseq_load_add(kseq, ke);
+ tdq_runq_add(tdq, ts, flags);
+ tdq_load_add(tdq, ts);
}
void
sched_rem(struct thread *td)
{
- struct kseq *kseq;
- struct kse *ke;
+ struct tdq *tdq;
+ struct td_sched *ts;
CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- ke->ke_flags &= ~KEF_PREEMPTED;
- if (ke->ke_flags & KEF_ASSIGNED) {
- ke->ke_flags |= KEF_REMOVED;
+ ts = td->td_sched;
+ ts->ts_flags &= ~TSF_PREEMPTED;
+ if (ts->ts_flags & TSF_ASSIGNED) {
+ ts->ts_flags |= TSF_REMOVED;
return;
}
- KASSERT((ke->ke_state == KES_ONRUNQ),
- ("sched_rem: KSE not on run queue"));
+ KASSERT((ts->ts_state == TSS_ONRUNQ),
+ ("sched_rem: thread not on run queue"));
- ke->ke_state = KES_THREAD;
- kseq = KSEQ_CPU(ke->ke_cpu);
- kseq_runq_rem(kseq, ke);
- kseq_load_rem(kseq, ke);
+ ts->ts_state = TSS_THREAD;
+ tdq = TDQ_CPU(ts->ts_cpu);
+ tdq_runq_rem(tdq, ts);
+ tdq_load_rem(tdq, ts);
}
fixpt_t
sched_pctcpu(struct thread *td)
{
fixpt_t pctcpu;
- struct kse *ke;
+ struct td_sched *ts;
pctcpu = 0;
- ke = td->td_kse;
- if (ke == NULL)
+ ts = td->td_sched;
+ if (ts == NULL)
return (0);
mtx_lock_spin(&sched_lock);
- if (ke->ke_ticks) {
+ if (ts->ts_ticks) {
int rtick;
/*
@@ -1921,15 +1905,15 @@ sched_pctcpu(struct thread *td)
* this causes the cpu usage to decay away too quickly due to
* rounding errors.
*/
- if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
- ke->ke_ltick < (ticks - (hz / 2)))
- sched_pctcpu_update(ke);
+ if (ts->ts_ftick + SCHED_CPU_TICKS < ts->ts_ltick ||
+ ts->ts_ltick < (ticks - (hz / 2)))
+ sched_pctcpu_update(ts);
/* How many rtick per second ? */
- rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
+ rtick = min(ts->ts_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
}
- td->td_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
+ td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
mtx_unlock_spin(&sched_lock);
return (pctcpu);
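The %cpu value handed back here is classic FSCALE fixed point: ticks-per-second over the window, divided by the stat clock rate and scaled to FSCALE. A hedged sketch of that conversion (FSHIFT and the realstathz stand-in are assumptions):

    #include <stdio.h>

    #define FSHIFT          11              /* assumed fixed-point shift */
    #define FSCALE          (1 << FSHIFT)
    #define CPU_TIME        10              /* window length, in seconds */
    #define REALSTATHZ      128             /* stand-in for realstathz */

    static unsigned int
    pctcpu(int ticks_in_window)
    {
            int rtick;

            /* How many stat ticks per second did we use, on average? */
            rtick = ticks_in_window / CPU_TIME;
            return ((FSCALE * ((FSCALE * rtick) / REALSTATHZ)) >> FSHIFT);
    }

    int
    main(void)
    {
            /* Running every stat tick of the window reads as FSCALE (100%). */
            printf("%u / %u\n", pctcpu(REALSTATHZ * CPU_TIME), (unsigned)FSCALE);
            printf("%u / %u\n", pctcpu(REALSTATHZ * CPU_TIME / 2), (unsigned)FSCALE);
            return (0);
    }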
@@ -1938,18 +1922,18 @@ sched_pctcpu(struct thread *td)
void
sched_bind(struct thread *td, int cpu)
{
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- ke->ke_flags |= KEF_BOUND;
+ ts = td->td_sched;
+ ts->ts_flags |= TSF_BOUND;
#ifdef SMP
if (PCPU_GET(cpuid) == cpu)
return;
/* sched_rem without the runq_remove */
- ke->ke_state = KES_THREAD;
- kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
- kseq_notify(ke, cpu);
+ ts->ts_state = TSS_THREAD;
+ tdq_load_rem(TDQ_CPU(ts->ts_cpu), ts);
+ tdq_notify(ts, cpu);
/* When we return from mi_switch we'll be on the correct cpu. */
mi_switch(SW_VOL, NULL);
#endif
@@ -1959,30 +1943,21 @@ void
sched_unbind(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_flags &= ~KEF_BOUND;
+ td->td_sched->ts_flags &= ~TSF_BOUND;
}
int
sched_is_bound(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- return (td->td_kse->ke_flags & KEF_BOUND);
+ return (td->td_sched->ts_flags & TSF_BOUND);
}
void
sched_relinquish(struct thread *td)
{
-#ifdef KSE
- struct ksegrp *kg;
-
- kg = td->td_ksegrp;
-#endif
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- if (kg->kg_pri_class == PRI_TIMESHARE)
-#else
if (td->td_pri_class == PRI_TIMESHARE)
-#endif
sched_prio(td, PRI_MAX_TIMESHARE);
mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
@@ -1997,10 +1972,10 @@ sched_load(void)
total = 0;
for (i = 0; i <= ksg_maxid; i++)
- total += KSEQ_GROUP(i)->ksg_load;
+ total += TDQ_GROUP(i)->ksg_load;
return (total);
#else
- return (KSEQ_SELF()->ksq_sysload);
+ return (TDQ_SELF()->ksq_sysload);
#endif
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index a6eca02..5e7446c 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -149,9 +149,6 @@ ast(struct trapframe *framep)
{
struct thread *td;
struct proc *p;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
struct rlimit rlim;
int sflag;
int flags;
@@ -163,9 +160,6 @@ ast(struct trapframe *framep)
td = curthread;
p = td->td_proc;
-#ifdef KSE
- kg = td->td_ksegrp;
-#endif
CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
@@ -204,7 +198,7 @@ ast(struct trapframe *framep)
/*
* XXXKSE While the fact that we owe a user profiling
- * tick is stored per KSE in this code, the statistics
+ * tick is stored per thread in this code, the statistics
* themselves are still stored per process.
* This should probably change, by which I mean that
* possibly the location of both might change.
@@ -264,11 +258,7 @@ ast(struct trapframe *framep)
ktrcsw(1, 1);
#endif
mtx_lock_spin(&sched_lock);
-#ifdef KSE
- sched_prio(td, kg->kg_user_pri);
-#else
sched_prio(td, td->td_user_pri);
-#endif
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index bd7cf8d..5e7c596 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2578,18 +2578,8 @@ ttyinfo(struct tty *tp)
if (proc_compare(pick, p))
pick = p;
- td = FIRST_THREAD_IN_PROC(pick); /* XXXKSE */
-#if 0
- KASSERT(td != NULL, ("ttyinfo: no thread"));
-#else
- if (td == NULL) {
- mtx_unlock_spin(&sched_lock);
- PGRP_UNLOCK(tp->t_pgrp);
- ttyprintf(tp, "foreground process without thread\n");
- tp->t_rocount = 0;
- return;
- }
-#endif
+ /* ^T can only show state for one thread; just pick the first. */
+ td = FIRST_THREAD_IN_PROC(pick);
stateprefix = "";
if (TD_IS_RUNNING(td))
state = "running";
@@ -2669,11 +2659,7 @@ proc_compare(struct proc *p1, struct proc *p2)
{
int esta, estb;
-#ifdef KSE
- struct ksegrp *kg;
-#else
struct thread *td;
-#endif
mtx_assert(&sched_lock, MA_OWNED);
if (p1 == NULL)
return (1);
@@ -2694,19 +2680,10 @@ proc_compare(struct proc *p1, struct proc *p2)
* tie - favor one with highest recent cpu utilization
*/
esta = estb = 0;
-#ifdef KSE
- FOREACH_KSEGRP_IN_PROC(p1,kg) {
- esta += kg->kg_estcpu;
- }
- FOREACH_KSEGRP_IN_PROC(p2,kg) {
- estb += kg->kg_estcpu;
- }
-#else
FOREACH_THREAD_IN_PROC(p1, td)
esta += td->td_estcpu;
FOREACH_THREAD_IN_PROC(p2, td)
estb += td->td_estcpu;
-#endif
if (estb > esta)
return (1);
if (esta > estb)
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index c12c8d4..1bd8c25 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -1906,11 +1906,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
/*
* Initialize DMAC
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
index 48d5b1b..5b158d0 100644
--- a/sys/powerpc/aim/machdep.c
+++ b/sys/powerpc/aim/machdep.c
@@ -295,11 +295,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
* Start initializing proc0 and thread0.
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_frame = &frame0;
/*
diff --git a/sys/powerpc/powerpc/machdep.c b/sys/powerpc/powerpc/machdep.c
index 48d5b1b..5b158d0 100644
--- a/sys/powerpc/powerpc/machdep.c
+++ b/sys/powerpc/powerpc/machdep.c
@@ -295,11 +295,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
* Start initializing proc0 and thread0.
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
thread0.td_frame = &frame0;
/*
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index 84eea0b..5cf29d5 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -391,11 +391,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
/*
* Initialize proc0 stuff (p_contested needs to be done early).
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
thread0.td_kstack = kstack0;
diff --git a/sys/sun4v/sun4v/machdep.c b/sys/sun4v/sun4v/machdep.c
index b76eb86..6f3d0db 100644
--- a/sys/sun4v/sun4v/machdep.c
+++ b/sys/sun4v/sun4v/machdep.c
@@ -343,11 +343,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
* Initialize proc0 stuff (p_contested needs to be done early).
*/
-#ifdef KSE
- proc_linkup(&proc0, &ksegrp0, &thread0);
-#else
proc_linkup(&proc0, &thread0);
-#endif
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 99dbfc6..47e21bd 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -152,41 +152,32 @@ struct pargs {
*/
struct auditinfo;
struct kaudit_record;
-#ifdef KSE
-struct kg_sched;
-#else
struct td_sched;
-#endif
struct nlminfo;
struct kaioinfo;
struct p_sched;
struct proc;
struct sleepqueue;
-#ifdef KSE
-struct td_sched;
-#else
struct thread;
-#endif
struct trapframe;
struct turnstile;
struct mqueue_notifier;
-#ifdef KSE
/*
- * Here we define the three structures used for process information.
+ * Here we define the two structures used for process information.
*
* The first is the thread. It might be thought of as a "Kernel
* Schedulable Entity Context".
* This structure contains all the information as to where a thread of
* execution is now, or was when it was suspended, why it was suspended,
* and anything else that will be needed to restart it when it is
- * rescheduled. Always associated with a KSE when running, but can be
- * reassigned to an equivalent KSE when being restarted for
- * load balancing. Each of these is associated with a kernel stack
- * and a pcb.
+ * rescheduled. It includes a scheduler-specific substructure that is different
+ * for each scheduler.
*
- * It is important to remember that a particular thread structure may only
- * exist as long as the system call or kernel entrance (e.g. by pagefault)
+ * M:N notes.
+ * It is important to remember that when using M:N threading,
+ * a particular thread structure may only exist as long as
+ * the system call or kernel entrance (e.g. by pagefault)
* which it is currently executing. It should therefore NEVER be referenced
* by pointers in long lived structures that live longer than a single
* request. If several threads complete their work at the same time,
@@ -198,87 +189,37 @@ struct mqueue_notifier;
* get one when it needs a new one. There is also a system
* cache of free threads. Threads have priority and partake in priority
* inheritance schemes.
+ *
+ * The second is the proc (process), which owns all the resources of a process
+ * other than CPU cycles; those are parcelled out to the threads.
*/
-struct thread;
-
-/*
- * The KSEGRP is allocated resources across a number of CPUs.
- * (Including a number of CPUxQUANTA. It parcels these QUANTA up among
- * its threads, each of which should be running in a different CPU.
- * BASE priority and total available quanta are properties of a KSEGRP.
- * Multiple KSEGRPs in a single process compete against each other
- * for total quanta in the same way that a forked child competes against
- * its parent process.
- */
-struct ksegrp;
-
-/*
- * A process is the owner of all system resources allocated to a task
- * except CPU quanta.
- * All KSEGs under one process see, and have the same access to, these
- * resources (e.g. files, memory, sockets, credential, kqueues).
- * A process may compete for CPU cycles on the same basis as a
- * forked process cluster by spawning several KSEGRPs.
- */
-struct proc;
/***************
- * In pictures:
+ * Threads are the unit of execution
With a single run queue used by all processors:
- RUNQ: --->KSE---KSE--... SLEEPQ:[]---THREAD---THREAD---THREAD
- \ \ []---THREAD
- KSEG---THREAD--THREAD--THREAD []
- []---THREAD---THREAD
-
- (processors run THREADs from the KSEG until they are exhausted or
- the KSEG exhausts its quantum)
-
-With PER-CPU run queues:
-KSEs on the separate run queues directly
-They would be given priorities calculated from the KSEG.
+ RUNQ: --->THREAD---THREAD--... SLEEPQ:[]---THREAD---THREAD---THREAD
+ []---THREAD
+ []
+ []---THREAD---THREAD
+With PER-CPU run queues:
+it gets more complicated.
*
*****************/
-#endif
-#ifdef KSE
/*
* Kernel runnable context (thread).
* This is what is put to sleep and reactivated.
- * The first KSE available in the correct group will run this thread.
- * If several are available, use the one on the same CPU as last time.
- * When waiting to be run, threads are hung off the KSEGRP in priority order.
- * With N runnable and queued KSEs in the KSEGRP, the first N threads
- * are linked to them. Other threads are not yet assigned.
- */
-#else
-/*
* Thread context. Processes may have multiple threads.
*/
-#endif
struct thread {
struct proc *td_proc; /* (*) Associated process. */
-#ifdef KSE
- struct ksegrp *td_ksegrp; /* (*) Associated KSEG. */
-#else
- void *was_td_ksegrp; /* Temporary padding. */
-#endif
TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */
-#ifdef KSE
- TAILQ_ENTRY(thread) td_kglist; /* (*) All threads in this ksegrp. */
-#else
- TAILQ_ENTRY(thread) was_td_kglist; /* Temporary padding. */
-#endif
/* The two queues below should someday be merged. */
TAILQ_ENTRY(thread) td_slpq; /* (j) Sleep queue. */
TAILQ_ENTRY(thread) td_lockq; /* (j) Lock queue. */
-#ifdef KSE
- TAILQ_ENTRY(thread) td_runq; /* (j/z) Run queue(s). XXXKSE */
-#else
- TAILQ_ENTRY(thread) td_runq; /* (j/z) Run queue(s). */
-#endif
TAILQ_HEAD(, selinfo) td_selq; /* (p) List of selinfos. */
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
@@ -307,23 +248,12 @@ struct thread {
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
int td_intr_nesting_level; /* (k) Interrupt recursion. */
int td_pinned; /* (k) Temporary cpu pin count. */
-#ifdef KSE
struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
-#else
- void *was_td_mailbox; /* Temporary padding. */
-#endif
struct ucred *td_ucred; /* (k) Reference to credentials. */
-#ifdef KSE
struct thread *td_standin; /* (k + a) Use this for an upcall. */
struct kse_upcall *td_upcall; /* (k + j) Upcall structure. */
- u_int new_td_estcpu; /* Temporary padding. */
- u_int new_td_slptime; /* Temporary padding. */
-#else
- void *was_td_standin; /* Temporary padding. */
- void *was_td_upcall; /* Temporary padding. */
u_int td_estcpu; /* (j) Sum of the same field in KSEs. */
u_int td_slptime; /* (j) How long completely blocked. */
-#endif
u_int td_pticks; /* (k) Statclock hits for profiling */
u_int td_sticks; /* (k) Statclock hits in system mode. */
u_int td_iticks; /* (k) Statclock hits in intr mode. */
@@ -335,11 +265,7 @@ struct thread {
sigset_t td_sigmask; /* (c) Current signal mask. */
volatile u_int td_generation; /* (k) For detection of preemption */
stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
-#ifdef KSE
int td_kflags; /* (c) Flags for KSE threading. */
-#else
- int was_td_kflags; /* Temporary padding. */
-#endif
int td_xsig; /* (c) Signal for ptrace */
u_long td_profil_addr; /* (k) Temporary addr until AST. */
u_int td_profil_ticks; /* (k) Temporary ticks until AST. */
@@ -350,15 +276,9 @@ struct thread {
#define td_startcopy td_endzero
u_char td_base_pri; /* (j) Thread base kernel priority. */
u_char td_priority; /* (j) Thread active priority. */
-#ifdef KSE
- u_char new_td_pri_class; /* Temporary padding. */
- u_char new_td_user_pri; /* Temporary padding. */
- u_char new_td_base_user_pri; /* Temporary padding. */
-#else
u_char td_pri_class; /* (j) Scheduling class. */
u_char td_user_pri; /* (j) User pri from estcpu and nice. */
- u_char td_base_user_pri; /* (j) Base user pri */
-#endif
+ u_char td_base_user_pri; /* (j) Base user pri */
#define td_endcopy td_pcb
/*
@@ -427,27 +347,15 @@ struct thread {
#define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */
#define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */
#define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */
-#ifdef KSE
#define TDP_UPCALLING 0x00000008 /* This thread is doing an upcall. */
-#else
-/* 0x00000008 */
-#endif
#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
-#ifdef KSE
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
-#else
-/* 0x00000080 */
-#endif
#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
-#ifdef KSE
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
-#else
-/* 0x00000800 */
-#endif
#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
#define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */
@@ -467,18 +375,16 @@ struct thread {
#define TDI_LOCK 0x0008 /* Stopped on a lock. */
#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
-#ifdef KSE
/*
* flags (in kflags) related to M:N threading.
*/
-#define TDK_KSEREL 0x0001 /* Blocked in msleep on kg->kg_completed. */
+#define TDK_KSEREL 0x0001 /* Blocked in msleep on p->p_completed. */
#define TDK_KSERELSIG 0x0002 /* Blocked in msleep on p->p_siglist. */
#define TDK_WAKEUP 0x0004 /* Thread has been woken by kse_wakeup. */
#define TD_CAN_UNBIND(td) \
(((td)->td_pflags & TDP_CAN_UNBIND) && \
((td)->td_upcall != NULL))
-#endif
#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
@@ -520,14 +426,13 @@ struct thread {
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
-#ifdef KSE
/*
* An upcall is used when returning to userland. If a thread does not have
* an upcall on return to userland the thread exports its context and exits.
*/
struct kse_upcall {
- TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in KSEG. */
- struct ksegrp *ku_ksegrp; /* Associated KSEG. */
+ TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in proc. */
+ struct proc *ku_proc; /* Associated proc. */
struct thread *ku_owner; /* Owning thread. */
int ku_flags; /* KUF_* flags. */
struct kse_mailbox *ku_mailbox; /* Userland mailbox address. */
@@ -540,38 +445,6 @@ struct kse_upcall {
#define KUF_EXITING 0x00002 /* Upcall structure is exiting. */
/*
- * Kernel-scheduled entity group (KSEG). The scheduler considers each KSEG to
- * be an indivisible unit from a time-sharing perspective, though each KSEG may
- * contain multiple KSEs.
- */
-struct ksegrp {
- struct proc *kg_proc; /* (*) Proc that contains this KSEG. */
- TAILQ_ENTRY(ksegrp) kg_ksegrp; /* (*) Queue of KSEGs in kg_proc. */
- TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
- TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
- TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group. */
-
-#define kg_startzero kg_estcpu
- u_int kg_estcpu; /* (j) Sum of the same field in KSEs. */
- u_int kg_slptime; /* (j) How long completely blocked. */
- int kg_numupcalls; /* (j) Num upcalls. */
- int kg_upsleeps; /* (c) Num threads in kse_release(). */
- struct kse_thr_mailbox *kg_completed; /* (c) Completed thread mboxes. */
- int kg_nextupcall; /* (n) Next upcall time. */
- int kg_upquantum; /* (n) Quantum to schedule an upcall. */
-#define kg_endzero kg_pri_class
-
-#define kg_startcopy kg_endzero
- u_char kg_pri_class; /* (j) Scheduling class. */
- u_char kg_user_pri; /* (j) User pri from estcpu and nice. */
- u_char kg_base_user_pri; /* (j) Base user pri */
-#define kg_endcopy kg_numthreads
- int kg_numthreads; /* (j) Num threads in total. */
- struct kg_sched *kg_sched; /* (*) Scheduler-specific data. */
-};
-#endif
-
-/*
* XXX: Does this belong in resource.h or resourcevar.h instead?
* Resource usage extension. The times in rusage structs in the kernel are
* never up to date. The actual times are kept as runtimes and tick counts
@@ -592,18 +465,12 @@ struct rusage_ext {
};
/*
- * The old fashionned process. May have multiple threads, KSEGRPs
- * and KSEs. Starts off with a single embedded KSEGRP and THREAD.
+ * The old-fashioned process. May have multiple threads.
+ * Starts off with a single embedded THREAD.
*/
struct proc {
LIST_ENTRY(proc) p_list; /* (d) List of all processes. */
-#ifdef KSE
- TAILQ_HEAD(, ksegrp) p_ksegrps; /* (c)(kg_ksegrp) All KSEGs. */
-#else
- TAILQ_HEAD(, thread) was_p_ksegrps; /* Temporary padding. */
-#endif
TAILQ_HEAD(, thread) p_threads; /* (j)(td_plist) Threads. (shortcut) */
- TAILQ_HEAD(, thread) p_suspended; /* (td_runq) Suspended threads. */
struct ucred *p_ucred; /* (c) Process owner's identity. */
struct filedesc *p_fd; /* (b) Open files. */
struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
@@ -611,6 +478,7 @@ struct proc {
struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
struct plimit *p_limit; /* (c) Process limits. */
struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
+ TAILQ_HEAD(, kse_upcall) p_upcalls; /* All upcalls in the proc. */
/*
* The following don't make too much sense.
@@ -666,6 +534,14 @@ struct proc {
int p_boundary_count;/* (c) Num threads at user boundary */
int p_pendingcnt; /* how many signals are pending */
struct itimers *p_itimers; /* (c) POSIX interval timers. */
+/* from ksegrp */
+ u_int p_estcpu; /* (j) Sum of the same field in threads. */
+ u_int p_slptime; /* (j) How long completely blocked. */
+ int p_numupcalls; /* (j) Num upcalls. */
+ int p_upsleeps; /* (c) Num threads in kse_release(). */
+ struct kse_thr_mailbox *p_completed; /* (c) Completed thread mboxes. */
+ int p_nextupcall; /* (n) Next upcall time. */
+ int p_upquantum; /* (n) Quantum to schedule an upcall. */
/* End area that is zeroed on creation. */
#define p_endzero p_magic
@@ -684,11 +560,6 @@ struct proc {
u_short p_xstat; /* (c) Exit status; also stop sig. */
struct knlist p_klist; /* (c) Knotes attached to this proc. */
int p_numthreads; /* (j) Number of threads. */
-#ifdef KSE
- int p_numksegrps; /* (c) Number of ksegrps. */
-#else
- int was_p_numksegrps; /* Temporary padding. */
-#endif
struct mdproc p_md; /* Any machine-dependent fields. */
struct callout p_itcallout; /* (h + c) Interval timer callout. */
u_short p_acflag; /* (c) Accounting flags. */
@@ -797,22 +668,13 @@ MALLOC_DECLARE(M_ZOMBIE);
#define FOREACH_PROC_IN_SYSTEM(p) \
LIST_FOREACH((p), &allproc, p_list)
-#ifdef KSE
-#define FOREACH_KSEGRP_IN_PROC(p, kg) \
- TAILQ_FOREACH((kg), &(p)->p_ksegrps, kg_ksegrp)
-#define FOREACH_THREAD_IN_GROUP(kg, td) \
- TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
-#define FOREACH_UPCALL_IN_GROUP(kg, ku) \
- TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
-#endif
#define FOREACH_THREAD_IN_PROC(p, td) \
TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
+#define FOREACH_UPCALL_IN_PROC(p, ku) \
+ TAILQ_FOREACH((ku), &(p)->p_upcalls, ku_link)
/* XXXKSE the following lines should probably only be used in 1:1 code: */
#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads)
-#ifdef KSE
-#define FIRST_KSEGRP_IN_PROC(p) TAILQ_FIRST(&(p)->p_ksegrps)
-#endif
/*
* We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
@@ -923,9 +785,6 @@ extern u_long pgrphash;
extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
-#ifdef KSE
-extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0. */
-#endif
extern struct proc proc0; /* Process slot for swapper. */
extern struct thread thread0; /* Primary thread in proc0. */
extern struct vmspace vmspace0; /* VM space for proc0. */
@@ -976,11 +835,7 @@ void pargs_drop(struct pargs *pa);
void pargs_free(struct pargs *pa);
void pargs_hold(struct pargs *pa);
void procinit(void);
-#ifdef KSE
-void proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td);
-#else
void proc_linkup(struct proc *p, struct thread *td);
-#endif
void proc_reparent(struct proc *child, struct proc *newparent);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
@@ -1008,11 +863,6 @@ void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
/* New in KSE. */
-#ifdef KSE
-struct ksegrp *ksegrp_alloc(void);
-void ksegrp_free(struct ksegrp *kg);
-void ksegrp_stash(struct ksegrp *kg);
-#endif
void kse_GC(void);
void kseinit(void);
void cpu_set_upcall(struct thread *td, struct thread *td0);
@@ -1023,24 +873,14 @@ void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
void cpu_thread_swapin(struct thread *);
void cpu_thread_swapout(struct thread *);
-#ifdef KSE
-void ksegrp_link(struct ksegrp *kg, struct proc *p);
-void ksegrp_unlink(struct ksegrp *kg);
-#endif
struct thread *thread_alloc(void);
void thread_continued(struct proc *p);
void thread_exit(void) __dead2;
int thread_export_context(struct thread *td, int willexit);
void thread_free(struct thread *td);
-#ifdef KSE
-void thread_link(struct thread *td, struct ksegrp *kg);
-#else
void thread_link(struct thread *td, struct proc *p);
-#endif
void thread_reap(void);
-#ifdef KSE
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
-#endif
void thread_signal_add(struct thread *td, ksiginfo_t *);
int thread_single(int how);
void thread_single_end(void);
@@ -1058,21 +898,17 @@ void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
void thread_unsuspend_one(struct thread *td);
void thread_unthread(struct thread *td);
-#ifdef KSE
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct thread *td);
-#endif
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
void thr_exit1(void);
-#ifdef KSE
struct kse_upcall *upcall_alloc(void);
void upcall_free(struct kse_upcall *ku);
-void upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
+void upcall_link(struct kse_upcall *ku, struct proc *p);
void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
void upcall_stash(struct kse_upcall *ke);
-#endif
#endif /* _KERNEL */
diff --git a/sys/sys/rtprio.h b/sys/sys/rtprio.h
index ba02871..9178046 100644
--- a/sys/sys/rtprio.h
+++ b/sys/sys/rtprio.h
@@ -75,17 +75,11 @@ struct rtprio {
};
#ifdef _KERNEL
-#ifdef KSE
-struct ksegrp;
-int rtp_to_pri(struct rtprio *, struct ksegrp *);
-void pri_to_rtp(struct ksegrp *, struct rtprio *);
-#else
struct thread;
int rtp_to_pri(struct rtprio *, struct thread *);
void pri_to_rtp(struct thread *, struct rtprio *);
#endif
#endif
-#endif
#ifndef _KERNEL
#include <sys/cdefs.h>
diff --git a/sys/sys/runq.h b/sys/sys/runq.h
index 433d0e0..0f3524c 100644
--- a/sys/sys/runq.h
+++ b/sys/sys/runq.h
@@ -31,7 +31,7 @@
#include <machine/runq.h>
-struct kse;
+struct td_sched;
/*
* Run queue parameters.
@@ -43,7 +43,7 @@ struct kse;
/*
* Head of run queues.
*/
-TAILQ_HEAD(rqhead, kse);
+TAILQ_HEAD(rqhead, td_sched);
/*
* Bit array which maintains the status of a run queue. When a queue is
@@ -62,10 +62,10 @@ struct runq {
struct rqhead rq_queues[RQ_NQS];
};
-void runq_add(struct runq *, struct kse *, int flags);
+void runq_add(struct runq *, struct td_sched *, int flags);
int runq_check(struct runq *);
-struct kse *runq_choose(struct runq *);
+struct td_sched *runq_choose(struct runq *);
void runq_init(struct runq *);
-void runq_remove(struct runq *, struct kse *);
+void runq_remove(struct runq *, struct td_sched *);
#endif
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 69e4a0c..a9f1748 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -86,23 +86,15 @@ void sched_fork(struct thread *td, struct thread *childtd);
* KSE Groups contain scheduling priority information. They record the
* behavior of groups of KSEs and threads.
*/
-#ifdef KSE
-void sched_class(struct ksegrp *kg, int class);
-void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd);
-void sched_fork_ksegrp(struct thread *td, struct ksegrp *child);
-#else
void sched_class(struct thread *td, int class);
-#endif
void sched_nice(struct proc *p, int nice);
/*
* Threads are switched in and out, block on resources, have temporary
- * priorities inherited from their ksegs, and use up cpu time.
+ * priorities inherited from their procs, and use up cpu time.
*/
-#ifdef KSE
void sched_exit_thread(struct thread *td, struct thread *child);
void sched_fork_thread(struct thread *td, struct thread *child);
-#endif
void sched_lend_prio(struct thread *td, u_char prio);
void sched_lend_user_prio(struct thread *td, u_char pri);
fixpt_t sched_pctcpu(struct thread *td);
@@ -111,11 +103,7 @@ void sched_sleep(struct thread *td);
void sched_switch(struct thread *td, struct thread *newtd, int flags);
void sched_unlend_prio(struct thread *td, u_char prio);
void sched_unlend_user_prio(struct thread *td, u_char pri);
-#ifdef KSE
-void sched_user_prio(struct ksegrp *kg, u_char prio);
-#else
void sched_user_prio(struct thread *td, u_char prio);
-#endif
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
@@ -142,9 +130,6 @@ int sched_is_bound(struct thread *td);
* These procedures tell the process data structure allocation code how
* many bytes to actually allocate.
*/
-#ifdef KSE
-int sched_sizeof_ksegrp(void);
-#endif
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);
@@ -162,15 +147,11 @@ sched_unpin(void)
/* temporarily here */
void schedinit(void);
-#ifdef KSE
-void sched_init_concurrency(struct ksegrp *kg);
-void sched_set_concurrency(struct ksegrp *kg, int cuncurrency);
-#endif
+void sched_init_concurrency(struct proc *p);
+void sched_set_concurrency(struct proc *p, int concurrency);
void sched_schedinit(void);
-#ifdef KSE
-void sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td);
+void sched_newproc(struct proc *p, struct thread *td);
void sched_thread_exit(struct thread *td);
-#endif
void sched_newthread(struct thread *td);
#endif /* _KERNEL */
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index de56199..ff1d1ff 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -682,9 +682,6 @@ loop:
ppri = INT_MIN;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
-#ifdef KSE
- struct ksegrp *kg;
-#endif
if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
continue;
}
@@ -696,18 +693,13 @@ loop:
*
*/
if (td->td_inhibitors == TDI_SWAPPED) {
-#ifdef KSE
- kg = td->td_ksegrp;
- pri = p->p_swtime + kg->kg_slptime;
-#else
pri = p->p_swtime + td->td_slptime;
-#endif
if ((p->p_sflag & PS_SWAPINREQ) == 0) {
pri -= p->p_nice * 8;
}
/*
- * if this ksegrp/thread is higher priority
+ * if this thread is higher priority
* and there is enough space, then select
* this process instead of the previous
* selection.
@@ -816,9 +808,6 @@ int action;
{
struct proc *p;
struct thread *td;
-#ifdef KSE
- struct ksegrp *kg;
-#endif
int didswap = 0;
retry:
@@ -892,24 +881,15 @@ retry:
* do not swapout a realtime process
* Check all the thread groups..
*/
-#ifdef KSE
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- if (PRI_IS_REALTIME(kg->kg_pri_class))
-#else
FOREACH_THREAD_IN_PROC(p, td) {
if (PRI_IS_REALTIME(td->td_pri_class))
-#endif
goto nextproc;
/*
* Guarantee swap_idle_threshold1
* time in memory.
*/
-#ifdef KSE
- if (kg->kg_slptime < swap_idle_threshold1)
-#else
if (td->td_slptime < swap_idle_threshold1)
-#endif
goto nextproc;
/*
@@ -921,16 +901,9 @@ retry:
* This could be refined to support
* swapping out a thread.
*/
-#ifdef KSE
- FOREACH_THREAD_IN_GROUP(kg, td) {
- if ((td->td_priority) < PSOCK ||
- !thread_safetoswapout(td))
- goto nextproc;
- }
-#else
- if ((td->td_priority) < PSOCK || !thread_safetoswapout(td))
+ if ((td->td_priority) < PSOCK ||
+ !thread_safetoswapout(td))
goto nextproc;
-#endif
/*
* If the system is under memory stress,
* or if we are swapping
@@ -939,20 +912,11 @@ retry:
*/
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
-#ifdef KSE
- (kg->kg_slptime < swap_idle_threshold2)))
-#else
(td->td_slptime < swap_idle_threshold2)))
-#endif
goto nextproc;
-#ifdef KSE
- if (minslptime > kg->kg_slptime)
- minslptime = kg->kg_slptime;
-#else
if (minslptime > td->td_slptime)
minslptime = td->td_slptime;
-#endif
}
/*
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 14f47dc..b445003 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -179,11 +179,7 @@ pagezero_start(void __unused *arg)
PROC_UNLOCK(pagezero_proc);
mtx_lock_spin(&sched_lock);
td = FIRST_THREAD_IN_PROC(pagezero_proc);
-#ifdef KSE
- sched_class(td->td_ksegrp, PRI_IDLE);
-#else
sched_class(td, PRI_IDLE);
-#endif
sched_prio(td, PRI_MAX_IDLE);
setrunqueue(td, SRQ_BORING);
mtx_unlock_spin(&sched_lock);