Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c      |    1
-rw-r--r--  sys/kern/kern_clock.c     |   55
-rw-r--r--  sys/kern/kern_exec.c      |    3
-rw-r--r--  sys/kern/kern_exit.c      |   18
-rw-r--r--  sys/kern/kern_fork.c      |    2
-rw-r--r--  sys/kern/kern_kse.c       | 1159
-rw-r--r--  sys/kern/kern_lock.c      |   40
-rw-r--r--  sys/kern/kern_resource.c  |  134
-rw-r--r--  sys/kern/kern_sig.c       |    6
-rw-r--r--  sys/kern/kern_switch.c    |  234
-rw-r--r--  sys/kern/kern_thread.c    | 1159
-rw-r--r--  sys/kern/subr_prof.c      |   69
-rw-r--r--  sys/kern/subr_trap.c      |   55
-rw-r--r--  sys/kern/subr_witness.c   |    2
14 files changed, 1548 insertions, 1389 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 74424cc..4fdf604 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -379,7 +379,6 @@ proc0_init(void *dummy __unused)
ke->ke_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
- ke->ke_owner = td;
p->p_peers = 0;
p->p_leader = p;
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index c5e1b4a..ab2c9ee 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -320,6 +320,10 @@ startprofclock(p)
* cover psdiv, etc. as well.
*/
mtx_lock_spin(&sched_lock);
+ if (p->p_sflag & PS_STOPPROF) {
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
if (++profprocs == 1 && stathz != 0) {
@@ -341,9 +345,19 @@ stopprofclock(p)
{
int s;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+retry:
mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
- p->p_sflag &= ~PS_PROFIL;
+ if (p->p_profthreads) {
+ p->p_sflag |= PS_STOPPROF;
+ mtx_unlock_spin(&sched_lock);
+ msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
+ "stopprof", NULL);
+ goto retry;
+ }
+ p->p_sflag &= ~(PS_PROFIL|PS_STOPPROF);
if (--profprocs == 0 && stathz != 0) {
s = splstatclock();
psdiv = pscnt = 1;
@@ -363,10 +377,7 @@ stopprofclock(p)
* this function's relationship to statclock.
*/
void
-statclock_process(ke, pc, user)
- struct kse *ke;
- register_t pc;
- int user;
+statclock_process(struct thread *td, register_t pc, int user)
{
#ifdef GPROF
struct gmonparam *g;
@@ -376,27 +387,31 @@ statclock_process(ke, pc, user)
long rss;
struct rusage *ru;
struct vmspace *vm;
- struct proc *p = ke->ke_proc;
- struct thread *td = ke->ke_thread; /* current thread */
+ struct proc *p = td->td_proc;
- KASSERT(ke == curthread->td_kse, ("statclock_process: td != curthread"));
mtx_assert(&sched_lock, MA_OWNED);
if (user) {
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled, record the tick.
*/
- if (p->p_sflag & PS_PROFIL)
- addupc_intr(ke, pc, 1);
+ if (p->p_sflag & PS_PROFIL) {
+ /* Only when thread is not in transition */
+ if (!(td->td_flags & TDF_UPCALLING))
+ addupc_intr(td, pc, 1);
+ }
if (pscnt < psdiv)
return;
/*
* Charge the time as appropriate.
*/
if (p->p_flag & P_KSES)
- thread_add_ticks_intr(1, 1);
- ke->ke_uticks++;
- if (ke->ke_ksegrp->kg_nice > NZERO)
+ thread_statclock(1);
+ /*
+ td->td_uticks++;
+ */
+ p->p_uticks++;
+ if (td->td_ksegrp->kg_nice > NZERO)
cp_time[CP_NICE]++;
else
cp_time[CP_USER]++;
@@ -429,12 +444,16 @@ statclock_process(ke, pc, user)
* in ``non-process'' (i.e., interrupt) work.
*/
if ((td->td_ithd != NULL) || td->td_intr_nesting_level >= 2) {
- ke->ke_iticks++;
+ p->p_iticks++;
+ /*
+ td->td_iticks++;
+ */
cp_time[CP_INTR]++;
} else {
if (p->p_flag & P_KSES)
- thread_add_ticks_intr(0, 1);
- ke->ke_sticks++;
+ thread_statclock(0);
+ td->td_sticks++;
+ p->p_sticks++;
if (p != PCPU_GET(idlethread)->td_proc)
cp_time[CP_SYS]++;
else
@@ -442,7 +461,7 @@ statclock_process(ke, pc, user)
}
}
- sched_clock(ke->ke_thread);
+ sched_clock(td);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&
@@ -472,7 +491,7 @@ statclock(frame)
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
if (--pscnt == 0)
pscnt = psdiv;
- statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame));
+ statclock_process(curthread, CLKF_PC(frame), CLKF_USERMODE(frame));
mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
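
The stopprofclock() change above makes the function sleepable: it now asserts the proc lock, and if other threads are still copying out profiling samples (p->p_profthreads != 0) it latches PS_STOPPROF, drops the sched_lock spin lock, msleep()s on &p->p_profthreads under the proc mutex, and retries from the top; the matching PS_STOPPROF test added to startprofclock() keeps profiling from being re-enabled while a stop is draining. A minimal userland sketch of the same drain-and-retry idea, with made-up names and a pthread mutex/condvar standing in for the proc lock and the sleep channel:

/*
 * Standalone userland analogue (NOT part of the patch) of the pattern
 * stopprofclock() now uses: latch a "stop" flag, release the lock that
 * may not be held across a sleep, wait until the last in-flight
 * profiling thread drains, then re-check.  All names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>

struct profstate {
	pthread_mutex_t	 lock;		/* stands in for the proc lock */
	pthread_cond_t	 drained;	/* stands in for &p->p_profthreads */
	int		 profthreads;	/* threads still copying samples out */
	bool		 profiling;	/* PS_PROFIL analogue */
	bool		 stopping;	/* PS_STOPPROF analogue */
};

void
prof_stop(struct profstate *ps)
{
	pthread_mutex_lock(&ps->lock);
	while (ps->profiling && ps->profthreads > 0) {
		/* Mark that a stop is pending and wait for the drain. */
		ps->stopping = true;
		pthread_cond_wait(&ps->drained, &ps->lock);
	}
	ps->profiling = false;
	ps->stopping = false;
	pthread_mutex_unlock(&ps->lock);
}

void
prof_sample_done(struct profstate *ps)
{
	pthread_mutex_lock(&ps->lock);
	/* Last sampler out wakes anyone waiting in prof_stop(). */
	if (--ps->profthreads == 0 && ps->stopping)
		pthread_cond_broadcast(&ps->drained);
	pthread_mutex_unlock(&ps->lock);
}

This is also why the kern_exit.c hunk below wraps the stopprofclock() call in PROC_LOCK()/PROC_UNLOCK(): the new sleep requires the proc mutex to be held on entry.
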
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 33a0764..b6d77d2 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -210,10 +210,7 @@ kern_execve(td, fname, argv, envv, mac_p)
* so unset the associated flags and lose KSE mode.
*/
p->p_flag &= ~P_KSES;
- td->td_flags &= ~TDF_UNBOUND;
td->td_mailbox = NULL;
- td->td_kse->ke_mailbox = NULL;
- td->td_kse->ke_flags &= ~KEF_DOUPCALL;
thread_single_end();
}
p->p_flag |= P_INEXEC;
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 0b2c2e8..ce9a18c 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -147,7 +147,7 @@ exit1(td, rv)
}
/*
- * XXXXKSE: MUST abort all other threads before proceeding past here.
+ * XXXKSE: MUST abort all other threads before proceeding past here.
*/
PROC_LOCK(p);
if (p->p_flag & P_KSES) {
@@ -156,17 +156,6 @@ exit1(td, rv)
* if so, act apropriatly, (exit or suspend);
*/
thread_suspend_check(0);
- /*
- * Here is a trick..
- * We need to free up our KSE to process other threads
- * so that we can safely set the UNBOUND flag
- * (whether or not we have a mailbox) as we are NEVER
- * going to return to the user.
- * The flag will not be set yet if we are exiting
- * because of a signal, pagefault, or similar
- * (or even an exit(2) from the UTS).
- */
- td->td_flags |= TDF_UNBOUND;
/*
* Kill off the other threads. This requires
@@ -192,7 +181,6 @@ exit1(td, rv)
* Turn off threading support.
*/
p->p_flag &= ~P_KSES;
- td->td_flags &= ~TDF_UNBOUND;
thread_single_end(); /* Don't need this any more. */
}
/*
@@ -237,8 +225,10 @@ exit1(td, rv)
*/
TAILQ_FOREACH(ep, &exit_list, next)
(*ep->function)(p);
-
+
+ PROC_LOCK(p);
stopprofclock(p);
+ PROC_UNLOCK(p);
MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
M_ZOMBIE, 0);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index f84afa8..6c896eb 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -492,9 +492,7 @@ again:
/* Set up the thread as an active thread (as if runnable). */
ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
- ke2->ke_owner = td2;
td2->td_kse = ke2;
- td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
/*
* Duplicate sub-structures as needed.
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 78bce30..883f4d1 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -63,6 +63,7 @@
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
+static uma_zone_t upcall_zone;
/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
@@ -78,16 +79,52 @@ static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
&max_groups_per_proc, 0, "Limit on thread groups per proc");
+static int virtual_cpu;
+
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
+TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
-struct mtx zombie_thread_lock;
-MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
- "zombie_thread_lock", MTX_SPIN);
+TAILQ_HEAD(, kse_upcall) zombie_upcalls =
+ TAILQ_HEAD_INITIALIZER(zombie_upcalls);
+struct mtx kse_zombie_lock;
+MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
static void kse_purge(struct proc *p, struct thread *td);
+static void kse_purge_group(struct thread *td);
+static int thread_update_usr_ticks(struct thread *td);
+static int thread_update_sys_ticks(struct thread *td);
+static void thread_alloc_spare(struct thread *td, struct thread *spare);
+
+static int
+sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
+{
+ int error, new_val;
+ int def_val;
+
+#ifdef SMP
+ def_val = mp_ncpus;
+#else
+ def_val = 1;
+#endif
+ if (virtual_cpu == 0)
+ new_val = def_val;
+ else
+ new_val = virtual_cpu;
+ error = sysctl_handle_int(oidp, &new_val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (new_val < 0)
+ return (EINVAL);
+ virtual_cpu = new_val;
+ return (0);
+}
+
+/* DEBUG ONLY */
+SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
+ 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
+ "debug virtual cpus");
/*
* Prepare a thread for use.
@@ -99,7 +136,6 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
- td->td_flags |= TDF_UNBOUND;
}
/*
@@ -161,6 +197,7 @@ thread_fini(void *mem, int size)
td = (struct thread *)mem;
pmap_dispose_thread(td);
}
+
/*
* Initialize type-stable parts of a kse (when newly created).
*/
@@ -172,6 +209,7 @@ kse_init(void *mem, int size)
ke = (struct kse *)mem;
ke->ke_sched = (struct ke_sched *)&ke[1];
}
+
/*
* Initialize type-stable parts of a ksegrp (when newly created).
*/
@@ -185,7 +223,7 @@ ksegrp_init(void *mem, int size)
}
/*
- * KSE is linked onto the idle queue.
+ * KSE is linked into kse group.
*/
void
kse_link(struct kse *ke, struct ksegrp *kg)
@@ -194,12 +232,12 @@ kse_link(struct kse *ke, struct ksegrp *kg)
TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
kg->kg_kses++;
- ke->ke_state = KES_UNQUEUED;
+ ke->ke_state = KES_UNQUEUED;
ke->ke_proc = p;
ke->ke_ksegrp = kg;
- ke->ke_owner = NULL;
ke->ke_thread = NULL;
- ke->ke_oncpu = NOCPU;
+ ke->ke_oncpu = NOCPU;
+ ke->ke_flags = 0;
}
void
@@ -209,11 +247,13 @@ kse_unlink(struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
-
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- if (--kg->kg_kses == 0) {
- ksegrp_unlink(kg);
+ if (ke->ke_state == KES_IDLE) {
+ TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
+ kg->kg_idle_kses--;
}
+ if (--kg->kg_kses == 0)
+ ksegrp_unlink(kg);
/*
* Aggregate stats from the KSE
*/
@@ -228,15 +268,20 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
- kg->kg_proc = p;
-/* the following counters are in the -zero- section and may not need clearing */
+ TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */
+ TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
+ kg->kg_proc = p;
+ /*
+ * the following counters are in the -zero- section
+ * and may not need clearing
+ */
kg->kg_numthreads = 0;
- kg->kg_runnable = 0;
- kg->kg_kses = 0;
- kg->kg_loan_kses = 0;
- kg->kg_runq_kses = 0; /* XXXKSE change name */
-/* link it in now that it's consistent */
+ kg->kg_runnable = 0;
+ kg->kg_kses = 0;
+ kg->kg_runq_kses = 0; /* XXXKSE change name */
+ kg->kg_idle_kses = 0;
+ kg->kg_numupcalls = 0;
+ /* link it in now that it's consistent */
p->p_numksegrps++;
TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}
@@ -247,9 +292,11 @@ ksegrp_unlink(struct ksegrp *kg)
struct proc *p;
mtx_assert(&sched_lock, MA_OWNED);
+ KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
+ KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
+ KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
+
p = kg->kg_proc;
- KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
- ("kseg_unlink: residual threads or KSEs"));
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
/*
@@ -258,13 +305,63 @@ ksegrp_unlink(struct ksegrp *kg)
ksegrp_stash(kg);
}
+struct kse_upcall *
+upcall_alloc(void)
+{
+ struct kse_upcall *ku;
+
+ ku = uma_zalloc(upcall_zone, 0);
+ bzero(ku, sizeof(*ku));
+ return (ku);
+}
+
+void
+upcall_free(struct kse_upcall *ku)
+{
+
+ uma_zfree(upcall_zone, ku);
+}
+
+void
+upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
+{
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
+ ku->ku_ksegrp = kg;
+ kg->kg_numupcalls++;
+}
+
+void
+upcall_unlink(struct kse_upcall *ku)
+{
+ struct ksegrp *kg = ku->ku_ksegrp;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
+ TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
+ kg->kg_numupcalls--;
+ upcall_stash(ku);
+}
+
+void
+upcall_remove(struct thread *td)
+{
+
+ if (td->td_upcall) {
+ td->td_upcall->ku_owner = NULL;
+ upcall_unlink(td->td_upcall);
+ td->td_upcall = 0;
+ }
+}
+
/*
- * for a newly created process,
- * link up a the structure and its initial threads etc.
+ * For a newly created process,
+ * link up all the structures and its initial threads etc.
*/
void
proc_linkup(struct proc *p, struct ksegrp *kg,
- struct kse *ke, struct thread *td)
+ struct kse *ke, struct thread *td)
{
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
@@ -278,6 +375,11 @@ proc_linkup(struct proc *p, struct ksegrp *kg,
thread_link(td, kg);
}
+/*
+struct kse_thr_interrupt_args {
+ struct kse_thr_mailbox * tmbx;
+};
+*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
@@ -285,10 +387,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
struct thread *td2;
p = td->td_proc;
- /* KSE-enabled processes only, please. */
- if (!(p->p_flag & P_KSES))
- return (EINVAL);
- if (uap->tmbx == NULL)
+ if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL))
return (EINVAL);
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td2) {
@@ -299,7 +398,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
cv_abort(td2);
else
abortsleep(td2);
- }
+ }
mtx_unlock_spin(&sched_lock);
return (0);
}
@@ -308,6 +407,11 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
return (ESRCH);
}
+/*
+struct kse_exit_args {
+ register_t dummy;
+};
+*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
@@ -316,11 +420,16 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
struct kse *ke;
p = td->td_proc;
- /* Only UTS can do the syscall */
- if (!(p->p_flag & P_KSES) || (td->td_mailbox != NULL))
+ /*
+ * Only UTS can call the syscall and current group
+ * should be a threaded group.
+ */
+ if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
return (EINVAL);
+ KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
+
kg = td->td_ksegrp;
- /* serialize killing kse */
+ /* Serialize killing KSE */
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
@@ -329,14 +438,17 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
return (EDEADLK);
}
ke = td->td_kse;
+ upcall_remove(td);
if (p->p_numthreads == 1) {
- ke->ke_flags &= ~KEF_DOUPCALL;
- ke->ke_mailbox = NULL;
+ kse_purge(p, td);
p->p_flag &= ~P_KSES;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
- ke->ke_flags |= KEF_EXIT;
+ if (kg->kg_numthreads == 1) { /* Shutdown a group */
+ kse_purge_group(td);
+ ke->ke_flags |= KEF_EXIT;
+ }
thread_exit();
/* NOTREACHED */
}
@@ -345,10 +457,15 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
/*
* Either becomes an upcall or waits for an awakening event and
- * THEN becomes an upcall. Only error cases return.
+ * then becomes an upcall. Only error cases return.
*/
+/*
+struct kse_release_args {
+ register_t dummy;
+};
+*/
int
-kse_release(struct thread * td, struct kse_release_args * uap)
+kse_release(struct thread *td, struct kse_release_args *uap)
{
struct proc *p;
struct ksegrp *kg;
@@ -356,28 +473,25 @@ kse_release(struct thread * td, struct kse_release_args * uap)
p = td->td_proc;
kg = td->td_ksegrp;
/*
- * kse must have a mailbox ready for upcall, and only UTS can
- * do the syscall.
- */
- if (!(p->p_flag & P_KSES) ||
- (td->td_mailbox != NULL) ||
- (td->td_kse->ke_mailbox == NULL))
+ * Only UTS can call the syscall and current group
+ * should be a threaded group.
+ */
+ if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
return (EINVAL);
+ KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
/* Change OURSELF to become an upcall. */
- td->td_flags = TDF_UPCALLING; /* BOUND */
- if (!(td->td_kse->ke_flags & (KEF_DOUPCALL|KEF_ASTPENDING)) &&
+ td->td_flags = TDF_UPCALLING;
+ if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
(kg->kg_completed == NULL)) {
- /*
- * The KSE will however be lendable.
- */
- TD_SET_IDLE(td);
- PROC_UNLOCK(p);
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ kg->kg_upsleeps++;
mtx_unlock_spin(&sched_lock);
+ msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause",
+ NULL);
+ kg->kg_upsleeps--;
+ PROC_UNLOCK(p);
} else {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
@@ -392,61 +506,59 @@ int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
struct proc *p;
- struct kse *ke;
struct ksegrp *kg;
+ struct kse_upcall *ku;
struct thread *td2;
p = td->td_proc;
td2 = NULL;
+ ku = NULL;
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
- return EINVAL;
+ return (EINVAL);
+ PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_KSE_IN_GROUP(kg, ke) {
- if (ke->ke_mailbox != uap->mbx)
- continue;
- td2 = ke->ke_owner;
- KASSERT((td2 != NULL),("KSE with no owner"));
- break;
+ FOREACH_UPCALL_IN_GROUP(kg, ku) {
+ if (ku->ku_mailbox == uap->mbx)
+ break;
}
- if (td2) {
+ if (ku)
break;
- }
}
} else {
- /*
- * look for any idle KSE to resurrect.
- */
kg = td->td_ksegrp;
- FOREACH_KSE_IN_GROUP(kg, ke) {
- td2 = ke->ke_owner;
- KASSERT((td2 != NULL),("KSE with no owner2"));
- if (TD_IS_IDLE(td2))
- break;
+ if (kg->kg_upsleeps) {
+ wakeup_one(&kg->kg_completed);
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ return (0);
}
- KASSERT((td2 != NULL), ("no thread(s)"));
+ ku = TAILQ_FIRST(&kg->kg_upcalls);
}
- if (td2) {
- if (TD_IS_IDLE(td2)) {
- TD_CLR_IDLE(td2);
- setrunnable(td2);
- } else if (td != td2) {
- /* guarantee do an upcall ASAP */
- td2->td_kse->ke_flags |= KEF_DOUPCALL;
+ if (ku) {
+ if ((td2 = ku->ku_owner) == NULL) {
+ panic("%s: no owner", __func__);
+ } else if (TD_ON_SLEEPQ(td2) &&
+ (td2->td_wchan == &kg->kg_completed)) {
+ abortsleep(td2);
+ } else {
+ ku->ku_flags |= KUF_DOUPCALL;
}
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
return (0);
}
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
return (ESRCH);
}
/*
* No new KSEG: first call: use current KSE, don't schedule an upcall
- * All other situations, do allocate a new KSE and schedule an upcall on it.
+ * In all other situations, allocate up to the maximum number of new KSEs and schedule an upcall.
*/
/* struct kse_create_args {
struct kse_mailbox *mbx;
@@ -456,112 +568,140 @@ int
kse_create(struct thread *td, struct kse_create_args *uap)
{
struct kse *newke;
- struct kse *ke;
struct ksegrp *newkg;
struct ksegrp *kg;
struct proc *p;
struct kse_mailbox mbx;
- int err;
+ struct kse_upcall *newku;
+ int err, ncpus;
p = td->td_proc;
if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
return (err);
- p->p_flag |= P_KSES; /* easier to just set it than to test and set */
+ /* Too bad the kernel does not always have a cpu counter. */
+#ifdef SMP
+ ncpus = mp_ncpus;
+#else
+ ncpus = 1;
+#endif
+ if (thread_debug && virtual_cpu != 0)
+ ncpus = virtual_cpu;
+
+ /* Easier to just set it than to test and set */
+ p->p_flag |= P_KSES;
kg = td->td_ksegrp;
if (uap->newgroup) {
+ /* There is a race condition here, but it is cheap */
if (p->p_numksegrps >= max_groups_per_proc)
return (EPROCLIM);
/*
* If we want a new KSEGRP it doesn't matter whether
* we have already fired up KSE mode before or not.
- * We put the process in KSE mode and create a new KSEGRP
- * and KSE. If our KSE has not got a mailbox yet then
- * that doesn't matter, just leave it that way. It will
- * ensure that this thread stay BOUND. It's possible
- * that the call came form a threaded library and the main
- * program knows nothing of threads.
+ * We put the process in KSE mode and create a new KSEGRP.
*/
newkg = ksegrp_alloc();
bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
- kg_startzero, kg_endzero));
+ kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
- newke = kse_alloc();
+ mtx_lock_spin(&sched_lock);
+ ksegrp_link(newkg, p);
+ if (p->p_numksegrps >= max_groups_per_proc) {
+ ksegrp_unlink(newkg);
+ mtx_unlock_spin(&sched_lock);
+ return (EPROCLIM);
+ }
+ mtx_unlock_spin(&sched_lock);
} else {
- /*
- * Otherwise, if we have already set this KSE
- * to have a mailbox, we want to make another KSE here,
- * but only if there are not already the limit, which
- * is 1 per CPU max.
- *
- * If the current KSE doesn't have a mailbox we just use it
- * and give it one.
- *
- * Because we don't like to access
- * the KSE outside of schedlock if we are UNBOUND,
- * (because it can change if we are preempted by an interrupt)
- * we can deduce it as having a mailbox if we are UNBOUND,
- * and only need to actually look at it if we are BOUND,
- * which is safe.
+ newkg = kg;
+ }
+
+ /*
+ * Creating more upcalls than the number of physical cpus does
+ * not help performance.
+ */
+ if (newkg->kg_numupcalls >= ncpus)
+ return (EPROCLIM);
+
+ if (newkg->kg_numupcalls == 0) {
+ /*
+ * Initialize KSE group, optimized for MP.
+ * Create as many KSEs as there are physical cpus; this increases
+ * concurrency even if userland is not MP safe and can only run
+ * on a single CPU (true for early versions of libpthread).
+ * In an ideal world, every physical cpu should execute a thread.
+ * If there are enough KSEs, threads in the kernel can be
+ * executed in parallel on different cpus at full speed;
+ * concurrency in the kernel shouldn't be restricted by the
+ * number of upcalls userland provides.
+ * Adding more upcall structures only increases concurrency
+ * in userland.
+ * The highest performance configuration is:
+ * N kses = N upcalls = N physical cpus
*/
- if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
- if (thread_debug == 0) { /* if debugging, allow more */
-#ifdef SMP
- if (kg->kg_kses > mp_ncpus)
-#endif
- return (EPROCLIM);
- }
+ while (newkg->kg_kses < ncpus) {
newke = kse_alloc();
- } else {
- newke = NULL;
- }
- newkg = NULL;
- }
- if (newke) {
- bzero(&newke->ke_startzero, RANGEOF(struct kse,
- ke_startzero, ke_endzero));
+ bzero(&newke->ke_startzero, RANGEOF(struct kse,
+ ke_startzero, ke_endzero));
#if 0
- bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
- RANGEOF(struct kse, ke_startcopy, ke_endcopy));
+ mtx_lock_spin(&sched_lock);
+ bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
+ RANGEOF(struct kse, ke_startcopy, ke_endcopy));
+ mtx_unlock_spin(&sched_lock);
#endif
- /* For the first call this may not have been set */
- if (td->td_standin == NULL) {
- td->td_standin = thread_alloc();
- }
- mtx_lock_spin(&sched_lock);
- if (newkg) {
- if (p->p_numksegrps >= max_groups_per_proc) {
- mtx_unlock_spin(&sched_lock);
- ksegrp_free(newkg);
- kse_free(newke);
- return (EPROCLIM);
- }
- ksegrp_link(newkg, p);
+ mtx_lock_spin(&sched_lock);
+ kse_link(newke, newkg);
+ if (p->p_sflag & PS_NEEDSIGCHK)
+ newke->ke_flags |= KEF_ASTPENDING;
+ /* Add engine */
+ kse_reassign(newke);
+ mtx_unlock_spin(&sched_lock);
}
- else
- newkg = kg;
- kse_link(newke, newkg);
- if (p->p_sflag & PS_NEEDSIGCHK)
- newke->ke_flags |= KEF_ASTPENDING;
- newke->ke_mailbox = uap->mbx;
- newke->ke_upcall = mbx.km_func;
- bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
- thread_schedule_upcall(td, newke);
+ }
+ newku = upcall_alloc();
+ newku->ku_mailbox = uap->mbx;
+ newku->ku_func = mbx.km_func;
+ bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
+
+ /* For the first call this may not have been set */
+ if (td->td_standin == NULL)
+ thread_alloc_spare(td, NULL);
+
+ mtx_lock_spin(&sched_lock);
+ if (newkg->kg_numupcalls >= ncpus) {
+ upcall_free(newku);
mtx_unlock_spin(&sched_lock);
+ return (EPROCLIM);
+ }
+ upcall_link(newku, newkg);
+
+ /*
+ * Each upcall structure has an owner thread; find which
+ * one owns it.
+ */
+ if (uap->newgroup) {
+ /*
+ * Because the new ksegrp has no threads yet,
+ * create an initial upcall thread to own it.
+ */
+ thread_schedule_upcall(td, newku);
} else {
/*
- * If we didn't allocate a new KSE then the we are using
- * the exisiting (BOUND) kse.
+ * If the current thread does not have an upcall structure,
+ * just assign the upcall to it.
*/
- ke = td->td_kse;
- ke->ke_mailbox = uap->mbx;
- ke->ke_upcall = mbx.km_func;
- bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
+ if (td->td_upcall == NULL) {
+ newku->ku_owner = td;
+ td->td_upcall = newku;
+ } else {
+ /*
+ * Create a new upcall thread to own it.
+ */
+ thread_schedule_upcall(td, newku);
+ }
}
- /*
- * Fill out the KSE-mode specific fields of the new kse.
- */
+ mtx_unlock_spin(&sched_lock);
return (0);
}
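
The kse_release()/kse_wakeup() pair above replaces the old idle-KSE loaning scheme with a simple sleep/latch handshake: a releasing upcall thread sleeps on &kg->kg_completed (counted in kg_upsleeps) only when there is nothing to report, and kse_wakeup() either wakes a sleeping upcall thread or sets KUF_DOUPCALL on the target upcall so its owner performs the upcall at the next opportunity. A small pthread-based sketch of that lost-wakeup-free handshake (the names are illustrative, not kernel APIs):

/*
 * Userland analogue (NOT part of the patch) of the release/wakeup
 * handshake: sleep only if no request is pending, and a waker either
 * signals a sleeper or latches the request for the next release.
 */
#include <pthread.h>
#include <stdbool.h>

struct upcall_gate {
	pthread_mutex_t	 lock;
	pthread_cond_t	 completed;	/* stands in for &kg->kg_completed */
	int		 upsleeps;	/* kg_upsleeps analogue */
	bool		 doupcall;	/* KUF_DOUPCALL analogue */
	bool		 have_work;	/* kg_completed != NULL analogue */
};

/* kse_release() analogue: wait until there is something to report. */
void
gate_release(struct upcall_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (!g->doupcall && !g->have_work) {
		g->upsleeps++;
		pthread_cond_wait(&g->completed, &g->lock);
		g->upsleeps--;
	}
	g->doupcall = false;		/* consume the pending request */
	pthread_mutex_unlock(&g->lock);
}

/* kse_wakeup() analogue: wake a sleeper, or latch the request. */
void
gate_wakeup(struct upcall_gate *g)
{
	pthread_mutex_lock(&g->lock);
	if (g->upsleeps > 0)
		pthread_cond_signal(&g->completed);
	else
		g->doupcall = true;
	pthread_mutex_unlock(&g->lock);
}

Because the flag and the sleeper count are only touched under the lock, a wakeup issued while no one is sleeping is never lost; the kernel version achieves the same effect with sched_lock, KUF_DOUPCALL, and kg_upsleeps.
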
@@ -642,6 +782,8 @@ threadinit(void)
kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
NULL, NULL, kse_init, NULL,
UMA_ALIGN_CACHE, 0);
+ upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
/*
@@ -650,9 +792,9 @@ threadinit(void)
void
thread_stash(struct thread *td)
{
- mtx_lock_spin(&zombie_thread_lock);
+ mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
- mtx_unlock_spin(&zombie_thread_lock);
+ mtx_unlock_spin(&kse_zombie_lock);
}
/*
@@ -661,9 +803,21 @@ thread_stash(struct thread *td)
void
kse_stash(struct kse *ke)
{
- mtx_lock_spin(&zombie_thread_lock);
+ mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
- mtx_unlock_spin(&zombie_thread_lock);
+ mtx_unlock_spin(&kse_zombie_lock);
+}
+
+/*
+ * Stash an embarrassingly extra upcall into the zombie upcall queue.
+ */
+
+void
+upcall_stash(struct kse_upcall *ku)
+{
+ mtx_lock_spin(&kse_zombie_lock);
+ TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
+ mtx_unlock_spin(&kse_zombie_lock);
}
/*
@@ -672,13 +826,13 @@ kse_stash(struct kse *ke)
void
ksegrp_stash(struct ksegrp *kg)
{
- mtx_lock_spin(&zombie_thread_lock);
+ mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
- mtx_unlock_spin(&zombie_thread_lock);
+ mtx_unlock_spin(&kse_zombie_lock);
}
/*
- * Reap zombie threads.
+ * Reap zombie kse resources.
*/
void
thread_reap(void)
@@ -686,27 +840,34 @@ thread_reap(void)
struct thread *td_first, *td_next;
struct kse *ke_first, *ke_next;
struct ksegrp *kg_first, * kg_next;
+ struct kse_upcall *ku_first, *ku_next;
/*
- * don't even bother to lock if none at this instant
- * We really don't care about the next instant..
+ * Don't even bother to lock if none at this instant,
+ * we really don't care about the next instant..
*/
if ((!TAILQ_EMPTY(&zombie_threads))
|| (!TAILQ_EMPTY(&zombie_kses))
- || (!TAILQ_EMPTY(&zombie_ksegrps))) {
- mtx_lock_spin(&zombie_thread_lock);
+ || (!TAILQ_EMPTY(&zombie_ksegrps))
+ || (!TAILQ_EMPTY(&zombie_upcalls))) {
+ mtx_lock_spin(&kse_zombie_lock);
td_first = TAILQ_FIRST(&zombie_threads);
ke_first = TAILQ_FIRST(&zombie_kses);
kg_first = TAILQ_FIRST(&zombie_ksegrps);
+ ku_first = TAILQ_FIRST(&zombie_upcalls);
if (td_first)
TAILQ_INIT(&zombie_threads);
if (ke_first)
TAILQ_INIT(&zombie_kses);
if (kg_first)
TAILQ_INIT(&zombie_ksegrps);
- mtx_unlock_spin(&zombie_thread_lock);
+ if (ku_first)
+ TAILQ_INIT(&zombie_upcalls);
+ mtx_unlock_spin(&kse_zombie_lock);
while (td_first) {
td_next = TAILQ_NEXT(td_first, td_runq);
+ if (td_first->td_ucred)
+ crfree(td_first->td_ucred);
thread_free(td_first);
td_first = td_next;
}
@@ -720,6 +881,11 @@ thread_reap(void)
ksegrp_free(kg_first);
kg_first = kg_next;
}
+ while (ku_first) {
+ ku_next = TAILQ_NEXT(ku_first, ku_link);
+ upcall_free(ku_first);
+ ku_first = ku_next;
+ }
}
}
@@ -792,20 +958,14 @@ thread_export_context(struct thread *td)
struct ksegrp *kg;
uintptr_t mbx;
void *addr;
- int error;
+ int error, temp;
ucontext_t uc;
- uint temp;
p = td->td_proc;
kg = td->td_ksegrp;
/* Export the user/machine context. */
-#if 0
- addr = (caddr_t)td->td_mailbox +
- offsetof(struct kse_thr_mailbox, tm_context);
-#else /* if user pointer arithmetic is valid in the kernel */
- addr = (void *)(&td->td_mailbox->tm_context);
-#endif
+ addr = (void *)(&td->td_mailbox->tm_context);
error = copyin(addr, &uc, sizeof(ucontext_t));
if (error)
goto bad;
@@ -815,13 +975,14 @@ thread_export_context(struct thread *td)
if (error)
goto bad;
- /* get address in latest mbox of list pointer */
-#if 0
- addr = (caddr_t)td->td_mailbox
- + offsetof(struct kse_thr_mailbox , tm_next);
-#else /* if user pointer arithmetic is valid in the kernel */
+ /* Exports clock ticks in kernel mode */
+ addr = (caddr_t)(&td->td_mailbox->tm_sticks);
+ temp = fuword(addr) + td->td_usticks;
+ if (suword(addr, temp))
+ goto bad;
+
+ /* Get address in latest mbox of list pointer */
addr = (void *)(&td->td_mailbox->tm_next);
-#endif
/*
* Put the saved address of the previous first
* entry into this one
@@ -835,42 +996,43 @@ thread_export_context(struct thread *td)
PROC_LOCK(p);
if (mbx == (uintptr_t)kg->kg_completed) {
kg->kg_completed = td->td_mailbox;
+ /*
+ * The thread context may be taken away by
+ * other upcall threads when we unlock
+ * the process lock. It is no longer valid to
+ * use it again anywhere else.
+ */
+ td->td_mailbox = NULL;
PROC_UNLOCK(p);
break;
}
PROC_UNLOCK(p);
}
- addr = (caddr_t)td->td_mailbox
- + offsetof(struct kse_thr_mailbox, tm_sticks);
- temp = fuword(addr) + td->td_usticks;
- if (suword(addr, temp))
- goto bad;
+ td->td_usticks = 0;
return (0);
bad:
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
+ /* The mailbox is bad, don't use it */
+ td->td_mailbox = NULL;
+ td->td_usticks = 0;
return (error);
}
/*
* Take the list of completed mailboxes for this KSEGRP and put them on this
- * KSE's mailbox as it's the next one going up.
+ * upcall's mailbox as it's the next one going up.
*/
static int
-thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
+thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
struct proc *p = kg->kg_proc;
void *addr;
uintptr_t mbx;
-#if 0
- addr = (caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_completed);
-#else /* if user pointer arithmetic is valid in the kernel */
- addr = (void *)(&ke->ke_mailbox->km_completed);
-#endif
+ addr = (void *)(&ku->ku_mailbox->km_completed);
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
if (suword(addr, mbx)) {
@@ -895,69 +1057,91 @@ thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
* This function should be called at statclock interrupt time
*/
int
-thread_add_ticks_intr(int user, uint ticks)
+thread_statclock(int user)
{
struct thread *td = curthread;
- struct kse *ke = td->td_kse;
- if (ke->ke_mailbox == NULL)
- return -1;
+ if (td->td_ksegrp->kg_numupcalls == 0)
+ return (-1);
if (user) {
/* Current always do via ast() */
- ke->ke_flags |= KEF_ASTPENDING;
- ke->ke_uuticks += ticks;
+ td->td_flags |= (TDF_ASTPENDING|TDF_USTATCLOCK);
+ td->td_uuticks += ticks;
} else {
if (td->td_mailbox != NULL)
td->td_usticks += ticks;
- else
- ke->ke_usticks += ticks;
+ else {
+ /* XXXKSE
+ * We will call thread_user_enter() for every
+ * kernel entry in the future, so if the thread mailbox
+ * is NULL, it must be a UTS kernel thread; don't account
+ * clock ticks for it.
+ */
+ }
}
- return 0;
+ return (0);
}
+/*
+ * Export user mode state clock ticks
+ */
static int
-thread_update_uticks(void)
+thread_update_usr_ticks(struct thread *td)
{
- struct thread *td = curthread;
struct proc *p = td->td_proc;
- struct kse *ke = td->td_kse;
struct kse_thr_mailbox *tmbx;
+ struct kse_upcall *ku;
caddr_t addr;
- uint uticks, sticks;
-
- if (ke->ke_mailbox == NULL)
- return 0;
+ uint uticks;
- uticks = ke->ke_uuticks;
- ke->ke_uuticks = 0;
- sticks = ke->ke_usticks;
- ke->ke_usticks = 0;
-#if 0
- tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_curthread));
-#else /* if user pointer arithmetic is ok in the kernel */
- tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
-#endif
+ if ((ku = td->td_upcall) == NULL)
+ return (-1);
+
+ tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
if ((tmbx == NULL) || (tmbx == (void *)-1))
- return 0;
+ return (-1);
+ uticks = td->td_uuticks;
+ td->td_uuticks = 0;
if (uticks) {
- addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks);
+ addr = (caddr_t)&tmbx->tm_uticks;
uticks += fuword(addr);
- if (suword(addr, uticks))
- goto bad;
+ if (suword(addr, uticks)) {
+ PROC_LOCK(p);
+ psignal(p, SIGSEGV);
+ PROC_UNLOCK(p);
+ return (-2);
+ }
}
- if (sticks) {
- addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks);
- sticks += fuword(addr);
- if (suword(addr, sticks))
- goto bad;
+ return (0);
+}
+
+/*
+ * Export kernel mode state clock ticks
+ */
+
+static int
+thread_update_sys_ticks(struct thread *td)
+{
+ struct proc *p = td->td_proc;
+ caddr_t addr;
+ int sticks;
+
+ if (td->td_mailbox == NULL)
+ return (-1);
+ if (td->td_usticks == 0)
+ return (0);
+ addr = (caddr_t)&td->td_mailbox->tm_sticks;
+ sticks = fuword(addr);
+ /* XXXKSE use XCHG instead */
+ sticks += td->td_usticks;
+ td->td_usticks = 0;
+ if (suword(addr, sticks)) {
+ PROC_LOCK(p);
+ psignal(p, SIGSEGV);
+ PROC_UNLOCK(p);
+ return (-2);
}
- return 0;
-bad:
- PROC_LOCK(p);
- psignal(p, SIGSEGV);
- PROC_UNLOCK(p);
- return -1;
+ return (0);
}
/*
@@ -1013,6 +1197,7 @@ thread_exit(void)
p->p_numthreads--;
TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
kg->kg_numthreads--;
+
/*
* The test below is NOT true if we are the
* sole exiting thread. P_STOPPED_SNGL is unset
@@ -1024,25 +1209,28 @@ thread_exit(void)
}
}
- /* Reassign this thread's KSE. */
+ /*
+ * Because each upcall structure has an owner thread,
+ * and the owner thread exits only when the process is
+ * exiting, an upcall to userland is no longer needed and
+ * deleting the upcall structure is safe here.
+ * So when all threads in a group have exited, all upcalls
+ * in the group are automatically freed.
+ */
+ if (td->td_upcall)
+ upcall_remove(td);
+
ke->ke_state = KES_UNQUEUED;
-
+ ke->ke_thread = NULL;
/*
* Decide what to do with the KSE attached to this thread.
- * XXX Possibly kse_reassign should do both cases as it already
- * does some of this.
*/
- if (ke->ke_flags & KEF_EXIT) {
- KASSERT((ke->ke_owner == td),
- ("thread_exit: KSE exiting with non-owner thread"));
- ke->ke_thread = NULL;
- td->td_kse = NULL;
+ if (ke->ke_flags & KEF_EXIT)
kse_unlink(ke);
- } else {
- TD_SET_EXITING(td); /* definitly not runnable */
+ else
kse_reassign(ke);
- }
PROC_UNLOCK(p);
+ td->td_kse = NULL;
td->td_state = TDS_INACTIVE;
td->td_proc = NULL;
td->td_ksegrp = NULL;
@@ -1090,10 +1278,12 @@ thread_link(struct thread *td, struct ksegrp *kg)
struct proc *p;
p = kg->kg_proc;
- td->td_state = TDS_INACTIVE;
- td->td_proc = p;
- td->td_ksegrp = kg;
- td->td_last_kse = NULL;
+ td->td_state = TDS_INACTIVE;
+ td->td_proc = p;
+ td->td_ksegrp = kg;
+ td->td_last_kse = NULL;
+ td->td_flags = 0;
+ td->td_kse = NULL;
LIST_INIT(&td->td_contested);
callout_init(&td->td_slpcallout, 1);
@@ -1101,116 +1291,139 @@ thread_link(struct thread *td, struct ksegrp *kg)
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
kg->kg_numthreads++;
- td->td_kse = NULL;
}
+/*
+ * Purge a ksegrp resource. When a ksegrp is preparing to
+ * exit, it calls this function.
+ */
+void
+kse_purge_group(struct thread *td)
+{
+ struct ksegrp *kg;
+ struct kse *ke;
+
+ kg = td->td_ksegrp;
+ KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
+ while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
+ KASSERT(ke->ke_state == KES_IDLE,
+ ("%s: wrong idle KSE state", __func__));
+ kse_unlink(ke);
+ }
+ KASSERT((kg->kg_kses == 1),
+ ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
+ KASSERT((kg->kg_numupcalls == 0),
+ ("%s: ksegrp still has %d upcall datas",
+ __func__, kg->kg_numupcalls));
+}
+
+/*
+ * Purge a process's KSE resource. When a process is preparing to
+ * exit, it calls kse_purge to release any extra KSE resources in
+ * the process.
+ */
void
kse_purge(struct proc *p, struct thread *td)
{
- /* XXXKSE think about this..
- may need to wake up threads on loan queue. */
struct ksegrp *kg;
+ struct kse *ke;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
mtx_lock_spin(&sched_lock);
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
+ /*
+ * There is no ownership for a KSE; after all threads
+ * in the group have exited, it is possible that some KSEs
+ * were left on the idle queue, so gc them now.
+ */
+ while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
+ KASSERT(ke->ke_state == KES_IDLE,
+ ("%s: wrong idle KSE state", __func__));
+ TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
+ kg->kg_idle_kses--;
+ TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
+ kg->kg_kses--;
+ kse_stash(ke);
+ }
KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
- ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
- ("wrong kg_kses"));
- if (kg != td->td_ksegrp) {
+ ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
+ ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
+ KASSERT((kg->kg_numupcalls == 0),
+ ("%s: ksegrp still has %d upcall datas",
+ __func__, kg->kg_numupcalls));
+
+ if (kg != td->td_ksegrp)
ksegrp_stash(kg);
- }
}
TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
p->p_numksegrps++;
mtx_unlock_spin(&sched_lock);
}
+/*
+ * This function is intended to be used to initialize a spare thread
+ * for upcall. Initialize thread's large data area outside sched_lock
+ * for thread_schedule_upcall().
+ */
+void
+thread_alloc_spare(struct thread *td, struct thread *spare)
+{
+ if (td->td_standin)
+ return;
+ if (spare == NULL)
+ spare = thread_alloc();
+ td->td_standin = spare;
+ bzero(&spare->td_startzero,
+ (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
+ spare->td_proc = td->td_proc;
+ /* Setup PCB and fork address */
+ cpu_set_upcall(spare, td->td_pcb);
+ /*
+ * XXXKSE do we really need this? (default values for the
+ * frame).
+ */
+ bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
+ spare->td_ucred = crhold(td->td_ucred);
+}
/*
* Create a thread and schedule it for upcall on the KSE given.
* Use our thread's standin so that we don't have to allocate one.
*/
struct thread *
-thread_schedule_upcall(struct thread *td, struct kse *ke)
+thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
struct thread *td2;
- int newkse;
mtx_assert(&sched_lock, MA_OWNED);
- newkse = (ke != td->td_kse);
/*
- * If the owner and kse are BOUND then that thread is planning to
- * go to userland and upcalls are not expected. So don't make one.
- * If it is not bound then make it so with the spare thread
- * anf then borrw back the KSE to allow us to complete some in-kernel
- * work. When we complete, the Bound thread will have the chance to
- * complete. This thread will sleep as planned. Hopefully there will
- * eventually be un unbound thread that can be converted to an
- * upcall to report the completion of this thread.
+ * Schedule an upcall thread on specified kse_upcall,
+ * the kse_upcall must be free.
+ * td must have a spare thread.
*/
-
+ KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
if ((td2 = td->td_standin) != NULL) {
td->td_standin = NULL;
} else {
- if (newkse)
- panic("no reserve thread when called with a new kse");
- /*
- * If called from (e.g.) sleep and we do not have
- * a reserve thread, then we've used it, so do not
- * create an upcall.
- */
+ panic("no reserve thread when scheduling an upcall");
return (NULL);
}
CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
td2, td->td_proc->p_pid, td->td_proc->p_comm);
- bzero(&td2->td_startzero,
- (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
bcopy(&td->td_startcopy, &td2->td_startcopy,
(unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
- thread_link(td2, ke->ke_ksegrp);
- cpu_set_upcall(td2, td->td_pcb);
-
- /*
- * XXXKSE do we really need this? (default values for the
- * frame).
- */
- bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
-
- /*
- * Bind the new thread to the KSE,
- * and if it's our KSE, lend it back to ourself
- * so we can continue running.
- */
- td2->td_ucred = crhold(td->td_ucred);
- td2->td_flags = TDF_UPCALLING; /* note: BOUND */
- td2->td_kse = ke;
- td2->td_state = TDS_CAN_RUN;
+ thread_link(td2, ku->ku_ksegrp);
+ /* Let the new thread become owner of the upcall */
+ ku->ku_owner = td2;
+ td2->td_upcall = ku;
+ td2->td_flags = TDF_UPCALLING;
+ td2->td_kse = NULL;
+ td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
- ke->ke_owner = td2;
- /*
- * If called from kse_reassign(), we are working on the current
- * KSE so fake that we borrowed it. If called from
- * kse_create(), don't, as we have a new kse too.
- */
- if (!newkse) {
- /*
- * This thread will be scheduled when the current thread
- * blocks, exits or tries to enter userspace, (which ever
- * happens first). When that happens the KSe will "revert"
- * to this thread in a BOUND manner. Since we are called
- * from msleep() this is going to be "very soon" in nearly
- * all cases.
- */
- TD_SET_LOAN(td2);
- } else {
- ke->ke_thread = td2;
- ke->ke_state = KES_THREAD;
- setrunqueue(td2);
- }
+ setrunqueue(td2);
return (td2); /* bogus.. should be a void function */
}
@@ -1222,14 +1435,16 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
struct thread *
signal_upcall(struct proc *p, int sig)
{
+#if 0
struct thread *td, *td2;
struct kse *ke;
sigset_t ss;
int error;
+#endif
PROC_LOCK_ASSERT(p, MA_OWNED);
return (NULL);
-
+#if 0
td = FIRST_THREAD_IN_PROC(p);
ke = td->td_kse;
PROC_UNLOCK(p);
@@ -1244,28 +1459,31 @@ return (NULL);
if (error)
return (NULL);
if (td->td_standin == NULL)
- td->td_standin = thread_alloc();
+ thread_alloc_spare(td, NULL);
mtx_lock_spin(&sched_lock);
td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
mtx_unlock_spin(&sched_lock);
return (td2);
+#endif
}
/*
- * setup done on the thread when it enters the kernel.
+ * Setup done on the thread when it enters the kernel.
* XXXKSE Presently only for syscalls but eventually all kernel entries.
*/
void
thread_user_enter(struct proc *p, struct thread *td)
{
- struct kse *ke;
+ struct ksegrp *kg;
+ struct kse_upcall *ku;
+ kg = td->td_ksegrp;
/*
* First check that we shouldn't just abort.
* But check if we are the single thread first!
* XXX p_singlethread not locked, but should be safe.
*/
- if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
+ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
thread_exit();
@@ -1278,43 +1496,37 @@ thread_user_enter(struct proc *p, struct thread *td)
* possibility that we could do this lazily (in kse_reassign()),
* but for now do it every time.
*/
- ke = td->td_kse;
- td->td_flags &= ~TDF_UNBOUND;
- if (ke->ke_mailbox != NULL) {
-#if 0
- td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_curthread));
-#else /* if user pointer arithmetic is ok in the kernel */
+ kg = td->td_ksegrp;
+ if (kg->kg_numupcalls) {
+ ku = td->td_upcall;
+ KASSERT(ku, ("%s: no upcall owned", __func__));
+ KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
td->td_mailbox =
- (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
-#endif
+ (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
if ((td->td_mailbox == NULL) ||
(td->td_mailbox == (void *)-1)) {
- td->td_mailbox = NULL; /* single thread it.. */
+ /* Don't schedule upcall when blocked */
+ td->td_mailbox = NULL;
mtx_lock_spin(&sched_lock);
- td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
+ td->td_flags &= ~TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
} else {
- /*
- * when thread limit reached, act like that the thread
- * has already done an upcall.
- */
if (p->p_numthreads > max_threads_per_proc) {
- if (td->td_standin != NULL) {
- thread_stash(td->td_standin);
- td->td_standin = NULL;
- }
+ /*
+ * Since the kernel thread limit has been reached,
+ * don't schedule an upcall anymore.
+ * XXXKSE This code is in fact not needed.
+ */
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_CAN_UNBIND;
+ mtx_unlock_spin(&sched_lock);
} else {
if (td->td_standin == NULL)
- td->td_standin = thread_alloc();
+ thread_alloc_spare(td, NULL);
+ mtx_lock_spin(&sched_lock);
+ td->td_flags |= TDF_CAN_UNBIND;
+ mtx_unlock_spin(&sched_lock);
}
- mtx_lock_spin(&sched_lock);
- td->td_flags |= TDF_CAN_UNBIND;
- mtx_unlock_spin(&sched_lock);
- KASSERT((ke->ke_owner == td),
- ("thread_user_enter: No starting owner "));
- ke->ke_owner = td;
- td->td_usticks = 0;
}
}
}
@@ -1335,165 +1547,90 @@ int
thread_userret(struct thread *td, struct trapframe *frame)
{
int error;
- int unbound;
- struct kse *ke;
+ struct kse_upcall *ku;
struct ksegrp *kg;
- struct thread *worktodo;
struct proc *p;
struct timespec ts;
- KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
- ("thread_userret: bad thread/kse pointers"));
- KASSERT((td == curthread),
- ("thread_userret: bad thread argument"));
-
-
- kg = td->td_ksegrp;
p = td->td_proc;
- error = 0;
- unbound = TD_IS_UNBOUND(td);
+ kg = td->td_ksegrp;
- mtx_lock_spin(&sched_lock);
- if ((worktodo = kg->kg_last_assigned))
- worktodo = TAILQ_NEXT(worktodo, td_runq);
- else
- worktodo = TAILQ_FIRST(&kg->kg_runq);
+ /* Nothing to do with non-threaded group/process */
+ if (td->td_ksegrp->kg_numupcalls == 0)
+ return (0);
/*
- * Permanently bound threads never upcall but they may
- * loan out their KSE at this point.
- * Upcalls imply bound.. They also may want to do some Philantropy.
- * Temporarily bound threads on the other hand either yield
- * to other work and transform into an upcall, or proceed back to
- * userland.
+ * State clock interrupt hit in userland, it
+ * is returning from interrupt, charge thread's
+ * userland time for UTS.
*/
+ if (td->td_flags & TDF_USTATCLOCK) {
+ thread_update_usr_ticks(td);
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_USTATCLOCK;
+ mtx_unlock_spin(&sched_lock);
+ }
+ /*
+ * Optimisation:
+ * This thread has not started any upcall.
+ * If there is no work to report other than ourself,
+ * then it can return directly to userland.
+ */
if (TD_CAN_UNBIND(td)) {
- td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
- if (!worktodo && (kg->kg_completed == NULL) &&
- !(td->td_kse->ke_flags & KEF_DOUPCALL)) {
- /*
- * This thread has not started any upcall.
- * If there is no work to report other than
- * ourself, then it can return direct to userland.
- */
-justreturn:
- mtx_unlock_spin(&sched_lock);
- thread_update_uticks();
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_CAN_UNBIND;
+ mtx_unlock_spin(&sched_lock);
+ if ((kg->kg_completed == NULL) &&
+ (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
+ thread_update_sys_ticks(td);
td->td_mailbox = NULL;
return (0);
}
- mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
- td->td_usticks = 0;
if (error) {
/*
- * As we are not running on a borrowed KSE,
- * failing to do the KSE operation just defaults
+ * Failing to do the KSE operation just defaults
* back to synchonous operation, so just return from
* the syscall.
*/
- goto justreturn;
+ return (0);
}
- mtx_lock_spin(&sched_lock);
/*
- * Turn ourself into a bound upcall.
- * We will rely on kse_reassign()
- * to make us run at a later time.
+ * There is something to report, and we own an upcall
+ * structure, so we can go to userland.
+ * Turn ourself into an upcall thread.
*/
+ mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_UPCALLING;
-
- /* there may be more work since we re-locked schedlock */
- if ((worktodo = kg->kg_last_assigned))
- worktodo = TAILQ_NEXT(worktodo, td_runq);
- else
- worktodo = TAILQ_FIRST(&kg->kg_runq);
- } else if (unbound) {
- /*
- * We are an unbound thread, looking to
- * return to user space. There must be another owner
- * of this KSE.
- * We are using a borrowed KSE. save state and exit.
- * kse_reassign() will recycle the kse as needed,
- */
mtx_unlock_spin(&sched_lock);
+ } else if (td->td_mailbox) {
error = thread_export_context(td);
- td->td_usticks = 0;
if (error) {
- /*
- * There is nothing we can do.
- * We just lose that context. We
- * probably should note this somewhere and send
- * the process a signal.
- */
PROC_LOCK(td->td_proc);
- psignal(td->td_proc, SIGSEGV);
mtx_lock_spin(&sched_lock);
- ke = td->td_kse;
/* possibly upcall with error? */
} else {
+ PROC_LOCK(td->td_proc);
+ mtx_lock_spin(&sched_lock);
/*
- * Don't make an upcall, just exit so that the owner
- * can get its KSE if it wants it.
- * Our context is already safely stored for later
- * use by the UTS.
+ * There are upcall threads waiting for
+ * work to do, wake one of them up.
+ * XXXKSE Maybe wake all of them up.
*/
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- ke = td->td_kse;
- }
- /*
- * If the owner is idling, we now have something for it
- * to report, so make it runnable.
- * If the owner is not an upcall, make an attempt to
- * ensure that at least one of any IDLED upcalls can
- * wake up.
- */
- if (ke->ke_owner->td_flags & TDF_UPCALLING) {
- TD_CLR_IDLE(ke->ke_owner);
- } else {
- FOREACH_KSE_IN_GROUP(kg, ke) {
- if (TD_IS_IDLE(ke->ke_owner)) {
- TD_CLR_IDLE(ke->ke_owner);
- setrunnable(ke->ke_owner);
- break;
- }
- }
+ if (kg->kg_upsleeps)
+ wakeup_one(&kg->kg_completed);
}
thread_exit();
+ /* NOTREACHED */
}
- /*
- * We ARE going back to userland with this KSE.
- * We are permanently bound. We may be an upcall.
- * If an upcall, check for threads that need to borrow the KSE.
- * Any other thread that comes ready after this missed the boat.
- */
- ke = td->td_kse;
- /*
- * If not upcalling, go back to userspace.
- * If we are, get the upcall set up.
- */
if (td->td_flags & TDF_UPCALLING) {
- if (worktodo) {
- /*
- * force a switch to more urgent 'in kernel'
- * work. Control will return to this thread
- * when there is no more work to do.
- * kse_reassign() will do that for us.
- */
- TD_SET_LOAN(td);
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch(); /* kse_reassign() will (re)find worktodo */
- }
- td->td_flags &= ~TDF_UPCALLING;
- if (ke->ke_flags & KEF_DOUPCALL)
- ke->ke_flags &= ~KEF_DOUPCALL;
- mtx_unlock_spin(&sched_lock);
-
+ KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind"));
+ ku = td->td_upcall;
/*
* There is no more work to do and we are going to ride
- * this thread/KSE up to userland as an upcall.
+ * this thread up to userland as an upcall.
* Do the last parts of the setup needed for the upcall.
*/
CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
@@ -1504,16 +1641,27 @@ justreturn:
* Will use Giant in cpu_thread_clean() because it uses
* kmem_free(kernel_map, ...)
*/
- cpu_set_upcall_kse(td, ke);
+ cpu_set_upcall_kse(td, ku);
- /*
+ /*
+ * Clear TDF_UPCALLING after setting the upcall context;
+ * the profiling code checks TDF_UPCALLING to avoid accounting
+ * a wrong user %EIP.
+ */
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_UPCALLING;
+ if (ku->ku_flags & KUF_DOUPCALL)
+ ku->ku_flags &= ~KUF_DOUPCALL;
+ mtx_unlock_spin(&sched_lock);
+
+ /*
* Unhook the list of completed threads.
* anything that completes after this gets to
* come in next time.
* Put the list of completed thread mailboxes on
* this KSE's mailbox.
*/
- error = thread_link_mboxes(kg, ke);
+ error = thread_link_mboxes(kg, ku);
if (error)
goto bad;
@@ -1524,34 +1672,33 @@ justreturn:
* it would be nice if this all happenned only on the first
* time through. (the scan for extra work etc.)
*/
-#if 0
- error = suword((caddr_t)ke->ke_mailbox +
- offsetof(struct kse_mailbox, km_curthread), 0);
-#else /* if user pointer arithmetic is ok in the kernel */
- error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
-#endif
- ke->ke_uuticks = ke->ke_usticks = 0;
+ error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
if (error)
goto bad;
+
+ /* Export current system time */
nanotime(&ts);
if (copyout(&ts,
- (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
+ (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts))) {
goto bad;
}
- } else {
- mtx_unlock_spin(&sched_lock);
}
/*
* Optimisation:
* Ensure that we have a spare thread available,
* for when we re-enter the kernel.
*/
- if (td->td_standin == NULL) {
- td->td_standin = thread_alloc();
- }
+ if (td->td_standin == NULL)
+ thread_alloc_spare(td, NULL);
- thread_update_uticks();
+ /*
+ * Clear thread mailbox first, then clear system tick count.
+ * The order is important because thread_statclock() uses the
+ * mailbox pointer to see if it is a userland thread or
+ * a UTS kernel thread.
+ */
td->td_mailbox = NULL;
+ td->td_usticks = 0;
return (0);
bad:
@@ -1563,6 +1710,7 @@ bad:
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
td->td_mailbox = NULL;
+ td->td_usticks = 0;
return (error); /* go sync */
}
@@ -1601,7 +1749,6 @@ thread_single(int force_exit)
if (force_exit == SINGLE_EXIT) {
p->p_flag |= P_SINGLE_EXIT;
- td->td_flags &= ~TDF_UNBOUND;
} else
p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
@@ -1624,17 +1771,16 @@ thread_single(int force_exit)
else
abortsleep(td2);
}
- if (TD_IS_IDLE(td2)) {
- TD_CLR_IDLE(td2);
- }
} else {
if (TD_IS_SUSPENDED(td2))
continue;
- /* maybe other inhibitted states too? */
+ /*
+ * maybe other inhibited states too?
+ * XXXKSE Is it totally safe to
+ * suspend a non-interruptible thread?
+ */
if (td2->td_inhibitors &
- (TDI_SLEEPING | TDI_SWAPPED |
- TDI_LOAN | TDI_IDLE |
- TDI_EXITING))
+ (TDI_SLEEPING | TDI_SWAPPED))
thread_suspend_one(td2);
}
}
@@ -1660,8 +1806,14 @@ thread_single(int force_exit)
mtx_lock(&Giant);
PROC_LOCK(p);
}
- if (force_exit == SINGLE_EXIT)
+ if (force_exit == SINGLE_EXIT) {
+ if (td->td_upcall) {
+ mtx_lock_spin(&sched_lock);
+ upcall_remove(td);
+ mtx_unlock_spin(&sched_lock);
+ }
kse_purge(p, td);
+ }
return (0);
}
@@ -1703,7 +1855,6 @@ thread_suspend_check(int return_instead)
{
struct thread *td;
struct proc *p;
- struct kse *ke;
struct ksegrp *kg;
td = curthread;
@@ -1735,16 +1886,6 @@ thread_suspend_check(int return_instead)
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
- /*
- * All threads should be exiting
- * Unless they are the active "singlethread".
- * destroy un-needed KSEs as we go..
- * KSEGRPS may implode too as #kses -> 0.
- */
- ke = td->td_kse;
- if (ke->ke_owner == td &&
- (kg->kg_kses >= kg->kg_numthreads ))
- ke->ke_flags |= KEF_EXIT;
thread_exit();
}
@@ -1752,14 +1893,6 @@ thread_suspend_check(int return_instead)
* When a thread suspends, it just
* moves to the processes's suspend queue
* and stays there.
- *
- * XXXKSE if TDF_BOUND is true
- * it will not release it's KSE which might
- * lead to deadlock if there are not enough KSEs
- * to complete all waiting threads.
- * Maybe be able to 'lend' it out again.
- * (lent kse's can not go back to userland?)
- * and can only be lent in STOPPED state.
*/
mtx_lock_spin(&sched_lock);
if ((p->p_flag & P_STOPPED_SIG) &&
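
One subtle piece of kern_kse.c above is how thread_export_context() publishes a completed thread context: the head of kg_completed is snapshotted without the proc lock (the write into the userland mailbox may fault, so it cannot happen under a lock), the snapshot is stored into the mailbox's tm_next with suword(), and the new head is committed under PROC_LOCK() only if the head has not changed in the meantime, otherwise the loop retries. A userland model of that publish loop, with simplified made-up types (an illustration, not the kernel code):

/*
 * Model of the publish loop: link the new node in front of the
 * snapshotted head outside the lock, then commit the new head under
 * the lock only if the head is unchanged, otherwise retry.
 */
#include <pthread.h>

struct mbox {
	struct mbox	*next;		/* tm_next analogue */
};

struct group {
	pthread_mutex_t	 lock;		/* proc lock analogue */
	struct mbox	*completed;	/* kg_completed analogue */
};

void
publish_completed(struct group *g, struct mbox *m)
{
	struct mbox *head;

	for (;;) {
		/*
		 * Snapshot and link outside the lock; in the kernel this
		 * step may fault on user memory, so the lock only guards
		 * the commit below.
		 */
		head = g->completed;
		m->next = head;
		pthread_mutex_lock(&g->lock);
		if (g->completed == head) {
			g->completed = m;	/* nothing slipped in: commit */
			pthread_mutex_unlock(&g->lock);
			return;
		}
		pthread_mutex_unlock(&g->lock);	/* head moved: retry */
	}
}
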
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d96547b..9000bc9 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -219,7 +219,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
{
int error;
- pid_t pid;
+ struct thread *thr;
int extflags, lockflags;
CTR5(KTR_LOCKMGR,
@@ -228,9 +228,9 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
error = 0;
if (td == NULL)
- pid = LK_KERNPROC;
+ thr = LK_KERNPROC;
else
- pid = td->td_proc->p_pid;
+ thr = td;
mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK) {
@@ -257,7 +257,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
* lock requests or upgrade requests ( but not the exclusive
* lock itself ).
*/
- if (lkp->lk_lockholder != pid) {
+ if (lkp->lk_lockholder != thr) {
lockflags = LK_HAVE_EXCL;
mtx_lock_spin(&sched_lock);
if (td != NULL && !(td->td_flags & TDF_DEADLKTREAT))
@@ -268,7 +268,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
break;
sharelock(lkp, 1);
#if defined(DEBUG_LOCKS)
- lkp->lk_slockholder = pid;
+ lkp->lk_slockholder = thr;
lkp->lk_sfilename = file;
lkp->lk_slineno = line;
lkp->lk_slockername = name;
@@ -283,14 +283,14 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
- KASSERT(lkp->lk_lockholder == pid && lkp->lk_exclusivecount != 0,
+ KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
("lockmgr: not holding exclusive lock "
- "(owner pid (%d) != pid (%d), exlcnt (%d) != 0",
- lkp->lk_lockholder, pid, lkp->lk_exclusivecount));
+ "(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
+ lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
sharelock(lkp, lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
- lkp->lk_lockholder = LK_NOPROC;
+ lkp->lk_lockholder = (struct thread *)LK_NOPROC;
if (lkp->lk_waitcount)
wakeup((void *)lkp);
break;
@@ -317,7 +317,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
- if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
+ if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0))
panic("lockmgr: upgrade exclusive lock");
shareunlock(lkp, 1);
/*
@@ -342,7 +342,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
if (error)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = pid;
+ lkp->lk_lockholder = thr;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@@ -364,7 +364,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
/* FALLTHROUGH exclusive request */
case LK_EXCLUSIVE:
- if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
+ if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
/*
* Recursive lock.
*/
@@ -398,7 +398,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
if (error)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = pid;
+ lkp->lk_lockholder = thr;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@@ -411,10 +411,10 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
- if (lkp->lk_lockholder != pid &&
+ if (lkp->lk_lockholder != thr &&
lkp->lk_lockholder != LK_KERNPROC) {
- panic("lockmgr: pid %d, not %s %d unlocking",
- pid, "exclusive lock holder",
+ panic("lockmgr: thread %p, not %s %p unlocking",
+ thr, "exclusive lock holder",
lkp->lk_lockholder);
}
if (lkp->lk_exclusivecount == 1) {
@@ -437,14 +437,14 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
- if (lkp->lk_lockholder == pid)
+ if (lkp->lk_lockholder == thr)
panic("lockmgr: draining against myself");
error = acquiredrain(lkp, extflags);
if (error)
break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
- lkp->lk_lockholder = pid;
+ lkp->lk_lockholder = thr;
lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
lkp->lk_filename = file;
@@ -589,7 +589,7 @@ lockstatus(lkp, td)
mtx_lock(lkp->lk_interlock);
if (lkp->lk_exclusivecount != 0) {
- if (td == NULL || lkp->lk_lockholder == td->td_proc->p_pid)
+ if (td == NULL || lkp->lk_lockholder == td)
lock_type = LK_EXCLUSIVE;
else
lock_type = LK_EXCLOTHER;
@@ -627,7 +627,7 @@ lockmgr_printinfo(lkp)
printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
lkp->lk_sharecount);
else if (lkp->lk_flags & LK_HAVE_EXCL)
- printf(" lock type %s: EXCL (count %d) by pid %d",
+ printf(" lock type %s: EXCL (count %d) by thread %p",
lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
if (lkp->lk_waitcount > 0)
printf(" with %d pending", lkp->lk_waitcount);
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 4ade890..cc7c493 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -671,32 +671,23 @@ calcru(p, up, sp, ip)
{
/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
- u_int64_t uut = 0, sut = 0, iut = 0;
- int s;
struct timeval tv;
struct bintime bt;
- struct kse *ke;
- struct ksegrp *kg;
mtx_assert(&sched_lock, MA_OWNED);
/* XXX: why spl-protect ? worst case is an off-by-one report */
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- /* we could accumulate per ksegrp and per process here*/
- FOREACH_KSE_IN_GROUP(kg, ke) {
- s = splstatclock();
- ut = ke->ke_uticks;
- st = ke->ke_sticks;
- it = ke->ke_iticks;
- splx(s);
-
- tt = ut + st + it;
- if (tt == 0) {
- st = 1;
- tt = 1;
- }
+ ut = p->p_uticks;
+ st = p->p_sticks;
+ it = p->p_iticks;
+
+ tt = ut + st + it;
+ if (tt == 0) {
+ st = 1;
+ tt = 1;
+ }
- if (ke == curthread->td_kse) {
+ if (curthread->td_proc == p) {
/*
* Adjust for the current time slice. This is actually fairly
* important since the error here is on the order of a time
@@ -705,64 +696,59 @@ calcru(p, up, sp, ip)
* processors also being 'current'.
*/
- binuptime(&bt);
- bintime_sub(&bt, PCPU_PTR(switchtime));
- bintime_add(&bt, &p->p_runtime);
- } else {
- bt = p->p_runtime;
- }
- bintime2timeval(&bt, &tv);
- tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
- ptu = ke->ke_uu + ke->ke_su + ke->ke_iu;
- if (tu < ptu || (int64_t)tu < 0) {
- /* XXX no %qd in kernel. Truncate. */
- printf("calcru: negative time of %ld usec for pid %d (%s)\n",
- (long)tu, p->p_pid, p->p_comm);
- tu = ptu;
- }
+ binuptime(&bt);
+ bintime_sub(&bt, PCPU_PTR(switchtime));
+ bintime_add(&bt, &p->p_runtime);
+ } else {
+ bt = p->p_runtime;
+ }
+ bintime2timeval(&bt, &tv);
+ tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+ ptu = p->p_uu + p->p_su + p->p_iu;
+ if (tu < ptu || (int64_t)tu < 0) {
+ /* XXX no %qd in kernel. Truncate. */
+ printf("calcru: negative time of %ld usec for pid %d (%s)\n",
+ (long)tu, p->p_pid, p->p_comm);
+ tu = ptu;
+ }
- /* Subdivide tu. */
- uu = (tu * ut) / tt;
- su = (tu * st) / tt;
- iu = tu - uu - su;
+ /* Subdivide tu. */
+ uu = (tu * ut) / tt;
+ su = (tu * st) / tt;
+ iu = tu - uu - su;
- /* Enforce monotonicity. */
- if (uu < ke->ke_uu || su < ke->ke_su || iu < ke->ke_iu) {
- if (uu < ke->ke_uu)
- uu = ke->ke_uu;
- else if (uu + ke->ke_su + ke->ke_iu > tu)
- uu = tu - ke->ke_su - ke->ke_iu;
- if (st == 0)
- su = ke->ke_su;
- else {
- su = ((tu - uu) * st) / (st + it);
- if (su < ke->ke_su)
- su = ke->ke_su;
- else if (uu + su + ke->ke_iu > tu)
- su = tu - uu - ke->ke_iu;
- }
- KASSERT(uu + su + ke->ke_iu <= tu,
- ("calcru: monotonisation botch 1"));
- iu = tu - uu - su;
- KASSERT(iu >= ke->ke_iu,
- ("calcru: monotonisation botch 2"));
- }
- ke->ke_uu = uu;
- ke->ke_su = su;
- ke->ke_iu = iu;
- uut += uu;
- sut += su;
- iut += iu;
-
- } /* end kse loop */
- } /* end kseg loop */
- up->tv_sec = uut / 1000000;
- up->tv_usec = uut % 1000000;
- sp->tv_sec = sut / 1000000;
- sp->tv_usec = sut % 1000000;
+ /* Enforce monotonicity. */
+ if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
+ if (uu < p->p_uu)
+ uu = p->p_uu;
+ else if (uu + p->p_su + p->p_iu > tu)
+ uu = tu - p->p_su - p->p_iu;
+ if (st == 0)
+ su = p->p_su;
+ else {
+ su = ((tu - uu) * st) / (st + it);
+ if (su < p->p_su)
+ su = p->p_su;
+ else if (uu + su + p->p_iu > tu)
+ su = tu - uu - p->p_iu;
+ }
+ KASSERT(uu + su + p->p_iu <= tu,
+ ("calcru: monotonisation botch 1"));
+ iu = tu - uu - su;
+ KASSERT(iu >= p->p_iu,
+ ("calcru: monotonisation botch 2"));
+ }
+ p->p_uu = uu;
+ p->p_su = su;
+ p->p_iu = iu;
+
+ up->tv_sec = uu / 1000000;
+ up->tv_usec = uu % 1000000;
+ sp->tv_sec = su / 1000000;
+ sp->tv_usec = su % 1000000;
if (ip != NULL) {
- ip->tv_sec = iut / 1000000;
- ip->tv_usec = iut % 1000000;
+ ip->tv_sec = iu / 1000000;
+ ip->tv_usec = iu % 1000000;
}
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index c84cbd1..a6ecbf8 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1525,9 +1525,6 @@ psignal(p, sig)
if (TD_IS_SLEEPING(td) &&
(td->td_flags & TDF_SINTR))
thread_suspend_one(td);
- else if (TD_IS_IDLE(td)) {
- thread_suspend_one(td);
- }
}
if (p->p_suspcount == p->p_numthreads) {
mtx_unlock_spin(&sched_lock);
@@ -1640,9 +1637,6 @@ tdsignal(struct thread *td, int sig, sig_t action)
cv_abort(td);
else
abortsleep(td);
- } else if (TD_IS_IDLE(td)) {
- TD_CLR_IDLE(td);
- setrunnable(td);
}
#ifdef SMP
else {
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 6651f70..5cefb1c 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -111,7 +111,7 @@ static void runq_readjust(struct runq *rq, struct kse *ke);
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
/*
- * Select the KSE that will be run next. From that find the thread, and x
+ * Select the KSE that will be run next. From that find the thread, and
* remove it from the KSEGRP's run queue. If there is thread clustering,
* this will be what does it.
*/
@@ -127,7 +127,7 @@ retry:
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
- if (TD_IS_UNBOUND(td)) {
+ if (td->td_proc->p_flag & P_KSES) {
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned = TAILQ_PREV(td,
@@ -158,9 +158,8 @@ retry:
}
/*
- * Given a KSE (now surplus or at least loanable), either assign a new
- * runable thread to it (and put it in the run queue) or put it in
- * the ksegrp's idle KSE list.
+ * Given a surplus KSE, either assign a new runnable thread to it
+ * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
* Or maybe give it back to its owner if it's been loaned.
* Assumes that the original thread is either not runnable or
* already on the run queue
@@ -170,108 +169,54 @@ kse_reassign(struct kse *ke)
{
struct ksegrp *kg;
struct thread *td;
- struct thread *owner;
struct thread *original;
- int loaned;
+ struct kse_upcall *ku;
- KASSERT((ke->ke_owner), ("reassigning KSE with no owner"));
- KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)),
- ("reassigning KSE with no or runnable thread"));
mtx_assert(&sched_lock, MA_OWNED);
- kg = ke->ke_ksegrp;
- owner = ke->ke_owner;
- loaned = TD_LENDER(owner);
original = ke->ke_thread;
-
- if (TD_CAN_UNBIND(original) && (original->td_standin)) {
- KASSERT((owner == original),
- ("Early thread borrowing?"));
+ KASSERT(original == NULL || TD_IS_INHIBITED(original),
+ ("reassigning KSE with runnable thread"));
+ kg = ke->ke_ksegrp;
+ if (original) {
/*
- * The outgoing thread is "threaded" and has never
- * scheduled an upcall.
- * decide whether this is a short or long term event
- * and thus whether or not to schedule an upcall.
- * if it is a short term event, just suspend it in
+ * If the outgoing thread is in a threaded group and has never
+ * scheduled an upcall, decide whether this is a short
+ * or long term event and thus whether or not to schedule
+ * an upcall.
+ * If it is a short term event, just suspend it in
* a way that takes its KSE with it.
* Select the events for which we want to schedule upcalls.
* For now it's just sleep.
- * Other threads that still have not fired an upcall
- * are held to their KSE using the temorary Binding.
+ * XXXKSE eventually almost any inhibition could do.
*/
- if (TD_ON_SLEEPQ(original)) {
- /*
- * An bound thread that can still unbind itself
- * has been scheduled out.
- * If it is sleeping, then we need to schedule an
- * upcall.
- * XXXKSE eventually almost any inhibition could do.
+ if (TD_CAN_UNBIND(original) && (original->td_standin) &&
+ TD_ON_SLEEPQ(original)) {
+ /*
+ * Release ownership of the upcall and schedule an upcall
+ * thread; the new upcall thread becomes the owner of
+ * the upcall structure.
*/
+ ku = original->td_upcall;
+ ku->ku_owner = NULL;
+ original->td_upcall = NULL;
original->td_flags &= ~TDF_CAN_UNBIND;
- original->td_flags |= TDF_UNBOUND;
- thread_schedule_upcall(original, ke);
- owner = ke->ke_owner;
- loaned = 1;
+ thread_schedule_upcall(original, ku);
}
+ original->td_kse = NULL;
}
- /*
- * If the current thread was borrowing, then make things consistent
- * by giving it back to the owner for the moment. The original thread
- * must be unbound and have already used its chance for
- * firing off an upcall. Threads that have not yet made an upcall
- * can not borrow KSEs.
- */
- if (loaned) {
- TD_CLR_LOAN(owner);
- ke->ke_thread = owner;
- original->td_kse = NULL; /* give it amnesia */
- /*
- * Upcalling threads have lower priority than all
- * in-kernel threads, However threads that have loaned out
- * their KSE and are NOT upcalling have the priority that
- * they have. In other words, only look for other work if
- * the owner is not runnable, OR is upcalling.
- */
- if (TD_CAN_RUN(owner) &&
- ((owner->td_flags & TDF_UPCALLING) == 0)) {
- setrunnable(owner);
- CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
- ke, owner);
- return;
- }
- }
-
/*
- * Either the owner is not runnable, or is an upcall.
* Find the first unassigned thread
- * If there is a 'last assigned' then see what's next.
- * otherwise look at what is first.
*/
- if ((td = kg->kg_last_assigned)) {
+ if ((td = kg->kg_last_assigned) != NULL)
td = TAILQ_NEXT(td, td_runq);
- } else {
+ else
td = TAILQ_FIRST(&kg->kg_runq);
- }
/*
- * If we found one assign it the kse, otherwise idle the kse.
+ * If we found one, assign it the kse, otherwise idle the kse.
*/
if (td) {
- /*
- * Assign the new thread to the KSE.
- * and make the KSE runnable again,
- */
- if (TD_IS_BOUND(owner)) {
- /*
- * If there is a reason to keep the previous
- * owner, do so.
- */
- TD_SET_LOAN(owner);
- } else {
- /* otherwise, cut it free */
- ke->ke_owner = td;
- owner->td_kse = NULL;
- }
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
@@ -280,43 +225,11 @@ kse_reassign(struct kse *ke)
return;
}
- /*
- * Now handle any waiting upcall.
- * Since we didn't make them runnable before.
- */
- if (TD_CAN_RUN(owner)) {
- setrunnable(owner);
- CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
- ke, owner);
- return;
- }
-
- /*
- * It is possible that this is the last thread in the group
- * because the KSE is being shut down or the process
- * is exiting.
- */
- if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) {
- ke->ke_thread = NULL;
- owner->td_kse = NULL;
- kse_unlink(ke);
- return;
- }
-
- /*
- * At this stage all we know is that the owner
- * is the same as the 'active' thread in the KSE
- * and that it is
- * Presently NOT loaned out.
- * Put it on the loanable queue. Make it fifo
- * so that long term sleepers donate their KSE's first.
- */
- KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender"));
- ke->ke_state = KES_THREAD;
- ke->ke_flags |= KEF_ONLOANQ;
- TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist);
- kg->kg_loan_kses++;
- CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
+ ke->ke_state = KES_IDLE;
+ ke->ke_thread = NULL;
+ TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
+ kg->kg_idle_kses++;
+ CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
return;
}
@@ -325,7 +238,7 @@ kse_reassign(struct kse *ke)
* Remove a thread from its KSEGRP's run queue.
* This in turn may remove it from a KSE if it was already assigned
* to one, possibly causing a new thread to be assigned to the KSE
- * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
+ * and the KSE getting a new priority.
*/
static void
remrunqueue(struct thread *td)
@@ -335,17 +248,16 @@ remrunqueue(struct thread *td)
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
+ KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
kg = td->td_ksegrp;
ke = td->td_kse;
- /*
- * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
- * threads are BOUND.
- */
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
- if (TD_IS_BOUND(td)) {
+ /*
+ * If it is not a threaded process, take the shortcut.
+ */
+ if ((td->td_proc->p_flag & P_KSES) == 0) {
/* Bring its kse with it, leave the thread attached */
sched_rem(ke);
ke->ke_state = KES_THREAD;
@@ -363,7 +275,7 @@ remrunqueue(struct thread *td)
sched_rem(ke);
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
- KASSERT((td2 != NULL), ("last assigned has wrong value "));
+ KASSERT((td2 != NULL), ("last assigned has wrong value"));
if (td2 == td)
kg->kg_last_assigned = td3;
kse_reassign(ke);
@@ -381,14 +293,14 @@ adjustrunqueue( struct thread *td, int newpri)
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT ((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
- /*
- * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
- * threads are BOUND.
- */
+ KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
+
ke = td->td_kse;
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
- if (TD_IS_BOUND(td)) {
+ /*
+ * If it is not a threaded process, take the shortcut.
+ */
+ if ((td->td_proc->p_flag & P_KSES) == 0) {
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@@ -397,9 +309,8 @@ adjustrunqueue( struct thread *td, int newpri)
}
return;
}
- /*
- * An unbound thread. This is not optimised yet.
- */
+
+ /* It is a threaded process */
kg = td->td_ksegrp;
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
@@ -439,48 +350,17 @@ setrunqueue(struct thread *td)
sched_add(td->td_kse);
return;
}
- /*
- * If the process is threaded but the thread is bound then
- * there is still a little extra to do re. KSE loaning.
- */
- if (TD_IS_BOUND(td)) {
- KASSERT((td->td_kse != NULL),
- ("queueing BAD thread to run queue"));
- ke = td->td_kse;
- KASSERT((ke->ke_owner == ke->ke_thread),
- ("setrunqueue: Hey KSE loaned out"));
- if (ke->ke_flags & KEF_ONLOANQ) {
- ke->ke_flags &= ~KEF_ONLOANQ;
- TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
- kg->kg_loan_kses--;
- }
- sched_add(td->td_kse);
- return;
- }
- /*
- * Ok, so we are threading with this thread.
- * We don't have a KSE, see if we can get one..
- */
tda = kg->kg_last_assigned;
if ((ke = td->td_kse) == NULL) {
- /*
- * We will need a KSE, see if there is one..
- * First look for a free one, before getting desperate.
- * If we can't get one, our priority is not high enough..
- * that's ok..
- */
- if (kg->kg_loan_kses) {
+ if (kg->kg_idle_kses) {
/*
- * Failing that see if we can borrow one.
+ * There is a free one, so it's ours for the asking.
*/
- ke = TAILQ_FIRST(&kg->kg_lq);
- TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
- ke->ke_flags &= ~KEF_ONLOANQ;
+ ke = TAILQ_FIRST(&kg->kg_iq);
+ TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_THREAD;
- TD_SET_LOAN(ke->ke_owner);
- ke->ke_thread = NULL;
- kg->kg_loan_kses--;
+ kg->kg_idle_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
/*
* None free, but there is one we can commandeer.
@@ -495,11 +375,7 @@ setrunqueue(struct thread *td)
} else {
/*
* Temporarily disassociate so it looks like the other cases.
- * If the owner wasn't lending before, then it is now..
*/
- if (!TD_LENDER(ke->ke_owner)) {
- TD_SET_LOAN(ke->ke_owner);
- }
ke->ke_thread = NULL;
td->td_kse = NULL;
}
@@ -831,6 +707,7 @@ thread_sanity_check(struct thread *td, char *string)
if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
panc(string, "where on earth does lastassigned point?");
}
+#if 0
FOREACH_THREAD_IN_GROUP(kg, td2) {
if (((td2->td_flags & TDF_UNBOUND) == 0) &&
(TD_ON_RUNQ(td2))) {
@@ -840,6 +717,7 @@ thread_sanity_check(struct thread *td, char *string)
}
}
}
+#endif
#if 0
if ((unassigned + assigned) != kg->kg_runnable) {
panc(string, "wrong number in runnable");
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 78bce30..883f4d1 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -63,6 +63,7 @@
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
+static uma_zone_t upcall_zone;
/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
@@ -78,16 +79,52 @@ static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
&max_groups_per_proc, 0, "Limit on thread groups per proc");
+static int virtual_cpu;
+
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
+TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
-struct mtx zombie_thread_lock;
-MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
- "zombie_thread_lock", MTX_SPIN);
+TAILQ_HEAD(, kse_upcall) zombie_upcalls =
+ TAILQ_HEAD_INITIALIZER(zombie_upcalls);
+struct mtx kse_zombie_lock;
+MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
static void kse_purge(struct proc *p, struct thread *td);
+static void kse_purge_group(struct thread *td);
+static int thread_update_usr_ticks(struct thread *td);
+static int thread_update_sys_ticks(struct thread *td);
+static void thread_alloc_spare(struct thread *td, struct thread *spare);
+
+static int
+sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
+{
+ int error, new_val;
+ int def_val;
+
+#ifdef SMP
+ def_val = mp_ncpus;
+#else
+ def_val = 1;
+#endif
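+ /* A value of 0 means "track the physical cpu count". */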
+ if (virtual_cpu == 0)
+ new_val = def_val;
+ else
+ new_val = virtual_cpu;
+ error = sysctl_handle_int(oidp, &new_val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (new_val < 0)
+ return (EINVAL);
+ virtual_cpu = new_val;
+ return (0);
+}
+
+/* DEBUG ONLY */
+SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
+ 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
+ "debug virtual cpus");
/*
* Prepare a thread for use.
@@ -99,7 +136,6 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
- td->td_flags |= TDF_UNBOUND;
}
/*
@@ -161,6 +197,7 @@ thread_fini(void *mem, int size)
td = (struct thread *)mem;
pmap_dispose_thread(td);
}
+
/*
* Initialize type-stable parts of a kse (when newly created).
*/
@@ -172,6 +209,7 @@ kse_init(void *mem, int size)
ke = (struct kse *)mem;
ke->ke_sched = (struct ke_sched *)&ke[1];
}
+
/*
* Initialize type-stable parts of a ksegrp (when newly created).
*/
@@ -185,7 +223,7 @@ ksegrp_init(void *mem, int size)
}
/*
- * KSE is linked onto the idle queue.
+ * KSE is linked into its kse group.
*/
void
kse_link(struct kse *ke, struct ksegrp *kg)
@@ -194,12 +232,12 @@ kse_link(struct kse *ke, struct ksegrp *kg)
TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
kg->kg_kses++;
- ke->ke_state = KES_UNQUEUED;
+ ke->ke_state = KES_UNQUEUED;
ke->ke_proc = p;
ke->ke_ksegrp = kg;
- ke->ke_owner = NULL;
ke->ke_thread = NULL;
- ke->ke_oncpu = NOCPU;
+ ke->ke_oncpu = NOCPU;
+ ke->ke_flags = 0;
}
void
@@ -209,11 +247,13 @@ kse_unlink(struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
-
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- if (--kg->kg_kses == 0) {
- ksegrp_unlink(kg);
+ if (ke->ke_state == KES_IDLE) {
+ TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
+ kg->kg_idle_kses--;
}
+ if (--kg->kg_kses == 0)
+ ksegrp_unlink(kg);
/*
* Aggregate stats from the KSE
*/
@@ -228,15 +268,20 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
- kg->kg_proc = p;
-/* the following counters are in the -zero- section and may not need clearing */
+ TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */
+ TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
+ kg->kg_proc = p;
+ /*
+ * the following counters are in the -zero- section
+ * and may not need clearing
+ */
kg->kg_numthreads = 0;
- kg->kg_runnable = 0;
- kg->kg_kses = 0;
- kg->kg_loan_kses = 0;
- kg->kg_runq_kses = 0; /* XXXKSE change name */
-/* link it in now that it's consistent */
+ kg->kg_runnable = 0;
+ kg->kg_kses = 0;
+ kg->kg_runq_kses = 0; /* XXXKSE change name */
+ kg->kg_idle_kses = 0;
+ kg->kg_numupcalls = 0;
+ /* link it in now that it's consistent */
p->p_numksegrps++;
TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}
@@ -247,9 +292,11 @@ ksegrp_unlink(struct ksegrp *kg)
struct proc *p;
mtx_assert(&sched_lock, MA_OWNED);
+ KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
+ KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
+ KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
+
p = kg->kg_proc;
- KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
- ("kseg_unlink: residual threads or KSEs"));
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
/*
@@ -258,13 +305,63 @@ ksegrp_unlink(struct ksegrp *kg)
ksegrp_stash(kg);
}
+struct kse_upcall *
+upcall_alloc(void)
+{
+ struct kse_upcall *ku;
+
+ ku = uma_zalloc(upcall_zone, 0);
+ bzero(ku, sizeof(*ku));
+ return (ku);
+}
+
+void
+upcall_free(struct kse_upcall *ku)
+{
+
+ uma_zfree(upcall_zone, ku);
+}
+
+void
+upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
+{
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
+ ku->ku_ksegrp = kg;
+ kg->kg_numupcalls++;
+}
+
+void
+upcall_unlink(struct kse_upcall *ku)
+{
+ struct ksegrp *kg = ku->ku_ksegrp;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
+ TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
+ kg->kg_numupcalls--;
+ upcall_stash(ku);
+}
+
+void
+upcall_remove(struct thread *td)
+{
+
+ if (td->td_upcall) {
+ td->td_upcall->ku_owner = NULL;
+ upcall_unlink(td->td_upcall);
+ td->td_upcall = 0;
+ }
+}
+
/*
- * for a newly created process,
- * link up a the structure and its initial threads etc.
+ * For a newly created process,
+ * link up all the structures and its initial threads etc.
*/
void
proc_linkup(struct proc *p, struct ksegrp *kg,
- struct kse *ke, struct thread *td)
+ struct kse *ke, struct thread *td)
{
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
@@ -278,6 +375,11 @@ proc_linkup(struct proc *p, struct ksegrp *kg,
thread_link(td, kg);
}
+/*
+struct kse_thr_interrupt_args {
+ struct kse_thr_mailbox * tmbx;
+};
+*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
@@ -285,10 +387,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
struct thread *td2;
p = td->td_proc;
- /* KSE-enabled processes only, please. */
- if (!(p->p_flag & P_KSES))
- return (EINVAL);
- if (uap->tmbx == NULL)
+ if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL))
return (EINVAL);
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td2) {
@@ -299,7 +398,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
cv_abort(td2);
else
abortsleep(td2);
- }
+ }
mtx_unlock_spin(&sched_lock);
return (0);
}
@@ -308,6 +407,11 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
return (ESRCH);
}
+/*
+struct kse_exit_args {
+ register_t dummy;
+};
+*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
@@ -316,11 +420,16 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
struct kse *ke;
p = td->td_proc;
- /* Only UTS can do the syscall */
- if (!(p->p_flag & P_KSES) || (td->td_mailbox != NULL))
+ /*
+ * Only the UTS can call this syscall, and the current group
+ * must be a threaded group.
+ */
+ if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
return (EINVAL);
+ KASSERT((td->td_upcall != NULL), ("%s: does not own an upcall", __func__));
+
kg = td->td_ksegrp;
- /* serialize killing kse */
+ /* Serialize killing KSE */
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
@@ -329,14 +438,17 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
return (EDEADLK);
}
ke = td->td_kse;
+ upcall_remove(td);
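+ /*
+ * If this was the last thread in the process, tear down
+ * KSE mode entirely; if it was only the last thread in its
+ * group, shut the group down instead.
+ */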
if (p->p_numthreads == 1) {
- ke->ke_flags &= ~KEF_DOUPCALL;
- ke->ke_mailbox = NULL;
+ kse_purge(p, td);
p->p_flag &= ~P_KSES;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
- ke->ke_flags |= KEF_EXIT;
+ if (kg->kg_numthreads == 1) { /* Shutdown a group */
+ kse_purge_group(td);
+ ke->ke_flags |= KEF_EXIT;
+ }
thread_exit();
/* NOTREACHED */
}
@@ -345,10 +457,15 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
/*
* Either becomes an upcall or waits for an awakening event and
- * THEN becomes an upcall. Only error cases return.
+ * then becomes an upcall. Only error cases return.
*/
+/*
+struct kse_release_args {
+ register_t dummy;
+};
+*/
int
-kse_release(struct thread * td, struct kse_release_args * uap)
+kse_release(struct thread *td, struct kse_release_args *uap)
{
struct proc *p;
struct ksegrp *kg;
@@ -356,28 +473,25 @@ kse_release(struct thread * td, struct kse_release_args * uap)
p = td->td_proc;
kg = td->td_ksegrp;
/*
- * kse must have a mailbox ready for upcall, and only UTS can
- * do the syscall.
- */
- if (!(p->p_flag & P_KSES) ||
- (td->td_mailbox != NULL) ||
- (td->td_kse->ke_mailbox == NULL))
+ * Only the UTS can call this syscall, and the current group
+ * must be a threaded group.
+ */
+ if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
return (EINVAL);
+ KASSERT((td->td_upcall != NULL), ("%s: does not own an upcall", __func__));
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
/* Change OURSELF to become an upcall. */
- td->td_flags = TDF_UPCALLING; /* BOUND */
- if (!(td->td_kse->ke_flags & (KEF_DOUPCALL|KEF_ASTPENDING)) &&
+ td->td_flags = TDF_UPCALLING;
+ if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
(kg->kg_completed == NULL)) {
- /*
- * The KSE will however be lendable.
- */
- TD_SET_IDLE(td);
- PROC_UNLOCK(p);
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
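+ /*
+ * Nothing to report yet; sleep until a wakeup on
+ * kg_completed arrives (e.g. from kse_wakeup()).
+ */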
+ kg->kg_upsleeps++;
mtx_unlock_spin(&sched_lock);
+ msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause",
+ NULL);
+ kg->kg_upsleeps--;
+ PROC_UNLOCK(p);
} else {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
@@ -392,61 +506,59 @@ int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
struct proc *p;
- struct kse *ke;
struct ksegrp *kg;
+ struct kse_upcall *ku;
struct thread *td2;
p = td->td_proc;
td2 = NULL;
+ ku = NULL;
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
- return EINVAL;
+ return (EINVAL);
+ PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_KSE_IN_GROUP(kg, ke) {
- if (ke->ke_mailbox != uap->mbx)
- continue;
- td2 = ke->ke_owner;
- KASSERT((td2 != NULL),("KSE with no owner"));
- break;
+ FOREACH_UPCALL_IN_GROUP(kg, ku) {
+ if (ku->ku_mailbox == uap->mbx)
+ break;
}
- if (td2) {
+ if (ku)
break;
- }
}
} else {
- /*
- * look for any idle KSE to resurrect.
- */
kg = td->td_ksegrp;
- FOREACH_KSE_IN_GROUP(kg, ke) {
- td2 = ke->ke_owner;
- KASSERT((td2 != NULL),("KSE with no owner2"));
- if (TD_IS_IDLE(td2))
- break;
+ if (kg->kg_upsleeps) {
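+ /*
+ * An upcall thread is sleeping in kse_release();
+ * waking it is enough, no new upcall is needed.
+ */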
+ wakeup_one(&kg->kg_completed);
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ return (0);
}
- KASSERT((td2 != NULL), ("no thread(s)"));
+ ku = TAILQ_FIRST(&kg->kg_upcalls);
}
- if (td2) {
- if (TD_IS_IDLE(td2)) {
- TD_CLR_IDLE(td2);
- setrunnable(td2);
- } else if (td != td2) {
- /* guarantee do an upcall ASAP */
- td2->td_kse->ke_flags |= KEF_DOUPCALL;
+ if (ku) {
+ if ((td2 = ku->ku_owner) == NULL) {
+ panic("%s: no owner", __func__);
+ } else if (TD_ON_SLEEPQ(td2) &&
+ (td2->td_wchan == &kg->kg_completed)) {
+ abortsleep(td2);
+ } else {
+ ku->ku_flags |= KUF_DOUPCALL;
}
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
return (0);
}
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
return (ESRCH);
}
/*
* No new KSEG: first call: use current KSE, don't schedule an upcall
- * All other situations, do allocate a new KSE and schedule an upcall on it.
+ * In all other situations, allocate the maximum number of new KSEs and schedule an upcall.
*/
/* struct kse_create_args {
struct kse_mailbox *mbx;
@@ -456,112 +568,140 @@ int
kse_create(struct thread *td, struct kse_create_args *uap)
{
struct kse *newke;
- struct kse *ke;
struct ksegrp *newkg;
struct ksegrp *kg;
struct proc *p;
struct kse_mailbox mbx;
- int err;
+ struct kse_upcall *newku;
+ int err, ncpus;
p = td->td_proc;
if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
return (err);
- p->p_flag |= P_KSES; /* easier to just set it than to test and set */
+ /* Too bad the kernel doesn't always have a cpu counter. */
+#ifdef SMP
+ ncpus = mp_ncpus;
+#else
+ ncpus = 1;
+#endif
+ if (thread_debug && virtual_cpu != 0)
+ ncpus = virtual_cpu;
+
+ /* Easier to just set it than to test and set */
+ p->p_flag |= P_KSES;
kg = td->td_ksegrp;
if (uap->newgroup) {
+ /* There is a race condition here, but it is cheap to tolerate */
if (p->p_numksegrps >= max_groups_per_proc)
return (EPROCLIM);
/*
* If we want a new KSEGRP it doesn't matter whether
* we have already fired up KSE mode before or not.
- * We put the process in KSE mode and create a new KSEGRP
- * and KSE. If our KSE has not got a mailbox yet then
- * that doesn't matter, just leave it that way. It will
- * ensure that this thread stay BOUND. It's possible
- * that the call came form a threaded library and the main
- * program knows nothing of threads.
+ * We put the process in KSE mode and create a new KSEGRP.
*/
newkg = ksegrp_alloc();
bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
- kg_startzero, kg_endzero));
+ kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
- newke = kse_alloc();
+ mtx_lock_spin(&sched_lock);
+ ksegrp_link(newkg, p);
+ if (p->p_numksegrps >= max_groups_per_proc) {
+ ksegrp_unlink(newkg);
+ mtx_unlock_spin(&sched_lock);
+ return (EPROCLIM);
+ }
+ mtx_unlock_spin(&sched_lock);
} else {
- /*
- * Otherwise, if we have already set this KSE
- * to have a mailbox, we want to make another KSE here,
- * but only if there are not already the limit, which
- * is 1 per CPU max.
- *
- * If the current KSE doesn't have a mailbox we just use it
- * and give it one.
- *
- * Because we don't like to access
- * the KSE outside of schedlock if we are UNBOUND,
- * (because it can change if we are preempted by an interrupt)
- * we can deduce it as having a mailbox if we are UNBOUND,
- * and only need to actually look at it if we are BOUND,
- * which is safe.
+ newkg = kg;
+ }
+
+ /*
+ * Creating more upcalls than the number of physical cpus does
+ * not help performance.
+ */
+ if (newkg->kg_numupcalls >= ncpus)
+ return (EPROCLIM);
+
+ if (newkg->kg_numupcalls == 0) {
+ /*
+ * Initialize the KSE group, optimized for MP.
+ * Create as many KSEs as there are physical cpus; this increases
+ * concurrency even if userland is not MP safe and can only run
+ * on a single CPU (true for early versions of libpthread).
+ * In an ideal world, every physical cpu should execute a thread.
+ * If there are enough KSEs, threads in the kernel can be
+ * executed in parallel on different cpus at full speed;
+ * concurrency in the kernel shouldn't be restricted by the
+ * number of upcalls userland provides.
+ * Adding more upcall structures only increases concurrency
+ * in userland.
+ * The highest performance configuration is:
+ * N kses = N upcalls = N physical cpus
*/
- if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
- if (thread_debug == 0) { /* if debugging, allow more */
-#ifdef SMP
- if (kg->kg_kses > mp_ncpus)
-#endif
- return (EPROCLIM);
- }
+ while (newkg->kg_kses < ncpus) {
newke = kse_alloc();
- } else {
- newke = NULL;
- }
- newkg = NULL;
- }
- if (newke) {
- bzero(&newke->ke_startzero, RANGEOF(struct kse,
- ke_startzero, ke_endzero));
+ bzero(&newke->ke_startzero, RANGEOF(struct kse,
+ ke_startzero, ke_endzero));
#if 0
- bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
- RANGEOF(struct kse, ke_startcopy, ke_endcopy));
+ mtx_lock_spin(&sched_lock);
+ bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
+ RANGEOF(struct kse, ke_startcopy, ke_endcopy));
+ mtx_unlock_spin(&sched_lock);
#endif
- /* For the first call this may not have been set */
- if (td->td_standin == NULL) {
- td->td_standin = thread_alloc();
- }
- mtx_lock_spin(&sched_lock);
- if (newkg) {
- if (p->p_numksegrps >= max_groups_per_proc) {
- mtx_unlock_spin(&sched_lock);
- ksegrp_free(newkg);
- kse_free(newke);
- return (EPROCLIM);
- }
- ksegrp_link(newkg, p);
+ mtx_lock_spin(&sched_lock);
+ kse_link(newke, newkg);
+ if (p->p_sflag & PS_NEEDSIGCHK)
+ newke->ke_flags |= KEF_ASTPENDING;
+ /* Add engine */
+ kse_reassign(newke);
+ mtx_unlock_spin(&sched_lock);
}
- else
- newkg = kg;
- kse_link(newke, newkg);
- if (p->p_sflag & PS_NEEDSIGCHK)
- newke->ke_flags |= KEF_ASTPENDING;
- newke->ke_mailbox = uap->mbx;
- newke->ke_upcall = mbx.km_func;
- bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
- thread_schedule_upcall(td, newke);
+ }
+ newku = upcall_alloc();
+ newku->ku_mailbox = uap->mbx;
+ newku->ku_func = mbx.km_func;
+ bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
+
+ /* For the first call this may not have been set */
+ if (td->td_standin == NULL)
+ thread_alloc_spare(td, NULL);
+
+ mtx_lock_spin(&sched_lock);
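+ /*
+ * Re-check the limit now that we hold sched_lock;
+ * another thread may have raced us here.
+ */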
+ if (newkg->kg_numupcalls >= ncpus) {
+ upcall_free(newku);
mtx_unlock_spin(&sched_lock);
+ return (EPROCLIM);
+ }
+ upcall_link(newku, newkg);
+
+ /*
+ * Each upcall structure has an owner thread; find which
+ * one owns this one.
+ */
+ if (uap->newgroup) {
+ /*
+ * Because the new ksegrp has no thread yet,
+ * create an initial upcall thread to own it.
+ */
+ thread_schedule_upcall(td, newku);
} else {
/*
- * If we didn't allocate a new KSE then the we are using
- * the exisiting (BOUND) kse.
+ * If the current thread doesn't have an upcall structure,
+ * just assign the upcall to it.
*/
- ke = td->td_kse;
- ke->ke_mailbox = uap->mbx;
- ke->ke_upcall = mbx.km_func;
- bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
+ if (td->td_upcall == NULL) {
+ newku->ku_owner = td;
+ td->td_upcall = newku;
+ } else {
+ /*
+ * Create a new upcall thread to own it.
+ */
+ thread_schedule_upcall(td, newku);
+ }
}
- /*
- * Fill out the KSE-mode specific fields of the new kse.
- */
+ mtx_unlock_spin(&sched_lock);
return (0);
}
@@ -642,6 +782,8 @@ threadinit(void)
kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
NULL, NULL, kse_init, NULL,
UMA_ALIGN_CACHE, 0);
+ upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
/*
@@ -650,9 +792,9 @@ threadinit(void)
void
thread_stash(struct thread *td)
{
- mtx_lock_spin(&zombie_thread_lock);
+ mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
- mtx_unlock_spin(&zombie_thread_lock);
+ mtx_unlock_spin(&kse_zombie_lock);
}
/*
@@ -661,9 +803,21 @@ thread_stash(struct thread *td)
void
kse_stash(struct kse *ke)
{
- mtx_lock_spin(&zombie_thread_lock);
+ mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
- mtx_unlock_spin(&zombie_thread_lock);
+ mtx_unlock_spin(&kse_zombie_lock);
+}
+
+/*
+ * Stash an embarrassingly extra upcall into the zombie upcall queue.
+ */
+
+void
+upcall_stash(struct kse_upcall *ku)
+{
+ mtx_lock_spin(&kse_zombie_lock);
+ TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
+ mtx_unlock_spin(&kse_zombie_lock);
}
/*
@@ -672,13 +826,13 @@ kse_stash(struct kse *ke)
void
ksegrp_stash(struct ksegrp *kg)
{
- mtx_lock_spin(&zombie_thread_lock);
+ mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
- mtx_unlock_spin(&zombie_thread_lock);
+ mtx_unlock_spin(&kse_zombie_lock);
}
/*
- * Reap zombie threads.
+ * Reap zombie kse resources.
*/
void
thread_reap(void)
@@ -686,27 +840,34 @@ thread_reap(void)
struct thread *td_first, *td_next;
struct kse *ke_first, *ke_next;
struct ksegrp *kg_first, * kg_next;
+ struct kse_upcall *ku_first, *ku_next;
/*
- * don't even bother to lock if none at this instant
- * We really don't care about the next instant..
+ * Don't even bother to lock if none at this instant;
+ * we really don't care about the next instant.
*/
if ((!TAILQ_EMPTY(&zombie_threads))
|| (!TAILQ_EMPTY(&zombie_kses))
- || (!TAILQ_EMPTY(&zombie_ksegrps))) {
- mtx_lock_spin(&zombie_thread_lock);
+ || (!TAILQ_EMPTY(&zombie_ksegrps))
+ || (!TAILQ_EMPTY(&zombie_upcalls))) {
+ mtx_lock_spin(&kse_zombie_lock);
td_first = TAILQ_FIRST(&zombie_threads);
ke_first = TAILQ_FIRST(&zombie_kses);
kg_first = TAILQ_FIRST(&zombie_ksegrps);
+ ku_first = TAILQ_FIRST(&zombie_upcalls);
if (td_first)
TAILQ_INIT(&zombie_threads);
if (ke_first)
TAILQ_INIT(&zombie_kses);
if (kg_first)
TAILQ_INIT(&zombie_ksegrps);
- mtx_unlock_spin(&zombie_thread_lock);
+ if (ku_first)
+ TAILQ_INIT(&zombie_upcalls);
+ mtx_unlock_spin(&kse_zombie_lock);
while (td_first) {
td_next = TAILQ_NEXT(td_first, td_runq);
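+ /*
+ * Stashed spare threads may still hold a credential
+ * reference (see thread_alloc_spare()); drop it here.
+ */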
+ if (td_first->td_ucred)
+ crfree(td_first->td_ucred);
thread_free(td_first);
td_first = td_next;
}
@@ -720,6 +881,11 @@ thread_reap(void)
ksegrp_free(kg_first);
kg_first = kg_next;
}
+ while (ku_first) {
+ ku_next = TAILQ_NEXT(ku_first, ku_link);
+ upcall_free(ku_first);
+ ku_first = ku_next;
+ }
}
}
@@ -792,20 +958,14 @@ thread_export_context(struct thread *td)
struct ksegrp *kg;
uintptr_t mbx;
void *addr;
- int error;
+ int error, temp;
ucontext_t uc;
- uint temp;
p = td->td_proc;
kg = td->td_ksegrp;
/* Export the user/machine context. */
-#if 0
- addr = (caddr_t)td->td_mailbox +
- offsetof(struct kse_thr_mailbox, tm_context);
-#else /* if user pointer arithmetic is valid in the kernel */
- addr = (void *)(&td->td_mailbox->tm_context);
-#endif
+ addr = (void *)(&td->td_mailbox->tm_context);
error = copyin(addr, &uc, sizeof(ucontext_t));
if (error)
goto bad;
@@ -815,13 +975,14 @@ thread_export_context(struct thread *td)
if (error)
goto bad;
- /* get address in latest mbox of list pointer */
-#if 0
- addr = (caddr_t)td->td_mailbox
- + offsetof(struct kse_thr_mailbox , tm_next);
-#else /* if user pointer arithmetic is valid in the kernel */
+ /* Export clock ticks spent in kernel mode */
+ addr = (caddr_t)(&td->td_mailbox->tm_sticks);
+ temp = fuword(addr) + td->td_usticks;
+ if (suword(addr, temp))
+ goto bad;
+
+ /* Get address in latest mbox of list pointer */
addr = (void *)(&td->td_mailbox->tm_next);
-#endif
/*
* Put the saved address of the previous first
* entry into this one
@@ -835,42 +996,43 @@ thread_export_context(struct thread *td)
PROC_LOCK(p);
if (mbx == (uintptr_t)kg->kg_completed) {
kg->kg_completed = td->td_mailbox;
+ /*
+ * The thread context may be taken away by
+ * other upcall threads when we unlock the
+ * process lock; it is no longer valid to
+ * use it again anywhere else.
+ */
+ td->td_mailbox = NULL;
PROC_UNLOCK(p);
break;
}
PROC_UNLOCK(p);
}
- addr = (caddr_t)td->td_mailbox
- + offsetof(struct kse_thr_mailbox, tm_sticks);
- temp = fuword(addr) + td->td_usticks;
- if (suword(addr, temp))
- goto bad;
+ td->td_usticks = 0;
return (0);
bad:
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
+ /* The mailbox is bad, don't use it */
+ td->td_mailbox = NULL;
+ td->td_usticks = 0;
return (error);
}
/*
* Take the list of completed mailboxes for this KSEGRP and put them on this
- * KSE's mailbox as it's the next one going up.
+ * upcall's mailbox as it's the next one going up.
*/
static int
-thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
+thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
struct proc *p = kg->kg_proc;
void *addr;
uintptr_t mbx;
-#if 0
- addr = (caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_completed);
-#else /* if user pointer arithmetic is valid in the kernel */
- addr = (void *)(&ke->ke_mailbox->km_completed);
-#endif
+ addr = (void *)(&ku->ku_mailbox->km_completed);
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
if (suword(addr, mbx)) {
@@ -895,69 +1057,91 @@ thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
* This function should be called at statclock interrupt time
*/
int
-thread_add_ticks_intr(int user, uint ticks)
+thread_statclock(int user)
{
struct thread *td = curthread;
- struct kse *ke = td->td_kse;
- if (ke->ke_mailbox == NULL)
- return -1;
+ if (td->td_ksegrp->kg_numupcalls == 0)
+ return (-1);
if (user) {
/* Current always do via ast() */
- ke->ke_flags |= KEF_ASTPENDING;
- ke->ke_uuticks += ticks;
+ td->td_flags |= (TDF_ASTPENDING|TDF_USTATCLOCK);
+ td->td_uuticks += ticks;
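+ /*
+ * thread_userret() sees TDF_USTATCLOCK and exports the
+ * accumulated ticks via thread_update_usr_ticks().
+ */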
} else {
if (td->td_mailbox != NULL)
td->td_usticks += ticks;
- else
- ke->ke_usticks += ticks;
+ else {
+ /* XXXKSE
+ * We will call thread_user_enter() for every
+ * kernel entry in the future, so if the thread mailbox
+ * is NULL, it must be the UTS running in the kernel;
+ * don't account clock ticks for it.
+ */
+ }
}
- return 0;
+ return (0);
}
+/*
+ * Export user mode statclock ticks
+ */
static int
-thread_update_uticks(void)
+thread_update_usr_ticks(struct thread *td)
{
- struct thread *td = curthread;
struct proc *p = td->td_proc;
- struct kse *ke = td->td_kse;
struct kse_thr_mailbox *tmbx;
+ struct kse_upcall *ku;
caddr_t addr;
- uint uticks, sticks;
-
- if (ke->ke_mailbox == NULL)
- return 0;
+ uint uticks;
- uticks = ke->ke_uuticks;
- ke->ke_uuticks = 0;
- sticks = ke->ke_usticks;
- ke->ke_usticks = 0;
-#if 0
- tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_curthread));
-#else /* if user pointer arithmetic is ok in the kernel */
- tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
-#endif
+ if ((ku = td->td_upcall) == NULL)
+ return (-1);
+
+ tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
if ((tmbx == NULL) || (tmbx == (void *)-1))
- return 0;
+ return (-1);
+ uticks = td->td_uuticks;
+ td->td_uuticks = 0;
if (uticks) {
- addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks);
+ addr = (caddr_t)&tmbx->tm_uticks;
uticks += fuword(addr);
- if (suword(addr, uticks))
- goto bad;
+ if (suword(addr, uticks)) {
+ PROC_LOCK(p);
+ psignal(p, SIGSEGV);
+ PROC_UNLOCK(p);
+ return (-2);
+ }
}
- if (sticks) {
- addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks);
- sticks += fuword(addr);
- if (suword(addr, sticks))
- goto bad;
+ return (0);
+}
+
+/*
+ * Export kernel mode statclock ticks
+ */
+
+static int
+thread_update_sys_ticks(struct thread *td)
+{
+ struct proc *p = td->td_proc;
+ caddr_t addr;
+ int sticks;
+
+ if (td->td_mailbox == NULL)
+ return (-1);
+ if (td->td_usticks == 0)
+ return (0);
+ addr = (caddr_t)&td->td_mailbox->tm_sticks;
+ sticks = fuword(addr);
+ /* XXXKSE use XCHG instead */
+ sticks += td->td_usticks;
+ td->td_usticks = 0;
+ if (suword(addr, sticks)) {
+ PROC_LOCK(p);
+ psignal(p, SIGSEGV);
+ PROC_UNLOCK(p);
+ return (-2);
}
- return 0;
-bad:
- PROC_LOCK(p);
- psignal(p, SIGSEGV);
- PROC_UNLOCK(p);
- return -1;
+ return (0);
}
/*
@@ -1013,6 +1197,7 @@ thread_exit(void)
p->p_numthreads--;
TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
kg->kg_numthreads--;
+
/*
* The test below is NOT true if we are the
* sole exiting thread. P_STOPPED_SNGL is unset
@@ -1024,25 +1209,28 @@ thread_exit(void)
}
}
- /* Reassign this thread's KSE. */
+ /*
+ * Because each upcall structure has an owner thread,
+ * and an owner thread exits only when the process is
+ * exiting, an upcall to userland is no longer needed
+ * and deleting the upcall structure is safe here.
+ * So when all threads in a group have exited, all upcalls
+ * in the group will have been freed automatically.
+ */
+ if (td->td_upcall)
+ upcall_remove(td);
+
ke->ke_state = KES_UNQUEUED;
-
+ ke->ke_thread = NULL;
/*
* Decide what to do with the KSE attached to this thread.
- * XXX Possibly kse_reassign should do both cases as it already
- * does some of this.
*/
- if (ke->ke_flags & KEF_EXIT) {
- KASSERT((ke->ke_owner == td),
- ("thread_exit: KSE exiting with non-owner thread"));
- ke->ke_thread = NULL;
- td->td_kse = NULL;
+ if (ke->ke_flags & KEF_EXIT)
kse_unlink(ke);
- } else {
- TD_SET_EXITING(td); /* definitly not runnable */
+ else
kse_reassign(ke);
- }
PROC_UNLOCK(p);
+ td->td_kse = NULL;
td->td_state = TDS_INACTIVE;
td->td_proc = NULL;
td->td_ksegrp = NULL;
@@ -1090,10 +1278,12 @@ thread_link(struct thread *td, struct ksegrp *kg)
struct proc *p;
p = kg->kg_proc;
- td->td_state = TDS_INACTIVE;
- td->td_proc = p;
- td->td_ksegrp = kg;
- td->td_last_kse = NULL;
+ td->td_state = TDS_INACTIVE;
+ td->td_proc = p;
+ td->td_ksegrp = kg;
+ td->td_last_kse = NULL;
+ td->td_flags = 0;
+ td->td_kse = NULL;
LIST_INIT(&td->td_contested);
callout_init(&td->td_slpcallout, 1);
@@ -1101,116 +1291,139 @@ thread_link(struct thread *td, struct ksegrp *kg)
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
kg->kg_numthreads++;
- td->td_kse = NULL;
}
+/*
+ * Purge a ksegrp's resources. When a ksegrp is preparing to
+ * exit, it calls this function.
+ */
+void
+kse_purge_group(struct thread *td)
+{
+ struct ksegrp *kg;
+ struct kse *ke;
+
+ kg = td->td_ksegrp;
+ KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
+ while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
+ KASSERT(ke->ke_state == KES_IDLE,
+ ("%s: wrong idle KSE state", __func__));
+ kse_unlink(ke);
+ }
+ KASSERT((kg->kg_kses == 1),
+ ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
+ KASSERT((kg->kg_numupcalls == 0),
+ ("%s: ksegrp still has %d upcall datas",
+ __func__, kg->kg_numupcalls));
+}
+
+/*
+ * Purge a process's KSE resource. When a process is preparing to
+ * exit, it calls kse_purge to release any extra KSE resources in
+ * the process.
+ */
void
kse_purge(struct proc *p, struct thread *td)
{
- /* XXXKSE think about this..
- may need to wake up threads on loan queue. */
struct ksegrp *kg;
+ struct kse *ke;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
mtx_lock_spin(&sched_lock);
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
+ /*
+ * There is no ownership for KSEs; after all threads
+ * in the group have exited, it is possible that some KSEs
+ * were left on the idle queue, so gc them now.
+ */
+ while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
+ KASSERT(ke->ke_state == KES_IDLE,
+ ("%s: wrong idle KSE state", __func__));
+ TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
+ kg->kg_idle_kses--;
+ TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
+ kg->kg_kses--;
+ kse_stash(ke);
+ }
KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
- ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
- ("wrong kg_kses"));
- if (kg != td->td_ksegrp) {
+ ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
+ ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
+ KASSERT((kg->kg_numupcalls == 0),
+ ("%s: ksegrp still has %d upcall datas",
+ __func__, kg->kg_numupcalls));
+
+ if (kg != td->td_ksegrp)
ksegrp_stash(kg);
- }
}
TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
p->p_numksegrps++;
mtx_unlock_spin(&sched_lock);
}
+/*
+ * This function is intended to be used to initialize a spare thread
+ * for an upcall. Initialize the thread's large data area outside of
+ * sched_lock for thread_schedule_upcall().
+ */
+void
+thread_alloc_spare(struct thread *td, struct thread *spare)
+{
+ if (td->td_standin)
+ return;
+ if (spare == NULL)
+ spare = thread_alloc();
+ td->td_standin = spare;
+ bzero(&spare->td_startzero,
+ (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
+ spare->td_proc = td->td_proc;
+ /* Setup PCB and fork address */
+ cpu_set_upcall(spare, td->td_pcb);
+ /*
+ * XXXKSE do we really need this? (default values for the
+ * frame).
+ */
+ bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
+ spare->td_ucred = crhold(td->td_ucred);
+}
/*
* Create a thread and schedule it for upcall on the KSE given.
* Use our thread's standin so that we don't have to allocate one.
*/
struct thread *
-thread_schedule_upcall(struct thread *td, struct kse *ke)
+thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
struct thread *td2;
- int newkse;
mtx_assert(&sched_lock, MA_OWNED);
- newkse = (ke != td->td_kse);
/*
- * If the owner and kse are BOUND then that thread is planning to
- * go to userland and upcalls are not expected. So don't make one.
- * If it is not bound then make it so with the spare thread
- * anf then borrw back the KSE to allow us to complete some in-kernel
- * work. When we complete, the Bound thread will have the chance to
- * complete. This thread will sleep as planned. Hopefully there will
- * eventually be un unbound thread that can be converted to an
- * upcall to report the completion of this thread.
+ * Schedule an upcall thread on the specified kse_upcall;
+ * the kse_upcall must be free.
+ * td must have a spare thread.
*/
-
+ KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
if ((td2 = td->td_standin) != NULL) {
td->td_standin = NULL;
} else {
- if (newkse)
- panic("no reserve thread when called with a new kse");
- /*
- * If called from (e.g.) sleep and we do not have
- * a reserve thread, then we've used it, so do not
- * create an upcall.
- */
+ panic("no reserve thread when scheduling an upcall");
return (NULL);
}
CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
td2, td->td_proc->p_pid, td->td_proc->p_comm);
- bzero(&td2->td_startzero,
- (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
bcopy(&td->td_startcopy, &td2->td_startcopy,
(unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
- thread_link(td2, ke->ke_ksegrp);
- cpu_set_upcall(td2, td->td_pcb);
-
- /*
- * XXXKSE do we really need this? (default values for the
- * frame).
- */
- bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
-
- /*
- * Bind the new thread to the KSE,
- * and if it's our KSE, lend it back to ourself
- * so we can continue running.
- */
- td2->td_ucred = crhold(td->td_ucred);
- td2->td_flags = TDF_UPCALLING; /* note: BOUND */
- td2->td_kse = ke;
- td2->td_state = TDS_CAN_RUN;
+ thread_link(td2, ku->ku_ksegrp);
+ /* Let the new thread become owner of the upcall */
+ ku->ku_owner = td2;
+ td2->td_upcall = ku;
+ td2->td_flags = TDF_UPCALLING;
+ td2->td_kse = NULL;
+ td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
- ke->ke_owner = td2;
- /*
- * If called from kse_reassign(), we are working on the current
- * KSE so fake that we borrowed it. If called from
- * kse_create(), don't, as we have a new kse too.
- */
- if (!newkse) {
- /*
- * This thread will be scheduled when the current thread
- * blocks, exits or tries to enter userspace, (which ever
- * happens first). When that happens the KSe will "revert"
- * to this thread in a BOUND manner. Since we are called
- * from msleep() this is going to be "very soon" in nearly
- * all cases.
- */
- TD_SET_LOAN(td2);
- } else {
- ke->ke_thread = td2;
- ke->ke_state = KES_THREAD;
- setrunqueue(td2);
- }
+ setrunqueue(td2);
return (td2); /* bogus.. should be a void function */
}
@@ -1222,14 +1435,16 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
struct thread *
signal_upcall(struct proc *p, int sig)
{
+#if 0
struct thread *td, *td2;
struct kse *ke;
sigset_t ss;
int error;
+#endif
PROC_LOCK_ASSERT(p, MA_OWNED);
return (NULL);
-
+#if 0
td = FIRST_THREAD_IN_PROC(p);
ke = td->td_kse;
PROC_UNLOCK(p);
@@ -1244,28 +1459,31 @@ return (NULL);
if (error)
return (NULL);
if (td->td_standin == NULL)
- td->td_standin = thread_alloc();
+ thread_alloc_spare(td, NULL);
mtx_lock_spin(&sched_lock);
td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */
mtx_unlock_spin(&sched_lock);
return (td2);
+#endif
}
/*
- * setup done on the thread when it enters the kernel.
+ * Setup done on the thread when it enters the kernel.
* XXXKSE Presently only for syscalls but eventually all kernel entries.
*/
void
thread_user_enter(struct proc *p, struct thread *td)
{
- struct kse *ke;
+ struct ksegrp *kg;
+ struct kse_upcall *ku;
+ kg = td->td_ksegrp;
/*
* First check that we shouldn't just abort.
* But check if we are the single thread first!
* XXX p_singlethread not locked, but should be safe.
*/
- if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
+ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
thread_exit();
@@ -1278,43 +1496,37 @@ thread_user_enter(struct proc *p, struct thread *td)
* possibility that we could do this lazily (in kse_reassign()),
* but for now do it every time.
*/
- ke = td->td_kse;
- td->td_flags &= ~TDF_UNBOUND;
- if (ke->ke_mailbox != NULL) {
-#if 0
- td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
- + offsetof(struct kse_mailbox, km_curthread));
-#else /* if user pointer arithmetic is ok in the kernel */
+ kg = td->td_ksegrp;
+ if (kg->kg_numupcalls) {
+ ku = td->td_upcall;
+ KASSERT(ku, ("%s: no upcall owned", __func__));
+ KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
td->td_mailbox =
- (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
-#endif
+ (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
if ((td->td_mailbox == NULL) ||
(td->td_mailbox == (void *)-1)) {
- td->td_mailbox = NULL; /* single thread it.. */
+ /* Don't schedule upcall when blocked */
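+ /*
+ * km_curthread is NULL when the UTS itself entered the
+ * kernel (or the fetch failed); stay bound for this entry.
+ */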
+ td->td_mailbox = NULL;
mtx_lock_spin(&sched_lock);
- td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
+ td->td_flags &= ~TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
} else {
- /*
- * when thread limit reached, act like that the thread
- * has already done an upcall.
- */
if (p->p_numthreads > max_threads_per_proc) {
- if (td->td_standin != NULL) {
- thread_stash(td->td_standin);
- td->td_standin = NULL;
- }
+ /*
+ * Since the kernel thread limit has been reached,
+ * don't schedule an upcall anymore.
+ * XXXKSE This code in fact shouldn't be needed.
+ */
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_CAN_UNBIND;
+ mtx_unlock_spin(&sched_lock);
} else {
if (td->td_standin == NULL)
- td->td_standin = thread_alloc();
+ thread_alloc_spare(td, NULL);
+ mtx_lock_spin(&sched_lock);
+ td->td_flags |= TDF_CAN_UNBIND;
+ mtx_unlock_spin(&sched_lock);
}
- mtx_lock_spin(&sched_lock);
- td->td_flags |= TDF_CAN_UNBIND;
- mtx_unlock_spin(&sched_lock);
- KASSERT((ke->ke_owner == td),
- ("thread_user_enter: No starting owner "));
- ke->ke_owner = td;
- td->td_usticks = 0;
}
}
}
@@ -1335,165 +1547,90 @@ int
thread_userret(struct thread *td, struct trapframe *frame)
{
int error;
- int unbound;
- struct kse *ke;
+ struct kse_upcall *ku;
struct ksegrp *kg;
- struct thread *worktodo;
struct proc *p;
struct timespec ts;
- KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
- ("thread_userret: bad thread/kse pointers"));
- KASSERT((td == curthread),
- ("thread_userret: bad thread argument"));
-
-
- kg = td->td_ksegrp;
p = td->td_proc;
- error = 0;
- unbound = TD_IS_UNBOUND(td);
+ kg = td->td_ksegrp;
- mtx_lock_spin(&sched_lock);
- if ((worktodo = kg->kg_last_assigned))
- worktodo = TAILQ_NEXT(worktodo, td_runq);
- else
- worktodo = TAILQ_FIRST(&kg->kg_runq);
+ /* Nothing to do for a non-threaded group/process. */
+ if (td->td_ksegrp->kg_numupcalls == 0)
+ return (0);
/*
- * Permanently bound threads never upcall but they may
- * loan out their KSE at this point.
- * Upcalls imply bound.. They also may want to do some Philantropy.
- * Temporarily bound threads on the other hand either yield
- * to other work and transform into an upcall, or proceed back to
- * userland.
+ * A stat clock interrupt hit while in userland; we are
+ * returning from the interrupt, so charge the thread's
+ * userland time to the UTS.
*/
+ if (td->td_flags & TDF_USTATCLOCK) {
+ thread_update_usr_ticks(td);
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_USTATCLOCK;
+ mtx_unlock_spin(&sched_lock);
+ }
+ /*
+ * Optimisation:
+ * This thread has not started any upcall.
+ * If there is no work to report other than ourselves,
+ * then it can return directly to userland.
+ */
if (TD_CAN_UNBIND(td)) {
- td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
- if (!worktodo && (kg->kg_completed == NULL) &&
- !(td->td_kse->ke_flags & KEF_DOUPCALL)) {
- /*
- * This thread has not started any upcall.
- * If there is no work to report other than
- * ourself, then it can return direct to userland.
- */
-justreturn:
- mtx_unlock_spin(&sched_lock);
- thread_update_uticks();
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_CAN_UNBIND;
+ mtx_unlock_spin(&sched_lock);
+ if ((kg->kg_completed == NULL) &&
+ (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
+ thread_update_sys_ticks(td);
td->td_mailbox = NULL;
return (0);
}
- mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
- td->td_usticks = 0;
if (error) {
/*
- * As we are not running on a borrowed KSE,
- * failing to do the KSE operation just defaults
+ * Failing to do the KSE operation just defaults
* back to synchronous operation, so just return from
* the syscall.
*/
- goto justreturn;
+ return (0);
}
- mtx_lock_spin(&sched_lock);
/*
- * Turn ourself into a bound upcall.
- * We will rely on kse_reassign()
- * to make us run at a later time.
+ * There is something to report, and we own an upcall
+ * structure, so we can go to userland.
+ * Turn ourselves into an upcall thread.
*/
+ mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_UPCALLING;
-
- /* there may be more work since we re-locked schedlock */
- if ((worktodo = kg->kg_last_assigned))
- worktodo = TAILQ_NEXT(worktodo, td_runq);
- else
- worktodo = TAILQ_FIRST(&kg->kg_runq);
- } else if (unbound) {
- /*
- * We are an unbound thread, looking to
- * return to user space. There must be another owner
- * of this KSE.
- * We are using a borrowed KSE. save state and exit.
- * kse_reassign() will recycle the kse as needed,
- */
mtx_unlock_spin(&sched_lock);
+ } else if (td->td_mailbox) {
error = thread_export_context(td);
- td->td_usticks = 0;
if (error) {
- /*
- * There is nothing we can do.
- * We just lose that context. We
- * probably should note this somewhere and send
- * the process a signal.
- */
PROC_LOCK(td->td_proc);
- psignal(td->td_proc, SIGSEGV);
mtx_lock_spin(&sched_lock);
- ke = td->td_kse;
/* possibly upcall with error? */
} else {
+ PROC_LOCK(td->td_proc);
+ mtx_lock_spin(&sched_lock);
/*
- * Don't make an upcall, just exit so that the owner
- * can get its KSE if it wants it.
- * Our context is already safely stored for later
- * use by the UTS.
+ * There are upcall threads waiting for
+ * work to do, wake one of them up.
+ * XXXKSE Maybe wake all of them up.
*/
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- ke = td->td_kse;
- }
- /*
- * If the owner is idling, we now have something for it
- * to report, so make it runnable.
- * If the owner is not an upcall, make an attempt to
- * ensure that at least one of any IDLED upcalls can
- * wake up.
- */
- if (ke->ke_owner->td_flags & TDF_UPCALLING) {
- TD_CLR_IDLE(ke->ke_owner);
- } else {
- FOREACH_KSE_IN_GROUP(kg, ke) {
- if (TD_IS_IDLE(ke->ke_owner)) {
- TD_CLR_IDLE(ke->ke_owner);
- setrunnable(ke->ke_owner);
- break;
- }
- }
+ if (kg->kg_upsleeps)
+ wakeup_one(&kg->kg_completed);
}
thread_exit();
+ /* NOTREACHED */
}
- /*
- * We ARE going back to userland with this KSE.
- * We are permanently bound. We may be an upcall.
- * If an upcall, check for threads that need to borrow the KSE.
- * Any other thread that comes ready after this missed the boat.
- */
- ke = td->td_kse;
- /*
- * If not upcalling, go back to userspace.
- * If we are, get the upcall set up.
- */
if (td->td_flags & TDF_UPCALLING) {
- if (worktodo) {
- /*
- * force a switch to more urgent 'in kernel'
- * work. Control will return to this thread
- * when there is no more work to do.
- * kse_reassign() will do that for us.
- */
- TD_SET_LOAN(td);
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch(); /* kse_reassign() will (re)find worktodo */
- }
- td->td_flags &= ~TDF_UPCALLING;
- if (ke->ke_flags & KEF_DOUPCALL)
- ke->ke_flags &= ~KEF_DOUPCALL;
- mtx_unlock_spin(&sched_lock);
-
+ KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind"));
+ ku = td->td_upcall;
/*
* There is no more work to do and we are going to ride
- * this thread/KSE up to userland as an upcall.
+ * this thread up to userland as an upcall.
* Do the last parts of the setup needed for the upcall.
*/
CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
@@ -1504,16 +1641,27 @@ justreturn:
* Will use Giant in cpu_thread_clean() because it uses
* kmem_free(kernel_map, ...)
*/
- cpu_set_upcall_kse(td, ke);
+ cpu_set_upcall_kse(td, ku);
- /*
+ /*
+ * Clear TDF_UPCALLING after setting the upcall context;
+ * the profiling code checks TDF_UPCALLING to avoid
+ * accounting against a wrong user %EIP.
+ */
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_UPCALLING;
+ if (ku->ku_flags & KUF_DOUPCALL)
+ ku->ku_flags &= ~KUF_DOUPCALL;
+ mtx_unlock_spin(&sched_lock);
+
+ /*
* Unhook the list of completed threads.
* anything that completes after this gets to
* come in next time.
* Put the list of completed thread mailboxes on
* this KSE's mailbox.
*/
- error = thread_link_mboxes(kg, ke);
+ error = thread_link_mboxes(kg, ku);
if (error)
goto bad;
@@ -1524,34 +1672,33 @@ justreturn:
* it would be nice if this all happened only on the first
* time through. (the scan for extra work etc.)
*/
-#if 0
- error = suword((caddr_t)ke->ke_mailbox +
- offsetof(struct kse_mailbox, km_curthread), 0);
-#else /* if user pointer arithmetic is ok in the kernel */
- error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
-#endif
- ke->ke_uuticks = ke->ke_usticks = 0;
+ error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
if (error)
goto bad;
+
+ /* Export current system time */
nanotime(&ts);
if (copyout(&ts,
- (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
+ (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts))) {
goto bad;
}
- } else {
- mtx_unlock_spin(&sched_lock);
}
/*
* Optimisation:
* Ensure that we have a spare thread available,
* for when we re-enter the kernel.
*/
- if (td->td_standin == NULL) {
- td->td_standin = thread_alloc();
- }
+ if (td->td_standin == NULL)
+ thread_alloc_spare(td, NULL);
- thread_update_uticks();
+ /*
+ * Clear thread mailbox first, then clear system tick count.
+ * The order is important because thread_statclock() uses
+ * the mailbox pointer to see if it is a userland thread or
+ * a UTS kernel thread.
+ */
td->td_mailbox = NULL;
+ td->td_usticks = 0;
return (0);
bad:
@@ -1563,6 +1710,7 @@ bad:
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
td->td_mailbox = NULL;
+ td->td_usticks = 0;
return (error); /* go sync */
}
@@ -1601,7 +1749,6 @@ thread_single(int force_exit)
if (force_exit == SINGLE_EXIT) {
p->p_flag |= P_SINGLE_EXIT;
- td->td_flags &= ~TDF_UNBOUND;
} else
p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
@@ -1624,17 +1771,16 @@ thread_single(int force_exit)
else
abortsleep(td2);
}
- if (TD_IS_IDLE(td2)) {
- TD_CLR_IDLE(td2);
- }
} else {
if (TD_IS_SUSPENDED(td2))
continue;
- /* maybe other inhibitted states too? */
+ /*
+ * Maybe other inhibited states too?
+ * XXXKSE Is it totally safe to
+ * suspend a non-interruptible thread?
+ */
if (td2->td_inhibitors &
- (TDI_SLEEPING | TDI_SWAPPED |
- TDI_LOAN | TDI_IDLE |
- TDI_EXITING))
+ (TDI_SLEEPING | TDI_SWAPPED))
thread_suspend_one(td2);
}
}
@@ -1660,8 +1806,14 @@ thread_single(int force_exit)
mtx_lock(&Giant);
PROC_LOCK(p);
}
- if (force_exit == SINGLE_EXIT)
+ if (force_exit == SINGLE_EXIT) {
+ if (td->td_upcall) {
+ mtx_lock_spin(&sched_lock);
+ upcall_remove(td);
+ mtx_unlock_spin(&sched_lock);
+ }
kse_purge(p, td);
+ }
return (0);
}
@@ -1703,7 +1855,6 @@ thread_suspend_check(int return_instead)
{
struct thread *td;
struct proc *p;
- struct kse *ke;
struct ksegrp *kg;
td = curthread;
@@ -1735,16 +1886,6 @@ thread_suspend_check(int return_instead)
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
- /*
- * All threads should be exiting
- * Unless they are the active "singlethread".
- * destroy un-needed KSEs as we go..
- * KSEGRPS may implode too as #kses -> 0.
- */
- ke = td->td_kse;
- if (ke->ke_owner == td &&
- (kg->kg_kses >= kg->kg_numthreads ))
- ke->ke_flags |= KEF_EXIT;
thread_exit();
}
@@ -1752,14 +1893,6 @@ thread_suspend_check(int return_instead)
* When a thread suspends, it just
* moves to the processes's suspend queue
* and stays there.
- *
- * XXXKSE if TDF_BOUND is true
- * it will not release it's KSE which might
- * lead to deadlock if there are not enough KSEs
- * to complete all waiting threads.
- * Maybe be able to 'lend' it out again.
- * (lent kse's can not go back to userland?)
- * and can only be lent in STOPPED state.
*/
mtx_lock_spin(&sched_lock);
if ((p->p_flag & P_STOPPED_SIG) &&
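
One subtle point in the thread_userret() changes above is the ordering note: td_mailbox is
cleared before td_usticks because thread_statclock() uses the mailbox pointer to decide
whether a tick belongs to a userland thread or to the UTS kernel thread. The following is a
small, self-contained sketch of that dependency; the struct and function names are invented
stand-ins, not the kernel's definitions.

#include <stddef.h>

/* Hypothetical, simplified types: thr is not struct thread. */
struct thr {
        void    *mailbox;       /* non-NULL: running on behalf of a user thread */
        int      usticks;       /* system ticks to report back to the UTS */
        int      kticks;        /* ticks charged to the upcall/UTS side */
};

/*
 * A statclock-style handler that keys off the mailbox pointer.  If the
 * tick count were cleared while the mailbox was still set (or the other
 * way round), a tick could be charged to the wrong side, which is why
 * thread_userret() clears td_mailbox first and td_usticks second.
 */
static void
sketch_statclock(struct thr *td)
{
        if (td->mailbox != NULL)
                td->usticks++;  /* charged to the user thread's mailbox */
        else
                td->kticks++;   /* charged to the upcall/UTS thread */
}

int
main(void)
{
        struct thr td = { (void *)1, 0, 0 };

        sketch_statclock(&td);  /* counted as userland work */
        td.mailbox = NULL;      /* cleared first, as in thread_userret() */
        td.usticks = 0;         /* then the tick count */
        sketch_statclock(&td);  /* now counted against the UTS side */
        return (0);
}
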
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 2c22a92..ecf309e 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -358,7 +358,9 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
return (0);
if (state == GMON_PROF_OFF) {
gp->state = state;
+ PROC_LOCK(&proc0);
stopprofclock(&proc0);
+ PROC_UNLOCK(&proc0);
stopguprof(gp);
} else if (state == GMON_PROF_ON) {
gp->state = GMON_PROF_OFF;
@@ -369,7 +371,9 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
#ifdef GUPROF
} else if (state == GMON_PROF_HIRES) {
gp->state = GMON_PROF_OFF;
+ PROC_LOCK(&proc0);
stopprofclock(&proc0);
+ PROC_UNLOCK(&proc0);
startguprof(gp);
gp->state = state;
#endif
@@ -419,7 +423,7 @@ profil(td, uap)
struct thread *td;
register struct profil_args *uap;
{
- register struct uprof *upp;
+ struct uprof *upp;
int s;
int error = 0;
@@ -430,7 +434,9 @@ profil(td, uap)
goto done2;
}
if (uap->scale == 0) {
+ PROC_LOCK(td->td_proc);
stopprofclock(td->td_proc);
+ PROC_UNLOCK(td->td_proc);
goto done2;
}
upp = &td->td_proc->p_stats->p_prof;
@@ -472,19 +478,16 @@ done2:
* inaccurate.
*/
void
-addupc_intr(ke, pc, ticks)
- register struct kse *ke;
- register uintptr_t pc;
- u_int ticks;
+addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
{
- register struct uprof *prof;
- register caddr_t addr;
- register u_int i;
- register int v;
+ struct uprof *prof;
+ caddr_t addr;
+ u_int i;
+ int v;
if (ticks == 0)
return;
- prof = &ke->ke_proc->p_stats->p_prof;
+ prof = &td->td_proc->p_stats->p_prof;
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
return; /* out of range; ignore */
@@ -492,9 +495,9 @@ addupc_intr(ke, pc, ticks)
addr = prof->pr_base + i;
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
mtx_lock_spin(&sched_lock);
- prof->pr_addr = pc;
- prof->pr_ticks = ticks;
- ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING ;
+ td->td_praddr = pc;
+ td->td_prticks = ticks;
+ td->td_flags |= (TDF_OWEUPC | TDF_ASTPENDING);
mtx_unlock_spin(&sched_lock);
}
}
@@ -502,34 +505,56 @@ addupc_intr(ke, pc, ticks)
/*
* Much like before, but we can afford to take faults here. If the
* update fails, we simply turn off profiling.
+ * XXXKSE: don't use the kse pointer unless we hold sched_lock.
*/
void
-addupc_task(ke, pc, ticks)
- register struct kse *ke;
- register uintptr_t pc;
- u_int ticks;
+addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
{
- struct proc *p = ke->ke_proc;
+ struct proc *p = td->td_proc;
register struct uprof *prof;
register caddr_t addr;
register u_int i;
u_short v;
+ int stop = 0;
if (ticks == 0)
return;
+ PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
+ if (!(p->p_sflag & PS_PROFIL)) {
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ return;
+ }
+ p->p_profthreads++;
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
prof = &p->p_stats->p_prof;
if (pc < prof->pr_off ||
- (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
- return;
+ (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
+ goto out;
+ }
addr = prof->pr_base + i;
if (copyin(addr, &v, sizeof(v)) == 0) {
v += ticks;
if (copyout(&v, addr, sizeof(v)) == 0)
- return;
+ goto out;
+ }
+ stop = 1;
+
+out:
+ PROC_LOCK(p);
+ if (--p->p_profthreads == 0) {
+ if (p->p_sflag & PS_STOPPROF) {
+ wakeup(&p->p_profthreads);
+ stop = 0;
+ }
}
- stopprofclock(p);
+ if (stop)
+ stopprofclock(p);
+ PROC_UNLOCK(p);
}
#if defined(__i386__) && __GNUC__ >= 2
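
The addupc_task() rework above uses a reference count (p_profthreads) so that the unlocked
copyin()/copyout() of the profile buffer can proceed while stopprofclock() waits for the
count to drain before clearing PS_PROFIL. Below is a minimal userland sketch of that
pattern, assuming pthreads; profthreads, profiling and stopping are stand-ins chosen for
illustration and do not correspond to the kernel's locking primitives.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int  profthreads;        /* analogous to p_profthreads */
static bool profiling = true;   /* analogous to PS_PROFIL */
static bool stopping;           /* analogous to PS_STOPPROF */

/* Take a reference before touching the profile buffer without the lock held. */
static bool
prof_enter(void)
{
        pthread_mutex_lock(&lock);
        if (!profiling) {
                pthread_mutex_unlock(&lock);
                return (false);
        }
        profthreads++;
        pthread_mutex_unlock(&lock);
        return (true);
}

/* Drop the reference; the last thread out wakes a waiting stopper. */
static void
prof_exit(void)
{
        pthread_mutex_lock(&lock);
        if (--profthreads == 0 && stopping)
                pthread_cond_signal(&drained);
        pthread_mutex_unlock(&lock);
}

/* Like stopprofclock(): wait for in-flight updates, then turn profiling off. */
static void
prof_stop(void)
{
        pthread_mutex_lock(&lock);
        while (profthreads > 0) {
                stopping = true;
                pthread_cond_wait(&drained, &lock);
        }
        profiling = false;
        stopping = false;
        pthread_mutex_unlock(&lock);
}

int
main(void)
{
        if (prof_enter()) {
                /* unlocked profile-buffer update would go here */
                prof_exit();
        }
        prof_stop();
        return (0);
}
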
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index eec2ae6..ecd00c1 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -73,15 +73,21 @@ userret(td, frame, oticks)
u_int oticks;
{
struct proc *p = td->td_proc;
- struct kse *ke = td->td_kse;
+#ifdef INVARIANTS
+ struct kse *ke;
+#endif
CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
#ifdef INVARIANTS
- /* Check that we called signotify() enough. */
+ /*
+ * Check that we called signotify() enough.
+ * XXXKSE this check is bogus for threaded programs.
+ */
mtx_lock(&Giant);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
+ ke = td->td_kse;
if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 ||
(td->td_kse->ke_flags & KEF_ASTPENDING) == 0))
printf("failed to set signal flags properly for ast()\n");
@@ -96,6 +102,18 @@ userret(td, frame, oticks)
sched_userret(td);
/*
+ * Charge system time if profiling.
+ *
+ * XXX should move PS_PROFIL to a place that can obviously be
+ * accessed safely without sched_lock.
+ */
+
+ if (p->p_sflag & PS_PROFIL) {
+ ticks = td->td_sticks - oticks;
+ addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
+ }
+
+ /*
* We need to check to see if we have to exit or wait due to a
* single threading requirement or some other STOP condition.
* Don't bother doing all the work if the stop bits are not set
@@ -113,21 +131,6 @@ userret(td, frame, oticks)
if (p->p_flag & P_KSES) {
thread_userret(td, frame);
}
-
- /*
- * Charge system time if profiling.
- *
- * XXX should move PS_PROFIL to a place that can obviously be
- * accessed safely without sched_lock.
- */
- if (p->p_sflag & PS_PROFIL) {
- quad_t ticks;
-
- mtx_lock_spin(&sched_lock);
- ticks = ke->ke_sticks - oticks;
- mtx_unlock_spin(&sched_lock);
- addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio);
- }
}
/*
@@ -146,6 +149,7 @@ ast(struct trapframe *framep)
u_int prticks, sticks;
int sflag;
int flags;
+ int tflags;
int sig;
#if defined(DEV_NPX) && !defined(SMP)
int ucode;
@@ -175,19 +179,21 @@ ast(struct trapframe *framep)
*/
mtx_lock_spin(&sched_lock);
ke = td->td_kse;
- sticks = ke->ke_sticks;
+ sticks = td->td_sticks;
+ tflags = td->td_flags;
flags = ke->ke_flags;
sflag = p->p_sflag;
p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU);
#ifdef MAC
p->p_sflag &= ~PS_MACPEND;
#endif
- ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED | KEF_OWEUPC);
+ ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED);
+ td->td_flags &= ~(TDF_ASTPENDING | TDF_OWEUPC);
cnt.v_soft++;
prticks = 0;
- if (flags & KEF_OWEUPC && sflag & PS_PROFIL) {
- prticks = p->p_stats->p_prof.pr_ticks;
- p->p_stats->p_prof.pr_ticks = 0;
+ if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) {
+ prticks = td->td_prticks;
+ td->td_prticks = 0;
}
mtx_unlock_spin(&sched_lock);
/*
@@ -200,8 +206,9 @@ ast(struct trapframe *framep)
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (flags & KEF_OWEUPC && sflag & PS_PROFIL)
- addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
+ if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) {
+ addupc_task(td, td->td_praddr, prticks);
+ }
if (sflag & PS_ALRMPEND) {
PROC_LOCK(p);
psignal(p, SIGVTALRM);
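
Read together, addupc_intr() and the ast() hunk above form a defer-on-fault pattern: the
interrupt path attempts a non-faulting update of the profile buffer, and on failure stashes
the pc and tick count in the thread (td_praddr, td_prticks) and sets TDF_OWEUPC so the
fault-tolerant addupc_task() can retry on the way back to userland. Here is a
self-contained sketch of that shape, with invented names and trivial stand-ins for
fuswintr()/suswintr() and addupc_task().

#include <stdio.h>

/* Invented types and flags, loosely modeled on td_praddr/td_prticks/TDF_OWEUPC. */
struct sthread {
        unsigned long owed_pc;
        unsigned int  owed_ticks;
        int           flags;
};
#define SF_OWEUPC  0x1

/* Stand-in for fuswintr()/suswintr(): must not fault, may report failure. */
static int
try_nofault_add(unsigned long pc, unsigned int ticks)
{
        (void)pc; (void)ticks;
        return (-1);            /* pretend the fast path failed */
}

/* Stand-in for addupc_task(): allowed to fault or sleep, runs at AST time. */
static void
slow_add(unsigned long pc, unsigned int ticks)
{
        printf("charging %u tick(s) at pc %#lx\n", ticks, pc);
}

static void
intr_add(struct sthread *td, unsigned long pc, unsigned int ticks)
{
        if (try_nofault_add(pc, ticks) != 0) {
                /* Defer: remember what is owed and request a retry at AST time. */
                td->owed_pc = pc;
                td->owed_ticks = ticks;
                td->flags |= SF_OWEUPC;
        }
}

static void
ast_handler(struct sthread *td)
{
        if (td->flags & SF_OWEUPC) {
                unsigned int t = td->owed_ticks;

                td->flags &= ~SF_OWEUPC;
                td->owed_ticks = 0;
                slow_add(td->owed_pc, t);
        }
}

int
main(void)
{
        struct sthread td = { 0, 0, 0 };

        intr_add(&td, 0x1234, 3);
        ast_handler(&td);
        return (0);
}
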
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 3ab4a33..b958ceb 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -244,7 +244,7 @@ static struct witness_order_list_entry order_lists[] = {
#endif
{ "clk", &lock_class_mtx_spin },
{ "mutex profiling lock", &lock_class_mtx_spin },
- { "zombie_thread_lock", &lock_class_mtx_spin },
+ { "kse zombie lock", &lock_class_mtx_spin },
{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
{ "MCA spin lock", &lock_class_mtx_spin },