author    | julian <julian@FreeBSD.org> | 2003-02-01 12:17:09 +0000
committer | julian <julian@FreeBSD.org> | 2003-02-01 12:17:09 +0000
commit    | e8efa7328e487806fb77d3ec54bf5fa5f8b017ed (patch)
tree      | 5ab8f2c0a0aaeb3da3779201a31e710dee48d388 /sys/kern
parent    | 1c5753d03f1205c6e2831f320c4007946a4b050f (diff)
download  | FreeBSD-src-e8efa7328e487806fb77d3ec54bf5fa5f8b017ed.zip
          | FreeBSD-src-e8efa7328e487806fb77d3ec54bf5fa5f8b017ed.tar.gz
Reversion of commit by Davidxu plus fixes since applied.
I'm not convinced there is anything major wrong with the patch but
them's the rules..
I am using my "David's mentor" hat to revert this as he's
offline for a while.
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/init_main.c     |    1
-rw-r--r-- | sys/kern/kern_clock.c    |   55
-rw-r--r-- | sys/kern/kern_exec.c     |    3
-rw-r--r-- | sys/kern/kern_exit.c     |   18
-rw-r--r-- | sys/kern/kern_fork.c     |    2
-rw-r--r-- | sys/kern/kern_kse.c      | 1163
-rw-r--r-- | sys/kern/kern_lock.c     |   40
-rw-r--r-- | sys/kern/kern_resource.c |  134
-rw-r--r-- | sys/kern/kern_sig.c      |    6
-rw-r--r-- | sys/kern/kern_switch.c   |  234
-rw-r--r-- | sys/kern/kern_thread.c   | 1163
-rw-r--r-- | sys/kern/subr_prof.c     |   69
-rw-r--r-- | sys/kern/subr_trap.c     |   56
-rw-r--r-- | sys/kern/subr_witness.c  |    2
14 files changed, 1393 insertions(+), 1553 deletions(-)
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index d068ae0..0a1a934 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -379,6 +379,7 @@ proc0_init(void *dummy __unused) ke->ke_oncpu = 0; ke->ke_state = KES_THREAD; ke->ke_thread = td; + ke->ke_owner = td; p->p_peers = 0; p->p_leader = p; diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index ab2c9ee..c5e1b4a 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -320,10 +320,6 @@ startprofclock(p) * cover psdiv, etc. as well. */ mtx_lock_spin(&sched_lock); - if (p->p_sflag & PS_STOPPROF) { - mtx_unlock_spin(&sched_lock); - return; - } if ((p->p_sflag & PS_PROFIL) == 0) { p->p_sflag |= PS_PROFIL; if (++profprocs == 1 && stathz != 0) { @@ -345,19 +341,9 @@ stopprofclock(p) { int s; - PROC_LOCK_ASSERT(p, MA_OWNED); - -retry: mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_PROFIL) { - if (p->p_profthreads) { - p->p_sflag |= PS_STOPPROF; - mtx_unlock_spin(&sched_lock); - msleep(&p->p_profthreads, &p->p_mtx, PPAUSE, - "stopprof", NULL); - goto retry; - } - p->p_sflag &= ~(PS_PROFIL|PS_STOPPROF); + p->p_sflag &= ~PS_PROFIL; if (--profprocs == 0 && stathz != 0) { s = splstatclock(); psdiv = pscnt = 1; @@ -377,7 +363,10 @@ retry: * this function's relationship to statclock. */ void -statclock_process(struct thread *td, register_t pc, int user) +statclock_process(ke, pc, user) + struct kse *ke; + register_t pc; + int user; { #ifdef GPROF struct gmonparam *g; @@ -387,31 +376,27 @@ statclock_process(struct thread *td, register_t pc, int user) long rss; struct rusage *ru; struct vmspace *vm; - struct proc *p = td->td_proc; + struct proc *p = ke->ke_proc; + struct thread *td = ke->ke_thread; /* current thread */ + KASSERT(ke == curthread->td_kse, ("statclock_process: td != curthread")); mtx_assert(&sched_lock, MA_OWNED); if (user) { /* * Came from user mode; CPU was in user state. * If this process is being profiled, record the tick. */ - if (p->p_sflag & PS_PROFIL) { - /* Only when thread is not in transition */ - if (!(td->td_flags & TDF_UPCALLING)) - addupc_intr(td, pc, 1); - } + if (p->p_sflag & PS_PROFIL) + addupc_intr(ke, pc, 1); if (pscnt < psdiv) return; /* * Charge the time as appropriate. */ if (p->p_flag & P_KSES) - thread_statclock(1); - /* - td->td_uticks++; - */ - p->p_uticks++; - if (td->td_ksegrp->kg_nice > NZERO) + thread_add_ticks_intr(1, 1); + ke->ke_uticks++; + if (ke->ke_ksegrp->kg_nice > NZERO) cp_time[CP_NICE]++; else cp_time[CP_USER]++; @@ -444,16 +429,12 @@ statclock_process(struct thread *td, register_t pc, int user) * in ``non-process'' (i.e., interrupt) work. */ if ((td->td_ithd != NULL) || td->td_intr_nesting_level >= 2) { - p->p_iticks++; - /* - td->td_iticks++; - */ + ke->ke_iticks++; cp_time[CP_INTR]++; } else { if (p->p_flag & P_KSES) - thread_statclock(0); - td->td_sticks++; - p->p_sticks++; + thread_add_ticks_intr(0, 1); + ke->ke_sticks++; if (p != PCPU_GET(idlethread)->td_proc) cp_time[CP_SYS]++; else @@ -461,7 +442,7 @@ statclock_process(struct thread *td, register_t pc, int user) } } - sched_clock(td); + sched_clock(ke->ke_thread); /* Update resource usage integrals and maximums. 
*/ if ((pstats = p->p_stats) != NULL && @@ -491,7 +472,7 @@ statclock(frame) mtx_lock_spin_flags(&sched_lock, MTX_QUIET); if (--pscnt == 0) pscnt = psdiv; - statclock_process(curthread, CLKF_PC(frame), CLKF_USERMODE(frame)); + statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame)); mtx_unlock_spin_flags(&sched_lock, MTX_QUIET); } diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index b6d77d2..33a0764 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -210,7 +210,10 @@ kern_execve(td, fname, argv, envv, mac_p) * so unset the associated flags and lose KSE mode. */ p->p_flag &= ~P_KSES; + td->td_flags &= ~TDF_UNBOUND; td->td_mailbox = NULL; + td->td_kse->ke_mailbox = NULL; + td->td_kse->ke_flags &= ~KEF_DOUPCALL; thread_single_end(); } p->p_flag |= P_INEXEC; diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index ce9a18c..0b2c2e8 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -147,7 +147,7 @@ exit1(td, rv) } /* - * XXXKSE: MUST abort all other threads before proceeding past here. + * XXXXKSE: MUST abort all other threads before proceeding past here. */ PROC_LOCK(p); if (p->p_flag & P_KSES) { @@ -156,6 +156,17 @@ exit1(td, rv) * if so, act apropriatly, (exit or suspend); */ thread_suspend_check(0); + /* + * Here is a trick.. + * We need to free up our KSE to process other threads + * so that we can safely set the UNBOUND flag + * (whether or not we have a mailbox) as we are NEVER + * going to return to the user. + * The flag will not be set yet if we are exiting + * because of a signal, pagefault, or similar + * (or even an exit(2) from the UTS). + */ + td->td_flags |= TDF_UNBOUND; /* * Kill off the other threads. This requires @@ -181,6 +192,7 @@ exit1(td, rv) * Turn off threading support. */ p->p_flag &= ~P_KSES; + td->td_flags &= ~TDF_UNBOUND; thread_single_end(); /* Don't need this any more. */ } /* @@ -225,10 +237,8 @@ exit1(td, rv) */ TAILQ_FOREACH(ep, &exit_list, next) (*ep->function)(p); - - PROC_LOCK(p); + stopprofclock(p); - PROC_UNLOCK(p); MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage), M_ZOMBIE, 0); diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 6c896eb..f84afa8 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -492,7 +492,9 @@ again: /* Set up the thread as an active thread (as if runnable). */ ke2->ke_state = KES_THREAD; ke2->ke_thread = td2; + ke2->ke_owner = td2; td2->td_kse = ke2; + td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */ /* * Duplicate sub-structures as needed. 
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index 0585172..78bce30 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -63,7 +63,6 @@ static uma_zone_t ksegrp_zone; static uma_zone_t kse_zone; static uma_zone_t thread_zone; -static uma_zone_t upcall_zone; /* DEBUG ONLY */ SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); @@ -79,52 +78,16 @@ static int max_groups_per_proc = 5; SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW, &max_groups_per_proc, 0, "Limit on thread groups per proc"); -static int virtual_cpu; - #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) -TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); +struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses); TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps); -TAILQ_HEAD(, kse_upcall) zombie_upcalls = - TAILQ_HEAD_INITIALIZER(zombie_upcalls); -struct mtx kse_zombie_lock; -MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN); +struct mtx zombie_thread_lock; +MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock, + "zombie_thread_lock", MTX_SPIN); static void kse_purge(struct proc *p, struct thread *td); -static void kse_purge_group(struct thread *td); -static int thread_update_usr_ticks(struct thread *td); -static int thread_update_sys_ticks(struct thread *td); -static void thread_alloc_spare(struct thread *td, struct thread *spare); - -static int -sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) -{ - int error, new_val; - int def_val; - -#ifdef SMP - def_val = mp_ncpus; -#else - def_val = 1; -#endif - if (virtual_cpu == 0) - new_val = def_val; - else - new_val = virtual_cpu; - error = sysctl_handle_int(oidp, &new_val, 0, req); - if (error != 0 || req->newptr == NULL) - return (error); - if (new_val < 0) - return (EINVAL); - virtual_cpu = new_val; - return (0); -} - -/* DEBUG ONLY */ -SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, - 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", - "debug virtual cpus"); /* * Prepare a thread for use. @@ -136,6 +99,7 @@ thread_ctor(void *mem, int size, void *arg) td = (struct thread *)mem; td->td_state = TDS_INACTIVE; + td->td_flags |= TDF_UNBOUND; } /* @@ -197,7 +161,6 @@ thread_fini(void *mem, int size) td = (struct thread *)mem; pmap_dispose_thread(td); } - /* * Initialize type-stable parts of a kse (when newly created). */ @@ -209,7 +172,6 @@ kse_init(void *mem, int size) ke = (struct kse *)mem; ke->ke_sched = (struct ke_sched *)&ke[1]; } - /* * Initialize type-stable parts of a ksegrp (when newly created). */ @@ -223,7 +185,7 @@ ksegrp_init(void *mem, int size) } /* - * KSE is linked into kse group. + * KSE is linked onto the idle queue. 
*/ void kse_link(struct kse *ke, struct ksegrp *kg) @@ -232,12 +194,12 @@ kse_link(struct kse *ke, struct ksegrp *kg) TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist); kg->kg_kses++; - ke->ke_state = KES_UNQUEUED; + ke->ke_state = KES_UNQUEUED; ke->ke_proc = p; ke->ke_ksegrp = kg; + ke->ke_owner = NULL; ke->ke_thread = NULL; - ke->ke_oncpu = NOCPU; - ke->ke_flags = 0; + ke->ke_oncpu = NOCPU; } void @@ -247,13 +209,11 @@ kse_unlink(struct kse *ke) mtx_assert(&sched_lock, MA_OWNED); kg = ke->ke_ksegrp; + TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); - if (ke->ke_state == KES_IDLE) { - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses--; + if (--kg->kg_kses == 0) { + ksegrp_unlink(kg); } - if (--kg->kg_kses == 0) - ksegrp_unlink(kg); /* * Aggregate stats from the KSE */ @@ -268,20 +228,15 @@ ksegrp_link(struct ksegrp *kg, struct proc *p) TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */ - TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */ - TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */ - kg->kg_proc = p; - /* - * the following counters are in the -zero- section - * and may not need clearing - */ + TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */ + kg->kg_proc = p; +/* the following counters are in the -zero- section and may not need clearing */ kg->kg_numthreads = 0; - kg->kg_runnable = 0; - kg->kg_kses = 0; - kg->kg_runq_kses = 0; /* XXXKSE change name */ - kg->kg_idle_kses = 0; - kg->kg_numupcalls = 0; - /* link it in now that it's consistent */ + kg->kg_runnable = 0; + kg->kg_kses = 0; + kg->kg_loan_kses = 0; + kg->kg_runq_kses = 0; /* XXXKSE change name */ +/* link it in now that it's consistent */ p->p_numksegrps++; TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp); } @@ -292,11 +247,9 @@ ksegrp_unlink(struct ksegrp *kg) struct proc *p; mtx_assert(&sched_lock, MA_OWNED); - KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads")); - KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses")); - KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls")); - p = kg->kg_proc; + KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)), + ("kseg_unlink: residual threads or KSEs")); TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); p->p_numksegrps--; /* @@ -305,63 +258,13 @@ ksegrp_unlink(struct ksegrp *kg) ksegrp_stash(kg); } -struct kse_upcall * -upcall_alloc(void) -{ - struct kse_upcall *ku; - - ku = uma_zalloc(upcall_zone, 0); - bzero(ku, sizeof(*ku)); - return (ku); -} - -void -upcall_free(struct kse_upcall *ku) -{ - - uma_zfree(upcall_zone, ku); -} - -void -upcall_link(struct kse_upcall *ku, struct ksegrp *kg) -{ - - mtx_assert(&sched_lock, MA_OWNED); - TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link); - ku->ku_ksegrp = kg; - kg->kg_numupcalls++; -} - -void -upcall_unlink(struct kse_upcall *ku) -{ - struct ksegrp *kg = ku->ku_ksegrp; - - mtx_assert(&sched_lock, MA_OWNED); - KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__)); - TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link); - kg->kg_numupcalls--; - upcall_stash(ku); -} - -void -upcall_remove(struct thread *td) -{ - - if (td->td_upcall) { - td->td_upcall->ku_owner = NULL; - upcall_unlink(td->td_upcall); - td->td_upcall = 0; - } -} - /* - * For a newly created process, - * link up all the structures and its initial threads etc. + * for a newly created process, + * link up a the structure and its initial threads etc. 
*/ void proc_linkup(struct proc *p, struct ksegrp *kg, - struct kse *ke, struct thread *td) + struct kse *ke, struct thread *td) { TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */ @@ -375,11 +278,6 @@ proc_linkup(struct proc *p, struct ksegrp *kg, thread_link(td, kg); } -/* -struct kse_thr_interrupt_args { - struct kse_thr_mailbox * tmbx; -}; -*/ int kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) { @@ -387,7 +285,10 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) struct thread *td2; p = td->td_proc; - if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL)) + /* KSE-enabled processes only, please. */ + if (!(p->p_flag & P_KSES)) + return (EINVAL); + if (uap->tmbx == NULL) return (EINVAL); mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td2) { @@ -398,7 +299,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) cv_abort(td2); else abortsleep(td2); - } + } mtx_unlock_spin(&sched_lock); return (0); } @@ -407,11 +308,6 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) return (ESRCH); } -/* -struct kse_exit_args { - register_t dummy; -}; -*/ int kse_exit(struct thread *td, struct kse_exit_args *uap) { @@ -420,35 +316,27 @@ kse_exit(struct thread *td, struct kse_exit_args *uap) struct kse *ke; p = td->td_proc; - /* - * Only UTS can call the syscall and current group - * should be a threaded group. - */ - if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0)) + /* Only UTS can do the syscall */ + if (!(p->p_flag & P_KSES) || (td->td_mailbox != NULL)) return (EINVAL); - KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__)); - kg = td->td_ksegrp; - /* Serialize removing upcall */ + /* serialize killing kse */ PROC_LOCK(p); mtx_lock_spin(&sched_lock); - if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) { + if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (EDEADLK); } ke = td->td_kse; - upcall_remove(td); if (p->p_numthreads == 1) { - kse_purge(p, td); + ke->ke_flags &= ~KEF_DOUPCALL; + ke->ke_mailbox = NULL; p->p_flag &= ~P_KSES; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { - if (kg->kg_numthreads == 1) { /* Shutdown a group */ - kse_purge_group(td); - ke->ke_flags |= KEF_EXIT; - } + ke->ke_flags |= KEF_EXIT; thread_exit(); /* NOTREACHED */ } @@ -457,15 +345,10 @@ kse_exit(struct thread *td, struct kse_exit_args *uap) /* * Either becomes an upcall or waits for an awakening event and - * then becomes an upcall. Only error cases return. + * THEN becomes an upcall. Only error cases return. */ -/* -struct kse_release_args { - register_t dummy; -}; -*/ int -kse_release(struct thread *td, struct kse_release_args *uap) +kse_release(struct thread * td, struct kse_release_args * uap) { struct proc *p; struct ksegrp *kg; @@ -473,25 +356,28 @@ kse_release(struct thread *td, struct kse_release_args *uap) p = td->td_proc; kg = td->td_ksegrp; /* - * Only UTS can call the syscall and current group - * should be a threaded group. - */ - if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0)) + * kse must have a mailbox ready for upcall, and only UTS can + * do the syscall. + */ + if (!(p->p_flag & P_KSES) || + (td->td_mailbox != NULL) || + (td->td_kse->ke_mailbox == NULL)) return (EINVAL); - KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__)); PROC_LOCK(p); mtx_lock_spin(&sched_lock); /* Change OURSELF to become an upcall. 
*/ - td->td_flags = TDF_UPCALLING; - if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 && + td->td_flags = TDF_UPCALLING; /* BOUND */ + if (!(td->td_kse->ke_flags & (KEF_DOUPCALL|KEF_ASTPENDING)) && (kg->kg_completed == NULL)) { - kg->kg_upsleeps++; - mtx_unlock_spin(&sched_lock); - msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause", - NULL); - kg->kg_upsleeps--; + /* + * The KSE will however be lendable. + */ + TD_SET_IDLE(td); PROC_UNLOCK(p); + p->p_stats->p_ru.ru_nvcsw++; + mi_switch(); + mtx_unlock_spin(&sched_lock); } else { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); @@ -506,59 +392,61 @@ int kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) { struct proc *p; + struct kse *ke; struct ksegrp *kg; - struct kse_upcall *ku; struct thread *td2; p = td->td_proc; td2 = NULL; - ku = NULL; /* KSE-enabled processes only, please. */ if (!(p->p_flag & P_KSES)) - return (EINVAL); + return EINVAL; - PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (uap->mbx) { FOREACH_KSEGRP_IN_PROC(p, kg) { - FOREACH_UPCALL_IN_GROUP(kg, ku) { - if (ku->ku_mailbox == uap->mbx) - break; + FOREACH_KSE_IN_GROUP(kg, ke) { + if (ke->ke_mailbox != uap->mbx) + continue; + td2 = ke->ke_owner; + KASSERT((td2 != NULL),("KSE with no owner")); + break; } - if (ku) + if (td2) { break; + } } } else { + /* + * look for any idle KSE to resurrect. + */ kg = td->td_ksegrp; - if (kg->kg_upsleeps) { - wakeup_one(&kg->kg_completed); - mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); - return (0); + FOREACH_KSE_IN_GROUP(kg, ke) { + td2 = ke->ke_owner; + KASSERT((td2 != NULL),("KSE with no owner2")); + if (TD_IS_IDLE(td2)) + break; } - ku = TAILQ_FIRST(&kg->kg_upcalls); + KASSERT((td2 != NULL), ("no thread(s)")); } - if (ku) { - if ((td2 = ku->ku_owner) == NULL) { - panic("%s: no owner", __func__); - } else if (TD_ON_SLEEPQ(td2) && - (td2->td_wchan == &kg->kg_completed)) { - abortsleep(td2); - } else { - ku->ku_flags |= KUF_DOUPCALL; + if (td2) { + if (TD_IS_IDLE(td2)) { + TD_CLR_IDLE(td2); + setrunnable(td2); + } else if (td != td2) { + /* guarantee do an upcall ASAP */ + td2->td_kse->ke_flags |= KEF_DOUPCALL; } mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); return (0); } mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); return (ESRCH); } /* * No new KSEG: first call: use current KSE, don't schedule an upcall - * All other situations, do allocate max new KSEs and schedule an upcall. + * All other situations, do allocate a new KSE and schedule an upcall on it. */ /* struct kse_create_args { struct kse_mailbox *mbx; @@ -568,140 +456,112 @@ int kse_create(struct thread *td, struct kse_create_args *uap) { struct kse *newke; + struct kse *ke; struct ksegrp *newkg; struct ksegrp *kg; struct proc *p; struct kse_mailbox mbx; - struct kse_upcall *newku; - int err, ncpus; + int err; p = td->td_proc; if ((err = copyin(uap->mbx, &mbx, sizeof(mbx)))) return (err); - /* Too bad, why hasn't kernel always a cpu counter !? */ -#ifdef SMP - ncpus = mp_ncpus; -#else - ncpus = 1; -#endif - if (thread_debug && virtual_cpu != 0) - ncpus = virtual_cpu; - - /* Easier to just set it than to test and set */ - p->p_flag |= P_KSES; + p->p_flag |= P_KSES; /* easier to just set it than to test and set */ kg = td->td_ksegrp; if (uap->newgroup) { - /* Have race condition but it is cheap */ if (p->p_numksegrps >= max_groups_per_proc) return (EPROCLIM); /* * If we want a new KSEGRP it doesn't matter whether * we have already fired up KSE mode before or not. - * We put the process in KSE mode and create a new KSEGRP. 
+ * We put the process in KSE mode and create a new KSEGRP + * and KSE. If our KSE has not got a mailbox yet then + * that doesn't matter, just leave it that way. It will + * ensure that this thread stay BOUND. It's possible + * that the call came form a threaded library and the main + * program knows nothing of threads. */ newkg = ksegrp_alloc(); bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp, - kg_startzero, kg_endzero)); + kg_startzero, kg_endzero)); bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy)); - mtx_lock_spin(&sched_lock); - ksegrp_link(newkg, p); - if (p->p_numksegrps >= max_groups_per_proc) { - ksegrp_unlink(newkg); - mtx_unlock_spin(&sched_lock); - return (EPROCLIM); - } - mtx_unlock_spin(&sched_lock); + newke = kse_alloc(); } else { - newkg = kg; - } - - /* - * Creating upcalls more than number of physical cpu does - * not help performance. - */ - if (newkg->kg_numupcalls >= ncpus) - return (EPROCLIM); - - if (newkg->kg_numupcalls == 0) { - /* - * Initialize KSE group, optimized for MP. - * Create KSEs as many as physical cpus, this increases - * concurrent even if userland is not MP safe and can only run - * on single CPU (for early version of libpthread, it is true). - * In ideal world, every physical cpu should execute a thread. - * If there is enough KSEs, threads in kernel can be - * executed parallel on different cpus with full speed, - * Concurrent in kernel shouldn't be restricted by number of - * upcalls userland provides. - * Adding more upcall structures only increases concurrent - * in userland. - * Highest performance configuration is: - * N kses = N upcalls = N phyiscal cpus + /* + * Otherwise, if we have already set this KSE + * to have a mailbox, we want to make another KSE here, + * but only if there are not already the limit, which + * is 1 per CPU max. + * + * If the current KSE doesn't have a mailbox we just use it + * and give it one. + * + * Because we don't like to access + * the KSE outside of schedlock if we are UNBOUND, + * (because it can change if we are preempted by an interrupt) + * we can deduce it as having a mailbox if we are UNBOUND, + * and only need to actually look at it if we are BOUND, + * which is safe. 
*/ - while (newkg->kg_kses < ncpus) { + if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) { + if (thread_debug == 0) { /* if debugging, allow more */ +#ifdef SMP + if (kg->kg_kses > mp_ncpus) +#endif + return (EPROCLIM); + } newke = kse_alloc(); - bzero(&newke->ke_startzero, RANGEOF(struct kse, - ke_startzero, ke_endzero)); + } else { + newke = NULL; + } + newkg = NULL; + } + if (newke) { + bzero(&newke->ke_startzero, RANGEOF(struct kse, + ke_startzero, ke_endzero)); #if 0 - mtx_lock_spin(&sched_lock); - bcopy(&ke->ke_startcopy, &newke->ke_startcopy, - RANGEOF(struct kse, ke_startcopy, ke_endcopy)); - mtx_unlock_spin(&sched_lock); + bcopy(&ke->ke_startcopy, &newke->ke_startcopy, + RANGEOF(struct kse, ke_startcopy, ke_endcopy)); #endif - mtx_lock_spin(&sched_lock); - kse_link(newke, newkg); - if (p->p_sflag & PS_NEEDSIGCHK) - newke->ke_flags |= KEF_ASTPENDING; - /* Add engine */ - kse_reassign(newke); - mtx_unlock_spin(&sched_lock); + /* For the first call this may not have been set */ + if (td->td_standin == NULL) { + td->td_standin = thread_alloc(); } - } - newku = upcall_alloc(); - newku->ku_mailbox = uap->mbx; - newku->ku_func = mbx.km_func; - bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); - - /* For the first call this may not have been set */ - if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); - - mtx_lock_spin(&sched_lock); - if (newkg->kg_numupcalls >= ncpus) { - upcall_free(newku); + mtx_lock_spin(&sched_lock); + if (newkg) { + if (p->p_numksegrps >= max_groups_per_proc) { + mtx_unlock_spin(&sched_lock); + ksegrp_free(newkg); + kse_free(newke); + return (EPROCLIM); + } + ksegrp_link(newkg, p); + } + else + newkg = kg; + kse_link(newke, newkg); + if (p->p_sflag & PS_NEEDSIGCHK) + newke->ke_flags |= KEF_ASTPENDING; + newke->ke_mailbox = uap->mbx; + newke->ke_upcall = mbx.km_func; + bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t)); + thread_schedule_upcall(td, newke); mtx_unlock_spin(&sched_lock); - return (EPROCLIM); - } - upcall_link(newku, newkg); - - /* - * Each upcall structure has an owner thread, find which - * one owns it. - */ - if (uap->newgroup) { - /* - * Because new ksegrp hasn't thread, - * create an initial upcall thread to own it. - */ - thread_schedule_upcall(td, newku); } else { /* - * If current thread hasn't an upcall structure, - * just assign the upcall to it. + * If we didn't allocate a new KSE then the we are using + * the exisiting (BOUND) kse. */ - if (td->td_upcall == NULL) { - newku->ku_owner = td; - td->td_upcall = newku; - } else { - /* - * Create a new upcall thread to own it. - */ - thread_schedule_upcall(td, newku); - } + ke = td->td_kse; + ke->ke_mailbox = uap->mbx; + ke->ke_upcall = mbx.km_func; + bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t)); } - mtx_unlock_spin(&sched_lock); + /* + * Fill out the KSE-mode specific fields of the new kse. 
+ */ return (0); } @@ -782,8 +642,6 @@ threadinit(void) kse_zone = uma_zcreate("KSE", sched_sizeof_kse(), NULL, NULL, kse_init, NULL, UMA_ALIGN_CACHE, 0); - upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), - NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); } /* @@ -792,9 +650,9 @@ threadinit(void) void thread_stash(struct thread *td) { - mtx_lock_spin(&kse_zombie_lock); + mtx_lock_spin(&zombie_thread_lock); TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); } /* @@ -803,21 +661,9 @@ thread_stash(struct thread *td) void kse_stash(struct kse *ke) { - mtx_lock_spin(&kse_zombie_lock); + mtx_lock_spin(&zombie_thread_lock); TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq); - mtx_unlock_spin(&kse_zombie_lock); -} - -/* - * Stash an embarasingly extra upcall into the zombie upcall queue. - */ - -void -upcall_stash(struct kse_upcall *ku) -{ - mtx_lock_spin(&kse_zombie_lock); - TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); } /* @@ -826,13 +672,13 @@ upcall_stash(struct kse_upcall *ku) void ksegrp_stash(struct ksegrp *kg) { - mtx_lock_spin(&kse_zombie_lock); + mtx_lock_spin(&zombie_thread_lock); TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); } /* - * Reap zombie kse resource. + * Reap zombie threads. */ void thread_reap(void) @@ -840,34 +686,27 @@ thread_reap(void) struct thread *td_first, *td_next; struct kse *ke_first, *ke_next; struct ksegrp *kg_first, * kg_next; - struct kse_upcall *ku_first, *ku_next; /* - * Don't even bother to lock if none at this instant, - * we really don't care about the next instant.. + * don't even bother to lock if none at this instant + * We really don't care about the next instant.. */ if ((!TAILQ_EMPTY(&zombie_threads)) || (!TAILQ_EMPTY(&zombie_kses)) - || (!TAILQ_EMPTY(&zombie_ksegrps)) - || (!TAILQ_EMPTY(&zombie_upcalls))) { - mtx_lock_spin(&kse_zombie_lock); + || (!TAILQ_EMPTY(&zombie_ksegrps))) { + mtx_lock_spin(&zombie_thread_lock); td_first = TAILQ_FIRST(&zombie_threads); ke_first = TAILQ_FIRST(&zombie_kses); kg_first = TAILQ_FIRST(&zombie_ksegrps); - ku_first = TAILQ_FIRST(&zombie_upcalls); if (td_first) TAILQ_INIT(&zombie_threads); if (ke_first) TAILQ_INIT(&zombie_kses); if (kg_first) TAILQ_INIT(&zombie_ksegrps); - if (ku_first) - TAILQ_INIT(&zombie_upcalls); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); while (td_first) { td_next = TAILQ_NEXT(td_first, td_runq); - if (td_first->td_ucred) - crfree(td_first->td_ucred); thread_free(td_first); td_first = td_next; } @@ -881,11 +720,6 @@ thread_reap(void) ksegrp_free(kg_first); kg_first = kg_next; } - while (ku_first) { - ku_next = TAILQ_NEXT(ku_first, ku_link); - upcall_free(ku_first); - ku_first = ku_next; - } } } @@ -958,14 +792,20 @@ thread_export_context(struct thread *td) struct ksegrp *kg; uintptr_t mbx; void *addr; - int error,temp; + int error; ucontext_t uc; + uint temp; p = td->td_proc; kg = td->td_ksegrp; /* Export the user/machine context. 
*/ - addr = (void *)(&td->td_mailbox->tm_context); +#if 0 + addr = (caddr_t)td->td_mailbox + + offsetof(struct kse_thr_mailbox, tm_context); +#else /* if user pointer arithmetic is valid in the kernel */ + addr = (void *)(&td->td_mailbox->tm_context); +#endif error = copyin(addr, &uc, sizeof(ucontext_t)); if (error) goto bad; @@ -975,14 +815,13 @@ thread_export_context(struct thread *td) if (error) goto bad; - /* Exports clock ticks in kernel mode */ - addr = (caddr_t)(&td->td_mailbox->tm_sticks); - temp = fuword(addr) + td->td_usticks; - if (suword(addr, temp)) - goto bad; - - /* Get address in latest mbox of list pointer */ + /* get address in latest mbox of list pointer */ +#if 0 + addr = (caddr_t)td->td_mailbox + + offsetof(struct kse_thr_mailbox , tm_next); +#else /* if user pointer arithmetic is valid in the kernel */ addr = (void *)(&td->td_mailbox->tm_next); +#endif /* * Put the saved address of the previous first * entry into this one @@ -996,43 +835,42 @@ thread_export_context(struct thread *td) PROC_LOCK(p); if (mbx == (uintptr_t)kg->kg_completed) { kg->kg_completed = td->td_mailbox; - /* - * The thread context may be taken away by - * other upcall threads when we unlock - * process lock. it's no longer valid to - * use it again in any other places. - */ - td->td_mailbox = NULL; PROC_UNLOCK(p); break; } PROC_UNLOCK(p); } - td->td_usticks = 0; + addr = (caddr_t)td->td_mailbox + + offsetof(struct kse_thr_mailbox, tm_sticks); + temp = fuword(addr) + td->td_usticks; + if (suword(addr, temp)) + goto bad; return (0); bad: PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); - /* The mailbox is bad, don't use it */ - td->td_mailbox = NULL; - td->td_usticks = 0; return (error); } /* * Take the list of completed mailboxes for this KSEGRP and put them on this - * upcall's mailbox as it's the next one going up. + * KSE's mailbox as it's the next one going up. */ static int -thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) +thread_link_mboxes(struct ksegrp *kg, struct kse *ke) { struct proc *p = kg->kg_proc; void *addr; uintptr_t mbx; - addr = (void *)(&ku->ku_mailbox->km_completed); +#if 0 + addr = (caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_completed); +#else /* if user pointer arithmetic is valid in the kernel */ + addr = (void *)(&ke->ke_mailbox->km_completed); +#endif for (;;) { mbx = (uintptr_t)kg->kg_completed; if (suword(addr, mbx)) { @@ -1057,91 +895,69 @@ thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) * This function should be called at statclock interrupt time */ int -thread_statclock(int user) +thread_add_ticks_intr(int user, uint ticks) { struct thread *td = curthread; + struct kse *ke = td->td_kse; - if (td->td_ksegrp->kg_numupcalls == 0) - return (-1); + if (ke->ke_mailbox == NULL) + return -1; if (user) { /* Current always do via ast() */ - td->td_flags |= (TDF_ASTPENDING|TDF_USTATCLOCK); - td->td_uuticks++; + ke->ke_flags |= KEF_ASTPENDING; + ke->ke_uuticks += ticks; } else { if (td->td_mailbox != NULL) - td->td_usticks++; - else { - /* XXXKSE - * We will call thread_user_enter() for every - * kernel entry in future, so if the thread mailbox - * is NULL, it must be a UTS kernel, don't account - * clock ticks for it. 
- */ - } + td->td_usticks += ticks; + else + ke->ke_usticks += ticks; } - return (0); + return 0; } -/* - * Export user mode state clock ticks - */ static int -thread_update_usr_ticks(struct thread *td) +thread_update_uticks(void) { + struct thread *td = curthread; struct proc *p = td->td_proc; + struct kse *ke = td->td_kse; struct kse_thr_mailbox *tmbx; - struct kse_upcall *ku; caddr_t addr; - uint uticks; + uint uticks, sticks; - if ((ku = td->td_upcall) == NULL) - return (-1); - - tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); + if (ke->ke_mailbox == NULL) + return 0; + + uticks = ke->ke_uuticks; + ke->ke_uuticks = 0; + sticks = ke->ke_usticks; + ke->ke_usticks = 0; +#if 0 + tmbx = (void *)fuword((caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_curthread)); +#else /* if user pointer arithmetic is ok in the kernel */ + tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread); +#endif if ((tmbx == NULL) || (tmbx == (void *)-1)) - return (-1); - uticks = td->td_uuticks; - td->td_uuticks = 0; + return 0; if (uticks) { - addr = (caddr_t)&tmbx->tm_uticks; + addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks); uticks += fuword(addr); - if (suword(addr, uticks)) { - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - return (-2); - } + if (suword(addr, uticks)) + goto bad; } - return (0); -} - -/* - * Export kernel mode state clock ticks - */ - -static int -thread_update_sys_ticks(struct thread *td) -{ - struct proc *p = td->td_proc; - caddr_t addr; - int sticks; - - if (td->td_mailbox == NULL) - return (-1); - if (td->td_usticks == 0) - return (0); - addr = (caddr_t)&td->td_mailbox->tm_sticks; - sticks = fuword(addr); - /* XXXKSE use XCHG instead */ - sticks += td->td_usticks; - td->td_usticks = 0; - if (suword(addr, sticks)) { - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - return (-2); + if (sticks) { + addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks); + sticks += fuword(addr); + if (suword(addr, sticks)) + goto bad; } - return (0); + return 0; +bad: + PROC_LOCK(p); + psignal(p, SIGSEGV); + PROC_UNLOCK(p); + return -1; } /* @@ -1197,7 +1013,6 @@ thread_exit(void) p->p_numthreads--; TAILQ_REMOVE(&kg->kg_threads, td, td_kglist); kg->kg_numthreads--; - /* * The test below is NOT true if we are the * sole exiting thread. P_STOPPED_SNGL is unset @@ -1209,28 +1024,25 @@ thread_exit(void) } } - /* - * Because each upcall structure has an owner thread, - * owner thread exits only when process is in exiting - * state, so upcall to userland is no longer needed, - * deleting upcall structure is safe here. - * So when all threads in a group is exited, all upcalls - * in the group should be automatically freed. - */ - if (td->td_upcall) - upcall_remove(td); - + /* Reassign this thread's KSE. */ ke->ke_state = KES_UNQUEUED; - ke->ke_thread = NULL; + /* * Decide what to do with the KSE attached to this thread. + * XXX Possibly kse_reassign should do both cases as it already + * does some of this. 
*/ - if (ke->ke_flags & KEF_EXIT) + if (ke->ke_flags & KEF_EXIT) { + KASSERT((ke->ke_owner == td), + ("thread_exit: KSE exiting with non-owner thread")); + ke->ke_thread = NULL; + td->td_kse = NULL; kse_unlink(ke); - else + } else { + TD_SET_EXITING(td); /* definitly not runnable */ kse_reassign(ke); + } PROC_UNLOCK(p); - td->td_kse = NULL; td->td_state = TDS_INACTIVE; td->td_proc = NULL; td->td_ksegrp = NULL; @@ -1278,12 +1090,10 @@ thread_link(struct thread *td, struct ksegrp *kg) struct proc *p; p = kg->kg_proc; - td->td_state = TDS_INACTIVE; - td->td_proc = p; - td->td_ksegrp = kg; - td->td_last_kse = NULL; - td->td_flags = 0; - td->td_kse = NULL; + td->td_state = TDS_INACTIVE; + td->td_proc = p; + td->td_ksegrp = kg; + td->td_last_kse = NULL; LIST_INIT(&td->td_contested); callout_init(&td->td_slpcallout, 1); @@ -1291,139 +1101,116 @@ thread_link(struct thread *td, struct ksegrp *kg) TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist); p->p_numthreads++; kg->kg_numthreads++; + td->td_kse = NULL; } -/* - * Purge a ksegrp resource. When a ksegrp is preparing to - * exit, it calls this function. - */ -void -kse_purge_group(struct thread *td) -{ - struct ksegrp *kg; - struct kse *ke; - - kg = td->td_ksegrp; - KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__)); - while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { - KASSERT(ke->ke_state == KES_IDLE, - ("%s: wrong idle KSE state", __func__)); - kse_unlink(ke); - } - KASSERT((kg->kg_kses == 1), - ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses)); - KASSERT((kg->kg_numupcalls == 0), - ("%s: ksegrp still has %d upcall datas", - __func__, kg->kg_numupcalls)); -} - -/* - * Purge a process's KSE resource. When a process is preparing to - * exit, it calls kse_purge to release any extra KSE resources in - * the process. - */ void kse_purge(struct proc *p, struct thread *td) { + /* XXXKSE think about this.. + may need to wake up threads on loan queue. */ struct ksegrp *kg; - struct kse *ke; KASSERT(p->p_numthreads == 1, ("bad thread number")); mtx_lock_spin(&sched_lock); while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) { TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); p->p_numksegrps--; - /* - * There is no ownership for KSE, after all threads - * in the group exited, it is possible that some KSEs - * were left in idle queue, gc them now. - */ - while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { - KASSERT(ke->ke_state == KES_IDLE, - ("%s: wrong idle KSE state", __func__)); - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses--; - TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); - kg->kg_kses--; - kse_stash(ke); - } KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) || - ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), - ("ksegrp has wrong kg_kses: %d", kg->kg_kses)); - KASSERT((kg->kg_numupcalls == 0), - ("%s: ksegrp still has %d upcall datas", - __func__, kg->kg_numupcalls)); - - if (kg != td->td_ksegrp) + ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), + ("wrong kg_kses")); + if (kg != td->td_ksegrp) { ksegrp_stash(kg); + } } TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp); p->p_numksegrps++; mtx_unlock_spin(&sched_lock); } -/* - * This function is intended to be used to initialize a spare thread - * for upcall. Initialize thread's large data area outside sched_lock - * for thread_schedule_upcall(). 
- */ -void -thread_alloc_spare(struct thread *td, struct thread *spare) -{ - if (td->td_standin) - return; - if (spare == NULL) - spare = thread_alloc(); - td->td_standin = spare; - bzero(&spare->td_startzero, - (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); - spare->td_proc = td->td_proc; - /* Setup PCB and fork address */ - cpu_set_upcall(spare, td->td_pcb); - /* - * XXXKSE do we really need this? (default values for the - * frame). - */ - bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe)); - spare->td_ucred = crhold(td->td_ucred); -} /* * Create a thread and schedule it for upcall on the KSE given. * Use our thread's standin so that we don't have to allocate one. */ struct thread * -thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) +thread_schedule_upcall(struct thread *td, struct kse *ke) { struct thread *td2; + int newkse; mtx_assert(&sched_lock, MA_OWNED); + newkse = (ke != td->td_kse); /* - * Schedule an upcall thread on specified kse_upcall, - * the kse_upcall must be free. - * td must have a spare thread. + * If the owner and kse are BOUND then that thread is planning to + * go to userland and upcalls are not expected. So don't make one. + * If it is not bound then make it so with the spare thread + * anf then borrw back the KSE to allow us to complete some in-kernel + * work. When we complete, the Bound thread will have the chance to + * complete. This thread will sleep as planned. Hopefully there will + * eventually be un unbound thread that can be converted to an + * upcall to report the completion of this thread. */ - KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); + if ((td2 = td->td_standin) != NULL) { td->td_standin = NULL; } else { - panic("no reserve thread when scheduling an upcall"); + if (newkse) + panic("no reserve thread when called with a new kse"); + /* + * If called from (e.g.) sleep and we do not have + * a reserve thread, then we've used it, so do not + * create an upcall. + */ return (NULL); } CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", td2, td->td_proc->p_pid, td->td_proc->p_comm); + bzero(&td2->td_startzero, + (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td2->td_startcopy, (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); - thread_link(td2, ku->ku_ksegrp); - /* Let the new thread become owner of the upcall */ - ku->ku_owner = td2; - td2->td_upcall = ku; - td2->td_flags = TDF_UPCALLING; - td2->td_kse = NULL; - td2->td_state = TDS_CAN_RUN; + thread_link(td2, ke->ke_ksegrp); + cpu_set_upcall(td2, td->td_pcb); + + /* + * XXXKSE do we really need this? (default values for the + * frame). + */ + bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe)); + + /* + * Bind the new thread to the KSE, + * and if it's our KSE, lend it back to ourself + * so we can continue running. + */ + td2->td_ucred = crhold(td->td_ucred); + td2->td_flags = TDF_UPCALLING; /* note: BOUND */ + td2->td_kse = ke; + td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; - setrunqueue(td2); + ke->ke_owner = td2; + /* + * If called from kse_reassign(), we are working on the current + * KSE so fake that we borrowed it. If called from + * kse_create(), don't, as we have a new kse too. + */ + if (!newkse) { + /* + * This thread will be scheduled when the current thread + * blocks, exits or tries to enter userspace, (which ever + * happens first). When that happens the KSe will "revert" + * to this thread in a BOUND manner. 
Since we are called + * from msleep() this is going to be "very soon" in nearly + * all cases. + */ + TD_SET_LOAN(td2); + } else { + ke->ke_thread = td2; + ke->ke_state = KES_THREAD; + setrunqueue(td2); + } return (td2); /* bogus.. should be a void function */ } @@ -1435,16 +1222,14 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) struct thread * signal_upcall(struct proc *p, int sig) { -#if 0 struct thread *td, *td2; struct kse *ke; sigset_t ss; int error; -#endif PROC_LOCK_ASSERT(p, MA_OWNED); return (NULL); -#if 0 + td = FIRST_THREAD_IN_PROC(p); ke = td->td_kse; PROC_UNLOCK(p); @@ -1459,31 +1244,28 @@ return (NULL); if (error) return (NULL); if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); + td->td_standin = thread_alloc(); mtx_lock_spin(&sched_lock); td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */ mtx_unlock_spin(&sched_lock); return (td2); -#endif } /* - * Setup done on the thread when it enters the kernel. + * setup done on the thread when it enters the kernel. * XXXKSE Presently only for syscalls but eventually all kernel entries. */ void thread_user_enter(struct proc *p, struct thread *td) { - struct ksegrp *kg; - struct kse_upcall *ku; + struct kse *ke; - kg = td->td_ksegrp; /* * First check that we shouldn't just abort. * But check if we are the single thread first! * XXX p_singlethread not locked, but should be safe. */ - if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { + if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) { PROC_LOCK(p); mtx_lock_spin(&sched_lock); thread_exit(); @@ -1496,37 +1278,43 @@ thread_user_enter(struct proc *p, struct thread *td) * possibility that we could do this lazily (in kse_reassign()), * but for now do it every time. */ - kg = td->td_ksegrp; - if (kg->kg_numupcalls) { - ku = td->td_upcall; - KASSERT(ku, ("%s: no upcall owned", __func__)); - KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); + ke = td->td_kse; + td->td_flags &= ~TDF_UNBOUND; + if (ke->ke_mailbox != NULL) { +#if 0 + td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_curthread)); +#else /* if user pointer arithmetic is ok in the kernel */ td->td_mailbox = - (void *)fuword((void *)&ku->ku_mailbox->km_curthread); + (void *)fuword( (void *)&ke->ke_mailbox->km_curthread); +#endif if ((td->td_mailbox == NULL) || (td->td_mailbox == (void *)-1)) { - /* Don't schedule upcall when blocked */ - td->td_mailbox = NULL; + td->td_mailbox = NULL; /* single thread it.. */ mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_CAN_UNBIND; + td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND); mtx_unlock_spin(&sched_lock); } else { + /* + * when thread limit reached, act like that the thread + * has already done an upcall. + */ if (p->p_numthreads > max_threads_per_proc) { - /* - * Since kernel thread limit reached, - * don't schedule upcall anymore. - * XXXKSE These code in fact needn't. 
- */ - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_CAN_UNBIND; - mtx_unlock_spin(&sched_lock); + if (td->td_standin != NULL) { + thread_stash(td->td_standin); + td->td_standin = NULL; + } } else { if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); - mtx_lock_spin(&sched_lock); - td->td_flags |= TDF_CAN_UNBIND; - mtx_unlock_spin(&sched_lock); + td->td_standin = thread_alloc(); } + mtx_lock_spin(&sched_lock); + td->td_flags |= TDF_CAN_UNBIND; + mtx_unlock_spin(&sched_lock); + KASSERT((ke->ke_owner == td), + ("thread_user_enter: No starting owner ")); + ke->ke_owner = td; + td->td_usticks = 0; } } } @@ -1547,90 +1335,165 @@ int thread_userret(struct thread *td, struct trapframe *frame) { int error; - struct kse_upcall *ku; + int unbound; + struct kse *ke; struct ksegrp *kg; + struct thread *worktodo; struct proc *p; struct timespec ts; - p = td->td_proc; + KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner), + ("thread_userret: bad thread/kse pointers")); + KASSERT((td == curthread), + ("thread_userret: bad thread argument")); + + kg = td->td_ksegrp; + p = td->td_proc; + error = 0; + unbound = TD_IS_UNBOUND(td); - /* Nothing to do with non-threaded group/process */ - if (td->td_ksegrp->kg_numupcalls == 0) - return (0); + mtx_lock_spin(&sched_lock); + if ((worktodo = kg->kg_last_assigned)) + worktodo = TAILQ_NEXT(worktodo, td_runq); + else + worktodo = TAILQ_FIRST(&kg->kg_runq); /* - * State clock interrupt hit in userland, it - * is returning from interrupt, charge thread's - * userland time for UTS. + * Permanently bound threads never upcall but they may + * loan out their KSE at this point. + * Upcalls imply bound.. They also may want to do some Philantropy. + * Temporarily bound threads on the other hand either yield + * to other work and transform into an upcall, or proceed back to + * userland. */ - if (td->td_flags & TDF_USTATCLOCK) { - thread_update_usr_ticks(td); - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_USTATCLOCK; - mtx_unlock_spin(&sched_lock); - } - /* - * Optimisation: - * This thread has not started any upcall. - * If there is no work to report other than ourself, - * then it can return direct to userland. - */ if (TD_CAN_UNBIND(td)) { - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_CAN_UNBIND; - mtx_unlock_spin(&sched_lock); - if ((kg->kg_completed == NULL) && - (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) { - thread_update_sys_ticks(td); + td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND); + if (!worktodo && (kg->kg_completed == NULL) && + !(td->td_kse->ke_flags & KEF_DOUPCALL)) { + /* + * This thread has not started any upcall. + * If there is no work to report other than + * ourself, then it can return direct to userland. + */ +justreturn: + mtx_unlock_spin(&sched_lock); + thread_update_uticks(); td->td_mailbox = NULL; return (0); } + mtx_unlock_spin(&sched_lock); error = thread_export_context(td); + td->td_usticks = 0; if (error) { /* - * Failing to do the KSE operation just defaults + * As we are not running on a borrowed KSE, + * failing to do the KSE operation just defaults * back to synchonous operation, so just return from * the syscall. */ - return (0); + goto justreturn; } + mtx_lock_spin(&sched_lock); /* - * There is something to report, and we own an upcall - * strucuture, we can go to userland. - * Turn ourself into an upcall thread. + * Turn ourself into a bound upcall. + * We will rely on kse_reassign() + * to make us run at a later time. 
*/ - mtx_lock_spin(&sched_lock); td->td_flags |= TDF_UPCALLING; + + /* there may be more work since we re-locked schedlock */ + if ((worktodo = kg->kg_last_assigned)) + worktodo = TAILQ_NEXT(worktodo, td_runq); + else + worktodo = TAILQ_FIRST(&kg->kg_runq); + } else if (unbound) { + /* + * We are an unbound thread, looking to + * return to user space. There must be another owner + * of this KSE. + * We are using a borrowed KSE. save state and exit. + * kse_reassign() will recycle the kse as needed, + */ mtx_unlock_spin(&sched_lock); - } else if (td->td_mailbox) { error = thread_export_context(td); + td->td_usticks = 0; if (error) { + /* + * There is nothing we can do. + * We just lose that context. We + * probably should note this somewhere and send + * the process a signal. + */ PROC_LOCK(td->td_proc); + psignal(td->td_proc, SIGSEGV); mtx_lock_spin(&sched_lock); + ke = td->td_kse; /* possibly upcall with error? */ } else { - PROC_LOCK(td->td_proc); - mtx_lock_spin(&sched_lock); /* - * There are upcall threads waiting for - * work to do, wake one of them up. - * XXXKSE Maybe wake all of them up. + * Don't make an upcall, just exit so that the owner + * can get its KSE if it wants it. + * Our context is already safely stored for later + * use by the UTS. */ - if (kg->kg_upsleeps) - wakeup_one(&kg->kg_completed); + PROC_LOCK(p); + mtx_lock_spin(&sched_lock); + ke = td->td_kse; + } + /* + * If the owner is idling, we now have something for it + * to report, so make it runnable. + * If the owner is not an upcall, make an attempt to + * ensure that at least one of any IDLED upcalls can + * wake up. + */ + if (ke->ke_owner->td_flags & TDF_UPCALLING) { + TD_CLR_IDLE(ke->ke_owner); + } else { + FOREACH_KSE_IN_GROUP(kg, ke) { + if (TD_IS_IDLE(ke->ke_owner)) { + TD_CLR_IDLE(ke->ke_owner); + setrunnable(ke->ke_owner); + break; + } + } } thread_exit(); - /* NOTREACHED */ } + /* + * We ARE going back to userland with this KSE. + * We are permanently bound. We may be an upcall. + * If an upcall, check for threads that need to borrow the KSE. + * Any other thread that comes ready after this missed the boat. + */ + ke = td->td_kse; + /* + * If not upcalling, go back to userspace. + * If we are, get the upcall set up. + */ if (td->td_flags & TDF_UPCALLING) { - KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind")); - ku = td->td_upcall; + if (worktodo) { + /* + * force a switch to more urgent 'in kernel' + * work. Control will return to this thread + * when there is no more work to do. + * kse_reassign() will do that for us. + */ + TD_SET_LOAN(td); + p->p_stats->p_ru.ru_nvcsw++; + mi_switch(); /* kse_reassign() will (re)find worktodo */ + } + td->td_flags &= ~TDF_UPCALLING; + if (ke->ke_flags & KEF_DOUPCALL) + ke->ke_flags &= ~KEF_DOUPCALL; + mtx_unlock_spin(&sched_lock); + /* * There is no more work to do and we are going to ride - * this thread up to userland as an upcall. + * this thread/KSE up to userland as an upcall. * Do the last parts of the setup needed for the upcall. */ CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", @@ -1641,27 +1504,16 @@ thread_userret(struct thread *td, struct trapframe *frame) * Will use Giant in cpu_thread_clean() because it uses * kmem_free(kernel_map, ...) 
*/ - cpu_set_upcall_kse(td, ku); - - /* - * Clear TDF_UPCALLING after set upcall context, - * profiling code looks TDF_UPCALLING to avoid account - * a wrong user %EIP - */ - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_UPCALLING; - if (ku->ku_flags & KUF_DOUPCALL) - ku->ku_flags &= ~KUF_DOUPCALL; - mtx_unlock_spin(&sched_lock); + cpu_set_upcall_kse(td, ke); - /* + /* * Unhook the list of completed threads. * anything that completes after this gets to * come in next time. * Put the list of completed thread mailboxes on * this KSE's mailbox. */ - error = thread_link_mboxes(kg, ku); + error = thread_link_mboxes(kg, ke); if (error) goto bad; @@ -1672,33 +1524,34 @@ thread_userret(struct thread *td, struct trapframe *frame) * it would be nice if this all happenned only on the first * time through. (the scan for extra work etc.) */ - error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0); +#if 0 + error = suword((caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_curthread), 0); +#else /* if user pointer arithmetic is ok in the kernel */ + error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0); +#endif + ke->ke_uuticks = ke->ke_usticks = 0; if (error) goto bad; - - /* Export current system time */ nanotime(&ts); if (copyout(&ts, - (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts))) { + (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) { goto bad; } + } else { + mtx_unlock_spin(&sched_lock); } /* * Optimisation: * Ensure that we have a spare thread available, * for when we re-enter the kernel. */ - if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); + if (td->td_standin == NULL) { + td->td_standin = thread_alloc(); + } - /* - * Clear thread mailbox first, then clear system tick count. - * The order is important because thread_statclock() use - * mailbox pointer to see if it is an userland thread or - * an UTS kernel thread. - */ + thread_update_uticks(); td->td_mailbox = NULL; - td->td_usticks = 0; return (0); bad: @@ -1710,7 +1563,6 @@ bad: psignal(td->td_proc, SIGSEGV); PROC_UNLOCK(td->td_proc); td->td_mailbox = NULL; - td->td_usticks = 0; return (error); /* go sync */ } @@ -1749,6 +1601,7 @@ thread_single(int force_exit) if (force_exit == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; + td->td_flags &= ~TDF_UNBOUND; } else p->p_flag &= ~P_SINGLE_EXIT; p->p_flag |= P_STOPPED_SINGLE; @@ -1771,16 +1624,17 @@ thread_single(int force_exit) else abortsleep(td2); } + if (TD_IS_IDLE(td2)) { + TD_CLR_IDLE(td2); + } } else { if (TD_IS_SUSPENDED(td2)) continue; - /* - * maybe other inhibitted states too? - * XXXKSE Is it totally safe to - * suspend a non-interruptable thread? - */ + /* maybe other inhibitted states too? */ if (td2->td_inhibitors & - (TDI_SLEEPING | TDI_SWAPPED)) + (TDI_SLEEPING | TDI_SWAPPED | + TDI_LOAN | TDI_IDLE | + TDI_EXITING)) thread_suspend_one(td2); } } @@ -1806,14 +1660,8 @@ thread_single(int force_exit) mtx_lock(&Giant); PROC_LOCK(p); } - if (force_exit == SINGLE_EXIT) { - if (td->td_upcall) { - mtx_lock_spin(&sched_lock); - upcall_remove(td); - mtx_unlock_spin(&sched_lock); - } + if (force_exit == SINGLE_EXIT) kse_purge(p, td); - } return (0); } @@ -1855,6 +1703,7 @@ thread_suspend_check(int return_instead) { struct thread *td; struct proc *p; + struct kse *ke; struct ksegrp *kg; td = curthread; @@ -1886,6 +1735,16 @@ thread_suspend_check(int return_instead) mtx_lock_spin(&sched_lock); while (mtx_owned(&Giant)) mtx_unlock(&Giant); + /* + * All threads should be exiting + * Unless they are the active "singlethread". 
+ * destroy un-needed KSEs as we go.. + * KSEGRPS may implode too as #kses -> 0. + */ + ke = td->td_kse; + if (ke->ke_owner == td && + (kg->kg_kses >= kg->kg_numthreads )) + ke->ke_flags |= KEF_EXIT; thread_exit(); } @@ -1893,6 +1752,14 @@ thread_suspend_check(int return_instead) * When a thread suspends, it just * moves to the processes's suspend queue * and stays there. + * + * XXXKSE if TDF_BOUND is true + * it will not release it's KSE which might + * lead to deadlock if there are not enough KSEs + * to complete all waiting threads. + * Maybe be able to 'lend' it out again. + * (lent kse's can not go back to userland?) + * and can only be lent in STOPPED state. */ mtx_lock_spin(&sched_lock); if ((p->p_flag & P_STOPPED_SIG) && diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c index 9000bc9..d96547b 100644 --- a/sys/kern/kern_lock.c +++ b/sys/kern/kern_lock.c @@ -219,7 +219,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) #endif { int error; - struct thread *thr; + pid_t pid; int extflags, lockflags; CTR5(KTR_LOCKMGR, @@ -228,9 +228,9 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) error = 0; if (td == NULL) - thr = LK_KERNPROC; + pid = LK_KERNPROC; else - thr = td; + pid = td->td_proc->p_pid; mtx_lock(lkp->lk_interlock); if (flags & LK_INTERLOCK) { @@ -257,7 +257,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) * lock requests or upgrade requests ( but not the exclusive * lock itself ). */ - if (lkp->lk_lockholder != thr) { + if (lkp->lk_lockholder != pid) { lockflags = LK_HAVE_EXCL; mtx_lock_spin(&sched_lock); if (td != NULL && !(td->td_flags & TDF_DEADLKTREAT)) @@ -268,7 +268,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) break; sharelock(lkp, 1); #if defined(DEBUG_LOCKS) - lkp->lk_slockholder = thr; + lkp->lk_slockholder = pid; lkp->lk_sfilename = file; lkp->lk_slineno = line; lkp->lk_slockername = name; @@ -283,14 +283,14 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) /* FALLTHROUGH downgrade */ case LK_DOWNGRADE: - KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0, + KASSERT(lkp->lk_lockholder == pid && lkp->lk_exclusivecount != 0, ("lockmgr: not holding exclusive lock " - "(owner thread (%p) != thread (%p), exlcnt (%d) != 0", - lkp->lk_lockholder, thr, lkp->lk_exclusivecount)); + "(owner pid (%d) != pid (%d), exlcnt (%d) != 0", + lkp->lk_lockholder, pid, lkp->lk_exclusivecount)); sharelock(lkp, lkp->lk_exclusivecount); lkp->lk_exclusivecount = 0; lkp->lk_flags &= ~LK_HAVE_EXCL; - lkp->lk_lockholder = (struct thread *)LK_NOPROC; + lkp->lk_lockholder = LK_NOPROC; if (lkp->lk_waitcount) wakeup((void *)lkp); break; @@ -317,7 +317,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) * after the upgrade). If we return an error, the file * will always be unlocked. 
*/ - if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0)) + if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0)) panic("lockmgr: upgrade exclusive lock"); shareunlock(lkp, 1); /* @@ -342,7 +342,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) if (error) break; lkp->lk_flags |= LK_HAVE_EXCL; - lkp->lk_lockholder = thr; + lkp->lk_lockholder = pid; if (lkp->lk_exclusivecount != 0) panic("lockmgr: non-zero exclusive count"); lkp->lk_exclusivecount = 1; @@ -364,7 +364,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) /* FALLTHROUGH exclusive request */ case LK_EXCLUSIVE: - if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) { + if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) { /* * Recursive lock. */ @@ -398,7 +398,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) if (error) break; lkp->lk_flags |= LK_HAVE_EXCL; - lkp->lk_lockholder = thr; + lkp->lk_lockholder = pid; if (lkp->lk_exclusivecount != 0) panic("lockmgr: non-zero exclusive count"); lkp->lk_exclusivecount = 1; @@ -411,10 +411,10 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) case LK_RELEASE: if (lkp->lk_exclusivecount != 0) { - if (lkp->lk_lockholder != thr && + if (lkp->lk_lockholder != pid && lkp->lk_lockholder != LK_KERNPROC) { - panic("lockmgr: thread %p, not %s %p unlocking", - thr, "exclusive lock holder", + panic("lockmgr: pid %d, not %s %d unlocking", + pid, "exclusive lock holder", lkp->lk_lockholder); } if (lkp->lk_exclusivecount == 1) { @@ -437,14 +437,14 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line) * check for holding a shared lock, but at least we can * check for an exclusive one. */ - if (lkp->lk_lockholder == thr) + if (lkp->lk_lockholder == pid) panic("lockmgr: draining against myself"); error = acquiredrain(lkp, extflags); if (error) break; lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL; - lkp->lk_lockholder = thr; + lkp->lk_lockholder = pid; lkp->lk_exclusivecount = 1; #if defined(DEBUG_LOCKS) lkp->lk_filename = file; @@ -589,7 +589,7 @@ lockstatus(lkp, td) mtx_lock(lkp->lk_interlock); if (lkp->lk_exclusivecount != 0) { - if (td == NULL || lkp->lk_lockholder == td) + if (td == NULL || lkp->lk_lockholder == td->td_proc->p_pid) lock_type = LK_EXCLUSIVE; else lock_type = LK_EXCLOTHER; @@ -627,7 +627,7 @@ lockmgr_printinfo(lkp) printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg, lkp->lk_sharecount); else if (lkp->lk_flags & LK_HAVE_EXCL) - printf(" lock type %s: EXCL (count %d) by thread %p", + printf(" lock type %s: EXCL (count %d) by pid %d", lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder); if (lkp->lk_waitcount > 0) printf(" with %d pending", lkp->lk_waitcount); diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index cc7c493..4ade890 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -671,23 +671,32 @@ calcru(p, up, sp, ip) { /* {user, system, interrupt, total} {ticks, usec}; previous tu: */ u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu; + u_int64_t uut = 0, sut = 0, iut = 0; + int s; struct timeval tv; struct bintime bt; + struct kse *ke; + struct ksegrp *kg; mtx_assert(&sched_lock, MA_OWNED); /* XXX: why spl-protect ? 
worst case is an off-by-one report */ - ut = p->p_uticks; - st = p->p_sticks; - it = p->p_iticks; - - tt = ut + st + it; - if (tt == 0) { - st = 1; - tt = 1; - } + FOREACH_KSEGRP_IN_PROC(p, kg) { + /* we could accumulate per ksegrp and per process here*/ + FOREACH_KSE_IN_GROUP(kg, ke) { + s = splstatclock(); + ut = ke->ke_uticks; + st = ke->ke_sticks; + it = ke->ke_iticks; + splx(s); + + tt = ut + st + it; + if (tt == 0) { + st = 1; + tt = 1; + } - if (curthread->td_proc == p) { + if (ke == curthread->td_kse) { /* * Adjust for the current time slice. This is actually fairly * important since the error here is on the order of a time @@ -696,59 +705,64 @@ calcru(p, up, sp, ip) * processors also being 'current'. */ - binuptime(&bt); - bintime_sub(&bt, PCPU_PTR(switchtime)); - bintime_add(&bt, &p->p_runtime); - } else { - bt = p->p_runtime; - } - bintime2timeval(&bt, &tv); - tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec; - ptu = p->p_uu + p->p_su + p->p_iu; - if (tu < ptu || (int64_t)tu < 0) { - /* XXX no %qd in kernel. Truncate. */ - printf("calcru: negative time of %ld usec for pid %d (%s)\n", - (long)tu, p->p_pid, p->p_comm); - tu = ptu; - } + binuptime(&bt); + bintime_sub(&bt, PCPU_PTR(switchtime)); + bintime_add(&bt, &p->p_runtime); + } else { + bt = p->p_runtime; + } + bintime2timeval(&bt, &tv); + tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec; + ptu = ke->ke_uu + ke->ke_su + ke->ke_iu; + if (tu < ptu || (int64_t)tu < 0) { + /* XXX no %qd in kernel. Truncate. */ + printf("calcru: negative time of %ld usec for pid %d (%s)\n", + (long)tu, p->p_pid, p->p_comm); + tu = ptu; + } - /* Subdivide tu. */ - uu = (tu * ut) / tt; - su = (tu * st) / tt; - iu = tu - uu - su; + /* Subdivide tu. */ + uu = (tu * ut) / tt; + su = (tu * st) / tt; + iu = tu - uu - su; - /* Enforce monotonicity. */ - if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) { - if (uu < p->p_uu) - uu = p->p_uu; - else if (uu + p->p_su + p->p_iu > tu) - uu = tu - p->p_su - p->p_iu; - if (st == 0) - su = p->p_su; - else { - su = ((tu - uu) * st) / (st + it); - if (su < p->p_su) - su = p->p_su; - else if (uu + su + p->p_iu > tu) - su = tu - uu - p->p_iu; - } - KASSERT(uu + su + p->p_iu <= tu, - ("calcru: monotonisation botch 1")); - iu = tu - uu - su; - KASSERT(iu >= p->p_iu, - ("calcru: monotonisation botch 2")); - } - p->p_uu = uu; - p->p_su = su; - p->p_iu = iu; - - up->tv_sec = uu / 1000000; - up->tv_usec = uu % 1000000; - sp->tv_sec = su / 1000000; - sp->tv_usec = su % 1000000; + /* Enforce monotonicity. 
*/ + if (uu < ke->ke_uu || su < ke->ke_su || iu < ke->ke_iu) { + if (uu < ke->ke_uu) + uu = ke->ke_uu; + else if (uu + ke->ke_su + ke->ke_iu > tu) + uu = tu - ke->ke_su - ke->ke_iu; + if (st == 0) + su = ke->ke_su; + else { + su = ((tu - uu) * st) / (st + it); + if (su < ke->ke_su) + su = ke->ke_su; + else if (uu + su + ke->ke_iu > tu) + su = tu - uu - ke->ke_iu; + } + KASSERT(uu + su + ke->ke_iu <= tu, + ("calcru: monotonisation botch 1")); + iu = tu - uu - su; + KASSERT(iu >= ke->ke_iu, + ("calcru: monotonisation botch 2")); + } + ke->ke_uu = uu; + ke->ke_su = su; + ke->ke_iu = iu; + uut += uu; + sut += su; + iut += iu; + + } /* end kse loop */ + } /* end kseg loop */ + up->tv_sec = uut / 1000000; + up->tv_usec = uut % 1000000; + sp->tv_sec = sut / 1000000; + sp->tv_usec = sut % 1000000; if (ip != NULL) { - ip->tv_sec = iu / 1000000; - ip->tv_usec = iu % 1000000; + ip->tv_sec = iut / 1000000; + ip->tv_usec = iut % 1000000; } } diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 24351de..d440256 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -1522,6 +1522,9 @@ psignal(p, sig) if (TD_IS_SLEEPING(td) && (td->td_flags & TDF_SINTR)) thread_suspend_one(td); + else if (TD_IS_IDLE(td)) { + thread_suspend_one(td); + } } if (p->p_suspcount == p->p_numthreads) { mtx_unlock_spin(&sched_lock); @@ -1634,6 +1637,9 @@ tdsignal(struct thread *td, int sig, sig_t action) cv_abort(td); else abortsleep(td); + } else if (TD_IS_IDLE(td)) { + TD_CLR_IDLE(td); + setrunnable(td); } #ifdef SMP else { diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c index 5cefb1c..6651f70 100644 --- a/sys/kern/kern_switch.c +++ b/sys/kern/kern_switch.c @@ -111,7 +111,7 @@ static void runq_readjust(struct runq *rq, struct kse *ke); * Functions that manipulate runnability from a thread perspective. * ************************************************************************/ /* - * Select the KSE that will be run next. From that find the thread, and + * Select the KSE that will be run next. From that find the thread, and x * remove it from the KSEGRP's run queue. If there is thread clustering, * this will be what does it. */ @@ -127,7 +127,7 @@ retry: td = ke->ke_thread; KASSERT((td->td_kse == ke), ("kse/thread mismatch")); kg = ke->ke_ksegrp; - if (td->td_proc->p_flag & P_KSES) { + if (TD_IS_UNBOUND(td)) { TAILQ_REMOVE(&kg->kg_runq, td, td_runq); if (kg->kg_last_assigned == td) { kg->kg_last_assigned = TAILQ_PREV(td, @@ -158,8 +158,9 @@ retry: } /* - * Given a surplus KSE, either assign a new runable thread to it - * (and put it in the run queue) or put it in the ksegrp's idle KSE list. + * Given a KSE (now surplus or at least loanable), either assign a new + * runable thread to it (and put it in the run queue) or put it in + * the ksegrp's idle KSE list. * Or maybe give it back to its owner if it's been loaned. 
* Assumes that the original thread is either not runnable or * already on the run queue @@ -169,54 +170,108 @@ kse_reassign(struct kse *ke) { struct ksegrp *kg; struct thread *td; + struct thread *owner; struct thread *original; - struct kse_upcall *ku; + int loaned; + KASSERT((ke->ke_owner), ("reassigning KSE with no owner")); + KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)), + ("reassigning KSE with no or runnable thread")); mtx_assert(&sched_lock, MA_OWNED); - original = ke->ke_thread; - KASSERT(original == NULL || TD_IS_INHIBITED(original), - ("reassigning KSE with runnable thread")); kg = ke->ke_ksegrp; - if (original) { + owner = ke->ke_owner; + loaned = TD_LENDER(owner); + original = ke->ke_thread; + + if (TD_CAN_UNBIND(original) && (original->td_standin)) { + KASSERT((owner == original), + ("Early thread borrowing?")); /* - * If the outgoing thread is in threaded group and has never - * scheduled an upcall, decide whether this is a short - * or long term event and thus whether or not to schedule - * an upcall. - * If it is a short term event, just suspend it in + * The outgoing thread is "threaded" and has never + * scheduled an upcall. + * decide whether this is a short or long term event + * and thus whether or not to schedule an upcall. + * if it is a short term event, just suspend it in * a way that takes its KSE with it. * Select the events for which we want to schedule upcalls. * For now it's just sleep. - * XXXKSE eventually almost any inhibition could do. + * Other threads that still have not fired an upcall + * are held to their KSE using the temorary Binding. */ - if (TD_CAN_UNBIND(original) && (original->td_standin) && - TD_ON_SLEEPQ(original)) { - /* - * Release ownership of upcall, and schedule an upcall - * thread, this new upcall thread becomes the owner of - * the upcall structure. + if (TD_ON_SLEEPQ(original)) { + /* + * An bound thread that can still unbind itself + * has been scheduled out. + * If it is sleeping, then we need to schedule an + * upcall. + * XXXKSE eventually almost any inhibition could do. */ - ku = original->td_upcall; - ku->ku_owner = NULL; - original->td_upcall = NULL; original->td_flags &= ~TDF_CAN_UNBIND; - thread_schedule_upcall(original, ku); + original->td_flags |= TDF_UNBOUND; + thread_schedule_upcall(original, ke); + owner = ke->ke_owner; + loaned = 1; } - original->td_kse = NULL; } + /* + * If the current thread was borrowing, then make things consistent + * by giving it back to the owner for the moment. The original thread + * must be unbound and have already used its chance for + * firing off an upcall. Threads that have not yet made an upcall + * can not borrow KSEs. + */ + if (loaned) { + TD_CLR_LOAN(owner); + ke->ke_thread = owner; + original->td_kse = NULL; /* give it amnesia */ + /* + * Upcalling threads have lower priority than all + * in-kernel threads, However threads that have loaned out + * their KSE and are NOT upcalling have the priority that + * they have. In other words, only look for other work if + * the owner is not runnable, OR is upcalling. + */ + if (TD_CAN_RUN(owner) && + ((owner->td_flags & TDF_UPCALLING) == 0)) { + setrunnable(owner); + CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)", + ke, owner); + return; + } + } + /* + * Either the owner is not runnable, or is an upcall. * Find the first unassigned thread + * If there is a 'last assigned' then see what's next. + * otherwise look at what is first. 
*/ - if ((td = kg->kg_last_assigned) != NULL) + if ((td = kg->kg_last_assigned)) { td = TAILQ_NEXT(td, td_runq); - else + } else { td = TAILQ_FIRST(&kg->kg_runq); + } /* - * If we found one, assign it the kse, otherwise idle the kse. + * If we found one assign it the kse, otherwise idle the kse. */ if (td) { + /* + * Assign the new thread to the KSE. + * and make the KSE runnable again, + */ + if (TD_IS_BOUND(owner)) { + /* + * If there is a reason to keep the previous + * owner, do so. + */ + TD_SET_LOAN(owner); + } else { + /* otherwise, cut it free */ + ke->ke_owner = td; + owner->td_kse = NULL; + } kg->kg_last_assigned = td; td->td_kse = ke; ke->ke_thread = td; @@ -225,11 +280,43 @@ kse_reassign(struct kse *ke) return; } - ke->ke_state = KES_IDLE; - ke->ke_thread = NULL; - TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses++; - CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke); + /* + * Now handle any waiting upcall. + * Since we didn't make them runnable before. + */ + if (TD_CAN_RUN(owner)) { + setrunnable(owner); + CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)", + ke, owner); + return; + } + + /* + * It is possible that this is the last thread in the group + * because the KSE is being shut down or the process + * is exiting. + */ + if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) { + ke->ke_thread = NULL; + owner->td_kse = NULL; + kse_unlink(ke); + return; + } + + /* + * At this stage all we know is that the owner + * is the same as the 'active' thread in the KSE + * and that it is + * Presently NOT loaned out. + * Put it on the loanable queue. Make it fifo + * so that long term sleepers donate their KSE's first. + */ + KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender")); + ke->ke_state = KES_THREAD; + ke->ke_flags |= KEF_ONLOANQ; + TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist); + kg->kg_loan_kses++; + CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke); return; } @@ -238,7 +325,7 @@ kse_reassign(struct kse *ke) * Remove a thread from its KSEGRP's run queue. * This in turn may remove it from a KSE if it was already assigned * to one, possibly causing a new thread to be assigned to the KSE - * and the KSE getting a new priority. + * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair). */ static void remrunqueue(struct thread *td) @@ -248,16 +335,17 @@ remrunqueue(struct thread *td) struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); - KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue")); + KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue")); kg = td->td_ksegrp; ke = td->td_kse; + /* + * If it's a bound thread/KSE pair, take the shortcut. All non-KSE + * threads are BOUND. + */ CTR1(KTR_RUNQ, "remrunqueue: td%p", td); kg->kg_runnable--; TD_SET_CAN_RUN(td); - /* - * If it is not a threaded process, take the shortcut. 
- */ - if ((td->td_proc->p_flag & P_KSES) == 0) { + if (TD_IS_BOUND(td)) { /* Bring its kse with it, leave the thread attached */ sched_rem(ke); ke->ke_state = KES_THREAD; @@ -275,7 +363,7 @@ remrunqueue(struct thread *td) sched_rem(ke); ke->ke_state = KES_THREAD; td2 = kg->kg_last_assigned; - KASSERT((td2 != NULL), ("last assigned has wrong value")); + KASSERT((td2 != NULL), ("last assigned has wrong value ")); if (td2 == td) kg->kg_last_assigned = td3; kse_reassign(ke); @@ -293,14 +381,14 @@ adjustrunqueue( struct thread *td, int newpri) struct kse *ke; mtx_assert(&sched_lock, MA_OWNED); - KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue")); - - ke = td->td_kse; - CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td); + KASSERT ((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue")); /* - * If it is not a threaded process, take the shortcut. + * If it's a bound thread/KSE pair, take the shortcut. All non-KSE + * threads are BOUND. */ - if ((td->td_proc->p_flag & P_KSES) == 0) { + ke = td->td_kse; + CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td); + if (TD_IS_BOUND(td)) { /* We only care about the kse in the run queue. */ td->td_priority = newpri; if (ke->ke_rqindex != (newpri / RQ_PPQ)) { @@ -309,8 +397,9 @@ adjustrunqueue( struct thread *td, int newpri) } return; } - - /* It is a threaded process */ + /* + * An unbound thread. This is not optimised yet. + */ kg = td->td_ksegrp; kg->kg_runnable--; TD_SET_CAN_RUN(td); @@ -350,17 +439,48 @@ setrunqueue(struct thread *td) sched_add(td->td_kse); return; } + /* + * If the process is threaded but the thread is bound then + * there is still a little extra to do re. KSE loaning. + */ + if (TD_IS_BOUND(td)) { + KASSERT((td->td_kse != NULL), + ("queueing BAD thread to run queue")); + ke = td->td_kse; + KASSERT((ke->ke_owner == ke->ke_thread), + ("setrunqueue: Hey KSE loaned out")); + if (ke->ke_flags & KEF_ONLOANQ) { + ke->ke_flags &= ~KEF_ONLOANQ; + TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist); + kg->kg_loan_kses--; + } + sched_add(td->td_kse); + return; + } + /* + * Ok, so we are threading with this thread. + * We don't have a KSE, see if we can get one.. + */ tda = kg->kg_last_assigned; if ((ke = td->td_kse) == NULL) { - if (kg->kg_idle_kses) { + /* + * We will need a KSE, see if there is one.. + * First look for a free one, before getting desperate. + * If we can't get one, our priority is not high enough.. + * that's ok.. + */ + if (kg->kg_loan_kses) { /* - * There is a free one so it's ours for the asking.. + * Failing that see if we can borrow one. */ - ke = TAILQ_FIRST(&kg->kg_iq); - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); + ke = TAILQ_FIRST(&kg->kg_lq); + TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist); + ke->ke_flags &= ~KEF_ONLOANQ; ke->ke_state = KES_THREAD; - kg->kg_idle_kses--; + TD_SET_LOAN(ke->ke_owner); + ke->ke_thread = NULL; + kg->kg_loan_kses--; } else if (tda && (tda->td_priority > td->td_priority)) { /* * None free, but there is one we can commandeer. @@ -375,7 +495,11 @@ setrunqueue(struct thread *td) } else { /* * Temporarily disassociate so it looks like the other cases. + * If the owner wasn't lending before, then it is now.. 
*/ + if (!TD_LENDER(ke->ke_owner)) { + TD_SET_LOAN(ke->ke_owner); + } ke->ke_thread = NULL; td->td_kse = NULL; } @@ -707,7 +831,6 @@ thread_sanity_check(struct thread *td, char *string) if (kg->kg_last_assigned && (saw_lastassigned == 0)) { panc(string, "where on earth does lastassigned point?"); } -#if 0 FOREACH_THREAD_IN_GROUP(kg, td2) { if (((td2->td_flags & TDF_UNBOUND) == 0) && (TD_ON_RUNQ(td2))) { @@ -717,7 +840,6 @@ thread_sanity_check(struct thread *td, char *string) } } } -#endif #if 0 if ((unassigned + assigned) != kg->kg_runnable) { panc(string, "wrong number in runnable"); diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 0585172..78bce30 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -63,7 +63,6 @@ static uma_zone_t ksegrp_zone; static uma_zone_t kse_zone; static uma_zone_t thread_zone; -static uma_zone_t upcall_zone; /* DEBUG ONLY */ SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); @@ -79,52 +78,16 @@ static int max_groups_per_proc = 5; SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW, &max_groups_per_proc, 0, "Limit on thread groups per proc"); -static int virtual_cpu; - #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) -TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); +struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses); TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps); -TAILQ_HEAD(, kse_upcall) zombie_upcalls = - TAILQ_HEAD_INITIALIZER(zombie_upcalls); -struct mtx kse_zombie_lock; -MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN); +struct mtx zombie_thread_lock; +MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock, + "zombie_thread_lock", MTX_SPIN); static void kse_purge(struct proc *p, struct thread *td); -static void kse_purge_group(struct thread *td); -static int thread_update_usr_ticks(struct thread *td); -static int thread_update_sys_ticks(struct thread *td); -static void thread_alloc_spare(struct thread *td, struct thread *spare); - -static int -sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) -{ - int error, new_val; - int def_val; - -#ifdef SMP - def_val = mp_ncpus; -#else - def_val = 1; -#endif - if (virtual_cpu == 0) - new_val = def_val; - else - new_val = virtual_cpu; - error = sysctl_handle_int(oidp, &new_val, 0, req); - if (error != 0 || req->newptr == NULL) - return (error); - if (new_val < 0) - return (EINVAL); - virtual_cpu = new_val; - return (0); -} - -/* DEBUG ONLY */ -SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, - 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", - "debug virtual cpus"); /* * Prepare a thread for use. @@ -136,6 +99,7 @@ thread_ctor(void *mem, int size, void *arg) td = (struct thread *)mem; td->td_state = TDS_INACTIVE; + td->td_flags |= TDF_UNBOUND; } /* @@ -197,7 +161,6 @@ thread_fini(void *mem, int size) td = (struct thread *)mem; pmap_dispose_thread(td); } - /* * Initialize type-stable parts of a kse (when newly created). */ @@ -209,7 +172,6 @@ kse_init(void *mem, int size) ke = (struct kse *)mem; ke->ke_sched = (struct ke_sched *)&ke[1]; } - /* * Initialize type-stable parts of a ksegrp (when newly created). */ @@ -223,7 +185,7 @@ ksegrp_init(void *mem, int size) } /* - * KSE is linked into kse group. + * KSE is linked onto the idle queue. 
*/ void kse_link(struct kse *ke, struct ksegrp *kg) @@ -232,12 +194,12 @@ kse_link(struct kse *ke, struct ksegrp *kg) TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist); kg->kg_kses++; - ke->ke_state = KES_UNQUEUED; + ke->ke_state = KES_UNQUEUED; ke->ke_proc = p; ke->ke_ksegrp = kg; + ke->ke_owner = NULL; ke->ke_thread = NULL; - ke->ke_oncpu = NOCPU; - ke->ke_flags = 0; + ke->ke_oncpu = NOCPU; } void @@ -247,13 +209,11 @@ kse_unlink(struct kse *ke) mtx_assert(&sched_lock, MA_OWNED); kg = ke->ke_ksegrp; + TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); - if (ke->ke_state == KES_IDLE) { - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses--; + if (--kg->kg_kses == 0) { + ksegrp_unlink(kg); } - if (--kg->kg_kses == 0) - ksegrp_unlink(kg); /* * Aggregate stats from the KSE */ @@ -268,20 +228,15 @@ ksegrp_link(struct ksegrp *kg, struct proc *p) TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */ - TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */ - TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */ - kg->kg_proc = p; - /* - * the following counters are in the -zero- section - * and may not need clearing - */ + TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */ + kg->kg_proc = p; +/* the following counters are in the -zero- section and may not need clearing */ kg->kg_numthreads = 0; - kg->kg_runnable = 0; - kg->kg_kses = 0; - kg->kg_runq_kses = 0; /* XXXKSE change name */ - kg->kg_idle_kses = 0; - kg->kg_numupcalls = 0; - /* link it in now that it's consistent */ + kg->kg_runnable = 0; + kg->kg_kses = 0; + kg->kg_loan_kses = 0; + kg->kg_runq_kses = 0; /* XXXKSE change name */ +/* link it in now that it's consistent */ p->p_numksegrps++; TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp); } @@ -292,11 +247,9 @@ ksegrp_unlink(struct ksegrp *kg) struct proc *p; mtx_assert(&sched_lock, MA_OWNED); - KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads")); - KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses")); - KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls")); - p = kg->kg_proc; + KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)), + ("kseg_unlink: residual threads or KSEs")); TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); p->p_numksegrps--; /* @@ -305,63 +258,13 @@ ksegrp_unlink(struct ksegrp *kg) ksegrp_stash(kg); } -struct kse_upcall * -upcall_alloc(void) -{ - struct kse_upcall *ku; - - ku = uma_zalloc(upcall_zone, 0); - bzero(ku, sizeof(*ku)); - return (ku); -} - -void -upcall_free(struct kse_upcall *ku) -{ - - uma_zfree(upcall_zone, ku); -} - -void -upcall_link(struct kse_upcall *ku, struct ksegrp *kg) -{ - - mtx_assert(&sched_lock, MA_OWNED); - TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link); - ku->ku_ksegrp = kg; - kg->kg_numupcalls++; -} - -void -upcall_unlink(struct kse_upcall *ku) -{ - struct ksegrp *kg = ku->ku_ksegrp; - - mtx_assert(&sched_lock, MA_OWNED); - KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__)); - TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link); - kg->kg_numupcalls--; - upcall_stash(ku); -} - -void -upcall_remove(struct thread *td) -{ - - if (td->td_upcall) { - td->td_upcall->ku_owner = NULL; - upcall_unlink(td->td_upcall); - td->td_upcall = 0; - } -} - /* - * For a newly created process, - * link up all the structures and its initial threads etc. + * for a newly created process, + * link up a the structure and its initial threads etc. 
*/ void proc_linkup(struct proc *p, struct ksegrp *kg, - struct kse *ke, struct thread *td) + struct kse *ke, struct thread *td) { TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */ @@ -375,11 +278,6 @@ proc_linkup(struct proc *p, struct ksegrp *kg, thread_link(td, kg); } -/* -struct kse_thr_interrupt_args { - struct kse_thr_mailbox * tmbx; -}; -*/ int kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) { @@ -387,7 +285,10 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) struct thread *td2; p = td->td_proc; - if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL)) + /* KSE-enabled processes only, please. */ + if (!(p->p_flag & P_KSES)) + return (EINVAL); + if (uap->tmbx == NULL) return (EINVAL); mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td2) { @@ -398,7 +299,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) cv_abort(td2); else abortsleep(td2); - } + } mtx_unlock_spin(&sched_lock); return (0); } @@ -407,11 +308,6 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) return (ESRCH); } -/* -struct kse_exit_args { - register_t dummy; -}; -*/ int kse_exit(struct thread *td, struct kse_exit_args *uap) { @@ -420,35 +316,27 @@ kse_exit(struct thread *td, struct kse_exit_args *uap) struct kse *ke; p = td->td_proc; - /* - * Only UTS can call the syscall and current group - * should be a threaded group. - */ - if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0)) + /* Only UTS can do the syscall */ + if (!(p->p_flag & P_KSES) || (td->td_mailbox != NULL)) return (EINVAL); - KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__)); - kg = td->td_ksegrp; - /* Serialize removing upcall */ + /* serialize killing kse */ PROC_LOCK(p); mtx_lock_spin(&sched_lock); - if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) { + if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (EDEADLK); } ke = td->td_kse; - upcall_remove(td); if (p->p_numthreads == 1) { - kse_purge(p, td); + ke->ke_flags &= ~KEF_DOUPCALL; + ke->ke_mailbox = NULL; p->p_flag &= ~P_KSES; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { - if (kg->kg_numthreads == 1) { /* Shutdown a group */ - kse_purge_group(td); - ke->ke_flags |= KEF_EXIT; - } + ke->ke_flags |= KEF_EXIT; thread_exit(); /* NOTREACHED */ } @@ -457,15 +345,10 @@ kse_exit(struct thread *td, struct kse_exit_args *uap) /* * Either becomes an upcall or waits for an awakening event and - * then becomes an upcall. Only error cases return. + * THEN becomes an upcall. Only error cases return. */ -/* -struct kse_release_args { - register_t dummy; -}; -*/ int -kse_release(struct thread *td, struct kse_release_args *uap) +kse_release(struct thread * td, struct kse_release_args * uap) { struct proc *p; struct ksegrp *kg; @@ -473,25 +356,28 @@ kse_release(struct thread *td, struct kse_release_args *uap) p = td->td_proc; kg = td->td_ksegrp; /* - * Only UTS can call the syscall and current group - * should be a threaded group. - */ - if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0)) + * kse must have a mailbox ready for upcall, and only UTS can + * do the syscall. + */ + if (!(p->p_flag & P_KSES) || + (td->td_mailbox != NULL) || + (td->td_kse->ke_mailbox == NULL)) return (EINVAL); - KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__)); PROC_LOCK(p); mtx_lock_spin(&sched_lock); /* Change OURSELF to become an upcall. 
*/ - td->td_flags = TDF_UPCALLING; - if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 && + td->td_flags = TDF_UPCALLING; /* BOUND */ + if (!(td->td_kse->ke_flags & (KEF_DOUPCALL|KEF_ASTPENDING)) && (kg->kg_completed == NULL)) { - kg->kg_upsleeps++; - mtx_unlock_spin(&sched_lock); - msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause", - NULL); - kg->kg_upsleeps--; + /* + * The KSE will however be lendable. + */ + TD_SET_IDLE(td); PROC_UNLOCK(p); + p->p_stats->p_ru.ru_nvcsw++; + mi_switch(); + mtx_unlock_spin(&sched_lock); } else { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); @@ -506,59 +392,61 @@ int kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) { struct proc *p; + struct kse *ke; struct ksegrp *kg; - struct kse_upcall *ku; struct thread *td2; p = td->td_proc; td2 = NULL; - ku = NULL; /* KSE-enabled processes only, please. */ if (!(p->p_flag & P_KSES)) - return (EINVAL); + return EINVAL; - PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (uap->mbx) { FOREACH_KSEGRP_IN_PROC(p, kg) { - FOREACH_UPCALL_IN_GROUP(kg, ku) { - if (ku->ku_mailbox == uap->mbx) - break; + FOREACH_KSE_IN_GROUP(kg, ke) { + if (ke->ke_mailbox != uap->mbx) + continue; + td2 = ke->ke_owner; + KASSERT((td2 != NULL),("KSE with no owner")); + break; } - if (ku) + if (td2) { break; + } } } else { + /* + * look for any idle KSE to resurrect. + */ kg = td->td_ksegrp; - if (kg->kg_upsleeps) { - wakeup_one(&kg->kg_completed); - mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); - return (0); + FOREACH_KSE_IN_GROUP(kg, ke) { + td2 = ke->ke_owner; + KASSERT((td2 != NULL),("KSE with no owner2")); + if (TD_IS_IDLE(td2)) + break; } - ku = TAILQ_FIRST(&kg->kg_upcalls); + KASSERT((td2 != NULL), ("no thread(s)")); } - if (ku) { - if ((td2 = ku->ku_owner) == NULL) { - panic("%s: no owner", __func__); - } else if (TD_ON_SLEEPQ(td2) && - (td2->td_wchan == &kg->kg_completed)) { - abortsleep(td2); - } else { - ku->ku_flags |= KUF_DOUPCALL; + if (td2) { + if (TD_IS_IDLE(td2)) { + TD_CLR_IDLE(td2); + setrunnable(td2); + } else if (td != td2) { + /* guarantee do an upcall ASAP */ + td2->td_kse->ke_flags |= KEF_DOUPCALL; } mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); return (0); } mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); return (ESRCH); } /* * No new KSEG: first call: use current KSE, don't schedule an upcall - * All other situations, do allocate max new KSEs and schedule an upcall. + * All other situations, do allocate a new KSE and schedule an upcall on it. */ /* struct kse_create_args { struct kse_mailbox *mbx; @@ -568,140 +456,112 @@ int kse_create(struct thread *td, struct kse_create_args *uap) { struct kse *newke; + struct kse *ke; struct ksegrp *newkg; struct ksegrp *kg; struct proc *p; struct kse_mailbox mbx; - struct kse_upcall *newku; - int err, ncpus; + int err; p = td->td_proc; if ((err = copyin(uap->mbx, &mbx, sizeof(mbx)))) return (err); - /* Too bad, why hasn't kernel always a cpu counter !? */ -#ifdef SMP - ncpus = mp_ncpus; -#else - ncpus = 1; -#endif - if (thread_debug && virtual_cpu != 0) - ncpus = virtual_cpu; - - /* Easier to just set it than to test and set */ - p->p_flag |= P_KSES; + p->p_flag |= P_KSES; /* easier to just set it than to test and set */ kg = td->td_ksegrp; if (uap->newgroup) { - /* Have race condition but it is cheap */ if (p->p_numksegrps >= max_groups_per_proc) return (EPROCLIM); /* * If we want a new KSEGRP it doesn't matter whether * we have already fired up KSE mode before or not. - * We put the process in KSE mode and create a new KSEGRP. 
+ * We put the process in KSE mode and create a new KSEGRP + * and KSE. If our KSE has not got a mailbox yet then + * that doesn't matter, just leave it that way. It will + * ensure that this thread stay BOUND. It's possible + * that the call came form a threaded library and the main + * program knows nothing of threads. */ newkg = ksegrp_alloc(); bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp, - kg_startzero, kg_endzero)); + kg_startzero, kg_endzero)); bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy)); - mtx_lock_spin(&sched_lock); - ksegrp_link(newkg, p); - if (p->p_numksegrps >= max_groups_per_proc) { - ksegrp_unlink(newkg); - mtx_unlock_spin(&sched_lock); - return (EPROCLIM); - } - mtx_unlock_spin(&sched_lock); + newke = kse_alloc(); } else { - newkg = kg; - } - - /* - * Creating upcalls more than number of physical cpu does - * not help performance. - */ - if (newkg->kg_numupcalls >= ncpus) - return (EPROCLIM); - - if (newkg->kg_numupcalls == 0) { - /* - * Initialize KSE group, optimized for MP. - * Create KSEs as many as physical cpus, this increases - * concurrent even if userland is not MP safe and can only run - * on single CPU (for early version of libpthread, it is true). - * In ideal world, every physical cpu should execute a thread. - * If there is enough KSEs, threads in kernel can be - * executed parallel on different cpus with full speed, - * Concurrent in kernel shouldn't be restricted by number of - * upcalls userland provides. - * Adding more upcall structures only increases concurrent - * in userland. - * Highest performance configuration is: - * N kses = N upcalls = N phyiscal cpus + /* + * Otherwise, if we have already set this KSE + * to have a mailbox, we want to make another KSE here, + * but only if there are not already the limit, which + * is 1 per CPU max. + * + * If the current KSE doesn't have a mailbox we just use it + * and give it one. + * + * Because we don't like to access + * the KSE outside of schedlock if we are UNBOUND, + * (because it can change if we are preempted by an interrupt) + * we can deduce it as having a mailbox if we are UNBOUND, + * and only need to actually look at it if we are BOUND, + * which is safe. 
*/ - while (newkg->kg_kses < ncpus) { + if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) { + if (thread_debug == 0) { /* if debugging, allow more */ +#ifdef SMP + if (kg->kg_kses > mp_ncpus) +#endif + return (EPROCLIM); + } newke = kse_alloc(); - bzero(&newke->ke_startzero, RANGEOF(struct kse, - ke_startzero, ke_endzero)); + } else { + newke = NULL; + } + newkg = NULL; + } + if (newke) { + bzero(&newke->ke_startzero, RANGEOF(struct kse, + ke_startzero, ke_endzero)); #if 0 - mtx_lock_spin(&sched_lock); - bcopy(&ke->ke_startcopy, &newke->ke_startcopy, - RANGEOF(struct kse, ke_startcopy, ke_endcopy)); - mtx_unlock_spin(&sched_lock); + bcopy(&ke->ke_startcopy, &newke->ke_startcopy, + RANGEOF(struct kse, ke_startcopy, ke_endcopy)); #endif - mtx_lock_spin(&sched_lock); - kse_link(newke, newkg); - if (p->p_sflag & PS_NEEDSIGCHK) - newke->ke_flags |= KEF_ASTPENDING; - /* Add engine */ - kse_reassign(newke); - mtx_unlock_spin(&sched_lock); + /* For the first call this may not have been set */ + if (td->td_standin == NULL) { + td->td_standin = thread_alloc(); } - } - newku = upcall_alloc(); - newku->ku_mailbox = uap->mbx; - newku->ku_func = mbx.km_func; - bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); - - /* For the first call this may not have been set */ - if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); - - mtx_lock_spin(&sched_lock); - if (newkg->kg_numupcalls >= ncpus) { - upcall_free(newku); + mtx_lock_spin(&sched_lock); + if (newkg) { + if (p->p_numksegrps >= max_groups_per_proc) { + mtx_unlock_spin(&sched_lock); + ksegrp_free(newkg); + kse_free(newke); + return (EPROCLIM); + } + ksegrp_link(newkg, p); + } + else + newkg = kg; + kse_link(newke, newkg); + if (p->p_sflag & PS_NEEDSIGCHK) + newke->ke_flags |= KEF_ASTPENDING; + newke->ke_mailbox = uap->mbx; + newke->ke_upcall = mbx.km_func; + bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t)); + thread_schedule_upcall(td, newke); mtx_unlock_spin(&sched_lock); - return (EPROCLIM); - } - upcall_link(newku, newkg); - - /* - * Each upcall structure has an owner thread, find which - * one owns it. - */ - if (uap->newgroup) { - /* - * Because new ksegrp hasn't thread, - * create an initial upcall thread to own it. - */ - thread_schedule_upcall(td, newku); } else { /* - * If current thread hasn't an upcall structure, - * just assign the upcall to it. + * If we didn't allocate a new KSE then the we are using + * the exisiting (BOUND) kse. */ - if (td->td_upcall == NULL) { - newku->ku_owner = td; - td->td_upcall = newku; - } else { - /* - * Create a new upcall thread to own it. - */ - thread_schedule_upcall(td, newku); - } + ke = td->td_kse; + ke->ke_mailbox = uap->mbx; + ke->ke_upcall = mbx.km_func; + bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t)); } - mtx_unlock_spin(&sched_lock); + /* + * Fill out the KSE-mode specific fields of the new kse. 
+ */ return (0); } @@ -782,8 +642,6 @@ threadinit(void) kse_zone = uma_zcreate("KSE", sched_sizeof_kse(), NULL, NULL, kse_init, NULL, UMA_ALIGN_CACHE, 0); - upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), - NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); } /* @@ -792,9 +650,9 @@ threadinit(void) void thread_stash(struct thread *td) { - mtx_lock_spin(&kse_zombie_lock); + mtx_lock_spin(&zombie_thread_lock); TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); } /* @@ -803,21 +661,9 @@ thread_stash(struct thread *td) void kse_stash(struct kse *ke) { - mtx_lock_spin(&kse_zombie_lock); + mtx_lock_spin(&zombie_thread_lock); TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq); - mtx_unlock_spin(&kse_zombie_lock); -} - -/* - * Stash an embarasingly extra upcall into the zombie upcall queue. - */ - -void -upcall_stash(struct kse_upcall *ku) -{ - mtx_lock_spin(&kse_zombie_lock); - TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); } /* @@ -826,13 +672,13 @@ upcall_stash(struct kse_upcall *ku) void ksegrp_stash(struct ksegrp *kg) { - mtx_lock_spin(&kse_zombie_lock); + mtx_lock_spin(&zombie_thread_lock); TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); } /* - * Reap zombie kse resource. + * Reap zombie threads. */ void thread_reap(void) @@ -840,34 +686,27 @@ thread_reap(void) struct thread *td_first, *td_next; struct kse *ke_first, *ke_next; struct ksegrp *kg_first, * kg_next; - struct kse_upcall *ku_first, *ku_next; /* - * Don't even bother to lock if none at this instant, - * we really don't care about the next instant.. + * don't even bother to lock if none at this instant + * We really don't care about the next instant.. */ if ((!TAILQ_EMPTY(&zombie_threads)) || (!TAILQ_EMPTY(&zombie_kses)) - || (!TAILQ_EMPTY(&zombie_ksegrps)) - || (!TAILQ_EMPTY(&zombie_upcalls))) { - mtx_lock_spin(&kse_zombie_lock); + || (!TAILQ_EMPTY(&zombie_ksegrps))) { + mtx_lock_spin(&zombie_thread_lock); td_first = TAILQ_FIRST(&zombie_threads); ke_first = TAILQ_FIRST(&zombie_kses); kg_first = TAILQ_FIRST(&zombie_ksegrps); - ku_first = TAILQ_FIRST(&zombie_upcalls); if (td_first) TAILQ_INIT(&zombie_threads); if (ke_first) TAILQ_INIT(&zombie_kses); if (kg_first) TAILQ_INIT(&zombie_ksegrps); - if (ku_first) - TAILQ_INIT(&zombie_upcalls); - mtx_unlock_spin(&kse_zombie_lock); + mtx_unlock_spin(&zombie_thread_lock); while (td_first) { td_next = TAILQ_NEXT(td_first, td_runq); - if (td_first->td_ucred) - crfree(td_first->td_ucred); thread_free(td_first); td_first = td_next; } @@ -881,11 +720,6 @@ thread_reap(void) ksegrp_free(kg_first); kg_first = kg_next; } - while (ku_first) { - ku_next = TAILQ_NEXT(ku_first, ku_link); - upcall_free(ku_first); - ku_first = ku_next; - } } } @@ -958,14 +792,20 @@ thread_export_context(struct thread *td) struct ksegrp *kg; uintptr_t mbx; void *addr; - int error,temp; + int error; ucontext_t uc; + uint temp; p = td->td_proc; kg = td->td_ksegrp; /* Export the user/machine context. 
*/ - addr = (void *)(&td->td_mailbox->tm_context); +#if 0 + addr = (caddr_t)td->td_mailbox + + offsetof(struct kse_thr_mailbox, tm_context); +#else /* if user pointer arithmetic is valid in the kernel */ + addr = (void *)(&td->td_mailbox->tm_context); +#endif error = copyin(addr, &uc, sizeof(ucontext_t)); if (error) goto bad; @@ -975,14 +815,13 @@ thread_export_context(struct thread *td) if (error) goto bad; - /* Exports clock ticks in kernel mode */ - addr = (caddr_t)(&td->td_mailbox->tm_sticks); - temp = fuword(addr) + td->td_usticks; - if (suword(addr, temp)) - goto bad; - - /* Get address in latest mbox of list pointer */ + /* get address in latest mbox of list pointer */ +#if 0 + addr = (caddr_t)td->td_mailbox + + offsetof(struct kse_thr_mailbox , tm_next); +#else /* if user pointer arithmetic is valid in the kernel */ addr = (void *)(&td->td_mailbox->tm_next); +#endif /* * Put the saved address of the previous first * entry into this one @@ -996,43 +835,42 @@ thread_export_context(struct thread *td) PROC_LOCK(p); if (mbx == (uintptr_t)kg->kg_completed) { kg->kg_completed = td->td_mailbox; - /* - * The thread context may be taken away by - * other upcall threads when we unlock - * process lock. it's no longer valid to - * use it again in any other places. - */ - td->td_mailbox = NULL; PROC_UNLOCK(p); break; } PROC_UNLOCK(p); } - td->td_usticks = 0; + addr = (caddr_t)td->td_mailbox + + offsetof(struct kse_thr_mailbox, tm_sticks); + temp = fuword(addr) + td->td_usticks; + if (suword(addr, temp)) + goto bad; return (0); bad: PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); - /* The mailbox is bad, don't use it */ - td->td_mailbox = NULL; - td->td_usticks = 0; return (error); } /* * Take the list of completed mailboxes for this KSEGRP and put them on this - * upcall's mailbox as it's the next one going up. + * KSE's mailbox as it's the next one going up. */ static int -thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) +thread_link_mboxes(struct ksegrp *kg, struct kse *ke) { struct proc *p = kg->kg_proc; void *addr; uintptr_t mbx; - addr = (void *)(&ku->ku_mailbox->km_completed); +#if 0 + addr = (caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_completed); +#else /* if user pointer arithmetic is valid in the kernel */ + addr = (void *)(&ke->ke_mailbox->km_completed); +#endif for (;;) { mbx = (uintptr_t)kg->kg_completed; if (suword(addr, mbx)) { @@ -1057,91 +895,69 @@ thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) * This function should be called at statclock interrupt time */ int -thread_statclock(int user) +thread_add_ticks_intr(int user, uint ticks) { struct thread *td = curthread; + struct kse *ke = td->td_kse; - if (td->td_ksegrp->kg_numupcalls == 0) - return (-1); + if (ke->ke_mailbox == NULL) + return -1; if (user) { /* Current always do via ast() */ - td->td_flags |= (TDF_ASTPENDING|TDF_USTATCLOCK); - td->td_uuticks++; + ke->ke_flags |= KEF_ASTPENDING; + ke->ke_uuticks += ticks; } else { if (td->td_mailbox != NULL) - td->td_usticks++; - else { - /* XXXKSE - * We will call thread_user_enter() for every - * kernel entry in future, so if the thread mailbox - * is NULL, it must be a UTS kernel, don't account - * clock ticks for it. 
- */ - } + td->td_usticks += ticks; + else + ke->ke_usticks += ticks; } - return (0); + return 0; } -/* - * Export user mode state clock ticks - */ static int -thread_update_usr_ticks(struct thread *td) +thread_update_uticks(void) { + struct thread *td = curthread; struct proc *p = td->td_proc; + struct kse *ke = td->td_kse; struct kse_thr_mailbox *tmbx; - struct kse_upcall *ku; caddr_t addr; - uint uticks; + uint uticks, sticks; - if ((ku = td->td_upcall) == NULL) - return (-1); - - tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); + if (ke->ke_mailbox == NULL) + return 0; + + uticks = ke->ke_uuticks; + ke->ke_uuticks = 0; + sticks = ke->ke_usticks; + ke->ke_usticks = 0; +#if 0 + tmbx = (void *)fuword((caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_curthread)); +#else /* if user pointer arithmetic is ok in the kernel */ + tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread); +#endif if ((tmbx == NULL) || (tmbx == (void *)-1)) - return (-1); - uticks = td->td_uuticks; - td->td_uuticks = 0; + return 0; if (uticks) { - addr = (caddr_t)&tmbx->tm_uticks; + addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks); uticks += fuword(addr); - if (suword(addr, uticks)) { - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - return (-2); - } + if (suword(addr, uticks)) + goto bad; } - return (0); -} - -/* - * Export kernel mode state clock ticks - */ - -static int -thread_update_sys_ticks(struct thread *td) -{ - struct proc *p = td->td_proc; - caddr_t addr; - int sticks; - - if (td->td_mailbox == NULL) - return (-1); - if (td->td_usticks == 0) - return (0); - addr = (caddr_t)&td->td_mailbox->tm_sticks; - sticks = fuword(addr); - /* XXXKSE use XCHG instead */ - sticks += td->td_usticks; - td->td_usticks = 0; - if (suword(addr, sticks)) { - PROC_LOCK(p); - psignal(p, SIGSEGV); - PROC_UNLOCK(p); - return (-2); + if (sticks) { + addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks); + sticks += fuword(addr); + if (suword(addr, sticks)) + goto bad; } - return (0); + return 0; +bad: + PROC_LOCK(p); + psignal(p, SIGSEGV); + PROC_UNLOCK(p); + return -1; } /* @@ -1197,7 +1013,6 @@ thread_exit(void) p->p_numthreads--; TAILQ_REMOVE(&kg->kg_threads, td, td_kglist); kg->kg_numthreads--; - /* * The test below is NOT true if we are the * sole exiting thread. P_STOPPED_SNGL is unset @@ -1209,28 +1024,25 @@ thread_exit(void) } } - /* - * Because each upcall structure has an owner thread, - * owner thread exits only when process is in exiting - * state, so upcall to userland is no longer needed, - * deleting upcall structure is safe here. - * So when all threads in a group is exited, all upcalls - * in the group should be automatically freed. - */ - if (td->td_upcall) - upcall_remove(td); - + /* Reassign this thread's KSE. */ ke->ke_state = KES_UNQUEUED; - ke->ke_thread = NULL; + /* * Decide what to do with the KSE attached to this thread. + * XXX Possibly kse_reassign should do both cases as it already + * does some of this. 
*/ - if (ke->ke_flags & KEF_EXIT) + if (ke->ke_flags & KEF_EXIT) { + KASSERT((ke->ke_owner == td), + ("thread_exit: KSE exiting with non-owner thread")); + ke->ke_thread = NULL; + td->td_kse = NULL; kse_unlink(ke); - else + } else { + TD_SET_EXITING(td); /* definitly not runnable */ kse_reassign(ke); + } PROC_UNLOCK(p); - td->td_kse = NULL; td->td_state = TDS_INACTIVE; td->td_proc = NULL; td->td_ksegrp = NULL; @@ -1278,12 +1090,10 @@ thread_link(struct thread *td, struct ksegrp *kg) struct proc *p; p = kg->kg_proc; - td->td_state = TDS_INACTIVE; - td->td_proc = p; - td->td_ksegrp = kg; - td->td_last_kse = NULL; - td->td_flags = 0; - td->td_kse = NULL; + td->td_state = TDS_INACTIVE; + td->td_proc = p; + td->td_ksegrp = kg; + td->td_last_kse = NULL; LIST_INIT(&td->td_contested); callout_init(&td->td_slpcallout, 1); @@ -1291,139 +1101,116 @@ thread_link(struct thread *td, struct ksegrp *kg) TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist); p->p_numthreads++; kg->kg_numthreads++; + td->td_kse = NULL; } -/* - * Purge a ksegrp resource. When a ksegrp is preparing to - * exit, it calls this function. - */ -void -kse_purge_group(struct thread *td) -{ - struct ksegrp *kg; - struct kse *ke; - - kg = td->td_ksegrp; - KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__)); - while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { - KASSERT(ke->ke_state == KES_IDLE, - ("%s: wrong idle KSE state", __func__)); - kse_unlink(ke); - } - KASSERT((kg->kg_kses == 1), - ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses)); - KASSERT((kg->kg_numupcalls == 0), - ("%s: ksegrp still has %d upcall datas", - __func__, kg->kg_numupcalls)); -} - -/* - * Purge a process's KSE resource. When a process is preparing to - * exit, it calls kse_purge to release any extra KSE resources in - * the process. - */ void kse_purge(struct proc *p, struct thread *td) { + /* XXXKSE think about this.. + may need to wake up threads on loan queue. */ struct ksegrp *kg; - struct kse *ke; KASSERT(p->p_numthreads == 1, ("bad thread number")); mtx_lock_spin(&sched_lock); while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) { TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); p->p_numksegrps--; - /* - * There is no ownership for KSE, after all threads - * in the group exited, it is possible that some KSEs - * were left in idle queue, gc them now. - */ - while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { - KASSERT(ke->ke_state == KES_IDLE, - ("%s: wrong idle KSE state", __func__)); - TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); - kg->kg_idle_kses--; - TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); - kg->kg_kses--; - kse_stash(ke); - } KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) || - ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), - ("ksegrp has wrong kg_kses: %d", kg->kg_kses)); - KASSERT((kg->kg_numupcalls == 0), - ("%s: ksegrp still has %d upcall datas", - __func__, kg->kg_numupcalls)); - - if (kg != td->td_ksegrp) + ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), + ("wrong kg_kses")); + if (kg != td->td_ksegrp) { ksegrp_stash(kg); + } } TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp); p->p_numksegrps++; mtx_unlock_spin(&sched_lock); } -/* - * This function is intended to be used to initialize a spare thread - * for upcall. Initialize thread's large data area outside sched_lock - * for thread_schedule_upcall(). 
- */ -void -thread_alloc_spare(struct thread *td, struct thread *spare) -{ - if (td->td_standin) - return; - if (spare == NULL) - spare = thread_alloc(); - td->td_standin = spare; - bzero(&spare->td_startzero, - (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); - spare->td_proc = td->td_proc; - /* Setup PCB and fork address */ - cpu_set_upcall(spare, td->td_pcb); - /* - * XXXKSE do we really need this? (default values for the - * frame). - */ - bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe)); - spare->td_ucred = crhold(td->td_ucred); -} /* * Create a thread and schedule it for upcall on the KSE given. * Use our thread's standin so that we don't have to allocate one. */ struct thread * -thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) +thread_schedule_upcall(struct thread *td, struct kse *ke) { struct thread *td2; + int newkse; mtx_assert(&sched_lock, MA_OWNED); + newkse = (ke != td->td_kse); /* - * Schedule an upcall thread on specified kse_upcall, - * the kse_upcall must be free. - * td must have a spare thread. + * If the owner and kse are BOUND then that thread is planning to + * go to userland and upcalls are not expected. So don't make one. + * If it is not bound then make it so with the spare thread + * anf then borrw back the KSE to allow us to complete some in-kernel + * work. When we complete, the Bound thread will have the chance to + * complete. This thread will sleep as planned. Hopefully there will + * eventually be un unbound thread that can be converted to an + * upcall to report the completion of this thread. */ - KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); + if ((td2 = td->td_standin) != NULL) { td->td_standin = NULL; } else { - panic("no reserve thread when scheduling an upcall"); + if (newkse) + panic("no reserve thread when called with a new kse"); + /* + * If called from (e.g.) sleep and we do not have + * a reserve thread, then we've used it, so do not + * create an upcall. + */ return (NULL); } CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", td2, td->td_proc->p_pid, td->td_proc->p_comm); + bzero(&td2->td_startzero, + (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td2->td_startcopy, (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); - thread_link(td2, ku->ku_ksegrp); - /* Let the new thread become owner of the upcall */ - ku->ku_owner = td2; - td2->td_upcall = ku; - td2->td_flags = TDF_UPCALLING; - td2->td_kse = NULL; - td2->td_state = TDS_CAN_RUN; + thread_link(td2, ke->ke_ksegrp); + cpu_set_upcall(td2, td->td_pcb); + + /* + * XXXKSE do we really need this? (default values for the + * frame). + */ + bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe)); + + /* + * Bind the new thread to the KSE, + * and if it's our KSE, lend it back to ourself + * so we can continue running. + */ + td2->td_ucred = crhold(td->td_ucred); + td2->td_flags = TDF_UPCALLING; /* note: BOUND */ + td2->td_kse = ke; + td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; - setrunqueue(td2); + ke->ke_owner = td2; + /* + * If called from kse_reassign(), we are working on the current + * KSE so fake that we borrowed it. If called from + * kse_create(), don't, as we have a new kse too. + */ + if (!newkse) { + /* + * This thread will be scheduled when the current thread + * blocks, exits or tries to enter userspace, (which ever + * happens first). When that happens the KSe will "revert" + * to this thread in a BOUND manner. 
Since we are called + * from msleep() this is going to be "very soon" in nearly + * all cases. + */ + TD_SET_LOAN(td2); + } else { + ke->ke_thread = td2; + ke->ke_state = KES_THREAD; + setrunqueue(td2); + } return (td2); /* bogus.. should be a void function */ } @@ -1435,16 +1222,14 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) struct thread * signal_upcall(struct proc *p, int sig) { -#if 0 struct thread *td, *td2; struct kse *ke; sigset_t ss; int error; -#endif PROC_LOCK_ASSERT(p, MA_OWNED); return (NULL); -#if 0 + td = FIRST_THREAD_IN_PROC(p); ke = td->td_kse; PROC_UNLOCK(p); @@ -1459,31 +1244,28 @@ return (NULL); if (error) return (NULL); if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); + td->td_standin = thread_alloc(); mtx_lock_spin(&sched_lock); td2 = thread_schedule_upcall(td, ke); /* Bogus JRE */ mtx_unlock_spin(&sched_lock); return (td2); -#endif } /* - * Setup done on the thread when it enters the kernel. + * setup done on the thread when it enters the kernel. * XXXKSE Presently only for syscalls but eventually all kernel entries. */ void thread_user_enter(struct proc *p, struct thread *td) { - struct ksegrp *kg; - struct kse_upcall *ku; + struct kse *ke; - kg = td->td_ksegrp; /* * First check that we shouldn't just abort. * But check if we are the single thread first! * XXX p_singlethread not locked, but should be safe. */ - if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { + if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) { PROC_LOCK(p); mtx_lock_spin(&sched_lock); thread_exit(); @@ -1496,37 +1278,43 @@ thread_user_enter(struct proc *p, struct thread *td) * possibility that we could do this lazily (in kse_reassign()), * but for now do it every time. */ - kg = td->td_ksegrp; - if (kg->kg_numupcalls) { - ku = td->td_upcall; - KASSERT(ku, ("%s: no upcall owned", __func__)); - KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); + ke = td->td_kse; + td->td_flags &= ~TDF_UNBOUND; + if (ke->ke_mailbox != NULL) { +#if 0 + td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_curthread)); +#else /* if user pointer arithmetic is ok in the kernel */ td->td_mailbox = - (void *)fuword((void *)&ku->ku_mailbox->km_curthread); + (void *)fuword( (void *)&ke->ke_mailbox->km_curthread); +#endif if ((td->td_mailbox == NULL) || (td->td_mailbox == (void *)-1)) { - /* Don't schedule upcall when blocked */ - td->td_mailbox = NULL; + td->td_mailbox = NULL; /* single thread it.. */ mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_CAN_UNBIND; + td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND); mtx_unlock_spin(&sched_lock); } else { + /* + * when thread limit reached, act like that the thread + * has already done an upcall. + */ if (p->p_numthreads > max_threads_per_proc) { - /* - * Since kernel thread limit reached, - * don't schedule upcall anymore. - * XXXKSE These code in fact needn't. 
- */ - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_CAN_UNBIND; - mtx_unlock_spin(&sched_lock); + if (td->td_standin != NULL) { + thread_stash(td->td_standin); + td->td_standin = NULL; + } } else { if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); - mtx_lock_spin(&sched_lock); - td->td_flags |= TDF_CAN_UNBIND; - mtx_unlock_spin(&sched_lock); + td->td_standin = thread_alloc(); } + mtx_lock_spin(&sched_lock); + td->td_flags |= TDF_CAN_UNBIND; + mtx_unlock_spin(&sched_lock); + KASSERT((ke->ke_owner == td), + ("thread_user_enter: No starting owner ")); + ke->ke_owner = td; + td->td_usticks = 0; } } } @@ -1547,90 +1335,165 @@ int thread_userret(struct thread *td, struct trapframe *frame) { int error; - struct kse_upcall *ku; + int unbound; + struct kse *ke; struct ksegrp *kg; + struct thread *worktodo; struct proc *p; struct timespec ts; - p = td->td_proc; + KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner), + ("thread_userret: bad thread/kse pointers")); + KASSERT((td == curthread), + ("thread_userret: bad thread argument")); + + kg = td->td_ksegrp; + p = td->td_proc; + error = 0; + unbound = TD_IS_UNBOUND(td); - /* Nothing to do with non-threaded group/process */ - if (td->td_ksegrp->kg_numupcalls == 0) - return (0); + mtx_lock_spin(&sched_lock); + if ((worktodo = kg->kg_last_assigned)) + worktodo = TAILQ_NEXT(worktodo, td_runq); + else + worktodo = TAILQ_FIRST(&kg->kg_runq); /* - * State clock interrupt hit in userland, it - * is returning from interrupt, charge thread's - * userland time for UTS. + * Permanently bound threads never upcall but they may + * loan out their KSE at this point. + * Upcalls imply bound.. They also may want to do some Philantropy. + * Temporarily bound threads on the other hand either yield + * to other work and transform into an upcall, or proceed back to + * userland. */ - if (td->td_flags & TDF_USTATCLOCK) { - thread_update_usr_ticks(td); - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_USTATCLOCK; - mtx_unlock_spin(&sched_lock); - } - /* - * Optimisation: - * This thread has not started any upcall. - * If there is no work to report other than ourself, - * then it can return direct to userland. - */ if (TD_CAN_UNBIND(td)) { - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_CAN_UNBIND; - mtx_unlock_spin(&sched_lock); - if ((kg->kg_completed == NULL) && - (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) { - thread_update_sys_ticks(td); + td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND); + if (!worktodo && (kg->kg_completed == NULL) && + !(td->td_kse->ke_flags & KEF_DOUPCALL)) { + /* + * This thread has not started any upcall. + * If there is no work to report other than + * ourself, then it can return direct to userland. + */ +justreturn: + mtx_unlock_spin(&sched_lock); + thread_update_uticks(); td->td_mailbox = NULL; return (0); } + mtx_unlock_spin(&sched_lock); error = thread_export_context(td); + td->td_usticks = 0; if (error) { /* - * Failing to do the KSE operation just defaults + * As we are not running on a borrowed KSE, + * failing to do the KSE operation just defaults * back to synchonous operation, so just return from * the syscall. */ - return (0); + goto justreturn; } + mtx_lock_spin(&sched_lock); /* - * There is something to report, and we own an upcall - * strucuture, we can go to userland. - * Turn ourself into an upcall thread. + * Turn ourself into a bound upcall. + * We will rely on kse_reassign() + * to make us run at a later time. 
*/ - mtx_lock_spin(&sched_lock); td->td_flags |= TDF_UPCALLING; + + /* there may be more work since we re-locked schedlock */ + if ((worktodo = kg->kg_last_assigned)) + worktodo = TAILQ_NEXT(worktodo, td_runq); + else + worktodo = TAILQ_FIRST(&kg->kg_runq); + } else if (unbound) { + /* + * We are an unbound thread, looking to + * return to user space. There must be another owner + * of this KSE. + * We are using a borrowed KSE. save state and exit. + * kse_reassign() will recycle the kse as needed, + */ mtx_unlock_spin(&sched_lock); - } else if (td->td_mailbox) { error = thread_export_context(td); + td->td_usticks = 0; if (error) { + /* + * There is nothing we can do. + * We just lose that context. We + * probably should note this somewhere and send + * the process a signal. + */ PROC_LOCK(td->td_proc); + psignal(td->td_proc, SIGSEGV); mtx_lock_spin(&sched_lock); + ke = td->td_kse; /* possibly upcall with error? */ } else { - PROC_LOCK(td->td_proc); - mtx_lock_spin(&sched_lock); /* - * There are upcall threads waiting for - * work to do, wake one of them up. - * XXXKSE Maybe wake all of them up. + * Don't make an upcall, just exit so that the owner + * can get its KSE if it wants it. + * Our context is already safely stored for later + * use by the UTS. */ - if (kg->kg_upsleeps) - wakeup_one(&kg->kg_completed); + PROC_LOCK(p); + mtx_lock_spin(&sched_lock); + ke = td->td_kse; + } + /* + * If the owner is idling, we now have something for it + * to report, so make it runnable. + * If the owner is not an upcall, make an attempt to + * ensure that at least one of any IDLED upcalls can + * wake up. + */ + if (ke->ke_owner->td_flags & TDF_UPCALLING) { + TD_CLR_IDLE(ke->ke_owner); + } else { + FOREACH_KSE_IN_GROUP(kg, ke) { + if (TD_IS_IDLE(ke->ke_owner)) { + TD_CLR_IDLE(ke->ke_owner); + setrunnable(ke->ke_owner); + break; + } + } } thread_exit(); - /* NOTREACHED */ } + /* + * We ARE going back to userland with this KSE. + * We are permanently bound. We may be an upcall. + * If an upcall, check for threads that need to borrow the KSE. + * Any other thread that comes ready after this missed the boat. + */ + ke = td->td_kse; + /* + * If not upcalling, go back to userspace. + * If we are, get the upcall set up. + */ if (td->td_flags & TDF_UPCALLING) { - KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind")); - ku = td->td_upcall; + if (worktodo) { + /* + * force a switch to more urgent 'in kernel' + * work. Control will return to this thread + * when there is no more work to do. + * kse_reassign() will do that for us. + */ + TD_SET_LOAN(td); + p->p_stats->p_ru.ru_nvcsw++; + mi_switch(); /* kse_reassign() will (re)find worktodo */ + } + td->td_flags &= ~TDF_UPCALLING; + if (ke->ke_flags & KEF_DOUPCALL) + ke->ke_flags &= ~KEF_DOUPCALL; + mtx_unlock_spin(&sched_lock); + /* * There is no more work to do and we are going to ride - * this thread up to userland as an upcall. + * this thread/KSE up to userland as an upcall. * Do the last parts of the setup needed for the upcall. */ CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", @@ -1641,27 +1504,16 @@ thread_userret(struct thread *td, struct trapframe *frame) * Will use Giant in cpu_thread_clean() because it uses * kmem_free(kernel_map, ...) 
*/ - cpu_set_upcall_kse(td, ku); - - /* - * Clear TDF_UPCALLING after set upcall context, - * profiling code looks TDF_UPCALLING to avoid account - * a wrong user %EIP - */ - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_UPCALLING; - if (ku->ku_flags & KUF_DOUPCALL) - ku->ku_flags &= ~KUF_DOUPCALL; - mtx_unlock_spin(&sched_lock); + cpu_set_upcall_kse(td, ke); - /* + /* * Unhook the list of completed threads. * anything that completes after this gets to * come in next time. * Put the list of completed thread mailboxes on * this KSE's mailbox. */ - error = thread_link_mboxes(kg, ku); + error = thread_link_mboxes(kg, ke); if (error) goto bad; @@ -1672,33 +1524,34 @@ thread_userret(struct thread *td, struct trapframe *frame) * it would be nice if this all happenned only on the first * time through. (the scan for extra work etc.) */ - error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0); +#if 0 + error = suword((caddr_t)ke->ke_mailbox + + offsetof(struct kse_mailbox, km_curthread), 0); +#else /* if user pointer arithmetic is ok in the kernel */ + error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0); +#endif + ke->ke_uuticks = ke->ke_usticks = 0; if (error) goto bad; - - /* Export current system time */ nanotime(&ts); if (copyout(&ts, - (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts))) { + (caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) { goto bad; } + } else { + mtx_unlock_spin(&sched_lock); } /* * Optimisation: * Ensure that we have a spare thread available, * for when we re-enter the kernel. */ - if (td->td_standin == NULL) - thread_alloc_spare(td, NULL); + if (td->td_standin == NULL) { + td->td_standin = thread_alloc(); + } - /* - * Clear thread mailbox first, then clear system tick count. - * The order is important because thread_statclock() use - * mailbox pointer to see if it is an userland thread or - * an UTS kernel thread. - */ + thread_update_uticks(); td->td_mailbox = NULL; - td->td_usticks = 0; return (0); bad: @@ -1710,7 +1563,6 @@ bad: psignal(td->td_proc, SIGSEGV); PROC_UNLOCK(td->td_proc); td->td_mailbox = NULL; - td->td_usticks = 0; return (error); /* go sync */ } @@ -1749,6 +1601,7 @@ thread_single(int force_exit) if (force_exit == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; + td->td_flags &= ~TDF_UNBOUND; } else p->p_flag &= ~P_SINGLE_EXIT; p->p_flag |= P_STOPPED_SINGLE; @@ -1771,16 +1624,17 @@ thread_single(int force_exit) else abortsleep(td2); } + if (TD_IS_IDLE(td2)) { + TD_CLR_IDLE(td2); + } } else { if (TD_IS_SUSPENDED(td2)) continue; - /* - * maybe other inhibitted states too? - * XXXKSE Is it totally safe to - * suspend a non-interruptable thread? - */ + /* maybe other inhibitted states too? */ if (td2->td_inhibitors & - (TDI_SLEEPING | TDI_SWAPPED)) + (TDI_SLEEPING | TDI_SWAPPED | + TDI_LOAN | TDI_IDLE | + TDI_EXITING)) thread_suspend_one(td2); } } @@ -1806,14 +1660,8 @@ thread_single(int force_exit) mtx_lock(&Giant); PROC_LOCK(p); } - if (force_exit == SINGLE_EXIT) { - if (td->td_upcall) { - mtx_lock_spin(&sched_lock); - upcall_remove(td); - mtx_unlock_spin(&sched_lock); - } + if (force_exit == SINGLE_EXIT) kse_purge(p, td); - } return (0); } @@ -1855,6 +1703,7 @@ thread_suspend_check(int return_instead) { struct thread *td; struct proc *p; + struct kse *ke; struct ksegrp *kg; td = curthread; @@ -1886,6 +1735,16 @@ thread_suspend_check(int return_instead) mtx_lock_spin(&sched_lock); while (mtx_owned(&Giant)) mtx_unlock(&Giant); + /* + * All threads should be exiting + * Unless they are the active "singlethread". 
+ * destroy un-needed KSEs as we go.. + * KSEGRPS may implode too as #kses -> 0. + */ + ke = td->td_kse; + if (ke->ke_owner == td && + (kg->kg_kses >= kg->kg_numthreads )) + ke->ke_flags |= KEF_EXIT; thread_exit(); } @@ -1893,6 +1752,14 @@ thread_suspend_check(int return_instead) * When a thread suspends, it just * moves to the processes's suspend queue * and stays there. + * + * XXXKSE if TDF_BOUND is true + * it will not release it's KSE which might + * lead to deadlock if there are not enough KSEs + * to complete all waiting threads. + * Maybe be able to 'lend' it out again. + * (lent kse's can not go back to userland?) + * and can only be lent in STOPPED state. */ mtx_lock_spin(&sched_lock); if ((p->p_flag & P_STOPPED_SIG) && diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c index ecf309e..2c22a92 100644 --- a/sys/kern/subr_prof.c +++ b/sys/kern/subr_prof.c @@ -358,9 +358,7 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS) return (0); if (state == GMON_PROF_OFF) { gp->state = state; - PROC_LOCK(&proc0); stopprofclock(&proc0); - PROC_UNLOCK(&proc0); stopguprof(gp); } else if (state == GMON_PROF_ON) { gp->state = GMON_PROF_OFF; @@ -371,9 +369,7 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS) #ifdef GUPROF } else if (state == GMON_PROF_HIRES) { gp->state = GMON_PROF_OFF; - PROC_LOCK(&proc0); stopprofclock(&proc0); - PROC_UNLOCK(&proc0); startguprof(gp); gp->state = state; #endif @@ -423,7 +419,7 @@ profil(td, uap) struct thread *td; register struct profil_args *uap; { - struct uprof *upp; + register struct uprof *upp; int s; int error = 0; @@ -434,9 +430,7 @@ profil(td, uap) goto done2; } if (uap->scale == 0) { - PROC_LOCK(td->td_proc); stopprofclock(td->td_proc); - PROC_UNLOCK(td->td_proc); goto done2; } upp = &td->td_proc->p_stats->p_prof; @@ -478,16 +472,19 @@ done2: * inaccurate. */ void -addupc_intr(struct thread *td, uintptr_t pc, u_int ticks) +addupc_intr(ke, pc, ticks) + register struct kse *ke; + register uintptr_t pc; + u_int ticks; { - struct uprof *prof; - caddr_t addr; - u_int i; - int v; + register struct uprof *prof; + register caddr_t addr; + register u_int i; + register int v; if (ticks == 0) return; - prof = &td->td_proc->p_stats->p_prof; + prof = &ke->ke_proc->p_stats->p_prof; if (pc < prof->pr_off || (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) return; /* out of range; ignore */ @@ -495,9 +492,9 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks) addr = prof->pr_base + i; if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) { mtx_lock_spin(&sched_lock); - td->td_praddr = pc; - td->td_prticks = ticks; - td->td_flags |= (TDF_OWEUPC | TDF_ASTPENDING); + prof->pr_addr = pc; + prof->pr_ticks = ticks; + ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING ; mtx_unlock_spin(&sched_lock); } } @@ -505,56 +502,34 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks) /* * Much like before, but we can afford to take faults here. If the * update fails, we simply turn off profiling. - * XXXKSE, don't use kse unless we got sched lock. 
*/ void -addupc_task(struct thread *td, uintptr_t pc, u_int ticks) +addupc_task(ke, pc, ticks) + register struct kse *ke; + register uintptr_t pc; + u_int ticks; { - struct proc *p = td->td_proc; + struct proc *p = ke->ke_proc; register struct uprof *prof; register caddr_t addr; register u_int i; u_short v; - int stop = 0; if (ticks == 0) return; - PROC_LOCK(p); - mtx_lock_spin(&sched_lock); - if (!(p->p_sflag & PS_PROFIL)) { - mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); - return; - } - p->p_profthreads++; - mtx_unlock_spin(&sched_lock); - PROC_UNLOCK(p); prof = &p->p_stats->p_prof; if (pc < prof->pr_off || - (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) { - goto out; - } + (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) + return; addr = prof->pr_base + i; if (copyin(addr, &v, sizeof(v)) == 0) { v += ticks; if (copyout(&v, addr, sizeof(v)) == 0) - goto out; - } - stop = 1; - -out: - PROC_LOCK(p); - if (--p->p_profthreads == 0) { - if (p->p_sflag & PS_STOPPROF) { - wakeup(&p->p_profthreads); - stop = 0; - } + return; } - if (stop) - stopprofclock(p); - PROC_UNLOCK(p); + stopprofclock(p); } #if defined(__i386__) && __GNUC__ >= 2 diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c index 92835a0..eec2ae6 100644 --- a/sys/kern/subr_trap.c +++ b/sys/kern/subr_trap.c @@ -73,22 +73,15 @@ userret(td, frame, oticks) u_int oticks; { struct proc *p = td->td_proc; -#ifdef INVARIANTS - struct kse *ke; -#endif - u_int64_t eticks; + struct kse *ke = td->td_kse; CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid, p->p_comm); #ifdef INVARIANTS - /* - * Check that we called signotify() enough. - * XXXKSE this checking is bogus for threaded program, - */ + /* Check that we called signotify() enough. */ mtx_lock(&Giant); PROC_LOCK(p); mtx_lock_spin(&sched_lock); - ke = td->td_kse; if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 || (td->td_kse->ke_flags & KEF_ASTPENDING) == 0)) printf("failed to set signal flags properly for ast()\n"); @@ -103,18 +96,6 @@ userret(td, frame, oticks) sched_userret(td); /* - * Charge system time if profiling. - * - * XXX should move PS_PROFIL to a place that can obviously be - * accessed safely without sched_lock. - */ - - if (p->p_sflag & PS_PROFIL) { - eticks = td->td_sticks - oticks; - addupc_task(td, TRAPF_PC(frame), (u_int)eticks * psratio); - } - - /* * We need to check to see if we have to exit or wait due to a * single threading requirement or some other STOP condition. * Don't bother doing all the work if the stop bits are not set @@ -132,6 +113,21 @@ userret(td, frame, oticks) if (p->p_flag & P_KSES) { thread_userret(td, frame); } + + /* + * Charge system time if profiling. + * + * XXX should move PS_PROFIL to a place that can obviously be + * accessed safely without sched_lock. 
+ */ + if (p->p_sflag & PS_PROFIL) { + quad_t ticks; + + mtx_lock_spin(&sched_lock); + ticks = ke->ke_sticks - oticks; + mtx_unlock_spin(&sched_lock); + addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio); + } } /* @@ -150,7 +146,6 @@ ast(struct trapframe *framep) u_int prticks, sticks; int sflag; int flags; - int tflags; int sig; #if defined(DEV_NPX) && !defined(SMP) int ucode; @@ -180,21 +175,19 @@ ast(struct trapframe *framep) */ mtx_lock_spin(&sched_lock); ke = td->td_kse; - sticks = td->td_sticks; - tflags = td->td_flags; + sticks = ke->ke_sticks; flags = ke->ke_flags; sflag = p->p_sflag; p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU); #ifdef MAC p->p_sflag &= ~PS_MACPEND; #endif - ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED); - td->td_flags &= ~(TDF_ASTPENDING | TDF_OWEUPC); + ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED | KEF_OWEUPC); cnt.v_soft++; prticks = 0; - if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) { - prticks = td->td_prticks; - td->td_prticks = 0; + if (flags & KEF_OWEUPC && sflag & PS_PROFIL) { + prticks = p->p_stats->p_prof.pr_ticks; + p->p_stats->p_prof.pr_ticks = 0; } mtx_unlock_spin(&sched_lock); /* @@ -207,9 +200,8 @@ ast(struct trapframe *framep) if (td->td_ucred != p->p_ucred) cred_update_thread(td); - if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) { - addupc_task(td, td->td_praddr, prticks); - } + if (flags & KEF_OWEUPC && sflag & PS_PROFIL) + addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks); if (sflag & PS_ALRMPEND) { PROC_LOCK(p); psignal(p, SIGVTALRM); diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c index b958ceb..3ab4a33 100644 --- a/sys/kern/subr_witness.c +++ b/sys/kern/subr_witness.c @@ -244,7 +244,7 @@ static struct witness_order_list_entry order_lists[] = { #endif { "clk", &lock_class_mtx_spin }, { "mutex profiling lock", &lock_class_mtx_spin }, - { "kse zombie lock", &lock_class_mtx_spin }, + { "zombie_thread_lock", &lock_class_mtx_spin }, { "ALD Queue", &lock_class_mtx_spin }, #ifdef __ia64__ { "MCA spin lock", &lock_class_mtx_spin }, |
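
Editorial note: the addupc_intr()/addupc_task() hunks above, in both the reverted thread-based form and the restored KSE-based form, implement the same two-stage profiling update: the statistics-clock path attempts a fault-free write into the user-space profile buffer, and if that write cannot be done safely it stashes the pc and tick count and raises an AST (OWEUPC + ASTPENDING) so the update is retried on return to user mode, where page faults are tolerable. The following is a minimal, self-contained user-space simulation of that pattern, not FreeBSD code: prof_buf, nofault_ok, owe_upc, addupc_intr_sim(), and addupc_task_sim() are illustrative stand-ins for the real profile buffer, fuswintr()/suswintr(), the OWEUPC flag, and the two kernel routines.

/*
 * Simulation of the deferred profile-update pattern shown in the
 * addupc_intr()/addupc_task() hunks above.  All names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define NSLOTS 64

static uint16_t prof_buf[NSLOTS];	/* stands in for the user profile buffer */
static uintptr_t pr_off = 0x1000;	/* lowest profiled pc */
static uint32_t pr_scale = 0x8000;	/* 0x10000 would map pc 1:1 to slots */

static bool owe_upc;			/* deferred update pending (cf. OWEUPC) */
static uintptr_t saved_pc;
static unsigned saved_ticks;

/* Index computation, in the spirit of PC_TO_INDEX(). */
static size_t
pc_to_index(uintptr_t pc)
{
	return (((pc - pr_off) * (uint64_t)pr_scale) >> 16);
}

/* Interrupt-time path: must not fault; on failure, defer to "AST" time. */
static void
addupc_intr_sim(uintptr_t pc, unsigned ticks, bool nofault_ok)
{
	size_t i;

	if (pc < pr_off || (i = pc_to_index(pc)) >= NSLOTS)
		return;			/* out of range; ignore */
	if (nofault_ok) {
		prof_buf[i] += ticks;	/* fast, fault-free update */
	} else {
		saved_pc = pc;		/* stash and retry from the AST path */
		saved_ticks = ticks;
		owe_upc = true;
	}
}

/* "AST" path: may take faults (copyin/copyout in the real code). */
static void
addupc_task_sim(void)
{
	size_t i;

	if (!owe_upc)
		return;
	owe_upc = false;
	if (saved_pc >= pr_off && (i = pc_to_index(saved_pc)) < NSLOTS)
		prof_buf[i] += saved_ticks;
}

int
main(void)
{
	addupc_intr_sim(0x1010, 1, true);	/* fast path succeeds */
	addupc_intr_sim(0x1010, 3, false);	/* fast path "faults"; deferred */
	addupc_task_sim();			/* retried at return to user mode */
	printf("slot %zu = %u ticks\n",
	    pc_to_index(0x1010), prof_buf[pc_to_index(0x1010)]);
	return (0);
}

The split exists because the clock interrupt cannot tolerate a page fault on the user buffer; the only question the patch changes is where the deferred pc/ticks and the "owed" flag live (per-thread fields in the reverted code, per-KSE fields plus p_stats->p_prof in the restored code).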