author | davidxu <davidxu@FreeBSD.org> | 2003-01-26 11:41:35 +0000
committer | davidxu <davidxu@FreeBSD.org> | 2003-01-26 11:41:35 +0000
commit | 4b9b549ca22658196f5ef73f96b4ed8ecd37401b (patch)
tree | e3b6f27545368a6af5135acaa9f1002337a9fac9 /sys/kern/subr_prof.c
parent | a74140ae0217edd23dbc303908778a13cf82376f (diff)
download | FreeBSD-src-4b9b549ca22658196f5ef73f96b4ed8ecd37401b.zip FreeBSD-src-4b9b549ca22658196f5ef73f96b4ed8ecd37401b.tar.gz
Move the UPCALL-related data out of struct kse and introduce a new
data structure, kse_upcall, to manage upcalls. All KSE binding
and loaning code is gone.
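As a rough sketch of the resulting layout, here is a compilable toy model; the field names are simplified stand-ins for illustration, not the actual sys/proc.h definitions:

#include <stdio.h>

/*
 * Illustrative model only; the real structures in sys/proc.h carry many
 * more fields and the names here are simplified stand-ins.
 */
struct thread;

struct kse {                            /* pure scheduler entity: a virtual CPU */
	struct thread *ke_thread;       /* thread currently running on this KSE */
};

struct kse_upcall {                     /* upcall state, no longer embedded in kse */
	struct thread *ku_owner;        /* every upcall has exactly one owner thread */
	void *ku_mailbox;               /* userland mailbox this upcall reports to */
	struct kse_upcall *ku_next;     /* upcalls are chained off the ksegrp */
};

struct thread {
	struct kse *td_kse;             /* assigned only while the thread runs */
	struct kse_upcall *td_upcall;   /* NULL: thread must export its context and exit */
};

int
main(void)
{
	struct thread td = { 0 };
	struct kse_upcall ku = { .ku_owner = &td };

	td.td_upcall = &ku;             /* the thread, not the KSE, owns the upcall */
	printf("thread %p owns upcall %p\n", (void *)&td, (void *)&ku);
	return (0);
}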
A thread that owns an upcall can collect all completed syscall contexts in
its ksegrp, switch itself into UPCALL mode, and take those contexts back
to userland. Any thread without an upcall structure has to export its
context and exit at the user boundary.
Any thread running in user mode owns an upcall structure. When such a thread
enters the kernel and the kse mailbox's current thread pointer is not NULL,
then if the thread blocks in the kernel a new UPCALL thread is created and
the upcall structure is transferred to it. If the kse mailbox's current
thread pointer is NULL, no UPCALL thread is created when the thread blocks
in the kernel.
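A hedged toy model of that decision, in plain userland C; the helper, the struct layouts, and the mailbox field name are illustrative approximations of the KSE mailbox interface, not copies of the kernel code:

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of the rule above; none of these are the kernel's real types
 * or helpers.
 */
struct kse_thr_mailbox;                         /* per-thread userland mailbox (opaque) */

struct kse_mailbox {
	struct kse_thr_mailbox *km_curthread;   /* NULL while the UTS itself runs */
};

struct kse_upcall { int ku_id; };

struct thread {
	struct kse_mailbox *td_mailbox;
	struct kse_upcall *td_upcall;
};

/* Hypothetical stand-in for the kernel's "create an UPCALL thread" step. */
static struct thread *
new_upcall_thread(struct kse_upcall *ku)
{
	struct thread *td2 = calloc(1, sizeof(*td2));

	td2->td_upcall = ku;            /* upcall ownership moves to the new thread */
	return (td2);
}

/* Conceptually invoked when a thread that owns an upcall blocks in the kernel. */
static void
on_block_in_kernel(struct thread *td)
{
	if (td->td_mailbox->km_curthread != NULL) {
		/* A normal user thread was running: hand the upcall to a new
		 * UPCALL thread so the UTS can be notified of the blockage. */
		struct thread *utd = new_upcall_thread(td->td_upcall);

		td->td_upcall = NULL;
		printf("upcall %d handed to new thread %p\n",
		    utd->td_upcall->ku_id, (void *)utd);
		free(utd);
	} else {
		/* The UTS itself was running: no UPCALL thread is created. */
		printf("no upcall thread created\n");
	}
}

int
main(void)
{
	int dummy;
	struct kse_mailbox km = { .km_curthread = (struct kse_thr_mailbox *)&dummy };
	struct kse_upcall ku = { .ku_id = 1 };
	struct thread td = { .td_mailbox = &km, .td_upcall = &ku };

	on_block_in_kernel(&td);        /* mailbox says a user thread was running */
	km.km_curthread = NULL;
	td.td_upcall = &ku;
	on_block_in_kernel(&td);        /* mailbox says the UTS was running */
	return (0);
}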
Each upcall always has an owner thread. Userland can remove an upcall by
calling kse_exit; when all upcalls in a ksegrp have been removed, the group is
automatically shut down. An upcall owner thread also exits when its process is
exiting, and when an owner thread exits, the upcall it owns is removed with it.
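In other words, upcalls act as a reference count on the ksegrp. A minimal sketch of that rule, with struct and field names invented for illustration:

#include <stdio.h>

/*
 * Toy model of "removing the last upcall shuts the ksegrp down"; the names
 * here are invented for illustration, not the kernel's.
 */
struct ksegrp_model {
	int kg_numupcalls;              /* upcalls still attached to the group */
	int kg_shutdown;                /* set once the group has been torn down */
};

/* Models the effect of kse_exit(2), or of an upcall owner thread exiting. */
static void
upcall_remove(struct ksegrp_model *kg)
{
	if (--kg->kg_numupcalls == 0) {
		kg->kg_shutdown = 1;    /* last upcall gone: automatic shutdown */
		printf("ksegrp shut down\n");
	}
}

int
main(void)
{
	struct ksegrp_model kg = { .kg_numupcalls = 2 };

	upcall_remove(&kg);             /* one upcall remains; group stays up */
	upcall_remove(&kg);             /* last one removed; group shuts down */
	return (0);
}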
KSE is now a pure scheduler entity: it represents a virtual CPU. A running
thread always has a KSE associated with it. The scheduler is free to
assign a KSE to a thread according to thread priority, and if a thread's
priority changes, a KSE can be moved from one thread to another.
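A small illustration of what "free to move a KSE between threads by priority" amounts to; this is not the actual sched_4bsd code, just a sketch of the policy:

#include <stdio.h>

/* Illustration only: hand a free KSE to the highest-priority runnable thread. */
struct thread_model {
	const char *name;
	int priority;                   /* lower value means higher priority */
	int runnable;
};

static struct thread_model *
pick_thread_for_kse(struct thread_model *tds, int n)
{
	struct thread_model *best = NULL;

	for (int i = 0; i < n; i++) {
		if (!tds[i].runnable)
			continue;
		if (best == NULL || tds[i].priority < best->priority)
			best = &tds[i];
	}
	return (best);
}

int
main(void)
{
	struct thread_model tds[] = {
		{ "uts",     120, 1 },
		{ "worker",   80, 1 },  /* highest-priority runnable: gets the KSE */
		{ "sleeper", 200, 0 },
	};
	struct thread_model *td = pick_thread_for_kse(tds, 3);

	printf("KSE goes to %s\n", td ? td->name : "nobody");
	return (0);
}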
When a ksegrp is created, N KSEs are created in the group, where N is the
number of physical CPUs in the system. This makes it possible for threads to
execute in the kernel on different CPUs in parallel even when the userland UTS
is only single-CPU safe. Userland calls kse_create to add more upcall
structures to the ksegrp and increase its own concurrency; the kernel
is not restricted by the number of upcalls userland provides.
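A hedged userland sketch of the sizing rule; inside the kernel the CPU count comes from mp_ncpus, and hw.ncpu is read here only to keep the example runnable on a BSD-style system:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/* Sketch of "one KSE per CPU at ksegrp creation"; not kernel code. */
int
main(void)
{
	int ncpu;
	size_t len = sizeof(ncpu);

	if (sysctlbyname("hw.ncpu", &ncpu, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		return (1);
	}
	printf("a new ksegrp starts with %d KSE(s), one per CPU\n", ncpu);
	printf("kse_create() adds upcalls (userland concurrency), not KSEs\n");
	return (0);
}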
The code hasn't been tested under SMP by the author due to lack of hardware.
Reviewed by: julian
Diffstat (limited to 'sys/kern/subr_prof.c')
-rw-r--r-- | sys/kern/subr_prof.c | 69
1 file changed, 47 insertions, 22 deletions
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 2c22a92..ecf309e 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -358,7 +358,9 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
 		return (0);
 	if (state == GMON_PROF_OFF) {
 		gp->state = state;
+		PROC_LOCK(&proc0);
 		stopprofclock(&proc0);
+		PROC_UNLOCK(&proc0);
 		stopguprof(gp);
 	} else if (state == GMON_PROF_ON) {
 		gp->state = GMON_PROF_OFF;
@@ -369,7 +371,9 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
 #ifdef GUPROF
 	} else if (state == GMON_PROF_HIRES) {
 		gp->state = GMON_PROF_OFF;
+		PROC_LOCK(&proc0);
 		stopprofclock(&proc0);
+		PROC_UNLOCK(&proc0);
 		startguprof(gp);
 		gp->state = state;
 #endif
@@ -419,7 +423,7 @@ profil(td, uap)
 	struct thread *td;
 	register struct profil_args *uap;
 {
-	register struct uprof *upp;
+	struct uprof *upp;
 	int s;
 	int error = 0;
 
@@ -430,7 +434,9 @@ profil(td, uap)
 		goto done2;
 	}
 	if (uap->scale == 0) {
+		PROC_LOCK(td->td_proc);
 		stopprofclock(td->td_proc);
+		PROC_UNLOCK(td->td_proc);
 		goto done2;
 	}
 	upp = &td->td_proc->p_stats->p_prof;
@@ -472,19 +478,16 @@ done2:
  * inaccurate.
  */
 void
-addupc_intr(ke, pc, ticks)
-	register struct kse *ke;
-	register uintptr_t pc;
-	u_int ticks;
+addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
 {
-	register struct uprof *prof;
-	register caddr_t addr;
-	register u_int i;
-	register int v;
+	struct uprof *prof;
+	caddr_t addr;
+	u_int i;
+	int v;
 
 	if (ticks == 0)
 		return;
-	prof = &ke->ke_proc->p_stats->p_prof;
+	prof = &td->td_proc->p_stats->p_prof;
 	if (pc < prof->pr_off ||
 	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
 		return;			/* out of range; ignore */
@@ -492,9 +495,9 @@ addupc_intr(ke, pc, ticks)
 	addr = prof->pr_base + i;
 	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
 		mtx_lock_spin(&sched_lock);
-		prof->pr_addr = pc;
-		prof->pr_ticks = ticks;
-		ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING ;
+		td->td_praddr = pc;
+		td->td_prticks = ticks;
+		td->td_flags |= (TDF_OWEUPC | TDF_ASTPENDING);
 		mtx_unlock_spin(&sched_lock);
 	}
 }
@@ -502,34 +505,56 @@ addupc_intr(ke, pc, ticks)
 /*
  * Much like before, but we can afford to take faults here.  If the
  * update fails, we simply turn off profiling.
+ * XXXKSE, don't use kse unless we got sched lock.
  */
 void
-addupc_task(ke, pc, ticks)
-	register struct kse *ke;
-	register uintptr_t pc;
-	u_int ticks;
+addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
 {
-	struct proc *p = ke->ke_proc;
+	struct proc *p = td->td_proc;
 	register struct uprof *prof;
 	register caddr_t addr;
 	register u_int i;
 	u_short v;
+	int stop = 0;
 
 	if (ticks == 0)
 		return;
 
+	PROC_LOCK(p);
+	mtx_lock_spin(&sched_lock);
+	if (!(p->p_sflag & PS_PROFIL)) {
+		mtx_unlock_spin(&sched_lock);
+		PROC_UNLOCK(p);
+		return;
+	}
+	p->p_profthreads++;
+	mtx_unlock_spin(&sched_lock);
+	PROC_UNLOCK(p);
 	prof = &p->p_stats->p_prof;
 	if (pc < prof->pr_off ||
-	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
-		return;
+	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
+		goto out;
+	}
 
 	addr = prof->pr_base + i;
 	if (copyin(addr, &v, sizeof(v)) == 0) {
 		v += ticks;
		if (copyout(&v, addr, sizeof(v)) == 0)
-			return;
+			goto out;
+	}
+	stop = 1;
+
+out:
+	PROC_LOCK(p);
+	if (--p->p_profthreads == 0) {
+		if (p->p_sflag & PS_STOPPROF) {
+			wakeup(&p->p_profthreads);
+			stop = 0;
+		}
 	}
-	stopprofclock(p);
+	if (stop)
+		stopprofclock(p);
+	PROC_UNLOCK(p);
 }
 
 #if defined(__i386__) && __GNUC__ >= 2
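Beyond the mechanical kse-to-thread renaming, the interesting change in addupc_task() is the new p_profthreads count: each caller registers itself before touching the userland profile buffer, and the last one out wakes anyone waiting in stopprofclock() (the PS_STOPPROF case). A minimal userspace sketch of that drain pattern, with invented names rather than kernel code:

#include <pthread.h>
#include <stdio.h>

/*
 * Userspace model of the p_profthreads / PS_STOPPROF handshake added above;
 * names and structure are illustrative, not the kernel's.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int profil_on = 1;       /* models PS_PROFIL */
static int stop_requested;      /* models PS_STOPPROF */
static int inflight;            /* models p_profthreads */

static void
addupc_task_model(void)
{
	pthread_mutex_lock(&lock);
	if (!profil_on) {                       /* profiling already off */
		pthread_mutex_unlock(&lock);
		return;
	}
	inflight++;                             /* register before touching the buffer */
	pthread_mutex_unlock(&lock);

	/* ... the copyin()/copyout() of the profile counter happens here ... */

	pthread_mutex_lock(&lock);
	if (--inflight == 0 && stop_requested)
		pthread_cond_broadcast(&drained); /* models wakeup(&p->p_profthreads) */
	pthread_mutex_unlock(&lock);
}

static void
stopprofclock_model(void)
{
	pthread_mutex_lock(&lock);
	stop_requested = 1;
	while (inflight > 0)                    /* wait for in-flight updates to drain */
		pthread_cond_wait(&drained, &lock);
	profil_on = 0;
	stop_requested = 0;
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	addupc_task_model();
	stopprofclock_model();
	printf("profiling stopped with no updates in flight\n");
	return (0);
}

The kernel version does the same dance with PROC_LOCK and sched_lock instead of a pthread mutex, and keeps a local "stop" flag so that a failed copyout still turns profiling off unless a stop is already pending.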