author		jeff <jeff@FreeBSD.org>		2007-05-20 22:11:50 +0000
committer	jeff <jeff@FreeBSD.org>		2007-05-20 22:11:50 +0000
commit		027ae03b495b7cd9633acb01b6fddea042645da3 (patch)
tree		42c8e52aead43f7431897ada079682da98afc241 /sys
parent		bcfa98d0197abe20a89b307cefcf22f4e04ac8ff (diff)
download	FreeBSD-src-027ae03b495b7cd9633acb01b6fddea042645da3.zip
		FreeBSD-src-027ae03b495b7cd9633acb01b6fddea042645da3.tar.gz
- Move clock synchronization into a separate clock lock so the global
  scheduler lock is not involved.  sched_lock still protects the
  sched_clock call.  Another patch will remedy this.

Contributed by:	Attilio Rao <attilio@FreeBSD.org>
Tested by:	kris, jeff
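In outline, the patch swaps the global scheduler lock for a dedicated spin
mutex around the profiling and statistics state. The following kernel-style
sketch distills that pattern; it mirrors the hunks below, but the surrounding
declarations are simplified and the helper names (time_lock_init,
profclock_ref, profclock_unref) are illustrative, not part of the patch.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Spin lock dedicated to profiling/statistics state. */
struct mtx time_lock;
static int profprocs;		/* number of processes being profiled */

static void
time_lock_init(void)
{
	/* In the patch this runs in initclocks(), after cpu_initclocks(). */
	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
}

static void
profclock_ref(void)
{
	/* Formerly serialized by sched_lock; now by the narrower lock. */
	mtx_lock_spin(&time_lock);
	if (++profprocs == 1)
		cpu_startprofclock();
	mtx_unlock_spin(&time_lock);
}

static void
profclock_unref(void)
{
	mtx_lock_spin(&time_lock);
	if (--profprocs == 0)
		cpu_stopprofclock();
	mtx_unlock_spin(&time_lock);
}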
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_clock.c	23
-rw-r--r--	sys/kern/subr_prof.c	10
-rw-r--r--	sys/kern/subr_witness.c	1
-rw-r--r--	sys/sys/systm.h		2
4 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 063c2f3..a12bd70 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -83,6 +83,9 @@ SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];
+/* Spin-lock protecting profiling statistics. */
+struct mtx time_lock;
+
static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
@@ -172,6 +175,7 @@ initclocks(dummy)
* code do its bit.
*/
cpu_initclocks();
+ mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
/*
* Compute profhz/stathz, and fix profhz if needed.
@@ -349,20 +353,15 @@ startprofclock(p)
register struct proc *p;
{
- /*
- * XXX; Right now sched_lock protects statclock(), but perhaps
- * it should be protected later on by a time_lock, which would
- * cover psdiv, etc. as well.
- */
PROC_LOCK_ASSERT(p, MA_OWNED);
if (p->p_flag & P_STOPPROF)
return;
if ((p->p_flag & P_PROFIL) == 0) {
- mtx_lock_spin(&sched_lock);
p->p_flag |= P_PROFIL;
+ mtx_lock_spin(&time_lock);
if (++profprocs == 1)
cpu_startprofclock();
- mtx_unlock_spin(&sched_lock);
+ mtx_unlock_spin(&time_lock);
}
}
@@ -385,11 +384,11 @@ stopprofclock(p)
}
if ((p->p_flag & P_PROFIL) == 0)
return;
- mtx_lock_spin(&sched_lock);
p->p_flag &= ~P_PROFIL;
+ mtx_lock_spin(&time_lock);
if (--profprocs == 0)
cpu_stopprofclock();
- mtx_unlock_spin(&sched_lock);
+ mtx_unlock_spin(&time_lock);
}
}
@@ -412,7 +411,6 @@ statclock(int usermode)
td = curthread;
p = td->td_proc;
- mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
if (usermode) {
/*
* Charge the time as appropriate.
@@ -422,6 +420,7 @@ statclock(int usermode)
thread_statclock(1);
#endif
td->td_uticks++;
+ mtx_lock_spin_flags(&time_lock, MTX_QUIET);
if (p->p_nice > NZERO)
cp_time[CP_NICE]++;
else
@@ -442,6 +441,7 @@ statclock(int usermode)
if ((td->td_pflags & TDP_ITHREAD) ||
td->td_intr_nesting_level >= 2) {
td->td_iticks++;
+ mtx_lock_spin_flags(&time_lock, MTX_QUIET);
cp_time[CP_INTR]++;
} else {
#ifdef KSE
@@ -450,15 +450,18 @@ statclock(int usermode)
#endif
td->td_pticks++;
td->td_sticks++;
+ mtx_lock_spin_flags(&time_lock, MTX_QUIET);
if (!TD_IS_IDLETHREAD(td))
cp_time[CP_SYS]++;
else
cp_time[CP_IDLE]++;
}
}
+ mtx_unlock_spin_flags(&time_lock, MTX_QUIET);
CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
td, td->td_proc->p_comm, td->td_priority, (stathz)?stathz:hz);
+ mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
sched_clock(td);
/* Update resource usage integrals and maximums. */
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 3cec375..498d9d2 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -423,12 +423,12 @@ profil(td, uap)
}
PROC_LOCK(p);
upp = &td->td_proc->p_stats->p_prof;
- mtx_lock_spin(&sched_lock);
+ mtx_lock_spin(&time_lock);
upp->pr_off = uap->offset;
upp->pr_scale = uap->scale;
upp->pr_base = uap->samples;
upp->pr_size = uap->size;
- mtx_unlock_spin(&sched_lock);
+ mtx_unlock_spin(&time_lock);
startprofclock(p);
PROC_UNLOCK(p);
@@ -468,15 +468,15 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
if (ticks == 0)
return;
prof = &td->td_proc->p_stats->p_prof;
- mtx_lock_spin(&sched_lock);
+ mtx_lock_spin(&time_lock);
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
- mtx_unlock_spin(&sched_lock);
+ mtx_unlock_spin(&time_lock);
return; /* out of range; ignore */
}
addr = prof->pr_base + i;
- mtx_unlock_spin(&sched_lock);
+ mtx_unlock_spin(&time_lock);
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
td->td_profil_addr = pc;
td->td_profil_ticks = ticks;
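One detail worth noting in the addupc_intr() hunk above: the sampling window
(pr_off, pr_scale, pr_base, pr_size) is read under time_lock, and the lock is
dropped before fuswintr()/suswintr() touch the user-space counter. If that
access cannot complete from interrupt context, the sample is stashed on the
thread (td_profil_addr, td_profil_ticks) to be posted later from a safer
context, rather than retried under a spin lock.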
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 7224568..1f750e5 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -410,6 +410,7 @@ static struct witness_order_list_entry order_lists[] = {
{ "callout", &lock_class_mtx_spin },
{ "entropy harvest mutex", &lock_class_mtx_spin },
{ "syscons video lock", &lock_class_mtx_spin },
+ { "time lock", &lock_class_mtx_spin },
/*
* leaf locks
*/
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 817982c..dc07e65 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -71,6 +71,8 @@ extern int bootverbose; /* nonzero to print verbose messages */
extern int maxusers; /* system tune hint */
+extern struct mtx time_lock; /* time lock for profiling */
+
#ifdef INVARIANTS /* The option is always available */
#define KASSERT(exp,msg) do { \
if (__predict_false(!(exp))) \
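Taken together, the kern_clock.c hunks leave statclock() touching the two
spin locks strictly in sequence: the cp_time[] buckets are updated under
time_lock, which is released before sched_lock is taken for the sched_clock()
call that, per the commit message, still needs it. A simplified schematic of
that discipline (the real function also charges per-thread tick counters and
resource usage; statclock_sketch is a hypothetical name):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>

extern long cp_time[CPUSTATES];		/* defined in kern_clock.c */

static void
statclock_sketch(struct thread *td)
{
	/* Statistics now ride under the dedicated time lock... */
	mtx_lock_spin_flags(&time_lock, MTX_QUIET);
	cp_time[CP_SYS]++;	/* or CP_USER/CP_NICE/CP_INTR/CP_IDLE */
	mtx_unlock_spin_flags(&time_lock, MTX_QUIET);

	/* ...while sched_lock shrinks to just the scheduler tick. */
	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
	sched_clock(td);
	mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}

Because the two locks are never held at the same time, no new lock-order
edge is created; the subr_witness.c hunk adds the new spin lock to the
static order list so WITNESS can track it.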