author    jeff <jeff@FreeBSD.org>  2004-01-25 03:54:52 +0000
committer jeff <jeff@FreeBSD.org>  2004-01-25 03:54:52 +0000
commit    c85cdc3d0fd697f504c2f05524ded0b3282c75bf (patch)
tree      5db1ead5d1c75b7e2ece1d370fa6708964bf49f8
parent    ca8a19313e2f06d57456af75aaa70204aa2efede (diff)
download  FreeBSD-src-c85cdc3d0fd697f504c2f05524ded0b3282c75bf.zip
          FreeBSD-src-c85cdc3d0fd697f504c2f05524ded0b3282c75bf.tar.gz
- Add a flags parameter to mi_switch.  The value of flags may be SW_VOL or
  SW_INVOL.  Assert that one of these is set in mi_switch() and properly
  adjust the rusage statistics.  This simplifies the large number of users
  of this interface, which were previously all required to adjust the
  proper counter prior to calling mi_switch().  This also facilitates more
  switch and locking optimizations.
- Change all callers of mi_switch() to pass the appropriate parameter and
  remove direct references to the process statistics.
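
To make the new contract concrete, here is a minimal userland sketch of
the accounting logic this commit moves into mi_switch().  This is an
illustration only, not FreeBSD kernel source: the rusage counters are
modeled as plain globals standing in for td->td_proc->p_stats->p_ru, and
the actual context-switch work is elided.

#include <assert.h>
#include <stdio.h>

#define SW_VOL   0x0001  /* Voluntary switch. */
#define SW_INVOL 0x0002  /* Involuntary switch. */

/* Stand-ins for the per-process rusage counters. */
static long ru_nvcsw;   /* voluntary context switches */
static long ru_nivcsw;  /* involuntary context switches */

static void
mi_switch(int flags)
{
	/* Callers must now declare which kind of switch this is. */
	assert((flags & (SW_VOL | SW_INVOL)) != 0);
	if (flags & SW_VOL)
		ru_nvcsw++;
	else
		ru_nivcsw++;
	/* ... machine-independent switch work would follow here ... */
}

int
main(void)
{
	mi_switch(SW_VOL);	/* e.g. a thread sleeping on a condvar */
	mi_switch(SW_INVOL);	/* e.g. an involuntary switch from ast() */
	printf("voluntary=%ld involuntary=%ld\n", ru_nvcsw, ru_nivcsw);
	return (0);
}

Each caller in the hunks below thus shrinks from two lines (bump the
counter by hand, then call mi_switch()) to a single mi_switch(SW_VOL) or
mi_switch(SW_INVOL) call.
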
-rw-r--r--  sys/kern/kern_condvar.c    |  9
-rw-r--r--  sys/kern/kern_idle.c       |  3
-rw-r--r--  sys/kern/kern_intr.c       |  6
-rw-r--r--  sys/kern/kern_kse.c        |  6
-rw-r--r--  sys/kern/kern_mutex.c      |  3
-rw-r--r--  sys/kern/kern_poll.c       |  3
-rw-r--r--  sys/kern/kern_shutdown.c   |  6
-rw-r--r--  sys/kern/kern_sig.c        |  6
-rw-r--r--  sys/kern/kern_subr.c       |  3
-rw-r--r--  sys/kern/kern_synch.c      | 17
-rw-r--r--  sys/kern/kern_thread.c     |  6
-rw-r--r--  sys/kern/sched_ule.c       |  3
-rw-r--r--  sys/kern/subr_trap.c       |  3
-rw-r--r--  sys/kern/subr_turnstile.c  |  3
-rw-r--r--  sys/sys/proc.h             |  5
-rw-r--r--  sys/vm/vm_zeroidle.c       |  3
16 files changed, 37 insertions(+), 48 deletions(-)
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index d95d9e2..580334a 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -119,8 +119,7 @@ static __inline void
cv_switch(struct thread *td)
{
TD_SET_SLEEPING(td);
- td->td_proc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
td->td_proc->p_pid, td->td_proc->p_comm);
}
@@ -370,8 +369,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
* Go back to sleep.
*/
TD_SET_SLEEPING(td);
- td->td_proc->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
td->td_flags &= ~TDF_TIMOFAIL;
}
@@ -447,8 +445,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
* Go back to sleep.
*/
TD_SET_SLEEPING(td);
- td->td_proc->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
td->td_flags &= ~TDF_TIMOFAIL;
}
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index cb273b7..c5fff0a 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -87,8 +87,7 @@ idle_proc(void *dummy)
mtx_lock_spin(&sched_lock);
td->td_state = TDS_CAN_RUN;
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index e171da0..515c1b9 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -407,10 +407,9 @@ ithread_schedule(struct ithd *ithread, int do_switch)
(ctd->td_critnest == 1) ) {
KASSERT((TD_IS_RUNNING(ctd)),
("ithread_schedule: Bad state for curthread."));
- ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
if (ctd->td_flags & TDF_IDLETD)
ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
- mi_switch();
+ mi_switch(SW_INVOL);
} else {
curthread->td_flags |= TDF_NEEDRESCHED;
}
@@ -566,9 +565,8 @@ restart:
if (ithd->it_enable != NULL)
ithd->it_enable(ithd->it_vector);
TD_SET_IWAIT(td); /* we're idle */
- p->p_stats->p_ru.ru_nvcsw++;
CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
- mi_switch();
+ mi_switch(SW_VOL);
CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
}
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index fa7306b..1a3ae43 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1939,8 +1939,7 @@ thread_single(int force_exit)
thread_suspend_one(td);
DROP_GIANT();
PROC_UNLOCK(p);
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROC_LOCK(p);
@@ -2042,8 +2041,7 @@ thread_suspend_check(int return_instead)
}
DROP_GIANT();
PROC_UNLOCK(p);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROC_LOCK(p);
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index ca45e67..0ab4fb6 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -654,8 +654,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
"_mtx_unlock_sleep: %p switching out lock=%p", m,
(void *)m->mtx_lock);
- td->td_proc->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
m, (void *)m->mtx_lock);
diff --git a/sys/kern/kern_poll.c b/sys/kern/kern_poll.c
index 9aab066..a597548 100644
--- a/sys/kern/kern_poll.c
+++ b/sys/kern/kern_poll.c
@@ -504,8 +504,7 @@ poll_idle(void)
mtx_unlock(&Giant);
mtx_assert(&Giant, MA_NOTOWNED);
mtx_lock_spin(&sched_lock);
- td->td_proc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
} else {
idlepoll_sleeping = 1;
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 8778e8a..e3763d5 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -308,8 +308,10 @@ boot(int howto)
DROP_GIANT();
for (subiter = 0; subiter < 50 * iter; subiter++) {
mtx_lock_spin(&sched_lock);
- curthread->td_proc->p_stats->p_ru.ru_nvcsw++;
- mi_switch(); /* Allow interrupt threads to run */
+ /*
+ * Allow interrupt threads to run
+ */
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
DELAY(1000);
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index b0a7ce3..da414bc 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2036,8 +2036,7 @@ ptracestop(struct thread *td, int sig)
thread_suspend_one(td);
PROC_UNLOCK(p);
DROP_GIANT();
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
}
@@ -2186,8 +2185,7 @@ issignal(td)
thread_suspend_one(td);
PROC_UNLOCK(p);
DROP_GIANT();
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROC_LOCK(p);
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index 931a893..a078b11 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -441,8 +441,7 @@ uio_yield(void)
mtx_lock_spin(&sched_lock);
DROP_GIANT();
sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */
- td->td_proc->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index c0bab3b..291937c 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -250,9 +250,8 @@ msleep(ident, mtx, priority, wmesg, timo)
sched_sleep(td, priority & PRIMASK);
if (TD_ON_SLEEPQ(td)) {
- p->p_stats->p_ru.ru_nvcsw++;
TD_SET_SLEEPING(td);
- mi_switch();
+ mi_switch(SW_VOL);
}
/*
* We're awake from voluntary sleep.
@@ -278,8 +277,7 @@ msleep(ident, mtx, priority, wmesg, timo)
* the wrong msleep(). Yuck.
*/
TD_SET_SLEEPING(td);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
td->td_flags &= ~TDF_TIMOFAIL;
}
if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
@@ -453,7 +451,7 @@ wakeup_one(ident)
* The machine independent parts of mi_switch().
*/
void
-mi_switch(void)
+mi_switch(int flags)
{
struct bintime new_switchtime;
struct thread *td;
@@ -469,7 +467,13 @@ mi_switch(void)
#endif
KASSERT(td->td_critnest == 1,
("mi_switch: switch in a critical section"));
+ KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
+ ("mi_switch: switch must be voluntary or involuntary"));
+ if (flags & SW_VOL)
+ p->p_stats->p_ru.ru_nvcsw++;
+ else
+ p->p_stats->p_ru.ru_nivcsw++;
/*
* Compute the amount of time during which the current
* process was running, and add that to its total so far.
@@ -647,9 +651,8 @@ yield(struct thread *td, struct yield_args *uap)
kg = td->td_ksegrp;
mtx_assert(&Giant, MA_NOTOWNED);
mtx_lock_spin(&sched_lock);
- kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
sched_prio(td, PRI_MAX_TIMESHARE);
- mi_switch();
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = 0;
return (0);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index fa7306b..1a3ae43 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1939,8 +1939,7 @@ thread_single(int force_exit)
thread_suspend_one(td);
DROP_GIANT();
PROC_UNLOCK(p);
- p->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROC_LOCK(p);
@@ -2042,8 +2041,7 @@ thread_suspend_check(int return_instead)
}
DROP_GIANT();
PROC_UNLOCK(p);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
PROC_LOCK(p);
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 15f83e2..dfa1970 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1707,8 +1707,7 @@ sched_bind(struct thread *td, int cpu)
kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
kseq_notify(ke, cpu);
/* When we return from mi_switch we'll be on the correct cpu. */
- td->td_proc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
#endif
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 7a08f53..2390087 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -247,8 +247,7 @@ ast(struct trapframe *framep)
#endif
mtx_lock_spin(&sched_lock);
sched_prio(td, kg->kg_user_pri);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
+ mi_switch(SW_INVOL);
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 1f877f4..d198b94 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -513,8 +513,7 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock,
CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
lock, lock->lo_name);
- td->td_proc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
if (LOCK_LOG_TEST(lock, 0))
CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 83dd537..674d516 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -841,7 +841,10 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
void fork_return(struct thread *, struct trapframe *);
int inferior(struct proc *p);
int leavepgrp(struct proc *p);
-void mi_switch(void);
+void mi_switch(int flags);
+/* Flags for mi_switch(). */
+#define SW_VOL 0x0001 /* Voluntary switch. */
+#define SW_INVOL 0x0002 /* Involuntary switch. */
int p_candebug(struct thread *td, struct proc *p);
int p_cansee(struct thread *td, struct proc *p);
int p_cansched(struct thread *td, struct proc *p);
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 9dc0419..863804a 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -155,8 +155,7 @@ vm_pagezero(void)
pages += vm_page_zero_idle();
if (pages > idlezero_maxrun || sched_runnable()) {
mtx_lock_spin(&sched_lock);
- td->td_proc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
+ mi_switch(SW_VOL);
mtx_unlock_spin(&sched_lock);
pages = 0;
}