-rw-r--r--   sys/kern/kern_switch.c   139
-rw-r--r--   sys/kern/sched_4bsd.c     90
-rw-r--r--   sys/sys/runq.h             1
3 files changed, 116 insertions(+), 114 deletions(-)
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index caab072..feb4767 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -45,9 +45,6 @@ __FBSDID("$FreeBSD$");
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
-#if defined(SMP) && defined(SCHED_4BSD)
-#include <sys/sysctl.h>
-#endif
#include <machine/cpu.h>
@@ -192,106 +189,6 @@ critical_exit(void)
(long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
-/*
- * This function is called when a thread is about to be put on run queue
- * because it has been made runnable or its priority has been adjusted. It
- * determines if the new thread should be immediately preempted to. If so,
- * it switches to it and eventually returns true. If not, it returns false
- * so that the caller may place the thread on an appropriate run queue.
- */
-int
-maybe_preempt(struct thread *td)
-{
-#ifdef PREEMPTION
- struct thread *ctd;
- int cpri, pri;
-#endif
-
-#ifdef PREEMPTION
- /*
- * The new thread should not preempt the current thread if any of the
- * following conditions are true:
- *
- * - The kernel is in the throes of crashing (panicstr).
- * - The current thread has a higher (numerically lower) or
- * equivalent priority. Note that this prevents curthread from
- * trying to preempt to itself.
- * - It is too early in the boot for context switches (cold is set).
- * - The current thread has an inhibitor set or is in the process of
- * exiting. In this case, the current thread is about to switch
- * out anyways, so there's no point in preempting. If we did,
- * the current thread would not be properly resumed as well, so
- * just avoid that whole landmine.
- * - If the new thread's priority is not a realtime priority and
- * the current thread's priority is not an idle priority and
- * FULL_PREEMPTION is disabled.
- *
- * If all of these conditions are false, but the current thread is in
- * a nested critical section, then we have to defer the preemption
- * until we exit the critical section. Otherwise, switch immediately
- * to the new thread.
- */
- ctd = curthread;
- THREAD_LOCK_ASSERT(td, MA_OWNED);
- KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
- ("thread has no (or wrong) sched-private part."));
- KASSERT((td->td_inhibitors == 0),
- ("maybe_preempt: trying to run inhibited thread"));
- pri = td->td_priority;
- cpri = ctd->td_priority;
- if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
- TD_IS_INHIBITED(ctd))
- return (0);
-#ifndef FULL_PREEMPTION
- if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
- return (0);
-#endif
-
- if (ctd->td_critnest > 1) {
- CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
- ctd->td_critnest);
- ctd->td_owepreempt = 1;
- return (0);
- }
- /*
- * Thread is runnable but not yet put on system run queue.
- */
- MPASS(ctd->td_lock == td->td_lock);
- MPASS(TD_ON_RUNQ(td));
- TD_SET_RUNNING(td);
- CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
- td->td_proc->p_pid, td->td_name);
- SCHED_STAT_INC(switch_preempt);
- mi_switch(SW_INVOL|SW_PREEMPT, td);
- /*
- * td's lock pointer may have changed. We have to return with it
- * locked.
- */
- spinlock_enter();
- thread_unlock(ctd);
- thread_lock(td);
- spinlock_exit();
- return (1);
-#else
- return (0);
-#endif
-}
-
-#if 0
-#ifndef PREEMPTION
-/* XXX: There should be a non-static version of this. */
-static void
-printf_caddr_t(void *data)
-{
- printf("%s", (char *)data);
-}
-static char preempt_warning[] =
- "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
-SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
- preempt_warning);
-#endif
-#endif
-
/************************************************************************
* SYSTEM RUN QUEUE manipulations and tests *
************************************************************************/
@@ -460,16 +357,11 @@ runq_check(struct runq *rq)
return (0);
}
-#if defined(SMP) && defined(SCHED_4BSD)
-int runq_fuzz = 1;
-SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
-#endif
-
/*
* Find the highest priority process on the run queue.
*/
struct td_sched *
-runq_choose(struct runq *rq)
+runq_choose_fuzz(struct runq *rq, int fuzz)
{
struct rqhead *rqh;
struct td_sched *ts;
@@ -477,14 +369,13 @@ runq_choose(struct runq *rq)
while ((pri = runq_findbit(rq)) != -1) {
rqh = &rq->rq_queues[pri];
-#if defined(SMP) && defined(SCHED_4BSD)
/* fuzz == 1 is normal.. 0 or less are ignored */
- if (runq_fuzz > 1) {
+ if (fuzz > 1) {
/*
* In the first couple of entries, check if
* there is one for our CPU as a preference.
*/
- int count = runq_fuzz;
+ int count = fuzz;
int cpu = PCPU_GET(cpuid);
struct td_sched *ts2;
ts2 = ts = TAILQ_FIRST(rqh);
@@ -497,8 +388,30 @@ runq_choose(struct runq *rq)
ts2 = TAILQ_NEXT(ts2, ts_procq);
}
} else
-#endif
ts = TAILQ_FIRST(rqh);
+ KASSERT(ts != NULL, ("runq_choose_fuzz: no proc on busy queue"));
+ CTR3(KTR_RUNQ,
+ "runq_choose_fuzz: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
+ return (ts);
+ }
+ CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);
+
+ return (NULL);
+}
+
+/*
+ * Find the highest priority process on the run queue.
+ */
+struct td_sched *
+runq_choose(struct runq *rq)
+{
+ struct rqhead *rqh;
+ struct td_sched *ts;
+ int pri;
+
+ while ((pri = runq_findbit(rq)) != -1) {
+ rqh = &rq->rq_queues[pri];
+ ts = TAILQ_FIRST(rqh);
KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
CTR3(KTR_RUNQ,
"runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 92e303ec..6acb043 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -186,6 +186,9 @@ SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
+static int runq_fuzz = 1;
+SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
+
static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
&forward_wakeup_enabled, 0,
@@ -256,6 +259,91 @@ maybe_resched(struct thread *td)
}
/*
+ * This function is called when a thread is about to be put on run queue
+ * because it has been made runnable or its priority has been adjusted. It
+ * determines if the new thread should be immediately preempted to. If so,
+ * it switches to it and eventually returns true. If not, it returns false
+ * so that the caller may place the thread on an appropriate run queue.
+ */
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+ struct thread *ctd;
+ int cpri, pri;
+#endif
+
+#ifdef PREEMPTION
+ /*
+ * The new thread should not preempt the current thread if any of the
+ * following conditions are true:
+ *
+ * - The kernel is in the throes of crashing (panicstr).
+ * - The current thread has a higher (numerically lower) or
+ * equivalent priority. Note that this prevents curthread from
+ * trying to preempt to itself.
+ * - It is too early in the boot for context switches (cold is set).
+ * - The current thread has an inhibitor set or is in the process of
+ * exiting. In this case, the current thread is about to switch
+ * out anyways, so there's no point in preempting. If we did,
+ * the current thread would not be properly resumed as well, so
+ * just avoid that whole landmine.
+ * - If the new thread's priority is not a realtime priority and
+ * the current thread's priority is not an idle priority and
+ * FULL_PREEMPTION is disabled.
+ *
+ * If all of these conditions are false, but the current thread is in
+ * a nested critical section, then we have to defer the preemption
+ * until we exit the critical section. Otherwise, switch immediately
+ * to the new thread.
+ */
+ ctd = curthread;
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
+ ("thread has no (or wrong) sched-private part."));
+ KASSERT((td->td_inhibitors == 0),
+ ("maybe_preempt: trying to run inhibited thread"));
+ pri = td->td_priority;
+ cpri = ctd->td_priority;
+ if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
+ TD_IS_INHIBITED(ctd))
+ return (0);
+#ifndef FULL_PREEMPTION
+ if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
+ return (0);
+#endif
+
+ if (ctd->td_critnest > 1) {
+ CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
+ ctd->td_critnest);
+ ctd->td_owepreempt = 1;
+ return (0);
+ }
+ /*
+ * Thread is runnable but not yet put on system run queue.
+ */
+ MPASS(ctd->td_lock == td->td_lock);
+ MPASS(TD_ON_RUNQ(td));
+ TD_SET_RUNNING(td);
+ CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
+ td->td_proc->p_pid, td->td_name);
+ SCHED_STAT_INC(switch_preempt);
+ mi_switch(SW_INVOL|SW_PREEMPT, td);
+ /*
+ * td's lock pointer may have changed. We have to return with it
+ * locked.
+ */
+ spinlock_enter();
+ thread_unlock(ctd);
+ thread_lock(td);
+ spinlock_exit();
+ return (1);
+#else
+ return (0);
+#endif
+}
+
+/*
* Constants for digital decay and forget:
* 90% of (td_estcpu) usage in 5 * loadav time
* 95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
@@ -1217,7 +1305,7 @@ sched_choose(void)
struct td_sched *kecpu;
rq = &runq;
- ts = runq_choose(&runq);
+ ts = runq_choose_fuzz(&runq, runq_fuzz);
kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
if (ts == NULL ||
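maybe_preempt() is moved essentially verbatim from kern_switch.c into sched_4bsd.c by the hunks above. A distilled sketch of its decision logic follows; should_preempt(), its boolean parameters, and the two priority constants are simplified stand-ins (not the real PRI_MAX_ITHD/PRI_MIN_IDLE values), it assumes the kernel's lower-number-is-higher-priority convention, and it leaves out the critical-section deferral (td_owepreempt) and the actual context switch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical priority bounds standing in for PRI_MAX_ITHD / PRI_MIN_IDLE. */
#define	MAX_INTR_PRI	47
#define	MIN_IDLE_PRI	224

/*
 * Distilled form of the maybe_preempt() decision: should a newly
 * runnable thread with priority `pri` preempt the current thread with
 * priority `cpri`?  `full_preemption` mirrors the FULL_PREEMPTION
 * kernel option.
 */
static bool
should_preempt(int pri, int cpri, bool panicking, bool cold_boot,
    bool cur_inhibited, bool full_preemption)
{
	if (panicking || cold_boot || cur_inhibited)
		return (false);
	if (pri >= cpri)	/* not strictly higher priority */
		return (false);
	/* Mirrors the !FULL_PREEMPTION check in the code above. */
	if (!full_preemption && pri > MAX_INTR_PRI && cpri < MIN_IDLE_PRI)
		return (false);
	return (true);
}

int
main(void)
{
	/*
	 * On this hypothetical scale, an interrupt-priority thread (20)
	 * preempts a timeshare thread (120) even without FULL_PREEMPTION,
	 * while another timeshare thread (100) does not.
	 */
	printf("%d\n", should_preempt(20, 120, false, false, false, false));
	printf("%d\n", should_preempt(100, 120, false, false, false, false));
	return (0);
}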
diff --git a/sys/sys/runq.h b/sys/sys/runq.h
index 4b14e2d..4d46bb7 100644
--- a/sys/sys/runq.h
+++ b/sys/sys/runq.h
@@ -67,6 +67,7 @@ void runq_add_pri(struct runq *, struct td_sched *, u_char, int);
int runq_check(struct runq *);
struct td_sched *runq_choose(struct runq *);
struct td_sched *runq_choose_from(struct runq *, u_char);
+struct td_sched *runq_choose_fuzz(struct runq *, int);
void runq_init(struct runq *);
void runq_remove(struct runq *, struct td_sched *);
void runq_remove_idx(struct runq *, struct td_sched *, u_char *);
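After this change the kern.sched.runq_fuzz sysctl is created by sched_4bsd.c itself, so it exists only on kernels built with SCHED_4BSD (previously it also required SMP). A minimal FreeBSD userland read of the knob via sysctlbyname(3), assuming such a kernel, could look like this:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/* Read the kern.sched.runq_fuzz knob; error handling kept minimal. */
int
main(void)
{
	int fuzz;
	size_t len = sizeof(fuzz);

	if (sysctlbyname("kern.sched.runq_fuzz", &fuzz, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("kern.sched.runq_fuzz = %d\n", fuzz);
	return (0);
}

Since the OID is CTLFLAG_RW, values above 1 can be written back the same way (or with sysctl(8)) to widen the window runq_choose_fuzz() scans for a thread that last ran on the choosing CPU.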