author     mav <mav@FreeBSD.org>    2012-08-10 19:02:49 +0000
committer  mav <mav@FreeBSD.org>    2012-08-10 19:02:49 +0000
commit     5b837de0b30c64efe81d5ee95949ecc3ce5978c9 (patch)
tree       1f818147214783cba5a546aa9c20fed7152836b4 /sys/kern/sched_ule.c
parent     50fe3717a6a062e999254b82b00c757ffca8f8ad (diff)
Some minor tunings/cleanups inspired by bde@ after previous commits:
- remove extra dynamic variable initializations;
- restore (4BSD) and implement (ULE) hogticks variable setting;
- make sched_rr_interval() more tolerant to options;
- restore (4BSD) and implement (ULE) kern.sched.quantum sysctl, a more
  user-friendly wrapper for sched_slice;
- tune some sysctl descriptions;
- make some style fixes.
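For illustration, here is a minimal userland sketch of the conversion the new
kern.sched.quantum handler performs between a quantum in microseconds and
sched_slice in stathz ticks. The helper names (quantum_to_slice,
slice_to_quantum), the standalone main(), and the assumed realstathz of 127
are illustrative only; the kernel code in the diff below operates on the
global sched_slice and hogticks instead.

/*
 * Standalone sketch of the quantum <-> slice conversion used by the new
 * kern.sched.quantum sysctl (illustrative names, not kernel code).
 */
#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

static int realstathz = 127;	/* assumed stat clock frequency, ticks/s */

/* Convert a quantum in microseconds to a slice in stathz ticks. */
static int
quantum_to_slice(int quantum_us)
{
	int period = 1000000 / realstathz;	/* one stathz tick, in us */

	return (MAX(1, (quantum_us + period / 2) / period));
}

/* Convert a slice in stathz ticks back to microseconds. */
static int
slice_to_quantum(int slice)
{
	return ((1000000 / realstathz) * slice);
}

int
main(void)
{
	/* The default slice of 12 stathz ticks at 127 Hz is roughly 94 ms. */
	printf("slice 12 -> %d us\n", slice_to_quantum(12));
	/* Asking for a 100000 us (100 ms) quantum rounds to 13 ticks. */
	printf("quantum 100000 us -> %d ticks\n", quantum_to_slice(100000));
	return (0);
}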
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--  sys/kern/sched_ule.c | 70
1 file changed, 40 insertions(+), 30 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 719592a..5d5e908 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -201,9 +201,9 @@ static struct td_sched td_sched0;
* preempt_thresh: Priority threshold for preemption and remote IPIs.
*/
static int sched_interact = SCHED_INTERACT_THRESH;
-static int realstathz;
-static int tickincr;
-static int sched_slice = 1;
+static int realstathz = 127;
+static int tickincr = 8 << SCHED_TICK_SHIFT;
+static int sched_slice = 12;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
@@ -1363,13 +1363,6 @@ sched_setup(void *dummy)
#else
tdq_setup(tdq);
#endif
- /*
- * To avoid divide-by-zero, we set realstathz a dummy value
- * in case which sched_clock() called before sched_initticks().
- */
- realstathz = hz;
- sched_slice = (realstathz/10); /* ~100ms */
- tickincr = 1 << SCHED_TICK_SHIFT;
/* Add thread0's load since it's running. */
TDQ_LOCK(tdq);
@@ -1380,7 +1373,7 @@ sched_setup(void *dummy)
}
/*
- * This routine determines the tickincr after stathz and hz are setup.
+ * This routine determines time constants after stathz and hz are setup.
*/
/* ARGSUSED */
static void
@@ -1389,7 +1382,8 @@ sched_initticks(void *dummy)
int incr;
realstathz = stathz ? stathz : hz;
- sched_slice = (realstathz/10); /* ~100ms */
+ sched_slice = realstathz / 10; /* ~100ms */
+ hogticks = max(1, 2 * hz * sched_slice / realstathz);
/*
* tickincr is shifted out by 10 to avoid rounding errors due to
@@ -1606,8 +1600,8 @@ int
sched_rr_interval(void)
{
- /* Convert sched_slice to hz */
- return (hz/(realstathz/sched_slice));
+ /* Convert sched_slice from stathz to hz. */
+ return (max(1, (sched_slice * hz + realstathz / 2) / realstathz));
}
/*
@@ -2231,16 +2225,15 @@ sched_clock(struct thread *td)
sched_interact_update(td);
sched_priority(td);
}
+
/*
- * We used up one time slice.
- */
- if (--ts->ts_slice > 0)
- return;
- /*
- * We're out of time, force a requeue at userret().
+ * Force a context switch if the current thread has used up a full
+ * time slice (default is 100ms).
*/
- ts->ts_slice = sched_slice;
- td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+ if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
+ ts->ts_slice = sched_slice;
+ td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+ }
}
/*
@@ -2795,11 +2788,31 @@ sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
#endif
+static int
+sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
+{
+ int error, new_val, period;
+
+ period = 1000000 / realstathz;
+ new_val = period * sched_slice;
+ error = sysctl_handle_int(oidp, &new_val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (new_val <= 0)
+ return (EINVAL);
+ sched_slice = max(1, (new_val + period / 2) / period);
+ hogticks = max(1, 2 * hz * sched_slice / realstathz);
+ return (0);
+}
+
SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
"Scheduler name");
+SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
+ NULL, 0, sysctl_kern_quantum, "I",
+ "Length of time granted to timeshare threads in microseconds");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
- "Slice size for timeshare threads");
+ "Length of time granted to timeshare threads in stathz ticks");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
"Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
@@ -2807,9 +2820,9 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
0,"Controls whether static kernel priorities are assigned to sleeping threads.");
SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins,
- 0,"Number of times idle will spin waiting for new work.");
+ 0,"Number of times idle thread will spin waiting for new work.");
SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW, &sched_idlespinthresh,
- 0,"Threshold before we will permit idle spinning.");
+ 0,"Threshold before we will permit idle thread spinning.");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
"Number of hz ticks to keep thread affinity for");
@@ -2817,17 +2830,14 @@ SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
"Enables the long-term load balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
&balance_interval, 0,
- "Average frequency in stathz ticks to run the long-term balancer");
+ "Average period in stathz ticks to run the long-term balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
"Attempts to steal work from other cores before idling");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
"Minimum load on remote cpu before we'll steal");
-
-/* Retrieve SMP topology */
SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
- CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
+ CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
"XML dump of detected CPU topology");
-
#endif
/* ps compat. All cpu percentages from ULE are weighted. */
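As a usage note, the following userland sketch reads and then sets the new
kern.sched.quantum value through sysctlbyname(3). It assumes a kernel with
this change applied; setting the value requires root privilege, and error
handling is kept minimal for brevity.

/*
 * Userland sketch: query and adjust kern.sched.quantum via sysctlbyname(3).
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int quantum;
	size_t len = sizeof(quantum);

	/* Read the current quantum, reported in microseconds. */
	if (sysctlbyname("kern.sched.quantum", &quantum, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("current quantum: %d us\n", quantum);

	/* Request a ~50 ms quantum; the handler rounds to whole stathz ticks. */
	quantum = 50000;
	if (sysctlbyname("kern.sched.quantum", NULL, NULL, &quantum,
	    sizeof(quantum)) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	return (0);
}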