diff options
author | jeff <jeff@FreeBSD.org> | 2008-03-02 07:58:42 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2008-03-02 07:58:42 +0000 |
commit | ad2a31513f336da73b206794695829f59113b3a7 (patch) | |
tree | a49a50ce8694ae981e503de20b9650bb0f5a1776 /sys/kern/sched_ule.c | |
parent | 0a56287482dbdc42be0a66082c61cf783dd56d5f (diff) | |
download | FreeBSD-src-ad2a31513f336da73b206794695829f59113b3a7.zip FreeBSD-src-ad2a31513f336da73b206794695829f59113b3a7.tar.gz |
- Replace the old smp cpu topology specification with a new, more flexible
tree structure that encodes the level of cache sharing and other
properties.
- Provide several convenience functions for creating one and two level
cpu trees as well as a default flat topology. The system now always
has some topology.
- On i386 and amd64 create a separate level in the hierarchy for HTT
and multi-core cpus. This will allow the scheduler to intelligently
load balance non-uniform cores. Presently we don't detect what level
of the cache hierarchy is shared at each level in the topology.
- Add a mechanism for testing common topologies that have more information
than the MD code is able to provide via the kern.smp.topology tunable.
This should be considered a debugging tool only and not a stable api.
Sponsored by: Nokia
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r-- | sys/kern/sched_ule.c | 65 |
1 files changed, 1 insertions, 64 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index 4445b05..d17cd58 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -243,7 +243,6 @@ static int tryself = 1; static int steal_htt = 1; static int steal_idle = 1; static int steal_thresh = 2; -static int topology = 0; /* * One thread queue per processor. @@ -1212,43 +1211,6 @@ tdg_add(struct tdq_group *tdg, struct tdq *tdq) } static void -sched_setup_topology(void) -{ - struct tdq_group *tdg; - struct cpu_group *cg; - int balance_groups; - struct tdq *tdq; - int i; - int j; - - topology = 1; - balance_groups = 0; - for (i = 0; i < smp_topology->ct_count; i++) { - cg = &smp_topology->ct_group[i]; - tdg = &tdq_groups[i]; - /* - * Initialize the group. - */ - tdg_setup(tdg); - /* - * Find all of the group members and add them. - */ - for (j = 0; j < MAXCPU; j++) { - if ((cg->cg_mask & (1 << j)) != 0) { - tdq = TDQ_CPU(j); - tdq_setup(tdq); - tdg_add(tdg, tdq); - } - } - if (tdg->tdg_cpus > 1) - balance_groups = 1; - } - tdg_maxid = smp_topology->ct_count - 1; - if (balance_groups) - sched_balance_groups(); -} - -static void sched_setup_smp(void) { struct tdq_group *tdg; @@ -1271,25 +1233,6 @@ sched_setup_smp(void) } tdg_maxid = cpus - 1; } - -/* - * Fake a topology with one group containing all CPUs. - */ -static void -sched_fake_topo(void) -{ -#ifdef SCHED_FAKE_TOPOLOGY - static struct cpu_top top; - static struct cpu_group group; - - top.ct_count = 1; - top.ct_group = &group; - group.cg_mask = all_cpus; - group.cg_count = mp_ncpus; - group.cg_children = 0; - smp_topology = &top; -#endif -} #endif /* @@ -1303,15 +1246,11 @@ sched_setup(void *dummy) tdq = TDQ_SELF(); #ifdef SMP - sched_fake_topo(); /* * Setup tdqs based on a topology configuration or vanilla SMP based * on mp_maxid. 
*/ - if (smp_topology == NULL) - sched_setup_smp(); - else - sched_setup_topology(); + sched_setup_smp(); balance_tdq = tdq; sched_balance(); #else @@ -2692,8 +2631,6 @@ SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, "Attempts to steal work from other cores before idling"); SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, "Minimum load on remote cpu before we'll steal"); -SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0, - "True when a topology has been specified by the MD code."); #endif /* ps compat. All cpu percentages from ULE are weighted. */ |