Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  478
 1 file changed, 477 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c0c60c9..c9dec2a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -34,6 +34,7 @@
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/suspend.h>
+#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
@@ -1289,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
}
}
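+	/*
+	 * If the previous wakeup of this task came from a different CPU
+	 * than the current waker, re-run the target-CPU selection below:
+	 */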
+ if (p->last_waker_cpu != this_cpu)
+ goto out_set_cpu;
+
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
goto out_set_cpu;
@@ -1359,6 +1363,8 @@ out_set_cpu:
cpu = task_cpu(p);
}
+ p->last_waker_cpu = this_cpu;
+
out_activate:
#endif /* CONFIG_SMP */
if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1440,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
#ifdef CONFIG_SCHEDSTATS
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+ p->last_waker_cpu = cpu;
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
p->oncpu = 0;
#endif
+#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
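The new last_waker_cpu field used above is not declared in this diff,
since the diffstat is limited to kernel/sched.c; a companion hunk in
include/linux/sched.h presumably adds something along these lines to
task_struct:

	#ifdef CONFIG_SMP
		int last_waker_cpu;	/* CPU that last woke this task up */
	#endif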
@@ -5082,7 +5091,470 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
#define SD_NODES_PER_DOMAIN 16
+/*
+ * Self-tuning task migration cost measurement between source and target CPUs.
+ *
+ * This is done by measuring the cost of manipulating buffers of varying
+ * sizes. For a given buffer-size here are the steps that are taken:
+ *
+ * 1) the source CPU reads+dirties a shared buffer
+ * 2) the target CPU reads+dirties the same shared buffer
+ *
+ * We measure how long they take, in the following 4 scenarios:
+ *
+ * - source: CPU1, target: CPU2 | cost1
+ * - source: CPU2, target: CPU1 | cost2
+ * - source: CPU1, target: CPU1 | cost3
+ * - source: CPU2, target: CPU2 | cost4
+ *
+ * We then calculate the (cost1+cost2) - (cost3+cost4) difference - this
+ * is the cost of migration.
+ *
+ * We then start off from a small buffer-size and iterate up to larger
+ * buffer sizes, in 5% steps - measuring each buffer-size separately, and
+ * doing a maximum search for the cost. (The maximum cost for a migration
+ * normally occurs when the working set size is around the effective cache
+ * size.)
+ */
+#define SEARCH_SCOPE 2
+#define MIN_CACHE_SIZE (64*1024U)
+#define DEFAULT_CACHE_SIZE (5*1024*1024U)
+#define ITERATIONS 2
+#define SIZE_THRESH 130
+#define COST_THRESH 130
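
Per buffer size, the four measurements above reduce to a single
difference; a minimal sketch of the arithmetic (the helper name is
illustrative, not part of the patch):

	static unsigned long long migration_cost_of(unsigned long long cost1,
						    unsigned long long cost2,
						    unsigned long long cost3,
						    unsigned long long cost4)
	{
		/* cross-CPU runs minus the same-CPU (cache-hot) runs: */
		return (cost1 + cost2) - (cost3 + cost4);
	}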
+
+/*
+ * The migration cost is a function of 'domain distance'. Domain
+ * distance is the number of steps a CPU has to iterate down its
+ * domain tree to share a domain with the other CPU. The farther
+ * two CPUs are from each other, the larger the distance gets.
+ *
+ * Note that we use the distance only to cache measurement results,
+ * the distance value is not used numerically otherwise. When two
+ * CPUs have the same distance it is assumed that the migration
+ * cost is the same. (This is a simplification, but quite practical.)
+ */
+#define MAX_DOMAIN_DISTANCE 32
+
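+/* -1LL means "not measured yet"; [a ... b] is the GNU range-designator extension: */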
+static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
+ { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
+
+/*
+ * Allow override of migration cost - in units of microseconds.
+ * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
+ * of 1 msec, a level-2 cost of 2 msecs and a level-3 cost of 3 msecs:
+ */
+static int __init migration_cost_setup(char *str)
+{
+ int ints[MAX_DOMAIN_DISTANCE+1], i;
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+
+ printk("#ints: %d\n", ints[0]);
+ for (i = 1; i <= ints[0]; i++) {
+ migration_cost[i-1] = (unsigned long long)ints[i]*1000;
+ printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
+ }
+ return 1;
+}
+
+__setup ("migration_cost=", migration_cost_setup);
+
+/*
+ * Global multiplier (divisor) for migration-cutoff values,
+ * in percent. E.g. use a value of 150 to get 1.5 times
+ * longer cache-hot cutoff times.
+ *
+ * (We scale it from 100 to 128 to make the long long arithmetic easier.)
+ */
+
+#define MIGRATION_FACTOR_SCALE 128
+
+static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
+
+static int __init setup_migration_factor(char *str)
+{
+ get_option(&str, &migration_factor);
+ migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
+ return 1;
+}
+
+__setup("migration_factor=", setup_migration_factor);
+
+/*
+ * Estimated distance of two CPUs, measured via the number of domains
+ * we have to pass for the two CPUs to be in the same span:
+ */
+static unsigned long domain_distance(int cpu1, int cpu2)
+{
+ unsigned long distance = 0;
+ struct sched_domain *sd;
+
+ for_each_domain(cpu1, sd) {
+ WARN_ON(!cpu_isset(cpu1, sd->span));
+ if (cpu_isset(cpu2, sd->span))
+ return distance;
+ distance++;
+ }
+ if (distance >= MAX_DOMAIN_DISTANCE) {
+ WARN_ON(1);
+ distance = MAX_DOMAIN_DISTANCE-1;
+ }
+
+ return distance;
+}
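
As an illustration (topology hypothetical): on a two-socket box where
CPUs 0-1 share a socket-level domain and the top-level domain spans all
CPUs, domain_distance(0, 1) is 0 because the lowest domain of CPU 0
already spans CPU 1, while domain_distance(0, 2) is 1, since one more
level has to be walked before CPU 2 appears in the span.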
+
+static unsigned int migration_debug;
+
+static int __init setup_migration_debug(char *str)
+{
+ get_option(&str, &migration_debug);
+ return 1;
+}
+
+__setup("migration_debug=", setup_migration_debug);
+
+/*
+ * Maximum cache-size that the scheduler should try to measure.
+ * Architectures with larger caches should tune this up during
+ * bootup. Gets used in the domain-setup code (i.e. during SMP
+ * bootup).
+ */
+unsigned int max_cache_size;
+
+static int __init setup_max_cache_size(char *str)
+{
+ get_option(&str, &max_cache_size);
+ return 1;
+}
+
+__setup("max_cache_size=", setup_max_cache_size);
+
+/*
+ * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
+ * is the operation that is timed, so we try to generate unpredictable
+ * cachemisses that still end up filling the L2 cache:
+ */
+static void touch_cache(void *__cache, unsigned long __size)
+{
+ unsigned long size = __size/sizeof(long), chunk1 = size/3,
+ chunk2 = 2*size/3;
+ unsigned long *cache = __cache;
+ int i;
+
+ for (i = 0; i < size/6; i += 8) {
+ switch (i % 6) {
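+ /* no breaks: each case falls through, dirtying several locations per pass (apparently intentional, to generate extra misses) */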
+ case 0: cache[i]++;
+ case 1: cache[size-1-i]++;
+ case 2: cache[chunk1-i]++;
+ case 3: cache[chunk1+i]++;
+ case 4: cache[chunk2-i]++;
+ case 5: cache[chunk2+i]++;
+ }
+ }
+}
+
+/*
+ * Measure the cache-cost of one task migration. Returns in units of nsec.
+ */
+static unsigned long long measure_one(void *cache, unsigned long size,
+ int source, int target)
+{
+ cpumask_t mask, saved_mask;
+ unsigned long long t0, t1, t2, t3, cost;
+
+ saved_mask = current->cpus_allowed;
+
+ /*
+ * Flush source caches to RAM and invalidate them:
+ */
+ sched_cacheflush();
+
+ /*
+ * Migrate to the source CPU:
+ */
+ mask = cpumask_of_cpu(source);
+ set_cpus_allowed(current, mask);
+ WARN_ON(smp_processor_id() != source);
+
+ /*
+ * Dirty the working set:
+ */
+ t0 = sched_clock();
+ touch_cache(cache, size);
+ t1 = sched_clock();
+
+ /*
+ * Migrate to the target CPU, dirty the L2 cache and access
+ * the shared buffer (which represents the working set
+ * of a migrated task):
+ */
+ mask = cpumask_of_cpu(target);
+ set_cpus_allowed(current, mask);
+ WARN_ON(smp_processor_id() != target);
+
+ t2 = sched_clock();
+ touch_cache(cache, size);
+ t3 = sched_clock();
+
+ cost = t1-t0 + t3-t2;
+
+ if (migration_debug >= 2)
+ printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
+ source, target, t1-t0, t2-t1, t3-t2, cost);
+ /*
+ * Flush target caches to RAM and invalidate them:
+ */
+ sched_cacheflush();
+
+ set_cpus_allowed(current, saved_mask);
+
+ return cost;
+}
+
+/*
+ * Measure a series of task migrations and return the average
+ * result. Since this code runs early during bootup the system
+ * is 'undisturbed' and the average latency makes sense.
+ *
+ * The algorithm in essence auto-detects the relevant cache-size,
+ * so it will properly detect different cachesizes for different
+ * cache-hierarchies, depending on how the CPUs are connected.
+ *
+ * Architectures can prime the upper limit of the search range via
+ * max_cache_size, otherwise the search range defaults to 64K...10MB.
+ */
+static unsigned long long
+measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
+{
+ unsigned long long cost1, cost2;
+ int i;
+
+ /*
+ * Measure the migration cost of 'size' bytes, averaged over
+ * 2*ITERATIONS cross-CPU runs:
+ *
+ * (We perturb the buffer size by a small (0..1k) per-iteration
+ * value to compensate for size/alignment related artifacts.
+ * We also subtract the cost of the same operation done on
+ * the same CPU.)
+ */
+ cost1 = 0;
+
+ /*
+ * dry run, to make sure we start off cache-cold on cpu1,
+ * and to get any vmalloc pagefaults in advance:
+ */
+ measure_one(cache, size, cpu1, cpu2);
+ for (i = 0; i < ITERATIONS; i++)
+ cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+
+ measure_one(cache, size, cpu2, cpu1);
+ for (i = 0; i < ITERATIONS; i++)
+ cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+
+ /*
+ * (We measure the non-migrating [cached] cost on both
+ * cpu1 and cpu2, to handle CPUs with different speeds)
+ */
+ cost2 = 0;
+
+ measure_one(cache, size, cpu1, cpu1);
+ for (i = 0; i < ITERATIONS; i++)
+ cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+
+ measure_one(cache, size, cpu2, cpu2);
+ for (i = 0; i < ITERATIONS; i++)
+ cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+
+ /*
+ * Get the per-iteration migration cost:
+ */
+ do_div(cost1, 2*ITERATIONS);
+ do_div(cost2, 2*ITERATIONS);
+
+ return cost1 - cost2;
+}
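
Note that measure_cost() does its final subtraction in unsigned
arithmetic: if the cache-hot runs happen to be slower than the migrating
ones, the result wraps around, which the caller below absorbs by storing
it into a signed long long and only updating the maximum when cost > 0.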
+
+static unsigned long long measure_migration_cost(int cpu1, int cpu2)
+{
+ unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
+ unsigned int max_size, size, size_found = 0;
+ long long cost = 0, prev_cost;
+ void *cache;
+
+ /*
+ * Search from max_cache_size/2 up to max_cache_size*2 - the real
+ * relevant cachesize has to lie somewhere in between.
+ */
+ if (max_cache_size) {
+ max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
+ size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
+ } else {
+ /*
+ * We have no estimate of the relevant search range,
+ * so use the default range:
+ */
+ max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
+ size = MIN_CACHE_SIZE;
+ }
+
+ if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
+ printk("cpu %d and %d not both online!\n", cpu1, cpu2);
+ return 0;
+ }
+
+ /*
+ * Allocate the working set:
+ */
+ cache = vmalloc(max_size);
+ if (!cache) {
+ printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
+ return 1000000; // return 1 msec on very small boxen
+ }
+
+ while (size <= max_size) {
+ prev_cost = cost;
+ cost = measure_cost(cpu1, cpu2, cache, size);
+
+ /*
+ * Update the max:
+ */
+ if (cost > 0) {
+ if (max_cost < cost) {
+ max_cost = cost;
+ size_found = size;
+ }
+ }
+ /*
+ * Calculate average fluctuation, we use this to prevent
+ * noise from triggering an early break out of the loop:
+ */
+ fluct = abs(cost - prev_cost);
+ avg_fluct = (avg_fluct + fluct)/2;
+
+ if (migration_debug)
+ printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+ cpu1, cpu2, size,
+ (long)cost / 1000000,
+ ((long)cost / 100000) % 10,
+ (long)max_cost / 1000000,
+ ((long)max_cost / 100000) % 10,
+ domain_distance(cpu1, cpu2),
+ cost, avg_fluct);
+
+ /*
+ * If we iterated at least 30% past the previous maximum,
+ * and the cost (plus fluctuation) has dropped to below
+ * ~77% of the maximum, then we assume we have found
+ * the maximum and break out of the loop early:
+ */
+ if (size_found && (size*100 > size_found*SIZE_THRESH))
+ if (cost+avg_fluct <= 0 ||
+ max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
+
+ if (migration_debug)
+ printk("-> found max.\n");
+ break;
+ }
+ /*
+ * Increase the cachesize in 5% steps:
+ */
+ size = size * 20 / 19;
+ }
+
+ if (migration_debug)
+ printk("[%d][%d] working set size found: %d, cost: %Ld\n",
+ cpu1, cpu2, size_found, max_cost);
+
+ vfree(cache);
+
+ /*
+ * A task is considered 'cache cold' if at least twice the
+ * worst-case cost of migration has passed.
+ *
+ * (This limit is only honored if the load-balancing
+ * situation is 'nice' - if there is a large imbalance we
+ * ignore it for the sake of CPU utilization and
+ * processing fairness.)
+ */
+ return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
+}
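
The cutoff returned here is stored into sd->cache_hot_time by
calibrate_migration_costs() below, and is consumed by the load
balancer's cache-hotness check - in sched.c of this era essentially the
following macro (paraphrased, not part of this patch):

	#define task_hot(p, now, sd) ((long long)((now) - (p)->last_ran) < \
					(long long)(sd)->cache_hot_time)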
+
+static void calibrate_migration_costs(const cpumask_t *cpu_map)
+{
+ int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
+ unsigned long j0, j1, distance, max_distance = 0;
+ struct sched_domain *sd;
+
+ j0 = jiffies;
+
+ /*
+ * First pass - calculate the cacheflush times:
+ */
+ for_each_cpu_mask(cpu1, *cpu_map) {
+ for_each_cpu_mask(cpu2, *cpu_map) {
+ if (cpu1 == cpu2)
+ continue;
+ distance = domain_distance(cpu1, cpu2);
+ max_distance = max(max_distance, distance);
+ /*
+ * No result cached yet?
+ */
+ if (migration_cost[distance] == -1LL)
+ migration_cost[distance] =
+ measure_migration_cost(cpu1, cpu2);
+ }
+ }
+ /*
+ * Second pass - update the sched domain hierarchy with
+ * the new cache-hot-time estimations:
+ */
+ for_each_cpu_mask(cpu, *cpu_map) {
+ distance = 0;
+ for_each_domain(cpu, sd) {
+ sd->cache_hot_time = migration_cost[distance];
+ distance++;
+ }
+ }
+ /*
+ * Print the matrix:
+ */
+ if (migration_debug)
+ printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
+ max_cache_size,
+#ifdef CONFIG_X86
+ cpu_khz/1000
+#else
+ -1
+#endif
+ );
+ printk("migration_cost=");
+ for (distance = 0; distance <= max_distance; distance++) {
+ if (distance)
+ printk(",");
+ printk("%ld", (long)migration_cost[distance] / 1000);
+ }
+ printk("\n");
+ j1 = jiffies;
+ if (migration_debug)
+ printk("migration: %ld seconds\n", (j1-j0)/HZ);
+
+ /*
+ * Move back to the original CPU. NUMA-Q gets confused
+ * if we migrate to another quad during bootup.
+ */
+ if (raw_smp_processor_id() != orig_cpu) {
+ cpumask_t mask = cpumask_of_cpu(orig_cpu),
+ saved_mask = current->cpus_allowed;
+
+ set_cpus_allowed(current, mask);
+ set_cpus_allowed(current, saved_mask);
+ }
+}
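
Note that the migration_cost= summary is printed unconditionally, so
every successful calibration leaves a line in the boot log that can be
fed straight back via the migration_cost= option to skip the measurement
on later boots; with hypothetical values:

	migration_cost=4003,9241

(one comma-separated value per domain distance, in microseconds).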
+
#ifdef CONFIG_NUMA
+
/**
* find_next_best_node - find the next node to include in a sched_domain
* @node: node whose sched_domain we're building
@@ -5448,6 +5920,10 @@ next_sg:
#endif
cpu_attach_domain(sd, i);
}
+ /*
+ * Tune cache-hot values:
+ */
+ calibrate_migration_costs(cpu_map);
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.