| | | |
|---|---|---|
| author | Rik van Riel <riel@redhat.com> | 2013-10-07 11:29:38 +0100 |
| committer | Ingo Molnar <mingo@kernel.org> | 2013-10-09 14:48:20 +0200 |
| commit | 1e3646ffc64b232cb14a5ef01d7b98997c1b73f9 (patch) | |
| tree | 6bb7610078609d8d84e1b7d535d958de41c8d588 /kernel/sched/fair.c | |
| parent | 930aa174fcc8b0efaad102fd80f677b92f35eaa2 (diff) | |
| download | op-kernel-dev-1e3646ffc64b232cb14a5ef01d7b98997c1b73f9.zip, op-kernel-dev-1e3646ffc64b232cb14a5ef01d7b98997c1b73f9.tar.gz | |
mm: numa: Revert temporarily disabling of NUMA migration
With the scan rate code working (at least for multi-instance specjbb),
the large hammer that is "sched: Do not migrate memory immediately after
switching node" can be replaced with something smarter. Revert temporarily
migration disabling and all traces of numa_migrate_seq.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-61-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
| | | |
|---|---|---|
| -rw-r--r-- | kernel/sched/fair.c | 25 |

1 file changed, 1 insertion(+), 24 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da6fa22..8454c38 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1261,16 +1261,8 @@ static void numa_migrate_preferred(struct task_struct *p)
 {
         /* Success if task is already running on preferred CPU */
         p->numa_migrate_retry = 0;
-        if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) {
-                /*
-                 * If migration is temporarily disabled due to a task migration
-                 * then re-enable it now as the task is running on its
-                 * preferred node and memory should migrate locally
-                 */
-                if (!p->numa_migrate_seq)
-                        p->numa_migrate_seq++;
+        if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
                 return;
-        }
 
         /* This task has no NUMA fault statistics yet */
         if (unlikely(p->numa_preferred_nid == -1))
@@ -1367,7 +1359,6 @@ static void task_numa_placement(struct task_struct *p)
         if (p->numa_scan_seq == seq)
                 return;
         p->numa_scan_seq = seq;
-        p->numa_migrate_seq++;
         p->numa_scan_period_max = task_scan_max(p);
 
         /* If the task is part of a group prevent parallel updates to group stats */
@@ -4730,20 +4721,6 @@ static void move_task(struct task_struct *p, struct lb_env *env)
         set_task_cpu(p, env->dst_cpu);
         activate_task(env->dst_rq, p, 0);
         check_preempt_curr(env->dst_rq, p, 0);
-#ifdef CONFIG_NUMA_BALANCING
-        if (p->numa_preferred_nid != -1) {
-                int src_nid = cpu_to_node(env->src_cpu);
-                int dst_nid = cpu_to_node(env->dst_cpu);
-
-                /*
-                 * If the load balancer has moved the task then limit
-                 * migrations from taking place in the short term in
-                 * case this is a short-lived migration.
-                 */
-                if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
-                        p->numa_migrate_seq = 0;
-        }
-#endif
 }
 
 /*
```
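For readers new to this code, here is a minimal C sketch of the settle-counter mechanism the removed hunks implemented, reconstructed only from the hunks above. The type and function names (`struct task`, `on_load_balance_move()`, and so on) are illustrative stand-ins, not kernel API: in the kernel the state lived in `task_struct`, and the code that consumed `numa_migrate_seq` to actually defer memory migration sits outside kernel/sched/fair.c and is not shown in this diff.

```c
/*
 * Simplified model of the mechanism this patch reverts; illustration only,
 * not kernel code. The transitions mirror the removed hunks above.
 */
struct task {
        int numa_preferred_nid; /* preferred NUMA node, -1 if unknown */
        int numa_migrate_seq;   /* 0 => memory migration temporarily disabled */
        int numa_scan_seq;
};

/*
 * From the removed move_task() hunk: when the load balancer moves the task
 * off its preferred node, suppress memory migration for the short term in
 * case the move is short-lived.
 */
static void on_load_balance_move(struct task *p, int src_nid, int dst_nid)
{
        if (p->numa_preferred_nid != -1 &&
            src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
                p->numa_migrate_seq = 0;
}

/*
 * From the removed task_numa_placement() hunk: each NUMA placement pass
 * bumped the sequence, so the suppression decayed after roughly one scan.
 */
static void on_numa_scan(struct task *p)
{
        p->numa_migrate_seq++;
}

/*
 * From the removed numa_migrate_preferred() hunk: once the task runs on its
 * preferred node again, re-enable migration immediately so memory follows.
 */
static void on_running_on_preferred_node(struct task *p)
{
        if (!p->numa_migrate_seq)
                p->numa_migrate_seq++;
}
```

With this patch all three transitions are gone; per the commit message, the adaptive scan rate code is expected to handle placement instead of this blanket post-move suppression.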