| field | value | date |
|---|---|---|
| author | Cody P Schafer <cody@linux.vnet.ibm.com> | 2013-07-03 15:01:34 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-03 16:07:27 -0700 |
| commit | 22a7f12b1606327f0e11fcdf9043ae00bf9917df (patch) | |
| tree | d1e34a4bafc4fd5c6bafa3c1db6de94fe2ec9da8 /mm | |
| parent | 0a647f3811d6af56405a819341ceac23e31d4572 (diff) | |
mm/page_alloc: when handling percpu_pagelist_fraction, don't needlessly recalculate high
Move the calculation of the new 'high' value outside the
for_each_possible_cpu() loop, as it does not depend on the cpu.
Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 10 |
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8125263..386de0f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5575,7 +5575,6 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
  * can have before it gets flushed back to buddy allocator.
  */
-
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
@@ -5589,12 +5588,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 
 	mutex_lock(&pcp_batch_high_lock);
 	for_each_populated_zone(zone) {
-		for_each_possible_cpu(cpu) {
-			unsigned long high;
-			high = zone->managed_pages / percpu_pagelist_fraction;
+		unsigned long high;
+		high = zone->managed_pages / percpu_pagelist_fraction;
+		for_each_possible_cpu(cpu)
 			setup_pagelist_highmark(
-				per_cpu_ptr(zone->pageset, cpu), high);
-		}
+				per_cpu_ptr(zone->pageset, cpu), high);
 	}
 	mutex_unlock(&pcp_batch_high_lock);
 	return 0;
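The patch is a straightforward loop-invariant hoist: 'high' depends only on the zone, not on the cpu, so it is now computed once per populated zone instead of once for every possible cpu. Below is a minimal, self-contained userspace C sketch of the same pattern; zone_t, NR_CPUS_SIM, set_high() and the sample values are hypothetical stand-ins for illustration, not kernel APIs.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures touched by the patch;
 * none of these names are real kernel APIs. */
#define NR_ZONES	3
#define NR_CPUS_SIM	4

typedef struct {
	unsigned long managed_pages;
	unsigned long pcp_high[NR_CPUS_SIM];	/* per-cpu pagelist 'high' mark */
} zone_t;

/* Rough analogue of setup_pagelist_highmark(): record the high mark for one cpu. */
static void set_high(zone_t *zone, int cpu, unsigned long high)
{
	zone->pcp_high[cpu] = high;
}

int main(void)
{
	zone_t zones[NR_ZONES] = {
		{ .managed_pages = 1UL << 20 },
		{ .managed_pages = 1UL << 18 },
		{ .managed_pages = 1UL << 16 },
	};
	unsigned long percpu_pagelist_fraction = 8;

	for (int z = 0; z < NR_ZONES; z++) {
		/*
		 * As in the patch: 'high' depends only on the zone, so it is
		 * computed once here rather than once per cpu inside the loop.
		 */
		unsigned long high = zones[z].managed_pages / percpu_pagelist_fraction;

		for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
			set_high(&zones[z], cpu, high);

		printf("zone %d: high = %lu\n", z, high);
	}
	return 0;
}
```

The kernel change has the same shape: the division by percpu_pagelist_fraction moves from inside for_each_possible_cpu() to just after for_each_populated_zone(), and the braces around the now single-statement inner loop are dropped.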