author     Christoph Lameter <clameter@sgi.com>    2006-06-30 01:55:34 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-30 11:25:34 -0700
commit     65ba55f500a37272985d071c9bbb35256a2f7c14
tree       e7735326ef2d2dca9d00a6c5ae47e9eb03c7834f
parent     2244b95a7bcf8d24196f8a3a44187ba5dfff754c
[PATCH] zoned vm counters: convert nr_mapped to per zone counter
nr_mapped is important because it allows us to determine how many pages of a
zone are not mapped, which in turn allows a more efficient decision on when
we need to reclaim memory in a zone.
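As a rough illustration of what this buys (a sketch, not code from this
series: zone_page_state() is the per zone read accessor from the preceding
zoned VM counters patch, and zone_unmapped_pages() is a hypothetical helper
name invented here):

static unsigned long zone_unmapped_pages(struct zone *zone)
{
	/* Hypothetical helper: one O(1) per zone read of the counter
	 * this patch adds; no per-CPU summation is needed. */
	unsigned long mapped = zone_page_state(zone, NR_FILE_MAPPED);

	if (zone->present_pages <= mapped)
		return 0;
	return zone->present_pages - mapped;
}

A reclaim heuristic can compare such a value against a watermark before
deciding whether scanning the zone is worthwhile at all.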
We take the nr_mapped field out of the page state structure and define a new
per zone counter named NR_FILE_MAPPED (the anonymous pages will be split off
from NR_MAPPED in the next patch).
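For reference, the read side this conversion relies on looks roughly like
the following (paraphrased from the zoned VM counters base patch, not part
of this diff; details such as the CONFIG_SMP guard are elided):

/* each zone_stat_item has one global atomic, vm_stat[item] */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);

	/* per-CPU deltas still in flight can make the sum
	 * transiently negative; clamp it for callers */
	if (x < 0)
		x = 0;
	return x;
}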
We replace the uses of nr_mapped in various kernel locations. This avoids
looping over all processors in try_to_free_pages(), writeback, and reclaim
(swap + zone reclaim).
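The saving is easiest to see next to the old reader, which had to walk every
online CPU on each call. It looked roughly like this in the pre-patch
mm/page_alloc.c (a paraphrase; exact naming may differ):

/* old style: sum a struct page_state field across all online CPUs */
unsigned long __read_page_state(unsigned long offset)
{
	unsigned long ret = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long in;

		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
		ret += *((unsigned long *)in);
	}
	return ret;
}

With NR_FILE_MAPPED kept as a zoned VM counter, each of the call sites below
becomes a single atomic read.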
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/i386/mm/pgtable.c  |  2
-rw-r--r--  drivers/base/node.c     |  4
-rw-r--r--  fs/proc/proc_misc.c     |  2
-rw-r--r--  include/linux/mmzone.h  |  3
-rw-r--r--  include/linux/vmstat.h  |  2
-rw-r--r--  mm/page-writeback.c     |  2
-rw-r--r--  mm/page_alloc.c         |  2
-rw-r--r--  mm/rmap.c               |  6
-rw-r--r--  mm/vmscan.c             |  8
-rw-r--r--  mm/vmstat.c             |  2
10 files changed, 16 insertions, 17 deletions
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 2889567..f85f1a4 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -61,7 +61,7 @@ void show_mem(void)
 	get_page_state(&ps);
 	printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
-	printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped);
+	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
 	printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
 }
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eae2bdc..8b12323 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -54,8 +54,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		ps.nr_dirty = 0;
 	if ((long)ps.nr_writeback < 0)
 		ps.nr_writeback = 0;
-	if ((long)ps.nr_mapped < 0)
-		ps.nr_mapped = 0;
 	if ((long)ps.nr_slab < 0)
 		ps.nr_slab = 0;
@@ -84,7 +82,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(i.freeram - i.freehigh),
 		       nid, K(ps.nr_dirty),
 		       nid, K(ps.nr_writeback),
-		       nid, K(ps.nr_mapped),
+		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 5c10ea1..bc7d9ab 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -190,7 +190,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
-		K(ps.nr_mapped),
+		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
 		K(committed),
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 543f9e4..eb42c12 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,6 +47,9 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	NR_FILE_MAPPED,	/* mapped into pagetables.
+			   only modified from process context */
+
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3fd5c11..8ab8229 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,8 +26,6 @@ struct page_state {
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_mapped;	/* mapped into pagetables.
-					 * only modified from process context */
 	unsigned long nr_slab;		/* In slab */
 #define GET_PAGE_STATE_LAST nr_slab
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4ec7026..60c7244 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,7 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = read_page_state(nr_mapped);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a877fe..04dd2b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1319,7 +1319,7 @@ void show_free_areas(void)
 		ps.nr_unstable,
 		nr_free_pages(),
 		ps.nr_slab,
-		ps.nr_mapped,
+		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
 	for_each_zone(zone) {
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_page_state(nr_mapped);
+	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -499,7 +499,7 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -531,7 +531,7 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_page_state(nr_mapped);
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
 	}
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeacb0d..d2caf74 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -990,7 +990,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
@@ -1075,7 +1075,7 @@ loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
-	sc.nr_mapped = read_page_state(nr_mapped);
+	sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 
 	inc_page_state(pageoutrun);
 
@@ -1407,7 +1407,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 		unsigned long nr_to_scan = nr_pages - ret;
 
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 
 		ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
@@ -1548,7 +1548,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-		.nr_mapped = read_page_state(nr_mapped),
+		.nr_mapped = global_page_state(NR_FILE_MAPPED),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210f9bb..4800091 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -401,13 +401,13 @@ struct seq_operations fragmentation_op = {
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_mapped",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_mapped",
 	"nr_slab",
 
 	"pgpgin",