author      Mel Gorman <mgorman@techsingularity.net>        2016-07-28 15:46:14 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-07-28 16:07:41 -0700
commit      50658e2e04c12d5cd628381c1b9cb69d0093a9c0 (patch)
tree        9afad9b1cba152df6971cedb58d5d39d9a538c44 /mm
parent      281e37265f2826ed401d84d6790226448ef3f0e8 (diff)
mm: move page mapped accounting to the node
Reclaim makes decisions based on the number of pages that are mapped,
but it is mixing node and zone information. Account NR_FILE_MAPPED and
NR_ANON_PAGES on the node instead.
Link: http://lkml.kernel.org/r/1467970510-21195-18-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
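To make the zone-to-node move concrete, here is a minimal user-space sketch (not kernel source; the struct layouts and the mod_node_page_state() helper are simplified stand-ins for pg_data_t, struct zone, and __mod_node_page_state()). It shows why accounting mapped pages on the node gives reclaim one coherent number even when a node's pages are spread over several zones:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures: one NUMA node
 * (pg_data_t) contains several zones (DMA, NORMAL, ...). */
enum node_stat_item { NR_FILE_MAPPED, NR_ANON_PAGES, NR_NODE_STAT_ITEMS };

struct pglist_data {
	long stat[NR_NODE_STAT_ITEMS];	/* per-node counters, as after this patch */
};

struct zone {
	struct pglist_data *zone_pgdat;	/* back-pointer to the owning node */
};

/* Stand-in for __mod_node_page_state(): update the node's counter,
 * regardless of which zone the page sits in. */
static void mod_node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item, long delta)
{
	pgdat->stat[item] += delta;
}

int main(void)
{
	struct pglist_data node0 = { { 0 } };
	struct zone dma = { &node0 }, normal = { &node0 };

	/* Mapped file pages in different zones of the same node now
	 * accumulate in a single counter, which is what node-based
	 * reclaim compares against the node-wide file LRU size. */
	mod_node_page_state(dma.zone_pgdat, NR_FILE_MAPPED, 10);
	mod_node_page_state(normal.zone_pgdat, NR_FILE_MAPPED, 32);

	printf("node0 nr_mapped = %ld\n", node0.stat[NR_FILE_MAPPED]);
	return 0;
}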
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c |  6
-rw-r--r--  mm/rmap.c       | 14
-rw-r--r--  mm/vmscan.c     |  2
-rw-r--r--  mm/vmstat.c     |  4
4 files changed, 13 insertions, 13 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73b018d..c11935b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4312,7 +4312,7 @@ void show_free_areas(unsigned int filter)
 		global_page_state(NR_UNSTABLE_NFS),
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
-		global_page_state(NR_FILE_MAPPED),
+		global_node_page_state(NR_FILE_MAPPED),
 		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE),
@@ -4334,6 +4334,7 @@ void show_free_areas(unsigned int filter)
 			" unevictable:%lukB"
 			" isolated(anon):%lukB"
 			" isolated(file):%lukB"
+			" mapped:%lukB"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -4344,6 +4345,7 @@ void show_free_areas(unsigned int filter)
 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
+			K(node_page_state(pgdat, NR_FILE_MAPPED)),
 			!pgdat_reclaimable(pgdat) ? "yes" : "no");
 	}
 
@@ -4368,7 +4370,6 @@ void show_free_areas(unsigned int filter)
 			" mlocked:%lukB"
 			" dirty:%lukB"
 			" writeback:%lukB"
-			" mapped:%lukB"
 			" shmem:%lukB"
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			" shmem_thp: %lukB"
@@ -4397,7 +4398,6 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_MLOCK)),
 			K(zone_page_state(zone, NR_FILE_DIRTY)),
 			K(zone_page_state(zone, NR_WRITEBACK)),
-			K(zone_page_state(zone, NR_FILE_MAPPED)),
 			K(zone_page_state(zone, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			K(zone_page_state(zone, NR_SHMEM_THPS) * HPAGE_PMD_NR),
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1214,7 +1214,7 @@ void do_page_add_anon_rmap(struct page *page,
 		 */
 		if (compound)
 			__inc_zone_page_state(page, NR_ANON_THPS);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr);
 	}
 	if (unlikely(PageKsm(page)))
 		return;
@@ -1258,7 +1258,7 @@ void page_add_new_anon_rmap(struct page *page,
 		/* increment count (starts at -1) */
 		atomic_set(&page->_mapcount, 0);
 	}
-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+	__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1293,7 +1293,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		if (!atomic_inc_and_test(&page->_mapcount))
 			goto out;
 	}
-	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
 	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 out:
 	unlock_page_memcg(page);
@@ -1329,11 +1329,11 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	}
 
 	/*
-	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
 	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
@@ -1375,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
 		clear_page_mlock(page);
 
 	if (nr) {
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, -nr);
 		deferred_split_huge_page(page);
 	}
 }
@@ -1404,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound)
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__dec_zone_page_state(page, NR_ANON_PAGES);
+	__dec_node_page_state(page, NR_ANON_PAGES);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9f6e673..90b4665 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3589,7 +3589,7 @@ int sysctl_min_slab_ratio = 5;
 
 static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
 {
-	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
+	unsigned long file_mapped = node_page_state(zone->zone_pgdat, NR_FILE_MAPPED);
 	unsigned long file_lru = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
 		node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE);
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d17d66e..02e7406 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -925,8 +925,6 @@ const char * const vmstat_text[] = {
 	"nr_zone_anon_lru",
 	"nr_zone_file_lru",
 	"nr_mlock",
-	"nr_anon_pages",
-	"nr_mapped",
 	"nr_file_pages",
 	"nr_dirty",
 	"nr_writeback",
@@ -970,6 +968,8 @@ const char * const vmstat_text[] = {
 	"workingset_refault",
 	"workingset_activate",
 	"workingset_nodereclaim",
+	"nr_anon_pages",
+	"nr_mapped",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
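One user-visible side effect of the vmstat.c hunk is that "nr_anon_pages" and "nr_mapped" move from the zone-counter block to the node-counter block of vmstat_text, so their position in /proc/vmstat output changes. Below is a sketch of a reader that keys on the counter name rather than the line number and therefore keeps working across this reordering; vmstat_read() is a hypothetical helper for illustration, not a kernel or libc API:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: scan /proc/vmstat for "name value" pairs and
 * return the value for the requested counter, or -1 if absent. */
static long vmstat_read(const char *key)
{
	char name[128];
	long value = -1, v;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return -1;
	while (fscanf(f, "%127s %ld", name, &v) == 2) {
		if (strcmp(name, key) == 0) {
			value = v;
			break;
		}
	}
	fclose(f);
	return value;
}

int main(void)
{
	/* Both counters are still exported after this patch; only their
	 * order in /proc/vmstat changed. */
	printf("nr_mapped     = %ld\n", vmstat_read("nr_mapped"));
	printf("nr_anon_pages = %ld\n", vmstat_read("nr_anon_pages"));
	return 0;
}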