path: root/mm/compaction.c
author    Ming Ling <ming.ling@spreadtrum.com>  2016-12-12 16:42:26 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-12 18:55:07 -0800
commit    6afcf8ef0ca0a69d014f8edb613d94821f0ae700 (patch)
tree      0efd242ae2444b808e4f81aed28f4688bedeb133 /mm/compaction.c
parent    6d8409580bee356ce418dcb94260b24dda639934 (diff)
mm, compaction: fix NR_ISOLATED_* stats for pfn based migration
Since commit bda807d44454 ("mm: migrate: support non-lru movable page migration") isolate_migratepages_block() can isolate !PageLRU pages, which acct_isolated() then accounts as NR_ISOLATED_*. Accounting these non-lru pages as NR_ISOLATED_{ANON,FILE} doesn't make any sense, and it can misguide heuristics based on those counters, such as pgdat_reclaimable_pages and too_many_isolated, which would lead to unexpected stalls during direct reclaim without any good reason. Note that __alloc_contig_migrate_range can isolate a lot of pages at once.

On mobile devices such as a 512M RAM Android phone, a big zram swap may be in use. In some cases zram (zsmalloc) uses many non-lru but migratable pages, for example:

MemTotal:       468148 kB
Normal free:      5620 kB
Free swap:        4736 kB
Total swap:     409596 kB
ZRAM:           164616 kB (zsmalloc non-lru pages)
active_anon:     60700 kB
inactive_anon:   60744 kB
active_file:     34420 kB
inactive_file:   37532 kB

Fix this by accounting only lru pages to NR_ISOLATED_* in isolate_migratepages_block(), right after they are isolated and we still know they were on the LRU. Drop acct_isolated() because it is called after the fact, when that information has already been lost; batching the per-cpu counter doesn't bring much improvement anyway. Also make sure that we uncharge only LRU pages when putting them back on the LRU in putback_movable_pages() and when unmap_and_move() migrates the page.

[mhocko@suse.com: replace acct_isolated() with direct counting]
Fixes: bda807d44454 ("mm: migrate: support non-lru movable page migration")
Link: http://lkml.kernel.org/r/20161019080240.9682-1-mhocko@kernel.org
Signed-off-by: Ming Ling <ming.ling@spreadtrum.com>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
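For reference, the direct-counting pattern the patch switches to can be sketched as two tiny helpers. The helper names below are hypothetical and only illustrate the idea; inc_node_page_state()/dec_node_page_state() and page_is_file_cache() are the real kernel helpers the change relies on.

/*
 * Illustrative sketch only (not part of this patch): charge a page to
 * NR_ISOLATED_* at the moment it is taken off the LRU, while we still
 * know it was an LRU page, and uncharge it when it goes back to the
 * LRU or its migration completes.  Non-LRU movable pages never touch
 * these counters.
 */
static void charge_isolated(struct page *page)
{
	/* page_is_file_cache() is 1 for file-backed pages, 0 for anon */
	inc_node_page_state(page,
			    NR_ISOLATED_ANON + page_is_file_cache(page));
}

static void uncharge_isolated(struct page *page)
{
	dec_node_page_state(page,
			    NR_ISOLATED_ANON + page_is_file_cache(page));
}

In the patch itself the increment is done inline in isolate_migratepages_block() (see the hunk below), while the matching decrements live in mm/migrate.c and are therefore outside this diffstat.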
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  25
1 file changed, 3 insertions(+), 22 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 0409a4a..70e6bec 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -634,22 +634,6 @@ isolate_freepages_range(struct compact_control *cc,
return pfn;
}
-/* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
-{
- struct page *page;
- unsigned int count[2] = { 0, };
-
- if (list_empty(&cc->migratepages))
- return;
-
- list_for_each_entry(page, &cc->migratepages, lru)
- count[!!page_is_file_cache(page)]++;
-
- mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
- mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
-}
-
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
@@ -866,6 +850,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Successfully isolated */
del_page_from_lru_list(page, lruvec, page_lru(page));
+ inc_node_page_state(page,
+ NR_ISOLATED_ANON + page_is_file_cache(page));
isolate_success:
list_add(&page->lru, &cc->migratepages);
@@ -902,7 +888,6 @@ isolate_fail:
spin_unlock_irqrestore(zone_lru_lock(zone), flags);
locked = false;
}
- acct_isolated(zone, cc);
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
cc->last_migrated_pfn = 0;
@@ -988,7 +973,6 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
break;
}
- acct_isolated(cc->zone, cc);
return pfn;
}
@@ -1258,10 +1242,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
low_pfn = isolate_migratepages_block(cc, low_pfn,
block_end_pfn, isolate_mode);
- if (!low_pfn || cc->contended) {
- acct_isolated(zone, cc);
+ if (!low_pfn || cc->contended)
return ISOLATE_ABORT;
- }
/*
* Either we isolated something and proceed with migration. Or
@@ -1271,7 +1253,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
break;
}
- acct_isolated(zone, cc);
/* Record where migration scanner will be restarted. */
cc->migrate_pfn = low_pfn;