author     Mel Gorman <mgorman@techsingularity.net>  2016-07-28 15:46:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-28 16:07:41 -0700
commit     7cc30fcfd2a894589d832a192cac3dc5cd302bb8 (patch)
tree       5f32cf9ee52f5bd6e003853507fdb031b1d1124c /mm/vmscan.c
parent     16709d1de1954475356a65848f80a01581b4903c (diff)
mm: vmstat: account per-zone stalls and pages skipped during reclaim
The vmstat allocstall was fairly useful in the general sense but
node-based LRUs change that. It's important to know if a stall was for
an address-limited allocation request as this will require skipping
pages from other zones. This patch adds pgstall_* counters to replace
allocstall. The sum of the counters will equal the old allocstall so it
can be trivially recalculated. A high number of address-limited
allocation requests may result in a lot of useless LRU scanning for
suitable pages.

As address-limited allocations require pages to be skipped, it's
important to know how much useless LRU scanning took place so this
patch adds pgskip* counters. This yields the following model

1. The number of address-space limited stalls can be accounted for
   (pgstall)
2. The amount of useless work required to reclaim the data is accounted
   (pgskip)
3. The total number of scans is available from pgscan_kswapd and
   pgscan_direct so from that the ratio of useful to useless scans can
   be calculated.

[mgorman@techsingularity.net: s/pgstall/allocstall/]
Link: http://lkml.kernel.org/r/1468404004-5085-3-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-33-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
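A minimal userspace sketch of point 3 above: with these counters
exported via /proc/vmstat, the useless-scan fraction can be derived by
summing the per-zone pgskip_* counters against total pages scanned. The
counter names (pgskip_*, node-based pgscan_kswapd and pgscan_direct)
are assumptions based on this patch series, not guaranteed by this
patch alone:

/*
 * Sketch: derive the fraction of useless LRU scanning from
 * /proc/vmstat. Assumes the pgskip_* counters added by this patch
 * and node-based pgscan_kswapd / pgscan_direct counters.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	unsigned long long val, skip = 0, scan = 0;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}

	/* Each /proc/vmstat line is "<name> <value>" */
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		if (!strncmp(name, "pgskip_", 7))
			skip += val;	/* pages skipped: useless scanning */
		else if (!strcmp(name, "pgscan_kswapd") ||
			 !strcmp(name, "pgscan_direct"))
			scan += val;	/* total pages scanned by reclaim */
	}
	fclose(f);

	if (scan)
		printf("useless scan fraction: %.4f (%llu skipped of %llu scanned)\n",
		       (double)skip / scan, skip, scan);
	return 0;
}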
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5eaf83b..d5ee6d9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1394,6 +1394,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
+ unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long scan, nr_pages;
LIST_HEAD(pages_skipped);
@@ -1408,6 +1409,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
if (page_zonenum(page) > sc->reclaim_idx) {
list_move(&page->lru, &pages_skipped);
+ nr_skipped[page_zonenum(page)]++;
continue;
}
@@ -1436,8 +1438,17 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* scanning would soon rescan the same pages to skip and put the
* system at risk of premature OOM.
*/
- if (!list_empty(&pages_skipped))
+ if (!list_empty(&pages_skipped)) {
+ int zid;
+
list_splice(&pages_skipped, src);
+ for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+ if (!nr_skipped[zid])
+ continue;
+
+ __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
+ }
+ }
*nr_scanned = scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
nr_taken, mode, is_file_lru(lru));
@@ -2680,7 +2691,7 @@ retry:
delayacct_freepages_start();
if (global_reclaim(sc))
- count_vm_event(ALLOCSTALL);
+ __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
do {
vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,