Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  10
-rw-r--r--  mm/vmscan.c      19
2 files changed, 14 insertions, 15 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 35e008e..75198da 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -737,7 +737,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 }
 
 unsigned long
-mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz;
 
@@ -1229,8 +1229,8 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 	unsigned long active;
 	unsigned long gb;
 
-	inactive = mem_cgroup_get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
-	active = mem_cgroup_get_lruvec_size(lruvec, LRU_ACTIVE_ANON);
+	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
 
 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
 	if (gb)
@@ -1246,8 +1246,8 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 	unsigned long active;
 	unsigned long inactive;
 
-	inactive = mem_cgroup_get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
-	active = mem_cgroup_get_lruvec_size(lruvec, LRU_ACTIVE_FILE);
+	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
+	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
 
 	return (active > inactive);
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4c5453f..8b941f3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -145,10 +145,10 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
+static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	if (!mem_cgroup_disabled())
-		return mem_cgroup_get_lruvec_size(lruvec, lru);
+		return mem_cgroup_get_lru_size(lruvec, lru);
 
 	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
@@ -1608,10 +1608,10 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 		goto out;
 	}
 
-	anon = get_lruvec_size(lruvec, LRU_ACTIVE_ANON) +
-		get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
-	file = get_lruvec_size(lruvec, LRU_ACTIVE_FILE) +
-		get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
+	anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+		get_lru_size(lruvec, LRU_INACTIVE_ANON);
+	file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
 	if (global_reclaim(sc)) {
 		free = zone_page_state(zone, NR_FREE_PAGES);
@@ -1674,7 +1674,7 @@ out:
 		int file = is_file_lru(lru);
 		unsigned long scan;
 
-		scan = get_lruvec_size(lruvec, lru);
+		scan = get_lru_size(lruvec, lru);
 		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
 			scan >>= sc->priority;
 			if (!scan && force_scan)
@@ -1743,10 +1743,9 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
 	 * inactive lists are large enough, continue reclaiming
 	 */
 	pages_for_compaction = (2UL << sc->order);
-	inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
+	inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
-		inactive_lru_pages += get_lruvec_size(lruvec,
-						      LRU_INACTIVE_ANON);
+		inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
 	if (sc->nr_reclaimed < pages_for_compaction &&
 			inactive_lru_pages > pages_for_compaction)
 		return true;
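
The functional core of the renamed helper is the dispatch in mm/vmscan.c: get_lru_size() uses the memcg-aware accessor when memory cgroups are enabled and otherwise falls back to the per-zone vmstat counter. The standalone C sketch below mirrors only that dispatch pattern outside the kernel; the stub counters (fake_memcg_counts, fake_zone_counts), the memcg_disabled flag, and the trimmed lru_list enum are illustrative assumptions, not kernel code.

/* Userspace sketch of the get_lru_size() dispatch from this patch.
 * Kernel statistics are replaced by fake arrays; only the control
 * flow follows the real mm/vmscan.c helper. */
#include <stdio.h>
#include <stdbool.h>

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
                LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, NR_LRU_LISTS };

/* Stand-ins for per-memcg (lruvec) and global per-zone counts. */
static unsigned long fake_memcg_counts[NR_LRU_LISTS] = { 100, 50, 400, 200 };
static unsigned long fake_zone_counts[NR_LRU_LISTS]  = { 800, 300, 2000, 900 };
static bool memcg_disabled = false;

static unsigned long mem_cgroup_get_lru_size(enum lru_list lru)
{
	return fake_memcg_counts[lru];	/* memcg-aware count */
}

static unsigned long zone_lru_pages(enum lru_list lru)
{
	return fake_zone_counts[lru];	/* zone-wide count */
}

/* Same shape as the renamed helper: prefer the memcg accessor,
 * fall back to the zone-wide counter when memcg is disabled. */
static unsigned long get_lru_size(enum lru_list lru)
{
	if (!memcg_disabled)
		return mem_cgroup_get_lru_size(lru);
	return zone_lru_pages(lru);
}

int main(void)
{
	unsigned long anon = get_lru_size(LRU_ACTIVE_ANON) +
			     get_lru_size(LRU_INACTIVE_ANON);
	unsigned long file = get_lru_size(LRU_ACTIVE_FILE) +
			     get_lru_size(LRU_INACTIVE_FILE);
	printf("anon=%lu file=%lu\n", anon, file);
	return 0;
}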