author     Johannes Weiner <hannes@cmpxchg.org>             2016-03-15 14:57:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-03-15 16:55:16 -0700
commit     162453bfbdf4c0f58cb3058aad9ad8cda1044cda (patch)
tree       43be985615141172792acb585d80bf6651cdc399 /mm/workingset.c
parent     689c94f03ae251181725fe853b4ffc870f05c7fe (diff)
mm: workingset: separate shadow unpacking and refault calculation
Per-cgroup thrash detection will need to derive a live memcg from the
eviction cookie, and doing that inside unpack_shadow() will get nasty
with the reference handling spread over two functions.
In preparation, make unpack_shadow() clearly about extracting static
data, and let workingset_refault() do all the higher-level handling.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
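
For context, a shadow entry packs the zone's eviction counter together with the evicted page's node and zone IDs into a single unsigned long tagged as a radix tree exceptional entry. The standalone C sketch below illustrates that packing scheme and the split this patch makes, where unpacking only recovers the static data and the caller computes everything else; the DEMO_* bit widths and helper names are illustrative stand-ins, not the kernel's actual RADIX_TREE_EXCEPTIONAL_SHIFT/ZONES_SHIFT/NODES_SHIFT constants.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's packing constants. */
#define DEMO_EXCEPTIONAL_SHIFT	2	/* low bits reserved for the radix tree tag */
#define DEMO_ZONES_SHIFT	2	/* bits for the zone index */
#define DEMO_NODES_SHIFT	6	/* bits for the node index */

/* Pack the eviction counter, node id and zone id into one unsigned long,
 * low bits to high: tag bits, zone id, node id, truncated eviction counter. */
static unsigned long demo_pack_shadow(unsigned long eviction, int nid, int zid)
{
	eviction = (eviction << DEMO_NODES_SHIFT) | nid;
	eviction = (eviction << DEMO_ZONES_SHIFT) | zid;
	return (eviction << DEMO_EXCEPTIONAL_SHIFT) | 1; /* tag bit set */
}

/* Reverse of demo_pack_shadow(): extract only the static data, as the
 * refactored unpack_shadow() does; the caller derives the refault distance. */
static void demo_unpack_shadow(unsigned long entry, int *nidp, int *zidp,
			       unsigned long *evictionp)
{
	entry >>= DEMO_EXCEPTIONAL_SHIFT;
	*zidp = entry & ((1UL << DEMO_ZONES_SHIFT) - 1);
	entry >>= DEMO_ZONES_SHIFT;
	*nidp = entry & ((1UL << DEMO_NODES_SHIFT) - 1);
	entry >>= DEMO_NODES_SHIFT;
	*evictionp = entry;
}

int main(void)
{
	unsigned long shadow = demo_pack_shadow(12345, 1, 2);
	unsigned long eviction;
	int nid, zid;

	demo_unpack_shadow(shadow, &nid, &zid, &eviction);
	printf("nid=%d zid=%d eviction=%lu\n", nid, zid, eviction);
	return 0;
}

This mirrors the split the patch makes: unpack_shadow() only decodes bits, while workingset_refault() reads inactive_age and computes the refault distance.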
Diffstat (limited to 'mm/workingset.c')
-rw-r--r--  mm/workingset.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/mm/workingset.c b/mm/workingset.c
index 3ef92f6..f874b2c 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -165,13 +165,10 @@ static void *pack_shadow(unsigned long eviction, struct zone *zone)
 	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
-static void unpack_shadow(void *shadow,
-			  struct zone **zone,
-			  unsigned long *distance)
+static void unpack_shadow(void *shadow, struct zone **zonep,
+			  unsigned long *evictionp)
 {
 	unsigned long entry = (unsigned long)shadow;
-	unsigned long eviction;
-	unsigned long refault;
 	int zid, nid;
 
 	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
@@ -179,29 +176,9 @@ static void unpack_shadow(void *shadow,
 	entry >>= ZONES_SHIFT;
 	nid = entry & ((1UL << NODES_SHIFT) - 1);
 	entry >>= NODES_SHIFT;
-	eviction = entry;
-
-	*zone = NODE_DATA(nid)->node_zones + zid;
-	refault = atomic_long_read(&(*zone)->inactive_age);
-
-	/*
-	 * The unsigned subtraction here gives an accurate distance
-	 * across inactive_age overflows in most cases.
-	 *
-	 * There is a special case: usually, shadow entries have a
-	 * short lifetime and are either refaulted or reclaimed along
-	 * with the inode before they get too old. But it is not
-	 * impossible for the inactive_age to lap a shadow entry in
-	 * the field, which can then can result in a false small
-	 * refault distance, leading to a false activation should this
-	 * old entry actually refault again. However, earlier kernels
-	 * used to deactivate unconditionally with *every* reclaim
-	 * invocation for the longest time, so the occasional
-	 * inappropriate activation leading to pressure on the active
-	 * list is not a problem.
-	 */
-	*distance = (refault - eviction) & EVICTION_MASK;
+	*zonep = NODE_DATA(nid)->node_zones + zid;
+	*evictionp = entry;
 }
 
 /**
@@ -233,9 +210,32 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 bool workingset_refault(void *shadow)
 {
 	unsigned long refault_distance;
+	unsigned long eviction;
+	unsigned long refault;
 	struct zone *zone;
 
-	unpack_shadow(shadow, &zone, &refault_distance);
+	unpack_shadow(shadow, &zone, &eviction);
+
+	refault = atomic_long_read(&zone->inactive_age);
+
+	/*
+	 * The unsigned subtraction here gives an accurate distance
+	 * across inactive_age overflows in most cases.
+	 *
+	 * There is a special case: usually, shadow entries have a
+	 * short lifetime and are either refaulted or reclaimed along
+	 * with the inode before they get too old. But it is not
+	 * impossible for the inactive_age to lap a shadow entry in
+	 * the field, which can then can result in a false small
+	 * refault distance, leading to a false activation should this
+	 * old entry actually refault again. However, earlier kernels
+	 * used to deactivate unconditionally with *every* reclaim
+	 * invocation for the longest time, so the occasional
+	 * inappropriate activation leading to pressure on the active
+	 * list is not a problem.
+	 */
+	refault_distance = (refault - eviction) & EVICTION_MASK;
+
 	inc_zone_state(zone, WORKINGSET_REFAULT);
 
 	if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
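
The comment that moves into workingset_refault() relies on unsigned modular arithmetic: as long as inactive_age has not lapped the shadow entry, the masked subtraction yields the number of inactivations between eviction and refault even if the counter wrapped in between. A minimal userspace sketch of that property, using an illustrative 8-bit DEMO_EVICTION_MASK rather than the kernel's EVICTION_MASK:

#include <stdio.h>

/* Illustrative counter width; the kernel derives EVICTION_MASK from the
 * bits left over after packing the node/zone ids and radix tree tag. */
#define DEMO_EVICTION_MASK	0xffUL	/* pretend the counter is 8 bits wide */

/* Distance between eviction and refault; correct across counter wraparound
 * as long as the counter has not lapped the shadow entry. */
static unsigned long demo_refault_distance(unsigned long eviction,
					   unsigned long refault)
{
	return (refault - eviction) & DEMO_EVICTION_MASK;
}

int main(void)
{
	/* No wraparound: evicted at age 100, refaulted at age 130 -> 30. */
	printf("%lu\n", demo_refault_distance(100, 130));
	/* The 8-bit counter wrapped from 250 to 10 -> distance is still 16. */
	printf("%lu\n", demo_refault_distance(250, 10));
	return 0;
}

If the counter has lapped the entry, the masked result can come out falsely small, which is exactly the rare false-activation case the comment accepts.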