author     Mel Gorman <mgorman@techsingularity.net>     2016-07-28 15:46:53 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>     2016-07-28 16:07:41 -0700
commit     3b8c0be43cb844b3cd26fac00e7663a1201176fd (patch)
tree       398947f7f2b165b9a891f176ea19bec84f269224 /mm/page_alloc.c
parent     e6cbd7f2efb433d717af72aa8510a9db6f7a7e05 (diff)
mm: page_alloc: cache the last node whose dirty limit is reached
If a page is about to be dirtied then the page allocator attempts to
limit the total number of dirty pages that exist on any given node.
The call to node_dirty_ok() is expensive, so this patch records whether
the last pgdat examined hit the dirty limits. Because the zones of a
node appear consecutively in the zonelist, this short-circuit avoids
repeated node_dirty_ok() calls against a node already known to be over
its limit.

Link: http://lkml.kernel.org/r/1467970510-21195-31-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  13
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1dd0f1f..7427e0e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2848,6 +2848,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 {
 	struct zoneref *z = ac->preferred_zoneref;
 	struct zone *zone;
+	struct pglist_data *last_pgdat_dirty_limit = NULL;
+
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
@@ -2880,8 +2882,15 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		 * will require awareness of nodes in the
 		 * dirty-throttling and the flusher threads.
 		 */
-		if (ac->spread_dirty_pages && !node_dirty_ok(zone->zone_pgdat))
-			continue;
+		if (ac->spread_dirty_pages) {
+			if (last_pgdat_dirty_limit == zone->zone_pgdat)
+				continue;
+
+			if (!node_dirty_ok(zone->zone_pgdat)) {
+				last_pgdat_dirty_limit = zone->zone_pgdat;
+				continue;
+			}
+		}
 
 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 		if (!zone_watermark_fast(zone, order, mark,
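
For readers outside the kernel tree, below is a minimal userspace sketch of
the same one-entry "remember the last failure" cache. All names here are
hypothetical stand-ins, not kernel APIs: expensive_dirty_check() plays the
role of node_dirty_ok(), and struct node the role of pgdat. It assumes, as
a node-ordered zonelist does in the common case, that zones belonging to
the same node appear consecutively.

#include <stdio.h>
#include <stdbool.h>

struct node { int id; bool over_dirty_limit; };
struct zone { struct node *node; };

static int check_calls;

/* Stand-in for the expensive node_dirty_ok() predicate. */
static bool expensive_dirty_check(struct node *n)
{
	check_calls++;
	return !n->over_dirty_limit;
}

int main(void)
{
	struct node nodes[2] = { { 0, true }, { 1, false } };
	/* Two zones per node, adjacent in the list, like a zonelist. */
	struct zone zonelist[4] = {
		{ &nodes[0] }, { &nodes[0] }, { &nodes[1] }, { &nodes[1] },
	};
	struct node *last_failed = NULL;	/* the one-entry cache */

	for (int i = 0; i < 4; i++) {
		if (last_failed == zonelist[i].node)
			continue;	/* cached failure: skip the expensive call */

		if (!expensive_dirty_check(zonelist[i].node)) {
			last_failed = zonelist[i].node;
			continue;
		}
		printf("zone %d on node %d is usable\n", i, zonelist[i].node->id);
	}
	/* Reports 3 checks: the second zone of the over-limit node is skipped. */
	printf("expensive checks: %d (would be 4 without the cache)\n",
	       check_calls);
	return 0;
}

Note that, like the patch, the sketch caches only failures; nodes that pass
the check are re-tested for each of their zones, which keeps the cache
trivially correct if a node's dirty state changes mid-scan.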