author     Vladimir Davydov <vdavydov@parallels.com>  2014-10-20 15:50:30 +0400
committer  Tejun Heo <tj@kernel.org>                  2014-10-27 11:15:27 -0400
commit     344736f29b359790facd0b7a521e367f1715c11c (patch)
tree       c32487de22e7640a828f28819b5707790ede5105 /mm
parent     8447a0fee974433f7e0035fd30e1edecf00e014f (diff)
cpuset: simplify cpuset_node_allowed API
The current cpuset API for checking whether a zone/node is allowed to allocate from looks rather awkward. We have hardwall and softwall versions of cpuset_node_allowed, with the softwall version doing literally the same as the hardwall version if __GFP_HARDWALL is passed to it in the gfp flags. If it isn't, the softwall version may check the given node against the enclosing hardwall cpuset, for which it needs to take the callback lock.

This distinction was introduced by commit 02a0e53d8227 ("cpuset: rework cpuset_zone_allowed api"). Before that, there was only one version, with the __GFP_HARDWALL flag determining its behavior. The purpose of that commit was to avoid sleep-in-atomic bugs when someone mistakenly called the function without __GFP_HARDWALL for an atomic allocation. The suffixes were intended to make callers think before using the function.

However, since the callback lock was converted from a mutex to a spinlock by the previous patch, the softwall check function can no longer sleep, and these precautions are no longer necessary.

So let's simplify the API back to a single check.

Suggested-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
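As context for the diff below, here is a minimal sketch (not part of this patch; the wrapper zone_allowed_example is made up for illustration) of how the removed hardwall/softwall pair maps onto the single cpuset_zone_allowed() check, with hardwall behaviour selected purely by the __GFP_HARDWALL bit in the gfp mask:

	#include <linux/cpuset.h>
	#include <linux/gfp.h>
	#include <linux/mmzone.h>

	/*
	 * Illustrative only: the two removed variants collapse into one check.
	 * Hardwall behaviour is requested via __GFP_HARDWALL in gfp_mask
	 * rather than via the function suffix.
	 */
	static bool zone_allowed_example(struct zone *zone, gfp_t gfp_mask,
					 bool hardwall)
	{
		if (hardwall)
			gfp_mask |= __GFP_HARDWALL;	/* was cpuset_zone_allowed_hardwall() */

		/* without __GFP_HARDWALL this matches the old _softwall() behaviour */
		return cpuset_zone_allowed(zone, gfp_mask);
	}

Callers that previously used the hardwall variant (slab, slub and vmscan below) now simply OR __GFP_HARDWALL into the flags they pass.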
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    | 2 +-
-rw-r--r--  mm/oom_kill.c   | 2 +-
-rw-r--r--  mm/page_alloc.c | 6 +++---
-rw-r--r--  mm/slab.c       | 2 +-
-rw-r--r--  mm/slub.c       | 3 ++-
-rw-r--r--  mm/vmscan.c     | 5 +++--
6 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9fd7227..82da930 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -582,7 +582,7 @@ retry_cpuset:
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
- if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
+ if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
if (avoid_reserve)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5340f6b..3348280 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
/* Check this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask)
- if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ if (!cpuset_zone_allowed(zone, gfp_mask))
cpuset_limited = true;
if (cpuset_limited) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9cd36b8..ab07b49 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
- * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+ * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
@@ -1973,7 +1973,7 @@ zonelist_scan:
continue;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
- !cpuset_zone_allowed_softwall(zone, gfp_mask))
+ !cpuset_zone_allowed(zone, gfp_mask))
continue;
/*
* Distribute pages in proportion to the individual
@@ -2514,7 +2514,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
alloc_flags |= ALLOC_HARDER;
/*
* Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
- * comment for __cpuset_node_allowed_softwall().
+ * comment for __cpuset_node_allowed().
*/
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && !in_interrupt())
diff --git a/mm/slab.c b/mm/slab.c
index eb2b2ea..063a91b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3012,7 +3012,7 @@ retry:
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
nid = zone_to_nid(zone);
- if (cpuset_zone_allowed_hardwall(zone, flags) &&
+ if (cpuset_zone_allowed(zone, flags | __GFP_HARDWALL) &&
get_node(cache, nid) &&
get_node(cache, nid)->free_objects) {
obj = ____cache_alloc_node(cache,
diff --git a/mm/slub.c b/mm/slub.c
index ae7b9f1..7d12f51 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1662,7 +1662,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
n = get_node(s, zone_to_nid(zone));
- if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+ if (n && cpuset_zone_allowed(zone,
+ flags | __GFP_HARDWALL) &&
n->nr_partial > s->min_partial) {
object = get_partial_node(s, n, c, flags);
if (object) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dcb4707..38878b2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2405,7 +2405,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
* to global LRU.
*/
if (global_reclaim(sc)) {
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ if (!cpuset_zone_allowed(zone,
+ GFP_KERNEL | __GFP_HARDWALL))
continue;
lru_pages += zone_reclaimable_pages(zone);
@@ -3388,7 +3389,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
if (!populated_zone(zone))
return;
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
return;
pgdat = zone->zone_pgdat;
if (pgdat->kswapd_max_order < order) {