summaryrefslogtreecommitdiffstats
path: root/mm/memblock.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-07-12 10:46:31 +0200
committerH. Peter Anvin <hpa@linux.intel.com>2011-07-14 11:45:32 -0700
commitb2fea988f4f3b38ff4edfc1556a843c91932804c (patch)
tree4ab1227ab0a607ea44ba554468bda547a4d51eb5 /mm/memblock.c
parentc13291a536b835b2ab278ab201f2cb1ce22f2785 (diff)
downloadop-kernel-dev-b2fea988f4f3b38ff4edfc1556a843c91932804c.zip
op-kernel-dev-b2fea988f4f3b38ff4edfc1556a843c91932804c.tar.gz
memblock: Improve generic memblock_nid_range() using for_each_mem_pfn_range()
Given an address range, memblock_nid_range() determines the node the start of the range belongs to and up to where the range stays in the same node. It's implemented by calling get_pfn_range_for_nid(), which determines min and max pfns for a given node, for each node and testing whether start address falls in there. This is not only inefficient but also incorrect when nodes interleave as min-max ranges for nodes overlap. This patch reimplements memblock_nid_range() using for_each_mem_pfn_range(). It's simpler, walks the mem ranges once and can find the exact range the start address falls in. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/1310460395-30913-5-git-send-email-tj@kernel.org Cc: Yinghai Lu <yinghai@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--mm/memblock.c20
1 files changed, 3 insertions, 17 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 0f9626f..97f3486 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -511,28 +511,14 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
- /*
- * This code originates from sparc which really wants use to walk by addresses
- * and returns the nid. This is not very convenient for early_pfn_map[] users
- * as the map isn't sorted yet, and it really wants to be walked by nid.
- *
- * For now, I implement the inefficient method below which walks the early
- * map multiple times. Eventually we may want to use an ARCH config option
- * to implement a completely different method for both case.
- */
unsigned long start_pfn, end_pfn;
int i;
- for (i = 0; i < MAX_NUMNODES; i++) {
- get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
- if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
- continue;
- *nid = i;
- return min(end, PFN_PHYS(end_pfn));
- }
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
+ if (start >= PFN_PHYS(start_pfn) && start < PFN_PHYS(end_pfn))
+ return min(end, PFN_PHYS(end_pfn));
#endif
*nid = 0;
-
return end;
}
OpenPOWER on IntegriCloud