author     Christoph Hellwig <hch@lst.de>           2017-12-29 08:53:56 +0100
committer  Dan Williams <dan.j.williams@intel.com>  2018-01-08 11:46:23 -0800
commit     24b6d4164348370c6b6a58b4248babd85ff9e982 (patch)
tree       7e89dc93eec8c743b2cbe2236255e8a1cde14833 /mm
parent     da024512a1fa5c979257e442130ee1d468285057 (diff)
mm: pass the vmem_altmap to vmemmap_free
We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
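For context, here is the shape of the change as a stand-alone C sketch. The
struct layout and the lookup_altmap() helper are simplified stand-ins invented
for illustration (the real kernel derived the altmap from a dev_pagemap radix
tree); only the before/after pattern is taken from this patch:

#include <stddef.h>

/* Simplified stand-in for the kernel's struct vmem_altmap, which
 * describes device memory reserved to back the struct page array. */
struct vmem_altmap {
        unsigned long base_pfn;
        unsigned long alloc;
};

/* Hypothetical helper modeling the old address-based lookup: a radix
 * tree walk performed without the lock that protects the tree. */
static struct vmem_altmap *lookup_altmap(unsigned long addr)
{
        (void)addr;
        return NULL;            /* the racy walk is elided here */
}

/* Before: the leaf re-derived the altmap from the address. */
static void vmemmap_free_old(unsigned long start, unsigned long end)
{
        struct vmem_altmap *altmap = lookup_altmap(start);

        (void)end;
        (void)altmap;           /* ... free, honoring altmap if found ... */
}

/* After: the caller that already holds the pointer passes it down;
 * NULL means the memmap came from the regular allocator. */
static void vmemmap_free_new(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        (void)start;
        (void)end;
        (void)altmap;           /* ... free, honoring altmap if set ... */
}

int main(void)
{
        vmemmap_free_old(0x1000, 0x2000);
        vmemmap_free_new(0x1000, 0x2000, NULL);
        return 0;
}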
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c |  7
-rw-r--r--  mm/sparse.c         | 23
2 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index eae6bf4..a8dde97 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -536,7 +536,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
 }
 
 static int __remove_section(struct zone *zone, struct mem_section *ms,
-                unsigned long map_offset)
+                unsigned long map_offset, struct vmem_altmap *altmap)
 {
         unsigned long start_pfn;
         int scn_nr;
@@ -553,7 +553,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
 
         start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
         __remove_zone(zone, start_pfn);
-        sparse_remove_one_section(zone, ms, map_offset);
+        sparse_remove_one_section(zone, ms, map_offset, altmap);
         return 0;
 }
 
@@ -607,7 +607,8 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
         for (i = 0; i < sections_to_remove; i++) {
                 unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
 
-                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
+                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
+                                altmap);
                 map_offset = 0;
                 if (ret)
                         break;
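With the three hunks above, mm/memory_hotplug.c threads the altmap through
without touching it: __remove_pages() hands it to __remove_section(), which
hands it to sparse_remove_one_section(). For reference, the signatures after
this patch; the full __remove_pages() prototype is assumed from the parent
commit in this series (da024512a1fa), which added its altmap argument:

int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
static int __remove_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset, struct vmem_altmap *altmap);
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset, struct vmem_altmap *altmap);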
diff --git a/mm/sparse.c b/mm/sparse.c
index 5f4a0da..06130c1 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -685,12 +685,13 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
         /* This will make the necessary allocations eventually. */
         return sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+                struct vmem_altmap *altmap)
 {
         unsigned long start = (unsigned long)memmap;
         unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-        vmemmap_free(start, end);
+        vmemmap_free(start, end, altmap);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static void free_map_bootmem(struct page *memmap)
@@ -698,7 +699,7 @@ static void free_map_bootmem(struct page *memmap)
         unsigned long start = (unsigned long)memmap;
         unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-        vmemmap_free(start, end);
+        vmemmap_free(start, end, NULL);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
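Note the asymmetry in the two hunks above: __kfree_section_memmap() forwards
the altmap it is given, while free_map_bootmem() passes NULL, since a memmap
allocated from bootmem at boot is never backed by device memory. A minimal
sketch of the dispatch this enables in the freeing path, reusing the
simplified types from the earlier sketch (model code, not the arch
implementation; free_range_to_buddy() is a hypothetical helper):

/* Stand-in for the kernel's memmap entry type. */
struct page {
        unsigned long flags;
};

static void free_range_to_buddy(unsigned long start, unsigned long end)
{
        (void)start;
        (void)end;              /* return pages to the regular allocator */
}

static void model_vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        unsigned long nr_pfns = (end - start) / sizeof(struct page);

        if (altmap)
                altmap->alloc -= nr_pfns;  /* back to the device reservation */
        else
                free_range_to_buddy(start, end);
}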
@@ -729,7 +730,8 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
         return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+                struct vmem_altmap *altmap)
 {
         if (is_vmalloc_addr(memmap))
                 vfree(memmap);
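In this !CONFIG_SPARSEMEM_VMEMMAP variant the new altmap parameter is accepted
but unused: classic sparse memmaps come from vmalloc or the page allocator, so
the argument exists only to keep the call sites uniform across both
configurations.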
@@ -798,7 +800,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
                 return -ENOMEM;
         usemap = __kmalloc_section_usemap();
         if (!usemap) {
-                __kfree_section_memmap(memmap);
+                __kfree_section_memmap(memmap, altmap);
                 return -ENOMEM;
         }
 
@@ -820,7 +822,7 @@ out:
         pgdat_resize_unlock(pgdat, &flags);
         if (ret <= 0) {
                 kfree(usemap);
-                __kfree_section_memmap(memmap);
+                __kfree_section_memmap(memmap, altmap);
         }
         return ret;
 }
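Both error paths in sparse_add_one_section() now free the memmap through the
same altmap it was populated from (via kmalloc_section_memmap() earlier in the
function), so pages drawn from a device reservation are accounted back to it
rather than handed to the page allocator.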
@@ -847,7 +849,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
 
-static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+static void free_section_usemap(struct page *memmap, unsigned long *usemap,
+                struct vmem_altmap *altmap)
 {
         struct page *usemap_page;
 
@@ -861,7 +864,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
         if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                 kfree(usemap);
                 if (memmap)
-                        __kfree_section_memmap(memmap);
+                        __kfree_section_memmap(memmap, altmap);
                 return;
         }
 
@@ -875,7 +878,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
-                unsigned long map_offset)
+                unsigned long map_offset, struct vmem_altmap *altmap)
 {
         struct page *memmap = NULL;
         unsigned long *usemap = NULL, flags;
@@ -893,7 +896,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
         clear_hwpoisoned_pages(memmap + map_offset,
                         PAGES_PER_SECTION - map_offset);
 
-        free_section_usemap(memmap, usemap);
+        free_section_usemap(memmap, usemap, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
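Taken together, the altmap now travels one explicit path from the hotplug
entry point down to the arch teardown code:

__remove_pages(..., altmap)
  -> __remove_section(..., altmap)
    -> sparse_remove_one_section(..., altmap)
      -> free_section_usemap(..., altmap)
        -> __kfree_section_memmap(memmap, altmap)
          -> vmemmap_free(start, end, altmap)

replacing the per-call radix tree lookup the commit message describes.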