author     | Michael Holzheu <holzheu@linux.vnet.ibm.com>    | 2011-10-30 15:16:44 +0100
committer  | Martin Schwidefsky <schwidefsky@de.ibm.com>     | 2011-10-30 15:16:42 +0100
commit     | dab7a7b1538fec48790a321a58180adae79a3f3c (patch)
tree       | 145614bdf89a106508cec17bd6f13e6686ffed07 /arch
parent     | 558df7209e7997275f6b8ad37737494cf2da1512 (diff)
[S390] Add architecture code for unmapping crashkernel memory
This patch implements the crash_map_pages() function for s390.
KEXEC_CRASH_MEM_ALIGN is set to HPAGE_SIZE in order to support
kernel mappings that use large pages. HPAGE_SIZE alignment is also
used for CONFIG_HUGETLB_PAGE=n, so that all s390 systems end up with
the same 1 MiB alignment.
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
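
For context on the alignment change below: the kernel's ALIGN() macro rounds a value up to the next multiple of a power-of-two boundary, so reserve_crashkernel() now rounds both crash_base and crash_size up to HPAGE_SIZE instead of PAGE_SIZE. The following user-space sketch only illustrates that arithmetic; ALIGN_UP and the sample values are made up for the example, and HPAGE_SIZE is assumed to be 1 MiB as the message above states.

```c
#include <stdio.h>

#define HPAGE_SIZE            (1UL << 20)      /* 1 MiB, assumed here */
#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
/* power-of-two round-up, same idea as the kernel's ALIGN() */
#define ALIGN_UP(x, a)        (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long crash_base = 0x00940000UL;   /* 9.25 MiB, example only */
	unsigned long crash_size = 0x07f00000UL;   /* 127 MiB, example only  */

	/* base is rounded up to 10 MiB; size is already 1 MiB aligned */
	printf("crash_base: %#lx -> %#lx\n",
	       crash_base, ALIGN_UP(crash_base, KEXEC_CRASH_MEM_ALIGN));
	printf("crash_size: %#lx -> %#lx\n",
	       crash_size, ALIGN_UP(crash_size, KEXEC_CRASH_MEM_ALIGN));
	return 0;
}
```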
Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/include/asm/kexec.h    |  3
-rw-r--r-- | arch/s390/kernel/machine_kexec.c | 31
-rw-r--r-- | arch/s390/kernel/setup.c         | 10
3 files changed, 40 insertions, 4 deletions
```diff
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index fb1c96f..cf4e47b 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -36,6 +36,9 @@
 /* Allocate one page for the pdp and the second for the code */
 #define KEXEC_CONTROL_PAGE_SIZE 4096
 
+/* Alignment of crashkernel memory */
+#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
+
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 0ceac06..13a0b52 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -133,6 +133,37 @@ static int kdump_csum_valid(struct kimage *image)
 }
 
 /*
+ * Map or unmap crashkernel memory
+ */
+static void crash_map_pages(int enable)
+{
+	unsigned long size = resource_size(&crashk_res);
+
+	BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
+	       size % KEXEC_CRASH_MEM_ALIGN);
+	if (enable)
+		vmem_add_mapping(crashk_res.start, size);
+	else
+		vmem_remove_mapping(crashk_res.start, size);
+}
+
+/*
+ * Map crashkernel memory
+ */
+void crash_map_reserved_pages(void)
+{
+	crash_map_pages(1);
+}
+
+/*
+ * Unmap crashkernel memory
+ */
+void crash_unmap_reserved_pages(void)
+{
+	crash_map_pages(0);
+}
+
+/*
  * Give back memory to hypervisor before new kdump is loaded
  */
 static int machine_kexec_prepare_kdump(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 63c81de..6f8e377 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -446,6 +446,7 @@ static void __init setup_resources(void)
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 		switch (memory_chunk[i].type) {
 		case CHUNK_READ_WRITE:
+		case CHUNK_CRASHK:
 			res->name = "System RAM";
 			break;
 		case CHUNK_READ_ONLY:
@@ -720,8 +721,8 @@ static void __init reserve_crashkernel(void)
 			       &crash_base);
 	if (rc || crash_size == 0)
 		return;
-	crash_base = PAGE_ALIGN(crash_base);
-	crash_size = PAGE_ALIGN(crash_size);
+	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
+	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
 	if (register_memory_notifier(&kdump_mem_nb))
 		return;
 	if (!crash_base)
@@ -741,7 +742,7 @@ static void __init reserve_crashkernel(void)
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 	insert_resource(&iomem_resource, &crashk_res);
-	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_READ_WRITE);
+	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
 	pr_info("Reserving %lluMB of memory at %lluMB "
 		"for crashkernel (System RAM: %luMB)\n",
 		crash_size >> 20, crash_base >> 20, memory_end >> 20);
@@ -816,7 +817,8 @@ setup_memory(void)
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
 		unsigned long start_chunk, end_chunk, pfn;
 
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
+		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
+		    memory_chunk[i].type != CHUNK_CRASHK)
 			continue;
 		start_chunk = PFN_DOWN(memory_chunk[i].addr);
 		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
```
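
The exported crash_map_reserved_pages()/crash_unmap_reserved_pages() hooks are meant to be called in matched pairs around any access to the reserved region, which is otherwise left unmapped. The fragment below is only a hedged sketch of that pairing: the helper name is hypothetical, the real call sites live in common kexec code rather than in this diff, and it assumes the declarations from the parent infrastructure patch are visible via <linux/kexec.h>.

```c
#include <linux/kexec.h>	/* crashk_res, crash_map_reserved_pages()  */
#include <linux/string.h>	/* memcpy()                                */

/*
 * Hypothetical helper, for illustration only: copy a buffer into the
 * reserved crashkernel region while it is temporarily mapped.  Assumes
 * offset + len stays within crashk_res and that the region is identity
 * mapped once vmem_add_mapping() has run.
 */
static void copy_to_crashkernel_sketch(unsigned long offset,
				       const void *src, size_t len)
{
	void *dst = (void *)(unsigned long)(crashk_res.start + offset);

	crash_map_reserved_pages();	/* create the kernel mapping    */
	memcpy(dst, src, len);		/* region is accessible here    */
	crash_unmap_reserved_pages();	/* tear the mapping down again  */
}
```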