summary | refs | log | tree | commit | diff | stats
path: root/sys/powerpc/booke/pmap.c
diff options
context:
space:
mode:
author: markj <markj@FreeBSD.org> 2015-01-07 01:01:39 +0000
committer: markj <markj@FreeBSD.org> 2015-01-07 01:01:39 +0000
commit: 7e7e145818dbebec11d3fb1dded0452f0c3d99f9 (patch)
tree: 6d50f1ee65f4ceaf3fa5c14faf47e9ecd3aae3e5 /sys/powerpc/booke/pmap.c
parent: d7969594c9a8a347e4914f5381da2436c1a93710 (diff)
download: FreeBSD-src-7e7e145818dbebec11d3fb1dded0452f0c3d99f9.zip
download: FreeBSD-src-7e7e145818dbebec11d3fb1dded0452f0c3d99f9.tar.gz
Factor out duplicated code from dumpsys() on each architecture into generic
code in sys/kern/kern_dump.c.

Most dumpsys() implementations are nearly identical and simply redefine a
number of constants and helper subroutines; a generic implementation will
make it easier to implement features around kernel core dumps. This change
does not alter any minidump code and should have no functional impact.

PR:			193873
Differential Revision:	https://reviews.freebsd.org/D904
Submitted by:		Conrad Meyer <conrad.meyer@isilon.com>
Reviewed by:		jhibbits (earlier version)
Sponsored by:		EMC / Isilon Storage Division
Diffstat (limited to 'sys/powerpc/booke/pmap.c')
-rw-r--r--  sys/powerpc/booke/pmap.c | 216
1 file changed, 101 insertions(+), 115 deletions(-)
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 942ad12..061704f 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -52,6 +52,7 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
+#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
@@ -59,6 +60,7 @@ __FBSDID("$FreeBSD$");
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
+#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
@@ -100,8 +102,6 @@ __FBSDID("$FreeBSD$");
#define TODO panic("%s: not implemented", __func__);
-extern int dumpsys_minidump;
-
extern unsigned char _etext[];
extern unsigned char _end[];
@@ -322,11 +322,11 @@ static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
vm_size_t);
-static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
- vm_size_t, vm_size_t *);
-static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
- vm_size_t, vm_offset_t);
-static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);
+static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
+ void **);
+static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
+ void *);
+static void mmu_booke_scan_init(mmu_t);
static mmu_method_t mmu_booke_methods[] = {
/* pmap dispatcher interface */
@@ -381,7 +381,7 @@ static mmu_method_t mmu_booke_methods[] = {
/* dumpsys() support */
MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
- MMUMETHOD(mmu_scan_md, mmu_booke_scan_md),
+ MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
{ 0, 0 }
};
@@ -2534,139 +2534,125 @@ mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
return (EFAULT);
}
-vm_offset_t
-mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
- vm_size_t *sz)
+void
+mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
- vm_paddr_t pa, ppa;
- vm_offset_t va;
+ vm_paddr_t ppa;
+ vm_offset_t ofs;
vm_size_t gran;
- /* Raw physical memory dumps don't have a virtual address. */
- if (md->md_vaddr == ~0UL) {
- /* We always map a 256MB page at 256M. */
- gran = 256 * 1024 * 1024;
- pa = md->md_paddr + ofs;
- ppa = pa & ~(gran - 1);
- ofs = pa - ppa;
- va = gran;
- tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
- if (*sz > (gran - ofs))
- *sz = gran - ofs;
- return (va + ofs);
- }
-
/* Minidumps are based on virtual memory addresses. */
- va = md->md_vaddr + ofs;
- if (va >= kernstart + kernsize) {
- gran = PAGE_SIZE - (va & PAGE_MASK);
- if (*sz > gran)
- *sz = gran;
+ if (do_minidump) {
+ *va = (void *)pa;
+ return;
}
- return (va);
+
+ /* Raw physical memory dumps don't have a virtual address. */
+ /* We always map a 256MB page at 256M. */
+ gran = 256 * 1024 * 1024;
+ ppa = pa & ~(gran - 1);
+ ofs = pa - ppa;
+ *va = (void *)gran;
+ tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
+
+ if (sz > (gran - ofs))
+ tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
+ _TLB_ENTRY_IO);
}
void
-mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
- vm_offset_t va)
+mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
{
+ vm_paddr_t ppa;
+ vm_offset_t ofs;
+ vm_size_t gran;
+
+ /* Minidumps are based on virtual memory addresses. */
+ /* Nothing to do... */
+ if (do_minidump)
+ return;
/* Raw physical memory dumps don't have a virtual address. */
- if (md->md_vaddr == ~0UL) {
+ tlb1_idx--;
+ tlb1[tlb1_idx].mas1 = 0;
+ tlb1[tlb1_idx].mas2 = 0;
+ tlb1[tlb1_idx].mas3 = 0;
+ tlb1_write_entry(tlb1_idx);
+
+ gran = 256 * 1024 * 1024;
+ ppa = pa & ~(gran - 1);
+ ofs = pa - ppa;
+ if (sz > (gran - ofs)) {
tlb1_idx--;
tlb1[tlb1_idx].mas1 = 0;
tlb1[tlb1_idx].mas2 = 0;
tlb1[tlb1_idx].mas3 = 0;
tlb1_write_entry(tlb1_idx);
- return;
}
-
- /* Minidumps are based on virtual memory addresses. */
- /* Nothing to do... */
}
-struct pmap_md *
-mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
+extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
+
+void
+mmu_booke_scan_init(mmu_t mmu)
{
- static struct pmap_md md;
- pte_t *pte;
vm_offset_t va;
-
- if (dumpsys_minidump) {
- md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
- if (prev == NULL) {
- /* 1st: kernel .data and .bss. */
- md.md_index = 1;
- md.md_vaddr = trunc_page((uintptr_t)_etext);
- md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
- return (&md);
+ pte_t *pte;
+ int i;
+
+ if (!do_minidump) {
+ /* Initialize phys. segments for dumpsys(). */
+ memset(&dump_map, 0, sizeof(dump_map));
+ mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
+ &availmem_regions_sz);
+ for (i = 0; i < physmem_regions_sz; i++) {
+ dump_map[i].pa_start = physmem_regions[i].mr_start;
+ dump_map[i].pa_size = physmem_regions[i].mr_size;
+ }
+ return;
+ }
+
+ /* Virtual segments for minidumps: */
+ memset(&dump_map, 0, sizeof(dump_map));
+
+ /* 1st: kernel .data and .bss. */
+ dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
+ dump_map[0].pa_size =
+ round_page((uintptr_t)_end) - dump_map[0].pa_start;
+
+ /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
+ dump_map[1].pa_start = data_start;
+ dump_map[1].pa_size = data_end - data_start;
+
+ /* 3rd: kernel VM. */
+ va = dump_map[1].pa_start + dump_map[1].pa_size;
+ /* Find start of next chunk (from va). */
+ while (va < virtual_end) {
+ /* Don't dump the buffer cache. */
+ if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
+ va = kmi.buffer_eva;
+ continue;
}
- switch (prev->md_index) {
- case 1:
- /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
- md.md_index = 2;
- md.md_vaddr = data_start;
- md.md_size = data_end - data_start;
+ pte = pte_find(mmu, kernel_pmap, va);
+ if (pte != NULL && PTE_ISVALID(pte))
break;
- case 2:
- /* 3rd: kernel VM. */
- va = prev->md_vaddr + prev->md_size;
- /* Find start of next chunk (from va). */
- while (va < virtual_end) {
- /* Don't dump the buffer cache. */
- if (va >= kmi.buffer_sva &&
- va < kmi.buffer_eva) {
- va = kmi.buffer_eva;
- continue;
- }
- pte = pte_find(mmu, kernel_pmap, va);
- if (pte != NULL && PTE_ISVALID(pte))
- break;
- va += PAGE_SIZE;
- }
- if (va < virtual_end) {
- md.md_vaddr = va;
- va += PAGE_SIZE;
- /* Find last page in chunk. */
- while (va < virtual_end) {
- /* Don't run into the buffer cache. */
- if (va == kmi.buffer_sva)
- break;
- pte = pte_find(mmu, kernel_pmap, va);
- if (pte == NULL || !PTE_ISVALID(pte))
- break;
- va += PAGE_SIZE;
- }
- md.md_size = va - md.md_vaddr;
+ va += PAGE_SIZE;
+ }
+ if (va < virtual_end) {
+ dump_map[2].pa_start = va;
+ va += PAGE_SIZE;
+ /* Find last page in chunk. */
+ while (va < virtual_end) {
+ /* Don't run into the buffer cache. */
+ if (va == kmi.buffer_sva)
break;
- }
- md.md_index = 3;
- /* FALLTHROUGH */
- default:
- return (NULL);
- }
- } else { /* minidumps */
- mem_regions(&physmem_regions, &physmem_regions_sz,
- &availmem_regions, &availmem_regions_sz);
-
- if (prev == NULL) {
- /* first physical chunk. */
- md.md_paddr = physmem_regions[0].mr_start;
- md.md_size = physmem_regions[0].mr_size;
- md.md_vaddr = ~0UL;
- md.md_index = 1;
- } else if (md.md_index < physmem_regions_sz) {
- md.md_paddr = physmem_regions[md.md_index].mr_start;
- md.md_size = physmem_regions[md.md_index].mr_size;
- md.md_vaddr = ~0UL;
- md.md_index++;
- } else {
- /* There's no next physical chunk. */
- return (NULL);
+ pte = pte_find(mmu, kernel_pmap, va);
+ if (pte == NULL || !PTE_ISVALID(pte))
+ break;
+ va += PAGE_SIZE;
}
+ dump_map[2].pa_size = va - dump_map[2].pa_start;
}
-
- return (&md);
}
/*
OpenPOWER on IntegriCloud