path: root/sys/powerpc/aim
author		markj <markj@FreeBSD.org>	2015-01-07 01:01:39 +0000
committer	markj <markj@FreeBSD.org>	2015-01-07 01:01:39 +0000
commit		7e7e145818dbebec11d3fb1dded0452f0c3d99f9 (patch)
tree		6d50f1ee65f4ceaf3fa5c14faf47e9ecd3aae3e5 /sys/powerpc/aim
parent		d7969594c9a8a347e4914f5381da2436c1a93710 (diff)
Factor out duplicated code from dumpsys() on each architecture into generic
code in sys/kern/kern_dump.c.

Most dumpsys() implementations are nearly identical and simply redefine a
number of constants and helper subroutines; a generic implementation will
make it easier to implement features around kernel core dumps. This change
does not alter any minidump code and should have no functional impact.

PR:		193873
Differential Revision:	https://reviews.freebsd.org/D904
Submitted by:	Conrad Meyer <conrad.meyer@isilon.com>
Reviewed by:	jhibbits (earlier version)
Sponsored by:	EMC / Isilon Storage Division
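
[Editor's note] The new interface replaces the resumable per-chunk iterator
(mmu_scan_md) with two simpler hooks: mmu_scan_init() fills a
machine-independent table of segments (struct dump_pa, with pa_start/pa_size
fields), and mmu_dumpsys_map() hands generic code a pointer through which a
given physical range can be read. A rough sketch of how generic dumpsys()
code might walk that table follows; the pmap_scan_init()/pmap_dumpsys_map()
wrapper names and the zero-sized-slot terminator are assumptions for
illustration, not the committed kern_dump.c:

	/*
	 * Sketch only: a generic dump loop consuming dump_map[] through
	 * the hooks introduced in this commit.  The wrapper names and the
	 * zero-sized-slot terminator are assumptions, not code from the diff.
	 */
	extern struct dump_pa dump_map[];

	static void
	dumpsys_walk_sketch(void)
	{
		struct dump_pa *p;
		void *va;
		size_t ofs, sz;
		int i;

		pmap_scan_init();	/* assumed wrapper: fill dump_map[] */
		for (i = 0; dump_map[i].pa_size != 0; i++) {
			p = &dump_map[i];
			for (ofs = 0; ofs < p->pa_size; ofs += sz) {
				sz = MIN(PAGE_SIZE, p->pa_size - ofs);
				/* Get a readable VA for this chunk. */
				pmap_dumpsys_map(p->pa_start + ofs, sz, &va);
				/* ... write sz bytes at va to the dump device ... */
			}
		}
	}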
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r--	sys/powerpc/aim/mmu_oea.c	155
-rw-r--r--	sys/powerpc/aim/mmu_oea64.c	150
2 files changed, 127 insertions, 178 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 742dd70..96628f71 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -106,8 +106,10 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
+#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
+#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
@@ -163,8 +165,6 @@ struct ofw_map {
extern unsigned char _etext[];
extern unsigned char _end[];
-extern int dumpsys_minidump;
-
/*
* Map of physical memory regions.
*/
@@ -314,9 +314,8 @@ void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-vm_offset_t moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
- vm_size_t *sz);
-struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);
+void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
+void moea_scan_init(mmu_t mmu);
static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
@@ -363,7 +362,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_kenter, moea_kenter),
MMUMETHOD(mmu_kenter_attr, moea_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
- MMUMETHOD(mmu_scan_md, moea_scan_md),
+ MMUMETHOD(mmu_scan_init, moea_scan_init),
MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map),
{ 0, 0 }
@@ -2628,100 +2627,74 @@ moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
PMAP_UNLOCK(pm);
}
-vm_offset_t
-moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
- vm_size_t *sz)
+void
+moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
- if (md->md_vaddr == ~0UL)
- return (md->md_paddr + ofs);
- else
- return (md->md_vaddr + ofs);
+
+ *va = (void *)pa;
}
-struct pmap_md *
-moea_scan_md(mmu_t mmu, struct pmap_md *prev)
+extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
+
+void
+moea_scan_init(mmu_t mmu)
{
- static struct pmap_md md;
struct pvo_entry *pvo;
vm_offset_t va;
-
- if (dumpsys_minidump) {
- md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
- if (prev == NULL) {
- /* 1st: kernel .data and .bss. */
- md.md_index = 1;
- md.md_vaddr = trunc_page((uintptr_t)_etext);
- md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
- return (&md);
+ int i;
+
+ if (!do_minidump) {
+ /* Initialize phys. segments for dumpsys(). */
+ memset(&dump_map, 0, sizeof(dump_map));
+ mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
+ for (i = 0; i < pregions_sz; i++) {
+ dump_map[i].pa_start = pregions[i].mr_start;
+ dump_map[i].pa_size = pregions[i].mr_size;
+ }
+ return;
+ }
+
+ /* Virtual segments for minidumps: */
+ memset(&dump_map, 0, sizeof(dump_map));
+
+ /* 1st: kernel .data and .bss. */
+ dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
+ dump_map[0].pa_size =
+ round_page((uintptr_t)_end) - dump_map[0].pa_start;
+
+ /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
+ dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
+ dump_map[1].pa_size = round_page(msgbufp->msg_size);
+
+ /* 3rd: kernel VM. */
+ va = dump_map[1].pa_start + dump_map[1].pa_size;
+ /* Find start of next chunk (from va). */
+ while (va < virtual_end) {
+ /* Don't dump the buffer cache. */
+ if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
+ va = kmi.buffer_eva;
+ continue;
}
- switch (prev->md_index) {
- case 1:
- /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
- md.md_index = 2;
- md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
- md.md_size = round_page(msgbufp->msg_size);
+ pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
break;
- case 2:
- /* 3rd: kernel VM. */
- va = prev->md_vaddr + prev->md_size;
- /* Find start of next chunk (from va). */
- while (va < virtual_end) {
- /* Don't dump the buffer cache. */
- if (va >= kmi.buffer_sva &&
- va < kmi.buffer_eva) {
- va = kmi.buffer_eva;
- continue;
- }
- pvo = moea_pvo_find_va(kernel_pmap,
- va & ~ADDR_POFF, NULL);
- if (pvo != NULL &&
- (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
- break;
- va += PAGE_SIZE;
- }
- if (va < virtual_end) {
- md.md_vaddr = va;
- va += PAGE_SIZE;
- /* Find last page in chunk. */
- while (va < virtual_end) {
- /* Don't run into the buffer cache. */
- if (va == kmi.buffer_sva)
- break;
- pvo = moea_pvo_find_va(kernel_pmap,
- va & ~ADDR_POFF, NULL);
- if (pvo == NULL ||
- !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
- break;
- va += PAGE_SIZE;
- }
- md.md_size = va - md.md_vaddr;
+ va += PAGE_SIZE;
+ }
+ if (va < virtual_end) {
+ dump_map[2].pa_start = va;
+ va += PAGE_SIZE;
+ /* Find last page in chunk. */
+ while (va < virtual_end) {
+ /* Don't run into the buffer cache. */
+ if (va == kmi.buffer_sva)
break;
- }
- md.md_index = 3;
- /* FALLTHROUGH */
- default:
- return (NULL);
- }
- } else { /* minidumps */
- mem_regions(&pregions, &pregions_sz,
- &regions, &regions_sz);
-
- if (prev == NULL) {
- /* first physical chunk. */
- md.md_paddr = pregions[0].mr_start;
- md.md_size = pregions[0].mr_size;
- md.md_vaddr = ~0UL;
- md.md_index = 1;
- } else if (md.md_index < pregions_sz) {
- md.md_paddr = pregions[md.md_index].mr_start;
- md.md_size = pregions[md.md_index].mr_size;
- md.md_vaddr = ~0UL;
- md.md_index++;
- } else {
- /* There's no next physical chunk. */
- return (NULL);
+ pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF,
+ NULL);
+ if (pvo == NULL ||
+ !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
+ break;
+ va += PAGE_SIZE;
}
+ dump_map[2].pa_size = va - dump_map[2].pa_start;
}
-
- return (&md);
}
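
[Editor's note] For the minidump path, this file (and mmu_oea64.c below)
replaces the old state machine (static struct pmap_md plus md_index
bookkeeping) with a single up-front pass that records three virtual segments
in dump_map[]: kernel .data/.bss, the msgbuf, and the first mapped run of
kernel VM. A condensed sketch of that third scan, with a hypothetical
page_is_mapped() standing in for the moea_pvo_find_va()/PTE_VALID check above:

	/*
	 * Condensed sketch of the kernel-VM scan; page_is_mapped() is a
	 * hypothetical stand-in for the pvo lookup + valid-PTE test.
	 */
	static bool page_is_mapped(vm_offset_t va);	/* assumed helper */

	static void
	scan_kernel_vm_sketch(void)
	{
		vm_offset_t va;

		/* Start where the msgbuf segment (dump_map[1]) ends. */
		va = dump_map[1].pa_start + dump_map[1].pa_size;

		/* Find the first mapped page, skipping the buffer cache. */
		while (va < virtual_end) {
			if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
				va = kmi.buffer_eva;
				continue;
			}
			if (page_is_mapped(va))
				break;
			va += PAGE_SIZE;
		}
		if (va >= virtual_end)
			return;		/* no mapped kernel VM to record */

		/* Grow the segment until an unmapped page or the buffer cache. */
		dump_map[2].pa_start = va;
		do {
			va += PAGE_SIZE;
		} while (va < virtual_end && va != kmi.buffer_sva &&
		    page_is_mapped(va));
		dump_map[2].pa_size = va - dump_map[2].pa_start;
	}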
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 9677b52..180321f 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -107,8 +107,10 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
+#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
+#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
@@ -187,7 +189,6 @@ struct ofw_map {
extern unsigned char _etext[];
extern unsigned char _end[];
-extern int dumpsys_minidump;
extern int ofw_real_mode;
/*
@@ -328,9 +329,9 @@ void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
- vm_size_t *sz);
-struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);
+void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
+ void **va);
+void moea64_scan_init(mmu_t mmu);
static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
@@ -376,7 +377,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_kenter, moea64_kenter),
MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
- MMUMETHOD(mmu_scan_md, moea64_scan_md),
+ MMUMETHOD(mmu_scan_init, moea64_scan_init),
MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
{ 0, 0 }
@@ -2615,97 +2616,72 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
PMAP_UNLOCK(pm);
}
-vm_offset_t
-moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
- vm_size_t *sz)
+void
+moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
- if (md->md_vaddr == ~0UL)
- return (md->md_paddr + ofs);
- else
- return (md->md_vaddr + ofs);
+
+ *va = (void *)pa;
}
-struct pmap_md *
-moea64_scan_md(mmu_t mmu, struct pmap_md *prev)
+extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
+
+void
+moea64_scan_init(mmu_t mmu)
{
- static struct pmap_md md;
struct pvo_entry *pvo;
vm_offset_t va;
-
- if (dumpsys_minidump) {
- md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
- if (prev == NULL) {
- /* 1st: kernel .data and .bss. */
- md.md_index = 1;
- md.md_vaddr = trunc_page((uintptr_t)_etext);
- md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
- return (&md);
+ int i;
+
+ if (!do_minidump) {
+ /* Initialize phys. segments for dumpsys(). */
+ memset(&dump_map, 0, sizeof(dump_map));
+ mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
+ for (i = 0; i < pregions_sz; i++) {
+ dump_map[i].pa_start = pregions[i].mr_start;
+ dump_map[i].pa_size = pregions[i].mr_size;
+ }
+ return;
+ }
+
+ /* Virtual segments for minidumps: */
+ memset(&dump_map, 0, sizeof(dump_map));
+
+ /* 1st: kernel .data and .bss. */
+ dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
+ dump_map[0].pa_size = round_page((uintptr_t)_end) - dump_map[0].pa_start;
+
+ /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
+ dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
+ dump_map[1].pa_size = round_page(msgbufp->msg_size);
+
+ /* 3rd: kernel VM. */
+ va = dump_map[1].pa_start + dump_map[1].pa_size;
+ /* Find start of next chunk (from va). */
+ while (va < virtual_end) {
+ /* Don't dump the buffer cache. */
+ if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
+ va = kmi.buffer_eva;
+ continue;
}
- switch (prev->md_index) {
- case 1:
- /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
- md.md_index = 2;
- md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
- md.md_size = round_page(msgbufp->msg_size);
+ pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
+ if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
break;
- case 2:
- /* 3rd: kernel VM. */
- va = prev->md_vaddr + prev->md_size;
- /* Find start of next chunk (from va). */
- while (va < virtual_end) {
- /* Don't dump the buffer cache. */
- if (va >= kmi.buffer_sva &&
- va < kmi.buffer_eva) {
- va = kmi.buffer_eva;
- continue;
- }
- pvo = moea64_pvo_find_va(kernel_pmap,
- va & ~ADDR_POFF);
- if (pvo != NULL &&
- (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
- break;
- va += PAGE_SIZE;
- }
- if (va < virtual_end) {
- md.md_vaddr = va;
- va += PAGE_SIZE;
- /* Find last page in chunk. */
- while (va < virtual_end) {
- /* Don't run into the buffer cache. */
- if (va == kmi.buffer_sva)
- break;
- pvo = moea64_pvo_find_va(kernel_pmap,
- va & ~ADDR_POFF);
- if (pvo == NULL ||
- !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
- break;
- va += PAGE_SIZE;
- }
- md.md_size = va - md.md_vaddr;
+ va += PAGE_SIZE;
+ }
+ if (va < virtual_end) {
+ dump_map[2].pa_start = va;
+ va += PAGE_SIZE;
+ /* Find last page in chunk. */
+ while (va < virtual_end) {
+ /* Don't run into the buffer cache. */
+ if (va == kmi.buffer_sva)
break;
- }
- md.md_index = 3;
- /* FALLTHROUGH */
- default:
- return (NULL);
- }
- } else { /* minidumps */
- if (prev == NULL) {
- /* first physical chunk. */
- md.md_paddr = pregions[0].mr_start;
- md.md_size = pregions[0].mr_size;
- md.md_vaddr = ~0UL;
- md.md_index = 1;
- } else if (md.md_index < pregions_sz) {
- md.md_paddr = pregions[md.md_index].mr_start;
- md.md_size = pregions[md.md_index].mr_size;
- md.md_vaddr = ~0UL;
- md.md_index++;
- } else {
- /* There's no next physical chunk. */
- return (NULL);
+ pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
+ if (pvo == NULL ||
+ !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
+ break;
+ va += PAGE_SIZE;
}
+ dump_map[2].pa_size = va - dump_map[2].pa_start;
}
-
- return (&md);
}
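
[Editor's note] Both pmaps expose the new hooks only through their MMUMETHOD
tables, so callers reach them via the kobj dispatch layer. An inferred sketch
of the corresponding wrappers follows; the MMU_SCAN_INIT()/MMU_DUMPSYS_MAP()
macros and the pmap_*() names follow the usual mmu_if.m convention but are
not part of this diff:

	/*
	 * Inferred dispatch wrappers (not in this diff): the MMUMETHOD
	 * entries above imply matching scan_init/dumpsys_map method
	 * declarations in mmu_if.m.
	 */
	void
	pmap_scan_init(void)
	{

		MMU_SCAN_INIT(mmu_obj);
	}

	void
	pmap_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
	{

		MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va);
	}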