Diffstat (limited to 'lib/libkvm/kvm_ia64.c')
-rw-r--r--	lib/libkvm/kvm_ia64.c	233
1 file changed, 158 insertions, 75 deletions
diff --git a/lib/libkvm/kvm_ia64.c b/lib/libkvm/kvm_ia64.c
index 74e2b80..5db7e1e 100644
--- a/lib/libkvm/kvm_ia64.c
+++ b/lib/libkvm/kvm_ia64.c
@@ -32,12 +32,21 @@
 #include <sys/elf64.h>
 #include <sys/mman.h>
 
+#ifndef CROSS_LIBKVM
 #include <machine/atomic.h>
 #include <machine/bootinfo.h>
+#include <machine/elf.h>
 #include <machine/pte.h>
+#else
+#include "../../sys/ia64/include/atomic.h"
+#include "../../sys/ia64/include/bootinfo.h"
+#include "../../sys/ia64/include/elf.h"
+#include "../../sys/ia64/include/pte.h"
+#endif
 
 #include <kvm.h>
 #include <limits.h>
+#include <stdint.h>
 #include <stdlib.h>
 #include <unistd.h>
 
@@ -55,6 +64,8 @@
 #define	PBVM_BASE	0x9ffc000000000000UL
 #define	PBVM_PGSZ	(64 * 1024)
 
+typedef size_t (a2p_f)(kvm_t *, uint64_t, off_t *);
+
 struct vmstate {
 	void	*mmapbase;
 	size_t	mmapsize;
@@ -62,6 +73,7 @@ struct vmstate {
 	u_long	kptdir;
 	u_long	*pbvm_pgtbl;
 	u_int	pbvm_pgtblsz;
+	a2p_f	*kvatop;
 };
 
 /*
@@ -70,7 +82,7 @@ struct vmstate {
  * set of headers.
  */
 static int
-_kvm_maphdrs(kvm_t *kd, size_t sz)
+ia64_maphdrs(kvm_t *kd, size_t sz)
 {
 	struct vmstate *vm = kd->vmst;
 
@@ -91,38 +103,103 @@ _kvm_maphdrs(kvm_t *kd, size_t sz)
 }
 
 /*
- * Translate a physical memory address to a file-offset in the crash-dump.
+ * Physical core support.
  */
+
 static size_t
-_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
+phys_addr2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
 {
-	Elf64_Ehdr *e = kd->vmst->mmapbase;
-	Elf64_Phdr *p = (Elf64_Phdr*)((char*)e + e->e_phoff);
-	int n = e->e_phnum;
+	Elf64_Ehdr *e;
+	Elf64_Phdr *p;
+	int n;
 
-	if (pa != REGION_ADDR(pa)) {
-		_kvm_err(kd, kd->program, "internal error");
-		return (0);
-	}
+	if (pa != REGION_ADDR(pa))
+		goto fail;
 
+	e = (Elf64_Ehdr *)(kd->vmst->mmapbase);
+	n = e->e_phnum;
+	p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff);
 	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
 		p++, n--;
 	if (n == 0)
-		return (0);
+		goto fail;
 
 	*ofs = (pa - p->p_paddr) + p->p_offset;
 	if (pgsz == 0)
 		return (p->p_memsz - (pa - p->p_paddr));
 	return (pgsz - ((size_t)pa & (pgsz - 1)));
+
+ fail:
+	_kvm_err(kd, kd->program, "invalid physical address %#jx",
+	    (uintmax_t)pa);
+	return (0);
+}
+
+static size_t
+phys_kvatop(kvm_t *kd, uint64_t va, off_t *ofs)
+{
+	struct ia64_lpte pte;
+	uint64_t pa, pgaddr, pt0addr, pt1addr;
+	size_t pgno, pgsz, pt0no, pt1no;
+
+	if (va >= REGION_BASE(6)) {
+		/* Regions 6 and 7: direct mapped. */
+		pa = REGION_ADDR(va);
+		return (phys_addr2off(kd, pa, ofs, 0));
+	} else if (va >= REGION_BASE(5)) {
+		/* Region 5: Kernel Virtual Memory. */
+		va = REGION_ADDR(va);
+		pgsz = kd->vmst->pagesize;
+		pt0no = KPTE_DIR0_INDEX(va, pgsz);
+		pt1no = KPTE_DIR1_INDEX(va, pgsz);
+		pgno = KPTE_PTE_INDEX(va, pgsz);
+		if (pt0no >= NKPTEDIR(pgsz))
+			goto fail;
+		pt0addr = kd->vmst->kptdir + (pt0no << 3);
+		if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
+			goto fail;
+		if (pt1addr == 0)
+			goto fail;
+		pt1addr += pt1no << 3;
+		if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
+			goto fail;
+		if (pgaddr == 0)
+			goto fail;
+		pgaddr += pgno * sizeof(pte);
+		if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
+			goto fail;
+		if (!(pte.pte & PTE_PRESENT))
+			goto fail;
+		pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
+		return (phys_addr2off(kd, pa, ofs, pgsz));
+	} else if (va >= PBVM_BASE) {
+		/* Region 4: Pre-Boot Virtual Memory (PBVM). */
+		va -= PBVM_BASE;
+		pgsz = PBVM_PGSZ;
+		pt0no = va / pgsz;
+		if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
+			goto fail;
+		pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
+		if (!(pt0addr & PTE_PRESENT))
+			goto fail;
+		pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
+		return (phys_addr2off(kd, pa, ofs, pgsz));
+	}
+
+ fail:
+	_kvm_err(kd, kd->program, "invalid kernel virtual address %#jx",
+	    (uintmax_t)va);
+	*ofs = -1;
+	return (0);
 }
 
 static ssize_t
-_kvm_read_phys(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
+phys_read(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
 {
 	off_t ofs;
 	size_t sz;
 
-	sz = _kvm_pa2off(kd, pa, &ofs, 0);
+	sz = phys_addr2off(kd, pa, &ofs, 0);
 	if (sz < bufsz)
 		return ((ssize_t)sz);
 
@@ -131,6 +208,50 @@ _kvm_read_phys(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
 	return (read(kd->pmfd, buf, bufsz));
 }
 
+/*
+ * Virtual core support (aka minidump).
+ */
+
+static size_t
+virt_addr2off(kvm_t *kd, uint64_t va, off_t *ofs, size_t pgsz)
+{
+	Elf64_Ehdr *e;
+	Elf64_Phdr *p;
+	int n;
+
+	if (va < REGION_BASE(4))
+		goto fail;
+
+	e = (Elf64_Ehdr *)(kd->vmst->mmapbase);
+	n = e->e_phnum;
+	p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff);
+	while (n && (va < p->p_vaddr || va >= p->p_vaddr + p->p_memsz))
+		p++, n--;
+	if (n == 0)
+		goto fail;
+
+	*ofs = (va - p->p_vaddr) + p->p_offset;
+	if (pgsz == 0)
+		return (p->p_memsz - (va - p->p_vaddr));
+	return (pgsz - ((size_t)va & (pgsz - 1)));
+
+ fail:
+	_kvm_err(kd, kd->program, "invalid virtual address %#jx",
+	    (uintmax_t)va);
+	return (0);
+}
+
+static size_t
+virt_kvatop(kvm_t *kd, uint64_t va, off_t *ofs)
+{
+
+	return (virt_addr2off(kd, va, ofs, 0));
+}
+
+/*
+ * KVM architecture support functions.
+ */
+
 void
 _kvm_freevtop(kvm_t *kd)
 {
@@ -160,27 +281,37 @@ _kvm_initvtop(kvm_t *kd)
 		return (-1);
 	}
 
+#ifndef CROSS_LIBKVM
 	kd->vmst->pagesize = getpagesize();
+#else
+	kd->vmst->pagesize = 8192;
+#endif
 
-	if (_kvm_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
+	if (ia64_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
 		return (-1);
 
 	ehdr = kd->vmst->mmapbase;
 	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
-	if (_kvm_maphdrs(kd, hdrsz) == -1)
+	if (ia64_maphdrs(kd, hdrsz) == -1)
 		return (-1);
 
+	kd->vmst->kvatop = (ehdr->e_flags & EF_IA_64_ABSOLUTE) ?
+	    phys_kvatop : virt_kvatop;
+
 	/*
 	 * Load the PBVM page table. We need this to resolve PBVM addresses.
 	 * The PBVM page table is obtained from the bootinfo structure, of
-	 * which the physical address is given to us in e_entry. If e_entry
-	 * is 0, then this is assumed to be a pre-PBVM kernel.
+	 * which the address is given to us in e_entry. If e_entry is 0, then
+	 * this is assumed to be a pre-PBVM kernel.
+	 * Note that the address of the bootinfo structure is either physical
+	 * or virtual, depending on whether the core is physical or virtual.
 	 */
-	if (ehdr->e_entry != 0) {
-		sz = _kvm_read_phys(kd, ehdr->e_entry, &bi, sizeof(bi));
+	if (ehdr->e_entry != 0 && (ehdr->e_flags & EF_IA_64_ABSOLUTE) != 0) {
+		sz = phys_read(kd, ehdr->e_entry, &bi, sizeof(bi));
 		if (sz != sizeof(bi)) {
 			_kvm_err(kd, kd->program,
-			    "cannot read bootinfo from PA %#lx", ehdr->e_entry);
+			    "cannot read bootinfo at physical address %#jx",
+			    (uintmax_t)ehdr->e_entry);
 			return (-1);
 		}
 		if (bi.bi_magic != BOOTINFO_MAGIC) {
@@ -193,12 +324,12 @@ _kvm_initvtop(kvm_t *kd)
 			return (-1);
 		}
 		kd->vmst->pbvm_pgtblsz = bi.bi_pbvm_pgtblsz;
-		sz = _kvm_read_phys(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
+		sz = phys_read(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
 		    bi.bi_pbvm_pgtblsz);
 		if (sz != bi.bi_pbvm_pgtblsz) {
 			_kvm_err(kd, kd->program,
-			    "cannot read page table from PA %#lx",
-			    bi.bi_pbvm_pgtbl);
+			    "cannot read page table at physical address %#jx",
+			    (uintmax_t)bi.bi_pbvm_pgtbl);
 			return (-1);
 		}
 	} else {
@@ -225,7 +356,7 @@ _kvm_initvtop(kvm_t *kd)
 		return (-1);
 	}
 
-	if (va < REGION_BASE(6)) {
+	if (va == REGION_BASE(5)) {
 		_kvm_err(kd, kd->program, "kptdir is itself virtual");
 		return (-1);
 	}
@@ -237,56 +368,8 @@ _kvm_initvtop(kvm_t *kd)
 int
 _kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs)
 {
-	struct ia64_lpte pte;
-	uint64_t pa, pgaddr, pt0addr, pt1addr;
-	size_t pgno, pgsz, pt0no, pt1no;
-
-	if (va >= REGION_BASE(6)) {
-		/* Regions 6 and 7: direct mapped. */
-		pa = REGION_ADDR(va);
-		return (_kvm_pa2off(kd, pa, ofs, 0));
-	} else if (va >= REGION_BASE(5)) {
-		/* Region 5: Kernel Virtual Memory. */
-		va = REGION_ADDR(va);
-		pgsz = kd->vmst->pagesize;
-		pt0no = KPTE_DIR0_INDEX(va, pgsz);
-		pt1no = KPTE_DIR1_INDEX(va, pgsz);
-		pgno = KPTE_PTE_INDEX(va, pgsz);
-		if (pt0no >= NKPTEDIR(pgsz))
-			goto fail;
-		pt0addr = kd->vmst->kptdir + (pt0no << 3);
-		if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
-			goto fail;
-		if (pt1addr == 0)
-			goto fail;
-		pt1addr += pt1no << 3;
-		if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
-			goto fail;
-		if (pgaddr == 0)
-			goto fail;
-		pgaddr += pgno * sizeof(pte);
-		if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
-			goto fail;
-		if (!(pte.pte & PTE_PRESENT))
-			goto fail;
-		pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
-		return (_kvm_pa2off(kd, pa, ofs, pgsz));
-	} else if (va >= PBVM_BASE) {
-		/* Region 4: Pre-Boot Virtual Memory (PBVM). */
-		va -= PBVM_BASE;
-		pgsz = PBVM_PGSZ;
-		pt0no = va / pgsz;
-		if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
-			goto fail;
-		pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
-		if (!(pt0addr & PTE_PRESENT))
-			goto fail;
-		pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
-		return (_kvm_pa2off(kd, pa, ofs, pgsz));
-	}
+	size_t sz;
 
- fail:
-	_kvm_err(kd, kd->program, "invalid kernel virtual address");
-	*ofs = ~0UL;
-	return (0);
+	sz = kd->vmst->kvatop(kd, va, ofs);
+	return ((sz > INT_MAX) ? INT_MAX : sz);
 }
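For reference, a minimal consumer sketch (not part of the commit; the kernel and core paths are illustrative): a kvm_read() of a kernel virtual address from a crash dump ends up in _kvm_kvatop(), which after this change dispatches through the new kvatop function pointer to phys_kvatop() or virt_kvatop() depending on EF_IA_64_ABSOLUTE in the core's ELF header.

/*
 * Sketch only: read the kernel "hz" variable out of an ia64 crash dump
 * with libkvm.  The file paths are placeholders, not mandated by the code
 * above.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2] = {
		{ .n_name = "_hz" },	/* leading '_' is stripped for ELF */
		{ .n_name = NULL }
	};
	kvm_t *kd;
	int hz;

	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
	    NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
		fprintf(stderr, "kvm_nlist: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	/* This read goes through the kvatop translation changed above. */
	if (kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
		fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	printf("hz = %d\n", hz);
	kvm_close(kd);
	return (0);
}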