diff options
author | raj <raj@FreeBSD.org> | 2008-08-28 07:38:08 +0000 |
---|---|---|
committer | raj <raj@FreeBSD.org> | 2008-08-28 07:38:08 +0000 |
commit | 4946f9aaa73e6c3e3942d8676601902b707f2659 (patch) | |
tree | 8c30bd71ec71a010a590dba4054dc8cce27de0ea /sys/powerpc | |
parent | 76e027f22ff36ea9632356d61f2dd00f164a4c8f (diff) | |
download | FreeBSD-src-4946f9aaa73e6c3e3942d8676601902b707f2659.zip FreeBSD-src-4946f9aaa73e6c3e3942d8676601902b707f2659.tar.gz |
Move initialization of the tlb0, ptbl_bufs and kernel_pdir regions to after we are
100% sure that the TLB1 mapping covers them; previously we could lock up the CPU
with an untranslated reference.
Obtained from: Semihalf
Diffstat (limited to 'sys/powerpc')
-rw-r--r-- | sys/powerpc/booke/pmap.c | 20 |
1 file changed, 14 insertions, 6 deletions
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c index a6fff41..9a4de40 100644 --- a/sys/powerpc/booke/pmap.c +++ b/sys/powerpc/booke/pmap.c @@ -411,7 +411,7 @@ ptbl_init(void) //debugf("ptbl_init: e\n"); } -/* Get an sf_buf from the freelist. */ +/* Get a ptbl_buf from the freelist. */ static struct ptbl_buf * ptbl_buf_alloc(void) { @@ -919,7 +919,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) tlb0_get_tlbconf(); /* Read TLB0 size and associativity. */ tlb0 = (tlb_entry_t *)kernelend; kernelend += sizeof(tlb_entry_t) * tlb0_size; - memset((void *)tlb0, 0, sizeof(tlb_entry_t) * tlb0_size); debugf(" tlb0 at 0x%08x end = 0x%08x\n", (u_int32_t)tlb0, kernelend); kernelend = round_page(kernelend); @@ -927,7 +926,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) /* Allocate space for ptbl_bufs. */ ptbl_bufs = (struct ptbl_buf *)kernelend; kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS; - memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (u_int32_t)ptbl_bufs, kernelend); @@ -938,7 +936,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + PDIR_SIZE - 1) / PDIR_SIZE; kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; - memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); debugf(" kernel ptbls: %d\n", kernel_ptbls); debugf(" kernel pdir at 0x%08x\n", kernel_pdir); @@ -949,6 +946,15 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) } else kernelend = (kernelend + 0xffffff) & ~0xffffff; + /* + * Clear the structures - note we can only do it safely after the + * possible additional TLB1 translations are in place so that + * all range up to the currently calculated 'kernelend' is covered. 
+ */ + memset((void *)tlb0, 0, sizeof(tlb_entry_t) * tlb0_size); + memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); + memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); + /*******************************************************/ /* Set the start and end of kva. */ /*******************************************************/ @@ -1082,11 +1088,13 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) availmem_regions[i].mr_start + availmem_regions[i].mr_size, availmem_regions[i].mr_size); - if (hwphyssz != 0 && (physsz + availmem_regions[i].mr_size) >= hwphyssz) { + if (hwphyssz != 0 && + (physsz + availmem_regions[i].mr_size) >= hwphyssz) { debugf(" hw.physmem adjust\n"); if (physsz < hwphyssz) { phys_avail[j] = availmem_regions[i].mr_start; - phys_avail[j + 1] = availmem_regions[i].mr_start + + phys_avail[j + 1] = + availmem_regions[i].mr_start + hwphyssz - physsz; physsz = hwphyssz; phys_avail_count++; |