diff options
author | nwhitehorn <nwhitehorn@FreeBSD.org> | 2010-02-25 03:53:21 +0000 |
---|---|---|
committer | nwhitehorn <nwhitehorn@FreeBSD.org> | 2010-02-25 03:53:21 +0000 |
commit | 0be33967dc512560f54754fcae2582d46ae93bf9 (patch) | |
tree | 15fbea46673e9ca23eb21fb6997c5e60ab98d31f /sys/powerpc/aim | |
parent | c82626304295f3dbf2e6d567378e95b6fe6b7175 (diff) | |
download | FreeBSD-src-0be33967dc512560f54754fcae2582d46ae93bf9.zip FreeBSD-src-0be33967dc512560f54754fcae2582d46ae93bf9.tar.gz |
Move the OEA64 scratchpage to the end of KVA from the beginning, and set
its PVO to map physical address 0 instead of kernelstart. This fixes a
situation in which a user process could attempt to return this address
via KVM, have it fault while being modified, and then panic the kernel
because (a) it is supposed to map a valid address and (b) it lies in the
no-fault region between VM_MIN_KERNEL_ADDRESS and virtual_avail.
While here, move the msgbuf and dpcpu allocations into regular KVA space for
consistency with other implementations.
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r-- | sys/powerpc/aim/mmu_oea64.c | 23 |
1 file changed, 14 insertions, 9 deletions
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c index 672dde4..6760fee 100644 --- a/sys/powerpc/aim/mmu_oea64.c +++ b/sys/powerpc/aim/mmu_oea64.c @@ -970,10 +970,10 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF); for (i = 0; i < 2; i++) { - moea64_scratchpage_va[i] = virtual_avail; - virtual_avail += PAGE_SIZE; + moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; + virtual_end -= PAGE_SIZE; - moea64_kenter(mmup,moea64_scratchpage_va[i],kernelstart); + moea64_kenter(mmup,moea64_scratchpage_va[i],0); LOCK_TABLE(); moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap, @@ -1004,20 +1004,25 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele * Allocate virtual address space for the message buffer. */ pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); - msgbufp = (struct msgbuf *)msgbuf_phys; - while (pa - msgbuf_phys < MSGBUF_SIZE) { - moea64_kenter(mmup, pa, pa); + msgbufp = (struct msgbuf *)virtual_avail; + va = virtual_avail; + virtual_avail += round_page(MSGBUF_SIZE); + while (va < virtual_avail) { + moea64_kenter(mmup, va, pa); pa += PAGE_SIZE; + va += PAGE_SIZE; } /* * Allocate virtual address space for the dynamic percpu area. */ pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); - dpcpu = (void *)pa; - while (pa - (vm_offset_t)dpcpu < DPCPU_SIZE) { - moea64_kenter(mmup, pa, pa); + dpcpu = (void *)virtual_avail; + virtual_avail += DPCPU_SIZE; + while (va < virtual_avail) { + moea64_kenter(mmup, va, pa); pa += PAGE_SIZE; + va += PAGE_SIZE; } dpcpu_init(dpcpu, 0); } |