author    marcel <marcel@FreeBSD.org>  2012-05-24 20:58:40 +0000
committer marcel <marcel@FreeBSD.org>  2012-05-24 20:58:40 +0000
commit    eeea3251f852bcd5a50fa4aa00040c779cb9da1a (patch)
tree      0885bdc84c81e44c216990f45ef1c393a5875755 /sys/powerpc/booke/pmap.c
parent    c933e51f6c2b709f1a98f25c0f49570cbad0f12f (diff)
o Rename kernload_ap to bp_kernload. This is to introduce a common prefix
  for variables that live in the boot page.
o Add bp_trace (yes, it's in the boot page), which gets zeroed before we
  try to wake a core and to which the core being woken can write markers,
  so that we know where the core was in case it doesn't wake up. The boot
  code does not yet write markers (to follow).
o Disable the boot page translation to allow the last 4K page to be used
  for whatever we please; it would get mapped otherwise.
o Fix kernstart in the case of SMP. The start argument is typically page
  aligned due to the alignment requirements that come with having a boot
  page. The point of using trunc_page() is to get the actual load address,
  given that the entry point immediately follows the ELF headers. In the
  SMP case the entry point ended up exactly 4K after the load address,
  hence the subtraction of 1 from start.
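
The kernstart change is pure page-alignment arithmetic, so a small
standalone sketch may help. This is not FreeBSD code: the 4K PAGE_SIZE and
the masking trunc_page() merely model the usual definitions, and the
addresses are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Models of the usual definitions: 4K pages, mask off the low bits. */
#define PAGE_SIZE	4096UL
#define trunc_page(x)	((uintptr_t)(x) & ~(uintptr_t)(PAGE_SIZE - 1))

int
main(void)
{
	uintptr_t load  = 0x01000000;	    /* hypothetical load address */
	uintptr_t start = load + PAGE_SIZE; /* SMP entry point: page aligned,
					       one page past the load address,
					       right after the ELF headers */

	/* On a page-aligned start, trunc_page() alone is a no-op... */
	printf("trunc_page(start)     = %#lx\n",
	    (unsigned long)trunc_page(start));	    /* 0x1001000: off by 4K */

	/* ...but backing up one byte first lands in the previous page. */
	printf("trunc_page(start - 1) = %#lx\n",
	    (unsigned long)trunc_page(start - 1));  /* 0x1000000: the load
						       address */
	return (0);
}

The subtraction is safe because the ELF headers always sit between the load
address and the entry point, so start - 1 cannot fall below the load address.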
Diffstat (limited to 'sys/powerpc/booke/pmap.c')
-rw-r--r--  sys/powerpc/booke/pmap.c  |  20
1 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 712967f..ca14f46 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -115,7 +115,7 @@ extern unsigned char _end[];
extern uint32_t *bootinfo;
#ifdef SMP
-extern uint32_t kernload_ap;
+extern uint32_t bp_kernload;
#endif
vm_paddr_t kernload;
@@ -967,10 +967,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
debugf("mmu_booke_bootstrap: entered\n");
#ifdef SMP
- kernload_ap = kernload;
+ bp_kernload = kernload;
#endif
-
/* Initialize invalidation mutex */
mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
@@ -981,8 +980,13 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
* Align kernel start and end address (kernel image).
* Note that kernel end does not necessarily relate to kernsize.
* kernsize is the size of the kernel that is actually mapped.
+ * Also note that "start - 1" is deliberate. With SMP, the
+ * entry point is exactly a page from the actual load address.
+ * As such, trunc_page() has no effect and we're off by a page.
+ * Since we always have the ELF header between the load address
+ * and the entry point, we can safely subtract 1 to compensate.
*/
- kernstart = trunc_page(start);
+ kernstart = trunc_page(start - 1);
data_start = round_page(kernelend);
data_end = data_start;
@@ -1233,9 +1237,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
* entries, but for pte_vatopa() to work correctly with kernel area
* addresses.
*/
- for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
+ for (va = kernstart; va < data_end; va += PAGE_SIZE) {
pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
- pte->rpn = kernload + (va - KERNBASE);
+ pte->rpn = kernload + (va - kernstart);
pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
PTE_VALID;
}
@@ -1397,9 +1401,7 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
(va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
- flags = 0;
- flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
- flags |= PTE_M;
+ flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
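
The third hunk above re-bases the linear kernel mapping on kernstart instead
of KERNBASE. A hedged sketch of the offset arithmetic, not FreeBSD code: the
addresses are hypothetical, and kernbase, kernstart and kernload stand in for
KERNBASE and the pmap.c variables of the same names.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t kernbase  = 0xc0000000; /* canonical kernel VA base */
	uintptr_t kernstart = 0xc0001000; /* VA the kernel actually starts
					     at; with SMP this can differ
					     from kernbase */
	uintptr_t kernload  = 0x01000000; /* physical load address */
	uintptr_t va = kernstart + 0x5000; /* some kernel VA to translate */

	/* Old formula: off by (kernstart - kernbase) whenever they differ. */
	printf("kernbase-based  pa = %#lx\n",
	    (unsigned long)(kernload + (va - kernbase)));

	/* New formula: offsets are taken from the real mapped start, so the
	   physical page is the one that actually holds the kernel data. */
	printf("kernstart-based pa = %#lx\n",
	    (unsigned long)(kernload + (va - kernstart)));
	return (0);
}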