 sys/powerpc/booke/locore.S        | 18
 sys/powerpc/booke/platform_bare.c | 20
 sys/powerpc/booke/pmap.c          | 20
 3 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/sys/powerpc/booke/locore.S b/sys/powerpc/booke/locore.S
index 293eec3..0255677 100644
--- a/sys/powerpc/booke/locore.S
+++ b/sys/powerpc/booke/locore.S
@@ -242,14 +242,20 @@ done_mapping:
__boot_page:
bl 1f
- .globl kernload_ap
-kernload_ap:
+ .globl bp_trace
+bp_trace:
+ .long 0
+
+ .globl bp_kernload
+bp_kernload:
.long 0
/*
* Initial configuration
*/
1:
+ mflr %r31 /* r31 holds the address of bp_trace */
+
/* Set HIDs */
lis %r3, HID0_E500_DEFAULT_SET@h
ori %r3, %r3, HID0_E500_DEFAULT_SET@l
@@ -318,15 +324,15 @@ kernload_ap:
mtspr SPR_MAS2, %r3
isync
- /* Retrieve kernel load [physical] address from kernload_ap */
+ /* Retrieve kernel load [physical] address from bp_kernload */
bl 4f
4: mflr %r3
rlwinm %r3, %r3, 0, 0, 19
- lis %r4, kernload_ap@h
- ori %r4, %r4, kernload_ap@l
+ lis %r4, bp_kernload@h
+ ori %r4, %r4, bp_kernload@l
lis %r5, __boot_page@h
ori %r5, %r5, __boot_page@l
- sub %r4, %r4, %r5 /* offset of kernload_ap within __boot_page */
+ sub %r4, %r4, %r5 /* offset of bp_kernload within __boot_page */
lwzx %r3, %r4, %r3
/* Set RPN and protection */
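As a side note, the PC-relative lookup above can be sketched in C. This is illustrative only (the real code runs in assembly before any C environment exists) and the function and parameter names are made up: the AP learns its current physical PC via the bl/mflr pair, masks it down to the 4K boot page, and adds the link-time offset of bp_kernload within __boot_page.

#include <stdint.h>

#define BOOT_PAGE_SIZE	4096

/* Hypothetical C rendering of the assembly sequence above. */
static uint32_t
read_bp_kernload(uintptr_t phys_pc, uintptr_t boot_page_va,
    uintptr_t bp_kernload_va)
{
	/* rlwinm %r3, %r3, 0, 0, 19: round the PC down to the boot page. */
	uintptr_t boot_page_pa = phys_pc & ~(uintptr_t)(BOOT_PAGE_SIZE - 1);

	/* sub %r4, %r4, %r5: offset of bp_kernload within __boot_page. */
	uintptr_t off = bp_kernload_va - boot_page_va;

	/* lwzx %r3, %r4, %r3: load the word at physical base + offset. */
	return (*(volatile uint32_t *)(boot_page_pa + off));
}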
diff --git a/sys/powerpc/booke/platform_bare.c b/sys/powerpc/booke/platform_bare.c
index f04bf96..58fbb7f 100644
--- a/sys/powerpc/booke/platform_bare.c
+++ b/sys/powerpc/booke/platform_bare.c
@@ -56,7 +56,8 @@ __FBSDID("$FreeBSD$");
#ifdef SMP
extern void *ap_pcpu;
extern uint8_t __boot_page[]; /* Boot page body */
-extern uint32_t kernload_ap; /* Kernel physical load address */
+extern uint32_t bp_kernload; /* Kernel physical load address */
+extern uint32_t bp_trace; /* AP boot trace field */
#endif
extern uint32_t *bootinfo;
@@ -262,8 +263,8 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
eebpcr = ccsr_read4(OCP85XX_EEBPCR);
if ((eebpcr & (1 << (pc->pc_cpuid + 24))) != 0) {
- printf("%s: CPU=%d already out of hold-off state!\n",
- __func__, pc->pc_cpuid);
+ printf("SMP: CPU %d already out of hold-off state!\n",
+ pc->pc_cpuid);
return (ENXIO);
}
@@ -273,12 +274,13 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
/*
* Set BPTR to the physical address of the boot page
*/
- bptr = ((uint32_t)__boot_page - KERNBASE) + kernload_ap;
+ bptr = ((uint32_t)__boot_page - KERNBASE) + bp_kernload;
ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000);
/*
* Release AP from hold-off state
*/
+ bp_trace = 0;
eebpcr |= (1 << (pc->pc_cpuid + 24));
ccsr_write4(OCP85XX_EEBPCR, eebpcr);
__asm __volatile("isync; msync");
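For reference, the BPTR write above packs the 4K-aligned physical address of the boot page (shifted right by 12) into the register's low bits and sets the most significant bit as the enable flag. A minimal sketch, assuming the ccsr_write4()/OCP85XX_BPTR definitions used in this file; the BPTR_EN name is made up:

#define BPTR_EN		0x80000000u	/* hypothetical name for the MSB enable bit */

/* Assumes the CCSR accessors already used in this file. */
static void
bptr_set(uint32_t boot_page_pa)
{
	ccsr_write4(OCP85XX_BPTR, (boot_page_pa >> 12) | BPTR_EN);
}

static void
bptr_clear(void)
{
	ccsr_write4(OCP85XX_BPTR, 0);	/* disable boot page translation */
}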
@@ -287,6 +289,16 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
while (!pc->pc_awake && timeout--)
DELAY(1000); /* wait 1ms */
+ /*
+ * Disable boot page translation so that the 4K page at the default
+ * address (0xfffff000) isn't left permanently remapped, which would
+ * otherwise make it unusable for anything else.
+ */
+ ccsr_write4(OCP85XX_BPTR, 0);
+
+ if (!pc->pc_awake)
+ printf("SMP: CPU %d didn't wake up (trace code %#x).\n",
+ pc->pc_cpuid, bp_trace);
return ((pc->pc_awake) ? 0 : EBUSY);
#else
/* No SMP support */
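The wake-up handshake after the release follows a bounded-poll pattern; below is a standalone sketch with hypothetical names (cpu_state stands in for struct pcpu, delay_us() for DELAY(), and the 5000-iteration bound is an assumption, since the diff does not show how timeout is initialized):

#include <stdint.h>

struct cpu_state {
	volatile int	awake;		/* set by the AP once it is running */
	int		cpuid;
};

extern volatile uint32_t bp_trace;	/* progress word the AP updates via r31 */
extern void delay_us(unsigned int us);	/* hypothetical stand-in for DELAY() */

/* Returns 0 if the AP came up, nonzero otherwise. */
static int
wait_for_ap(struct cpu_state *pc)
{
	int timeout = 5000;		/* assumed bound: ~5 s at 1 ms per poll */

	while (!pc->awake && timeout--)
		delay_us(1000);		/* wait 1ms */

	/* On failure, bp_trace records how far the AP got before stalling. */
	return (pc->awake ? 0 : -1);
}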
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 712967f..ca14f46 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -115,7 +115,7 @@ extern unsigned char _end[];
extern uint32_t *bootinfo;
#ifdef SMP
-extern uint32_t kernload_ap;
+extern uint32_t bp_kernload;
#endif
vm_paddr_t kernload;
@@ -967,10 +967,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
debugf("mmu_booke_bootstrap: entered\n");
#ifdef SMP
- kernload_ap = kernload;
+ bp_kernload = kernload;
#endif
-
/* Initialize invalidation mutex */
mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
@@ -981,8 +980,13 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
* Align kernel start and end address (kernel image).
* Note that kernel end does not necessarily relate to kernsize.
* kernsize is the size of the kernel that is actually mapped.
+ * Also note that "start - 1" is deliberate. With SMP, the
+ * entry point is exactly one page past the actual load address,
+ * so trunc_page() has no effect and we would end up a page too
+ * high. Since the ELF header always sits between the load address
+ * and the entry point, we can safely subtract 1 to compensate.
*/
- kernstart = trunc_page(start);
+ kernstart = trunc_page(start - 1);
data_start = round_page(kernelend);
data_end = data_start;
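To see why the subtraction works, here is a small self-contained example with made-up addresses (a load address of 0x01000000 and an entry point one page later):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096
#define trunc_page(x)	((uintptr_t)(x) & ~(uintptr_t)(PAGE_SIZE - 1))

int
main(void)
{
	uintptr_t load  = 0x01000000;		/* hypothetical load address */
	uintptr_t start = load + PAGE_SIZE;	/* SMP entry point, one page in */

	/* start is page-aligned, so trunc_page() is a no-op: 0x1001000. */
	printf("%#lx\n", (unsigned long)trunc_page(start));

	/* start - 1 rounds down past the ELF header to 0x1000000. */
	printf("%#lx\n", (unsigned long)trunc_page(start - 1));
	return (0);
}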
@@ -1233,9 +1237,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
* entries, but for pte_vatopa() to work correctly with kernel area
* addresses.
*/
- for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
+ for (va = kernstart; va < data_end; va += PAGE_SIZE) {
pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
- pte->rpn = kernload + (va - KERNBASE);
+ pte->rpn = kernload + (va - kernstart);
pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
PTE_VALID;
}
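The net effect of using kernstart instead of KERNBASE in this loop is that the VA-to-PA relation stays correct even when the mapped range starts a page below KERNBASE; the arithmetic is sketched below (names illustrative):

/* Mirrors pte->rpn = kernload + (va - kernstart) from the loop above. */
static uint32_t
kernel_va_to_pa(uint32_t va, uint32_t kernstart, uint32_t kernload)
{
	return (kernload + (va - kernstart));
}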
@@ -1397,9 +1401,7 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
(va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
- flags = 0;
- flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
- flags |= PTE_M;
+ flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);