summaryrefslogtreecommitdiffstats
path: root/sys/powerpc
diff options
context:
space:
mode:
authormarcel <marcel@FreeBSD.org>2012-11-03 22:02:12 +0000
committermarcel <marcel@FreeBSD.org>2012-11-03 22:02:12 +0000
commit35b3593ff24bff29174560ecf0052522ba298d96 (patch)
tree05f3b3c36172a5e5f25a6d4e3c6e41fc8b4baa4a /sys/powerpc
parent85823a3b2e747b0343cd12c3f6750c0f809cff56 (diff)
downloadFreeBSD-src-35b3593ff24bff29174560ecf0052522ba298d96.zip
FreeBSD-src-35b3593ff24bff29174560ecf0052522ba298d96.tar.gz
1. Have the APs initialize the TLB1 entries from what has been
programmed on the BSP during (early) boot. This makes sure that the APs get configured the same as the BSP, irrespective of how FreeBSD was loaded. 2. Make sure to flush the dcache after writing the TLB1 entries to the boot page. The APs aren't part of the coherency domain just yet. 3. Set pmap_bootstrapped after calling pmap_bootstrap(). The FDT code now maps the devices (like OF), and this resulted in a panic. 4. Since we pre-wire the CCSR, make sure not to map chunks of it in pmap_mapdev().
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/booke/locore.S79
-rw-r--r--sys/powerpc/booke/machdep.c1
-rw-r--r--sys/powerpc/booke/platform_bare.c40
-rw-r--r--sys/powerpc/booke/pmap.c27
4 files changed, 89 insertions, 58 deletions
diff --git a/sys/powerpc/booke/locore.S b/sys/powerpc/booke/locore.S
index 16a9196..631796d 100644
--- a/sys/powerpc/booke/locore.S
+++ b/sys/powerpc/booke/locore.S
@@ -126,9 +126,11 @@ __start:
bl tlb1_find_current /* the entry found is returned in r29 */
bl tlb1_inval_all_but_current
+
/*
* Create temporary mapping in AS=1 and switch to it
*/
+ addi %r3, %r29, 1
bl tlb1_temp_mapping_as1
mfmsr %r3
@@ -242,19 +244,21 @@ done_mapping:
__boot_page:
bl 1f
- .globl bp_trace
-bp_trace:
+ .globl bp_ntlb1s
+bp_ntlb1s:
.long 0
- .globl bp_kernload
-bp_kernload:
- .long 0
+ .globl bp_tlb1
+bp_tlb1:
+ .space 4 * 3 * 16
+
+ .globl bp_tlb1_end
+bp_tlb1_end:
/*
* Initial configuration
*/
-1:
- mflr %r31 /* r31 hold the address of bp_trace */
+1: mflr %r31 /* r31 hold the address of bp_ntlb1s */
/* Set HIDs */
lis %r3, HID0_E500_DEFAULT_SET@h
@@ -283,9 +287,11 @@ bp_kernload:
bl tlb1_find_current /* the entry number found is in r29 */
bl tlb1_inval_all_but_current
+
/*
* Create temporary translation in AS=1 and switch to it
*/
+ lwz %r3, 0(%r31)
bl tlb1_temp_mapping_as1
mfmsr %r3
@@ -306,44 +312,34 @@ bp_kernload:
/*
* Setup final mapping in TLB1[1] and switch to it
*/
- /* Final kernel mapping, map in 16 MB of RAM */
- lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
- li %r4, 0 /* Entry 0 */
- rlwimi %r3, %r4, 16, 4, 15
+ lwz %r6, 0(%r31)
+ addi %r5, %r31, 4
+ li %r4, 0
+
+4: lis %r3, MAS0_TLBSEL1@h
+ rlwimi %r3, %r4, 16, 12, 15
mtspr SPR_MAS0, %r3
isync
-
- li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
- oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
- mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
+ lwz %r3, 0(%r5)
+ mtspr SPR_MAS1, %r3
isync
-
- lis %r3, KERNBASE@h
- ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
- ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
+ lwz %r3, 4(%r5)
mtspr SPR_MAS2, %r3
isync
-
- /* Retrieve kernel load [physical] address from bp_kernload */
- bl 4f
-4: mflr %r3
- rlwinm %r3, %r3, 0, 0, 19
- lis %r4, bp_kernload@h
- ori %r4, %r4, bp_kernload@l
- lis %r5, __boot_page@h
- ori %r5, %r5, __boot_page@l
- sub %r4, %r4, %r5 /* offset of bp_kernload within __boot_page */
- lwzx %r3, %r4, %r3
-
- /* Set RPN and protection */
- ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
+ lwz %r3, 8(%r5)
mtspr SPR_MAS3, %r3
isync
tlbwe
isync
msync
+ addi %r5, %r5, 12
+ addi %r4, %r4, 1
+ cmpw %r4, %r6
+ blt 4b
/* Switch to the final mapping */
+ lis %r5, __boot_page@ha
+ ori %r5, %r5, __boot_page@l
bl 5f
5: mflr %r3
rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
@@ -460,11 +456,14 @@ tlb1_inval_entry:
blr
/*
- * r29 current entry number
- * r28 returned temp entry
- * r3-r5 scratched
+ * r3 entry of temp translation
+ * r29 entry of current translation
+ * r28 returns temp entry passed in r3
+ * r4-r5 scratched
*/
tlb1_temp_mapping_as1:
+ mr %r28, %r3
+
/* Read our current translation */
lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
rlwimi %r3, %r29, 16, 12, 15 /* Select our current entry */
@@ -472,14 +471,8 @@ tlb1_temp_mapping_as1:
isync
tlbre
- /*
- * Prepare and write temp entry
- *
- * FIXME this is not robust against overflow i.e. when the current
- * entry is the last in TLB1
- */
+ /* Prepare and write temp entry */
lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
- addi %r28, %r29, 1 /* Use next entry. */
rlwimi %r3, %r28, 16, 12, 15 /* Select temp entry */
mtspr SPR_MAS0, %r3
isync
diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c
index ad49831..1522f9f 100644
--- a/sys/powerpc/booke/machdep.c
+++ b/sys/powerpc/booke/machdep.c
@@ -413,6 +413,7 @@ booke_init(uint32_t arg1, uint32_t arg2)
/* Initialise virtual memory. */
pmap_mmu_install(MMU_TYPE_BOOKE, 0);
pmap_bootstrap((uintptr_t)kernel_text, end);
+ pmap_bootstrapped = 1;
debugf("MSR = 0x%08x\n", mfmsr());
#if defined(BOOKE_E500)
//tlb1_print_entries();
diff --git a/sys/powerpc/booke/platform_bare.c b/sys/powerpc/booke/platform_bare.c
index ebc5e11..c5739f7 100644
--- a/sys/powerpc/booke/platform_bare.c
+++ b/sys/powerpc/booke/platform_bare.c
@@ -55,9 +55,11 @@ __FBSDID("$FreeBSD$");
#ifdef SMP
extern void *ap_pcpu;
+extern vm_paddr_t kernload; /* Kernel physical load address */
extern uint8_t __boot_page[]; /* Boot page body */
-extern uint32_t bp_kernload; /* Kernel physical load address */
-extern uint32_t bp_trace; /* AP boot trace field */
+extern uint32_t bp_ntlb1s;
+extern uint32_t bp_tlb1[];
+extern uint32_t bp_tlb1_end[];
#endif
extern uint32_t *bootinfo;
@@ -248,8 +250,9 @@ static int
bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
+ uint32_t *tlb1;
uint32_t bptr, eebpcr;
- int timeout;
+ int i, timeout;
eebpcr = ccsr_read4(OCP85XX_EEBPCR);
if ((eebpcr & (1 << (pc->pc_cpuid + 24))) != 0) {
@@ -259,18 +262,37 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
}
ap_pcpu = pc;
- __asm __volatile("msync; isync");
+
+ i = 0;
+ tlb1 = bp_tlb1;
+ while (i < bp_ntlb1s && tlb1 < bp_tlb1_end) {
+ mtspr(SPR_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(i));
+ __asm __volatile("isync; tlbre");
+ tlb1[0] = mfspr(SPR_MAS1);
+ tlb1[1] = mfspr(SPR_MAS2);
+ tlb1[2] = mfspr(SPR_MAS3);
+ i++;
+ tlb1 += 3;
+ }
+ if (i < bp_ntlb1s)
+ bp_ntlb1s = i;
/*
* Set BPTR to the physical address of the boot page
*/
- bptr = ((uint32_t)__boot_page - KERNBASE) + bp_kernload;
- ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000);
+ bptr = ((uint32_t)__boot_page - KERNBASE) + kernload;
+ KASSERT((bptr & 0xfff) == 0,
+ ("%s: boot page is not aligned (%#x)", __func__, bptr));
+ bptr = (bptr >> 12) | 0x80000000u;
+ ccsr_write4(OCP85XX_BPTR, bptr);
+ __asm __volatile("isync; msync");
+
+ /* Flush caches to have our changes hit DRAM. */
+ cpu_flush_dcache(__boot_page, 4096);
/*
* Release AP from hold-off state
*/
- bp_trace = 0;
eebpcr |= (1 << (pc->pc_cpuid + 24));
ccsr_write4(OCP85XX_EEBPCR, eebpcr);
__asm __volatile("isync; msync");
@@ -285,10 +307,10 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
* usable otherwise.
*/
ccsr_write4(OCP85XX_BPTR, 0);
+ __asm __volatile("isync; msync");
if (!pc->pc_awake)
- printf("SMP: CPU %d didn't wake up (trace code %#x).\n",
- pc->pc_awake, bp_trace);
+ printf("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid);
return ((pc->pc_awake) ? 0 : EBUSY);
#else
/* No SMP support */
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 060a7f2..3931904 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -111,9 +111,10 @@ extern unsigned char _end[];
extern uint32_t *bootinfo;
#ifdef SMP
-extern uint32_t bp_kernload;
+extern uint32_t bp_ntlb1s;
#endif
+vm_paddr_t ccsrbar_pa;
vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;
@@ -962,10 +963,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
debugf("mmu_booke_bootstrap: entered\n");
-#ifdef SMP
- bp_kernload = kernload;
-#endif
-
/* Initialize invalidation mutex */
mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
@@ -1279,7 +1276,7 @@ pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
* have the snapshot of its contents in the s/w tlb1[] table, so use
* these values directly to (re)program AP's TLB1 hardware.
*/
- for (i = 0; i < tlb1_idx; i ++) {
+ for (i = bp_ntlb1s; i < tlb1_idx; i++) {
/* Skip invalid entries */
if (!(tlb1[i].mas1 & MAS1_VALID))
continue;
@@ -2601,6 +2598,18 @@ mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
uintptr_t va;
vm_size_t sz;
+ /*
+ * CCSR is premapped. Note that (pa + size - 1) is there to make sure
+ * we don't wrap around. Devices on the local bus typically extend all
+ * the way up to and including 0xffffffff. In that case (pa + size)
+ * would be 0. This creates a false positive (i.e. we think it's
+ * within the CCSR) and not create a mapping.
+ */
+ if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) {
+ va = CCSRBAR_VA + (pa - ccsrbar_pa);
+ return ((void *)va);
+ }
+
va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
res = (void *)va;
@@ -3011,6 +3020,8 @@ tlb1_init(vm_offset_t ccsrbar)
uint32_t tsz;
u_int i;
+ ccsrbar_pa = ccsrbar;
+
if (bootinfo != NULL && bootinfo[0] != 1) {
tlb1_idx = *((uint16_t *)(bootinfo + 8));
} else
@@ -3042,6 +3053,10 @@ tlb1_init(vm_offset_t ccsrbar)
/* Map in CCSRBAR. */
tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
+#ifdef SMP
+ bp_ntlb1s = tlb1_idx;
+#endif
+
/* Purge the remaining entries */
for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
tlb1_write_entry(i);
OpenPOWER on IntegriCloud