Diffstat (limited to 'sys/powerpc/booke')
-rw-r--r--  sys/powerpc/booke/locore.S        | 68
-rw-r--r--  sys/powerpc/booke/machdep.c       | 53
-rw-r--r--  sys/powerpc/booke/platform_bare.c | 62
-rw-r--r--  sys/powerpc/booke/pmap.c          | 40
4 files changed, 118 insertions, 105 deletions
diff --git a/sys/powerpc/booke/locore.S b/sys/powerpc/booke/locore.S
index 3ac4a1a..de7effc 100644
--- a/sys/powerpc/booke/locore.S
+++ b/sys/powerpc/booke/locore.S
@@ -83,17 +83,18 @@ __start:
  * locore registers use:
  * r1      : stack pointer
  * r2      : trace pointer (AP only, for early diagnostics)
- * r3-r27  : scratch registers
- * r28     : kernload
- * r29     : temp TLB1 entry
- * r30     : initial TLB1 entry we started in
- * r31     : metadata pointer
+ * r3-r26  : scratch registers
+ * r27     : kernload
+ * r28     : temp TLB1 entry
+ * r29     : initial TLB1 entry we started in
+ * r30-r31 : arguments (metadata pointer)
  */

 /*
- * Keep metadata ptr in r31 for later use.
+ * Keep arguments in r30 & r31 for later use.
  */
-    mr %r31, %r3
+    mr %r30, %r3
+    mr %r31, %r4

 /*
  * Initial cleanup
@@ -120,7 +121,7 @@ __start:
      */
     bl 1f
 1:  mflr %r3
-    bl tlb1_find_current    /* the entry number found is returned in r30 */
+    bl tlb1_find_current    /* the entry found is returned in r29 */

     bl tlb1_inval_all_but_current
 /*
@@ -140,7 +141,7 @@ __start:
 /*
  * Invalidate initial entry
  */
-    mr %r3, %r30
+    mr %r3, %r29
     bl tlb1_inval_entry

 /*
@@ -148,7 +149,7 @@ __start:
  */
     /* Final kernel mapping, map in 16 MB of RAM */
     lis %r3, MAS0_TLBSEL1@h     /* Select TLB1 */
-    li %r4, 1                   /* Entry 1 */
+    li %r4, 0                   /* Entry 0 */
     rlwimi %r3, %r4, 16, 12, 15
     mtspr SPR_MAS0, %r3
     isync
@@ -170,7 +171,7 @@ __start:
     bl 3f
 3:  mflr %r4                    /* Use current address */
     rlwinm %r4, %r4, 0, 0, 7    /* 16MB alignment mask */
-    mr %r28, %r4                /* Keep kernel load address */
+    mr %r27, %r4                /* Keep kernel load address */
     ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
     mtspr SPR_MAS3, %r4         /* Set RPN and protection */
     isync
@@ -193,7 +194,7 @@ __start:
 /*
  * Invalidate temp mapping
  */
-    mr %r3, %r29
+    mr %r3, %r28
     bl tlb1_inval_entry

 /*
@@ -201,7 +202,7 @@ __start:
  */
     lis %r3, kernload@ha
     addi %r3, %r3, kernload@l
-    stw %r28, 0(%r3)
+    stw %r27, 0(%r3)
 #ifdef SMP
     /*
      * APs need a separate copy of kernload info within the __boot_page
@@ -210,7 +211,7 @@ __start:
      */
     lis %r3, kernload_ap@ha
     addi %r3, %r3, kernload_ap@l
-    stw %r28, 0(%r3)
+    stw %r27, 0(%r3)
     msync
 #endif

@@ -229,14 +230,11 @@ __start:
 /*
  * Set up arguments and jump to system initialization code
  */
-    lis %r3, kernel_text@ha
-    addi %r3, %r3, kernel_text@l
-    lis %r4, _end@ha
-    addi %r4, %r4, _end@l
-    mr %r5, %r31                /* metadata ptr */
+    mr %r3, %r30
+    mr %r4, %r31

     /* Prepare e500 core */
-    bl e500_init
+    bl booke_init

     /* Switch to thread0.td_kstack now */
     mr %r1, %r3
@@ -290,7 +288,7 @@ kernload_ap:
      */
     bl 2f
 2:  mflr %r3
-    bl tlb1_find_current    /* the entry number found is in r30 */
+    bl tlb1_find_current    /* the entry number found is in r29 */

     bl tlb1_inval_all_but_current
 /*
@@ -310,7 +308,7 @@ kernload_ap:
 /*
  * Invalidate initial entry
  */
-    mr %r3, %r30
+    mr %r3, %r29
     bl tlb1_inval_entry

 /*
@@ -318,7 +316,7 @@ kernload_ap:
  */
     /* Final kernel mapping, map in 16 MB of RAM */
     lis %r3, MAS0_TLBSEL1@h     /* Select TLB1 */
-    li %r4, 1                   /* Entry 1 */
+    li %r4, 0                   /* Entry 0 */
     rlwimi %r3, %r4, 16, 4, 15
     mtspr SPR_MAS0, %r3
     isync
@@ -373,7 +371,7 @@ kernload_ap:
 /*
  * Invalidate temp mapping
  */
-    mr %r3, %r29
+    mr %r3, %r28
     bl tlb1_inval_entry

 /*
@@ -425,7 +423,7 @@ tlb_inval_all:
     blr

 /*
- * expects address to look up in r3, returns entry number in r30
+ * expects address to look up in r3, returns entry number in r29
  *
  * FIXME: the hidden assumption is we are now running in AS=0, but we should
  * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
@@ -437,7 +435,7 @@ tlb1_find_current:
     isync
     tlbsx 0, %r3
     mfspr %r17, SPR_MAS0
-    rlwinm %r30, %r17, 16, 20, 31       /* MAS0[ESEL] -> r30 */
+    rlwinm %r29, %r17, 16, 20, 31       /* MAS0[ESEL] -> r29 */

     /* Make sure we have IPROT set on the entry */
     mfspr %r17, SPR_MAS1
@@ -470,14 +468,14 @@ tlb1_inval_entry:
     blr

 /*
- * r30 current entry number
- * r29 returned temp entry
+ * r29 current entry number
+ * r28 returned temp entry
  * r3-r5 scratched
  */
 tlb1_temp_mapping_as1:
     /* Read our current translation */
     lis %r3, MAS0_TLBSEL1@h         /* Select TLB1 */
-    rlwimi %r3, %r30, 16, 12, 15    /* Select our current entry */
+    rlwimi %r3, %r29, 16, 12, 15    /* Select our current entry */
     mtspr SPR_MAS0, %r3
     isync
     tlbre
@@ -489,12 +487,8 @@ tlb1_temp_mapping_as1:
      * entry is the last in TLB1
      */
     lis %r3, MAS0_TLBSEL1@h         /* Select TLB1 */
-    addi %r29, %r30, 1              /* Use next entry. */
-    li %r4, 1
-    cmpw %r4, %r29
-    bne 1f
-    addi %r29, %r29, 1
-1:  rlwimi %r3, %r29, 16, 12, 15    /* Select temp entry */
+    addi %r28, %r29, 1              /* Use next entry. */
+    rlwimi %r3, %r28, 16, 12, 15    /* Select temp entry */
     mtspr SPR_MAS0, %r3
     isync
     mfspr %r5, SPR_MAS1
@@ -514,7 +508,7 @@ tlb1_temp_mapping_as1:
  * Loops over TLB1, invalidates all entries skipping the one which currently
  * maps this code.
  *
- * r30 current entry
+ * r29 current entry
  * r3-r5 scratched
  */
 tlb1_inval_all_but_current:
@@ -528,7 +522,7 @@ tlb1_inval_all_but_current:
     isync
     tlbre
     mfspr %r5, SPR_MAS1
-    cmpw %r4, %r30              /* our current entry? */
+    cmpw %r4, %r29              /* our current entry? */
     beq 2f
     rlwinm %r5, %r5, 0, 2, 31   /* clear VALID and IPROT bits */
     mtspr SPR_MAS1, %r5
diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c
index f2dbacf..c2b5e6f 100644
--- a/sys/powerpc/booke/machdep.c
+++ b/sys/powerpc/booke/machdep.c
@@ -190,7 +190,7 @@ SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_e500_startup, NULL);

 void print_kernel_section_addr(void);
 void print_kenv(void);
-u_int e500_init(u_int32_t, u_int32_t, void *);
+u_int booke_init(uint32_t, uint32_t);

 static void
 cpu_e500_startup(void *dummy)
@@ -276,19 +276,41 @@ print_kernel_section_addr(void)
 }

 u_int
-e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
+booke_init(uint32_t arg1, uint32_t arg2)
 {
     struct pcpu *pc;
-    void *kmdp;
+    void *kmdp, *mdp;
     vm_offset_t dtbp, end;
     uint32_t csr;

     kmdp = NULL;

-    end = endkernel;
+    end = (uintptr_t)_end;
     dtbp = (vm_offset_t)NULL;

     /*
+     * Handle the various ways we can get loaded and started:
+     * - FreeBSD's loader passes the pointer to the metadata
+     *   in arg1, with arg2 undefined. arg1 has a value that's
+     *   relative to the kernel's link address (i.e. larger
+     *   than 0xc0000000).
+     * - Juniper's loader passes the metadata pointer in arg2
+     *   and sets arg1 to zero. This is to signal that the
+     *   loader maps the kernel and starts it at its link
+     *   address (unlike the FreeBSD loader).
+     * - U-Boot passes the standard argc and argv parameters
+     *   in arg1 and arg2 (resp). arg1 is between 1 and some
+     *   relatively small number, such as 64K. arg2 is the
+     *   physical address of the argv vector.
+     */
+    if (arg1 > (uintptr_t)kernel_text)  /* FreeBSD loader */
+        mdp = (void *)arg1;
+    else if (arg1 == 0)                 /* Juniper loader */
+        mdp = (void *)arg2;
+    else                                /* U-Boot */
+        mdp = NULL;
+
+    /*
      * Parse metadata and fetch parameters.
      */
     if (mdp != NULL) {
@@ -309,17 +331,8 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
 #endif
         }
     } else {
-        /*
-         * We should scream but how? Cannot even output anything...
-         */
-
-        /*
-         * FIXME add return value and handle in the locore so we can
-         * return to the loader maybe? (this seems not very easy to
-         * restore everything as the TLB have all been reprogrammed
-         * in the locore etc...)
-         */
-        while (1);
+        bzero(__sbss_start, __sbss_end - __sbss_start);
+        bzero(__bss_start, _end - __bss_start);
     }

 #if defined(FDT_DTB_STATIC)
@@ -368,9 +381,7 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
     cninit();

     /* Print out some debug info... */
-    debugf("e500_init: console initialized\n");
-    debugf(" arg1 startkernel = 0x%08x\n", startkernel);
-    debugf(" arg2 endkernel = 0x%08x\n", endkernel);
+    debugf("%s: console initialized\n", __func__);
     debugf(" arg3 mdp = 0x%08x\n", (u_int32_t)mdp);
     debugf(" end = 0x%08x\n", (u_int32_t)end);
     debugf(" boothowto = 0x%08x\n", boothowto);
@@ -403,7 +414,7 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)

     /* Initialise virtual memory. */
     pmap_mmu_install(MMU_TYPE_BOOKE, 0);
-    pmap_bootstrap(startkernel, end);
+    pmap_bootstrap((uintptr_t)kernel_text, end);
     debugf("MSR = 0x%08x\n", mfmsr());
     //tlb1_print_entries();
     //tlb1_print_tlbentries();
@@ -449,8 +460,8 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
     printf("L1 I-cache %sabled\n",
         (csr & L1CSR1_ICE) ? "en" : "dis");

-    debugf("e500_init: SP = 0x%08x\n", ((uintptr_t)thread0.td_pcb - 16) & ~15);
-    debugf("e500_init: e\n");
+    debugf("%s: SP = 0x%08x\n", __func__,
+        ((uintptr_t)thread0.td_pcb - 16) & ~15);

     return (((uintptr_t)thread0.td_pcb - 16) & ~15);
 }
diff --git a/sys/powerpc/booke/platform_bare.c b/sys/powerpc/booke/platform_bare.c
index 8e03bd3..d76664e 100644
--- a/sys/powerpc/booke/platform_bare.c
+++ b/sys/powerpc/booke/platform_bare.c
@@ -104,10 +104,22 @@ bare_probe(platform_t plat)
     int i, law_max, tgt;

     ver = SVR_VER(mfspr(SPR_SVR));
-    if (ver == SVR_MPC8572E || ver == SVR_MPC8572)
+    switch (ver & ~0x0008) {    /* Mask Security Enabled bit */
+    case SVR_P4080:
+        maxcpu = 8;
+        break;
+    case SVR_P4040:
+        maxcpu = 4;
+        break;
+    case SVR_MPC8572:
+    case SVR_P1020:
+    case SVR_P2020:
         maxcpu = 2;
-    else
+        break;
+    default:
         maxcpu = 1;
+        break;
+    }

     /*
      * Clear local access windows. Skip DRAM entries, so we don't shoot
@@ -166,8 +178,11 @@ bare_timebase_freq(platform_t plat, struct cpuref *cpuref)
     phandle_t cpus, child;
     pcell_t freq;

-    /* Backward compatibility. See 8-STABLE. */
-    ticks = bootinfo[3] >> 3;
+    if (bootinfo != NULL) {
+        /* Backward compatibility. See 8-STABLE. */
+        ticks = bootinfo[3] >> 3;
+    } else
+        ticks = 0;

     if ((cpus = OF_finddevice("/cpus")) == 0)
         goto out;
@@ -241,7 +256,7 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
     int timeout;

     eebpcr = ccsr_read4(OCP85XX_EEBPCR);
-    if ((eebpcr & (pc->pc_cpumask << 24)) != 0) {
+    if ((eebpcr & (1 << (pc->pc_cpuid + 24))) != 0) {
         printf("%s: CPU=%d already out of hold-off state!\n", __func__, pc->pc_cpuid);
         return (ENXIO);
     }
@@ -259,7 +274,7 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
     /*
      * Release AP from hold-off state
      */
-    eebpcr |= (pc->pc_cpumask << 24);
+    eebpcr |= (1 << (pc->pc_cpuid + 24));
     ccsr_write4(OCP85XX_EEBPCR, eebpcr);
     __asm __volatile("isync; msync");

@@ -277,24 +292,23 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
 static void
 e500_reset(platform_t plat)
 {
-    uint32_t ver = SVR_VER(mfspr(SPR_SVR));
-
-    if (ver == SVR_MPC8572E || ver == SVR_MPC8572 ||
-        ver == SVR_MPC8548E || ver == SVR_MPC8548)
-        /* Systems with dedicated reset register */
-        ccsr_write4(OCP85XX_RSTCR, 2);
-    else {
-        /* Clear DBCR0, disables debug interrupts and events. */
-        mtspr(SPR_DBCR0, 0);
-        __asm __volatile("isync");
-
-        /* Enable Debug Interrupts in MSR. */
-        mtmsr(mfmsr() | PSL_DE);
-
-        /* Enable debug interrupts and issue reset. */
-        mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM |
-            DBCR0_RST_SYSTEM);
-    }
+
+    /*
+     * Try the dedicated reset register first.
+     * If the SoC doesn't have one, we'll fall
+     * back to using the debug control register.
+     */
+    ccsr_write4(OCP85XX_RSTCR, 2);
+
+    /* Clear DBCR0, disables debug interrupts and events. */
+    mtspr(SPR_DBCR0, 0);
+    __asm __volatile("isync");
+
+    /* Enable Debug Interrupts in MSR. */
+    mtmsr(mfmsr() | PSL_DE);
+
+    /* Enable debug interrupts and issue reset. */
+    mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);

     printf("Reset failed...\n");
     while (1);
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 2fffa3f..e1cd071 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/vmmeter.h>

@@ -91,9 +92,6 @@ __FBSDID("$FreeBSD$");

 #include "mmu_if.h"

-#define DEBUG
-#undef DEBUG
-
 #ifdef DEBUG
 #define debugf(fmt, args...) printf(fmt, ##args)
 #else
@@ -393,7 +391,7 @@ tlb_miss_lock(void)
     if (!smp_started)
         return;

-    SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+    STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
         if (pc != pcpup) {

             CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
@@ -419,7 +417,7 @@ tlb_miss_unlock(void)
     if (!smp_started)
         return;

-    SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+    STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
         if (pc != pcpup) {
             CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
                 __func__, pc->pc_cpuid);
@@ -946,7 +944,7 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 /**************************************************************************/

 /*
- * This is called during e500_init, before the system is really initialized.
+ * This is called during booke_init, before the system is really initialized.
  */
 static void
 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
@@ -1228,7 +1226,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
             PTE_VALID;
     }
     /* Mark kernel_pmap active on all CPUs */
-    kernel_pmap->pm_active = ~0;
+    CPU_FILL(&kernel_pmap->pm_active);

     /*******************************************************/
     /* Final setup */
@@ -1483,7 +1481,7 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
     PMAP_LOCK_INIT(pmap);
     for (i = 0; i < MAXCPU; i++)
         pmap->pm_tid[i] = TID_NONE;
-    pmap->pm_active = 0;
+    CPU_ZERO(&kernel_pmap->pm_active);
     bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
     bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
     TAILQ_INIT(&pmap->pm_ptbl_list);
@@ -1838,7 +1836,7 @@ mmu_booke_activate(mmu_t mmu, struct thread *td)

     mtx_lock_spin(&sched_lock);

-    atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
+    CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
     PCPU_SET(curpmap, pmap);

     if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
@@ -1867,7 +1865,9 @@ mmu_booke_deactivate(mmu_t mmu, struct thread *td)
     CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
         __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

-    atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
+    sched_pin();
+    CPU_NAND_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+    sched_unpin();

     PCPU_SET(curpmap, NULL);
 }
@@ -3019,24 +3019,18 @@ tlb1_init(vm_offset_t ccsrbar)
 {
     uint32_t mas0;

-    /* TLB1[1] is used to map the kernel. Save that entry. */
-    mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
+    /* TLB1[0] is used to map the kernel. Save that entry. */
+    mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
     mtspr(SPR_MAS0, mas0);
     __asm __volatile("isync; tlbre");

-    tlb1[1].mas1 = mfspr(SPR_MAS1);
-    tlb1[1].mas2 = mfspr(SPR_MAS2);
-    tlb1[1].mas3 = mfspr(SPR_MAS3);
+    tlb1[0].mas1 = mfspr(SPR_MAS1);
+    tlb1[0].mas2 = mfspr(SPR_MAS2);
+    tlb1[0].mas3 = mfspr(SPR_MAS3);

-    /* Map in CCSRBAR in TLB1[0] */
-    tlb1_idx = 0;
+    /* Map in CCSRBAR in TLB1[1] */
+    tlb1_idx = 1;
     tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);

-    /*
-     * Set the next available TLB1 entry index. Note TLB[1] is reserved
-     * for initial mapping of kernel text+data, which was set early in
-     * locore, we need to skip this [busy] entry.
-     */
-    tlb1_idx = 2;
-
     /* Setup TLB miss defaults */
     set_mas4_defaults();