summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorneel <neel@FreeBSD.org>2010-02-09 06:24:43 +0000
committerneel <neel@FreeBSD.org>2010-02-09 06:24:43 +0000
commit91212ae23c60e92beb6a2bf31304fe0f44d84cee (patch)
tree6b8fc2551b36316f7e2c7437b3ef3de98dbabfb2
parent764ce56acec6e1b162434305a36821de3b6e3ded (diff)
downloadFreeBSD-src-91212ae23c60e92beb6a2bf31304fe0f44d84cee.zip
FreeBSD-src-91212ae23c60e92beb6a2bf31304fe0f44d84cee.tar.gz
SMP support for the mips port.
The platform that supports SMP currently is a SWARM with a dual-core Sibyte processor. The kernel config file to use is SWARM_SMP. Reviewed by: imp, rrs
-rw-r--r--sys/conf/files.mips1
-rw-r--r--sys/mips/conf/SWARM_SMP7
-rw-r--r--sys/mips/include/asm.h9
-rw-r--r--sys/mips/include/cpu.h2
-rw-r--r--sys/mips/include/hwfunc.h47
-rw-r--r--sys/mips/include/intr_machdep.h10
-rw-r--r--sys/mips/include/pcpu.h7
-rw-r--r--sys/mips/include/smp.h9
-rw-r--r--sys/mips/mips/intr_machdep.c38
-rw-r--r--sys/mips/mips/machdep.c31
-rw-r--r--sys/mips/mips/mp_machdep.c365
-rw-r--r--sys/mips/mips/mpboot.S72
-rw-r--r--sys/mips/mips/pmap.c9
-rw-r--r--sys/mips/sibyte/sb_asm.S18
-rw-r--r--sys/mips/sibyte/sb_machdep.c75
-rw-r--r--sys/mips/sibyte/sb_scd.c42
-rw-r--r--sys/mips/sibyte/sb_scd.h6
-rw-r--r--sys/mips/sibyte/sb_zbbus.c78
18 files changed, 604 insertions, 222 deletions
diff --git a/sys/conf/files.mips b/sys/conf/files.mips
index 89da2a7..6b2ece4 100644
--- a/sys/conf/files.mips
+++ b/sys/conf/files.mips
@@ -21,6 +21,7 @@
# ----------------------------------------------------------------------
mips/mips/machdep.c standard
mips/mips/mp_machdep.c optional smp
+mips/mips/mpboot.S optional smp
mips/mips/psraccess.S standard
# ----------------------------------------------------------------------
# Phase 3
diff --git a/sys/mips/conf/SWARM_SMP b/sys/mips/conf/SWARM_SMP
new file mode 100644
index 0000000..ec76ce4
--- /dev/null
+++ b/sys/mips/conf/SWARM_SMP
@@ -0,0 +1,7 @@
+#
+# $FreeBSD$
+#
+options SMP
+options PRINTF_BUFR_SIZE=128
+
+include SWARM
diff --git a/sys/mips/include/asm.h b/sys/mips/include/asm.h
index db6929d..3dd9aa4 100644
--- a/sys/mips/include/asm.h
+++ b/sys/mips/include/asm.h
@@ -497,17 +497,8 @@ _C_LABEL(x):
#define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif
-#ifdef SMP
- /*
- * FREEBSD_DEVELOPERS_FIXME
- * In multiprocessor case, store/retrieve the pcpu structure
- * address for current CPU in scratch register for fast access.
- */
-#error "Write GET_CPU_PCPU for SMP"
-#else
#define GET_CPU_PCPU(reg) \
lw reg, _C_LABEL(pcpup);
-#endif
/*
* Description of the setjmp buffer
diff --git a/sys/mips/include/cpu.h b/sys/mips/include/cpu.h
index c802bee..5a1cb9d 100644
--- a/sys/mips/include/cpu.h
+++ b/sys/mips/include/cpu.h
@@ -122,6 +122,8 @@
#define SOFT_INT_MASK (SOFT_INT_MASK_0 | SOFT_INT_MASK_1)
#define HW_INT_MASK (ALL_INT_MASK & ~SOFT_INT_MASK)
+#define soft_int_mask(softintr) (1 << ((softintr) + 8))
+#define hard_int_mask(hardintr) (1 << ((hardintr) + 10))
/*
* The bits in the cause register.
diff --git a/sys/mips/include/hwfunc.h b/sys/mips/include/hwfunc.h
index 16b1439..bbf3086 100644
--- a/sys/mips/include/hwfunc.h
+++ b/sys/mips/include/hwfunc.h
@@ -47,4 +47,51 @@ unsigned platform_get_timecount(struct timecounter *);
/* For hardware specific CPU initialization */
void platform_cpu_init(void);
void platform_secondary_init(void);
+
+#ifdef SMP
+
+/*
+ * Spin up the AP so that it starts executing MP bootstrap entry point: mpentry
+ *
+ * Returns 0 on success and non-zero on failure.
+ */
+int platform_start_ap(int processor_id);
+
+/*
+ * Platform-specific initialization that needs to be done when an AP starts
+ * running. This function is called from the MP bootstrap code in mpboot.S
+ */
+void platform_init_ap(int processor_id);
+
+/*
+ * Return a platform-specific interrupt number that is used to deliver IPIs.
+ *
+ * This hardware interrupt is used to deliver IPIs exclusively and must
+ * not be used for any other interrupt source.
+ */
+int platform_ipi_intrnum(void);
+
+/*
+ * Trigger an IPI interrupt on 'cpuid'.
+ */
+void platform_ipi_send(int cpuid);
+
+/*
+ * Quiesce the IPI interrupt source on the current cpu.
+ */
+void platform_ipi_clear(void);
+
+/*
+ * Return the processor id.
+ *
+ * Note that this function is called in early boot when stack is not available.
+ */
+extern int platform_processor_id(void);
+
+/*
+ * Return the number of processors available on this platform.
+ */
+extern int platform_num_processors(void);
+
+#endif /* SMP */
#endif /* !_MACHINE_HWFUNC_H_ */
diff --git a/sys/mips/include/intr_machdep.h b/sys/mips/include/intr_machdep.h
index d72828e..60e969d 100644
--- a/sys/mips/include/intr_machdep.h
+++ b/sys/mips/include/intr_machdep.h
@@ -60,6 +60,16 @@ void cpu_establish_softintr(const char *, driver_filter_t *, void (*)(void*),
void cpu_intr(struct trapframe *);
/*
+ * Allow a platform to override the default hard interrupt mask and unmask
+ * functions. The 'arg' can be cast safely to an 'int' and holds the mips
+ * hard interrupt number to mask or unmask.
+ */
+typedef void (*cpu_intr_mask_t)(void *arg);
+typedef void (*cpu_intr_unmask_t)(void *arg);
+void cpu_set_hardintr_mask_func(cpu_intr_mask_t func);
+void cpu_set_hardintr_unmask_func(cpu_intr_unmask_t func);
+
+/*
* Opaque datatype that represents intr counter
*/
typedef unsigned long* mips_intrcnt_t;
diff --git a/sys/mips/include/pcpu.h b/sys/mips/include/pcpu.h
index 3eb552d..b8928a1 100644
--- a/sys/mips/include/pcpu.h
+++ b/sys/mips/include/pcpu.h
@@ -55,6 +55,13 @@ extern struct pcpu *pcpup;
#define PCPU_SET(member,value) (PCPUP->pc_ ## member = (value))
#define PCPU_LAZY_INC(member) (++PCPUP->pc_ ## member)
+#ifdef SMP
+/*
+ * Instantiate the wired TLB entry at PCPU_TLB_ENTRY to map 'pcpu' at 'pcpup'.
+ */
+void mips_pcpu_tlb_init(struct pcpu *pcpu);
+#endif
+
#endif /* _KERNEL */
#endif /* !_MACHINE_PCPU_H_ */
diff --git a/sys/mips/include/smp.h b/sys/mips/include/smp.h
index d614dd3..346c863 100644
--- a/sys/mips/include/smp.h
+++ b/sys/mips/include/smp.h
@@ -20,7 +20,6 @@
/*
* Interprocessor interrupts for SMP.
*/
-#define IPI_INVLTLB 0x0001
#define IPI_RENDEZVOUS 0x0002
#define IPI_AST 0x0004
#define IPI_STOP 0x0008
@@ -28,13 +27,9 @@
#ifndef LOCORE
-extern u_int32_t boot_cpu_id;
-
-void ipi_selected(u_int cpus, u_int32_t ipi);
-void ipi_all_but_self(u_int32_t ipi);
-intrmask_t smp_handle_ipi(struct trapframe *frame);
+void ipi_selected(cpumask_t cpus, int ipi);
void smp_init_secondary(u_int32_t cpuid);
-void mips_ipi_send(int thread_id);
+void mpentry(void);
#endif /* !LOCORE */
#endif /* _KERNEL */
diff --git a/sys/mips/mips/intr_machdep.c b/sys/mips/mips/intr_machdep.c
index 530cc08..2dc302a 100644
--- a/sys/mips/mips/intr_machdep.c
+++ b/sys/mips/mips/intr_machdep.c
@@ -50,6 +50,9 @@ static mips_intrcnt_t mips_intr_counters[NSOFT_IRQS + NHARD_IRQS];
static int intrcnt_index;
+static cpu_intr_mask_t hardintr_mask_func;
+static cpu_intr_unmask_t hardintr_unmask_func;
+
mips_intrcnt_t
mips_intrcnt_create(const char* name)
{
@@ -128,38 +131,54 @@ cpu_init_interrupts()
}
void
+cpu_set_hardintr_mask_func(cpu_intr_mask_t func)
+{
+
+ hardintr_mask_func = func;
+}
+
+void
+cpu_set_hardintr_unmask_func(cpu_intr_unmask_t func)
+{
+
+ hardintr_unmask_func = func;
+}
+
+void
cpu_establish_hardintr(const char *name, driver_filter_t *filt,
void (*handler)(void*), void *arg, int irq, int flags, void **cookiep)
{
struct intr_event *event;
int error;
-#if 0
- printf("Establish HARD IRQ %d: filt %p handler %p arg %p\n",
- irq, filt, handler, arg);
-#endif
/*
* We have 6 levels, but thats 0 - 5 (not including 6)
*/
if (irq < 0 || irq >= NHARD_IRQS)
panic("%s called for unknown hard intr %d", __func__, irq);
+ if (hardintr_mask_func == NULL)
+ hardintr_mask_func = mips_mask_hard_irq;
+
+ if (hardintr_unmask_func == NULL)
+ hardintr_unmask_func = mips_unmask_hard_irq;
+
event = hardintr_events[irq];
if (event == NULL) {
error = intr_event_create(&event, (void *)(uintptr_t)irq, 0,
- irq, mips_mask_hard_irq, mips_unmask_hard_irq,
+ irq, hardintr_mask_func, hardintr_unmask_func,
NULL, NULL, "int%d", irq);
if (error)
return;
hardintr_events[irq] = event;
+ mips_unmask_hard_irq((void*)(uintptr_t)irq);
}
intr_event_add_handler(event, name, filt, handler, arg,
intr_priority(flags), flags, cookiep);
- mips_intrcnt_setname(mips_intr_counters[NSOFT_IRQS + irq], event->ie_fullname);
-
- mips_unmask_hard_irq((void*)(uintptr_t)irq);
+ mips_intrcnt_setname(mips_intr_counters[NSOFT_IRQS + irq],
+ event->ie_fullname);
}
void
@@ -185,14 +204,13 @@ cpu_establish_softintr(const char *name, driver_filter_t *filt,
if (error)
return;
softintr_events[irq] = event;
+ mips_unmask_soft_irq((void*)(uintptr_t)irq);
}
intr_event_add_handler(event, name, filt, handler, arg,
intr_priority(flags), flags, cookiep);
mips_intrcnt_setname(mips_intr_counters[irq], event->ie_fullname);
-
- mips_unmask_soft_irq((void*)(uintptr_t)irq);
}
void
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
index ba56057..ad1dab5 100644
--- a/sys/mips/mips/machdep.c
+++ b/sys/mips/mips/machdep.c
@@ -133,11 +133,7 @@ vm_offset_t kstack0;
char pcpu_space[MAXCPU][PAGE_SIZE * 2] \
__aligned(PAGE_SIZE * 2) __section(".data");
-#ifdef SMP
-struct pcpu *pcpup = 0; /* initialized in pmap_bootstrap() */
-#else
struct pcpu *pcpup = (struct pcpu *)pcpu_space;
-#endif
vm_offset_t phys_avail[PHYS_AVAIL_ENTRIES + 2];
vm_offset_t physmem_desc[PHYS_AVAIL_ENTRIES + 2];
@@ -419,22 +415,14 @@ mips_generic_reset()
((void(*)(void))(intptr_t)MIPS_VEC_RESET)();
}
-/*
- * Initialise a struct pcpu.
- */
+#ifdef SMP
void
-cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+mips_pcpu_tlb_init(struct pcpu *pcpu)
{
-#ifdef SMP
vm_paddr_t pa;
struct tlb tlb;
int lobits;
-#endif
-
- pcpu->pc_next_asid = 1;
- pcpu->pc_asid_generation = 1;
-#ifdef SMP
/*
* Map the pcpu structure at the virtual address 'pcpup'.
* We use a wired tlb index to do this one-time mapping.
@@ -446,6 +434,21 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
tlb.tlb_lo0 = mips_paddr_to_tlbpfn(pa) | lobits;
tlb.tlb_lo1 = mips_paddr_to_tlbpfn(pa + PAGE_SIZE) | lobits;
Mips_TLBWriteIndexed(PCPU_TLB_ENTRY, &tlb);
+}
+#endif
+
+/*
+ * Initialise a struct pcpu.
+ */
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+
+ pcpu->pc_next_asid = 1;
+ pcpu->pc_asid_generation = 1;
+#ifdef SMP
+ if ((vm_offset_t)pcpup >= VM_MIN_KERNEL_ADDRESS)
+ mips_pcpu_tlb_init(pcpu);
#endif
}
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
index bf32392..d8520cc 100644
--- a/sys/mips/mips/mp_machdep.c
+++ b/sys/mips/mips/mp_machdep.c
@@ -1,124 +1,110 @@
+/*-
+ * Copyright (c) 2009 Neelkanth Natu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include "opt_kstack_pages.h"
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/proc.h>
-#include <sys/cons.h>
#include <sys/lock.h>
-#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
-#include <sys/sysctl.h>
+#include <sys/sched.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
-#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
-#include <machine/atomic.h>
#include <machine/clock.h>
-#include <machine/md_var.h>
-#include <machine/pcb.h>
-#include <machine/pmap.h>
#include <machine/smp.h>
+#include <machine/hwfunc.h>
+#include <machine/intr_machdep.h>
+#include <machine/cache.h>
+static void *dpcpu;
static struct mtx ap_boot_mtx;
-extern struct pcpu __pcpu[];
-extern int num_tlbentries;
-void mips_start_timer(void);
-static volatile int aps_ready = 0;
-
-u_int32_t boot_cpu_id;
+static volatile int aps_ready;
+static volatile int mp_naps;
-void
-cpu_mp_announce(void)
+static void
+ipi_send(struct pcpu *pc, int ipi)
{
-}
-/*
- * To implement IPIs on MIPS CPU, we use the Interrupt Line 2 ( bit 4 of cause
- * register) and a bitmap to avoid redundant IPI interrupts. To interrupt a
- * set of CPUs, the sender routine runs in a ' loop ' sending interrupts to
- * all the specified CPUs. A single Mutex (smp_ipi_mtx) is used for all IPIs
- * that spinwait for delivery. This includes the following IPIs
- * IPI_RENDEZVOUS
- * IPI_INVLPG
- * IPI_INVLTLB
- * IPI_INVLRNG
- */
-
-/*
- * send an IPI to a set of cpus.
- */
-void
-ipi_selected(u_int32_t cpus, u_int ipi)
-{
- struct pcpu *pcpu;
- u_int cpuid, new_pending, old_pending;
+ CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);
- CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
+ atomic_set_32(&pc->pc_pending_ipis, ipi);
+ platform_ipi_send(pc->pc_cpuid);
- while ((cpuid = ffs(cpus)) != 0) {
- cpuid--;
- cpus &= ~(1 << cpuid);
- pcpu = pcpu_find(cpuid);
-
- if (pcpu) {
- do {
- old_pending = pcpu->pc_pending_ipis;
- new_pending = old_pending | ipi;
- } while (!atomic_cmpset_int(&pcpu->pc_pending_ipis,
- old_pending, new_pending));
-
- if (old_pending)
- continue;
-
- mips_ipi_send (cpuid);
- }
- }
+ CTR1(KTR_SMP, "%s: sent", __func__);
}
-/*
- * send an IPI to all CPUs EXCEPT myself
- */
+/* Send an IPI to a set of cpus. */
void
-ipi_all_but_self(u_int ipi)
+ipi_selected(cpumask_t cpus, int ipi)
{
+ struct pcpu *pc;
- ipi_selected(PCPU_GET(other_cpus), ipi);
+ CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
+
+ SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ if ((cpus & pc->pc_cpumask) != 0)
+ ipi_send(pc, ipi);
+ }
}
/*
* Handle an IPI sent to this processor.
*/
-intrmask_t
-smp_handle_ipi(struct trapframe *frame)
+static int
+mips_ipi_handler(void *arg)
{
- cpumask_t cpumask; /* This cpu mask */
+ cpumask_t cpumask;
u_int ipi, ipi_bitmap;
+ int bit;
+
+ platform_ipi_clear(); /* quiesce the pending ipi interrupt */
ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
- cpumask = PCPU_GET(cpumask);
+ if (ipi_bitmap == 0)
+ return (FILTER_STRAY);
CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
- while (ipi_bitmap) {
- /*
- * Find the lowest set bit.
- */
- ipi = ipi_bitmap & ~(ipi_bitmap - 1);
+
+ while ((bit = ffs(ipi_bitmap))) {
+ bit = bit - 1;
+ ipi = 1 << bit;
ipi_bitmap &= ~ipi;
switch (ipi) {
- case IPI_INVLTLB:
- CTR0(KTR_SMP, "IPI_INVLTLB");
- break;
-
case IPI_RENDEZVOUS:
CTR0(KTR_SMP, "IPI_RENDEZVOUS");
smp_rendezvous_action();
@@ -129,51 +115,136 @@ smp_handle_ipi(struct trapframe *frame)
break;
case IPI_STOP:
-
/*
* IPI_STOP_HARD is mapped to IPI_STOP so it is not
* necessary to add it in the switch.
*/
CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
+ cpumask = PCPU_GET(cpumask);
atomic_set_int(&stopped_cpus, cpumask);
-
while ((started_cpus & cpumask) == 0)
- ;
+ cpu_spinwait();
atomic_clear_int(&started_cpus, cpumask);
atomic_clear_int(&stopped_cpus, cpumask);
+ CTR0(KTR_SMP, "IPI_STOP (restart)");
break;
+ default:
+ panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
}
}
- return CR_INT_IPI;
+
+ return (FILTER_HANDLED);
+}
+
+static int
+start_ap(int cpuid)
+{
+ int cpus, ms;
+
+ cpus = mp_naps;
+ dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+
+ if (platform_start_ap(cpuid) != 0)
+ return (-1); /* could not start AP */
+
+ for (ms = 0; ms < 5000; ++ms) {
+ if (mp_naps > cpus)
+ return (0); /* success */
+ else
+ DELAY(1000);
+ }
+
+ return (-2); /* timeout initializing AP */
}
void
cpu_mp_setmaxid(void)
{
- mp_maxid = MAXCPU - 1;
+ mp_ncpus = platform_num_processors();
+ if (mp_ncpus <= 0)
+ mp_ncpus = 1;
+
+ mp_maxid = min(mp_ncpus, MAXCPU) - 1;
+}
+
+void
+cpu_mp_announce(void)
+{
+ /* NOTHING */
+}
+
+struct cpu_group *
+cpu_topo(void)
+{
+
+ return (smp_topo_none());
+}
+
+int
+cpu_mp_probe(void)
+{
+
+ return (mp_ncpus > 1);
+}
+
+void
+cpu_mp_start(void)
+{
+ int error, cpuid;
+
+ mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+
+ all_cpus = 1; /* BSP */
+ for (cpuid = 1; cpuid < platform_num_processors(); ++cpuid) {
+ if (cpuid >= MAXCPU) {
+ printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
+ continue;
+ }
+
+ if ((error = start_ap(cpuid)) != 0) {
+ printf("AP #%d failed to start: %d\n", cpuid, error);
+ continue;
+ }
+
+ if (bootverbose)
+ printf("AP #%d started!\n", cpuid);
+
+ all_cpus |= 1 << cpuid;
+ }
+
+ PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
}
void
smp_init_secondary(u_int32_t cpuid)
{
+ int ipi_int_mask, clock_int_mask;
- if (cpuid >= MAXCPU)
- panic ("cpu id exceeds MAXCPU\n");
+ /* TLB */
+ Mips_SetWIRED(0);
+ Mips_TLBFlush(num_tlbentries);
+ Mips_SetWIRED(VMWIRED_ENTRIES);
+
+ /*
+ * We assume that the L1 cache on the APs is identical to the one
+ * on the BSP.
+ */
+ mips_dcache_wbinv_all();
+ mips_icache_sync_all();
- /* tlb init */
- R4K_SetWIRED(0);
- R4K_TLBFlush(num_tlbentries);
- R4K_SetWIRED(VMWIRED_ENTRIES);
MachSetPID(0);
- Mips_SyncCache();
+ pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
+ dpcpu_init(dpcpu, cpuid);
+
+ /* The AP has initialized successfully - allow the BSP to proceed */
+ ++mp_naps;
- mips_cp0_status_write(0);
+ /* Spin until the BSP is ready to release the APs */
while (!aps_ready)
;
- mips_sync(); mips_sync();
/* Initialize curthread. */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
PCPU_SET(curthread, PCPU_GET(idlethread));
@@ -182,15 +253,16 @@ smp_init_secondary(u_int32_t cpuid)
smp_cpus++;
- CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
+ CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
- printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
+ if (bootverbose)
+ printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
- smp_started = 1;
+ atomic_store_rel_int(&smp_started, 1);
smp_active = 1;
}
@@ -198,103 +270,46 @@ smp_init_secondary(u_int32_t cpuid)
while (smp_started == 0)
; /* nothing */
- /* Enable Interrupt */
- mips_cp0_status_write(SR_INT_ENAB);
- /* ok, now grab sched_lock and enter the scheduler */
- mtx_lock_spin(&sched_lock);
/*
- * Correct spinlock nesting. The idle thread context that we are
- * borrowing was created so that it would start out with a single
- * spin lock (sched_lock) held in fork_trampoline(). Since we've
- * explicitly acquired locks in this function, the nesting count
- * is now 2 rather than 1. Since we are nested, calling
- * spinlock_exit() will simply adjust the counts without allowing
- * spin lock using code to interrupt us.
+ * Unmask the clock and ipi interrupts.
*/
- spinlock_exit();
- KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
-
- binuptime(PCPU_PTR(switchtime));
- PCPU_SET(switchticks, ticks);
-
- /* kick off the clock on this cpu */
- mips_start_timer();
- cpu_throw(NULL, choosethread()); /* doesn't return */
-
- panic("scheduler returned us to %s", __func__);
-}
-
-static int
-smp_start_secondary(int cpuid)
-{
- struct pcpu *pcpu;
- void *dpcpu;
- int i;
-
- if (bootverbose)
- printf("smp_start_secondary: starting cpu %d\n", cpuid);
+ clock_int_mask = hard_int_mask(5);
+ ipi_int_mask = hard_int_mask(platform_ipi_intrnum());
+ set_intr_mask(ALL_INT_MASK & ~(ipi_int_mask | clock_int_mask));
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
- pcpu_init(&__pcpu[cpuid], cpuid, sizeof(struct pcpu));
- dpcpu_init(dpcpu, cpuid);
-
- if (bootverbose)
- printf("smp_start_secondary: cpu %d started\n", cpuid);
-
- return 1;
-}
-
-int
-cpu_mp_probe(void)
-{
- int i, cpus;
+ /*
+ * Bootstrap the compare register.
+ */
+ mips_wr_compare(mips_rd_count() + counter_freq / hz);
- /* XXX: Need to check for valid platforms here. */
+ enableintr();
- boot_cpu_id = PCPU_GET(cpuid);
- KASSERT(boot_cpu_id == 0, ("cpu_mp_probe() called on non-primary CPU"));
- all_cpus = PCPU_GET(cpumask);
- mp_ncpus = 1;
+ /* enter the scheduler */
+ sched_throw(NULL);
- /* Make sure we have at least one secondary CPU. */
- cpus = 0;
- for (i = 0; i < MAXCPU; i++) {
- cpus++;
- }
- return (cpus);
+ panic("scheduler returned us to %s", __func__);
+ /* NOTREACHED */
}
-void
-cpu_mp_start(void)
+static void
+release_aps(void *dummy __unused)
{
- int i, cpuid;
-
- mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+ int ipi_irq;
- cpuid = 1;
- for (i = 0; i < MAXCPU; i++) {
+ if (mp_ncpus == 1)
+ return;
- if (i == boot_cpu_id)
- continue;
- if (smp_start_secondary(i)) {
- all_cpus |= (1 << cpuid);
- mp_ncpus++;
- cpuid++;
- }
- }
- idle_mask |= CR_INT_IPI;
- PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
-}
+ /*
+ * IPI handler
+ */
+ ipi_irq = platform_ipi_intrnum();
+ cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
+ INTR_TYPE_MISC | INTR_EXCL | INTR_FAST, NULL);
-static void
-release_aps(void *dummy __unused)
-{
- if (bootverbose && mp_ncpus > 1)
- printf("%s: releasing secondary CPUs\n", __func__);
atomic_store_rel_int(&aps_ready, 1);
- while (mp_ncpus > 1 && smp_started == 0)
+ while (smp_started == 0)
; /* nothing */
}
diff --git a/sys/mips/mips/mpboot.S b/sys/mips/mips/mpboot.S
new file mode 100644
index 0000000..6828847
--- /dev/null
+++ b/sys/mips/mips/mpboot.S
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2010 Neelkanth Natu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/cpuregs.h>
+
+#include "assym.s"
+
+ .text
+ .set noat
+ .set noreorder
+
+GLOBAL(mpentry)
+ mtc0 zero, COP_0_STATUS_REG /* disable interrupts */
+
+ mtc0 zero, COP_0_CAUSE_REG /* clear soft interrupts */
+
+ li t0, CFG_K0_CACHED /* make sure kseg0 is cached */
+ mtc0 t0, MIPS_COP_0_CONFIG
+ COP0_SYNC
+
+ jal platform_processor_id /* get the processor number */
+ nop
+ move s0, v0
+
+ /*
+ * Initialize stack and call machine startup
+ */
+ PTR_LA sp, _C_LABEL(pcpu_space)
+ addiu sp, (NBPG * 2) - START_FRAME
+ sll t0, s0, PAGE_SHIFT + 1
+ addu sp, sp, t0
+
+ /* Zero out old ra and old fp for debugger */
+ sw zero, START_FRAME - 4(sp)
+ sw zero, START_FRAME - 8(sp)
+
+ PTR_LA gp, _C_LABEL(_gp)
+
+ jal platform_init_ap
+ move a0, s0
+
+ jal smp_init_secondary
+ move a0, s0
+
+ PANIC("AP startup failed!")
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 6fc0fe2..4a267dc 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -362,6 +362,15 @@ again:
virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
pcpup = (struct pcpu *)virtual_avail;
virtual_avail += PAGE_SIZE * 2;
+
+ /*
+ * Initialize the wired TLB entry mapping the pcpu region for
+ * the BSP at 'pcpup'. Up until this point we were operating
+ * with the 'pcpup' for the BSP pointing to a virtual address
+ * in KSEG0 so there was no need for a TLB mapping.
+ */
+ mips_pcpu_tlb_init(PCPU_ADDR(0));
+
if (bootverbose)
printf("pcpu is available at virtual address %p.\n", pcpup);
#endif
diff --git a/sys/mips/sibyte/sb_asm.S b/sys/mips/sibyte/sb_asm.S
index b81c067..19d00dd 100644
--- a/sys/mips/sibyte/sb_asm.S
+++ b/sys/mips/sibyte/sb_asm.S
@@ -27,6 +27,7 @@
*/
#include <machine/asm.h>
+#include <machine/cpuregs.h>
/*
* We compile a 32-bit kernel to run on the SB-1 processor which is a 64-bit
@@ -80,3 +81,20 @@ LEAF(sb_store64)
jr ra
sd t0, 0(a0)
END(sb_store64)
+
+#ifdef SMP
+/*
+ * This function must be implemented in assembly because it is called early
+ * in AP boot without a valid stack.
+ *
+ * This cpu number is available in bits 25 to 27 of the coprocessor 0 PRID
+ * register. This is not documented in the BCM1250 user manual but can be
+ * gleaned from the CFE source code - see sb1250_altcpu.S
+ */
+LEAF(platform_processor_id)
+ mfc0 v0, MIPS_COP_0_PRID
+ srl v0, v0, 25
+ jr ra
+ and v0, v0, 7
+END(platform_processor_id)
+#endif /* SMP */
diff --git a/sys/mips/sibyte/sb_machdep.c b/sys/mips/sibyte/sb_machdep.c
index c544b18..c6043b8 100644
--- a/sys/mips/sibyte/sb_machdep.c
+++ b/sys/mips/sibyte/sb_machdep.c
@@ -74,6 +74,10 @@ __FBSDID("$FreeBSD$");
#include <machine/trap.h>
#include <machine/vmparam.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
#ifdef CFE
#include <dev/cfe/cfe_api.h>
#endif
@@ -114,6 +118,19 @@ sb_intr_init(int cpuid)
intrnum = sb_route_intsrc(intsrc);
sb_disable_intsrc(cpuid, intsrc);
sb_write_intmap(cpuid, intsrc, intrnum);
+#ifdef SMP
+ /*
+ * Set up the mailbox interrupt mapping.
+ *
+ * The mailbox interrupt is "special" in that it is not shared
+ * with any other interrupt source.
+ */
+ if (intsrc == INTSRC_MAILBOX3) {
+ intrnum = platform_ipi_intrnum();
+ sb_write_intmap(cpuid, INTSRC_MAILBOX3, intrnum);
+ sb_enable_intsrc(cpuid, INTSRC_MAILBOX3);
+ }
+#endif
}
}
@@ -282,6 +299,64 @@ kseg0_map_coherent(void)
mips_wr_config(config);
}
+#ifdef SMP
+void
+platform_ipi_send(int cpuid)
+{
+ KASSERT(cpuid == 0 || cpuid == 1,
+ ("platform_ipi_send: invalid cpuid %d", cpuid));
+
+ sb_set_mailbox(cpuid, 1ULL);
+}
+
+void
+platform_ipi_clear(void)
+{
+ int cpuid;
+
+ cpuid = PCPU_GET(cpuid);
+ sb_clear_mailbox(cpuid, 1ULL);
+}
+
+int
+platform_ipi_intrnum(void)
+{
+
+ return (4);
+}
+
+void
+platform_init_ap(int cpuid)
+{
+
+ KASSERT(cpuid == 1, ("AP has an invalid cpu id %d", cpuid));
+
+ /*
+ * Make sure that kseg0 is mapped cacheable-coherent
+ */
+ kseg0_map_coherent();
+
+ sb_intr_init(cpuid);
+}
+
+int
+platform_start_ap(int cpuid)
+{
+#ifdef CFE
+ int error;
+
+ if ((error = cfe_cpu_start(cpuid, mpentry, 0, 0, 0))) {
+ printf("cfe_cpu_start error: %d\n", error);
+ return (-1);
+ } else {
+ return (0);
+ }
+#else
+ return (-1);
+#endif /* CFE */
+}
+#endif /* SMP */
+
void
platform_start(__register_t a0, __register_t a1, __register_t a2,
__register_t a3)
diff --git a/sys/mips/sibyte/sb_scd.c b/sys/mips/sibyte/sb_scd.c
index c499822..007e149 100644
--- a/sys/mips/sibyte/sb_scd.c
+++ b/sys/mips/sibyte/sb_scd.c
@@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <machine/resource.h>
-#include <machine/intr_machdep.h>
+#include <machine/hwfunc.h>
#include "sb_scd.h"
@@ -189,11 +189,51 @@ sb_route_intsrc(int intsrc)
* Interrupt 5 is used by sources internal to the CPU (e.g. timer).
* Use a deterministic mapping for the remaining sources.
*/
+#ifdef SMP
+ KASSERT(platform_ipi_intrnum() == 4,
+ ("Unexpected interrupt number used for IPI"));
+ intrnum = intsrc % 4;
+#else
intrnum = intsrc % 5;
+#endif
return (intrnum);
}
+#ifdef SMP
+static uint64_t
+sb_read_sysrev(void)
+{
+
+ return (sb_load64(SYSREV_ADDR));
+}
+
+void
+sb_set_mailbox(int cpu, uint64_t val)
+{
+ uint32_t regaddr;
+
+ regaddr = MAILBOX_SET_ADDR(cpu);
+ sb_store64(regaddr, val);
+}
+
+void
+sb_clear_mailbox(int cpu, uint64_t val)
+{
+ uint32_t regaddr;
+
+ regaddr = MAILBOX_CLEAR_ADDR(cpu);
+ sb_store64(regaddr, val);
+}
+
+int
+platform_num_processors(void)
+{
+
+ return (SYSREV_NUM_PROCESSORS(sb_read_sysrev()));
+}
+#endif /* SMP */
+
#define SCD_PHYSADDR 0x10000000
#define SCD_SIZE 0x00060000
diff --git a/sys/mips/sibyte/sb_scd.h b/sys/mips/sibyte/sb_scd.h
index 8f60716..03d2681 100644
--- a/sys/mips/sibyte/sb_scd.h
+++ b/sys/mips/sibyte/sb_scd.h
@@ -42,4 +42,10 @@ void sb_write_intsrc_mask(int cpu, uint64_t mask);
void sb_write_intmap(int cpu, int intsrc, int intrnum);
int sb_read_intmap(int cpu, int intsrc);
+#ifdef SMP
+#define INTSRC_MAILBOX3 29
+void sb_set_mailbox(int cpuid, uint64_t val);
+void sb_clear_mailbox(int cpuid, uint64_t val);
+#endif
+
#endif /* _SB_SCD_H_ */
diff --git a/sys/mips/sibyte/sb_zbbus.c b/sys/mips/sibyte/sb_zbbus.c
index bede796..cd16856 100644
--- a/sys/mips/sibyte/sb_zbbus.c
+++ b/sys/mips/sibyte/sb_zbbus.c
@@ -24,6 +24,9 @@
* SUCH DAMAGE.
*/
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
@@ -31,21 +34,29 @@
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include "sb_scd.h"
-__FBSDID("$FreeBSD$");
-
static MALLOC_DEFINE(M_INTMAP, "sb1250 intmap", "Sibyte 1250 Interrupt Mapper");
-#define NUM_HARD_IRQS 6
+static struct mtx zbbus_intr_mtx;
+MTX_SYSINIT(zbbus_intr_mtx, &zbbus_intr_mtx, "zbbus_intr_mask/unmask lock",
+ MTX_SPIN);
+
+/*
+ * This array holds the mapping between a MIPS hard interrupt and the
+ * interrupt sources that feed into it.
+ */
+static uint64_t hardint_to_intsrc_mask[NHARD_IRQS];
struct sb_intmap {
int intsrc; /* interrupt mapper register number (0 - 63) */
- int hardint; /* cpu interrupt from 0 to NUM_HARD_IRQS - 1 */
+ int hardint; /* cpu interrupt from 0 to NHARD_IRQS - 1 */
/*
* The device that the interrupt belongs to. Note that multiple
@@ -86,7 +97,7 @@ sb_intmap_add(int intrnum, device_t dev, int rid, int intsrc)
{
struct sb_intmap *map;
- KASSERT(intrnum >= 0 && intrnum < NUM_HARD_IRQS,
+ KASSERT(intrnum >= 0 && intrnum < NHARD_IRQS,
("intrnum is out of range: %d", intrnum));
map = sb_intmap_lookup(intrnum, dev, rid);
@@ -113,12 +124,18 @@ sb_intmap_activate(int intrnum, device_t dev, int rid)
{
struct sb_intmap *map;
- KASSERT(intrnum >= 0 && intrnum < NUM_HARD_IRQS,
+ KASSERT(intrnum >= 0 && intrnum < NHARD_IRQS,
("intrnum is out of range: %d", intrnum));
map = sb_intmap_lookup(intrnum, dev, rid);
if (map) {
+ /*
+ * Deliver all interrupts to CPU0.
+ */
+ mtx_lock_spin(&zbbus_intr_mtx);
+ hardint_to_intsrc_mask[intrnum] |= 1ULL << map->intsrc;
sb_enable_intsrc(0, map->intsrc);
+ mtx_unlock_spin(&zbbus_intr_mtx);
} else {
/*
* In zbbus_setup_intr() we blindly call sb_intmap_activate()
@@ -133,6 +150,52 @@ sb_intmap_activate(int intrnum, device_t dev, int rid)
}
}
+/*
+ * Replace the default interrupt mask and unmask routines in intr_machdep.c
+ * with routines that are SMP-friendly. In contrast to the default mask/unmask
+ * routines in intr_machdep.c these routines do not change the SR.int_mask bits.
+ *
+ * Instead they use the interrupt mapper to either mask or unmask all
+ * interrupt sources feeding into a particular interrupt line of the processor.
+ *
+ * This means that these routines have an identical effect irrespective of
+ * which cpu is executing them. This is important because the ithread may
+ * be scheduled to run on either of the cpus.
+ */
+static void
+zbbus_intr_mask(void *arg)
+{
+ uint64_t mask;
+ int irq;
+
+ irq = (uintptr_t)arg;
+
+ mtx_lock_spin(&zbbus_intr_mtx);
+
+ mask = sb_read_intsrc_mask(0);
+ mask |= hardint_to_intsrc_mask[irq];
+ sb_write_intsrc_mask(0, mask);
+
+ mtx_unlock_spin(&zbbus_intr_mtx);
+}
+
+static void
+zbbus_intr_unmask(void *arg)
+{
+ uint64_t mask;
+ int irq;
+
+ irq = (uintptr_t)arg;
+
+ mtx_lock_spin(&zbbus_intr_mtx);
+
+ mask = sb_read_intsrc_mask(0);
+ mask &= ~hardint_to_intsrc_mask[irq];
+ sb_write_intsrc_mask(0, mask);
+
+ mtx_unlock_spin(&zbbus_intr_mtx);
+}
+
struct zbbus_devinfo {
struct resource_list resources;
};
@@ -155,6 +218,9 @@ zbbus_attach(device_t dev)
device_printf(dev, "attached.\n");
}
+ cpu_set_hardintr_mask_func(zbbus_intr_mask);
+ cpu_set_hardintr_unmask_func(zbbus_intr_unmask);
+
bus_generic_probe(dev);
bus_enumerate_hinted_children(dev);
bus_generic_attach(dev);
OpenPOWER on IntegriCloud