author     nwhitehorn <nwhitehorn@FreeBSD.org>    2011-05-31 15:11:43 +0000
committer  nwhitehorn <nwhitehorn@FreeBSD.org>    2011-05-31 15:11:43 +0000
commit     a69e106b2fac0817484b6b395a4bfcabef513310 (patch)
tree       02ea093d9809e917fd683ac9ee2c87d27aa454e6
parent     3e43795a7a21a21ba53f04ea258e3e79a7a34e9d (diff)
On multi-core, multi-threaded PPC systems, it is important that the threads
be brought up in the order they are enumerated in the device tree (in
particular, that thread 0 on each core be brought up first). The SLIST
through which we loop to start the CPUs has all of its entries added with
SLIST_INSERT_HEAD(), which means it is in reverse order of enumeration and
so AP startup would always fail in such situations (causing a machine check
or RTAS failure). Fix this by changing the SLIST into an STAILQ, and
inserting new CPUs at the end.

Reviewed by:	jhb
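To make the ordering issue concrete, here is a minimal userland sketch. It
is not part of this commit and assumes a BSD-style <sys/queue.h> providing
both the SLIST and STAILQ macro families, as FreeBSD's does. It shows that
populating a list with SLIST_INSERT_HEAD() makes a later FOREACH visit the
CPUs in reverse enumeration order, while STAILQ_INSERT_TAIL() preserves the
enumeration order that the PPC AP bringup requires.

	#include <sys/queue.h>
	#include <stdio.h>

	struct cpu {
		int			id;
		SLIST_ENTRY(cpu)	sl;	/* old linkage */
		STAILQ_ENTRY(cpu)	st;	/* new linkage */
	};

	int
	main(void)
	{
		SLIST_HEAD(, cpu) slhead = SLIST_HEAD_INITIALIZER(slhead);
		STAILQ_HEAD(, cpu) sthead = STAILQ_HEAD_INITIALIZER(sthead);
		struct cpu cpus[4];
		struct cpu *c;
		int i;

		/* Enumerate CPUs 0..3, as the device tree presents them. */
		for (i = 0; i < 4; i++) {
			cpus[i].id = i;
			SLIST_INSERT_HEAD(&slhead, &cpus[i], sl);
			STAILQ_INSERT_TAIL(&sthead, &cpus[i], st);
		}

		printf("SLIST order:  ");
		SLIST_FOREACH(c, &slhead, sl)	/* prints 3 2 1 0 */
			printf("%d ", c->id);

		printf("\nSTAILQ order: ");
		STAILQ_FOREACH(c, &sthead, st)	/* prints 0 1 2 3 */
			printf("%d ", c->id);
		printf("\n");
		return (0);
	}

Note that STAILQ_INSERT_TAIL() is still O(1), since the STAILQ head keeps a
tail pointer, so pcpu_init() pays no extra cost for the FIFO ordering.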
-rw-r--r--  sys/i386/pci/pci_cfgreg.c        | 2
-rw-r--r--  sys/ia64/ia64/machdep.c          | 2
-rw-r--r--  sys/ia64/ia64/mp_machdep.c       | 8
-rw-r--r--  sys/ia64/ia64/pmap.c             | 2
-rw-r--r--  sys/kern/kern_idle.c             | 2
-rw-r--r--  sys/kern/sched_4bsd.c            | 4
-rw-r--r--  sys/kern/subr_kdb.c              | 2
-rw-r--r--  sys/kern/subr_pcpu.c             | 6
-rw-r--r--  sys/mips/mips/mp_machdep.c       | 2
-rw-r--r--  sys/net/netisr.c                 | 2
-rw-r--r--  sys/powerpc/booke/pmap.c         | 4
-rw-r--r--  sys/powerpc/powerpc/mp_machdep.c | 6
-rw-r--r--  sys/sparc64/sparc64/mp_machdep.c | 2
-rw-r--r--  sys/sparc64/sparc64/pmap.c       | 2
-rw-r--r--  sys/sys/pcpu.h                   | 4
15 files changed, 25 insertions, 25 deletions
diff --git a/sys/i386/pci/pci_cfgreg.c b/sys/i386/pci/pci_cfgreg.c
index ae56990..ac641a8 100644
--- a/sys/i386/pci/pci_cfgreg.c
+++ b/sys/i386/pci/pci_cfgreg.c
@@ -553,7 +553,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus)
(uintmax_t)base);
#ifdef SMP
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
#endif
{
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index 41d2211..7252865 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -316,7 +316,7 @@ cpu_startup(void *dummy)
/*
* Create sysctl tree for per-CPU information.
*/
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid);
sysctl_ctx_init(&pc->pc_md.sysctl_ctx);
pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx,
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index 5804f8c..b6b0bef 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -357,7 +357,7 @@ cpu_mp_start()
/* Keep 'em spinning until we unleash them... */
ia64_ap_state.as_spin = 1;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
pc->pc_md.current_pmap = kernel_pmap;
pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
/* The BSP is obviously running already. */
@@ -424,7 +424,7 @@ cpu_mp_unleash(void *dummy)
cpus = 0;
smp_cpus = 0;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
cpus++;
if (pc->pc_md.awake) {
kproc_create(ia64_store_mca_state, pc, NULL, 0, 0,
@@ -462,7 +462,7 @@ ipi_selected(cpumask_t cpus, int ipi)
{
struct pcpu *pc;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (cpus & pc->pc_cpumask)
ipi_send(pc, ipi);
}
@@ -486,7 +486,7 @@ ipi_all_but_self(int ipi)
{
struct pcpu *pc;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc != pcpup)
ipi_send(pc, ipi);
}
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 5f10ad6..411d53a 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -535,7 +535,7 @@ pmap_invalidate_page(vm_offset_t va)
critical_enter();
vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);
tag = ia64_ttag(va);
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
}
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index af12d7d..f412d17 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -60,7 +60,7 @@ idle_setup(void *dummy)
p = NULL; /* start with no idle process */
#ifdef SMP
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
#endif
#ifdef SMP
error = kproc_kthread_add(sched_idletd, NULL, &p, &td,
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fef9e25..519cae5 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1081,7 +1081,7 @@ forward_wakeup(int cpunum)
dontuse = me | stopped_cpus | hlt_cpus_mask;
map2 = 0;
if (forward_wakeup_use_loop) {
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
if ((id & dontuse) == 0 &&
pc->pc_curthread == pc->pc_idlethread) {
@@ -1112,7 +1112,7 @@ forward_wakeup(int cpunum)
}
if (map) {
forward_wakeups_delivered++;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
if ((map & id) == 0)
continue;
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 342c5ca..5d68ae2 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -412,7 +412,7 @@ kdb_thr_ctx(struct thread *thr)
return (&kdb_pcb);
#if defined(SMP) && defined(KDB_STOPPEDPCB)
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc->pc_curthread == thr && (stopped_cpus & pc->pc_cpumask))
return (KDB_STOPPEDPCB(pc));
}
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index de5cafc..5cb4f26 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -74,7 +74,7 @@ static TAILQ_HEAD(, dpcpu_free) dpcpu_head = TAILQ_HEAD_INITIALIZER(dpcpu_head);
static struct sx dpcpu_lock;
uintptr_t dpcpu_off[MAXCPU];
struct pcpu *cpuid_to_pcpu[MAXCPU];
-struct cpuhead cpuhead = SLIST_HEAD_INITIALIZER(cpuhead);
+struct cpuhead cpuhead = STAILQ_HEAD_INITIALIZER(cpuhead);
/*
* Initialize the MI portions of a struct pcpu.
@@ -89,7 +89,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
pcpu->pc_cpuid = cpuid;
pcpu->pc_cpumask = 1 << cpuid;
cpuid_to_pcpu[cpuid] = pcpu;
- SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
+ STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
cpu_pcpu_init(pcpu, cpuid, size);
pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
@@ -245,7 +245,7 @@ void
pcpu_destroy(struct pcpu *pcpu)
{
- SLIST_REMOVE(&cpuhead, pcpu, pcpu, pc_allcpu);
+ STAILQ_REMOVE(&cpuhead, pcpu, pcpu, pc_allcpu);
cpuid_to_pcpu[pcpu->pc_cpuid] = NULL;
dpcpu_off[pcpu->pc_cpuid] = 0;
}
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
index e945736..7191b37 100644
--- a/sys/mips/mips/mp_machdep.c
+++ b/sys/mips/mips/mp_machdep.c
@@ -86,7 +86,7 @@ ipi_selected(cpumask_t cpus, int ipi)
CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if ((cpus & pc->pc_cpumask) != 0)
ipi_send(pc, ipi);
}
diff --git a/sys/net/netisr.c b/sys/net/netisr.c
index 67ec160..127cf67 100644
--- a/sys/net/netisr.c
+++ b/sys/net/netisr.c
@@ -1221,7 +1221,7 @@ netisr_start(void *arg)
{
struct pcpu *pc;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (nws_count >= netisr_maxthreads)
break;
/* XXXRW: Is skipping absent CPUs still required here? */
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index aff7901..cabe58f 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -390,7 +390,7 @@ tlb_miss_lock(void)
if (!smp_started)
return;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc != pcpup) {
CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
@@ -416,7 +416,7 @@ tlb_miss_unlock(void)
if (!smp_started)
return;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc != pcpup) {
CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
__func__, pc->pc_cpuid);
diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c
index 02920da..577d4dc 100644
--- a/sys/powerpc/powerpc/mp_machdep.c
+++ b/sys/powerpc/powerpc/mp_machdep.c
@@ -212,7 +212,7 @@ cpu_mp_unleash(void *dummy)
cpus = 0;
smp_cpus = 0;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
cpus++;
pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
if (!pc->pc_bsp) {
@@ -347,7 +347,7 @@ ipi_selected(cpumask_t cpus, int ipi)
{
struct pcpu *pc;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (cpus & pc->pc_cpumask)
ipi_send(pc, ipi);
}
@@ -367,7 +367,7 @@ ipi_all_but_self(int ipi)
{
struct pcpu *pc;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc != pcpup)
ipi_send(pc, ipi);
}
diff --git a/sys/sparc64/sparc64/mp_machdep.c b/sys/sparc64/sparc64/mp_machdep.c
index 8700f89..4d9151e 100644
--- a/sys/sparc64/sparc64/mp_machdep.c
+++ b/sys/sparc64/sparc64/mp_machdep.c
@@ -383,7 +383,7 @@ cpu_mp_unleash(void *v)
ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
csa = &cpu_start_args;
csa->csa_count = mp_ncpus;
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
pc->pc_tlb_ctx = ctx_min;
pc->pc_tlb_ctx_min = ctx_min;
pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 09482b9..c34fc45 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1278,7 +1278,7 @@ pmap_release(pmap_t pm)
* to a kernel thread, leaving the pmap pointer unchanged.
*/
mtx_lock_spin(&sched_lock);
- SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
if (pc->pc_pmap == pm)
pc->pc_pmap = NULL;
mtx_unlock_spin(&sched_lock);
diff --git a/sys/sys/pcpu.h b/sys/sys/pcpu.h
index ad1cf33..0bb2cbd 100644
--- a/sys/sys/pcpu.h
+++ b/sys/sys/pcpu.h
@@ -164,7 +164,7 @@ struct pcpu {
u_int pc_cpuid; /* This cpu number */
cpumask_t pc_cpumask; /* This cpu mask */
cpumask_t pc_other_cpus; /* Mask of all other cpus */
- SLIST_ENTRY(pcpu) pc_allcpu;
+ STAILQ_ENTRY(pcpu) pc_allcpu;
struct lock_list_entry *pc_spinlocks;
#ifdef KTR
char pc_name[PCPU_NAME_LEN]; /* String name for KTR */
@@ -201,7 +201,7 @@ struct pcpu {
#ifdef _KERNEL
-SLIST_HEAD(cpuhead, pcpu);
+STAILQ_HEAD(cpuhead, pcpu);
extern struct cpuhead cpuhead;
extern struct pcpu *cpuid_to_pcpu[MAXCPU];