author    marius <marius@FreeBSD.org>    2010-07-04 12:43:12 +0000
committer marius <marius@FreeBSD.org>    2010-07-04 12:43:12 +0000
commit    5758b8c344de54ad9c4d7eba693a0ce23e0e3a90
tree      c2ea2038cfb5782af7b8c12de2d336824cf93e86
parent    af2d069e9b94cbf2f883170eaaa531ce37733214
- Pin the IPI cache and TLB demap functions in order to prevent migration
  between determining the other CPUs and calling cpu_ipi_selected(), which
  apart from generally doing the wrong thing can lead to a panic when a CPU
  is told to IPI itself (which sun4u doesn't support).
  Reported and tested by: Nathaniel W Filardo
- Add __unused where appropriate.

MFC after:	3 days
-rw-r--r--  sys/sparc64/include/smp.h | 32
1 file changed, 24 insertions(+), 8 deletions(-)
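The heart of the change is easiest to read outside diff form. The sketch below condenses the fixed ipi_tlb_context_demap() path from the hunks that follow; the local declarations, the rest of the ipi_tlb_args setup, the cpu_ipi_selected() call and the final return value are not visible in the hunks and are assumptions, so treat this as an illustration rather than the literal header code:

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;	/* declarations assumed, not shown in the hunks */
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();		/* from here on the thread cannot migrate */
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();	/* nothing to demap remotely */
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	/*
	 * ... finish filling in *ita and send the IPI with
	 * cpu_ipi_selected(cpus, ...) (elided here and in the hunks below).
	 * Because the thread is still pinned, "cpus" was computed on the CPU
	 * we are still running on, so it cannot name that CPU -- which
	 * matters because sun4u cannot IPI itself.
	 */
	return (&ita->ita_mask);	/* assumed cookie, consumed by ipi_wait() */
}

Note that on the successful path the pin is intentionally not dropped here; it is released in ipi_wait() once all targets have acknowledged (see the last SMP hunk below).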
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index 467c6b6..eda6d6f 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -38,6 +38,9 @@
#ifndef LOCORE
+#include <sys/proc.h>
+#include <sys/sched.h>
+
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>
@@ -139,6 +142,7 @@ ipi_dcache_page_inval(void *func, vm_paddr_t pa)
if (smp_cpus == 1)
return (NULL);
+ sched_pin();
ica = &ipi_cache_args;
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
@@ -154,6 +158,7 @@ ipi_icache_page_inval(void *func, vm_paddr_t pa)
if (smp_cpus == 1)
return (NULL);
+ sched_pin();
ica = &ipi_cache_args;
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
@@ -170,8 +175,11 @@ ipi_tlb_context_demap(struct pmap *pm)
if (smp_cpus == 1)
return (NULL);
- if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
+ sched_pin();
+ if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
+ sched_unpin();
return (NULL);
+ }
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
@@ -189,8 +197,11 @@ ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
if (smp_cpus == 1)
return (NULL);
- if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
+ sched_pin();
+ if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
+ sched_unpin();
return (NULL);
+ }
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
@@ -208,8 +219,11 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
if (smp_cpus == 1)
return (NULL);
- if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
+ sched_pin();
+ if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
+ sched_unpin();
return (NULL);
+ }
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
@@ -230,6 +244,7 @@ ipi_wait(void *cookie)
while (*mask != 0)
;
mtx_unlock_spin(&ipi_mtx);
+ sched_unpin();
}
}
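This placement is the flip side of the design choice above: the sched_unpin() for the successful demap path lives in ipi_wait(), so the caller remains pinned from computing the CPU set until every target has acknowledged the IPI. A hypothetical caller (not part of this commit, names invented for illustration) would pair the two roughly like this:

static void
example_pmap_invalidate_page(struct pmap *pm, vm_offset_t va)
{
	void *cookie;

	/* Pins the thread (when there is remote work) and sends the IPIs. */
	cookie = ipi_tlb_page_demap(pm, va);
	/* ... do the local TLB demap here ... */
	/* Spins until the targets acknowledge, then unlocks and unpins. */
	ipi_wait(cookie);
}

On the early-return paths the cookie is NULL and no pin is left held, which ipi_wait() appears to tolerate (note the extra closing brace in the hunk above).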
@@ -242,35 +257,36 @@ ipi_wait(void *cookie)
#ifndef LOCORE
static __inline void *
-ipi_dcache_page_inval(void *func, vm_paddr_t pa)
+ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{
return (NULL);
}
static __inline void *
-ipi_icache_page_inval(void *func, vm_paddr_t pa)
+ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{
return (NULL);
}
static __inline void *
-ipi_tlb_context_demap(struct pmap *pm)
+ipi_tlb_context_demap(struct pmap *pm __unused)
{
return (NULL);
}
static __inline void *
-ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
+ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{
return (NULL);
}
static __inline void *
-ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
+ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
+ __unused vm_offset_t end)
{
return (NULL);
}
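As for the __unused annotations added to the UP stubs: on FreeBSD, __unused comes from <sys/cdefs.h> and expands (on GCC-compatible compilers) to __attribute__((__unused__)), so keeping the full SMP prototypes on the stubs no longer draws unused-parameter warnings. A minimal, self-contained illustration (not taken from this commit):

#include <sys/cdefs.h>

/* The parameter is intentionally ignored; __unused suppresses the warning. */
static __inline int
example_stub(int level __unused)
{
	return (0);
}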