author     jake <jake@FreeBSD.org>  2002-12-22 20:50:23 +0000
committer  jake <jake@FreeBSD.org>  2002-12-22 20:50:23 +0000
commit     02d82494717cab163d9865b5886923be8b7412ff (patch)
tree       d45e59df844abe5565f5ea76c12ac842b67521a1
parent     a658b7f4f99cc87b1f1118c9f2a7944afb3998f5 (diff)
- Add a spin lock to single thread cache invalidation and tlb flush ipis,
  which allows ipis to be sent outside of Giant.
- Remove the ap boot mutex, which is unused.
-rw-r--r--  sys/kern/subr_witness.c           |  3
-rw-r--r--  sys/sparc64/include/smp.h         | 18
-rw-r--r--  sys/sparc64/sparc64/cache.c       |  6
-rw-r--r--  sys/sparc64/sparc64/mp_machdep.c  |  6
-rw-r--r--  sys/sparc64/sparc64/tlb.c         |  8
5 files changed, 22 insertions, 19 deletions
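
The core of the change is visible in the smp.h and mp_machdep.c hunks below: a single
spin mutex, ipi_mtx, now serializes use of the shared IPI argument blocks. The sending
CPU takes the lock before filling in the arguments and posting the IPI, and ipi_wait()
releases it only after every target CPU has cleared its bit from the acknowledgement
mask, so the arguments can never be overwritten while a remote CPU is still reading
them. The following userspace model (pthreads and C11 atomics; every name in it is an
illustrative stand-in, none of it is kernel API) sketches that protocol under those
assumptions:

/*
 * Userspace model of the new protocol.  ipi_mtx plays the role of the new
 * spin lock, ipi_args the shared argument block, the worker threads the
 * target CPUs.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	NTARGETS	4

struct ipi_args {
	atomic_uint	mask;		/* one bit per target still busy */
	unsigned long	pa;		/* payload, e.g. a physical address */
};

static pthread_mutex_t ipi_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct ipi_args ipi_args;	/* single, shared argument block */

/*
 * Sender: take the lock, fill in the shared block, "post the IPI".
 * A second concurrent sender blocks here until ipi_wait() unlocks,
 * which is exactly the single-threading the commit adds.
 */
static struct ipi_args *
ipi_send(unsigned long pa, unsigned int targets)
{
	pthread_mutex_lock(&ipi_mtx);
	atomic_store(&ipi_args.mask, targets);
	ipi_args.pa = pa;
	/* the kernel would call cpu_ipi_selected() here */
	return (&ipi_args);
}

/*
 * Sender: spin until every target has acknowledged, then release the lock.
 * Only now may the argument block be reused.
 */
static void
ipi_wait(struct ipi_args *cookie)
{
	if (cookie == NULL)
		return;
	while (atomic_load(&cookie->mask) != 0)
		;			/* models the kernel's spin wait */
	pthread_mutex_unlock(&ipi_mtx);
}

/* Target: consume the arguments, then clear our bit to acknowledge. */
static void *
ipi_handler(void *arg)
{
	unsigned int bit = 1u << (unsigned int)(uintptr_t)arg;

	/* ... act on ipi_args.pa ... */
	atomic_fetch_and(&ipi_args.mask, ~bit);
	return (NULL);
}

int
main(void)
{
	pthread_t targets[NTARGETS];
	struct ipi_args *cookie;
	uintptr_t i;

	cookie = ipi_send(0x1000, (1u << NTARGETS) - 1);
	for (i = 0; i < NTARGETS; i++)
		pthread_create(&targets[i], NULL, ipi_handler, (void *)i);
	ipi_wait(cookie);
	for (i = 0; i < NTARGETS; i++)
		pthread_join(targets[i], NULL);
	printf("all targets acknowledged\n");
	return (0);
}

Build with cc -pthread. The property the model preserves is that the lock is held
across the entire window in which the shared arguments are live, and that the unlock
lives in ipi_wait() rather than in the sender.
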
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 05d279a..15e2948 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -237,6 +237,9 @@ static struct witness_order_list_entry order_lists[] = {
#if defined(__i386__) && defined(APIC_IO)
{ "tlb", &lock_class_mtx_spin },
#endif
+#ifdef __sparc64__
+ { "ipi", &lock_class_mtx_spin },
+#endif
#endif
{ "clk", &lock_class_mtx_spin },
{ "mutex profiling lock", &lock_class_mtx_spin },
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index b892976..270244b 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -84,9 +84,9 @@ void ipi_all_but_self(u_int ipi);
vm_offset_t mp_tramp_alloc(void);
-extern struct ipi_cache_args ipi_cache_args;
-extern struct ipi_level_args ipi_level_args;
-extern struct ipi_tlb_args ipi_tlb_args;
+extern struct mtx ipi_mtx;
+extern struct ipi_cache_args ipi_cache_args;
+extern struct ipi_tlb_args ipi_tlb_args;
extern vm_offset_t mp_tramp;
extern char *mp_tramp_code;
@@ -105,6 +105,8 @@ extern char tl_ipi_tlb_range_demap[];
#ifdef SMP
+#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
+
static __inline void *
ipi_dcache_page_inval(vm_offset_t pa)
{
@@ -113,6 +115,7 @@ ipi_dcache_page_inval(vm_offset_t pa)
if (smp_cpus == 1)
return (NULL);
ica = &ipi_cache_args;
+ mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
ica->ica_pa = pa;
cpu_ipi_selected(PCPU_GET(other_cpus), 0,
@@ -128,6 +131,7 @@ ipi_icache_page_inval(vm_offset_t pa)
if (smp_cpus == 1)
return (NULL);
ica = &ipi_cache_args;
+ mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
ica->ica_pa = pa;
cpu_ipi_selected(PCPU_GET(other_cpus), 0,
@@ -135,8 +139,6 @@ ipi_icache_page_inval(vm_offset_t pa)
return (&ica->ica_mask);
}
-#ifdef _MACHINE_PMAP_H_
-
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
@@ -148,6 +150,7 @@ ipi_tlb_context_demap(struct pmap *pm)
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
+ mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
@@ -166,6 +169,7 @@ ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
+ mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
ita->ita_va = va;
@@ -184,6 +188,7 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
+ mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
ita->ita_start = start;
@@ -201,10 +206,11 @@ ipi_wait(void *cookie)
atomic_clear_int(mask, PCPU_GET(cpumask));
while (*mask != 0)
;
+ mtx_unlock_spin(&ipi_mtx);
}
}
-#endif
+#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
#else
diff --git a/sys/sparc64/sparc64/cache.c b/sys/sparc64/sparc64/cache.c
index 1ef18f3..ef17c5d 100644
--- a/sys/sparc64/sparc64/cache.c
+++ b/sys/sparc64/sparc64/cache.c
@@ -155,6 +155,8 @@
#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -262,7 +264,6 @@ dcache_page_inval(vm_offset_t pa)
return;
PMAP_STATS_INC(dcache_npage_inval);
target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
- critical_enter();
cookie = ipi_dcache_page_inval(pa);
for (addr = 0; addr < cache.dc_size; addr += cache.dc_linesize) {
PMAP_STATS_INC(dcache_npage_inval_line);
@@ -276,7 +277,6 @@ dcache_page_inval(vm_offset_t pa)
}
}
ipi_wait(cookie);
- critical_exit();
}
void
@@ -294,7 +294,6 @@ icache_page_inval(vm_offset_t pa)
return;
PMAP_STATS_INC(icache_npage_inval);
target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
- critical_enter();
cookie = ipi_icache_page_inval(pa);
for (addr = 0; addr < cache.ic_size; addr += cache.ic_linesize) {
PMAP_STATS_INC(icache_npage_inval_line);
@@ -309,7 +308,6 @@ icache_page_inval(vm_offset_t pa)
}
}
ipi_wait(cookie);
- critical_exit();
}
diff --git a/sys/sparc64/sparc64/mp_machdep.c b/sys/sparc64/sparc64/mp_machdep.c
index dba9717..f6d147d 100644
--- a/sys/sparc64/sparc64/mp_machdep.c
+++ b/sys/sparc64/sparc64/mp_machdep.c
@@ -101,9 +101,9 @@ struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0 };
struct ipi_cache_args ipi_cache_args;
struct ipi_tlb_args ipi_tlb_args;
-vm_offset_t mp_tramp;
+struct mtx ipi_mtx;
-static struct mtx ap_boot_mtx;
+vm_offset_t mp_tramp;
u_int mp_boot_mid;
@@ -224,7 +224,7 @@ cpu_mp_start(void)
u_int mid;
u_long s;
- mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+ mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);
intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c
index b5e53cf..e242819 100644
--- a/sys/sparc64/sparc64/tlb.c
+++ b/sys/sparc64/sparc64/tlb.c
@@ -30,6 +30,8 @@
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/pcpu.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/smp.h>
#include <vm/vm.h>
@@ -66,7 +68,6 @@ tlb_context_demap(struct pmap *pm)
* protect the target processor from entering the IPI handler with
* the lock held.
*/
- critical_enter();
cookie = ipi_tlb_context_demap(pm);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
@@ -78,7 +79,6 @@ tlb_context_demap(struct pmap *pm)
intr_restore(s);
}
ipi_wait(cookie);
- critical_exit();
}
void
@@ -88,7 +88,6 @@ tlb_page_demap(struct pmap *pm, vm_offset_t va)
void *cookie;
u_long s;
- critical_enter();
cookie = ipi_tlb_page_demap(pm, va);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
@@ -105,7 +104,6 @@ tlb_page_demap(struct pmap *pm, vm_offset_t va)
intr_restore(s);
}
ipi_wait(cookie);
- critical_exit();
}
void
@@ -116,7 +114,6 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
u_long flags;
u_long s;
- critical_enter();
cookie = ipi_tlb_range_demap(pm, start, end);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
@@ -135,7 +132,6 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
intr_restore(s);
}
ipi_wait(cookie);
- critical_exit();
}
void
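
On the caller side (cache.c and tlb.c above), the explicit critical_enter()/
critical_exit() bracketing goes away, presumably because acquiring a spin mutex
already keeps the sending CPU pinned (interrupts disabled, no preemption) for as long
as the lock is held, so holding ipi_mtx across the window is sufficient. Condensed
from the tlb_page_demap() hunk above, and not a standalone compilation unit, the
caller shape after this change looks like:

void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	void *cookie;

	/* Returns with ipi_mtx held, or NULL if no other CPU uses pm. */
	cookie = ipi_tlb_page_demap(pm, va);

	/* ... demap va from the local TLB if pm is active on this CPU ... */

	/* Spins until every target clears its bit, then drops ipi_mtx. */
	ipi_wait(cookie);
}
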