author     jhb <jhb@FreeBSD.org>  2006-05-01 21:36:47 +0000
committer  jhb <jhb@FreeBSD.org>  2006-05-01 21:36:47 +0000
commit     ca8d347695197fe6855a628e0325e9ab16820d5f (patch)
tree       3a0d26aaaeb23a38ebdd2566fd5bdbc8c7bde43f /sys/amd64
parent     4db7dec298d4cc5de09e6704e4b98919f21bacaf (diff)
Add a new 'pmap_invalidate_cache()' to flush the CPU caches via the
wbinvd() instruction.  This includes a new IPI so that all CPU caches
on all CPUs are flushed for the SMP case.

MFC after:	1 month
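The usual consumer of an interface like this is code that changes the
memory type of a mapping and must ensure that no stale or dirty cache
lines survive the switch.  A minimal sketch of such a caller (the
function name and the surrounding logic are hypothetical, not part of
this commit):

    /*
     * Hypothetical caller: after remapping a range as uncacheable,
     * write back and invalidate every CPU cache so that no dirty
     * lines can later be evicted over the now-uncached memory.
     */
    static void
    example_make_uncacheable(vm_offset_t va, vm_size_t size)
    {
            /* ... set PCD/PAT bits in the PTEs covering [va, va + size) ... */
            pmap_invalidate_range(kernel_pmap, va, va + size);
            pmap_invalidate_cache();        /* new in this commit */
    }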
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/apic_vector.S  19
-rw-r--r--  sys/amd64/amd64/mp_machdep.c    8
-rw-r--r--  sys/amd64/amd64/pmap.c         31
-rw-r--r--  sys/amd64/include/apicvar.h     5
-rw-r--r--  sys/amd64/include/pmap.h        1
-rw-r--r--  sys/amd64/include/smp.h         2
6 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 96018f3..ab781ca 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -171,6 +171,25 @@ IDTVEC(invlrng)
iretq
/*
+ * Invalidate cache.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(invlcache)
+ pushq %rax
+
+ wbinvd
+
+ movq lapic, %rax
+ movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
+
+ lock
+ incl smp_tlb_wait
+
+ popq %rax
+ iretq
+
+/*
* Handler for IPIs sent via the per-cpu IPI bitmap.
*/
.text
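Rendered as C-like pseudocode, the handler added above does the
following on each CPU that receives the IPI (an explanatory sketch
only; the real handler is the assembly shown, and LA_EOI is the End Of
Interrupt register offset within the local APIC):

    /* What IDTVEC(invlcache) does, step by step. */
    void
    invlcache_handler_sketch(void)
    {
            wbinvd();                 /* write back and invalidate all caches */
            lapic->eoi = 0;           /* "movl $0, LA_EOI(%rax)": ack the IPI */
            atomic_add_int(&smp_tlb_wait, 1); /* "lock incl": signal initiator */
            /* iretq then resumes the interrupted context */
    }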
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 226a95c..b2485c6 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -875,6 +875,14 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
void
+smp_cache_flush(void)
+{
+
+ if (smp_started)
+ smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
+}
+
+void
smp_invltlb(void)
{
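smp_tlb_shootdown() (already present in this file) turns the vector
into a rendezvous: it posts the IPI to every other CPU and spins until
each one has incremented smp_tlb_wait.  A simplified model of that
protocol, assuming mp_ncpus counts the participating CPUs (an
illustrative sketch, not the verbatim kernel code):

    static void
    shootdown_sketch(u_int vector)
    {
            mtx_assert(&smp_ipi_mtx, MA_OWNED); /* one shootdown at a time */
            atomic_store_rel_int(&smp_tlb_wait, 0);
            ipi_all_but_self(vector);           /* e.g. IPI_INVLCACHE */
            while (smp_tlb_wait < mp_ncpus - 1)
                    ia32_pause();               /* handlers do "lock incl" */
    }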
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fd5e21d..238f0b4 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -732,6 +732,30 @@ pmap_invalidate_all(pmap_t pmap)
else
critical_exit();
}
+
+void
+pmap_invalidate_cache(void)
+{
+
+ if (smp_started) {
+ if (!(read_rflags() & PSL_I))
+ panic("%s: interrupts disabled", __func__);
+ mtx_lock_spin(&smp_ipi_mtx);
+ } else
+ critical_enter();
+ /*
+ * We need to disable interrupt preemption but MUST NOT have
+ * interrupts disabled here.
+ * XXX we may need to hold schedlock to get a coherent pm_active
+ * XXX critical sections disable interrupts again
+ */
+ wbinvd();
+ smp_cache_flush();
+ if (smp_started)
+ mtx_unlock_spin(&smp_ipi_mtx);
+ else
+ critical_exit();
+}
#else /* !SMP */
/*
* Normal, non-SMP, invalidation functions.
@@ -762,6 +786,13 @@ pmap_invalidate_all(pmap_t pmap)
if (pmap == kernel_pmap || pmap->pm_active)
invltlb();
}
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+ wbinvd();
+}
#endif /* !SMP */
/*
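The SMP version above enforces a calling convention worth spelling
out: the initiator must enter with interrupts enabled.  If CPU A
called this with interrupts disabled while CPU B already held
smp_ipi_mtx and was waiting for A to acknowledge its IPI, A would spin
on the mutex forever without ever servicing B's interrupt.  An
illustrative pair of calling contexts (hypothetical functions, using
amd64's intr_disable()/intr_restore() to demonstrate the panic path):

    void
    legal_context(void)
    {
            /* Interrupts enabled, no spin locks held: may IPI other CPUs. */
            pmap_invalidate_cache();
    }

    void
    illegal_context(void)
    {
            register_t rflags;

            rflags = intr_disable();
            pmap_invalidate_cache();        /* panics on SMP: PSL_I is clear */
            intr_restore(rflags);
    }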
diff --git a/sys/amd64/include/apicvar.h b/sys/amd64/include/apicvar.h
index c87dc7e..98cf3a3 100644
--- a/sys/amd64/include/apicvar.h
+++ b/sys/amd64/include/apicvar.h
@@ -118,8 +118,9 @@
#define IPI_INVLTLB (APIC_IPI_INTS + 1) /* TLB Shootdown IPIs */
#define IPI_INVLPG (APIC_IPI_INTS + 2)
#define IPI_INVLRNG (APIC_IPI_INTS + 3)
+#define IPI_INVLCACHE (APIC_IPI_INTS + 4)
/* Vector to handle bitmap based IPIs */
-#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 5)
+#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 6)
/* IPIs handled by IPI_BITMAPED_VECTOR (XXX ups is there a better place?) */
#define IPI_AST 0 /* Generate software trap. */
@@ -127,7 +128,7 @@
#define IPI_BITMAP_LAST IPI_PREEMPT
#define IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST)
-#define IPI_STOP (APIC_IPI_INTS + 6) /* Stop CPU until restarted. */
+#define IPI_STOP (APIC_IPI_INTS + 7) /* Stop CPU until restarted. */
/*
* The spurious interrupt can share the priority class with the IPIs since
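After this change the fixed IPI vectors line up as follows; the values
are read directly from the diff (IPI_INVLCACHE slots in at +4 and the
two vectors after it shift up by one):

    #define IPI_INVLTLB       (APIC_IPI_INTS + 1) /* TLB shootdown IPIs */
    #define IPI_INVLPG        (APIC_IPI_INTS + 2)
    #define IPI_INVLRNG       (APIC_IPI_INTS + 3)
    #define IPI_INVLCACHE     (APIC_IPI_INTS + 4) /* new */
    #define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 6) /* was + 5 */
    #define IPI_STOP          (APIC_IPI_INTS + 7) /* was + 6 */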
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 49e3139..0a774c7 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -309,6 +309,7 @@ void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_invalidate_page(pmap_t, vm_offset_t);
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
+void pmap_invalidate_cache(void);
#endif /* _KERNEL */
diff --git a/sys/amd64/include/smp.h b/sys/amd64/include/smp.h
index 94a7022..5a2d3aa 100644
--- a/sys/amd64/include/smp.h
+++ b/sys/amd64/include/smp.h
@@ -40,6 +40,7 @@ inthand_t
IDTVEC(invltlb), /* TLB shootdowns - global */
IDTVEC(invlpg), /* TLB shootdowns - 1 page */
IDTVEC(invlrng), /* TLB shootdowns - page range */
+ IDTVEC(invlcache), /* Write back and invalidate cache */
IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */
IDTVEC(cpustop), /* CPU stops & waits to be restarted */
IDTVEC(rendezvous); /* handle CPU rendezvous */
@@ -56,6 +57,7 @@ void ipi_bitmap_handler(struct trapframe frame);
u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void mp_topology(void);
+void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(u_int mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
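Putting the pieces together, the control flow added by this commit,
traced across the files above:

    /*
     * Cache-flush shootdown, end to end:
     *
     * pmap_invalidate_cache()                  amd64/amd64/pmap.c
     *   wbinvd()                               flush this CPU's caches
     *   smp_cache_flush()                      amd64/amd64/mp_machdep.c
     *     smp_tlb_shootdown(IPI_INVLCACHE, 0, 0)
     *       IDTVEC(invlcache)                  amd64/amd64/apic_vector.S:
     *                                          wbinvd on every other CPU,
     *                                          then "lock incl smp_tlb_wait"
     */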