author		jhb <jhb@FreeBSD.org>	2007-05-02 15:40:15 +0000
committer	jhb <jhb@FreeBSD.org>	2007-05-02 15:40:15 +0000
commit		c39819f05f7d5fa2a92a821ea0f6615b8c0f8e11 (patch)
tree		5bcba858b14b893278efecd7f604d0024dba27ee
parent		ced84431d102c26064a28c6bcb17ac27b82db744 (diff)
download	FreeBSD-src-c39819f05f7d5fa2a92a821ea0f6615b8c0f8e11.zip
		FreeBSD-src-c39819f05f7d5fa2a92a821ea0f6615b8c0f8e11.tar.gz
MFC: Add 'pmap_invalidate_cache()'.
-rw-r--r--	sys/amd64/amd64/apic_vector.S	19
-rw-r--r--	sys/amd64/amd64/mp_machdep.c	13
-rw-r--r--	sys/amd64/amd64/pmap.c		31
-rw-r--r--	sys/amd64/include/apicvar.h	5
-rw-r--r--	sys/amd64/include/pmap.h	1
-rw-r--r--	sys/amd64/include/smp.h		2
-rw-r--r--	sys/i386/i386/apic_vector.s	33
-rw-r--r--	sys/i386/i386/mp_machdep.c	15
-rw-r--r--	sys/i386/i386/pmap.c		17
-rw-r--r--	sys/i386/include/apicvar.h	7
-rw-r--r--	sys/i386/include/pmap.h		1
-rw-r--r--	sys/i386/include/smp.h		2
12 files changed, 139 insertions, 7 deletions
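This MFC brings pmap_invalidate_cache() to the stable branch: a routine that executes wbinvd on every CPU by reusing the existing TLB-shootdown IPI machinery, via a new IPI_INVLCACHE vector and an invlcache handler on both amd64 and i386. The snippet below is a hypothetical caller, not part of this commit, illustrating the kind of situation the primitive serves: after changing a kernel mapping's caching attributes, both the TLBs and the CPU data caches have to be flushed so no stale cached lines survive.

	/*
	 * Hypothetical caller (not part of this commit).  After the PTEs
	 * covering [va, va + size) have been switched to uncacheable (PG_N),
	 * flush the TLBs and then write back and invalidate the caches on
	 * every CPU.
	 */
	static void
	example_remap_uncacheable(vm_offset_t va, vm_size_t size)
	{

		/* ... set PG_N on the page table entries for the range ... */

		pmap_invalidate_range(kernel_pmap, va, va + size);
		pmap_invalidate_cache();
	}
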
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 42cb03f..307de9f 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -222,6 +222,25 @@ IDTVEC(invlrng)
iretq
/*
+ * Invalidate cache.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(invlcache)
+ pushq %rax
+
+ wbinvd
+
+ movq lapic, %rax
+ movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
+
+ lock
+ incl smp_tlb_wait
+
+ popq %rax
+ iretq
+
+/*
* Handler for IPIs sent via the per-cpu IPI bitmap.
*/
.text
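
The new amd64 handler above does three things: it writes back and invalidates this CPU's caches (wbinvd), acknowledges the interrupt by writing the local APIC's EOI register, and atomically increments smp_tlb_wait so the CPU that sent the IPI can tell when every target has finished. A rough C rendering of the same sequence, for readability only (the real handler is the assembly just added, which must also preserve %rax):

	/* Illustrative C equivalent of IDTVEC(invlcache); not compiled into the kernel. */
	static void
	invlcache_handler_sketch(void)
	{

		wbinvd();				/* write back and invalidate local caches */
		lapic_eoi();				/* signal End Of Interrupt to the local APIC */
		atomic_add_int(&smp_tlb_wait, 1);	/* report completion to the initiator */
	}
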
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 77d3922..ed71bad 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -318,7 +318,10 @@ cpu_mp_start(void)
setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);
-
+
+ /* Install an inter-CPU IPI for cache invalidation. */
+ setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);
+
/* Install an inter-CPU IPI for all-CPU rendezvous */
setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
@@ -850,6 +853,14 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offse
}
void
+smp_cache_flush(void)
+{
+
+ if (smp_started)
+ smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
+}
+
+void
smp_invltlb(void)
{
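
smp_cache_flush() above simply routes IPI_INVLCACHE through the existing smp_tlb_shootdown() path with no address arguments. Judging from how the handler counts completions in smp_tlb_wait, and from the fact that the amd64 pmap_invalidate_cache() below takes smp_ipi_mtx before calling it, the shootdown path is expected to broadcast the vector to the other CPUs and spin until all of them have checked in. The sketch below is an approximation of that wait loop under those assumptions, not a copy of the real routine:

	/*
	 * Approximate shape of the shootdown/wait sequence assumed by
	 * smp_cache_flush(); the real smp_tlb_shootdown() may differ in detail.
	 */
	static void
	shootdown_sketch(u_int vector)
	{
		int ncpu;

		ncpu = mp_ncpus - 1;			/* all CPUs except the caller */
		if (ncpu < 1)
			return;
		mtx_assert(&smp_ipi_mtx, MA_OWNED);	/* caller holds the IPI lock */
		smp_tlb_wait = 0;
		ipi_all_but_self(vector);		/* send IPI_INVLCACHE to the others */
		while (smp_tlb_wait < ncpu)
			ia32_pause();			/* spin until every target has incremented */
	}
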
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 765705f..bafe852 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -738,6 +738,30 @@ pmap_invalidate_all(pmap_t pmap)
else
critical_exit();
}
+
+void
+pmap_invalidate_cache(void)
+{
+
+ if (smp_started) {
+ if (!(read_rflags() & PSL_I))
+ panic("%s: interrupts disabled", __func__);
+ mtx_lock_spin(&smp_ipi_mtx);
+ } else
+ critical_enter();
+ /*
+ * We need to disable interrupt preemption but MUST NOT have
+ * interrupts disabled here.
+ * XXX we may need to hold schedlock to get a coherent pm_active
+ * XXX critical sections disable interrupts again
+ */
+ wbinvd();
+ smp_cache_flush();
+ if (smp_started)
+ mtx_unlock_spin(&smp_ipi_mtx);
+ else
+ critical_exit();
+}
#else /* !SMP */
/*
* Normal, non-SMP, invalidation functions.
@@ -768,6 +792,13 @@ pmap_invalidate_all(pmap_t pmap)
if (pmap == kernel_pmap || pmap->pm_active)
invltlb();
}
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+ wbinvd();
+}
#endif /* !SMP */
/*
diff --git a/sys/amd64/include/apicvar.h b/sys/amd64/include/apicvar.h
index f134950..624415c 100644
--- a/sys/amd64/include/apicvar.h
+++ b/sys/amd64/include/apicvar.h
@@ -118,8 +118,9 @@
#define IPI_INVLTLB (APIC_IPI_INTS + 1) /* TLB Shootdown IPIs */
#define IPI_INVLPG (APIC_IPI_INTS + 2)
#define IPI_INVLRNG (APIC_IPI_INTS + 3)
+#define IPI_INVLCACHE (APIC_IPI_INTS + 4)
/* Vector to handle bitmap based IPIs */
-#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 5)
+#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 6)
/* IPIs handled by IPI_BITMAPED_VECTOR (XXX ups is there a better place?) */
#define IPI_AST 0 /* Generate software trap. */
@@ -127,7 +128,7 @@
#define IPI_BITMAP_LAST IPI_PREEMPT
#define IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST)
-#define IPI_STOP (APIC_IPI_INTS + 6) /* Stop CPU until restarted. */
+#define IPI_STOP (APIC_IPI_INTS + 7) /* Stop CPU until restarted. */
/*
* The spurious interrupt can share the priority class with the IPIs since
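
Slotting IPI_INVLCACHE in at APIC_IPI_INTS + 4 pushes IPI_BITMAP_VECTOR and IPI_STOP up by one on amd64 (and, in the i386 header below, IPI_LAZYPMAP as well). Anything that hard-codes those vector numbers has to be renumbered in lockstep; hypothetical compile-time checks like the following (not added by this commit) would catch a mismatch:

	/* Hypothetical sanity checks; the commit itself does not add these. */
	CTASSERT(IPI_INVLCACHE == APIC_IPI_INTS + 4);
	CTASSERT(IPI_BITMAP_VECTOR == APIC_IPI_INTS + 6);
	CTASSERT(IPI_STOP == APIC_IPI_INTS + 7);
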
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index bd2cc35..d1a9411 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -299,6 +299,7 @@ void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_invalidate_page(pmap_t, vm_offset_t);
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
+void pmap_invalidate_cache(void);
#endif /* _KERNEL */
diff --git a/sys/amd64/include/smp.h b/sys/amd64/include/smp.h
index 2c63dc9..2e9f435 100644
--- a/sys/amd64/include/smp.h
+++ b/sys/amd64/include/smp.h
@@ -40,6 +40,7 @@ inthand_t
IDTVEC(invltlb), /* TLB shootdowns - global */
IDTVEC(invlpg), /* TLB shootdowns - 1 page */
IDTVEC(invlrng), /* TLB shootdowns - page range */
+ IDTVEC(invlcache), /* Write back and invalidate cache */
IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */
IDTVEC(cpustop), /* CPU stops & waits to be restarted */
IDTVEC(rendezvous); /* handle CPU rendezvous */
@@ -55,6 +56,7 @@ void ipi_bitmap_handler(struct clockframe frame);
u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void mp_topology(void);
+void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(u_int mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 2998953..294e2de 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -264,6 +264,39 @@ IDTVEC(invlrng)
iret
/*
+ * Invalidate cache.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(invlcache)
+ pushl %eax
+ pushl %ds
+ movl $KDSEL, %eax /* Kernel data selector */
+ movl %eax, %ds
+
+#ifdef COUNT_IPIS
+ pushl %fs
+ movl $KPSEL, %eax /* Private space selector */
+ movl %eax, %fs
+ movl PCPU(CPUID), %eax
+ popl %fs
+ movl ipi_invlcache_counts(,%eax,4),%eax
+ incl (%eax)
+#endif
+
+ wbinvd
+
+ movl lapic, %eax
+ movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
+
+ lock
+ incl smp_tlb_wait
+
+ popl %ds
+ popl %eax
+ iret
+
+/*
* Handler for IPIs sent via the per-cpu IPI bitmap.
*/
.text
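
The i386 handler mirrors the amd64 one, but it must also reload %ds with the kernel data selector before touching kernel memory and, when the kernel is built with COUNT_IPIS, it temporarily loads %fs with the per-CPU selector to read PCPU(CPUID) and bump this CPU's invlcache IPI counter (ipi_invlcache_counts[], declared in mp_machdep.c below). In C, that statistics step amounts to roughly the following sketch:

	#ifdef COUNT_IPIS
	/* Sketch of the COUNT_IPIS bookkeeping the assembly above performs. */
	static __inline void
	count_invlcache_ipi(void)
	{

		(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
	}
	#endif
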
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 6f4ea4c..45323db 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -191,6 +191,7 @@ static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
+u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
u_long *ipi_lazypmap_counts[MAXCPU];
#endif
@@ -386,7 +387,11 @@ cpu_mp_start(void)
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
setidt(IPI_INVLRNG, IDTVEC(invlrng),
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-
+
+ /* Install an inter-CPU IPI for cache invalidation. */
+ setidt(IPI_INVLCACHE, IDTVEC(invlcache),
+ SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+
/* Install an inter-CPU IPI for lazy pmap release */
setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
@@ -1064,6 +1069,14 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offse
}
void
+smp_cache_flush(void)
+{
+
+ if (smp_started)
+ smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
+}
+
+void
smp_invltlb(void)
{
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index ce9090e..6a2be98 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -591,6 +591,16 @@ pmap_invalidate_all(pmap_t pmap)
}
sched_unpin();
}
+
+void
+pmap_invalidate_cache(void)
+{
+
+ sched_pin();
+ wbinvd();
+ smp_cache_flush();
+ sched_unpin();
+}
#else /* !SMP */
/*
* Normal, non-SMP, 486+ invalidation functions.
@@ -621,6 +631,13 @@ pmap_invalidate_all(pmap_t pmap)
if (pmap == kernel_pmap || pmap->pm_active)
invltlb();
}
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+ wbinvd();
+}
#endif /* !SMP */
/*
diff --git a/sys/i386/include/apicvar.h b/sys/i386/include/apicvar.h
index 1f9755a..f74c578 100644
--- a/sys/i386/include/apicvar.h
+++ b/sys/i386/include/apicvar.h
@@ -116,9 +116,10 @@
#define IPI_INVLTLB (APIC_IPI_INTS + 1) /* TLB Shootdown IPIs */
#define IPI_INVLPG (APIC_IPI_INTS + 2)
#define IPI_INVLRNG (APIC_IPI_INTS + 3)
-#define IPI_LAZYPMAP (APIC_IPI_INTS + 4) /* Lazy pmap release. */
+#define IPI_INVLCACHE (APIC_IPI_INTS + 4)
+#define IPI_LAZYPMAP (APIC_IPI_INTS + 5) /* Lazy pmap release. */
/* Vector to handle bitmap based IPIs */
-#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 5)
+#define IPI_BITMAP_VECTOR (APIC_IPI_INTS + 6)
/* IPIs handled by IPI_BITMAPED_VECTOR (XXX ups is there a better place?) */
#define IPI_AST 0 /* Generate software trap. */
@@ -126,7 +127,7 @@
#define IPI_BITMAP_LAST IPI_PREEMPT
#define IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST)
-#define IPI_STOP (APIC_IPI_INTS + 6) /* Stop CPU until restarted. */
+#define IPI_STOP (APIC_IPI_INTS + 7) /* Stop CPU until restarted. */
/*
* The spurious interrupt can share the priority class with the IPIs since
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 0324658..1190809 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -366,6 +366,7 @@ void pmap_set_pg(void);
void pmap_invalidate_page(pmap_t, vm_offset_t);
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
+void pmap_invalidate_cache(void);
#endif /* _KERNEL */
diff --git a/sys/i386/include/smp.h b/sys/i386/include/smp.h
index 4c9ae76..095fa8a 100644
--- a/sys/i386/include/smp.h
+++ b/sys/i386/include/smp.h
@@ -55,6 +55,7 @@ inthand_t
IDTVEC(invltlb), /* TLB shootdowns - global */
IDTVEC(invlpg), /* TLB shootdowns - 1 page */
IDTVEC(invlrng), /* TLB shootdowns - page range */
+ IDTVEC(invlcache), /* Write back and invalidate cache */
IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */
IDTVEC(cpustop), /* CPU stops & waits to be restarted */
IDTVEC(rendezvous), /* handle CPU rendezvous */
@@ -71,6 +72,7 @@ void ipi_bitmap_handler(struct clockframe frame);
u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void mp_topology(void);
+void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(u_int mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);