summary | refs | log | tree | commit | diff | stats
path: root/sys/sparc64
diff options
context:
space:
mode:
author: marius <marius@FreeBSD.org> 2008-09-08 20:38:48 +0000
committer: marius <marius@FreeBSD.org> 2008-09-08 20:38:48 +0000
commit: 45e57c6bd280ab3f6744865db7ee4888c8b2e08b (patch)
tree: 6b134e49a29b46f74cd02e388d675405a35534fa /sys/sparc64
parent: 579a51f222627a676ded829197395af7b2a25f45 (diff)
download: FreeBSD-src-45e57c6bd280ab3f6744865db7ee4888c8b2e08b.zip
download: FreeBSD-src-45e57c6bd280ab3f6744865db7ee4888c8b2e08b.tar.gz
USIII and beyond CPUs have stricter requirements when it comes
to synchronization needed after stores to internal ASIs in order to make side-effects visible. This mainly requires the MEMBAR #Sync after such stores to be replaced with a FLUSH. We use KERNBASE as the address to FLUSH as it is guaranteed to not trap. Actually, the USII synchronization rules also already require a FLUSH in pretty much all of the cases changed. We're also hitting an additional USIII synchronization rule which requires stores to AA_IMMU_SFSR to be immediately followed by a DONE, FLUSH or RETRY. Doing so triggers a RED state exception though so leave the MEMBAR #Sync. Linux apparently also has gotten away with doing the same for quite some time now, apart from the fact that it's not clear to me why we need to clear the valid bit from the SFSR in the first place. Reviewed by: nwhitehorn
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/sparc64/exception.S    | 13
-rw-r--r--  sys/sparc64/sparc64/mp_exception.S | 12
-rw-r--r--  sys/sparc64/sparc64/pmap.c         |  4
-rw-r--r--  sys/sparc64/sparc64/support.S      |  3
-rw-r--r--  sys/sparc64/sparc64/swtch.S        |  3
-rw-r--r--  sys/sparc64/sparc64/tlb.c          |  7
6 files changed, 29 insertions, 13 deletions
diff --git a/sys/sparc64/sparc64/exception.S b/sys/sparc64/sparc64/exception.S
index 1e097e9..5f81c04 100644
--- a/sys/sparc64/sparc64/exception.S
+++ b/sys/sparc64/sparc64/exception.S
@@ -498,6 +498,11 @@ END(rsf_fatal)
wr %g0, ASI_IMMU, %asi
rdpr %tpc, %g3
ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
+ /*
+ * XXX in theory, a store to AA_IMMU_SFSR must be immediately
+ * followed by a DONE, FLUSH or RETRY for USIII. In practice,
+ * this triggers a RED state exception though.
+ */
stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
membar #Sync
ba %xcc, tl0_sfsr_trap
@@ -716,8 +721,9 @@ ENTRY(tl0_immu_miss_trap)
* Put back the contents of the tag access register, in case we
* faulted.
*/
+ sethi %hi(KERNBASE), %g2
stxa %g1, [%g0 + AA_IMMU_TAR] %asi
- membar #Sync
+ flush %g2
/*
* Switch to alternate globals.
@@ -1213,6 +1219,11 @@ END(tl0_fp_restore)
wr %g0, ASI_IMMU, %asi
rdpr %tpc, %g3
ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
+ /*
+ * XXX in theory, a store to AA_IMMU_SFSR must be immediately
+ * followed by a DONE, FLUSH or RETRY for USIII. In practice,
+ * this triggers a RED state exception though.
+ */
stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
membar #Sync
ba %xcc, tl1_insn_exceptn_trap
diff --git a/sys/sparc64/sparc64/mp_exception.S b/sys/sparc64/sparc64/mp_exception.S
index fbb1c25..3e53377 100644
--- a/sys/sparc64/sparc64/mp_exception.S
+++ b/sys/sparc64/sparc64/mp_exception.S
@@ -199,9 +199,10 @@ ENTRY(tl_ipi_tlb_page_demap)
ldx [%g5 + ITA_VA], %g2
or %g2, %g3, %g2
+ sethi %hi(KERNBASE), %g3
stxa %g0, [%g2] ASI_DMMU_DEMAP
stxa %g0, [%g2] ASI_IMMU_DEMAP
- membar #Sync
+ flush %g3
IPI_DONE(%g5, %g1, %g2, %g3)
retry
@@ -234,13 +235,13 @@ ENTRY(tl_ipi_tlb_range_demap)
ldx [%g5 + ITA_START], %g1
ldx [%g5 + ITA_END], %g2
- set PAGE_SIZE, %g6
-
1: or %g1, %g3, %g4
+ sethi %hi(KERNBASE), %g6
stxa %g0, [%g4] ASI_DMMU_DEMAP
stxa %g0, [%g4] ASI_IMMU_DEMAP
- membar #Sync
+ flush %g6
+ set PAGE_SIZE, %g6
add %g1, %g6, %g1
cmp %g1, %g2
blt,a,pt %xcc, 1b
@@ -265,9 +266,10 @@ ENTRY(tl_ipi_tlb_context_demap)
#endif
mov TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, %g1
+ sethi %hi(KERNBASE), %g3
stxa %g0, [%g1] ASI_DMMU_DEMAP
stxa %g0, [%g1] ASI_IMMU_DEMAP
- membar #Sync
+ flush %g3
IPI_DONE(%g5, %g1, %g2, %g3)
retry
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index d3126fe..be3226a 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -556,7 +556,7 @@ pmap_map_tsb(void)
* FP block operations in the kernel).
*/
stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
- membar(Sync);
+ flush(KERNBASE);
intr_restore(s);
}
@@ -1980,7 +1980,7 @@ pmap_activate(struct thread *td)
stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
stxa(AA_DMMU_PCXR, ASI_DMMU, context);
- membar(Sync);
+ flush(KERNBASE);
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/sparc64/sparc64/support.S b/sys/sparc64/sparc64/support.S
index 6bec2b8..5013dc6 100644
--- a/sys/sparc64/sparc64/support.S
+++ b/sys/sparc64/sparc64/support.S
@@ -780,8 +780,9 @@ ENTRY(openfirmware_exit)
sub %l0, SPOFF, %fp ! setup a stack in a locked page
sub %l0, SPOFF + CCFSZ, %sp
mov AA_DMMU_PCXR, %l3 ! force primary DMMU context 0
+ sethi %hi(KERNBASE), %l5
stxa %g0, [%l3] ASI_DMMU
- membar #Sync
+ flush %l5
wrpr %g0, 0, %tl ! force trap level 0
call %l6
mov %i0, %o0
diff --git a/sys/sparc64/sparc64/swtch.S b/sys/sparc64/sparc64/swtch.S
index f44d4f3..552233f 100644
--- a/sys/sparc64/sparc64/swtch.S
+++ b/sys/sparc64/sparc64/swtch.S
@@ -237,8 +237,9 @@ ENTRY(cpu_switch)
mov AA_IMMU_TSB, %i5
stxa %i4, [%i5] ASI_IMMU
mov AA_DMMU_PCXR, %i5
+ sethi %hi(KERNBASE), %i4
stxa %i3, [%i5] ASI_DMMU
- membar #Sync
+ flush %i4
/*
* Done, return and load the new process's window from the stack.
diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c
index d76eaec..9bd90a1 100644
--- a/sys/sparc64/sparc64/tlb.c
+++ b/sys/sparc64/sparc64/tlb.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tlb.h>
+#include <machine/vmparam.h>
PMAP_STATS_VAR(tlb_ncontext_demap);
PMAP_STATS_VAR(tlb_npage_demap);
@@ -85,7 +86,7 @@ tlb_context_demap(struct pmap *pm)
s = intr_disable();
stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
- membar(Sync);
+ flush(KERNBASE);
intr_restore(s);
}
ipi_wait(cookie);
@@ -111,7 +112,7 @@ tlb_page_demap(struct pmap *pm, vm_offset_t va)
s = intr_disable();
stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
- membar(Sync);
+ flush(KERNBASE);
intr_restore(s);
}
ipi_wait(cookie);
@@ -139,7 +140,7 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
for (va = start; va < end; va += PAGE_SIZE) {
stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
- membar(Sync);
+ flush(KERNBASE);
}
intr_restore(s);
}
OpenPOWER on IntegriCloud