author     jake <jake@FreeBSD.org>  2002-05-08 04:14:16 +0000
committer  jake <jake@FreeBSD.org>  2002-05-08 04:14:16 +0000
commit     d6121aa92fc712febf157533c5f3458020175f40 (patch)
tree       922f0fba33c0320f386a071ed43278948c35919a /sys
parent     eb9cd028aaf076736bc6bda08a7ccbab58f2731e (diff)
Make a macro for the guts of tl0_immu_miss, like dmmu_miss and prot.
Rearrange things slightly so that the contents of the tag access register are read and restored outside of the macros. The intention is to pass the page size to look up as an argument to the macros.
Diffstat (limited to 'sys')
-rw-r--r--  sys/sparc64/sparc64/exception.S  127
-rw-r--r--  sys/sparc64/sparc64/exception.s  127
2 files changed, 152 insertions(+), 102 deletions(-)
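The shape of the refactoring, as a minimal sketch rather than the committed code: the read of the tag access register moves out of the lookup macro and its restore moves into the trap path, so that a later change can pass the page size to look up as a macro argument. The `page_size' parameter and the PS_8K name below are hypothetical; this commit only does the rearrangement.

	/*
	 * Sketch only: a lookup macro parameterized by page size, as the
	 * commit message says is intended.  PS_8K is an illustrative
	 * name, not a constant this commit defines.
	 */
	.macro	immu_miss_user page_size
	/* TSB lookup for \page_size pages, using the tag access register
	 * contents that the caller already loaded into %g2. */
	.endm

	.macro	tl0_immu_miss
	wrpr	%g0, PSTATE_MMU, %pstate	! force kernel store order
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2	! read TAR outside the lookup
	immu_miss_user	PS_8K			! page size as an argument
	b,a	%xcc, tl0_immu_miss_trap	! on a miss, the trap handler
	nop					! puts %g2 back in the TAR
	.endm

Correspondingly, in the diff below the trap entries (tl0_immu_miss_trap, tl0_dmmu_miss_trap, tl0_dmmu_prot_trap, and the tl1 user paths) gain the stxa/membar #Sync sequence that restores the tag access register, since the lookup macros no longer do it themselves.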
diff --git a/sys/sparc64/sparc64/exception.S b/sys/sparc64/sparc64/exception.S
index 8bc5ff7..890398f 100644
--- a/sys/sparc64/sparc64/exception.S
+++ b/sys/sparc64/sparc64/exception.S
@@ -543,19 +543,7 @@ END(tl0_sfsr_trap)
.align 32
.endm
- .macro tl0_immu_miss
- /*
- * Force kernel store order.
- */
- wrpr %g0, PSTATE_MMU, %pstate
-
- /*
- * Load the virtual page number and context from the tag access
- * register. We ignore the context.
- */
- wr %g0, ASI_IMMU, %asi
- ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
-
+ .macro immu_miss_user
/*
* Extract the virtual page number from the contents of the tag
* access register.
@@ -623,13 +611,22 @@ END(tl0_sfsr_trap)
andcc %g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
bnz,a,pt %xcc, 1b
nop
+ .endm
+ .macro tl0_immu_miss
/*
- * Put back the contents of the tag access register, in case we
- * faulted.
+ * Force kernel store order.
*/
- stxa %g2, [%g0 + AA_IMMU_TAR] %asi
- membar #Sync
+ wrpr %g0, PSTATE_MMU, %pstate
+
+ /*
+ * Load the virtual page number and context from the tag access
+ * register. We ignore the context.
+ */
+ wr %g0, ASI_IMMU, %asi
+ ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
+
+ immu_miss_user
b,a %xcc, tl0_immu_miss_trap
nop
@@ -658,6 +655,13 @@ END(tl0_immu_miss_set_ref)
ENTRY(tl0_immu_miss_trap)
/*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_IMMU_TAR] %asi
+ membar #Sync
+
+ /*
* Switch to alternate globals.
*/
wrpr %g0, PSTATE_ALT, %pstate
@@ -678,13 +682,6 @@ END(tl0_immu_miss_trap)
.macro dmmu_miss_user
/*
- * Load the virtual page number and context from the tag access
- * register. We ignore the context.
- */
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
-
- /*
* Extract the virtual page number from the contents of the tag
* access register.
*/
@@ -748,13 +745,6 @@ END(tl0_immu_miss_trap)
andcc %g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
bnz,a,pt %xcc, 1b
nop
-
- /*
- * Put back the contents of the tag access register, in case we
- * faulted.
- */
- stxa %g2, [%g0 + AA_DMMU_TAR] %asi
- membar #Sync
.endm
ENTRY(dmmu_miss_user_set_ref)
@@ -784,6 +774,13 @@ END(dmmu_miss_user_set_ref)
wrpr %g0, PSTATE_MMU, %pstate
/*
+ * Load the virtual page number and context from the tag access
+ * register. We ignore the context.
+ */
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
+
+ /*
* Try a fast inline lookup of the primary tsb.
*/
dmmu_miss_user
@@ -798,6 +795,13 @@ END(dmmu_miss_user_set_ref)
ENTRY(tl0_dmmu_miss_trap)
/*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ membar #Sync
+
+ /*
* Switch to alternate globals.
*/
wrpr %g0, PSTATE_ALT, %pstate
@@ -818,13 +822,6 @@ END(tl0_dmmu_miss_trap)
.macro dmmu_prot_user
/*
- * Load the virtual page number and context from the tag access
- * register. We ignore the context.
- */
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
-
- /*
* Extract the virtual page number from the contents of the tag
* access register.
*/
@@ -879,13 +876,6 @@ END(tl0_dmmu_miss_trap)
andcc %g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
bnz,a,pt %xcc, 1b
nop
-
- /*
- * Put back the contents of the tag access register, in case we
- * faulted.
- */
- stxa %g2, [%g0 + AA_DMMU_TAR] %asi
- membar #Sync
.endm
.macro tl0_dmmu_prot
@@ -895,6 +885,13 @@ END(tl0_dmmu_miss_trap)
wrpr %g0, PSTATE_MMU, %pstate
/*
+ * Load the virtual page number and context from the tag access
+ * register. We ignore the context.
+ */
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
+
+ /*
* Try a fast inline lookup of the tsb.
*/
dmmu_prot_user
@@ -937,6 +934,13 @@ END(dmmu_prot_set_w)
ENTRY(tl0_dmmu_prot_trap)
/*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ membar #Sync
+
+ /*
* Switch to alternate globals.
*/
wrpr %g0, PSTATE_ALT, %pstate
@@ -1330,13 +1334,14 @@ END(tl1_immu_miss_trap)
* the virtual page number.
*/
sllx %g6, 64 - TAR_VPN_SHIFT, %g5
- brnz,pn %g5, tl1_dmmu_miss_user
- srlx %g6, TAR_VPN_SHIFT, %g6
+ brnz,a,pn %g5, tl1_dmmu_miss_user
+ mov %g6, %g2
/*
* Find the index into the kernel tsb.
*/
set TSB_KERNEL_MASK, %g4
+ srlx %g6, TAR_VPN_SHIFT, %g6
and %g6, %g4, %g3
/*
@@ -1404,6 +1409,13 @@ ENTRY(tl1_dmmu_miss_user)
dmmu_miss_user
/*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ membar #Sync
+
+ /*
* Switch to alternate globals.
*/
wrpr %g0, PSTATE_ALT, %pstate
@@ -1438,13 +1450,14 @@ END(tl1_dmmu_miss_user)
* the virtual page number.
*/
sllx %g6, 64 - TAR_VPN_SHIFT, %g5
- brnz,pn %g5, tl1_dmmu_prot_user
- srlx %g6, TAR_VPN_SHIFT, %g6
+ brnz,a,pn %g5, tl1_dmmu_prot_user
+ mov %g6, %g2
/*
* Find the index into the kernel tsb.
*/
set TSB_KERNEL_MASK, %g4
+ srlx %g6, TAR_VPN_SHIFT, %g6
and %g6, %g4, %g5
/*
@@ -1479,6 +1492,12 @@ END(tl1_dmmu_miss_user)
stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
membar #Sync
+ ba,a %xcc, tl1_dmmu_prot_cont
+ nop
+ .align 128
+ .endm
+
+ENTRY(tl1_dmmu_prot_cont)
/*
* Set the hardware write bit.
*/
@@ -1490,8 +1509,7 @@ END(tl1_dmmu_miss_user)
or %g5, TD_W, %g5
stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
retry
- .align 128
- .endm
+END(tl1_dmmu_prot_cont)
ENTRY(tl1_dmmu_prot_user)
/*
@@ -1500,6 +1518,13 @@ ENTRY(tl1_dmmu_prot_user)
dmmu_prot_user
/*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ membar #Sync
+
+ /*
* Switch to alternate globals.
*/
wrpr %g0, PSTATE_ALT, %pstate