author    jake <jake@FreeBSD.org>    2001-12-29 07:07:35 +0000
committer jake <jake@FreeBSD.org>    2001-12-29 07:07:35 +0000
commit    a3769d0f6ae5707b6235796db86a89eeb9c61eb6 (patch)
tree      f4d2c1cbaa1915960ee3a34735e436c04e41fe0d /sys/sparc64/include/tlb.h
parent    63826f10fafda5a3a6f181dfe58a89909de04a29 (diff)
1. Certain tlb operations need to be atomic, so disable interrupts for
   their duration (sketched below). This is still only effective as long
   as they are only used in the static kernel. Code in modules may cause
   instruction faults, which makes these break in different ways anyway.
2. Add a load-bearing membar #Sync.
3. Add an inline for demapping an entire context.

Submitted by: tmm (1, 2)
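For reference, a minimal sketch of the interrupt-disable pattern from item 1,
shown outside the diff. The function name is hypothetical; rdpr, wrpr, stxa,
membar, PSTATE_IE and the AA_/ASI_/TLB_ constants are the existing sparc64
definitions used in the diff below.

/*
 * Sketch only: save PSTATE, clear the interrupt-enable bit so no interrupt
 * can land between the tag-access write and the data-in store, then restore
 * the saved PSTATE.  This mirrors what TLB_ATOMIC_START/TLB_ATOMIC_END do.
 */
static __inline void
tlb_dtlb_store_sketch(vm_offset_t va, u_long ctx, struct tte tte)
{
	u_long pst;

	pst = rdpr(pstate);			/* save current PSTATE */
	wrpr(pstate, pst & ~PSTATE_IE, 0);	/* disable interrupts */
	stxa(AA_DMMU_TAR, ASI_DMMU,
	    TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));	/* set tag access register */
	stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data); /* write the TTE */
	membar(Sync);				/* wait for the stores to complete */
	wrpr(pstate, pst, 0);			/* restore PSTATE (interrupts) */
}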
Diffstat (limited to 'sys/sparc64/include/tlb.h')
-rw-r--r--  sys/sparc64/include/tlb.h | 54
1 files changed, 54 insertions(+), 0 deletions(-)
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
index e6ff29a..5dfb6dc 100644
--- a/sys/sparc64/include/tlb.h
+++ b/sys/sparc64/include/tlb.h
@@ -82,6 +82,26 @@
#define MMU_SFSR_W (1L << MMU_SFSR_W_SHIFT)
+/*
+ * Some tlb operations must be atomic, so no interrupt or trap can be allowed
+ * while they are in progress. Traps should not happen, but interrupts need to
+ * be explicitly disabled. critical_enter() cannot be used here, since it only
+ * disables soft interrupts.
+ * XXX: is something like this needed elsewhere, too?
+ */
+#define TLB_ATOMIC_START(s) do { \
+ (s) = rdpr(pstate); \
+ wrpr(pstate, (s) & ~PSTATE_IE, 0); \
+} while (0)
+#define TLB_ATOMIC_END(s) wrpr(pstate, (s), 0)
+
+static __inline void
+tlb_dtlb_context_primary_demap(void)
+{
+ stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
+ membar(Sync);
+}
+
static __inline void
tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
{
@@ -94,6 +114,7 @@ tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
membar(Sync);
stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE,
ASI_DMMU_DEMAP, 0);
+ membar(Sync);
stxa(AA_DMMU_SCXR, ASI_DMMU, 0);
membar(Sync);
}
@@ -102,18 +123,33 @@ tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
static __inline void
tlb_dtlb_store(vm_offset_t va, u_long ctx, struct tte tte)
{
+ u_long pst;
+
+ TLB_ATOMIC_START(pst);
stxa(AA_DMMU_TAR, ASI_DMMU,
TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
membar(Sync);
+ TLB_ATOMIC_END(pst);
}
static __inline void
tlb_dtlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
+ u_long pst;
+
+ TLB_ATOMIC_START(pst);
stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
membar(Sync);
+ TLB_ATOMIC_END(pst);
+}
+
+static __inline void
+tlb_itlb_context_primary_demap(void)
+{
+ stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
+ membar(Sync);
}
static __inline void
@@ -128,6 +164,7 @@ tlb_itlb_page_demap(u_long ctx, vm_offset_t va)
membar(Sync);
stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE,
ASI_IMMU_DEMAP, 0);
+ membar(Sync);
stxa(AA_DMMU_SCXR, ASI_DMMU, 0);
/* flush probably not needed. */
membar(Sync);
@@ -137,6 +174,9 @@ tlb_itlb_page_demap(u_long ctx, vm_offset_t va)
static __inline void
tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
{
+ u_long pst;
+
+ TLB_ATOMIC_START(pst);
stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
stxa(0, ASI_ITLB_DATA_IN_REG, tte.tte_data);
if (ctx == TLB_CTX_KERNEL)
@@ -148,14 +188,28 @@ tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
*/
membar(Sync);
}
+ TLB_ATOMIC_END(pst);
+}
+
+static __inline void
+tlb_context_primary_demap(u_int tlb)
+{
+ if (tlb & TLB_DTLB)
+ tlb_dtlb_context_primary_demap();
+ if (tlb & TLB_ITLB)
+ tlb_itlb_context_primary_demap();
}
static __inline void
tlb_itlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
+ u_long pst;
+
+ TLB_ATOMIC_START(pst);
stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
flush(va);
+ TLB_ATOMIC_END(pst);
}
static __inline void
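
Below is a hypothetical caller of the new tlb_context_primary_demap() inline;
it is not part of this commit and is only an illustration. TLB_DTLB and
TLB_ITLB are the flag bits this header already defines and tests.

/*
 * Hypothetical example: flush every primary-context mapping from both the
 * data and instruction TLBs, e.g. when a context number is being recycled.
 */
static __inline void
tlb_context_primary_demap_all(void)
{
	tlb_context_primary_demap(TLB_DTLB | TLB_ITLB);
}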