author     jake <jake@FreeBSD.org>  2003-04-13 21:54:58 +0000
committer  jake <jake@FreeBSD.org>  2003-04-13 21:54:58 +0000
commit     08ad0d0b12b05574b3ca8dc82085cf761ced1cb6 (patch)
tree       3e95e9ec9c040078e79c412e7276c6a9fa539d07 /sys/sparc64
parent     718ebc1291829bf1185a75ae93a2a7e562bfc5d8 (diff)
- Move the routine for flushing all user mappings from the tlb from pmap to
  the cpu dependent files.  It will need to be done differently for USIII.
- Simplify the logic for detecting context rollovers.  Instead of dealing
  with it when the next context switch would cause the context numbers to
  roll over, deal with it when they actually do roll over.
- Move some things around in cpu_switch so that we only do 1 membar #Sync
  when switching address space, instead of 2.
- Detect kernel threads by comparing the new vm space to vmspace0, instead
  of checking if the tlb context is 0.
- Remove some debug code.
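
[Editor's note: the following is a minimal, self-contained C sketch of the
context-allocation scheme this patch introduces in pmap_activate() and
cpu_switch: a per-cpu counter hands out tlb context numbers, and only when the
counter actually reaches the per-cpu maximum is the cpu-dependent
tlb_flush_user hook called and the counter reset to the minimum.  The pcpu
structure, the constants, and the stub flush routine below are illustrative
stand-ins, not the kernel's real definitions.]

	/*
	 * Illustrative sketch only -- the pcpu struct, constants and stub
	 * flush routine are hypothetical stand-ins for the real kernel
	 * definitions.
	 */
	#include <stdio.h>

	#define	TLB_CTX_MIN	1	/* context 0 is the kernel context */
	#define	TLB_CTX_MAX	8	/* tiny, so a rollover shows below */

	typedef void tlb_flush_user_t(void);

	static void
	spitfire_tlb_flush_user(void)
	{

		/*
		 * The real routine walks all 64 tlb slots and invalidates
		 * every valid, unlocked, non-kernel entry; here we only
		 * log the event.
		 */
		printf("  rollover: flushing all user tlb entries\n");
	}

	/* Set once at boot based on cpu type, as cache_init() does. */
	static tlb_flush_user_t *tlb_flush_user = spitfire_tlb_flush_user;

	struct pcpu {
		int	pc_tlb_ctx;
		int	pc_tlb_ctx_min;
		int	pc_tlb_ctx_max;
	};

	static int
	context_alloc(struct pcpu *pc)
	{
		int context;

		context = pc->pc_tlb_ctx;
		if (context == pc->pc_tlb_ctx_max) {
			/*
			 * Roll over only when the numbers actually run
			 * out: flush the user mappings and restart from
			 * the minimum.
			 */
			tlb_flush_user();
			context = pc->pc_tlb_ctx_min;
		}
		pc->pc_tlb_ctx = context + 1;
		return (context);
	}

	int
	main(void)
	{
		struct pcpu pc = { TLB_CTX_MIN, TLB_CTX_MIN, TLB_CTX_MAX };
		int i;

		for (i = 0; i < 10; i++)
			printf("switch %2d -> context %d\n", i,
			    context_alloc(&pc));
		return (0);
	}

Contrast this with the removed pmap_context_alloc(), which triggered the flush
one switch early (when context + 1 reached the maximum); the new scheme defers
the flush until the counter actually hits the maximum.
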
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/include/pmap.h     |   1
-rw-r--r--  sys/sparc64/include/tlb.h      |  12
-rw-r--r--  sys/sparc64/sparc64/cache.c    |   3
-rw-r--r--  sys/sparc64/sparc64/cheetah.c  |   7
-rw-r--r--  sys/sparc64/sparc64/machdep.c  |   5
-rw-r--r--  sys/sparc64/sparc64/pmap.c     |  67
-rw-r--r--  sys/sparc64/sparc64/spitfire.c |  27
-rw-r--r--  sys/sparc64/sparc64/swtch.S    | 112
-rw-r--r--  sys/sparc64/sparc64/tlb.c      |  26
9 files changed, 102 insertions(+), 158 deletions(-)
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index ab05d7b..c0c20df 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -71,7 +71,6 @@ struct pmap {
};
void pmap_bootstrap(vm_offset_t ekva);
-void pmap_context_rollover(void);
vm_paddr_t pmap_kextract(vm_offset_t va);
void pmap_kenter(vm_offset_t va, vm_page_t m);
void pmap_kremove(vm_offset_t);
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
index d68a0ad..235a186 100644
--- a/sys/sparc64/include/tlb.h
+++ b/sys/sparc64/include/tlb.h
@@ -93,17 +93,21 @@
#define MMU_SFSR_W (1UL << MMU_SFSR_W_SHIFT)
#define MMU_SFSR_FV (1UL << MMU_SFSR_FV_SHIFT)
+typedef void tlb_flush_user_t(void);
+
+struct pmap;
struct tlb_entry;
extern int kernel_tlb_slots;
extern struct tlb_entry *kernel_tlbs;
-extern int tlb_dtlb_entries;
-extern int tlb_itlb_entries;
-
void tlb_context_demap(struct pmap *pm);
void tlb_page_demap(struct pmap *pm, vm_offset_t va);
void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end);
-void tlb_dump(void);
+
+tlb_flush_user_t cheetah_tlb_flush_user;
+tlb_flush_user_t spitfire_tlb_flush_user;
+
+extern tlb_flush_user_t *tlb_flush_user;
#endif /* !_MACHINE_TLB_H_ */
diff --git a/sys/sparc64/sparc64/cache.c b/sys/sparc64/sparc64/cache.c
index a3e1a1c..dad13f1 100644
--- a/sys/sparc64/sparc64/cache.c
+++ b/sys/sparc64/sparc64/cache.c
@@ -81,6 +81,7 @@
#include <dev/ofw/openfirm.h>
#include <machine/cache.h>
+#include <machine/tlb.h>
#include <machine/ver.h>
struct cacheinfo cache;
@@ -126,8 +127,10 @@ cache_init(phandle_t node)
if (cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
dcache_page_inval = cheetah_dcache_page_inval;
icache_page_inval = cheetah_icache_page_inval;
+ tlb_flush_user = cheetah_tlb_flush_user;
} else {
dcache_page_inval = spitfire_dcache_page_inval;
icache_page_inval = spitfire_icache_page_inval;
+ tlb_flush_user = spitfire_tlb_flush_user;
}
}
diff --git a/sys/sparc64/sparc64/cheetah.c b/sys/sparc64/sparc64/cheetah.c
index 9d550d5..6dbbc06 100644
--- a/sys/sparc64/sparc64/cheetah.c
+++ b/sys/sparc64/sparc64/cheetah.c
@@ -43,6 +43,7 @@
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/smp.h>
+#include <machine/tlb.h>
/*
* Flush a physical page from the data cache.
@@ -69,3 +70,9 @@ void
cheetah_icache_page_inval(vm_paddr_t pa)
{
}
+
+void
+cheetah_tlb_flush_user(void)
+{
+ panic("cheetah_tlb_flush_user");
+}
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index 7f3aa7a..47d9093 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -286,11 +286,6 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
}
if (child == 0)
panic("cpu_startup: no cpu\n");
- OF_getprop(child, "#dtlb-entries", &tlb_dtlb_entries,
- sizeof(tlb_dtlb_entries));
- OF_getprop(child, "#itlb-entries", &tlb_itlb_entries,
- sizeof(tlb_itlb_entries));
-
cache_init(child);
getenv_int("machdep.use_vis", &cpu_use_vis);
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 7c65dc4..c58d4b4 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -558,45 +558,6 @@ pmap_bootstrap_alloc(vm_size_t size)
panic("pmap_bootstrap_alloc");
}
-void
-pmap_context_rollover(void)
-{
- u_long data;
- u_long tag;
- int i;
-
- mtx_assert(&sched_lock, MA_OWNED);
- CTR0(KTR_PMAP, "pmap_context_rollover");
- for (i = 0; i < tlb_dtlb_entries; i++) {
- /* XXX - cheetah */
- data = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG);
- tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
- if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
- TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
- stxa_sync(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG, 0);
- data = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG);
- tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
- if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
- TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
- stxa_sync(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, 0);
- }
- PCPU_SET(tlb_ctx, PCPU_GET(tlb_ctx_min));
-}
-
-static __inline u_int
-pmap_context_alloc(void)
-{
- u_int context;
-
- mtx_assert(&sched_lock, MA_OWNED);
- context = PCPU_GET(tlb_ctx);
- if (context + 1 == PCPU_GET(tlb_ctx_max))
- pmap_context_rollover();
- else
- PCPU_SET(tlb_ctx, context + 1);
- return (context);
-}
-
/*
* Initialize the pmap module.
*/
@@ -1973,28 +1934,30 @@ void
pmap_activate(struct thread *td)
{
struct vmspace *vm;
- vm_offset_t tsb;
- u_long context;
- pmap_t pm;
+ struct pmap *pm;
+ int context;
vm = td->td_proc->p_vmspace;
- pm = &vm->vm_pmap;
- tsb = (vm_offset_t)pm->pm_tsb;
-
- KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
- KASSERT(pm->pm_context[PCPU_GET(cpuid)] != 0,
- ("pmap_activate: activating nucleus context?"));
+ pm = vmspace_pmap(vm);
mtx_lock_spin(&sched_lock);
- stxa(AA_DMMU_TSB, ASI_DMMU, tsb);
- stxa(AA_IMMU_TSB, ASI_IMMU, tsb);
- membar(Sync);
- context = pmap_context_alloc();
+
+ context = PCPU_GET(tlb_ctx);
+ if (context == PCPU_GET(tlb_ctx_max)) {
+ tlb_flush_user();
+ context = PCPU_GET(tlb_ctx_min);
+ }
+ PCPU_SET(tlb_ctx, context + 1);
+
pm->pm_context[PCPU_GET(cpuid)] = context;
pm->pm_active |= PCPU_GET(cpumask);
PCPU_SET(vmspace, vm);
+
+ stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
+ stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
stxa(AA_DMMU_PCXR, ASI_DMMU, context);
membar(Sync);
+
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/sparc64/sparc64/spitfire.c b/sys/sparc64/sparc64/spitfire.c
index e7f614f..429dc9a 100644
--- a/sys/sparc64/sparc64/spitfire.c
+++ b/sys/sparc64/sparc64/spitfire.c
@@ -43,6 +43,9 @@
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/smp.h>
+#include <machine/tlb.h>
+
+#define SPITFIRE_TLB_ENTRIES 64
PMAP_STATS_VAR(spitfire_dcache_npage_inval);
PMAP_STATS_VAR(spitfire_dcache_npage_inval_match);
@@ -107,3 +110,27 @@ spitfire_icache_page_inval(vm_paddr_t pa)
}
ipi_wait(cookie);
}
+
+/*
+ * Flush all user mappings from the tlb.
+ */
+void
+spitfire_tlb_flush_user(void)
+{
+ u_long data;
+ u_long tag;
+ int i;
+
+ for (i = 0; i < SPITFIRE_TLB_ENTRIES; i++) {
+ data = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG);
+ tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
+ if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
+ TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
+ stxa_sync(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG, 0);
+ data = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG);
+ tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
+ if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
+ TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
+ stxa_sync(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, 0);
+ }
+}
diff --git a/sys/sparc64/sparc64/swtch.S b/sys/sparc64/sparc64/swtch.S
index 222ef75..ae2256b 100644
--- a/sys/sparc64/sparc64/swtch.S
+++ b/sys/sparc64/sparc64/swtch.S
@@ -56,7 +56,7 @@ ENTRY(cpu_switch)
/*
* If the current thread was using floating point in the kernel, save
* its context. The userland floating point context has already been
- * saved.
+ * saved in that case.
*/
rd %fprs, %l2
andcc %l2, FPRS_FEF, %g0
@@ -125,9 +125,6 @@ ENTRY(cpu_switch)
ldx [PCB_REG + PCB_PC], %i7
sub %fp, CCFSZ, %sp
- SET(sched_lock, %i3, %i2)
- stx %i0, [%i2 + MTX_LOCK]
-
/*
* Point to the vmspaces of the new process, and of the last non-kernel
* process to run.
@@ -152,21 +149,12 @@ ENTRY(cpu_switch)
nop
/*
- * If the new process has nucleus context we are done.
+ * If the new process is a kernel thread we can just leave the old
+ * context active and avoid recycling its context number.
*/
- lduw [PCPU(CPUID)], %i3
- sllx %i3, INT_SHIFT, %i3
- add %i2, VM_PMAP + PM_CONTEXT, %i4
- lduw [%i3 + %i4], %i5
-
-#if KTR_COMPILE & KTR_PROC
- CATR(KTR_PROC, "cpu_switch: ctx=%#lx"
- , %g1, %g2, %g3, 7, 8, 9)
- stx %i5, [%g1 + KTR_PARM1]
-9:
-#endif
-
- brz,a,pn %i5, 5f
+ SET(vmspace0, %i4, %i3)
+ cmp %i2, %i3
+ be,a,pn %xcc, 5f
nop
/*
@@ -185,7 +173,7 @@ ENTRY(cpu_switch)
stw %l3, [%l2 + VM_PMAP + PM_ACTIVE]
/*
- * Take away its context.
+ * Take away its context number.
*/
lduw [PCPU(CPUID)], %l3
sllx %l3, INT_SHIFT, %l3
@@ -194,55 +182,41 @@ ENTRY(cpu_switch)
stw %l5, [%l3 + %l4]
/*
- * Find the current free tlb context for this cpu and install it as
- * the new primary context.
+ * Find a new tlb context. If we've run out we have to flush all user
+ * mappings from the tlb and reset the context numbers.
*/
-3: lduw [PCPU(TLB_CTX)], %i5
- stw %i5, [%i3 + %i4]
- mov AA_DMMU_PCXR, %i4
- stxa %i5, [%i4] ASI_DMMU
- membar #Sync
+3: lduw [PCPU(TLB_CTX)], %i3
+ lduw [PCPU(TLB_CTX_MAX)], %i4
+ cmp %i3, %i4
+ bne,a,pt %xcc, 4f
+ nop
+ SET(tlb_flush_user, %i5, %i4)
+ ldx [%i4], %i5
+ call %i5
+ nop
+ lduw [PCPU(TLB_CTX_MIN)], %i3
/*
- * See if we have run out of free contexts.
+ * Advance next free context.
*/
- lduw [PCPU(TLB_CTX_MAX)], %i3
-
-#if KTR_COMPILE & KTR_PROC
- CATR(KTR_PROC, "cpu_switch: ctx=%#lx next=%#lx max=%#lx"
- , %g1, %g2, %g3, 7, 8, 9)
- stx %i5, [%g1 + KTR_PARM1]
- add %i5, 1, %g2
- stx %g2, [%g1 + KTR_PARM2]
- stx %i3, [%g1 + KTR_PARM3]
-9:
-#endif
-
- add %i5, 1, %i5
- cmp %i3, %i5
- bne,a,pt %xcc, 4f
- stw %i5, [PCPU(TLB_CTX)]
-
-#if KTR_COMPILE & KTR_PROC
- CATR(KTR_PROC, "cpu_switch: context rollover"
- , %g1, %g2, %g3, 7, 8, 9)
-9:
-#endif
+4: add %i3, 1, %i4
+ stw %i4, [PCPU(TLB_CTX)]
/*
- * We will start re-using contexts on the next switch. Flush all
- * non-nucleus mappings from the tlb, and reset the next free context.
+ * Set the new context number in the pmap.
*/
- call pmap_context_rollover
- nop
+ lduw [PCPU(CPUID)], %i4
+ sllx %i4, INT_SHIFT, %i4
+ add %i2, VM_PMAP + PM_CONTEXT, %i5
+ stw %i3, [%i4 + %i5]
/*
* Mark the pmap as active on this cpu.
*/
-4: lduw [%i2 + VM_PMAP + PM_ACTIVE], %i3
- lduw [PCPU(CPUMASK)], %i4
- or %i3, %i4, %i3
- stw %i3, [%i2 + VM_PMAP + PM_ACTIVE]
+ lduw [%i2 + VM_PMAP + PM_ACTIVE], %i4
+ lduw [PCPU(CPUMASK)], %i5
+ or %i4, %i5, %i4
+ stw %i4, [%i2 + VM_PMAP + PM_ACTIVE]
/*
* Make note of the change in vmspace.
@@ -250,26 +224,22 @@ ENTRY(cpu_switch)
stx %i2, [PCPU(VMSPACE)]
/*
- * Load the tsb registers.
+ * Fiddle the hardware bits. Set the tsb registers and install the
+ * new context number in the cpu.
*/
- ldx [%i2 + VM_PMAP + PM_TSB], %i3
- mov AA_DMMU_TSB, %i4
- stxa %i3, [%i4] ASI_DMMU
- mov AA_IMMU_TSB, %i4
- stxa %i3, [%i4] ASI_IMMU
+ ldx [%i2 + VM_PMAP + PM_TSB], %i4
+ mov AA_DMMU_TSB, %i5
+ stxa %i4, [%i5] ASI_DMMU
+ mov AA_IMMU_TSB, %i5
+ stxa %i4, [%i5] ASI_IMMU
+ mov AA_DMMU_PCXR, %i5
+ stxa %i3, [%i5] ASI_DMMU
membar #Sync
-5:
-#if KTR_COMPILE & KTR_PROC
- CATR(KTR_PROC, "cpu_switch: return"
- , %g1, %g2, %g3, 7, 8, 9)
-9:
-#endif
-
/*
* Done. Return and load the new process's window from the stack.
*/
- ret
+5: ret
restore
END(cpu_switch)
diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c
index a827993..2226e02 100644
--- a/sys/sparc64/sparc64/tlb.c
+++ b/sys/sparc64/sparc64/tlb.c
@@ -49,8 +49,7 @@ PMAP_STATS_VAR(tlb_ncontext_demap);
PMAP_STATS_VAR(tlb_npage_demap);
PMAP_STATS_VAR(tlb_nrange_demap);
-int tlb_dtlb_entries;
-int tlb_itlb_entries;
+tlb_flush_user_t *tlb_flush_user;
/*
* Some tlb operations must be atomic, so no interrupt or trap can be allowed
@@ -144,26 +143,3 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
}
ipi_wait(cookie);
}
-
-void
-tlb_dump(void)
-{
- u_long data;
- u_long tag;
- int slot;
-
- for (slot = 0; slot < tlb_dtlb_entries; slot++) {
- data = ldxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG);
- if ((data & TD_V) != 0) {
- tag = ldxa(TLB_DAR_SLOT(slot), ASI_DTLB_TAG_READ_REG);
- TR3("pmap_dump_tlb: dltb slot=%d data=%#lx tag=%#lx",
- slot, data, tag);
- }
- data = ldxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG);
- if ((data & TD_V) != 0) {
- tag = ldxa(TLB_DAR_SLOT(slot), ASI_ITLB_TAG_READ_REG);
- TR3("pmap_dump_tlb: iltb slot=%d data=%#lx tag=%#lx",
- slot, data, tag);
- }
- }
-}