author     jake <jake@FreeBSD.org>  2002-05-20 16:10:17 +0000
committer  jake <jake@FreeBSD.org>  2002-05-20 16:10:17 +0000
commit     1166262e26d2d07ffdc7058ec1f89e4ae8ec70dd (patch)
tree       d53aeef010eadddd66460382bae0779918e8f316
parent     d2bc88b0d8377dba477e0e7aa90b9ffae7258b63 (diff)
download   FreeBSD-src-1166262e26d2d07ffdc7058ec1f89e4ae8ec70dd.zip
           FreeBSD-src-1166262e26d2d07ffdc7058ec1f89e4ae8ec70dd.tar.gz
De-inline the tlb demap functions. These were so big that gcc3.1 refused
to inline them anyway. ;)
-rw-r--r--  sys/conf/files.sparc64     |   1
-rw-r--r--  sys/sparc64/include/tlb.h  | 103
-rw-r--r--  sys/sparc64/sparc64/tlb.c  | 140
3 files changed, 144 insertions, 100 deletions
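
The change follows the standard de-inlining pattern: a static __inline definition in a header becomes a plain prototype there, and the body moves to a .c file that is compiled once and added to the build. A minimal sketch of the pattern, using placeholder names (frobnicate, foo.h, and foo.c are illustrative, not the actual FreeBSD code):

    /* Before: foo.h -- every file that includes this instantiates the body. */
    static __inline void
    frobnicate(int x)
    {
            /* ...large body... */
    }

    /* After: foo.h -- declaration only. */
    void    frobnicate(int x);

    /* After: foo.c -- the single out-of-line definition, compiled once. */
    #include "foo.h"

    void
    frobnicate(int x)
    {
            /* ...large body... */
    }

This is also why files.sparc64 gains a line: the new tlb.c must be listed so the build compiles the now out-of-line definitions.
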
diff --git a/sys/conf/files.sparc64 b/sys/conf/files.sparc64
index e6ddeb0..de70719 100644
--- a/sys/conf/files.sparc64
+++ b/sys/conf/files.sparc64
@@ -63,6 +63,7 @@ sparc64/sparc64/support.s standard
sparc64/sparc64/sys_machdep.c standard
sparc64/sparc64/swtch.s standard
sparc64/sparc64/tick.c standard
+sparc64/sparc64/tlb.c standard
sparc64/sparc64/trap.c standard
sparc64/sparc64/tsb.c standard
sparc64/sparc64/vm_machdep.c standard
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
index 9a84de0..2a46933 100644
--- a/sys/sparc64/include/tlb.h
+++ b/sys/sparc64/include/tlb.h
@@ -83,106 +83,9 @@
extern int kernel_tlb_slots;
extern struct tte *kernel_ttes;
-/*
- * Some tlb operations must be atomic, so no interrupt or trap can be allowed
- * while they are in progress. Traps should not happen, but interrupts need to
- * be explicitly disabled. critical_enter() cannot be used here, since it only
- * disables soft interrupts.
- */
-
-static __inline void
-tlb_context_demap(struct pmap *pm)
-{
- void *cookie;
- u_long s;
-
- /*
- * It is important that we are not interrupted or preempted while
- * doing the IPIs. The interrupted CPU may hold locks, and since
- * it will wait for the CPU that sent the IPI, this can lead
- * to a deadlock when an interrupt comes in on that CPU and its
- * handler tries to grab one of those locks. This will only happen for
- * spin locks, but these IPI types are delivered even if normal
- * interrupts are disabled, so the lock critical section will not
- * protect the target processor from entering the IPI handler with
- * the lock held.
- */
- critical_enter();
- cookie = ipi_tlb_context_demap(pm);
- if (pm->pm_active & PCPU_GET(cpumask)) {
- KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
- ("tlb_context_demap: inactive pmap?"));
- s = intr_disable();
- stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
- stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
- membar(Sync);
- intr_restore(s);
- }
- ipi_wait(cookie);
- critical_exit();
-}
-
-static __inline void
-tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
-{
- u_long flags;
- void *cookie;
- u_long s;
-
- critical_enter();
- cookie = ipi_tlb_page_demap(tlb, pm, va);
- if (pm->pm_active & PCPU_GET(cpumask)) {
- KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
- ("tlb_page_demap: inactive pmap?"));
- if (pm == kernel_pmap)
- flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
- else
- flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
-
- s = intr_disable();
- if (tlb & TLB_DTLB) {
- stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
- membar(Sync);
- }
- if (tlb & TLB_ITLB) {
- stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
- membar(Sync);
- }
- intr_restore(s);
- }
- ipi_wait(cookie);
- critical_exit();
-}
-
-static __inline void
-tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
-{
- vm_offset_t va;
- void *cookie;
- u_long flags;
- u_long s;
-
- critical_enter();
- cookie = ipi_tlb_range_demap(pm, start, end);
- if (pm->pm_active & PCPU_GET(cpumask)) {
- KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
- ("tlb_range_demap: inactive pmap?"));
- if (pm == kernel_pmap)
- flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
- else
- flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
-
- s = intr_disable();
- for (va = start; va < end; va += PAGE_SIZE) {
- stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
- stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
- membar(Sync);
- }
- intr_restore(s);
- }
- ipi_wait(cookie);
- critical_exit();
-}
+void tlb_context_demap(struct pmap *pm);
+void tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va);
+void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end);
#define tlb_tte_demap(tte, pm) \
tlb_page_demap(TD_GET_TLB((tte).tte_data), pm, \
diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c
new file mode 100644
index 0000000..f2ecf02
--- /dev/null
+++ b/sys/sparc64/sparc64/tlb.c
@@ -0,0 +1,140 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/pmap.h>
+#include <machine/smp.h>
+#include <machine/tlb.h>
+
+/*
+ * Some tlb operations must be atomic, so no interrupt or trap can be allowed
+ * while they are in progress. Traps should not happen, but interrupts need to
+ * be explicitly disabled. critical_enter() cannot be used here, since it only
+ * disables soft interrupts.
+ */
+
+void
+tlb_context_demap(struct pmap *pm)
+{
+ void *cookie;
+ u_long s;
+
+ /*
+ * It is important that we are not interrupted or preempted while
+ * doing the IPIs. The interrupted CPU may hold locks, and since
+ * it will wait for the CPU that sent the IPI, this can lead
+ * to a deadlock when an interrupt comes in on that CPU and its
+ * handler tries to grab one of those locks. This will only happen for
+ * spin locks, but these IPI types are delivered even if normal
+ * interrupts are disabled, so the lock critical section will not
+ * protect the target processor from entering the IPI handler with
+ * the lock held.
+ */
+ critical_enter();
+ cookie = ipi_tlb_context_demap(pm);
+ if (pm->pm_active & PCPU_GET(cpumask)) {
+ KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
+ ("tlb_context_demap: inactive pmap?"));
+ s = intr_disable();
+ stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
+ stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
+ membar(Sync);
+ intr_restore(s);
+ }
+ ipi_wait(cookie);
+ critical_exit();
+}
+
+void
+tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
+{
+ u_long flags;
+ void *cookie;
+ u_long s;
+
+ critical_enter();
+ cookie = ipi_tlb_page_demap(tlb, pm, va);
+ if (pm->pm_active & PCPU_GET(cpumask)) {
+ KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
+ ("tlb_page_demap: inactive pmap?"));
+ if (pm == kernel_pmap)
+ flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
+ else
+ flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
+
+ s = intr_disable();
+ if (tlb & TLB_DTLB) {
+ stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
+ membar(Sync);
+ }
+ if (tlb & TLB_ITLB) {
+ stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
+ membar(Sync);
+ }
+ intr_restore(s);
+ }
+ ipi_wait(cookie);
+ critical_exit();
+}
+
+void
+tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
+{
+ vm_offset_t va;
+ void *cookie;
+ u_long flags;
+ u_long s;
+
+ critical_enter();
+ cookie = ipi_tlb_range_demap(pm, start, end);
+ if (pm->pm_active & PCPU_GET(cpumask)) {
+ KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
+ ("tlb_range_demap: inactive pmap?"));
+ if (pm == kernel_pmap)
+ flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
+ else
+ flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
+
+ s = intr_disable();
+ for (va = start; va < end; va += PAGE_SIZE) {
+ stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
+ stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
+ membar(Sync);
+ }
+ intr_restore(s);
+ }
+ ipi_wait(cookie);
+ critical_exit();
+}
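
All three functions in the new tlb.c share the synchronization shape that the file's opening comment describes; a distilled sketch of that pattern, with hypothetical helpers (send_demap_ipi, pmap_active_here, local_demap) standing in for the real ipi_tlb_*_demap calls and stxa/membar sequences:

    void
    demap_pattern(struct pmap *pm)
    {
            void *cookie;
            u_long s;

            /* Prevent preemption while IPIs are outstanding (deadlock avoidance). */
            critical_enter();
            cookie = send_demap_ipi(pm);    /* hypothetical IPI dispatch */
            if (pmap_active_here(pm)) {     /* hypothetical: the pm_active test */
                    /* Hard-disable interrupts: the demap stores must be atomic. */
                    s = intr_disable();
                    local_demap(pm);        /* hypothetical stxa/membar sequence */
                    intr_restore(s);
            }
            ipi_wait(cookie);       /* do not return until remote CPUs are done */
            critical_exit();
    }
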