path: root/sys/mips
author     jchandra <jchandra@FreeBSD.org>  2010-06-17 05:03:01 +0000
committer  jchandra <jchandra@FreeBSD.org>  2010-06-17 05:03:01 +0000
commit     ef78a755d618239483b4cc9731b42b3317be77e1 (patch)
tree       f9287c5919e1d84fa6fc4a120956716c5416d240 /sys/mips
parent     67fd9a4a1d8d4218e9dbe8b77219d55e80f253a8 (diff)
Merge jmallett@'s n64 work into HEAD - changeset 4

Re-write tlb operations in C with a simpler API. Update callers to use the
new API.

Changes from http://svn.freebsd.org/base/user/jmallett/octeon

Approved by:	rrs (mentor), jmallett
Diffstat (limited to 'sys/mips')
-rw-r--r--  sys/mips/include/cpu.h        51
-rw-r--r--  sys/mips/include/pmap.h       14
-rw-r--r--  sys/mips/include/pte.h       118
-rw-r--r--  sys/mips/include/tlb.h        39
-rw-r--r--  sys/mips/mips/cpu.c            7
-rw-r--r--  sys/mips/mips/machdep.c       16
-rw-r--r--  sys/mips/mips/mp_machdep.c    11
-rw-r--r--  sys/mips/mips/pmap.c         159
-rw-r--r--  sys/mips/mips/tlb.c          311
-rw-r--r--  sys/mips/mips/trap.c           4
10 files changed, 458 insertions, 272 deletions
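
At a glance, the change replaces the assembly-backed Mips_TLB*/Mips_SetWIRED
helpers with CP0 accessor writes plus a small C module (sys/mips/mips/tlb.c).
A minimal caller-side sketch of the migration, distilled from the cpu.c and
mp_machdep.c hunks below; the stand-in prototypes and the placeholder
VMWIRED_ENTRIES value exist only so the fragment compiles, the real
definitions live in cpufunc.h, tlb.h and cpu.h:

/* Stand-in declarations; the kernel picks these up from cpufunc.h and tlb.h. */
extern void mips_wr_wired(int);
extern void tlb_invalidate_all(void);

#define VMWIRED_ENTRIES 2	/* placeholder for illustration; the real value is in cpu.h */

static void
cpu_tlb_reset_sketch(void)
{
	/*
	 * Old sequence, via helpers this commit removes from cpu.h:
	 *	Mips_SetWIRED(0);
	 *	Mips_TLBFlush(num_tlbentries);
	 *	Mips_SetWIRED(VMWIRED_ENTRIES);
	 */

	/* New sequence: CP0 register writes plus the C code in sys/mips/mips/tlb.c. */
	mips_wr_wired(0);			/* unwire everything */
	tlb_invalidate_all();			/* flush all non-wired entries */
	mips_wr_wired(VMWIRED_ENTRIES);		/* re-reserve the wired slots */
}
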
diff --git a/sys/mips/include/cpu.h b/sys/mips/include/cpu.h
index 83b6a85..9bcc7f2 100644
--- a/sys/mips/include/cpu.h
+++ b/sys/mips/include/cpu.h
@@ -274,27 +274,6 @@
#define OPCODE_C1 0x11
/*
- * The low part of the TLB entry.
- */
-#define VMTLB_PF_NUM 0x3fffffc0
-#define VMTLB_ATTR_MASK 0x00000038
-#define VMTLB_MOD_BIT 0x00000004
-#define VMTLB_VALID_BIT 0x00000002
-#define VMTLB_GLOBAL_BIT 0x00000001
-
-#define VMTLB_PHYS_PAGE_SHIFT 6
-
-/*
- * The high part of the TLB entry.
- */
-#define VMTLB_VIRT_PAGE_NUM 0xffffe000
-#define VMTLB_PID 0x000000ff
-#define VMTLB_PID_R9K 0x00000fff
-#define VMTLB_PID_SHIFT 0
-#define VMTLB_VIRT_PAGE_SHIFT 12
-#define VMTLB_VIRT_PAGE_SHIFT_R9K 13
-
-/*
* The first TLB entry that write random hits.
* TLB entry 0 maps the kernel stack of the currently running thread
* TLB entry 1 maps the pcpu area of processor (only for SMP builds)
@@ -313,14 +292,6 @@
#define VMNUM_PIDS 256
/*
- * TLB probe return codes.
- */
-#define VMTLB_NOT_FOUND 0
-#define VMTLB_FOUND 1
-#define VMTLB_FOUND_WITH_PATCH 2
-#define VMTLB_PROBE_ERROR 3
-
-/*
* Exported definitions unique to mips cpu support.
*/
@@ -335,6 +306,7 @@
#ifndef _LOCORE
#include <machine/cpufunc.h>
#include <machine/frame.h>
+
/*
* Arguments to hardclock and gatherstats encapsulate the previous
* machine state in an opaque clockframe.
@@ -455,12 +427,9 @@ extern union cpuprid cpu_id;
#if defined(_KERNEL) && !defined(_LOCORE)
extern union cpuprid fpu_id;
-struct tlb;
struct user;
int Mips_ConfigCache(void);
-void Mips_SetWIRED(int);
-void Mips_SetPID(int);
void Mips_SyncCache(void);
void Mips_SyncDCache(vm_offset_t, int);
@@ -471,12 +440,6 @@ void Mips_HitInvalidateDCache(vm_offset_t, int);
void Mips_SyncICache(vm_offset_t, int);
void Mips_InvalidateICache(vm_offset_t, int);
-void Mips_TLBFlush(int);
-void Mips_TLBFlushAddr(vm_offset_t);
-void Mips_TLBWriteIndexed(int, struct tlb *);
-void Mips_TLBUpdate(vm_offset_t, unsigned);
-void Mips_TLBRead(int, struct tlb *);
-void mips_TBIAP(int);
void wbflush(void);
extern u_int32_t cpu_counter_interval; /* Number of counter ticks/tick */
@@ -516,16 +479,6 @@ extern int intr_nesting_level;
: "r" (func), "r" (arg0), "r" (arg1), "r" (arg2) /* inputs */ \
: "$31", "$4", "$5", "$6");
-#define MachSetPID Mips_SetPID
-#define MachTLBUpdate Mips_TLBUpdate
-#define mips_TBIS Mips_TLBFlushAddr
-#define MIPS_TBIAP() mips_TBIAP(num_tlbentries)
-#define MachSetWIRED(index) Mips_SetWIRED(index)
-#define MachTLBFlush(count) Mips_TLBFlush(count)
-#define MachTLBGetPID(pid) (pid = Mips_TLBGetPID())
-#define MachTLBRead(tlbno, tlbp) Mips_TLBRead(tlbno, tlbp)
-#define MachFPTrap(sr, cause, pc) MipsFPTrap(sr, cause, pc)
-
/*
* Enable realtime clock (always enabled).
*/
@@ -542,8 +495,6 @@ extern int intr_nesting_level;
* Low level access routines to CPU registers
*/
-int Mips_TLBGetPID(void);
-
void swi_vm(void *);
void cpu_halt(void);
void cpu_reset(void);
diff --git a/sys/mips/include/pmap.h b/sys/mips/include/pmap.h
index 0091d58..6130dd0 100644
--- a/sys/mips/include/pmap.h
+++ b/sys/mips/include/pmap.h
@@ -49,15 +49,7 @@
#include <machine/vmparam.h>
#include <machine/pte.h>
-#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
-
#define NKPT 120 /* actual number of kernel page tables */
-
-#ifndef NKPDE
-#define NKPDE 255 /* addressable number of page tables/pde's */
-#endif
-
-#define KPTDI (VM_MIN_KERNEL_ADDRESS >> SEGSHIFT)
#define NUSERPGTBLS (VM_MAXUSER_ADDRESS >> SEGSHIFT)
#ifndef LOCORE
@@ -109,6 +101,7 @@ pd_entry_t pmap_segmap(pmap_t pmap, vm_offset_t va);
vm_offset_t pmap_kextract(vm_offset_t va);
#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+#define pmap_asid(pmap) (pmap)->pm_asid[PCPU_GET(cpuid)].asid
extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
@@ -183,11 +176,6 @@ int pmap_compute_pages_to_dump(void);
void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
void pmap_flush_pvcache(vm_page_t m);
-/*
- * Function to save TLB contents so that they may be inspected in the debugger.
- */
-extern void pmap_save_tlb(void);
-
#endif /* _KERNEL */
#endif /* !LOCORE */
diff --git a/sys/mips/include/pte.h b/sys/mips/include/pte.h
index e3b46ca..eac22b9 100644
--- a/sys/mips/include/pte.h
+++ b/sys/mips/include/pte.h
@@ -1,13 +1,6 @@
-/* $OpenBSD: pte.h,v 1.4 1998/01/28 13:46:25 pefo Exp $ */
-
/*-
- * Copyright (c) 1988 University of Utah.
- * Copyright (c) 1992, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department and Ralph Campbell.
+ * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -17,18 +10,11 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -37,60 +23,66 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: Utah Hdr: pte.h 1.11 89/09/03
- * from: @(#)pte.h 8.1 (Berkeley) 6/10/93
- * JNPR: pte.h,v 1.1.4.1 2007/09/10 06:20:19 girish
* $FreeBSD$
*/
-#ifndef _MACHINE_PTE_H_
+#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
-#include <machine/endian.h>
-
/*
- * MIPS hardware page table entry
+ * TLB and PTE management. Most things operate within the context of
+ * EntryLo0,1, and begin with TLBLO_. Things which work with EntryHi
+ * start with TLBHI_. PTE bits begin with PG_.
+ *
+ * Note that we use the same size VM and TLB pages.
*/
-
-#ifndef _LOCORE
-struct pte {
-#if BYTE_ORDER == BIG_ENDIAN
-unsigned int pg_prot:2, /* SW: access control */
- pg_pfnum:24, /* HW: core page frame number or 0 */
- pg_attr:3, /* HW: cache attribute */
- pg_m:1, /* HW: modified (dirty) bit */
- pg_v:1, /* HW: valid bit */
- pg_g:1; /* HW: ignore pid bit */
-#endif
-#if BYTE_ORDER == LITTLE_ENDIAN
-unsigned int pg_g:1, /* HW: ignore pid bit */
- pg_v:1, /* HW: valid bit */
- pg_m:1, /* HW: modified (dirty) bit */
- pg_attr:3, /* HW: cache attribute */
- pg_pfnum:24, /* HW: core page frame number or 0 */
- pg_prot:2; /* SW: access control */
-#endif
-};
+#define TLB_PAGE_SHIFT (PAGE_SHIFT)
+#define TLB_PAGE_SIZE (1 << TLB_PAGE_SHIFT)
+#define TLB_PAGE_MASK (TLB_PAGE_SIZE - 1)
/*
- * Structure defining an tlb entry data set.
+ * TLB PageMask register. Has mask bits set above the default, 4K, page mask.
*/
+#define TLBMASK_SHIFT (13)
+#define TLBMASK_MASK ((PAGE_MASK >> TLBMASK_SHIFT) << TLBMASK_SHIFT)
-struct tlb {
- int tlb_mask;
- int tlb_hi;
- int tlb_lo0;
- int tlb_lo1;
-};
+/*
+ * PFN for EntryLo register. Upper bits are 0, which is to say that
+ * bit 29 is the last hardware bit; Bits 30 and upwards (EntryLo is
+ * 64 bit though it can be referred to in 32-bits providing 2 software
+ * bits safely. We use it as 64 bits to get many software bits, and
+ * god knows what else.) are unacknowledged by hardware. They may be
+ * written as anything, but otherwise they have as much meaning as
+ * other 0 fields.
+ */
+#define TLBLO_SWBITS_SHIFT (30)
+#define TLBLO_SWBITS_MASK (0x3U << TLBLO_SWBITS_SHIFT)
+#define TLBLO_PFN_SHIFT (6)
+#define TLBLO_PFN_MASK (0x3FFFFFC0)
+#define TLBLO_PA_TO_PFN(pa) ((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
+#define TLBLO_PFN_TO_PA(pfn) ((vm_paddr_t)((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)
+#define TLBLO_PTE_TO_PFN(pte) ((pte) & TLBLO_PFN_MASK)
+#define TLBLO_PTE_TO_PA(pte) (TLBLO_PFN_TO_PA(TLBLO_PTE_TO_PFN((pte))))
+
+/*
+ * VPN for EntryHi register. Upper two bits select user, supervisor,
+ * or kernel. Bits 61 to 40 copy bit 63. VPN2 is bits 39 and down to
+ * as low as 13, down to PAGE_SHIFT, to index 2 TLB pages*. From bit 12
+ * to bit 8 there is a 5-bit 0 field. Low byte is ASID.
+ *
+ * Note that in FreeBSD, we map 2 TLB pages is equal to 1 VM page.
+ */
+#define TLBHI_ASID_MASK (0xff)
+#define TLBHI_ENTRY(va, asid) (((va) & ~PAGE_MASK) | ((asid) & TLBHI_ASID_MASK))
+#ifndef _LOCORE
typedef unsigned int pt_entry_t;
typedef pt_entry_t *pd_entry_t;
+#endif
#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
-#endif /* _LOCORE */
-
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define PTE_WIRED 0x80000000 /* SW */
@@ -119,11 +111,6 @@ typedef pt_entry_t *pd_entry_t;
#define PTE_HVPN 0xffffe000 /* Hardware page no mask */
#define PTE_ASID 0x000000ff /* Address space ID */
-#define PTE_SHIFT 6
-#define pfn_is_ext(x) ((x) & 0x3c000000)
-#define vad_to_pfn(x) (((unsigned)(x) >> PTE_SHIFT) & PTE_FRAME)
-#define vad_to_pfn64(x) ((quad_t)(x) >> PTE_SHIFT) & PTE_FRAME)
-#define pfn_to_vad(x) (((x) & PTE_FRAME) << PTE_SHIFT)
/* User virtual to pte offset in page table */
#define vad_to_pte_offset(adr) (((adr) >> PAGE_SHIFT) & (NPTEPG -1))
@@ -138,16 +125,5 @@ typedef pt_entry_t *pd_entry_t;
#define mips_pg_cwpage_bit() (PTE_CWPAGE)
#define mips_pg_global_bit() (PTE_G)
#define mips_pg_wired_bit() (PTE_WIRED)
-#define mips_tlbpfn_to_paddr(x) pfn_to_vad((x))
-#define mips_paddr_to_tlbpfn(x) vad_to_pfn((x))
-
-/* These are not used */
-#define PTE_SIZE_4K 0x00000000
-#define PTE_SIZE_16K 0x00006000
-#define PTE_SIZE_64K 0x0001e000
-#define PTE_SIZE_256K 0x0007e000
-#define PTE_SIZE_1M 0x001fe000
-#define PTE_SIZE_4M 0x007fe000
-#define PTE_SIZE_16M 0x01ffe000
-#endif /* !_MACHINE_PTE_H_ */
+#endif /* !_MACHINE_PTE_H_ */
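
A quick sanity check on the EntryLo PFN macros introduced above, assuming the
usual 4 KB pages (PAGE_SHIFT, and therefore TLB_PAGE_SHIFT, of 12). The
constants are copied from the new header; the harness itself is illustration
only and not part of the patch:

#include <assert.h>
#include <stdint.h>

#define TLB_PAGE_SHIFT	12		/* assumes 4 KB pages */
#define TLBLO_PFN_SHIFT	6
#define TLBLO_PFN_MASK	0x3FFFFFC0
#define TLBLO_PA_TO_PFN(pa)	((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
#define TLBLO_PFN_TO_PA(pfn)	((uint64_t)((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)

int
main(void)
{
	uint32_t pa = 0x10002000;		/* some physical address */
	uint32_t pfn = TLBLO_PA_TO_PFN(pa);

	assert(pfn == 0x00400080);		/* (pa >> 12) << 6 */
	assert(TLBLO_PFN_TO_PA(pfn) == pa);	/* round-trips cleanly */
	return (0);
}
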
diff --git a/sys/mips/include/tlb.h b/sys/mips/include/tlb.h
new file mode 100644
index 0000000..e889ee6
--- /dev/null
+++ b/sys/mips/include/tlb.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TLB_H_
+#define _MACHINE_TLB_H_
+
+void tlb_insert_wired(unsigned, vm_offset_t, pt_entry_t, pt_entry_t);
+void tlb_invalidate_address(struct pmap *, vm_offset_t);
+void tlb_invalidate_all(void);
+void tlb_invalidate_all_user(struct pmap *);
+void tlb_save(void);
+void tlb_update(struct pmap *, vm_offset_t, pt_entry_t);
+
+#endif /* !_MACHINE_TLB_H_ */
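
These six declarations are the entire public surface of the new module.
Judging from the pmap.c, machdep.c and mp_machdep.c hunks below, callers use
them roughly as follows (an illustrative fragment, not compilable on its own;
pmap, va, pte and PCPU_TLB_ENTRY come from the surrounding kernel code):

/* Illustrative only; not part of the patch. */
tlb_update(pmap, va, *pte);		/* a live PTE changed: patch the matching entry in place */
tlb_invalidate_address(pmap, va);	/* a mapping went away: drop that one entry, if present */
tlb_invalidate_all_user(pmap);		/* drop every entry tagged with this pmap's ASID */
tlb_invalidate_all_user(NULL);		/* ASID rollover: drop every non-kernel entry */
tlb_invalidate_all();			/* drop everything above the wired entries */
tlb_save();				/* snapshot the TLB for IPI_STOP / the DDB "show tlb" command */
tlb_insert_wired(PCPU_TLB_ENTRY, va, pte0, pte1);	/* one-time wired mapping, e.g. the pcpu area */
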
diff --git a/sys/mips/mips/cpu.c b/sys/mips/mips/cpu.c
index 7c61fb8..5ce2400 100644
--- a/sys/mips/mips/cpu.c
+++ b/sys/mips/mips/cpu.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <machine/intr_machdep.h>
#include <machine/locore.h>
#include <machine/pte.h>
+#include <machine/tlb.h>
#include <machine/hwfunc.h>
struct mips_cpuinfo cpuinfo;
@@ -135,9 +136,9 @@ mips_cpu_init(void)
platform_cpu_init();
mips_get_identity(&cpuinfo);
num_tlbentries = cpuinfo.tlb_nentries;
- Mips_SetWIRED(0);
- Mips_TLBFlush(num_tlbentries);
- Mips_SetWIRED(VMWIRED_ENTRIES);
+ mips_wr_wired(0);
+ tlb_invalidate_all();
+ mips_wr_wired(VMWIRED_ENTRIES);
mips_config_cache(&cpuinfo);
mips_vector_init();
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
index 59a7656..85290e6 100644
--- a/sys/mips/mips/machdep.c
+++ b/sys/mips/mips/machdep.c
@@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <machine/hwfunc.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
+#include <machine/tlb.h>
#ifdef DDB
#include <sys/kdb.h>
#include <ddb/ddb.h>
@@ -118,7 +119,7 @@ vm_offset_t kstack0;
/*
* Each entry in the pcpu_space[] array is laid out in the following manner:
* struct pcpu for cpu 'n' pcpu_space[n]
- * boot stack for cpu 'n' pcpu_space[n] + PAGE_SIZE * 2 - START_FRAME
+ * boot stack for cpu 'n' pcpu_space[n] + PAGE_SIZE * 2 - CALLFRAME_SIZ
*
* Note that the boot stack grows downwards and we assume that we never
* use enough stack space to trample over the 'struct pcpu' that is at
@@ -413,20 +414,17 @@ void
mips_pcpu_tlb_init(struct pcpu *pcpu)
{
vm_paddr_t pa;
- struct tlb tlb;
- int lobits;
+ pt_entry_t pte;
/*
* Map the pcpu structure at the virtual address 'pcpup'.
* We use a wired tlb index to do this one-time mapping.
*/
- memset(&tlb, 0, sizeof(tlb));
pa = vtophys(pcpu);
- lobits = PTE_RW | PTE_V | PTE_G | PTE_CACHE;
- tlb.tlb_hi = (vm_offset_t)pcpup;
- tlb.tlb_lo0 = mips_paddr_to_tlbpfn(pa) | lobits;
- tlb.tlb_lo1 = mips_paddr_to_tlbpfn(pa + PAGE_SIZE) | lobits;
- Mips_TLBWriteIndexed(PCPU_TLB_ENTRY, &tlb);
+ pte = PTE_RW | PTE_V | PTE_G | PTE_CACHE;
+ tlb_insert_wired(PCPU_TLB_ENTRY, (vm_offset_t)pcpup,
+ TLBLO_PA_TO_PFN(pa) | pte,
+ TLBLO_PA_TO_PFN(pa + PAGE_SIZE) | pte);
}
#endif
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
index 7428719..00a91fb 100644
--- a/sys/mips/mips/mp_machdep.c
+++ b/sys/mips/mips/mp_machdep.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <machine/hwfunc.h>
#include <machine/intr_machdep.h>
#include <machine/cache.h>
+#include <machine/tlb.h>
struct pcb stoppcbs[MAXCPU];
@@ -128,7 +129,7 @@ mips_ipi_handler(void *arg)
CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
savectx(&stoppcbs[cpu]);
- pmap_save_tlb();
+ tlb_save();
/* Indicate we are stopped */
atomic_set_int(&stopped_cpus, cpumask);
@@ -238,9 +239,9 @@ void
smp_init_secondary(u_int32_t cpuid)
{
/* TLB */
- Mips_SetWIRED(0);
- Mips_TLBFlush(num_tlbentries);
- Mips_SetWIRED(VMWIRED_ENTRIES);
+ mips_wr_wired(0);
+ tlb_invalidate_all();
+ mips_wr_wired(VMWIRED_ENTRIES);
/*
* We assume that the L1 cache on the APs is identical to the one
@@ -251,7 +252,7 @@ smp_init_secondary(u_int32_t cpuid)
mips_sync();
- MachSetPID(0);
+ mips_wr_entryhi(0);
pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
dpcpu_init(dpcpu, cpuid);
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 8bb4465..8a833aa 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -99,6 +99,7 @@ __FBSDID("$FreeBSD$");
#include <machine/cache.h>
#include <machine/md_var.h>
+#include <machine/tlb.h>
#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
@@ -150,8 +151,6 @@ unsigned pmap_max_asid; /* max ASID supported by the system */
vm_offset_t kernel_vm_end;
-static struct tlb tlbstash[MAXCPU][MIPS_MAX_TLB_ENTRIES];
-
static void pmap_asid_alloc(pmap_t pmap);
/*
@@ -182,8 +181,6 @@ static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
-static void pmap_TLB_invalidate_kernel(vm_offset_t);
-static void pmap_TLB_update_kernel(vm_offset_t, pt_entry_t);
static vm_page_t pmap_alloc_pte_page(pmap_t, unsigned int, int, vm_offset_t *);
static void pmap_release_pte_page(vm_page_t);
@@ -223,7 +220,7 @@ static struct local_sysmaps sysmap_lmem[MAXCPU];
intr = intr_disable(); \
sched_pin(); \
va = sysm->base; \
- npte = mips_paddr_to_tlbpfn(phys) | \
+ npte = TLBLO_PA_TO_PFN(phys) | \
PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE; \
pte = pmap_pte(kernel_pmap, va); \
*pte = npte; \
@@ -241,11 +238,11 @@ static struct local_sysmaps sysmap_lmem[MAXCPU];
sched_pin(); \
va1 = sysm->base; \
va2 = sysm->base + PAGE_SIZE; \
- npte = mips_paddr_to_tlbpfn(phys1) | \
+ npte = TLBLO_PA_TO_PFN(phys1) | \
PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE; \
pte = pmap_pte(kernel_pmap, va1); \
*pte = npte; \
- npte = mips_paddr_to_tlbpfn(phys2) | \
+ npte = TLBLO_PA_TO_PFN(phys2) | \
PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE; \
pte = pmap_pte(kernel_pmap, va2); \
*pte = npte; \
@@ -255,11 +252,11 @@ static struct local_sysmaps sysmap_lmem[MAXCPU];
#define PMAP_LMEM_UNMAP() \
pte = pmap_pte(kernel_pmap, sysm->base); \
*pte = PTE_G; \
- pmap_TLB_invalidate_kernel(sysm->base); \
+ tlb_invalidate_address(kernel_pmap, sysm->base); \
sysm->valid1 = 0; \
pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE); \
*pte = PTE_G; \
- pmap_TLB_invalidate_kernel(sysm->base + PAGE_SIZE); \
+ tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE); \
sysm->valid2 = 0; \
sched_unpin(); \
intr_restore(intr); \
@@ -499,7 +496,7 @@ again:
kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
kernel_pmap->pm_asid[0].gen = 0;
pmap_max_asid = VMNUM_PIDS;
- MachSetPID(0);
+ mips_wr_entryhi(0);
}
/*
@@ -576,9 +573,14 @@ pmap_invalidate_all_action(void *arg)
#endif
- if (pmap->pm_active & PCPU_GET(cpumask)) {
- pmap_TLB_invalidate_all();
- } else
+ if (pmap == kernel_pmap) {
+ tlb_invalidate_all();
+ return;
+ }
+
+ if (pmap->pm_active & PCPU_GET(cpumask))
+ tlb_invalidate_all_user(pmap);
+ else
pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
}
@@ -608,7 +610,7 @@ pmap_invalidate_page_action(void *arg)
#endif
if (is_kernel_pmap(pmap)) {
- pmap_TLB_invalidate_kernel(va);
+ tlb_invalidate_address(pmap, va);
return;
}
if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
@@ -617,18 +619,7 @@ pmap_invalidate_page_action(void *arg)
pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
return;
}
- va = pmap_va_asid(pmap, (va & ~PAGE_MASK));
- mips_TBIS(va);
-}
-
-static void
-pmap_TLB_invalidate_kernel(vm_offset_t va)
-{
- u_int32_t pid;
-
- MachTLBGetPID(pid);
- va = va | (pid << VMTLB_PID_SHIFT);
- mips_TBIS(va);
+ tlb_invalidate_address(pmap, va);
}
struct pmap_update_page_arg {
@@ -659,7 +650,7 @@ pmap_update_page_action(void *arg)
#endif
if (is_kernel_pmap(pmap)) {
- pmap_TLB_update_kernel(va, pte);
+ tlb_update(pmap, va, pte);
return;
}
if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
@@ -668,21 +659,7 @@ pmap_update_page_action(void *arg)
pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
return;
}
- va = pmap_va_asid(pmap, (va & ~PAGE_MASK));
- MachTLBUpdate(va, pte);
-}
-
-static void
-pmap_TLB_update_kernel(vm_offset_t va, pt_entry_t pte)
-{
- u_int32_t pid;
-
- va &= ~PAGE_MASK;
-
- MachTLBGetPID(pid);
- va = va | (pid << VMTLB_PID_SHIFT);
-
- MachTLBUpdate(va, pte);
+ tlb_update(pmap, va, pte);
}
/*
@@ -700,7 +677,7 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, va);
if (pte) {
- retval = mips_tlbpfn_to_paddr(*pte) | (va & PAGE_MASK);
+ retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
}
PMAP_UNLOCK(pmap);
return retval;
@@ -727,10 +704,10 @@ retry:
pte = *pmap_pte(pmap, va);
if (pte != 0 && pmap_pte_v(&pte) &&
((pte & PTE_RW) || (prot & VM_PROT_WRITE) == 0)) {
- if (vm_page_pa_tryrelock(pmap, mips_tlbpfn_to_paddr(pte), &pa))
+ if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa))
goto retry;
- m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
+ m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
vm_page_hold(m);
}
PA_UNLOCK_COND(pa);
@@ -754,7 +731,7 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
#ifdef PMAP_DEBUG
printf("pmap_kenter: va: 0x%08x -> pa: 0x%08x\n", va, pa);
#endif
- npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;
+ npte = TLBLO_PA_TO_PFN(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;
if (is_cacheable_mem(pa))
npte |= PTE_CACHE;
@@ -1484,7 +1461,7 @@ pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
pmap->pm_stats.wired_count -= 1;
pmap->pm_stats.resident_count -= 1;
- pa = mips_tlbpfn_to_paddr(oldpte);
+ pa = TLBLO_PTE_TO_PA(oldpte);
if (page_is_managed(pa)) {
m = PHYS_TO_VM_PAGE(pa);
@@ -1700,7 +1677,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
}
retry:
obits = pbits = *pte;
- pa = mips_tlbpfn_to_paddr(pbits);
+ pa = TLBLO_PTE_TO_PA(pbits);
if (page_is_managed(pa) && (pbits & PTE_M) != 0) {
m = PHYS_TO_VM_PAGE(pa);
@@ -1776,7 +1753,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pa = VM_PAGE_TO_PHYS(m);
om = NULL;
origpte = *pte;
- opa = mips_tlbpfn_to_paddr(origpte);
+ opa = TLBLO_PTE_TO_PA(origpte);
/*
* Mapping has not changed, must be protection or wiring change.
@@ -1873,7 +1850,7 @@ validate:
/*
* Now validate mapping with desired protection/wiring.
*/
- newpte = mips_paddr_to_tlbpfn(pa) | rw | PTE_V;
+ newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;
if (is_cacheable_mem(pa))
newpte |= PTE_CACHE;
@@ -2039,7 +2016,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
/*
* Now validate mapping with RO protection
*/
- *pte = mips_paddr_to_tlbpfn(pa) | PTE_V;
+ *pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
if (is_cacheable_mem(pa))
*pte |= PTE_CACHE;
@@ -2092,7 +2069,7 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
/* Since this is for the debugger, no locks or any other fun */
- npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ npte = TLBLO_PA_TO_PFN(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = npte;
sysm->valid1 = 1;
@@ -2407,7 +2384,7 @@ pmap_remove_pages(pmap_t pmap)
}
*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
- m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(tpte));
+ m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
KASSERT(m != NULL,
("pmap_remove_pages: bad tpte %x", tpte));
@@ -2814,7 +2791,7 @@ retry:
val = MINCORE_INCORE;
if ((pte & PTE_M) != 0)
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
- pa = mips_tlbpfn_to_paddr(pte);
+ pa = TLBLO_PTE_TO_PA(pte);
managed = page_is_managed(pa);
if (managed) {
/*
@@ -2856,7 +2833,7 @@ pmap_activate(struct thread *td)
pmap_asid_alloc(pmap);
if (td == curthread) {
PCPU_SET(segbase, pmap->pm_segtab);
- MachSetPID(pmap->pm_asid[PCPU_GET(cpuid)].asid);
+ mips_wr_entryhi(pmap->pm_asid[PCPU_GET(cpuid)].asid);
}
PCPU_SET(curpmap, pmap);
@@ -2948,7 +2925,7 @@ pmap_pid_dump(int pid)
vm_offset_t pa;
vm_page_t m;
- pa = mips_tlbpfn_to_paddr(*pte);
+ pa = TLBLO_PFN_TO_PA(*pte);
m = PHYS_TO_VM_PAGE(pa);
printf("va: %p, pt: %p, h: %d, w: %d, f: 0x%x",
(void *)va,
@@ -3044,7 +3021,7 @@ pmap_asid_alloc(pmap)
pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation));
else {
if (PCPU_GET(next_asid) == pmap_max_asid) {
- MIPS_TBIAP();
+ tlb_invalidate_all_user(NULL);
PCPU_SET(asid_generation,
(PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
if (PCPU_GET(asid_generation) == 0) {
@@ -3124,7 +3101,7 @@ pmap_kextract(vm_offset_t va)
if (curproc && curproc->p_vmspace) {
ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
if (ptep)
- pa = mips_tlbpfn_to_paddr(*ptep) |
+ pa = TLBLO_PTE_TO_PA(*ptep) |
(va & PAGE_MASK);
}
} else if (va >= MIPS_KSEG0_START &&
@@ -3140,9 +3117,11 @@ pmap_kextract(vm_offset_t va)
if (kernel_pmap->pm_active) {
/* Its inside the virtual address range */
ptep = pmap_pte(kernel_pmap, va);
- if (ptep)
- pa = mips_tlbpfn_to_paddr(*ptep) |
- (va & PAGE_MASK);
+ if (ptep) {
+ return (TLBLO_PTE_TO_PA(*ptep) |
+ (va & PAGE_MASK));
+ }
+ return (0);
}
}
return pa;
@@ -3160,61 +3139,3 @@ pmap_flush_pvcache(vm_page_t m)
}
}
}
-
-void
-pmap_save_tlb(void)
-{
- int tlbno, cpu;
-
- cpu = PCPU_GET(cpuid);
-
- for (tlbno = 0; tlbno < num_tlbentries; ++tlbno)
- MachTLBRead(tlbno, &tlbstash[cpu][tlbno]);
-}
-
-#ifdef DDB
-#include <ddb/ddb.h>
-
-DB_SHOW_COMMAND(tlb, ddb_dump_tlb)
-{
- int cpu, tlbno;
- struct tlb *tlb;
-
- if (have_addr)
- cpu = ((addr >> 4) % 16) * 10 + (addr % 16);
- else
- cpu = PCPU_GET(cpuid);
-
- if (cpu < 0 || cpu >= mp_ncpus) {
- db_printf("Invalid CPU %d\n", cpu);
- return;
- } else
- db_printf("CPU %d:\n", cpu);
-
- if (cpu == PCPU_GET(cpuid))
- pmap_save_tlb();
-
- for (tlbno = 0; tlbno < num_tlbentries; ++tlbno) {
- tlb = &tlbstash[cpu][tlbno];
- if (tlb->tlb_lo0 & PTE_V || tlb->tlb_lo1 & PTE_V) {
- printf("TLB %2d vad 0x%0lx ",
- tlbno, (long)(tlb->tlb_hi & 0xffffff00));
- } else {
- printf("TLB*%2d vad 0x%0lx ",
- tlbno, (long)(tlb->tlb_hi & 0xffffff00));
- }
- printf("0=0x%0lx ", pfn_to_vad((long)tlb->tlb_lo0));
- printf("%c", tlb->tlb_lo0 & PTE_V ? 'V' : '-');
- printf("%c", tlb->tlb_lo0 & PTE_M ? 'M' : '-');
- printf("%c", tlb->tlb_lo0 & PTE_G ? 'G' : '-');
- printf(" atr %x ", (tlb->tlb_lo0 >> 3) & 7);
- printf("1=0x%0lx ", pfn_to_vad((long)tlb->tlb_lo1));
- printf("%c", tlb->tlb_lo1 & PTE_V ? 'V' : '-');
- printf("%c", tlb->tlb_lo1 & PTE_M ? 'M' : '-');
- printf("%c", tlb->tlb_lo1 & PTE_G ? 'G' : '-');
- printf(" atr %x ", (tlb->tlb_lo1 >> 3) & 7);
- printf(" sz=%x pid=%x\n", tlb->tlb_mask,
- (tlb->tlb_hi & 0x000000ff));
- }
-}
-#endif /* DDB */
diff --git a/sys/mips/mips/tlb.c b/sys/mips/mips/tlb.c
new file mode 100644
index 0000000..5855076
--- /dev/null
+++ b/sys/mips/mips/tlb.c
@@ -0,0 +1,311 @@
+/*-
+ * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/pte.h>
+#include <machine/tlb.h>
+
+struct tlb_state {
+ unsigned wired;
+ struct tlb_entry {
+ register_t entryhi;
+ register_t entrylo0;
+ register_t entrylo1;
+ } entry[MIPS_MAX_TLB_ENTRIES];
+};
+
+static struct tlb_state tlb_state[MAXCPU];
+
+#if 0
+/*
+ * PageMask must increment in steps of 2 bits.
+ */
+COMPILE_TIME_ASSERT(POPCNT(TLBMASK_MASK) % 2 == 0);
+#endif
+
+static inline void
+tlb_probe(void)
+{
+ __asm __volatile ("tlbp" : : : "memory");
+ mips_cp0_sync();
+}
+
+static inline void
+tlb_read(void)
+{
+ __asm __volatile ("tlbr" : : : "memory");
+ mips_cp0_sync();
+}
+
+static inline void
+tlb_write_indexed(void)
+{
+ __asm __volatile ("tlbwi" : : : "memory");
+ mips_cp0_sync();
+}
+
+static inline void
+tlb_write_random(void)
+{
+ __asm __volatile ("tlbwr" : : : "memory");
+ mips_cp0_sync();
+}
+
+static void tlb_invalidate_one(unsigned);
+
+void
+tlb_insert_wired(unsigned i, vm_offset_t va, pt_entry_t pte0, pt_entry_t pte1)
+{
+ register_t mask, asid;
+ register_t s;
+
+ va &= ~PAGE_MASK;
+
+ s = intr_disable();
+ mask = mips_rd_pagemask();
+ asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+ mips_wr_index(i);
+ mips_wr_pagemask(0);
+ mips_wr_entryhi(TLBHI_ENTRY(va, 0));
+ mips_wr_entrylo0(pte0);
+ mips_wr_entrylo1(pte1);
+ tlb_write_indexed();
+
+ mips_wr_entryhi(asid);
+ mips_wr_pagemask(mask);
+ intr_restore(s);
+}
+
+void
+tlb_invalidate_address(struct pmap *pmap, vm_offset_t va)
+{
+ register_t mask, asid;
+ register_t s;
+ int i;
+
+ va &= ~PAGE_MASK;
+
+ s = intr_disable();
+ mask = mips_rd_pagemask();
+ asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+ mips_wr_pagemask(0);
+ mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
+ tlb_probe();
+ i = mips_rd_index();
+ if (i >= 0)
+ tlb_invalidate_one(i);
+
+ mips_wr_entryhi(asid);
+ mips_wr_pagemask(mask);
+ intr_restore(s);
+}
+
+void
+tlb_invalidate_all(void)
+{
+ register_t mask, asid;
+ register_t s;
+ unsigned i;
+
+ s = intr_disable();
+ mask = mips_rd_pagemask();
+ asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+ for (i = mips_rd_wired(); i < num_tlbentries; i++)
+ tlb_invalidate_one(i);
+
+ mips_wr_entryhi(asid);
+ mips_wr_pagemask(mask);
+ intr_restore(s);
+}
+
+void
+tlb_invalidate_all_user(struct pmap *pmap)
+{
+ register_t mask, asid;
+ register_t s;
+ unsigned i;
+
+ s = intr_disable();
+ mask = mips_rd_pagemask();
+ asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+ for (i = mips_rd_wired(); i < num_tlbentries; i++) {
+ register_t uasid;
+
+ mips_wr_index(i);
+ tlb_read();
+
+ uasid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+ if (pmap == NULL) {
+ /*
+ * Invalidate all non-kernel entries.
+ */
+ if (uasid == 0)
+ continue;
+ } else {
+ /*
+ * Invalidate this pmap's entries.
+ */
+ if (uasid != pmap_asid(pmap))
+ continue;
+ }
+ tlb_invalidate_one(i);
+ }
+
+ mips_wr_entryhi(asid);
+ mips_wr_pagemask(mask);
+ intr_restore(s);
+}
+
+/* XXX Only if DDB? */
+void
+tlb_save(void)
+{
+ unsigned i, cpu;
+
+ cpu = PCPU_GET(cpuid);
+
+ tlb_state[cpu].wired = mips_rd_wired();
+ for (i = 0; i < num_tlbentries; i++) {
+ mips_wr_index(i);
+ tlb_read();
+
+ tlb_state[cpu].entry[i].entryhi = mips_rd_entryhi();
+ tlb_state[cpu].entry[i].entrylo0 = mips_rd_entrylo0();
+ tlb_state[cpu].entry[i].entrylo1 = mips_rd_entrylo1();
+ }
+}
+
+void
+tlb_update(struct pmap *pmap, vm_offset_t va, pt_entry_t pte)
+{
+ register_t mask, asid;
+ register_t s;
+ int i;
+
+ va &= ~PAGE_MASK;
+ pte &= ~TLBLO_SWBITS_MASK;
+
+ s = intr_disable();
+ mask = mips_rd_pagemask();
+ asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+ mips_wr_pagemask(0);
+ mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
+ tlb_probe();
+ i = mips_rd_index();
+ if (i >= 0) {
+ tlb_read();
+
+ if ((va & PAGE_SIZE) == 0) {
+ mips_wr_entrylo0(pte);
+ } else {
+ mips_wr_entrylo1(pte);
+ }
+ tlb_write_indexed();
+ }
+
+ mips_wr_entryhi(asid);
+ mips_wr_pagemask(mask);
+ intr_restore(s);
+}
+
+static void
+tlb_invalidate_one(unsigned i)
+{
+ /* XXX an invalid ASID? */
+ mips_wr_entryhi(TLBHI_ENTRY(MIPS_KSEG0_START + (2 * i * PAGE_SIZE), 0));
+ mips_wr_entrylo0(0);
+ mips_wr_entrylo1(0);
+ mips_wr_pagemask(0);
+ mips_wr_index(i);
+ tlb_write_indexed();
+}
+
+#ifdef DDB
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(tlb, ddb_dump_tlb)
+{
+ register_t ehi, elo0, elo1;
+ unsigned i, cpu;
+
+ /*
+ * XXX
+ * The worst conversion from hex to decimal ever.
+ */
+ if (have_addr)
+ cpu = ((addr >> 4) % 16) * 10 + (addr % 16);
+ else
+ cpu = PCPU_GET(cpuid);
+
+ if (cpu < 0 || cpu >= mp_ncpus) {
+ db_printf("Invalid CPU %u\n", cpu);
+ return;
+ }
+
+ if (cpu == PCPU_GET(cpuid))
+ tlb_save();
+
+ db_printf("Beginning TLB dump for CPU %u...\n", cpu);
+ for (i = 0; i < num_tlbentries; i++) {
+ if (i == tlb_state[cpu].wired) {
+ if (i != 0)
+ db_printf("^^^ WIRED ENTRIES ^^^\n");
+ else
+ db_printf("(No wired entries.)\n");
+ }
+
+ /* XXX PageMask. */
+ ehi = tlb_state[cpu].entry[i].entryhi;
+ elo0 = tlb_state[cpu].entry[i].entrylo0;
+ elo1 = tlb_state[cpu].entry[i].entrylo1;
+
+ if (elo0 == 0 && elo1 == 0)
+ continue;
+
+ db_printf("#%u\t=> %jx\n", i, (intmax_t)ehi);
+ db_printf(" Lo0\t%jx\t(%#jx)\n", (intmax_t)elo0, (intmax_t)TLBLO_PTE_TO_PA(elo0));
+ db_printf(" Lo1\t%jx\t(%#jx)\n", (intmax_t)elo1, (intmax_t)TLBLO_PTE_TO_PA(elo1));
+ }
+ db_printf("Finished.\n");
+}
+#endif
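
One detail of tlb_update() above is easy to miss: each TLB entry maps an
even/odd pair of pages, so the test on (va & PAGE_SIZE) decides whether
EntryLo0 or EntryLo1 gets rewritten. A tiny user-space model of that
selection, assuming 4 KB pages and written only for illustration:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* assumes 4 KB pages */
#define PAGE_SIZE	(1u << PAGE_SHIFT)

/* Mirror of the EntryLo half that tlb_update() rewrites for a given va. */
static int
entrylo_slot(uint32_t va)
{
	return ((va & PAGE_SIZE) == 0 ? 0 : 1);
}

int
main(void)
{
	assert(entrylo_slot(0x00400000) == 0);	/* even page -> EntryLo0 */
	assert(entrylo_slot(0x00401000) == 1);	/* odd page  -> EntryLo1 */
	assert(entrylo_slot(0x00402fff) == 0);	/* offset bits don't matter */
	return (0);
}
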
diff --git a/sys/mips/mips/trap.c b/sys/mips/mips/trap.c
index 831e018..d0ea4e8 100644
--- a/sys/mips/mips/trap.c
+++ b/sys/mips/mips/trap.c
@@ -396,7 +396,7 @@ trap(struct trapframe *trapframe)
}
*pte |= mips_pg_m_bit();
pmap_update_page(kernel_pmap, trapframe->badvaddr, *pte);
- pa = mips_tlbpfn_to_paddr(*pte);
+ pa = TLBLO_PTE_TO_PA(*pte);
if (!page_is_managed(pa))
panic("trap: ktlbmod: unmanaged page");
pmap_set_modified(pa);
@@ -435,7 +435,7 @@ trap(struct trapframe *trapframe)
}
*pte |= mips_pg_m_bit();
pmap_update_page(pmap, trapframe->badvaddr, *pte);
- pa = mips_tlbpfn_to_paddr(*pte);
+ pa = TLBLO_PTE_TO_PA(*pte);
if (!page_is_managed(pa))
panic("trap: utlbmod: unmanaged page");
pmap_set_modified(pa);