path: root/arch/mips/mm
author		Timothy Pearson <tpearson@raptorengineering.com>	2017-08-23 14:45:25 -0500
committer	Timothy Pearson <tpearson@raptorengineering.com>	2017-08-23 14:45:25 -0500
commit		fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204 (patch)
tree		22962a4387943edc841c72a4e636a068c66d58fd /arch/mips/mm
download	ast2050-linux-kernel-fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204.zip
		ast2050-linux-kernel-fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204.tar.gz
Initial import of modified Linux 2.6.28 tree
Original upstream URL: git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git | branch linux-2.6.28.y
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/Makefile		  36
-rw-r--r--	arch/mips/mm/c-r3k.c		 340
-rw-r--r--	arch/mips/mm/c-r4k.c		1403
-rw-r--r--	arch/mips/mm/c-tx39.c		 433
-rw-r--r--	arch/mips/mm/cache.c		 194
-rw-r--r--	arch/mips/mm/cerr-sb1.c		 589
-rw-r--r--	arch/mips/mm/cex-gen.S		  42
-rw-r--r--	arch/mips/mm/cex-sb1.S		 175
-rw-r--r--	arch/mips/mm/dma-default.c	 390
-rw-r--r--	arch/mips/mm/extable.c		  21
-rw-r--r--	arch/mips/mm/fault.c		 252
-rw-r--r--	arch/mips/mm/highmem.c		 118
-rw-r--r--	arch/mips/mm/init.c		 509
-rw-r--r--	arch/mips/mm/ioremap.c		 194
-rw-r--r--	arch/mips/mm/page.c		 689
-rw-r--r--	arch/mips/mm/pgtable-32.c	  70
-rw-r--r--	arch/mips/mm/pgtable-64.c	  73
-rw-r--r--	arch/mips/mm/sc-ip22.c		 177
-rw-r--r--	arch/mips/mm/sc-mips.c		 111
-rw-r--r--	arch/mips/mm/sc-r5k.c		 108
-rw-r--r--	arch/mips/mm/sc-rm7k.c		 173
-rw-r--r--	arch/mips/mm/tlb-r3k.c		 285
-rw-r--r--	arch/mips/mm/tlb-r4k.c		 500
-rw-r--r--	arch/mips/mm/tlb-r8k.c		 249
-rw-r--r--	arch/mips/mm/tlbex-fault.S	  28
-rw-r--r--	arch/mips/mm/tlbex.c		1282
-rw-r--r--	arch/mips/mm/uasm.c		 588
-rw-r--r--	arch/mips/mm/uasm.h		 184
28 files changed, 9213 insertions, 0 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
new file mode 100644
index 0000000..95ba32b
--- /dev/null
+++ b/arch/mips/mm/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for the Linux/MIPS-specific parts of the memory manager.
+#
+
+obj-y += cache.o dma-default.o extable.o fault.o \
+ init.o tlbex.o tlbex-fault.o uasm.o page.o
+
+obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
+obj-$(CONFIG_64BIT) += pgtable-64.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+
+obj-$(CONFIG_CPU_LOONGSON2) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_MIPS64) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_NEVADA) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R10000) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
+obj-$(CONFIG_CPU_R4300) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R4X00) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5000) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5432) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5500) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
+obj-$(CONFIG_CPU_RM7000) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_RM9000) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
+obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
+obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o
+
+obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
new file mode 100644
index 0000000..5500c20
--- /dev/null
+++ b/arch/mips/mm/c-r3k.c
@@ -0,0 +1,340 @@
+/*
+ * r2300.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Tx39XX R4k style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ * Copyright (C) 2001, 2004, 2007 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+static unsigned long icache_size, dcache_size; /* Size in bytes */
+static unsigned long icache_lsize, dcache_lsize; /* Line size in bytes */
+
+unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
+{
+ unsigned long flags, status, dummy, size;
+ volatile unsigned long *p;
+
+ p = (volatile unsigned long *) KSEG0;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ca_flags|flags)&~ST0_IEC);
+
+ *p = 0xa5a55a5a;
+ dummy = *p;
+ status = read_c0_status();
+
+ if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
+ size = 0;
+ } else {
+ for (size = 128; size <= 0x40000; size <<= 1)
+ *(p + size) = 0;
+ *p = -1;
+ for (size = 128;
+ (size <= 0x40000) && (*(p + size) == 0);
+ size <<= 1)
+ ;
+ if (size > 0x40000)
+ size = 0;
+ }
+
+ write_c0_status(flags);
+
+ return size * sizeof(*p);
+}
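+
+/*
+ * The probe in r3k_cache_size() works because, with the cache isolated,
+ * accesses hit the cache only and the index wraps modulo the cache size:
+ * zeroes are written at power-of-two word offsets, -1 is written at offset
+ * 0, and the first offset that reads back non-zero is the one that aliases
+ * offset 0, i.e. the cache size in words (e.g. a 4kB cache with 4-byte
+ * words stops at offset 1024).
+ */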
+
+unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
+{
+ unsigned long flags, status, lsize, i;
+ volatile unsigned long *p;
+
+ p = (volatile unsigned long *) KSEG0;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ca_flags|flags)&~ST0_IEC);
+
+ for (i = 0; i < 128; i++)
+ *(p + i) = 0;
+ *(volatile unsigned char *)p = 0;
+ for (lsize = 1; lsize < 128; lsize <<= 1) {
+ *(p + lsize);
+ status = read_c0_status();
+ if (!(status & ST0_CM))
+ break;
+ }
+ for (i = 0; i < 128; i += lsize)
+ *(volatile unsigned char *)(p + i) = 0;
+
+ write_c0_status(flags);
+
+ return lsize * sizeof(*p);
+}
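+
+/*
+ * r3k_cache_lsize() relies on a partial-word store invalidating the
+ * addressed line while the cache is isolated: after the byte store at
+ * offset 0, loads keep missing (ST0_CM set) until they fall into the next,
+ * still-valid line, so the first offset that hits equals the line size in
+ * words.
+ */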
+
+static void __cpuinit r3k_probe_cache(void)
+{
+ dcache_size = r3k_cache_size(ST0_ISC);
+ if (dcache_size)
+ dcache_lsize = r3k_cache_lsize(ST0_ISC);
+
+ icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
+ if (icache_size)
+ icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
+}
+
+static void r3k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ size = end - start;
+ if (size > icache_size || KSEGX(start) != KSEG0) {
+ start = KSEG0;
+ size = icache_size;
+ }
+ p = (char *)start;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
+
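+ /*
+ * With the cache isolated, the partial-word stores below invalidate the
+ * addressed lines instead of writing memory, so each loop iteration
+ * invalidates the 128 bytes of cache that it covers.
+ */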
+ for (i = 0; i < size; i += 0x080) {
+ asm( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ "sb\t$0, 0x008(%0)\n\t"
+ "sb\t$0, 0x00c(%0)\n\t"
+ "sb\t$0, 0x010(%0)\n\t"
+ "sb\t$0, 0x014(%0)\n\t"
+ "sb\t$0, 0x018(%0)\n\t"
+ "sb\t$0, 0x01c(%0)\n\t"
+ "sb\t$0, 0x020(%0)\n\t"
+ "sb\t$0, 0x024(%0)\n\t"
+ "sb\t$0, 0x028(%0)\n\t"
+ "sb\t$0, 0x02c(%0)\n\t"
+ "sb\t$0, 0x030(%0)\n\t"
+ "sb\t$0, 0x034(%0)\n\t"
+ "sb\t$0, 0x038(%0)\n\t"
+ "sb\t$0, 0x03c(%0)\n\t"
+ "sb\t$0, 0x040(%0)\n\t"
+ "sb\t$0, 0x044(%0)\n\t"
+ "sb\t$0, 0x048(%0)\n\t"
+ "sb\t$0, 0x04c(%0)\n\t"
+ "sb\t$0, 0x050(%0)\n\t"
+ "sb\t$0, 0x054(%0)\n\t"
+ "sb\t$0, 0x058(%0)\n\t"
+ "sb\t$0, 0x05c(%0)\n\t"
+ "sb\t$0, 0x060(%0)\n\t"
+ "sb\t$0, 0x064(%0)\n\t"
+ "sb\t$0, 0x068(%0)\n\t"
+ "sb\t$0, 0x06c(%0)\n\t"
+ "sb\t$0, 0x070(%0)\n\t"
+ "sb\t$0, 0x074(%0)\n\t"
+ "sb\t$0, 0x078(%0)\n\t"
+ "sb\t$0, 0x07c(%0)\n\t"
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ write_c0_status(flags);
+}
+
+static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ size = end - start;
+ if (size > dcache_size || KSEGX(start) != KSEG0) {
+ start = KSEG0;
+ size = dcache_size;
+ }
+ p = (char *)start;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ST0_ISC|flags)&~ST0_IEC);
+
+ for (i = 0; i < size; i += 0x080) {
+ asm( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ "sb\t$0, 0x008(%0)\n\t"
+ "sb\t$0, 0x00c(%0)\n\t"
+ "sb\t$0, 0x010(%0)\n\t"
+ "sb\t$0, 0x014(%0)\n\t"
+ "sb\t$0, 0x018(%0)\n\t"
+ "sb\t$0, 0x01c(%0)\n\t"
+ "sb\t$0, 0x020(%0)\n\t"
+ "sb\t$0, 0x024(%0)\n\t"
+ "sb\t$0, 0x028(%0)\n\t"
+ "sb\t$0, 0x02c(%0)\n\t"
+ "sb\t$0, 0x030(%0)\n\t"
+ "sb\t$0, 0x034(%0)\n\t"
+ "sb\t$0, 0x038(%0)\n\t"
+ "sb\t$0, 0x03c(%0)\n\t"
+ "sb\t$0, 0x040(%0)\n\t"
+ "sb\t$0, 0x044(%0)\n\t"
+ "sb\t$0, 0x048(%0)\n\t"
+ "sb\t$0, 0x04c(%0)\n\t"
+ "sb\t$0, 0x050(%0)\n\t"
+ "sb\t$0, 0x054(%0)\n\t"
+ "sb\t$0, 0x058(%0)\n\t"
+ "sb\t$0, 0x05c(%0)\n\t"
+ "sb\t$0, 0x060(%0)\n\t"
+ "sb\t$0, 0x064(%0)\n\t"
+ "sb\t$0, 0x068(%0)\n\t"
+ "sb\t$0, 0x06c(%0)\n\t"
+ "sb\t$0, 0x070(%0)\n\t"
+ "sb\t$0, 0x074(%0)\n\t"
+ "sb\t$0, 0x078(%0)\n\t"
+ "sb\t$0, 0x07c(%0)\n\t"
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ write_c0_status(flags);
+}
+
+static inline void r3k_flush_cache_all(void)
+{
+}
+
+static inline void r3k___flush_cache_all(void)
+{
+ r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
+ r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
+}
+
+static void r3k_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static void r3k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static void r3k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pr_debug("cpage[%08lx,%08lx]\n",
+ cpu_context(smp_processor_id(), mm), addr);
+
+ /* No ASID => no such page in the cache. */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ ptep = pte_offset(pmdp, addr);
+
+ /* Invalid => no such page in the cache. */
+ if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ return;
+
+ r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+ if (exec)
+ r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+}
+
+static void local_r3k_flush_data_cache_page(void *addr)
+{
+}
+
+static void r3k_flush_data_cache_page(unsigned long addr)
+{
+}
+
+static void r3k_flush_cache_sigtramp(unsigned long addr)
+{
+ unsigned long flags;
+
+ pr_debug("csigtramp[%08lx]\n", addr);
+
+ flags = read_c0_status();
+
+ write_c0_status(flags&~ST0_IEC);
+
+ /* Fill the TLB to avoid an exception with caches isolated. */
+ asm( "lw\t$0, 0x000(%0)\n\t"
+ "lw\t$0, 0x004(%0)\n\t"
+ : : "r" (addr) );
+
+ write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
+
+ asm( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ : : "r" (addr) );
+
+ write_c0_status(flags);
+}
+
+static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ iob();
+ r3k_flush_dcache_range(start, start + size);
+}
+
+void __cpuinit r3k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+
+ r3k_probe_cache();
+
+ flush_cache_all = r3k_flush_cache_all;
+ __flush_cache_all = r3k___flush_cache_all;
+ flush_cache_mm = r3k_flush_cache_mm;
+ flush_cache_range = r3k_flush_cache_range;
+ flush_cache_page = r3k_flush_cache_page;
+ flush_icache_range = r3k_flush_icache_range;
+ local_flush_icache_range = r3k_flush_icache_range;
+
+ flush_cache_sigtramp = r3k_flush_cache_sigtramp;
+ local_flush_data_cache_page = local_r3k_flush_data_cache_page;
+ flush_data_cache_page = r3k_flush_data_cache_page;
+
+ _dma_cache_wback_inv = r3k_dma_cache_wback_inv;
+ _dma_cache_wback = r3k_dma_cache_wback_inv;
+ _dma_cache_inv = r3k_dma_cache_wback_inv;
+
+ printk("Primary instruction cache %ldkB, linesize %ld bytes.\n",
+ icache_size >> 10, icache_lsize);
+ printk("Primary data cache %ldkB, linesize %ld bytes.\n",
+ dcache_size >> 10, dcache_lsize);
+
+ build_clear_page();
+ build_copy_page();
+}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
new file mode 100644
index 0000000..6e99665
--- /dev/null
+++ b/arch/mips/mm/c-r4k.c
@@ -0,0 +1,1403 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/r4kcache.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
+
+
+/*
+ * Special Variant of smp_call_function for use by cache functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ * primary cache.
+ */
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
+ int wait)
+{
+ preempt_disable();
+
+#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
+ smp_call_function(func, info, wait);
+#endif
+ func(info);
+ preempt_enable();
+}
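+
+/*
+ * On CONFIG_MIPS_MT_SMP / CONFIG_MIPS_MT_SMTC kernels the virtual
+ * processors share a single primary cache, so the cross-CPU
+ * smp_call_function() is compiled out above and only the local
+ * func(info) call remains.
+ */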
+
+#if defined(CONFIG_MIPS_CMP)
+#define cpu_has_safe_index_cacheops 0
+#else
+#define cpu_has_safe_index_cacheops 1
+#endif
+
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
+
+/*
+ * Dummy cache handling routines for machines without board caches
+ */
+static void cache_noop(void) {}
+
+static struct bcache_ops no_sc_ops = {
+ .bc_enable = (void *)cache_noop,
+ .bc_disable = (void *)cache_noop,
+ .bc_wback_inv = (void *)cache_noop,
+ .bc_inv = (void *)cache_noop
+};
+
+struct bcache_ops *bcops = &no_sc_ops;
+
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
+
+#define R4600_HIT_CACHEOP_WAR_IMPL \
+do { \
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
+ *(volatile unsigned long *)CKSEG1; \
+ if (R4600_V1_HIT_CACHEOP_WAR) \
+ __asm__ __volatile__("nop;nop;nop;nop"); \
+} while (0)
+
+static void (*r4k_blast_dcache_page)(unsigned long addr);
+
+static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
+{
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache32_page(addr);
+}
+
+static void __cpuinit r4k_blast_dcache_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_page = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_page = blast_dcache16_page;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
+}
+
+static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
+
+static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_page_indexed = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
+}
+
+static void (* r4k_blast_dcache)(void);
+
+static void __cpuinit r4k_blast_dcache_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache = blast_dcache16;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache = blast_dcache32;
+}
+
+/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
+#define JUMP_TO_ALIGN(order) \
+ __asm__ __volatile__( \
+ "b\t1f\n\t" \
+ ".align\t" #order "\n\t" \
+ "1:\n\t" \
+ )
+#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
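+
+/*
+ * The .align emitted by JUMP_TO_ALIGN() pads the instruction stream so the
+ * code that follows starts on a 1kB (order 10) or 2kB (order 11) boundary;
+ * the TX49 routines below rely on this so they never index-invalidate the
+ * 1kB chunk of I-cache they are currently executing from.
+ */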
+
+static inline void blast_r4600_v1_icache32(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_icache32();
+ local_irq_restore(flags);
+}
+
+static inline void tx49_blast_icache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.icache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ CACHE32_UNROLL32_ALIGN2;
+ /* I'm in even chunk. blast odd chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws, Index_Invalidate_I);
+ CACHE32_UNROLL32_ALIGN;
+ /* I'm in odd chunk. blast even chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws, Index_Invalidate_I);
+}
+
+static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_icache32_page_indexed(page);
+ local_irq_restore(flags);
+}
+
+static inline void tx49_blast_icache32_page_indexed(unsigned long page)
+{
+ unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ CACHE32_UNROLL32_ALIGN2;
+ /* I'm in even chunk. blast odd chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws, Index_Invalidate_I);
+ CACHE32_UNROLL32_ALIGN;
+ /* I'm in odd chunk. blast even chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws, Index_Invalidate_I);
+}
+
+static void (* r4k_blast_icache_page)(unsigned long addr);
+
+static void __cpuinit r4k_blast_icache_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_page = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_page = blast_icache16_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_page = blast_icache32_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_page = blast_icache64_page;
+}
+
+
+static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
+
+static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_page_indexed = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
+ else if (ic_lsize == 32) {
+ if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ r4k_blast_icache_page_indexed =
+ blast_icache32_r4600_v1_page_indexed;
+ else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ r4k_blast_icache_page_indexed =
+ tx49_blast_icache32_page_indexed;
+ else
+ r4k_blast_icache_page_indexed =
+ blast_icache32_page_indexed;
+ } else if (ic_lsize == 64)
+ r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
+}
+
+static void (* r4k_blast_icache)(void);
+
+static void __cpuinit r4k_blast_icache_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache = blast_icache16;
+ else if (ic_lsize == 32) {
+ if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ r4k_blast_icache = blast_r4600_v1_icache32;
+ else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ r4k_blast_icache = tx49_blast_icache32;
+ else
+ r4k_blast_icache = blast_icache32;
+ } else if (ic_lsize == 64)
+ r4k_blast_icache = blast_icache64;
+}
+
+static void (* r4k_blast_scache_page)(unsigned long addr);
+
+static void __cpuinit r4k_blast_scache_page_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (scache_size == 0)
+ r4k_blast_scache_page = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_page = blast_scache16_page;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_page = blast_scache32_page;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_page = blast_scache64_page;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_page = blast_scache128_page;
+}
+
+static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
+
+static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (scache_size == 0)
+ r4k_blast_scache_page_indexed = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
+}
+
+static void (* r4k_blast_scache)(void);
+
+static void __cpuinit r4k_blast_scache_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (scache_size == 0)
+ r4k_blast_scache = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache = blast_scache16;
+ else if (sc_lsize == 32)
+ r4k_blast_scache = blast_scache32;
+ else if (sc_lsize == 64)
+ r4k_blast_scache = blast_scache64;
+ else if (sc_lsize == 128)
+ r4k_blast_scache = blast_scache128;
+}
+
+static inline void local_r4k___flush_cache_all(void * args)
+{
+#if defined(CONFIG_CPU_LOONGSON2)
+ r4k_blast_scache();
+ return;
+#endif
+ r4k_blast_dcache();
+ r4k_blast_icache();
+
+ switch (current_cpu_type()) {
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ r4k_blast_scache();
+ }
+}
+
+static void r4k___flush_cache_all(void)
+{
+ r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+}
+
+static inline int has_valid_asid(const struct mm_struct *mm)
+{
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ int i;
+
+ for_each_online_cpu(i)
+ if (cpu_context(i, mm))
+ return 1;
+
+ return 0;
+#else
+ return cpu_context(smp_processor_id(), mm);
+#endif
+}
+
+static void r4k__flush_cache_vmap(void)
+{
+ r4k_blast_dcache();
+}
+
+static void r4k__flush_cache_vunmap(void)
+{
+ r4k_blast_dcache();
+}
+
+static inline void local_r4k_flush_cache_range(void * args)
+{
+ struct vm_area_struct *vma = args;
+ int exec = vma->vm_flags & VM_EXEC;
+
+ if (!(has_valid_asid(vma->vm_mm)))
+ return;
+
+ r4k_blast_dcache();
+ if (exec)
+ r4k_blast_icache();
+}
+
+static void r4k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ int exec = vma->vm_flags & VM_EXEC;
+
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+ r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+}
+
+static inline void local_r4k_flush_cache_mm(void * args)
+{
+ struct mm_struct *mm = args;
+
+ if (!has_valid_asid(mm))
+ return;
+
+ /*
+ * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
+ * only flush the primary caches, while R10000 and R12000 behave sanely.
+ * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+ * caches, so we can bail out early.
+ */
+ if (current_cpu_type() == CPU_R4000SC ||
+ current_cpu_type() == CPU_R4000MC ||
+ current_cpu_type() == CPU_R4400SC ||
+ current_cpu_type() == CPU_R4400MC) {
+ r4k_blast_scache();
+ return;
+ }
+
+ r4k_blast_dcache();
+}
+
+static void r4k_flush_cache_mm(struct mm_struct *mm)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+}
+
+struct flush_cache_page_args {
+ struct vm_area_struct *vma;
+ unsigned long addr;
+ unsigned long pfn;
+};
+
+static inline void local_r4k_flush_cache_page(void *args)
+{
+ struct flush_cache_page_args *fcp_args = args;
+ struct vm_area_struct *vma = fcp_args->vma;
+ unsigned long addr = fcp_args->addr;
+ struct page *page = pfn_to_page(fcp_args->pfn);
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ int map_coherent = 0;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ void *vaddr;
+
+ /*
+ * If the mm owns no valid ASID yet, it cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (!has_valid_asid(mm))
+ return;
+
+ addr &= PAGE_MASK;
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ ptep = pte_offset(pmdp, addr);
+
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
+ * in the cache.
+ */
+ if (!(pte_present(*ptep)))
+ return;
+
+ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
+ vaddr = NULL;
+ else {
+ /*
+ * Use kmap_coherent or kmap_atomic to do flushes for
+ * another ASID than the current one.
+ */
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, addr);
+ else
+ vaddr = kmap_atomic(page, KM_USER0);
+ addr = (unsigned long)vaddr;
+ }
+
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ r4k_blast_dcache_page(addr);
+ if (exec && !cpu_icache_snoops_remote_store)
+ r4k_blast_scache_page(addr);
+ }
+ if (exec) {
+ if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0)
+ drop_mmu_context(mm, cpu);
+ } else
+ r4k_blast_icache_page(addr);
+ }
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent();
+ else
+ kunmap_atomic(vaddr, KM_USER0);
+ }
+}
+
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ struct flush_cache_page_args args;
+
+ args.vma = vma;
+ args.addr = addr;
+ args.pfn = pfn;
+
+ r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+}
+
+static inline void local_r4k_flush_data_cache_page(void * addr)
+{
+ r4k_blast_dcache_page((unsigned long) addr);
+}
+
+static void r4k_flush_data_cache_page(unsigned long addr)
+{
+ if (in_atomic())
+ local_r4k_flush_data_cache_page((void *)addr);
+ else
+ r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
+ 1);
+}
+
+struct flush_icache_range_args {
+ unsigned long start;
+ unsigned long end;
+};
+
+static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ if (!cpu_has_ic_fills_f_dc) {
+ if (end - start >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ protected_blast_dcache_range(start, end);
+ }
+ }
+
+ if (end - start > icache_size)
+ r4k_blast_icache();
+ else
+ protected_blast_icache_range(start, end);
+}
+
+static inline void local_r4k_flush_icache_range_ipi(void *args)
+{
+ struct flush_icache_range_args *fir_args = args;
+ unsigned long start = fir_args->start;
+ unsigned long end = fir_args->end;
+
+ local_r4k_flush_icache_range(start, end);
+}
+
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flush_icache_range_args args;
+
+ args.start = start;
+ args.end = end;
+
+ r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+ instruction_hazard();
+}
+
+#ifdef CONFIG_DMA_NONCOHERENT
+
+static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ if (cpu_has_inclusive_pcaches) {
+ if (size >= scache_size)
+ r4k_blast_scache();
+ else
+ blast_scache_range(addr, addr + size);
+ return;
+ }
+
+ /*
+ * Either no secondary cache or the available caches don't have the
+ * subset property so we have to flush the primary caches
+ * explicitly
+ */
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(addr, addr + size);
+ }
+
+ bc_wback_inv(addr, size);
+}
+
+static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ if (cpu_has_inclusive_pcaches) {
+ if (size >= scache_size)
+ r4k_blast_scache();
+ else
+ blast_inv_scache_range(addr, addr + size);
+ return;
+ }
+
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_inv_dcache_range(addr, addr + size);
+ }
+
+ bc_inv(addr, size);
+}
+#endif /* CONFIG_DMA_NONCOHERENT */
+
+/*
+ * While we're protected against bad userland addresses we don't care
+ * very much about what happens in that case. Usually a segmentation
+ * fault will dump the process later on anyway ...
+ */
+static void local_r4k_flush_cache_sigtramp(void * arg)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+ unsigned long dc_lsize = cpu_dcache_line_size();
+ unsigned long sc_lsize = cpu_scache_line_size();
+ unsigned long addr = (unsigned long) arg;
+
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ if (dc_lsize)
+ protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+ if (!cpu_icache_snoops_remote_store && scache_size)
+ protected_writeback_scache_line(addr & ~(sc_lsize - 1));
+ if (ic_lsize)
+ protected_flush_icache_line(addr & ~(ic_lsize - 1));
+ if (MIPS4K_ICACHE_REFILL_WAR) {
+ __asm__ __volatile__ (
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set mips3\n\t"
+#ifdef CONFIG_32BIT
+ "la $at,1f\n\t"
+#endif
+#ifdef CONFIG_64BIT
+ "dla $at,1f\n\t"
+#endif
+ "cache %0,($at)\n\t"
+ "nop; nop; nop\n"
+ "1:\n\t"
+ ".set pop"
+ :
+ : "i" (Hit_Invalidate_I));
+ }
+ if (MIPS_CACHE_SYNC_WAR)
+ __asm__ __volatile__ ("sync");
+}
+
+static void r4k_flush_cache_sigtramp(unsigned long addr)
+{
+ r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+}
+
+static void r4k_flush_icache_all(void)
+{
+ if (cpu_has_vtag_icache)
+ r4k_blast_icache();
+}
+
+static inline void rm7k_erratum31(void)
+{
+ const unsigned long ic_lsize = 32;
+ unsigned long addr;
+
+ /* RM7000 erratum #31. The icache is screwed at startup. */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
+ __asm__ __volatile__ (
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache\t%1, 0(%0)\n\t"
+ "cache\t%1, 0x1000(%0)\n\t"
+ "cache\t%1, 0x2000(%0)\n\t"
+ "cache\t%1, 0x3000(%0)\n\t"
+ "cache\t%2, 0(%0)\n\t"
+ "cache\t%2, 0x1000(%0)\n\t"
+ "cache\t%2, 0x2000(%0)\n\t"
+ "cache\t%2, 0x3000(%0)\n\t"
+ "cache\t%1, 0(%0)\n\t"
+ "cache\t%1, 0x1000(%0)\n\t"
+ "cache\t%1, 0x2000(%0)\n\t"
+ "cache\t%1, 0x3000(%0)\n\t"
+ ".set pop\n"
+ :
+ : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
+ }
+}
+
+static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+};
+
+static void __cpuinit probe_pcache(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config = read_c0_config();
+ unsigned int prid = read_c0_prid();
+ unsigned long config1;
+ unsigned int lsize;
+
+ switch (c->cputype) {
+ case CPU_R4600: /* QED style two way caches? */
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_NEVADA:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 2;
+ c->icache.waybit = __ffs(icache_size/2);
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 2;
+ c->dcache.waybit= __ffs(dcache_size/2);
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_R5432:
+ case CPU_R5500:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 2;
+ c->icache.waybit= 0;
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 2;
+ c->dcache.waybit = 0;
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_TX49XX:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 4;
+ c->icache.waybit= 0;
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 4;
+ c->dcache.waybit = 0;
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ case CPU_R4300:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 1;
+ c->icache.waybit = 0; /* doesn't matter */
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 1;
+ c->dcache.waybit = 0; /* does not matter */
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
+ c->icache.linesz = 64;
+ c->icache.ways = 2;
+ c->icache.waybit = 0;
+
+ dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
+ c->dcache.linesz = 32;
+ c->dcache.ways = 2;
+ c->dcache.waybit = 0;
+
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_VR4133:
+ write_c0_config(config & ~VR41_CONF_P4K);
+ case CPU_VR4131:
+ /* Workaround for cache instruction bug of VR4131 */
+ if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
+ c->processor_id == 0x0c82U) {
+ config |= 0x00400000U;
+ if (c->processor_id == 0x0c80U)
+ config |= VR41_CONF_BP;
+ write_c0_config(config);
+ } else
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+
+ icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 2;
+ c->icache.waybit = __ffs(icache_size/2);
+
+ dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 2;
+ c->dcache.waybit = __ffs(dcache_size/2);
+ break;
+
+ case CPU_VR41XX:
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 1;
+ c->icache.waybit = 0; /* doesn't matter */
+
+ dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 1;
+ c->dcache.waybit = 0; /* does not matter */
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_RM7000:
+ rm7k_erratum31();
+
+ case CPU_RM9000:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 4;
+ c->icache.waybit = __ffs(icache_size / c->icache.ways);
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 4;
+ c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
+
+#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+#endif
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_LOONGSON2:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ if (prid & 0x3)
+ c->icache.ways = 4;
+ else
+ c->icache.ways = 2;
+ c->icache.waybit = 0;
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ if (prid & 0x3)
+ c->dcache.ways = 4;
+ else
+ c->dcache.ways = 2;
+ c->dcache.waybit = 0;
+ break;
+
+ default:
+ if (!(config & MIPS_CONF_M))
+ panic("Don't know how to probe P-caches on this cpu.");
+
+ /*
+ * So we seem to be a MIPS32 or MIPS64 CPU
+ * So let's probe the I-cache ...
+ */
+ config1 = read_c0_config1();
+
+ if ((lsize = ((config1 >> 19) & 7)))
+ c->icache.linesz = 2 << lsize;
+ else
+ c->icache.linesz = lsize;
+ c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+
+ icache_size = c->icache.sets *
+ c->icache.ways *
+ c->icache.linesz;
+ c->icache.waybit = __ffs(icache_size/c->icache.ways);
+
+ if (config & 0x8) /* VI bit */
+ c->icache.flags |= MIPS_CACHE_VTAG;
+
+ /*
+ * Now probe the MIPS32 / MIPS64 data cache.
+ */
+ c->dcache.flags = 0;
+
+ if ((lsize = ((config1 >> 10) & 7)))
+ c->dcache.linesz = 2 << lsize;
+ else
+ c->dcache.linesz= lsize;
+ c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ c->dcache.ways = 1 + ((config1 >> 7) & 7);
+
+ dcache_size = c->dcache.sets *
+ c->dcache.ways *
+ c->dcache.linesz;
+ c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
+
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+ }
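+
+ /*
+ * In the MIPS32 / MIPS64 default case above Config1 encodes each cache as
+ * sets = 64 << S, ways = 1 + A and linesz = 2 << L (L == 0 meaning no
+ * cache), so e.g. S=2, A=3, L=3 gives 256 sets * 4 ways * 16 bytes = 16kB.
+ */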
+
+ /*
+ * Processor configuration sanity check for the R4000SC erratum
+ * #5. With page sizes larger than 32kB there is no possibility
+ * to get a VCE exception anymore so we don't care about this
+ * misconfiguration. The case is rather theoretical anyway;
+ * presumably no vendor is shipping his hardware in the "bad"
+ * configuration.
+ */
+ if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
+ !(config & CONF_SC) && c->icache.linesz != 16 &&
+ PAGE_SIZE <= 0x8000)
+ panic("Improper R4000SC processor configuration detected");
+
+ /* compute a couple of other cache variables */
+ c->icache.waysize = icache_size / c->icache.ways;
+ c->dcache.waysize = dcache_size / c->dcache.ways;
+
+ c->icache.sets = c->icache.linesz ?
+ icache_size / (c->icache.linesz * c->icache.ways) : 0;
+ c->dcache.sets = c->dcache.linesz ?
+ dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
+
+ /*
+ * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
+ * 2-way virtually indexed, so normally they'd suffer from aliases, but
+ * magic in the hardware deals with that for us so we don't need to take
+ * care ourselves.
+ */
+ switch (c->cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_SB1:
+ case CPU_SB1A:
+ c->dcache.flags |= MIPS_CACHE_PINDEX;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ break;
+
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ if ((read_c0_config7() & (1 << 16))) {
+ /* effectively physically indexed dcache,
+ thus no virtual aliases. */
+ c->dcache.flags |= MIPS_CACHE_PINDEX;
+ break;
+ }
+ default:
+ if (c->dcache.waysize > PAGE_SIZE)
+ c->dcache.flags |= MIPS_CACHE_ALIASES;
+ }
+
+ switch (c->cputype) {
+ case CPU_20KC:
+ /*
+ * Some older 20Kc chips don't have the 'VI' bit in
+ * the config register.
+ */
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ break;
+
+ case CPU_AU1000:
+ case CPU_AU1500:
+ case CPU_AU1100:
+ case CPU_AU1550:
+ case CPU_AU1200:
+ case CPU_AU1210:
+ case CPU_AU1250:
+ c->icache.flags |= MIPS_CACHE_IC_F_DC;
+ break;
+ }
+
+#ifdef CONFIG_CPU_LOONGSON2
+ /*
+ * LOONGSON2 has 4 way icache, but when using indexed cache op,
+ * one op will act on all 4 ways
+ */
+ c->icache.ways = 1;
+#endif
+
+ printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
+ icache_size >> 10,
+ cpu_has_vtag_icache ? "VIVT" : "VIPT",
+ way_string[c->icache.ways], c->icache.linesz);
+
+ printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+ dcache_size >> 10, way_string[c->dcache.ways],
+ (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
+ (c->dcache.flags & MIPS_CACHE_ALIASES) ?
+ "cache aliases" : "no aliases",
+ c->dcache.linesz);
+}
+
+/*
+ * If you even _breathe_ on this function, look at the gcc output and make sure
+ * it does not pop things on and off the stack for the cache sizing loop that
+ * executes in KSEG1 space or else you will crash and burn badly. You have
+ * been warned.
+ */
+static int __cpuinit probe_scache(void)
+{
+ unsigned long flags, addr, begin, end, pow2;
+ unsigned int config = read_c0_config();
+ struct cpuinfo_mips *c = &current_cpu_data;
+ int tmp;
+
+ if (config & CONF_SC)
+ return 0;
+
+ begin = (unsigned long) &_stext;
+ begin &= ~((4 * 1024 * 1024) - 1);
+ end = begin + (4 * 1024 * 1024);
+
+ /*
+ * This is such a bitch, you'd think they would make it easy to do
+ * this. Away you daemons of stupidity!
+ */
+ local_irq_save(flags);
+
+ /* Fill each size-multiple cache line with a valid tag. */
+ pow2 = (64 * 1024);
+ for (addr = begin; addr < end; addr = (begin + pow2)) {
+ unsigned long *p = (unsigned long *) addr;
+ __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
+ pow2 <<= 1;
+ }
+
+ /* Load first line with zero (therefore invalid) tag. */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+ __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
+ cache_op(Index_Store_Tag_I, begin);
+ cache_op(Index_Store_Tag_D, begin);
+ cache_op(Index_Store_Tag_SD, begin);
+
+ /* Now search for the wrap around point. */
+ pow2 = (128 * 1024);
+ tmp = 0;
+ for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
+ cache_op(Index_Load_Tag_SD, addr);
+ __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
+ if (!read_c0_taglo())
+ break;
+ pow2 <<= 1;
+ }
+ local_irq_restore(flags);
+ addr -= begin;
+
+ scache_size = addr;
+ c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
+ c->scache.ways = 1;
+ c->dcache.waybit = 0; /* does not matter */
+
+ return 1;
+}
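+
+/*
+ * probe_scache() first loads one word at the window base and at each
+ * power-of-two offset from 128kB up, making those S-cache tags valid, then
+ * stores a zero (invalid) tag at the base.  Scanning upwards, the first
+ * offset whose Index_Load_Tag_SD reads back a zero TagLo aliases index 0,
+ * and that offset is the S-cache size (e.g. 512kB when the tag at offset
+ * 512kB comes back zero).
+ */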
+
+#if defined(CONFIG_CPU_LOONGSON2)
+static void __init loongson2_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ scache_size = 512*1024;
+ c->scache.linesz = 32;
+ c->scache.ways = 4;
+ c->scache.waybit = 0;
+ c->scache.waysize = scache_size / (c->scache.ways);
+ c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+}
+#endif
+
+extern int r5k_sc_init(void);
+extern int rm7k_sc_init(void);
+extern int mips_sc_init(void);
+
+static void __cpuinit setup_scache(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config = read_c0_config();
+ int sc_present = 0;
+
+ /*
+ * Do the probing thing on R4000SC and R4400SC processors. Other
+ * processors don't have a S-cache that would be relevant to the
+ * Linux memory management.
+ */
+ switch (c->cputype) {
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ sc_present = run_uncached(probe_scache);
+ if (sc_present)
+ c->options |= MIPS_CPU_CACHE_CDEX_S;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
+ c->scache.linesz = 64 << ((config >> 13) & 1);
+ c->scache.ways = 2;
+ c->scache.waybit= 0;
+ sc_present = 1;
+ break;
+
+ case CPU_R5000:
+ case CPU_NEVADA:
+#ifdef CONFIG_R5000_CPU_SCACHE
+ r5k_sc_init();
+#endif
+ return;
+
+ case CPU_RM7000:
+ case CPU_RM9000:
+#ifdef CONFIG_RM7000_CPU_SCACHE
+ rm7k_sc_init();
+#endif
+ return;
+
+#if defined(CONFIG_CPU_LOONGSON2)
+ case CPU_LOONGSON2:
+ loongson2_sc_init();
+ return;
+#endif
+
+ default:
+ if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
+ c->isa_level == MIPS_CPU_ISA_M32R2 ||
+ c->isa_level == MIPS_CPU_ISA_M64R1 ||
+ c->isa_level == MIPS_CPU_ISA_M64R2) {
+#ifdef CONFIG_MIPS_CPU_SCACHE
+ if (mips_sc_init ()) {
+ scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+ printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
+ scache_size >> 10,
+ way_string[c->scache.ways], c->scache.linesz);
+ }
+#else
+ if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
+ panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
+#endif
+ return;
+ }
+ sc_present = 0;
+ }
+
+ if (!sc_present)
+ return;
+
+ /* compute a couple of other cache variables */
+ c->scache.waysize = scache_size / c->scache.ways;
+
+ c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
+
+ printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+}
+
+void au1x00_fixup_config_od(void)
+{
+ /*
+ * c0_config.od (bit 19) was write only (and read as 0)
+ * on the early revisions of Alchemy SOCs. It disables the bus
+ * transaction overlapping and needs to be set to fix various errata.
+ */
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ case 0x01030200: /* Au1500 AB */
+ /*
+ * The Au1100 errata are actually silent about this bit, so we set it
+ * just in case for those revisions that require it to be set according
+ * to arch/mips/au1000/common/cputable.c
+ */
+ case 0x02030200: /* Au1100 AB */
+ case 0x02030201: /* Au1100 BA */
+ case 0x02030202: /* Au1100 BC */
+ set_c0_config(1 << 19);
+ break;
+ }
+}
+
+/* CP0 hazard avoidance. */
+#define NXP_BARRIER() \
+ __asm__ __volatile__( \
+ ".set noreorder\n\t" \
+ "nop; nop; nop; nop; nop; nop;\n\t" \
+ ".set reorder\n\t")
+
+static void nxp_pr4450_fixup_config(void)
+{
+ unsigned long config0;
+
+ config0 = read_c0_config();
+
+ /* clear all three cache coherency fields */
+ config0 &= ~(0x7 | (7 << 25) | (7 << 28));
+ config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << 28));
+ write_c0_config(config0);
+ NXP_BARRIER();
+}
+
+static int __cpuinitdata cca = -1;
+
+static int __init cca_setup(char *str)
+{
+ get_option(&str, &cca);
+
+ return 1;
+}
+
+__setup("cca=", cca_setup);
+
+static void __cpuinit coherency_setup(void)
+{
+ if (cca < 0 || cca > 7)
+ cca = read_c0_config() & CONF_CM_CMASK;
+ _page_cachable_default = cca << _CACHE_SHIFT;
+
+ pr_debug("Using cache attribute %d\n", cca);
+ change_c0_config(CONF_CM_CMASK, cca);
+
+ /*
+ * c0_status.cu=0 specifies that updates by the sc instruction use
+ * the coherency mode specified by the TLB; 1 means cachable
+ * coherent update on write will be used. Not all processors have
+ * this bit; some wire it to zero, others like Toshiba had the
+ * silly idea of putting something else there ...
+ */
+ switch (current_cpu_type()) {
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ clear_c0_config(CONF_CU);
+ break;
+ /*
+ * We need to catch the early Alchemy SOCs with
+ * the write-only c0_config.od bit and set it back to one...
+ */
+ case CPU_AU1000: /* rev. DA, HA, HB */
+ case CPU_AU1100: /* rev. AB, BA, BC ?? */
+ case CPU_AU1500: /* rev. AB */
+ au1x00_fixup_config_od();
+ break;
+
+ case PRID_IMP_PR4450:
+ nxp_pr4450_fixup_config();
+ break;
+ }
+}
+
+#if defined(CONFIG_DMA_NONCOHERENT)
+
+static int __cpuinitdata coherentio;
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = 1;
+
+ return 1;
+}
+
+__setup("coherentio", setcoherentio);
+#endif
+
+void __cpuinit r4k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ extern char __weak except_vec2_generic;
+ extern char __weak except_vec2_sb1;
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ switch (c->cputype) {
+ case CPU_SB1:
+ case CPU_SB1A:
+ set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
+ break;
+
+ default:
+ set_uncached_handler(0x100, &except_vec2_generic, 0x80);
+ break;
+ }
+
+ probe_pcache();
+ setup_scache();
+
+ r4k_blast_dcache_page_setup();
+ r4k_blast_dcache_page_indexed_setup();
+ r4k_blast_dcache_setup();
+ r4k_blast_icache_page_setup();
+ r4k_blast_icache_page_indexed_setup();
+ r4k_blast_icache_setup();
+ r4k_blast_scache_page_setup();
+ r4k_blast_scache_page_indexed_setup();
+ r4k_blast_scache_setup();
+
+ /*
+ * Some MIPS32 and MIPS64 processors have physically indexed caches.
+ * This code supports virtually indexed processors and will be
+ * unnecessarily inefficient on physically indexed processors.
+ */
+ if (c->dcache.linesz)
+ shm_align_mask = max_t( unsigned long,
+ c->dcache.sets * c->dcache.linesz - 1,
+ PAGE_SIZE - 1);
+ else
+ shm_align_mask = PAGE_SIZE-1;
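+
+ /*
+ * sets * linesz is the size of one D-cache way; e.g. a 32kB 4-way cache
+ * with 32-byte lines has 256 sets * 32 bytes = 8kB per way, so with 4kB
+ * pages shm_align_mask becomes 0x1fff and shared mappings of the same page
+ * land on the same cache index and cannot alias.
+ */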
+
+ __flush_cache_vmap = r4k__flush_cache_vmap;
+ __flush_cache_vunmap = r4k__flush_cache_vunmap;
+
+ flush_cache_all = cache_noop;
+ __flush_cache_all = r4k___flush_cache_all;
+ flush_cache_mm = r4k_flush_cache_mm;
+ flush_cache_page = r4k_flush_cache_page;
+ flush_cache_range = r4k_flush_cache_range;
+
+ flush_cache_sigtramp = r4k_flush_cache_sigtramp;
+ flush_icache_all = r4k_flush_icache_all;
+ local_flush_data_cache_page = local_r4k_flush_data_cache_page;
+ flush_data_cache_page = r4k_flush_data_cache_page;
+ flush_icache_range = r4k_flush_icache_range;
+ local_flush_icache_range = local_r4k_flush_icache_range;
+
+#if defined(CONFIG_DMA_NONCOHERENT)
+ if (coherentio) {
+ _dma_cache_wback_inv = (void *)cache_noop;
+ _dma_cache_wback = (void *)cache_noop;
+ _dma_cache_inv = (void *)cache_noop;
+ } else {
+ _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
+ _dma_cache_wback = r4k_dma_cache_wback_inv;
+ _dma_cache_inv = r4k_dma_cache_inv;
+ }
+#endif
+
+ build_clear_page();
+ build_copy_page();
+#if !defined(CONFIG_MIPS_CMP)
+ local_r4k___flush_cache_all(NULL);
+#endif
+ coherency_setup();
+}
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
new file mode 100644
index 0000000..f7c8f9c
--- /dev/null
+++ b/arch/mips/mm/c-tx39.c
@@ -0,0 +1,433 @@
+/*
+ * r2300.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Tx39XX R4k style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+/* For R3000 cores with R4000 style caches */
+static unsigned long icache_size, dcache_size; /* Size in bytes */
+
+#include <asm/r4kcache.h>
+
+extern int r3k_have_wired_reg; /* in r3k-tlb.c */
+
+/* This sequence is required to ensure icache is disabled immediately */
+#define TX39_STOP_STREAMING() \
+__asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
+ "nop\n\t" \
+ "1:\n\t" \
+ ".set pop" \
+ )
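+
+/*
+ * The taken branch in TX39_STOP_STREAMING() stops instruction streaming,
+ * so the preceding write that clears TX39_CONF_ICE takes effect before the
+ * cache operations that follow it.
+ */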
+
+/* TX39H-style cache flush routines. */
+static void tx39h_flush_icache_all(void)
+{
+ unsigned long flags, config;
+
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16();
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ iob();
+ blast_inv_dcache_range(addr, addr + size);
+}
+
+
+/* TX39H2,TX39H3 */
+static inline void tx39_blast_dcache_page(unsigned long addr)
+{
+ if (current_cpu_type() != CPU_TX3912)
+ blast_dcache16_page(addr);
+}
+
+static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
+{
+ blast_dcache16_page_indexed(addr);
+}
+
+static inline void tx39_blast_dcache(void)
+{
+ blast_dcache16();
+}
+
+static inline void tx39_blast_icache_page(unsigned long addr)
+{
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16_page(addr);
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static inline void tx39_blast_icache_page_indexed(unsigned long addr)
+{
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16_page_indexed(addr);
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static inline void tx39_blast_icache(void)
+{
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16();
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static void tx39__flush_cache_vmap(void)
+{
+ tx39_blast_dcache();
+}
+
+static void tx39__flush_cache_vunmap(void)
+{
+ tx39_blast_dcache();
+}
+
+static inline void tx39_flush_cache_all(void)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ tx39_blast_dcache();
+}
+
+static inline void tx39___flush_cache_all(void)
+{
+ tx39_blast_dcache();
+ tx39_blast_icache();
+}
+
+static void tx39_flush_cache_mm(struct mm_struct *mm)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ if (cpu_context(smp_processor_id(), mm) != 0)
+ tx39_blast_dcache();
+}
+
+static void tx39_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+ if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
+ return;
+
+ tx39_blast_dcache();
+}
+
+static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+{
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ /*
+ * If the mm owns no valid ASID yet, it cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ page &= PAGE_MASK;
+ pgdp = pgd_offset(mm, page);
+ pudp = pud_offset(pgdp, page);
+ pmdp = pmd_offset(pudp, page);
+ ptep = pte_offset(pmdp, page);
+
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
+ * in the cache.
+ */
+ if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ return;
+
+ /*
+ * Doing flushes for another ASID than the current one is
+ * too difficult since stupid R4k caches do a TLB translation
+ * for every cache flush operation. So we do indexed flushes
+ * in that case, which doesn't flush too much of the cache.
+ */
+ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
+ if (cpu_has_dc_aliases || exec)
+ tx39_blast_dcache_page(page);
+ if (exec)
+ tx39_blast_icache_page(page);
+
+ return;
+ }
+
+ /*
+ * Do indexed flush, too much work to get the (possible) TLB refills
+ * to work correctly.
+ */
+ if (cpu_has_dc_aliases || exec)
+ tx39_blast_dcache_page_indexed(page);
+ if (exec)
+ tx39_blast_icache_page_indexed(page);
+}
+
+static void local_tx39_flush_data_cache_page(void * addr)
+{
+ tx39_blast_dcache_page((unsigned long)addr);
+}
+
+static void tx39_flush_data_cache_page(unsigned long addr)
+{
+ tx39_blast_dcache_page(addr);
+}
+
+static void tx39_flush_icache_range(unsigned long start, unsigned long end)
+{
+ if (end - start > dcache_size)
+ tx39_blast_dcache();
+ else
+ protected_blast_dcache_range(start, end);
+
+ if (end - start > icache_size)
+ tx39_blast_icache();
+ else {
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ protected_blast_icache_range(start, end);
+ write_c0_conf(config);
+ local_irq_restore(flags);
+ }
+}
+
+static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end;
+
+ if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
+ end = addr + size;
+ do {
+ tx39_blast_dcache_page(addr);
+ addr += PAGE_SIZE;
+ } while(addr != end);
+ } else if (size > dcache_size) {
+ tx39_blast_dcache();
+ } else {
+ blast_dcache_range(addr, addr + size);
+ }
+}
+
+static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end;
+
+ if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
+ end = addr + size;
+ do {
+ tx39_blast_dcache_page(addr);
+ addr += PAGE_SIZE;
+ } while(addr != end);
+ } else if (size > dcache_size) {
+ tx39_blast_dcache();
+ } else {
+ blast_inv_dcache_range(addr, addr + size);
+ }
+}
+
+static void tx39_flush_cache_sigtramp(unsigned long addr)
+{
+ unsigned long ic_lsize = current_cpu_data.icache.linesz;
+ unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+ unsigned long config;
+ unsigned long flags;
+
+ protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ protected_flush_icache_line(addr & ~(ic_lsize - 1));
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static __init void tx39_probe_cache(void)
+{
+ unsigned long config;
+
+ config = read_c0_conf();
+
+ icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
+ TX39_CONF_ICS_SHIFT));
+ dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
+ TX39_CONF_DCS_SHIFT));
+
+ current_cpu_data.icache.linesz = 16;
+ switch (current_cpu_type()) {
+ case CPU_TX3912:
+ current_cpu_data.icache.ways = 1;
+ current_cpu_data.dcache.ways = 1;
+ current_cpu_data.dcache.linesz = 4;
+ break;
+
+ case CPU_TX3927:
+ current_cpu_data.icache.ways = 2;
+ current_cpu_data.dcache.ways = 2;
+ current_cpu_data.dcache.linesz = 16;
+ break;
+
+ case CPU_TX3922:
+ default:
+ current_cpu_data.icache.ways = 1;
+ current_cpu_data.dcache.ways = 1;
+ current_cpu_data.dcache.linesz = 16;
+ break;
+ }
+}
+
+void __cpuinit tx39_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ unsigned long config;
+
+ config = read_c0_conf();
+ config &= ~TX39_CONF_WBON;
+ write_c0_conf(config);
+
+ tx39_probe_cache();
+
+ switch (current_cpu_type()) {
+ case CPU_TX3912:
+ /* TX39/H core (writethru direct-map cache) */
+ __flush_cache_vmap = tx39__flush_cache_vmap;
+ __flush_cache_vunmap = tx39__flush_cache_vunmap;
+ flush_cache_all = tx39h_flush_icache_all;
+ __flush_cache_all = tx39h_flush_icache_all;
+ flush_cache_mm = (void *) tx39h_flush_icache_all;
+ flush_cache_range = (void *) tx39h_flush_icache_all;
+ flush_cache_page = (void *) tx39h_flush_icache_all;
+ flush_icache_range = (void *) tx39h_flush_icache_all;
+ local_flush_icache_range = (void *) tx39h_flush_icache_all;
+
+ flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
+ local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
+ flush_data_cache_page = (void *) tx39h_flush_icache_all;
+
+ _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
+
+ shm_align_mask = PAGE_SIZE - 1;
+
+ break;
+
+ case CPU_TX3922:
+ case CPU_TX3927:
+ default:
+ /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
+ r3k_have_wired_reg = 1;
+ write_c0_wired(0); /* set 8 on reset... */
+ /* board-dependent init code may set WBON */
+
+ __flush_cache_vmap = tx39__flush_cache_vmap;
+ __flush_cache_vunmap = tx39__flush_cache_vunmap;
+
+ flush_cache_all = tx39_flush_cache_all;
+ __flush_cache_all = tx39___flush_cache_all;
+ flush_cache_mm = tx39_flush_cache_mm;
+ flush_cache_range = tx39_flush_cache_range;
+ flush_cache_page = tx39_flush_cache_page;
+ flush_icache_range = tx39_flush_icache_range;
+ local_flush_icache_range = tx39_flush_icache_range;
+
+ flush_cache_sigtramp = tx39_flush_cache_sigtramp;
+ local_flush_data_cache_page = local_tx39_flush_data_cache_page;
+ flush_data_cache_page = tx39_flush_data_cache_page;
+
+ _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
+ _dma_cache_wback = tx39_dma_cache_wback_inv;
+ _dma_cache_inv = tx39_dma_cache_inv;
+
+ shm_align_mask = max_t(unsigned long,
+ (dcache_size / current_cpu_data.dcache.ways) - 1,
+ PAGE_SIZE - 1);
+
+ break;
+ }
+
+ current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
+ current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
+
+ current_cpu_data.icache.sets =
+ current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
+ current_cpu_data.dcache.sets =
+ current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
+
+ if (current_cpu_data.dcache.waysize > PAGE_SIZE)
+ current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
+
+ current_cpu_data.icache.waybit = 0;
+ current_cpu_data.dcache.waybit = 0;
+
+ printk("Primary instruction cache %ldkB, linesize %d bytes\n",
+ icache_size >> 10, current_cpu_data.icache.linesz);
+ printk("Primary data cache %ldkB, linesize %d bytes\n",
+ dcache_size >> 10, current_cpu_data.dcache.linesz);
+
+ build_clear_page();
+ build_copy_page();
+ tx39h_flush_icache_all();
+}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
new file mode 100644
index 0000000..1eb7c71
--- /dev/null
+++ b/arch/mips/mm/cache.c
@@ -0,0 +1,194 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+
+/* Cache operations. */
+void (*flush_cache_all)(void);
+void (*__flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
+ unsigned long pfn);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+
+void (*__flush_cache_vmap)(void);
+void (*__flush_cache_vunmap)(void);
+
+/* MIPS specific cache operations */
+void (*flush_cache_sigtramp)(unsigned long addr);
+void (*local_flush_data_cache_page)(void * addr);
+void (*flush_data_cache_page)(unsigned long addr);
+void (*flush_icache_all)(void);
+
+EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
+EXPORT_SYMBOL(flush_data_cache_page);
+
+#ifdef CONFIG_DMA_NONCOHERENT
+
+/* DMA cache operations. */
+void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
+void (*_dma_cache_wback)(unsigned long start, unsigned long size);
+void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+
+EXPORT_SYMBOL(_dma_cache_wback_inv);
+
+#endif /* CONFIG_DMA_NONCOHERENT */
+
+/*
+ * We could optimize the case where the cache argument is not BCACHE,
+ * but that seems to be a very atypical use ...
+ */
+asmlinkage int sys_cacheflush(unsigned long addr,
+ unsigned long bytes, unsigned int cache)
+{
+ if (bytes == 0)
+ return 0;
+ if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
+ return -EFAULT;
+
+ flush_icache_range(addr, addr + bytes);
+
+ return 0;
+}
+
+void __flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ unsigned long addr;
+
+ if (PageHighMem(page))
+ return;
+ if (mapping && !mapping_mapped(mapping)) {
+ SetPageDcacheDirty(page);
+ return;
+ }
+
+ /*
+	 * We could delay the flush for the !page_mapping case too.  But that
+	 * case is for exec env/arg pages and those are 99% certain to get
+	 * faulted into the tlb (and thus flushed) anyway.
+ */
+ addr = (unsigned long) page_address(page);
+ flush_data_cache_page(addr);
+}
+
+EXPORT_SYMBOL(__flush_dcache_page);
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+ unsigned long addr = (unsigned long) page_address(page);
+
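+	/*
+	 * Nothing to do unless the kernel and user addresses can alias
+	 * in the dcache.  If the page is still mapped and not marked
+	 * dirty, flush it through a coherent mapping at the user colour;
+	 * otherwise flush the kernel mapping directly.
+	 */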
+ if (pages_do_alias(addr, vmaddr)) {
+ if (page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *kaddr;
+
+ kaddr = kmap_coherent(page, vmaddr);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_coherent();
+ } else
+ flush_data_cache_page(addr);
+ }
+}
+
+EXPORT_SYMBOL(__flush_anon_page);
+
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte)
+{
+ struct page *page;
+ unsigned long pfn, addr;
+ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+
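+	/*
+	 * A page whose flush was deferred in __flush_dcache_page() is
+	 * written back here once it is mapped executable or at an
+	 * aliasing user address.
+	 */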
+ pfn = pte_pfn(pte);
+ if (unlikely(!pfn_valid(pfn)))
+ return;
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && Page_dcache_dirty(page)) {
+ addr = (unsigned long) page_address(page);
+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ flush_data_cache_page(addr);
+ ClearPageDcacheDirty(page);
+ }
+}
+
+unsigned long _page_cachable_default;
+EXPORT_SYMBOL_GPL(_page_cachable_default);
+
+static inline void setup_protection_map(void)
+{
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+}
+
+void __devinit cpu_cache_init(void)
+{
+ if (cpu_has_3k_cache) {
+ extern void __weak r3k_cache_init(void);
+
+ r3k_cache_init();
+ }
+ if (cpu_has_6k_cache) {
+ extern void __weak r6k_cache_init(void);
+
+ r6k_cache_init();
+ }
+ if (cpu_has_4k_cache) {
+ extern void __weak r4k_cache_init(void);
+
+ r4k_cache_init();
+ }
+ if (cpu_has_8k_cache) {
+ extern void __weak r8k_cache_init(void);
+
+ r8k_cache_init();
+ }
+ if (cpu_has_tx39_cache) {
+ extern void __weak tx39_cache_init(void);
+
+ tx39_cache_init();
+ }
+
+ setup_protection_map();
+}
+
+int __weak __uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_SYNC)
+ return 1;
+
+ return addr >= __pa(high_memory);
+}
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
new file mode 100644
index 0000000..1bd1f18
--- /dev/null
+++ b/arch/mips/mm/cerr-sb1.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright (C) 2001,2002,2003 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+
+#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
+#include <asm/io.h>
+#include <asm/sibyte/sb1250_scd.h>
+#endif
+
+/*
+ * We'd like to dump the L2_ECC_TAG register on errors, but errata make
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ */
+#undef DUMP_L2_ECC_TAG_ON_ERROR
+
+/* SB1 definitions */
+
+/* XXX should come from config1 XXX */
+#define SB1_CACHE_INDEX_MASK 0x1fe0
+
+#define CP0_ERRCTL_RECOVERABLE (1 << 31)
+#define CP0_ERRCTL_DCACHE (1 << 30)
+#define CP0_ERRCTL_ICACHE (1 << 29)
+#define CP0_ERRCTL_MULTIBUS (1 << 23)
+#define CP0_ERRCTL_MC_TLB (1 << 15)
+#define CP0_ERRCTL_MC_TIMEOUT (1 << 14)
+
+#define CP0_CERRI_TAG_PARITY (1 << 29)
+#define CP0_CERRI_DATA_PARITY (1 << 28)
+#define CP0_CERRI_EXTERNAL (1 << 26)
+
+#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
+#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
+
+#define CP0_CERRD_MULTIPLE (1 << 31)
+#define CP0_CERRD_TAG_STATE (1 << 30)
+#define CP0_CERRD_TAG_ADDRESS (1 << 29)
+#define CP0_CERRD_DATA_SBE (1 << 28)
+#define CP0_CERRD_DATA_DBE (1 << 27)
+#define CP0_CERRD_EXTERNAL (1 << 26)
+#define CP0_CERRD_LOAD (1 << 25)
+#define CP0_CERRD_STORE (1 << 24)
+#define CP0_CERRD_FILLWB (1 << 23)
+#define CP0_CERRD_COHERENCY (1 << 22)
+#define CP0_CERRD_DUPTAG (1 << 21)
+
+#define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL))
+#define CP0_CERRD_IDX_VALID(c) \
+ (((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0)
+#define CP0_CERRD_CAUSES \
+ (CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
+#define CP0_CERRD_TYPES \
+ (CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
+#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
+
+static inline void breakout_errctl(unsigned int val)
+{
+ if (val & CP0_ERRCTL_RECOVERABLE)
+ printk(" recoverable");
+ if (val & CP0_ERRCTL_DCACHE)
+ printk(" dcache");
+ if (val & CP0_ERRCTL_ICACHE)
+ printk(" icache");
+ if (val & CP0_ERRCTL_MULTIBUS)
+ printk(" multiple-buserr");
+ printk("\n");
+}
+
+static inline void breakout_cerri(unsigned int val)
+{
+ if (val & CP0_CERRI_TAG_PARITY)
+ printk(" tag-parity");
+ if (val & CP0_CERRI_DATA_PARITY)
+ printk(" data-parity");
+ if (val & CP0_CERRI_EXTERNAL)
+ printk(" external");
+ printk("\n");
+}
+
+static inline void breakout_cerrd(unsigned int val)
+{
+ switch (val & CP0_CERRD_CAUSES) {
+ case CP0_CERRD_LOAD:
+ printk(" load,");
+ break;
+ case CP0_CERRD_STORE:
+ printk(" store,");
+ break;
+ case CP0_CERRD_FILLWB:
+ printk(" fill/wb,");
+ break;
+ case CP0_CERRD_COHERENCY:
+ printk(" coherency,");
+ break;
+ case CP0_CERRD_DUPTAG:
+ printk(" duptags,");
+ break;
+ default:
+ printk(" NO CAUSE,");
+ break;
+ }
+ if (!(val & CP0_CERRD_TYPES))
+ printk(" NO TYPE");
+ else {
+ if (val & CP0_CERRD_MULTIPLE)
+ printk(" multi-err");
+ if (val & CP0_CERRD_TAG_STATE)
+ printk(" tag-state");
+ if (val & CP0_CERRD_TAG_ADDRESS)
+ printk(" tag-address");
+ if (val & CP0_CERRD_DATA_SBE)
+ printk(" data-SBE");
+ if (val & CP0_CERRD_DATA_DBE)
+ printk(" data-DBE");
+ if (val & CP0_CERRD_EXTERNAL)
+ printk(" external");
+ }
+ printk("\n");
+}
+
+#ifndef CONFIG_SIBYTE_BUS_WATCHER
+
+static void check_bus_watcher(void)
+{
+ uint32_t status, l2_err, memio_err;
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ uint64_t l2_tag;
+#endif
+
+ /* Destructive read, clears register and interrupt */
+ status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
+ /* Bit 31 is always on, but there's no #define for that */
+ if (status & ~(1UL << 31)) {
+ l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ l2_tag = in64(IOADDR(A_L2_ECC_TAG));
+#endif
+ memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
+ printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
+ printk("\nLast recorded signature:\n");
+ printk("Request %02x from %d, answered by %d with Dcode %d\n",
+ (unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
+ (int)(G_SCD_BERR_TID(status) >> 6),
+ (int)G_SCD_BERR_RID(status),
+ (int)G_SCD_BERR_DCODE(status));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
+#endif
+ } else {
+ printk("Bus watcher indicates no error\n");
+ }
+}
+#else
+extern void check_bus_watcher(void);
+#endif
+
+asmlinkage void sb1_cache_error(void)
+{
+ uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
+ unsigned long long cerr_dpa;
+
+#ifdef CONFIG_SIBYTE_BW_TRACE
+ /* Freeze the trace buffer now */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
+#else
+ csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
+#endif
+ printk("Trace buffer frozen\n");
+#endif
+
+ printk("Cache error exception on CPU %x:\n",
+ (read_c0_prid() >> 25) & 0x7);
+
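+	/*
+	 * Read the error state from CP0: ErrCtl ($26), CacheErr-I ($27),
+	 * CacheErr-D ($27 sel 1), the 64-bit CacheErr-DPA ($27 sel 3,
+	 * split into high and low words) and ErrorEPC ($30).
+	 */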
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " mfc0 %0, $26\n\t"
+ " mfc0 %1, $27\n\t"
+ " mfc0 %2, $27, 1\n\t"
+ " dmfc0 $1, $27, 3\n\t"
+ " dsrl32 %3, $1, 0 \n\t"
+ " sll %4, $1, 0 \n\t"
+ " mfc0 %5, $30\n\t"
+ " .set pop"
+ : "=r" (errctl), "=r" (cerr_i), "=r" (cerr_d),
+ "=r" (dpahi), "=r" (dpalo), "=r" (eepc));
+
+ cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
+ breakout_errctl(errctl);
+ if (errctl & CP0_ERRCTL_ICACHE) {
+ printk(" c0_cerr_i == %08x", cerr_i);
+ breakout_cerri(cerr_i);
+ if (CP0_CERRI_IDX_VALID(cerr_i)) {
+ /* Check index of EPC, allowing for delay slot */
+ if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
+ ((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
+ printk(" cerr_i idx doesn't match eepc\n");
+ else {
+ res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
+ (cerr_i & CP0_CERRI_DATA) != 0);
+ if (!(res & cerr_i))
+ printk("...didn't see indicated icache problem\n");
+ }
+ }
+ }
+ if (errctl & CP0_ERRCTL_DCACHE) {
+ printk(" c0_cerr_d == %08x", cerr_d);
+ breakout_cerrd(cerr_d);
+ if (CP0_CERRD_DPA_VALID(cerr_d)) {
+ printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
+ if (!CP0_CERRD_IDX_VALID(cerr_d)) {
+ res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
+ (cerr_d & CP0_CERRD_DATA) != 0);
+ if (!(res & cerr_d))
+ printk("...didn't see indicated dcache problem\n");
+ } else {
+ if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
+ printk(" cerr_d idx doesn't match cerr_dpa\n");
+ else {
+ res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
+ (cerr_d & CP0_CERRD_DATA) != 0);
+ if (!(res & cerr_d))
+ printk("...didn't see indicated problem\n");
+ }
+ }
+ }
+ }
+
+ check_bus_watcher();
+
+ /*
+ * Calling panic() when a fatal cache error occurs scrambles the
+ * state of the system (and the cache), making it difficult to
+ * investigate after the fact. However, if you just stall the CPU,
+ * the other CPU may keep on running, which is typically very
+ * undesirable.
+ */
+#ifdef CONFIG_SB1_CERR_STALL
+ while (1)
+ ;
+#else
+ panic("unhandled cache error");
+#endif
+}
+
+
+/* Parity lookup table. */
+static const uint8_t parity[256] = {
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
+};
+
+/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
+static const uint64_t mask_72_64[8] = {
+ 0x0738C808099264FFULL,
+ 0x38C808099264FF07ULL,
+ 0xC808099264FF0738ULL,
+ 0x08099264FF0738C8ULL,
+ 0x099264FF0738C808ULL,
+ 0x9264FF0738C80809ULL,
+ 0x64FF0738C8080992ULL,
+ 0xFF0738C808099264ULL
+};
+
+/* Calculate the parity on a range of bits */
+static char range_parity(uint64_t dword, int max, int min)
+{
+ char parity = 0;
+ int i;
+ dword >>= min;
+ for (i=max-min; i>=0; i--) {
+ if (dword & 0x1)
+ parity = !parity;
+ dword >>= 1;
+ }
+ return parity;
+}
+
+/* Calculate the 4-bit even byte-parity for an instruction */
+static unsigned char inst_parity(uint32_t word)
+{
+ int i, j;
+ char parity = 0;
+ for (j=0; j<4; j++) {
+ char byte_parity = 0;
+ for (i=0; i<8; i++) {
+ if (word & 0x80000000)
+ byte_parity = !byte_parity;
+ word <<= 1;
+ }
+ parity <<= 1;
+ parity |= byte_parity;
+ }
+ return parity;
+}
+
+static uint32_t extract_ic(unsigned short addr, int data)
+{
+ unsigned short way;
+ int valid;
+ uint32_t taghi, taglolo, taglohi;
+ unsigned long long taglo, va;
+ uint64_t tlo_tmp;
+ uint8_t lru;
+ int res = 0;
+
+ printk("Icache index 0x%04x ", addr);
+ for (way = 0; way < 4; way++) {
+ /* Index-load-tag-I */
+ __asm__ __volatile__ (
+ " .set push \n\t"
+ " .set noreorder \n\t"
+ " .set mips64 \n\t"
+ " .set noat \n\t"
+ " cache 4, 0(%3) \n\t"
+ " mfc0 %0, $29 \n\t"
+ " dmfc0 $1, $28 \n\t"
+ " dsrl32 %1, $1, 0 \n\t"
+ " sll %2, $1, 0 \n\t"
+ " .set pop"
+ : "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
+ : "r" ((way << 13) | addr));
+
+ taglo = ((unsigned long long)taglohi << 32) | taglolo;
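+		/* taghi bits 21:14 hold the LRU state, two bits per way */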
+ if (way == 0) {
+ lru = (taghi >> 14) & 0xff;
+ printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
+ ((addr >> 5) & 0x3), /* bank */
+ ((addr >> 7) & 0x3f), /* index */
+ (lru & 0x3),
+ ((lru >> 2) & 0x3),
+ ((lru >> 4) & 0x3),
+ ((lru >> 6) & 0x3));
+ }
+ va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
+ if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
+ va |= 0x3FFFF00000000000ULL;
+ valid = ((taghi >> 29) & 1);
+ if (valid) {
+ tlo_tmp = taglo & 0xfff3ff;
+ if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
+ printk(" ** bad parity in VTag0/G/ASID\n");
+ res |= CP0_CERRI_TAG_PARITY;
+ }
+ if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
+ printk(" ** bad parity in R/VTag1\n");
+ res |= CP0_CERRI_TAG_PARITY;
+ }
+ }
+ if (valid ^ ((taghi >> 27) & 1)) {
+ printk(" ** bad parity for valid bit\n");
+ res |= CP0_CERRI_TAG_PARITY;
+ }
+ printk(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
+ way, va, valid, taghi, taglo);
+
+ if (data) {
+ uint32_t datahi, insta, instb;
+ uint8_t predecode;
+ int offset;
+
+ /* (hit all banks and ways) */
+ for (offset = 0; offset < 4; offset++) {
+ /* Index-load-data-I */
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set noreorder\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " cache 6, 0(%3) \n\t"
+ " mfc0 %0, $29, 1\n\t"
+ " dmfc0 $1, $28, 1\n\t"
+ " dsrl32 %1, $1, 0 \n\t"
+ " sll %2, $1, 0 \n\t"
+ " .set pop \n"
+ : "=r" (datahi), "=r" (insta), "=r" (instb)
+ : "r" ((way << 13) | addr | (offset << 3)));
+ predecode = (datahi >> 8) & 0xff;
+ if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
+ printk(" ** bad parity in predecode\n");
+ res |= CP0_CERRI_DATA_PARITY;
+ }
+ /* XXXKW should/could check predecode bits themselves */
+ if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
+ printk(" ** bad parity in instruction a\n");
+ res |= CP0_CERRI_DATA_PARITY;
+ }
+ if ((datahi & 0xf) ^ inst_parity(instb)) {
+ printk(" ** bad parity in instruction b\n");
+ res |= CP0_CERRI_DATA_PARITY;
+ }
+ printk(" %05X-%08X%08X", datahi, insta, instb);
+ }
+ printk("\n");
+ }
+ }
+ return res;
+}
+
+/* Compute the ECC for a data doubleword */
+static uint8_t dc_ecc(uint64_t dword)
+{
+ uint64_t t;
+ uint32_t w;
+ uint8_t p;
+ int i;
+
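+	/*
+	 * Each of the eight check bits is the even parity of the data
+	 * bits selected by the corresponding mask_72_64[] entry; the
+	 * parity[] lookup table folds the masked word a byte at a time.
+	 */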
+ p = 0;
+ for (i = 7; i >= 0; i--)
+ {
+ p <<= 1;
+ t = dword & mask_72_64[i];
+ w = (uint32_t)(t >> 32);
+ p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
+ ^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
+ w = (uint32_t)(t & 0xFFFFFFFF);
+ p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
+ ^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
+ }
+ return p;
+}
+
+struct dc_state {
+ unsigned char val;
+ char *name;
+};
+
+static struct dc_state dc_states[] = {
+ { 0x00, "INVALID" },
+ { 0x0f, "COH-SHD" },
+ { 0x13, "NCO-E-C" },
+ { 0x19, "NCO-E-D" },
+ { 0x16, "COH-E-C" },
+ { 0x1c, "COH-E-D" },
+ { 0xff, "*ERROR*" }
+};
+
+#define DC_TAG_VALID(state) \
+ (((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
+ ((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
+
+static char *dc_state_str(unsigned char state)
+{
+ struct dc_state *dsc = dc_states;
+ while (dsc->val != 0xff) {
+ if (dsc->val == state)
+ break;
+ dsc++;
+ }
+ return dsc->name;
+}
+
+static uint32_t extract_dc(unsigned short addr, int data)
+{
+ int valid, way;
+ unsigned char state;
+ uint32_t taghi, taglolo, taglohi;
+ unsigned long long taglo, pa;
+ uint8_t ecc, lru;
+ int res = 0;
+
+ printk("Dcache index 0x%04x ", addr);
+ for (way = 0; way < 4; way++) {
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set noreorder\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " cache 5, 0(%3)\n\t" /* Index-load-tag-D */
+ " mfc0 %0, $29, 2\n\t"
+ " dmfc0 $1, $28, 2\n\t"
+ " dsrl32 %1, $1, 0\n\t"
+ " sll %2, $1, 0\n\t"
+ " .set pop"
+ : "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
+ : "r" ((way << 13) | addr));
+
+ taglo = ((unsigned long long)taglohi << 32) | taglolo;
+ pa = (taglo & 0xFFFFFFE000ULL) | addr;
+ if (way == 0) {
+ lru = (taghi >> 14) & 0xff;
+ printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
+ ((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
+ ((addr >> 6) & 0x3f), /* index */
+ (lru & 0x3),
+ ((lru >> 2) & 0x3),
+ ((lru >> 4) & 0x3),
+ ((lru >> 6) & 0x3));
+ }
+ state = (taghi >> 25) & 0x1f;
+ valid = DC_TAG_VALID(state);
+ printk(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
+ way, pa, dc_state_str(state), state, taghi, taglo);
+ if (valid) {
+ if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
+ printk(" ** bad parity in PTag1\n");
+ res |= CP0_CERRD_TAG_ADDRESS;
+ }
+ if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
+ printk(" ** bad parity in PTag0\n");
+ res |= CP0_CERRD_TAG_ADDRESS;
+ }
+ } else {
+ res |= CP0_CERRD_TAG_STATE;
+ }
+
+ if (data) {
+ uint32_t datalohi, datalolo, datahi;
+ unsigned long long datalo;
+ int offset;
+ char bad_ecc = 0;
+
+ for (offset = 0; offset < 4; offset++) {
+ /* Index-load-data-D */
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set noreorder\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " cache 7, 0(%3)\n\t" /* Index-load-data-D */
+ " mfc0 %0, $29, 3\n\t"
+ " dmfc0 $1, $28, 3\n\t"
+ " dsrl32 %1, $1, 0 \n\t"
+ " sll %2, $1, 0 \n\t"
+ " .set pop"
+ : "=r" (datahi), "=r" (datalohi), "=r" (datalolo)
+ : "r" ((way << 13) | addr | (offset << 3)));
+ datalo = ((unsigned long long)datalohi << 32) | datalolo;
+ ecc = dc_ecc(datalo);
+ if (ecc != datahi) {
+ int bits = 0;
+ bad_ecc |= 1 << (3-offset);
+ ecc ^= datahi;
+ while (ecc) {
+ if (ecc & 1) bits++;
+ ecc >>= 1;
+ }
+ res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
+ }
+ printk(" %02X-%016llX", datahi, datalo);
+ }
+ printk("\n");
+ if (bad_ecc)
+ printk(" dwords w/ bad ECC: %d %d %d %d\n",
+ !!(bad_ecc & 8), !!(bad_ecc & 4),
+ !!(bad_ecc & 2), !!(bad_ecc & 1));
+ }
+ }
+ return res;
+}
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
new file mode 100644
index 0000000..e743622
--- /dev/null
+++ b/arch/mips/mm/cex-gen.S
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Cache error handler
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+/*
+ * Game over. Go to the button. Press gently. Swear where allowed by
+ * legislation.
+ */
+ LEAF(except_vec2_generic)
+ .set noreorder
+ .set noat
+ .set mips0
+ /*
+ * This is a very bad place to be. Our cache error
+ * detection has triggered. If we have write-back data
+ * in the cache, we may not be able to recover. As a
+	 * first-order desperate measure, turn off KSEG0 caching.
+ */
+ mfc0 k0,CP0_CONFIG
+ li k1,~CONF_CM_CMASK
+ and k0,k0,k1
+ ori k0,k0,CONF_CM_UNCACHED
+ mtc0 k0,CP0_CONFIG
+ /* Give it a few cycles to sink in... */
+ nop
+ nop
+ nop
+
+ j cache_parity_error
+ nop
+ END(except_vec2_generic)
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
new file mode 100644
index 0000000..2d08268
--- /dev/null
+++ b/arch/mips/mm/cex-sb1.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2001,2002,2003 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/cacheops.h>
+#include <asm/sibyte/board.h>
+
+#define C0_ERRCTL $26 /* CP0: Error info */
+#define C0_CERR_I $27 /* CP0: Icache error */
+#define C0_CERR_D $27,1 /* CP0: Dcache error */
+
+ /*
+ * Based on SiByte sample software cache-err/cerr.S
+ * CVS revision 1.8. Only the 'unrecoverable' case
+ * is changed.
+ */
+
+ .set mips64
+ .set noreorder
+ .set noat
+
+ /*
+ * sb1_cerr_vec: code to be copied to the Cache Error
+ * Exception vector. The code must be pushed out to memory
+ * (either by copying to Kseg0 and Kseg1 both, or by flushing
+ * the L1 and L2) since it is fetched as 0xa0000100.
+ *
+ * NOTE: Be sure this handler is at most 28 instructions long
+ * since the final 16 bytes of the exception vector memory
+ * (0x170-0x17f) are used to preserve k0, k1, and ra.
+ */
+
+ __CPUINIT
+
+LEAF(except_vec2_sb1)
+ /*
+ * If this error is recoverable, we need to exit the handler
+ * without having dirtied any registers. To do this,
+ * save/restore k0 and k1 from low memory (Useg is direct
+ * mapped while ERL=1). Note that we can't save to a
+ * CPU-specific location without ruining a register in the
+ * process. This means we are vulnerable to data corruption
+ * whenever the handler is reentered by a second CPU.
+ */
+ sd k0,0x170($0)
+ sd k1,0x178($0)
+
+#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
+ j handle_vec2_sb1
+ nop
+#else
+ /*
+ * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell
+ * if we can fast-path out of here for a h/w-recovered error.
+ */
+ mfc0 k1,C0_ERRCTL
+ bgtz k1,attempt_recovery
+ sll k0,k1,1
+
+recovered_dcache:
+ /*
+ * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA).
+	 * Ought to log the occurrence of this recovered dcache error.
+ */
+ b recovered
+ mtc0 $0,C0_CERR_D
+
+attempt_recovery:
+ /*
+	 * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31.  Recovering
+	 * from a Dcache error would take more extensive processing, so
+	 * for now all Dcache errors are considered "unrecoverable".
+ * Note that 'DC' becoming set (outside of ERL mode) will
+ * cause 'IC' to clear; so if there's an Icache error, we'll
+ * only find out about it if we recover from this error and
+ * continue executing.
+ */
+ bltz k0,unrecoverable
+ sll k0,1
+
+ /*
+ * k0 has C0_ERRCTL << 2, which puts 'IC' at bit 31. If an
+ * Icache error isn't indicated, I'm not sure why we got here.
+ * Consider that case "unrecoverable" for now.
+ */
+ bgez k0,unrecoverable
+
+attempt_icache_recovery:
+ /*
+ * External icache errors are due to uncorrectable ECC errors
+ * in the L2 cache or Memory Controller and cannot be
+ * recovered here.
+ */
+ mfc0 k0,C0_CERR_I /* delay slot */
+ li k1,1 << 26 /* ICACHE_EXTERNAL */
+ and k1,k0
+ bnez k1,unrecoverable
+ andi k0,0x1fe0
+
+ /*
+ * Since the error is internal, the 'IDX' field from
+ * CacheErr-I is valid and we can just invalidate all blocks
+ * in that set.
+ */
+ cache Index_Invalidate_I,(0<<13)(k0)
+ cache Index_Invalidate_I,(1<<13)(k0)
+ cache Index_Invalidate_I,(2<<13)(k0)
+ cache Index_Invalidate_I,(3<<13)(k0)
+
+ /* Ought to log this recovered icache error */
+
+recovered:
+ /* Restore the saved registers */
+ ld k0,0x170($0)
+ ld k1,0x178($0)
+ eret
+
+unrecoverable:
+ /* Unrecoverable Icache or Dcache error; log it and/or fail */
+ j handle_vec2_sb1
+ nop
+#endif
+
+END(except_vec2_sb1)
+
+ __FINIT
+
+ LEAF(handle_vec2_sb1)
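+	/* Switch KSEG0 to uncached before running the C error handler */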
+ mfc0 k0,CP0_CONFIG
+ li k1,~CONF_CM_CMASK
+ and k0,k0,k1
+ ori k0,k0,CONF_CM_UNCACHED
+ mtc0 k0,CP0_CONFIG
+
+ SSNOP
+ SSNOP
+ SSNOP
+ SSNOP
+ bnezl $0, 1f
+1:
+ mfc0 k0, CP0_STATUS
+ sll k0, k0, 3 # check CU0 (kernel?)
+ bltz k0, 2f
+ nop
+
+ /* Get a valid Kseg0 stack pointer. Any task's stack pointer
+ * will do, although if we ever want to resume execution we
+ * better not have corrupted any state. */
+ get_saved_sp
+ move sp, k1
+
+2:
+ j sb1_cache_error
+ nop
+
+ END(handle_vec2_sb1)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
new file mode 100644
index 0000000..e6708b3
--- /dev/null
+++ b/arch/mips/mm/dma-default.c
@@ -0,0 +1,390 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+
+#include <dma-coherence.h>
+
+static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
+{
+ unsigned long addr = plat_dma_addr_to_phys(dma_addr);
+
+ return (unsigned long)phys_to_virt(addr);
+}
+
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology uses "coherent" for memory areas whose coherency is
+ * maintained by hardware.
+ */
+
+static inline int cpu_is_noncoherent_r10000(struct device *dev)
+{
+ return !plat_device_is_coherent(dev) &&
+ (current_cpu_type() == CPU_R10000 ||
+ current_cpu_type() == CPU_R12000);
+}
+
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+ if (dev == NULL)
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
+ else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+ else
+#endif
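+	/* the bare ';' terminates the if/else chain built by the #ifdefs above */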
+ ;
+
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
+
+ return gfp;
+}
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t gfp)
+{
+ void *ret;
+
+ gfp = massage_gfp_flags(dev, gfp);
+
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = plat_map_dma_mem(dev, ret, size);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t gfp)
+{
+ void *ret;
+
+ gfp = massage_gfp_flags(dev, gfp);
+
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+
+ if (ret) {
+ memset(ret, 0, size);
+ *dma_handle = plat_map_dma_mem(dev, ret, size);
+
+ if (!plat_device_is_coherent(dev)) {
+ dma_cache_wback_inv((unsigned long) ret, size);
+ ret = UNCAC_ADDR(ret);
+ }
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ plat_unmap_dma_mem(dma_handle);
+ free_pages((unsigned long) vaddr, get_order(size));
+}
+
+EXPORT_SYMBOL(dma_free_noncoherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long addr = (unsigned long) vaddr;
+
+ plat_unmap_dma_mem(dma_handle);
+
+ if (!plat_device_is_coherent(dev))
+ addr = CAC_ADDR(addr);
+
+ free_pages(addr, get_order(size));
+}
+
+EXPORT_SYMBOL(dma_free_coherent);
+
+static inline void __dma_sync(unsigned long addr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback(addr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(addr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(addr, size);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ unsigned long addr = (unsigned long) ptr;
+
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(addr, size, direction);
+
+ return plat_map_dma_mem(dev, ptr, size);
+}
+
+EXPORT_SYMBOL(dma_map_single);
+
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ if (cpu_is_noncoherent_r10000(dev))
+ __dma_sync(dma_addr_to_virt(dma_addr), size,
+ direction);
+
+ plat_unmap_dma_mem(dma_addr);
+}
+
+EXPORT_SYMBOL(dma_unmap_single);
+
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nents; i++, sg++) {
+ unsigned long addr;
+
+ addr = (unsigned long) sg_virt(sg);
+ if (!plat_device_is_coherent(dev) && addr)
+ __dma_sync(addr, sg->length, direction);
+ sg->dma_address = plat_map_dma_mem(dev,
+ (void *)addr, sg->length);
+ }
+
+ return nents;
+}
+
+EXPORT_SYMBOL(dma_map_sg);
+
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!plat_device_is_coherent(dev)) {
+ unsigned long addr;
+
+ addr = (unsigned long) page_address(page) + offset;
+ dma_cache_wback_inv(addr, size);
+ }
+
+ return plat_map_dma_mem_page(dev, page) + offset;
+}
+
+EXPORT_SYMBOL(dma_map_page);
+
+void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
+ unsigned long addr;
+
+ addr = plat_dma_addr_to_phys(dma_address);
+ dma_cache_wback_inv(addr, size);
+ }
+
+ plat_unmap_dma_mem(dma_address);
+}
+
+EXPORT_SYMBOL(dma_unmap_page);
+
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ unsigned long addr;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nhwentries; i++, sg++) {
+ if (!plat_device_is_coherent(dev) &&
+ direction != DMA_TO_DEVICE) {
+ addr = (unsigned long) sg_virt(sg);
+ if (addr)
+ __dma_sync(addr, sg->length, direction);
+ }
+ plat_unmap_dma_mem(sg->dma_address);
+ }
+}
+
+EXPORT_SYMBOL(dma_unmap_sg);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (cpu_is_noncoherent_r10000(dev)) {
+ unsigned long addr;
+
+ addr = dma_addr_to_virt(dma_handle);
+ __dma_sync(addr, size, direction);
+ }
+}
+
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!plat_device_is_coherent(dev)) {
+ unsigned long addr;
+
+ addr = dma_addr_to_virt(dma_handle);
+ __dma_sync(addr, size, direction);
+ }
+}
+
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (cpu_is_noncoherent_r10000(dev)) {
+ unsigned long addr;
+
+ addr = dma_addr_to_virt(dma_handle);
+ __dma_sync(addr + offset, size, direction);
+ }
+}
+
+EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
+
+void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!plat_device_is_coherent(dev)) {
+ unsigned long addr;
+
+ addr = dma_addr_to_virt(dma_handle);
+ __dma_sync(addr + offset, size, direction);
+ }
+}
+
+EXPORT_SYMBOL(dma_sync_single_range_for_device);
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for (i = 0; i < nelems; i++, sg++) {
+ if (cpu_is_noncoherent_r10000(dev))
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
+ sg->length, direction);
+ }
+}
+
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for (i = 0; i < nelems; i++, sg++) {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
+ sg->length, direction);
+ }
+}
+
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(dma_mapping_error);
+
+int dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+EXPORT_SYMBOL(dma_supported);
+
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
+{
+ return plat_device_is_coherent(dev);
+}
+
+EXPORT_SYMBOL(dma_is_consistent);
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!plat_device_is_coherent(dev))
+ __dma_sync((unsigned long)vaddr, size, direction);
+}
+
+EXPORT_SYMBOL(dma_cache_sync);
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
new file mode 100644
index 0000000..297fb9f
--- /dev/null
+++ b/arch/mips/mm/extable.c
@@ -0,0 +1,21 @@
+/*
+ * linux/arch/mips/mm/extable.c
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/branch.h>
+#include <asm/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(exception_epc(regs));
+ if (fixup) {
+ regs->cp0_epc = fixup->nextinsn;
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
new file mode 100644
index 0000000..fa636fc
--- /dev/null
+++ b/arch/mips/mm/fault.c
@@ -0,0 +1,252 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ */
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/module.h>
+
+#include <asm/branch.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/highmem.h> /* For VMALLOC_END */
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+ unsigned long address)
+{
+ struct vm_area_struct * vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ const int field = sizeof(unsigned long) * 2;
+ siginfo_t info;
+ int fault;
+
+#if 0
+ printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
+ current->comm, current->pid, field, address, write,
+ field, regs->cp0_epc);
+#endif
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
+ goto vmalloc_fault;
+#ifdef MODULE_START
+ if (unlikely(address >= MODULE_START && address < MODULE_END))
+ goto vmalloc_fault;
+#endif
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ goto bad_area;
+ }
+
+survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, write);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.cp0_badvaddr = address;
+ tsk->thread.error_code = write;
+#if 0
+ printk("do_page_fault() #2: sending SIGSEGV to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
+#endif
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *) address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs)) {
+ current->thread.cp0_baduaddr = address;
+ return;
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
+ "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
+ raw_smp_processor_id(), field, address, field, regs->cp0_epc,
+ field, regs->regs[31]);
+ die("Oops", regs);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (is_global_init(tsk)) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (user_mode(regs))
+ do_group_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+ else
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+#if 0
+ printk("do_page_fault() #3: sending SIGBUS to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
+#endif
+ tsk->thread.cp0_badvaddr = address;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *) address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ return;
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = __pgd_offset(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
new file mode 100644
index 0000000..8f2cd8e
--- /dev/null
+++ b/arch/mips/mm/highmem.c
@@ -0,0 +1,118 @@
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+void *__kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ addr = kmap_high(page);
+ flush_tlb_one((unsigned long)addr);
+
+ return addr;
+}
+
+void __kunmap(struct page *page)
+{
+ if (in_interrupt())
+ BUG();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because it avoids the global TLB invalidation
+ * that the kmap code must perform when the kmap pool wraps.
+ *
+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
+
+void *__kmap_atomic(struct page *page, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ if (!pte_none(*(kmap_pte-idx)))
+ BUG();
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+ local_flush_tlb_one((unsigned long)vaddr);
+
+ return (void*) vaddr;
+}
+
+void __kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < FIXADDR_START) { // FIXME
+ pagefault_enable();
+ return;
+ }
+
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ BUG();
+
+ /*
+	 * force other mappings to Oops if they try to access
+	 * this pte without first remapping it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_one(vaddr);
+#endif
+
+ pagefault_enable();
+}
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ pagefault_disable();
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+ flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
+struct page *__kmap_atomic_to_page(void *ptr)
+{
+ unsigned long idx, vaddr = (unsigned long)ptr;
+ pte_t *pte;
+
+ if (vaddr < FIXADDR_START)
+ return virt_to_page(ptr);
+
+ idx = virt_to_fix(vaddr);
+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+ return pte_page(*pte);
+}
+
+EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
new file mode 100644
index 0000000..137c14b
--- /dev/null
+++ b/arch/mips/mm/init.c
@@ -0,0 +1,509 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/bootinfo.h>
+#include <asm/cachectl.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/kmap_types.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/fixmap.h>
+
+/* Atomicity and interruptibility */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/mipsmtregs.h>
+
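+/*
+ * On SMTC a critical section must disable the other virtual processing
+ * elements (dvpe/evpe) as well as local interrupts.  The braces in the
+ * two macros are deliberately unbalanced so that mvpflags stays in
+ * scope from ENTER_CRITICAL() to the matching EXIT_CRITICAL().
+ */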
+#define ENTER_CRITICAL(flags) \
+ { \
+ unsigned int mvpflags; \
+ local_irq_save(flags);\
+ mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+ evpe(mvpflags); \
+ local_irq_restore(flags); \
+ }
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * We have up to 8 empty zeroed pages so we can map one of the right colour
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * where we have to avoid VCED / VECI exceptions for good performance at
+ * any price.  Since the page is never written to after initialization, we
+ * don't have to care about aliases on other CPUs.
+ */
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
+
+/*
+ * Not static inline because used by IP27 special magic initialization code
+ */
+unsigned long setup_zero_pages(void)
+{
+ unsigned int order;
+ unsigned long size;
+ struct page *page;
+
+ if (cpu_has_vce)
+ order = 3;
+ else
+ order = 0;
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Oh boy, that early out of memory?");
+
+ page = virt_to_page((void *)empty_zero_page);
+ split_page(page, order);
+ while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
+ SetPageReserved(page);
+ page++;
+ }
+
+ size = PAGE_SIZE << order;
+ zero_page_mask = (size - 1) & PAGE_MASK;
+
+ return 1UL << order;
+}
+
+/*
+ * These are almost like kmap_atomic / kunmap_atomic except they take an
+ * additional address argument as the hint.
+ */
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+
+#ifdef CONFIG_MIPS_MT_SMTC
+static pte_t *kmap_coherent_pte;
+static void __init kmap_coherent_init(void)
+{
+ unsigned long vaddr;
+
+ /* cache the first coherent kmap pte */
+ vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+ kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+#else
+static inline void kmap_coherent_init(void) {}
+#endif
+
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr, flags, entrylo;
+ unsigned long old_ctx;
+ pte_t pte;
+ int tlbidx;
+
+ BUG_ON(Page_dcache_dirty(page));
+
+ inc_preempt_count();
+ idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+#ifdef CONFIG_MIPS_MT_SMTC
+ idx += FIX_N_COLOURS * smp_processor_id();
+#endif
+ vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+ pte = mk_pte(page, PAGE_KERNEL);
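+	/*
+	 * Build the hardware EntryLo value for the mapping: with 64-bit
+	 * physical addresses on a 32-bit CPU it lives in the high PTE
+	 * word, otherwise the EntryLo bits sit 6 bits up in the PTE.
+	 */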
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+ entrylo = pte.pte_high;
+#else
+ entrylo = pte_val(pte) >> 6;
+#endif
+
+ ENTER_CRITICAL(flags);
+ old_ctx = read_c0_entryhi();
+ write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+ write_c0_entrylo0(entrylo);
+ write_c0_entrylo1(entrylo);
+#ifdef CONFIG_MIPS_MT_SMTC
+ set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+ /* preload TLB instead of local_flush_tlb_one() */
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ tlbidx = read_c0_index();
+ mtc0_tlbw_hazard();
+ if (tlbidx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+#else
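+	/* reserve a new wired TLB entry and write the mapping into it */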
+ tlbidx = read_c0_wired();
+ write_c0_wired(tlbidx + 1);
+ write_c0_index(tlbidx);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+#endif
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ EXIT_CRITICAL(flags);
+
+ return (void*) vaddr;
+}
+
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+void kunmap_coherent(void)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+ unsigned int wired;
+ unsigned long flags, old_ctx;
+
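+	/*
+	 * Retire the wired TLB entry set up by kmap_coherent(): shrink
+	 * the wired count and overwrite the slot with a unique, invalid
+	 * mapping.
+	 */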
+ ENTER_CRITICAL(flags);
+ old_ctx = read_c0_entryhi();
+ wired = read_c0_wired() - 1;
+ write_c0_wired(wired);
+ write_c0_index(wired);
+ write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ EXIT_CRITICAL(flags);
+#endif
+ dec_preempt_count();
+ preempt_check_resched();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+
+ vto = kmap_atomic(to, KM_USER1);
+ if (cpu_has_dc_aliases &&
+ page_mapped(from) && !Page_dcache_dirty(from)) {
+ vfrom = kmap_coherent(from, vaddr);
+ copy_page(vto, vfrom);
+ kunmap_coherent();
+ } else {
+ vfrom = kmap_atomic(from, KM_USER0);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom, KM_USER0);
+ }
+ if ((!cpu_has_ic_fills_f_dc) ||
+ pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+ flush_data_cache_page((unsigned long)vto);
+ kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is cleared on other CPUs too before using it */
+ smp_wmb();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ if (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(vto, src, len);
+ kunmap_coherent();
+ } else {
+ memcpy(dst, src, len);
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(page);
+ }
+ if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+ flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ if (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(dst, vfrom, len);
+ kunmap_coherent();
+ } else {
+ memcpy(dst, src, len);
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(page);
+ }
+}
+
+#ifdef CONFIG_HIGHMEM
+unsigned long highstart_pfn, highend_pfn;
+
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+static void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+
+ kmap_prot = PAGE_KERNEL;
+}
+#endif /* CONFIG_HIGHMEM */
+
+void __init fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = __pgd_offset(vaddr);
+ j = __pud_offset(vaddr);
+ k = __pmd_offset(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pmd(pmd, __pmd((unsigned long)pte));
+ if (pte != pte_offset_kernel(pmd, 0))
+ BUG();
+ }
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+#endif
+}
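+
+/*
+ * Note that fixrange_init() only populates the page-table skeleton (one PTE
+ * page per empty pmd) covering the fixmap / pkmap window; the individual
+ * fixmap entries are written later, e.g. by kmap_atomic() or kmap_coherent().
+ */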
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+static int __init page_is_ram(unsigned long pagenr)
+{
+ int i;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long addr, end;
+
+ if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ /* not usable memory */
+ continue;
+
+ addr = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size);
+
+ if (pagenr >= addr && pagenr < end)
+ return 1;
+ }
+
+ return 0;
+}
+
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long lastpfn;
+
+ pagetable_init();
+
+#ifdef CONFIG_HIGHMEM
+ kmap_init();
+#endif
+ kmap_coherent_init();
+
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ lastpfn = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+ lastpfn = highend_pfn;
+
+ if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
+ printk(KERN_WARNING "This processor doesn't support highmem."
+ " %ldk highmem ignored\n",
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ lastpfn = max_low_pfn;
+ }
+#endif
+
+ free_area_init_nodes(max_zone_pfns);
+}
+
+static struct kcore_list kcore_mem, kcore_vmalloc;
+#ifdef CONFIG_64BIT
+static struct kcore_list kcore_kseg0;
+#endif
+
+void __init mem_init(void)
+{
+ unsigned long codesize, reservedpages, datasize, initsize;
+ unsigned long tmp, ram;
+
+#ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_DISCONTIGMEM
+#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
+#endif
+ max_mapnr = highend_pfn;
+#else
+ max_mapnr = max_low_pfn;
+#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ totalram_pages += free_all_bootmem();
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+
+ reservedpages = ram = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ if (page_is_ram(tmp)) {
+ ram++;
+ if (PageReserved(pfn_to_page(tmp)))
+ reservedpages++;
+ }
+ num_physpages = ram;
+
+#ifdef CONFIG_HIGHMEM
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+ if (!page_is_ram(tmp)) {
+ SetPageReserved(page);
+ continue;
+ }
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ totalhigh_pages++;
+ }
+ totalram_pages += totalhigh_pages;
+ num_physpages += totalhigh_pages;
+#endif
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+#ifdef CONFIG_64BIT
+ if ((unsigned long) &_text > (unsigned long) CKSEG0)
+ /* The -4 is a hack so that user tools don't have to handle
+ the overflow. */
+ kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
+#endif
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
+ "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ ram << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+}
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+ ClearPageReserved(page);
+ init_page_count(page);
+ memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+ __free_page(page);
+ totalram_pages++;
+ }
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory",
+ virt_to_phys((void *)start),
+ virt_to_phys((void *)end));
+}
+#endif
+
+void __init_refok free_initmem(void)
+{
+ prom_free_prom_memory();
+ free_init_pages("unused kernel memory",
+ __pa_symbol(&__init_begin),
+ __pa_symbol(&__init_end));
+}
+
+unsigned long pgd_current[NR_CPUS];
+/*
+ * On 64-bit we've got three-level pagetables with a slightly
+ * different layout ...
+ */
+#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
+
+/*
+ * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
+ * are constants.  So we use the variants from asm-offset.h until those gcc
+ * versions are officially retired.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+#ifdef CONFIG_64BIT
+#ifdef MODULE_START
+pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
+#endif
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
new file mode 100644
index 0000000..59945b9
--- /dev/null
+++ b/arch/mips/mm/ioremap.c
@@ -0,0 +1,194 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2001, 2002 Ralf Baechle
+ */
+#include <linux/module.h>
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/tlbflush.h>
+
+static inline void remap_area_pte(pte_t * pte, unsigned long address,
+ phys_t size, phys_t phys_addr, unsigned long flags)
+{
+ phys_t end;
+ unsigned long pfn;
+ pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
+ | __WRITEABLE | flags);
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+ pfn = phys_addr >> PAGE_SHIFT;
+ do {
+ if (!pte_none(*pte)) {
+ printk("remap_area_pte: page already exists\n");
+ BUG();
+ }
+ set_pte(pte, pfn_pte(pfn, pgprot));
+ address += PAGE_SIZE;
+ pfn++;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
+ phys_t size, phys_t phys_addr, unsigned long flags)
+{
+ phys_t end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t * pte = pte_alloc_kernel(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+static int remap_area_pages(unsigned long address, phys_t phys_addr,
+ phys_t size, unsigned long flags)
+{
+ int error;
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ phys_addr -= address;
+ dir = pgd_offset(&init_mm, address);
+ flush_cache_all();
+ if (address >= end)
+ BUG();
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ error = -ENOMEM;
+ pud = pud_alloc(&init_mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, address);
+ if (!pmd)
+ break;
+ if (remap_area_pmd(pmd, address, end - address,
+ phys_addr + address, flags))
+ break;
+ error = 0;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ flush_tlb_all();
+ return error;
+}
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+
+#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
+
+void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
+{
+ struct vm_struct * area;
+ unsigned long offset;
+ phys_t last_addr;
+ void * addr;
+
+ phys_addr = fixup_bigphys_addr(phys_addr, size);
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * Map uncached objects in the low 512mb of address space using KSEG1,
+ * otherwise map using page tables.
+ */
+ if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
+ flags == _CACHE_UNCACHED)
+ return (void __iomem *) CKSEG1ADDR(phys_addr);
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (phys_addr < virt_to_phys(high_memory)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = __va(phys_addr);
+ t_end = t_addr + (size - 1);
+
+		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+			if (!PageReserved(page))
+ return NULL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+ vunmap(addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + (char *)addr);
+}
+
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
+
+void __iounmap(const volatile void __iomem *addr)
+{
+ struct vm_struct *p;
+
+ if (IS_KSEG1(addr))
+ return;
+
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p)
+ printk(KERN_ERR "iounmap: bad address %p\n", addr);
+
+ kfree(p);
+}
+
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__iounmap);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
new file mode 100644
index 0000000..1417c64
--- /dev/null
+++ b/arch/mips/mm/page.c
@@ -0,0 +1,689 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2008 Thiemo Seufer
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+
+#include <asm/bugs.h>
+#include <asm/cacheops.h>
+#include <asm/inst.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/prefetch.h>
+#include <asm/system.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/cpu.h>
+#include <asm/war.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_dma.h>
+#endif
+
+#include "uasm.h"
+
+/* Registers used in the assembled routines. */
+#define ZERO 0
+#define AT 2
+#define A0 4
+#define A1 5
+#define A2 6
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#define T9 25
+#define RA 31
+
+/* Handle labels (which must be positive integers). */
+enum label_id {
+ label_clear_nopref = 1,
+ label_clear_pref,
+ label_copy_nopref,
+ label_copy_pref_both,
+ label_copy_pref_store,
+};
+
+UASM_L_LA(_clear_nopref)
+UASM_L_LA(_clear_pref)
+UASM_L_LA(_copy_nopref)
+UASM_L_LA(_copy_pref_both)
+UASM_L_LA(_copy_pref_store)
+
+/* We need one branch and therefore one relocation per target label. */
+static struct uasm_label __cpuinitdata labels[5];
+static struct uasm_reloc __cpuinitdata relocs[5];
+
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x058 bytes
+ * R4600 v1.7: 0x05c bytes
+ * R4600 v2.0: 0x060 bytes
+ * With prefetching, 16 word strides 0x120 bytes
+ */
+
+static u32 clear_page_array[0x120 / 4];
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
+#else
+void clear_page(void *page) __attribute__((alias("clear_page_array")));
+#endif
+
+EXPORT_SYMBOL(clear_page);
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x11c bytes
+ * R4600 v1.7: 0x080 bytes
+ * R4600 v2.0: 0x07c bytes
+ * With prefetching, 16 word strides 0x540 bytes
+ */
+static u32 copy_page_array[0x540 / 4];
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+void
+copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
+#else
+void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
+#endif
+
+EXPORT_SYMBOL(copy_page);
+
+
+static int pref_bias_clear_store __cpuinitdata;
+static int pref_bias_copy_load __cpuinitdata;
+static int pref_bias_copy_store __cpuinitdata;
+
+static u32 pref_src_mode __cpuinitdata;
+static u32 pref_dst_mode __cpuinitdata;
+
+static int clear_word_size __cpuinitdata;
+static int copy_word_size __cpuinitdata;
+
+static int half_clear_loop_size __cpuinitdata;
+static int half_copy_loop_size __cpuinitdata;
+
+static int cache_line_size __cpuinitdata;
+#define cache_line_mask() (cache_line_size - 1)
+
+static inline void __cpuinit
+pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
+{
+ if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
+ if (off > 0x7fff) {
+ uasm_i_lui(buf, T9, uasm_rel_hi(off));
+ uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+ } else
+ uasm_i_addiu(buf, T9, ZERO, off);
+ uasm_i_daddu(buf, reg1, reg2, T9);
+ } else {
+ if (off > 0x7fff) {
+ uasm_i_lui(buf, T9, uasm_rel_hi(off));
+ uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+ UASM_i_ADDU(buf, reg1, reg2, T9);
+ } else
+ UASM_i_ADDIU(buf, reg1, reg2, off);
+ }
+}
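+
+/*
+ * Sketch of the emitted code: without the DADDI workaround a small offset
+ * becomes a single (d)addiu and a large one lui/addiu into T9 plus (d)addu;
+ * with DADDI_WAR in effect the offset is always staged in T9 first, so no
+ * daddiu with an immediate is ever emitted.
+ */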
+
+static void __cpuinit set_prefetch_parameters(void)
+{
+ if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
+ clear_word_size = 8;
+ else
+ clear_word_size = 4;
+
+ if (cpu_has_64bit_gp_regs)
+ copy_word_size = 8;
+ else
+ copy_word_size = 4;
+
+ /*
+	 * The prefetches used here use "streaming" hints, which cause the
+ * copied data to be kicked out of the cache sooner. A page copy often
+ * ends up copying a lot more data than is commonly used, so this seems
+ * to make sense in terms of reducing cache pollution, but I've no real
+ * performance data to back this up.
+ */
+ if (cpu_has_prefetch) {
+ /*
+ * XXX: Most prefetch bias values in here are based on
+ * guesswork.
+ */
+ cache_line_size = cpu_dcache_line_size();
+ switch (current_cpu_type()) {
+ case CPU_TX49XX:
+ /* TX49 supports only Pref_Load */
+ pref_bias_copy_load = 256;
+ break;
+
+ case CPU_RM9000:
+ /*
+			 * As a workaround for erratum G105, which makes the
+			 * PrepareForStore hint unusable, we fall back to
+			 * StoreRetained on the RM9000.  Once it is known which
+			 * versions of the RM9000 are affected, we'll be able
+			 * to conditionalize this.
+ */
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ /*
+ * Those values have been experimentally tuned for an
+ * Origin 200.
+ */
+ pref_bias_clear_store = 512;
+ pref_bias_copy_load = 256;
+ pref_bias_copy_store = 256;
+ pref_src_mode = Pref_LoadStreamed;
+ pref_dst_mode = Pref_StoreStreamed;
+ break;
+
+ case CPU_SB1:
+ case CPU_SB1A:
+ pref_bias_clear_store = 128;
+ pref_bias_copy_load = 128;
+ pref_bias_copy_store = 128;
+ /*
+ * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
+ * hints are broken.
+ */
+ if (current_cpu_type() == CPU_SB1 &&
+ (current_cpu_data.processor_id & 0xff) < 0x02) {
+ pref_src_mode = Pref_Load;
+ pref_dst_mode = Pref_Store;
+ } else {
+ pref_src_mode = Pref_LoadStreamed;
+ pref_dst_mode = Pref_StoreStreamed;
+ }
+ break;
+
+ default:
+ pref_bias_clear_store = 128;
+ pref_bias_copy_load = 256;
+ pref_bias_copy_store = 128;
+ pref_src_mode = Pref_LoadStreamed;
+ pref_dst_mode = Pref_PrepareForStore;
+ break;
+ }
+ } else {
+ if (cpu_has_cache_cdex_s)
+ cache_line_size = cpu_scache_line_size();
+ else if (cpu_has_cache_cdex_p)
+ cache_line_size = cpu_dcache_line_size();
+ }
+ /*
+ * Too much unrolling will overflow the available space in
+	 * clear_page_array / copy_page_array.
+ */
+ half_clear_loop_size = min(16 * clear_word_size,
+ max(cache_line_size >> 1,
+ 4 * clear_word_size));
+ half_copy_loop_size = min(16 * copy_word_size,
+ max(cache_line_size >> 1,
+ 4 * copy_word_size));
+}
+
+static void __cpuinit build_clear_store(u32 **buf, int off)
+{
+ if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
+ uasm_i_sd(buf, ZERO, off, A0);
+ } else {
+ uasm_i_sw(buf, ZERO, off, A0);
+ }
+}
+
+static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+{
+ if (off & cache_line_mask())
+ return;
+
+ if (pref_bias_clear_store) {
+ uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+ A0);
+ } else if (cache_line_size == (half_clear_loop_size << 1)) {
+ if (cpu_has_cache_cdex_s) {
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ } else if (cpu_has_cache_cdex_p) {
+ if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ }
+
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, ZERO, ZERO, AT);
+
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ }
+ }
+}
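+
+/*
+ * When no usable prefetch exists and the loop writes whole cache lines,
+ * the Create_Dirty_Excl_{SD,D} cacheops above allocate a dirty exclusive
+ * line without fetching it from memory first; the extra nops and the dummy
+ * uncached load cater for the R4600 V1/V2 hit-cacheop errata.
+ */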
+
+void __cpuinit build_clear_page(void)
+{
+ int off;
+ u32 *buf = (u32 *)&clear_page_array;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ int i;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ set_prefetch_parameters();
+
+ /*
+ * This algorithm makes the following assumptions:
+ * - The prefetch bias is a multiple of 2 words.
+ * - The prefetch bias is less than one page.
+ */
+ BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
+ BUG_ON(PAGE_SIZE < pref_bias_clear_store);
+
+ off = PAGE_SIZE - pref_bias_clear_store;
+ if (off > 0xffff || !pref_bias_clear_store)
+ pg_addiu(&buf, A2, A0, off);
+ else
+ uasm_i_ori(&buf, A2, A0, off);
+
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ uasm_i_lui(&buf, AT, 0xa000);
+
+ off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
+ * cache_line_size : 0;
+ while (off) {
+ build_clear_pref(&buf, -off);
+ off -= cache_line_size;
+ }
+ uasm_l_clear_pref(&l, buf);
+ do {
+ build_clear_pref(&buf, off);
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < half_clear_loop_size);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_clear_pref(&buf, off);
+ if (off == -clear_word_size)
+ uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < 0);
+
+ if (pref_bias_clear_store) {
+ pg_addiu(&buf, A2, A0, pref_bias_clear_store);
+ uasm_l_clear_nopref(&l, buf);
+ off = 0;
+ do {
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < half_clear_loop_size);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ if (off == -clear_word_size)
+ uasm_il_bne(&buf, &r, A0, A2,
+ label_clear_nopref);
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < 0);
+ }
+
+ uasm_i_jr(&buf, RA);
+ uasm_i_nop(&buf);
+
+ BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+
+ uasm_resolve_relocs(relocs, labels);
+
+ pr_debug("Synthesized clear page handler (%u instructions).\n",
+ (u32)(buf - clear_page_array));
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+ for (i = 0; i < (buf - clear_page_array); i++)
+ pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+ pr_debug("\t.set pop\n");
+}
+
+static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+{
+ if (cpu_has_64bit_gp_regs) {
+ uasm_i_ld(buf, reg, off, A1);
+ } else {
+ uasm_i_lw(buf, reg, off, A1);
+ }
+}
+
+static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+{
+ if (cpu_has_64bit_gp_regs) {
+ uasm_i_sd(buf, reg, off, A0);
+ } else {
+ uasm_i_sw(buf, reg, off, A0);
+ }
+}
+
+static inline void build_copy_load_pref(u32 **buf, int off)
+{
+ if (off & cache_line_mask())
+ return;
+
+ if (pref_bias_copy_load)
+ uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+}
+
+static inline void build_copy_store_pref(u32 **buf, int off)
+{
+ if (off & cache_line_mask())
+ return;
+
+ if (pref_bias_copy_store) {
+ uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+ A0);
+ } else if (cache_line_size == (half_copy_loop_size << 1)) {
+ if (cpu_has_cache_cdex_s) {
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ } else if (cpu_has_cache_cdex_p) {
+ if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ }
+
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, ZERO, ZERO, AT);
+
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ }
+ }
+}
+
+void __cpuinit build_copy_page(void)
+{
+ int off;
+ u32 *buf = (u32 *)&copy_page_array;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ int i;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ set_prefetch_parameters();
+
+ /*
+ * This algorithm makes the following assumptions:
+ * - All prefetch biases are multiples of 8 words.
+ * - The prefetch biases are less than one page.
+ * - The store prefetch bias isn't greater than the load
+ * prefetch bias.
+ */
+ BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
+ BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
+ BUG_ON(PAGE_SIZE < pref_bias_copy_load);
+ BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
+
+ off = PAGE_SIZE - pref_bias_copy_load;
+ if (off > 0xffff || !pref_bias_copy_load)
+ pg_addiu(&buf, A2, A0, off);
+ else
+ uasm_i_ori(&buf, A2, A0, off);
+
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ uasm_i_lui(&buf, AT, 0xa000);
+
+ off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
+ cache_line_size : 0;
+ while (off) {
+ build_copy_load_pref(&buf, -off);
+ off -= cache_line_size;
+ }
+ off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
+ cache_line_size : 0;
+ while (off) {
+ build_copy_store_pref(&buf, -off);
+ off -= cache_line_size;
+ }
+ uasm_l_copy_pref_both(&l, buf);
+ do {
+ build_copy_load_pref(&buf, off);
+ build_copy_load(&buf, T0, off);
+ build_copy_load_pref(&buf, off + copy_word_size);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load_pref(&buf, off + 2 * copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load_pref(&buf, off + 3 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < half_copy_loop_size);
+ pg_addiu(&buf, A1, A1, 2 * off);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_copy_load_pref(&buf, off);
+ build_copy_load(&buf, T0, off);
+ build_copy_load_pref(&buf, off + copy_word_size);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load_pref(&buf, off + 2 * copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load_pref(&buf, off + 3 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ if (off == -(4 * copy_word_size))
+ uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < 0);
+
+ if (pref_bias_copy_load - pref_bias_copy_store) {
+ pg_addiu(&buf, A2, A0,
+ pref_bias_copy_load - pref_bias_copy_store);
+ uasm_l_copy_pref_store(&l, buf);
+ off = 0;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < half_copy_loop_size);
+ pg_addiu(&buf, A1, A1, 2 * off);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ if (off == -(4 * copy_word_size))
+ uasm_il_bne(&buf, &r, A2, A0,
+ label_copy_pref_store);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < 0);
+ }
+
+ if (pref_bias_copy_store) {
+ pg_addiu(&buf, A2, A0, pref_bias_copy_store);
+ uasm_l_copy_nopref(&l, buf);
+ off = 0;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < half_copy_loop_size);
+ pg_addiu(&buf, A1, A1, 2 * off);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ if (off == -(4 * copy_word_size))
+ uasm_il_bne(&buf, &r, A2, A0,
+ label_copy_nopref);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < 0);
+ }
+
+ uasm_i_jr(&buf, RA);
+ uasm_i_nop(&buf);
+
+ BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+
+ uasm_resolve_relocs(relocs, labels);
+
+ pr_debug("Synthesized copy page handler (%u instructions).\n",
+ (u32)(buf - copy_page_array));
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+ for (i = 0; i < (buf - copy_page_array); i++)
+ pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+ pr_debug("\t.set pop\n");
+}
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+
+/*
+ * Pad descriptors to cacheline, since each is exclusively owned by a
+ * particular CPU.
+ */
+struct dmadscr {
+ u64 dscr_a;
+ u64 dscr_b;
+ u64 pad_a;
+ u64 pad_b;
+} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
+
+void sb1_dma_init(void)
+{
+ int i;
+
+ for (i = 0; i < DM_NUM_CHANNELS; i++) {
+ const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
+ V_DM_DSCR_BASE_RINGSZ(1);
+ void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
+
+ __raw_writeq(base_val, base_reg);
+ __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
+ __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
+ }
+}
+
+void clear_page(void *page)
+{
+ u64 to_phys = CPHYSADDR((unsigned long)page);
+ unsigned int cpu = smp_processor_id();
+
+ /* if the page is not in KSEG0, use old way */
+ if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
+ return clear_page_cpu(page);
+
+ page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
+ M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+
+ /*
+ * Don't really want to do it this way, but there's no
+ * reliable way to delay completion detection.
+ */
+ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+ & M_DM_DSCR_BASE_INTERRUPT))
+ ;
+ __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+}
+
+void copy_page(void *to, void *from)
+{
+ u64 from_phys = CPHYSADDR((unsigned long)from);
+ u64 to_phys = CPHYSADDR((unsigned long)to);
+ unsigned int cpu = smp_processor_id();
+
+ /* if any page is not in KSEG0, use old way */
+ if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
+ || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
+ return copy_page_cpu(to, from);
+
+ page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
+ M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+
+ /*
+ * Don't really want to do it this way, but there's no
+ * reliable way to delay completion detection.
+ */
+ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+ & M_DM_DSCR_BASE_INTERRUPT))
+ ;
+ __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+}
+
+#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
new file mode 100644
index 0000000..575e401
--- /dev/null
+++ b/arch/mips/mm/pgtable-32.c
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+void pgd_init(unsigned long page)
+{
+ unsigned long *p = (unsigned long *) page;
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i+=8) {
+ p[i + 0] = (unsigned long) invalid_pte_table;
+ p[i + 1] = (unsigned long) invalid_pte_table;
+ p[i + 2] = (unsigned long) invalid_pte_table;
+ p[i + 3] = (unsigned long) invalid_pte_table;
+ p[i + 4] = (unsigned long) invalid_pte_table;
+ p[i + 5] = (unsigned long) invalid_pte_table;
+ p[i + 6] = (unsigned long) invalid_pte_table;
+ p[i + 7] = (unsigned long) invalid_pte_table;
+ }
+}
+
+void __init pagetable_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd_base;
+#ifdef CONFIG_HIGHMEM
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+#endif
+
+ /* Initialize the entire pgd. */
+ pgd_init((unsigned long)swapper_pg_dir);
+ pgd_init((unsigned long)swapper_pg_dir
+ + sizeof(pgd_t) * USER_PTRS_PER_PGD);
+
+ pgd_base = swapper_pg_dir;
+
+ /*
+ * Fixed mappings:
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, 0, pgd_base);
+
+#ifdef CONFIG_HIGHMEM
+ /*
+ * Permanent kmaps:
+ */
+ vaddr = PKMAP_BASE;
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+ pgd = swapper_pg_dir + __pgd_offset(vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+#endif
+}
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
new file mode 100644
index 0000000..e4b565a
--- /dev/null
+++ b/arch/mips/mm/pgtable-64.c
@@ -0,0 +1,73 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000 by Silicon Graphics
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+void pgd_init(unsigned long page)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *) page;
+ end = p + PTRS_PER_PGD;
+
+ while (p < end) {
+ p[0] = (unsigned long) invalid_pmd_table;
+ p[1] = (unsigned long) invalid_pmd_table;
+ p[2] = (unsigned long) invalid_pmd_table;
+ p[3] = (unsigned long) invalid_pmd_table;
+ p[4] = (unsigned long) invalid_pmd_table;
+ p[5] = (unsigned long) invalid_pmd_table;
+ p[6] = (unsigned long) invalid_pmd_table;
+ p[7] = (unsigned long) invalid_pmd_table;
+ p += 8;
+ }
+}
+
+void pmd_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *) addr;
+ end = p + PTRS_PER_PMD;
+
+ while (p < end) {
+ p[0] = (unsigned long)pagetable;
+ p[1] = (unsigned long)pagetable;
+ p[2] = (unsigned long)pagetable;
+ p[3] = (unsigned long)pagetable;
+ p[4] = (unsigned long)pagetable;
+ p[5] = (unsigned long)pagetable;
+ p[6] = (unsigned long)pagetable;
+ p[7] = (unsigned long)pagetable;
+ p += 8;
+ }
+}
+
+void __init pagetable_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd_base;
+
+ /* Initialize the entire pgd. */
+ pgd_init((unsigned long)swapper_pg_dir);
+#ifdef MODULE_START
+ pgd_init((unsigned long)module_pg_dir);
+#endif
+ pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
+
+ pgd_base = swapper_pg_dir;
+ /*
+ * Fixed mappings:
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, 0, pgd_base);
+}
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
new file mode 100644
index 0000000..13adb57
--- /dev/null
+++ b/arch/mips/mm/sc-ip22.c
@@ -0,0 +1,177 @@
+/*
+ * sc-ip22.c: Indy cache management functions.
+ *
+ * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
+ * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/bcache.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/bootinfo.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgi/mc.h>
+
+/* Secondary cache size in bytes, if present. */
+static unsigned long scache_size;
+
+#undef DEBUG_CACHE
+
+#define SC_SIZE 0x00080000
+#define SC_LINE 32
+#define CI_MASK (SC_SIZE - SC_LINE)
+#define SC_INDEX(n) ((n) & CI_MASK)
+
+static inline void indy_sc_wipe(unsigned long first, unsigned long last)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ ".set\tpush\t\t\t# indy_sc_wipe\n\t"
+ ".set\tnoreorder\n\t"
+ ".set\tmips3\n\t"
+ ".set\tnoat\n\t"
+ "mfc0\t%2, $12\n\t"
+ "li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
+ "mtc0\t$1, $12\n\t"
+
+ "dli\t$1, 0x9000000080000000\n\t"
+ "or\t%0, $1\t\t\t# first line to flush\n\t"
+ "or\t%1, $1\t\t\t# last line to flush\n\t"
+ ".set\tat\n\t"
+
+ "1:\tsw\t$0, 0(%0)\n\t"
+ "bne\t%0, %1, 1b\n\t"
+ " daddu\t%0, 32\n\t"
+
+ "mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
+ "nop; nop; nop; nop;\n\t"
+ ".set\tpop"
+ : "=r" (first), "=r" (last), "=&r" (tmp)
+ : "0" (first), "1" (last));
+}
+
+static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
+{
+ unsigned long first_line, last_line;
+ unsigned long flags;
+
+#ifdef DEBUG_CACHE
+ printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
+#endif
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ /* Which lines to flush? */
+ first_line = SC_INDEX(addr);
+ last_line = SC_INDEX(addr + size - 1);
+
+ local_irq_save(flags);
+ if (first_line <= last_line) {
+ indy_sc_wipe(first_line, last_line);
+ goto out;
+ }
+
+ indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
+ indy_sc_wipe(0, last_line);
+out:
+ local_irq_restore(flags);
+}
+
+static void indy_sc_enable(void)
+{
+ unsigned long addr, tmp1, tmp2;
+
+ /* This is really cool... */
+#ifdef DEBUG_CACHE
+ printk("Enabling R4600 SCACHE\n");
+#endif
+ __asm__ __volatile__(
+ ".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set\tmips3\n\t"
+ "mfc0\t%2, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ "li\t%1, 0x80\n\t"
+ "mtc0\t%1, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ "li\t%0, 0x1\n\t"
+ "dsll\t%0, 31\n\t"
+ "lui\t%1, 0x9000\n\t"
+ "dsll32\t%1, 0\n\t"
+ "or\t%0, %1, %0\n\t"
+ "sb\t$0, 0(%0)\n\t"
+ "mtc0\t$0, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ "mtc0\t%2, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ ".set\tpop"
+ : "=r" (tmp1), "=r" (tmp2), "=r" (addr));
+}
+
+static void indy_sc_disable(void)
+{
+ unsigned long tmp1, tmp2, tmp3;
+
+#ifdef DEBUG_CACHE
+ printk("Disabling R4600 SCACHE\n");
+#endif
+ __asm__ __volatile__(
+ ".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set\tmips3\n\t"
+ "li\t%0, 0x1\n\t"
+ "dsll\t%0, 31\n\t"
+ "lui\t%1, 0x9000\n\t"
+ "dsll32\t%1, 0\n\t"
+ "or\t%0, %1, %0\n\t"
+ "mfc0\t%2, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ "li\t%1, 0x80\n\t"
+ "mtc0\t%1, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ "sh\t$0, 0(%0)\n\t"
+ "mtc0\t$0, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ "mtc0\t%2, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ ".set\tpop"
+ : "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
+}
+
+static inline int __init indy_sc_probe(void)
+{
+ unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17);
+ if (size == 0)
+ return 0;
+
+ size <<= PAGE_SHIFT;
+ printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
+ size >> 10);
+ scache_size = size;
+
+ return 1;
+}
+
+/* XXX Check with wje if the Indy caches can differentiate between
+ writeback + invalidate and just invalidate. */
+static struct bcache_ops indy_sc_ops = {
+ .bc_enable = indy_sc_enable,
+ .bc_disable = indy_sc_disable,
+ .bc_wback_inv = indy_sc_wback_invalidate,
+ .bc_inv = indy_sc_wback_invalidate
+};
+
+void __cpuinit indy_sc_init(void)
+{
+ if (indy_sc_probe()) {
+ indy_sc_enable();
+ bcops = &indy_sc_ops;
+ }
+}
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
new file mode 100644
index 0000000..b55c2d1
--- /dev/null
+++ b/arch/mips/mm/sc-mips.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2006 Chris Dearman (chris@mips.com),
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/mipsregs.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+
+/*
+ * MIPS32/MIPS64 L2 cache handling
+ */
+
+/*
+ * Writeback and invalidate the secondary cache before DMA.
+ */
+static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
+{
+ blast_scache_range(addr, addr + size);
+}
+
+/*
+ * Invalidate the secondary cache before DMA.
+ */
+static void mips_sc_inv(unsigned long addr, unsigned long size)
+{
+ blast_inv_scache_range(addr, addr + size);
+}
+
+static void mips_sc_enable(void)
+{
+ /* L2 cache is permanently enabled */
+}
+
+static void mips_sc_disable(void)
+{
+ /* L2 cache is permanently enabled */
+}
+
+static struct bcache_ops mips_sc_ops = {
+ .bc_enable = mips_sc_enable,
+ .bc_disable = mips_sc_disable,
+ .bc_wback_inv = mips_sc_wback_inv,
+ .bc_inv = mips_sc_inv
+};
+
+static inline int __init mips_sc_probe(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config1, config2;
+ unsigned int tmp;
+
+ /* Mark as not present until probe completed */
+ c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
+
+ /* Ignore anything but MIPSxx processors */
+ if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
+ c->isa_level != MIPS_CPU_ISA_M32R2 &&
+ c->isa_level != MIPS_CPU_ISA_M64R1 &&
+ c->isa_level != MIPS_CPU_ISA_M64R2)
+ return 0;
+
+ /* Does this MIPS32/MIPS64 CPU have a config2 register? */
+ config1 = read_c0_config1();
+ if (!(config1 & MIPS_CONF_M))
+ return 0;
+
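+	/*
+	 * Decode the L2 geometry from Config2 as the MIPS32/64 PRA defines
+	 * it: SL (bits 7:4) gives the line size, SS (bits 11:8) the sets per
+	 * way and SA (bits 3:0) the associativity.
+	 */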
+ config2 = read_c0_config2();
+ tmp = (config2 >> 4) & 0x0f;
+ if (0 < tmp && tmp <= 7)
+ c->scache.linesz = 2 << tmp;
+ else
+ return 0;
+
+ tmp = (config2 >> 8) & 0x0f;
+ if (0 <= tmp && tmp <= 7)
+ c->scache.sets = 64 << tmp;
+ else
+ return 0;
+
+ tmp = (config2 >> 0) & 0x0f;
+ if (0 <= tmp && tmp <= 7)
+ c->scache.ways = tmp + 1;
+ else
+ return 0;
+
+ c->scache.waysize = c->scache.sets * c->scache.linesz;
+ c->scache.waybit = __ffs(c->scache.waysize);
+
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return 1;
+}
+
+int __cpuinit mips_sc_init(void)
+{
+ int found = mips_sc_probe();
+ if (found) {
+ mips_sc_enable();
+ bcops = &mips_sc_ops;
+ }
+ return found;
+}
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
new file mode 100644
index 0000000..f330d38
--- /dev/null
+++ b/arch/mips/mm/sc-r5k.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
+ * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/mipsregs.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+
+/* Secondary cache size in bytes, if present. */
+static unsigned long scache_size;
+
+#define SC_LINE 32
+#define SC_PAGE (128*SC_LINE)
+
+static inline void blast_r5000_scache(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + scache_size;
+
+ while(start < end) {
+ cache_op(R5K_Page_Invalidate_S, start);
+ start += SC_PAGE;
+ }
+}
+
+static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
+{
+ unsigned long end, a;
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ if (size >= scache_size) {
+ blast_r5000_scache();
+ return;
+ }
+
+ /* On the R5000 secondary cache we cannot
+ * invalidate less than a page at a time.
+ * The secondary cache is physically indexed, write-through.
+ */
+ a = addr & ~(SC_PAGE - 1);
+ end = (addr + size - 1) & ~(SC_PAGE - 1);
+ while (a <= end) {
+ cache_op(R5K_Page_Invalidate_S, a);
+ a += SC_PAGE;
+ }
+}
+
+static void r5k_sc_enable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ set_c0_config(R5K_CONF_SE);
+ blast_r5000_scache();
+ local_irq_restore(flags);
+}
+
+static void r5k_sc_disable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_r5000_scache();
+ clear_c0_config(R5K_CONF_SE);
+ local_irq_restore(flags);
+}
+
+static inline int __init r5k_sc_probe(void)
+{
+ unsigned long config = read_c0_config();
+
+ if (config & CONF_SC)
+		return 0;
+
+ scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
+
+ printk("R5000 SCACHE size %ldkB, linesize 32 bytes.\n",
+ scache_size >> 10);
+
+ return 1;
+}
+
+static struct bcache_ops r5k_sc_ops = {
+ .bc_enable = r5k_sc_enable,
+ .bc_disable = r5k_sc_disable,
+ .bc_wback_inv = r5k_dma_cache_inv_sc,
+ .bc_inv = r5k_dma_cache_inv_sc
+};
+
+void __cpuinit r5k_sc_init(void)
+{
+ if (r5k_sc_probe()) {
+ r5k_sc_enable();
+ bcops = &r5k_sc_ops;
+ }
+}
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
new file mode 100644
index 0000000..e3abfb2
--- /dev/null
+++ b/arch/mips/mm/sc-rm7k.c
@@ -0,0 +1,173 @@
+/*
+ * sc-rm7k.c: RM7000 cache management functions.
+ *
+ * Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/addrspace.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
+
+/* Primary cache parameters. */
+#define sc_lsize 32
+#define tc_pagesize (32*128)
+
+/* Secondary cache parameters. */
+#define scache_size (256*1024) /* Fixed to 256KiB on RM7000 */
+
+extern unsigned long icache_way_size, dcache_way_size;
+
+#include <asm/r4kcache.h>
+
+int rm7k_tcache_enabled;
+
+/*
+ * Writeback and invalidate the primary dcache before DMA.
+ * (XXX These need to be fixed ...)
+ */
+static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end, a;
+
+ pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ blast_scache_range(addr, addr + size);
+
+ if (!rm7k_tcache_enabled)
+ return;
+
+ a = addr & ~(tc_pagesize - 1);
+ end = (addr + size - 1) & ~(tc_pagesize - 1);
+ while(1) {
+ invalidate_tcache_page(a); /* Page_Invalidate_T */
+ if (a == end)
+ break;
+ a += tc_pagesize;
+ }
+}
+
+static void rm7k_sc_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end, a;
+
+ pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ blast_inv_scache_range(addr, addr + size);
+
+ if (!rm7k_tcache_enabled)
+ return;
+
+ a = addr & ~(tc_pagesize - 1);
+ end = (addr + size - 1) & ~(tc_pagesize - 1);
+ while(1) {
+ invalidate_tcache_page(a); /* Page_Invalidate_T */
+ if (a == end)
+ break;
+ a += tc_pagesize;
+ }
+}
+
+/*
+ * This function is executed in uncached address space.
+ */
+static __cpuinit void __rm7k_sc_enable(void)
+{
+ int i;
+
+ set_c0_config(RM7K_CONF_SE);
+
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (i = 0; i < scache_size; i += sc_lsize) {
+ __asm__ __volatile__ (
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
+ }
+}
+
+static __cpuinit void rm7k_sc_enable(void)
+{
+ if (read_c0_config() & RM7K_CONF_SE)
+ return;
+
+ printk(KERN_INFO "Enabling secondary cache...\n");
+ run_uncached(__rm7k_sc_enable);
+}
+
+static void rm7k_sc_disable(void)
+{
+ clear_c0_config(RM7K_CONF_SE);
+}
+
+struct bcache_ops rm7k_sc_ops = {
+ .bc_enable = rm7k_sc_enable,
+ .bc_disable = rm7k_sc_disable,
+ .bc_wback_inv = rm7k_sc_wback_inv,
+ .bc_inv = rm7k_sc_inv
+};
+
+void __cpuinit rm7k_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config = read_c0_config();
+
+ if ((config & RM7K_CONF_SC))
+ return;
+
+ c->scache.linesz = sc_lsize;
+ c->scache.ways = 4;
+ c->scache.waybit= __ffs(scache_size / c->scache.ways);
+ c->scache.waysize = scache_size / c->scache.ways;
+ c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
+ printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
+ (scache_size >> 10), sc_lsize);
+
+ if (!(config & RM7K_CONF_SE))
+ rm7k_sc_enable();
+
+ /*
+ * While we're at it let's deal with the tertiary cache.
+ */
+ if (!(config & RM7K_CONF_TC)) {
+
+ /*
+ * We can't enable the L3 cache yet. There may be board-specific
+ * magic necessary to turn it on, and blindly asking the CPU to
+		 * start using it may give cache errors.
+ *
+ * Also, board-specific knowledge may allow us to use the
+ * CACHE Flash_Invalidate_T instruction if the tag RAM supports
+ * it, and may specify the size of the L3 cache so we don't have
+ * to probe it.
+ */
+ printk(KERN_INFO "Tertiary cache present, %s enabled\n",
+ (config & RM7K_CONF_TE) ? "already" : "not (yet)");
+
+ if ((config & RM7K_CONF_TE))
+ rm7k_tcache_enabled = 1;
+ }
+
+ bcops = &rm7k_sc_ops;
+}
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
new file mode 100644
index 0000000..f0cf46a
--- /dev/null
+++ b/arch/mips/mm/tlb-r3k.c
@@ -0,0 +1,285 @@
+/*
+ * r2300.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * with a lot of changes to make this thing work for R3000s.
+ * Tx39XX R4k-style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ * Copyright (C) 2002 Ralf Baechle
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+#undef DEBUG_TLB
+
+extern void build_tlb_refill_handler(void);
+
+/* CP0 hazard avoidance. */
+#define BARRIER \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "nop\n\t" \
+ ".set pop\n\t")
+
+int r3k_have_wired_reg; /* should be in cpu_data? */
+
+/* TLB operations. */
+void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry;
+
+#ifdef DEBUG_TLB
+ printk("[tlball]");
+#endif
+
+ local_irq_save(flags);
+ old_ctx = read_c0_entryhi() & ASID_MASK;
+ write_c0_entrylo0(0);
+ entry = r3k_have_wired_reg ? read_c0_wired() : 8;
+ for (; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_index(entry << 8);
+ write_c0_entryhi((entry | 0x80000) << 12);
+ BARRIER;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(old_ctx);
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+#ifdef DEBUG_TLB
+ printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
+#endif
+ drop_mmu_context(mm, cpu);
+ }
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ unsigned long flags;
+ int size;
+
+#ifdef DEBUG_TLB
+ printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
+ cpu_context(cpu, mm) & ASID_MASK, start, end);
+#endif
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size <= current_cpu_data.tlbsize) {
+ int oldpid = read_c0_entryhi() & ASID_MASK;
+ int newpid = cpu_context(cpu, mm) & ASID_MASK;
+
+ start &= PAGE_MASK;
+ end += PAGE_SIZE - 1;
+ end &= PAGE_MASK;
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start | newpid);
+ start += PAGE_SIZE; /* BARRIER */
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ continue;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(oldpid);
+ } else {
+ drop_mmu_context(mm, cpu);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size;
+
+#ifdef DEBUG_TLB
+	printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
+#endif
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size <= current_cpu_data.tlbsize) {
+ int pid = read_c0_entryhi();
+
+ start &= PAGE_MASK;
+ end += PAGE_SIZE - 1;
+ end &= PAGE_MASK;
+
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start);
+ start += PAGE_SIZE; /* BARRIER */
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ continue;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(pid);
+ } else {
+ local_flush_tlb_all();
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+
+ if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
+ unsigned long flags;
+ int oldpid, newpid, idx;
+
+#ifdef DEBUG_TLB
+ printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
+#endif
+ newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+ page &= PAGE_MASK;
+ local_irq_save(flags);
+ oldpid = read_c0_entryhi() & ASID_MASK;
+ write_c0_entryhi(page | newpid);
+ BARRIER;
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ goto finish;
+ tlb_write_indexed();
+
+finish:
+ write_c0_entryhi(oldpid);
+ local_irq_restore(flags);
+ }
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ int idx, pid;
+
+ /*
+	 * Handle the debugger faulting in pages for the debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ pid = read_c0_entryhi() & ASID_MASK;
+
+#ifdef DEBUG_TLB
+ if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+ printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
+ (cpu_context(cpu, vma->vm_mm)), pid);
+ }
+#endif
+
+ local_irq_save(flags);
+ address &= PAGE_MASK;
+ write_c0_entryhi(address | pid);
+ BARRIER;
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(pte_val(pte));
+ write_c0_entryhi(address | pid);
+ if (idx < 0) { /* BARRIER */
+ tlb_write_random();
+ } else {
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(pid);
+ local_irq_restore(flags);
+}
+
+void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ static unsigned long wired = 0;
+
+ if (r3k_have_wired_reg) { /* TX39XX */
+ unsigned long old_pagemask;
+ unsigned long w;
+
+#ifdef DEBUG_TLB
+		printk("[tlbwired<entry lo0 %8lx, hi %8lx, pagemask %8lx>]\n",
+ entrylo0, entryhi, pagemask);
+#endif
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_pagemask = read_c0_pagemask();
+ w = read_c0_wired();
+ write_c0_wired(w + 1);
+ write_c0_index(w << 8);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ BARRIER;
+ tlb_write_indexed();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+ local_flush_tlb_all();
+ local_irq_restore(flags);
+
+ } else if (wired < 8) {
+#ifdef DEBUG_TLB
+		printk("[tlbwired<entry lo0 %8lx, hi %8lx>]\n",
+ entrylo0, entryhi);
+#endif
+
+ local_irq_save(flags);
+ old_ctx = read_c0_entryhi() & ASID_MASK;
+ write_c0_entrylo0(entrylo0);
+ write_c0_entryhi(entryhi);
+ write_c0_index(wired);
+ wired++; /* BARRIER */
+ tlb_write_indexed();
+ write_c0_entryhi(old_ctx);
+ local_flush_tlb_all();
+ local_irq_restore(flags);
+ }
+}
+
+void __cpuinit tlb_init(void)
+{
+ local_flush_tlb_all();
+
+ build_tlb_refill_handler();
+}
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
new file mode 100644
index 0000000..5ce2fa7
--- /dev/null
+++ b/arch/mips/mm/tlb-r4k.c
@@ -0,0 +1,500 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+extern void build_tlb_refill_handler(void);
+
+/*
+ * Make sure all entries differ. If they're not different
+ * MIPS32 will take revenge ...
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
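+/*
+ * For illustration: with 4KB pages (PAGE_SHIFT == 12) this yields
+ * CKSEG0, CKSEG0 + 0x2000, CKSEG0 + 0x4000, ... - stepping by
+ * 2 * PAGE_SIZE so every index gets a distinct VPN2.
+ */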
+
+/* Atomicity and interruptability */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+ { \
+ unsigned int mvpflags; \
+ local_irq_save(flags);\
+ mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+ evpe(mvpflags); \
+ local_irq_restore(flags); \
+ }
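+/*
+ * Note that ENTER_CRITICAL opens a block (declaring mvpflags) which the
+ * matching EXIT_CRITICAL closes, so the two must always be paired.
+ */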
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+#if defined(CONFIG_CPU_LOONGSON2)
+/*
+ * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
+ * unfortunately, the ITLB is not totally transparent to software.
+ */
+#define FLUSH_ITLB write_c0_diag(4);
+
+#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
+
+#else
+
+#define FLUSH_ITLB
+#define FLUSH_ITLB_VM(vma)
+
+#endif
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry;
+
+ ENTER_CRITICAL(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+ entry = read_c0_wired();
+
+ /* Blast 'em all away. */
+ while (entry < current_cpu_data.tlbsize) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ entry++;
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ FLUSH_ITLB;
+ EXIT_CRITICAL(flags);
+}
+
+/*
+ * All entries common to a mm share an asid.  To effectively flush
+ * these entries, we just bump the asid.
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu;
+
+ preempt_disable();
+
+ cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ drop_mmu_context(mm, cpu);
+ }
+
+ preempt_enable();
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ unsigned long flags;
+ int size;
+
+ ENTER_CRITICAL(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+ if (size <= current_cpu_data.tlbsize/2) {
+ int oldpid = read_c0_entryhi();
+ int newpid = cpu_asid(cpu, mm);
+
+ start &= (PAGE_MASK << 1);
+ end += ((PAGE_SIZE << 1) - 1);
+ end &= (PAGE_MASK << 1);
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start | newpid);
+ start += (PAGE_SIZE << 1);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx < 0)
+ continue;
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(oldpid);
+ } else {
+ drop_mmu_context(mm, cpu);
+ }
+ FLUSH_ITLB;
+ EXIT_CRITICAL(flags);
+ }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size;
+
+ ENTER_CRITICAL(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+ if (size <= current_cpu_data.tlbsize / 2) {
+ int pid = read_c0_entryhi();
+
+ start &= (PAGE_MASK << 1);
+ end += ((PAGE_SIZE << 1) - 1);
+ end &= (PAGE_MASK << 1);
+
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start);
+ start += (PAGE_SIZE << 1);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx < 0)
+ continue;
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(pid);
+ } else {
+ local_flush_tlb_all();
+ }
+ FLUSH_ITLB;
+ EXIT_CRITICAL(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, vma->vm_mm) != 0) {
+ unsigned long flags;
+ int oldpid, newpid, idx;
+
+ newpid = cpu_asid(cpu, vma->vm_mm);
+ page &= (PAGE_MASK << 1);
+ ENTER_CRITICAL(flags);
+ oldpid = read_c0_entryhi();
+ write_c0_entryhi(page | newpid);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx < 0)
+ goto finish;
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ finish:
+ write_c0_entryhi(oldpid);
+ FLUSH_ITLB_VM(vma);
+ EXIT_CRITICAL(flags);
+ }
+}
+
+/*
+ * This one is only used for pages with the global bit set so we don't care
+ * much about the ASID.
+ */
+void local_flush_tlb_one(unsigned long page)
+{
+ unsigned long flags;
+ int oldpid, idx;
+
+ ENTER_CRITICAL(flags);
+ oldpid = read_c0_entryhi();
+ page &= (PAGE_MASK << 1);
+ write_c0_entryhi(page);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx >= 0) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+ write_c0_entryhi(oldpid);
+ FLUSH_ITLB;
+ EXIT_CRITICAL(flags);
+}
+
+/*
+ * We will need multiple versions of update_mmu_cache(), one that just
+ * updates the TLB with the new pte(s), and another which also checks
+ * for the R4k "end of page" hardware bug and works around it.
+ */
+void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int idx, pid;
+
+ /*
+ * Handle debugger faulting in for debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ ENTER_CRITICAL(flags);
+
+ pid = read_c0_entryhi() & ASID_MASK;
+ address &= (PAGE_MASK << 1);
+ write_c0_entryhi(address | pid);
+ pgdp = pgd_offset(vma->vm_mm, address);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ pudp = pud_offset(pgdp, address);
+ pmdp = pmd_offset(pudp, address);
+ idx = read_c0_index();
+ ptep = pte_offset_map(pmdp, address);
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+ write_c0_entrylo0(ptep->pte_high);
+ ptep++;
+ write_c0_entrylo1(ptep->pte_high);
+#else
+ write_c0_entrylo0(pte_val(*ptep++) >> 6);
+ write_c0_entrylo1(pte_val(*ptep) >> 6);
+#endif
+ mtc0_tlbw_hazard();
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ FLUSH_ITLB_VM(vma);
+ EXIT_CRITICAL(flags);
+}
+
+#if 0
+static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ unsigned int asid;
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int idx;
+
+ ENTER_CRITICAL(flags);
+ address &= (PAGE_MASK << 1);
+ asid = read_c0_entryhi() & ASID_MASK;
+ write_c0_entryhi(address | asid);
+ pgdp = pgd_offset(vma->vm_mm, address);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ pmdp = pmd_offset(pgdp, address);
+ idx = read_c0_index();
+ ptep = pte_offset_map(pmdp, address);
+ write_c0_entrylo0(pte_val(*ptep++) >> 6);
+ write_c0_entrylo1(pte_val(*ptep) >> 6);
+ mtc0_tlbw_hazard();
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ EXIT_CRITICAL(flags);
+}
+#endif
+
+void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ ENTER_CRITICAL(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ wired = read_c0_wired();
+ write_c0_wired(wired + 1);
+ write_c0_index(wired);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(old_pagemask);
+ local_flush_tlb_all();
+ EXIT_CRITICAL(flags);
+}
+
+/*
+ * Used for loading TLB entries before trap_init() has started, when we
+ * don't actually want to add a wired entry which remains throughout the
+ * lifetime of the system
+ */
+
+static int temp_tlb_entry __cpuinitdata;
+
+__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ int ret = 0;
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ ENTER_CRITICAL(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ wired = read_c0_wired();
+ if (--temp_tlb_entry < wired) {
+ printk(KERN_WARNING
+ "No TLB space left for add_temporary_entry\n");
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ write_c0_index(temp_tlb_entry);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+out:
+ EXIT_CRITICAL(flags);
+ return ret;
+}
+
+static void __cpuinit probe_tlb(unsigned long config)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int reg;
+
+ /*
+ * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config1 register
+ * is not supported and we assume an R4k-style TLB. CPU probing has
+ * already figured out the number of TLB entries.
+ */
+ if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
+ return;
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * If TLB is shared in SMTC system, total size already
+ * has been calculated and written into cpu_data tlbsize
+ */
+ if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+ return;
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+ reg = read_c0_config1();
+ if (!((config >> 7) & 3))
+ panic("No TLB present");
+
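+ /* Config1 bits [30:25] hold the number of TLB entries minus one. */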
+ c->tlbsize = ((reg >> 25) & 0x3f) + 1;
+}
+
+static int __cpuinitdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+ get_option(&str, &ntlb);
+ return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
+void __cpuinit tlb_init(void)
+{
+ unsigned int config = read_c0_config();
+
+ /*
+ * You should never change this register:
+ * - On R4600 1.7 the tlbp never hits for pages smaller than
+ * the value in the c0_pagemask register.
+ * - The entire mm handling assumes the c0_pagemask register to
+ * be set to fixed-size pages.
+ */
+ probe_tlb(config);
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ write_c0_wired(0);
+ write_c0_framemask(0);
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+
+ if (ntlb) {
+ if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+ int wired = current_cpu_data.tlbsize - ntlb;
+ write_c0_wired(wired);
+ write_c0_index(wired-1);
+ printk("Restricting TLB to %d entries\n", ntlb);
+ } else
+ printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+ }
+
+ build_tlb_refill_handler();
+}
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
new file mode 100644
index 0000000..4f01a3b
--- /dev/null
+++ b/arch/mips/mm/tlb-r8k.c
@@ -0,0 +1,249 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+extern void build_tlb_refill_handler(void);
+
+#define TFP_TLB_SIZE 384
+#define TFP_TLB_SET_SHIFT 7
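+/*
+ * The TFP (R8000) TLB is handled as 384 entries organized as three sets
+ * of 128 (see probe_tlb() below); entry >> TFP_TLB_SET_SHIFT selects the
+ * set when flushing.
+ */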
+
+/* CP0 hazard avoidance. */
+#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
+ "nop; nop; nop; nop; nop; nop;\n\t" \
+ ".set reorder\n\t")
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry;
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ write_c0_entrylo(0);
+
+ for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
+ write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
+ write_c0_vaddr(entry << PAGE_SHIFT);
+ write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+ mtc0_tlbw_hazard();
+ tlb_write();
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0)
+ drop_mmu_context(mm, cpu);
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ int oldpid, newpid, size;
+
+ if (!cpu_context(cpu, mm))
+ return;
+
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+
+ local_irq_save(flags);
+
+ if (size > TFP_TLB_SIZE / 2) {
+ drop_mmu_context(mm, cpu);
+ goto out_restore;
+ }
+
+ oldpid = read_c0_entryhi();
+ newpid = cpu_asid(cpu, mm);
+
+ write_c0_entrylo(0);
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ while (start < end) {
+ signed long idx;
+
+ write_c0_vaddr(start);
+ write_c0_entryhi(start);
+ start += PAGE_SIZE;
+ tlb_probe();
+ idx = read_c0_tlbset();
+ if (idx < 0)
+ continue;
+
+ write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ tlb_write();
+ }
+ write_c0_entryhi(oldpid);
+
+out_restore:
+ local_irq_restore(flags);
+}
+
+/* Usable for KV1 addresses only! */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size;
+
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+
+ if (size > TFP_TLB_SIZE / 2) {
+ local_flush_tlb_all();
+ return;
+ }
+
+ local_irq_save(flags);
+
+ write_c0_entrylo(0);
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ while (start < end) {
+ signed long idx;
+
+ write_c0_vaddr(start);
+ write_c0_entryhi(start);
+ start += PAGE_SIZE;
+ tlb_probe();
+ idx = read_c0_tlbset();
+ if (idx < 0)
+ continue;
+
+ write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ tlb_write();
+ }
+
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ int oldpid, newpid;
+ signed long idx;
+
+ if (!cpu_context(cpu, vma->vm_mm))
+ return;
+
+ newpid = cpu_asid(cpu, vma->vm_mm);
+ page &= PAGE_MASK;
+ local_irq_save(flags);
+ oldpid = read_c0_entryhi();
+ write_c0_vaddr(page);
+ write_c0_entryhi(newpid);
+ tlb_probe();
+ idx = read_c0_tlbset();
+ if (idx < 0)
+ goto finish;
+
+ write_c0_entrylo(0);
+ write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ tlb_write();
+
+finish:
+ write_c0_entryhi(oldpid);
+ local_irq_restore(flags);
+}
+
+/*
+ * We will need multiple versions of update_mmu_cache(), one that just
+ * updates the TLB with the new pte(s), and another which also checks
+ * for the R4k "end of page" hardware bug and works around it.
+ */
+void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int pid;
+
+ /*
+ * Handle debugger faulting in for debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ pid = read_c0_entryhi() & ASID_MASK;
+
+ local_irq_save(flags);
+ address &= PAGE_MASK;
+ write_c0_vaddr(address);
+ write_c0_entryhi(pid);
+ pgdp = pgd_offset(vma->vm_mm, address);
+ pmdp = pmd_offset(pgdp, address);
+ ptep = pte_offset_map(pmdp, address);
+ tlb_probe();
+
+ write_c0_entrylo(pte_val(*ptep++) >> 6);
+ tlb_write();
+
+ write_c0_entryhi(pid);
+ local_irq_restore(flags);
+}
+
+static void __cpuinit probe_tlb(unsigned long config)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
+}
+
+void __cpuinit tlb_init(void)
+{
+ unsigned int config = read_c0_config();
+ unsigned long status;
+
+ probe_tlb(config);
+
+ status = read_c0_status();
+ status &= ~(ST0_UPS | ST0_KPS);
+#ifdef CONFIG_PAGE_SIZE_4KB
+ status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+ status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+ status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+ status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
+#endif
+ write_c0_status(status);
+
+ write_c0_wired(0);
+
+ local_flush_tlb_all();
+
+ build_tlb_refill_handler();
+}
diff --git a/arch/mips/mm/tlbex-fault.S b/arch/mips/mm/tlbex-fault.S
new file mode 100644
index 0000000..e99eaa1
--- /dev/null
+++ b/arch/mips/mm/tlbex-fault.S
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .macro tlb_do_page_fault, write
+ NESTED(tlb_do_page_fault_\write, PT_SIZE, sp)
+ SAVE_ALL
+ MFC0 a2, CP0_BADVADDR
+ KMODE
+ move a0, sp
+ REG_S a2, PT_BVADDR(sp)
+ li a1, \write
+ PTR_LA ra, ret_from_exception
+ j do_page_fault
+ END(tlb_do_page_fault_\write)
+ .endm
+
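+/*
+ * Instantiated below: tlb_do_page_fault_0 handles read faults and
+ * tlb_do_page_fault_1 handles write faults; \write becomes the 'write'
+ * argument passed to do_page_fault().
+ */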
+ tlb_do_page_fault 0
+ tlb_do_page_fault 1
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
new file mode 100644
index 0000000..979cf91
--- /dev/null
+++ b/arch/mips/mm/tlbex.c
@@ -0,0 +1,1282 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Synthesize TLB refill handlers at runtime.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me away haha
+ * they're coming to take me away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+
+#include "uasm.h"
+
+static inline int r45k_bvahwbug(void)
+{
+ /* XXX: We should probe for the presence of this bug, but we don't. */
+ return 0;
+}
+
+static inline int r4k_250MHZhwbug(void)
+{
+ /* XXX: We should probe for the presence of this bug, but we don't. */
+ return 0;
+}
+
+static inline int __maybe_unused bcm1250_m3_war(void)
+{
+ return BCM1250_M3_WAR;
+}
+
+static inline int __maybe_unused r10000_llsc_war(void)
+{
+ return R10000_LLSC_WAR;
+}
+
+/*
+ * Found by experiment: at least some revisions of the 4kc throw a
+ * machine check exception under some circumstances, triggered by invalid
+ * values in the index register. Delaying the tlbp instruction until
+ * after the next branch, plus adding an additional nop in front of
+ * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
+ * why; it's not an issue caused by the core RTL.
+ *
+ */
+static int __cpuinit m4kc_tlbp_war(void)
+{
+ return (current_cpu_data.processor_id & 0xffff00) ==
+ (PRID_COMP_MIPS | PRID_IMP_4KC);
+}
+
+/* Handle labels (which must be positive integers). */
+enum label_id {
+ label_second_part = 1,
+ label_leave,
+#ifdef MODULE_START
+ label_module_alloc,
+#endif
+ label_vmalloc,
+ label_vmalloc_done,
+ label_tlbw_hazard,
+ label_split,
+ label_nopage_tlbl,
+ label_nopage_tlbs,
+ label_nopage_tlbm,
+ label_smp_pgtable_change,
+ label_r3000_write_probe_fail,
+};
+
+UASM_L_LA(_second_part)
+UASM_L_LA(_leave)
+#ifdef MODULE_START
+UASM_L_LA(_module_alloc)
+#endif
+UASM_L_LA(_vmalloc)
+UASM_L_LA(_vmalloc_done)
+UASM_L_LA(_tlbw_hazard)
+UASM_L_LA(_split)
+UASM_L_LA(_nopage_tlbl)
+UASM_L_LA(_nopage_tlbs)
+UASM_L_LA(_nopage_tlbm)
+UASM_L_LA(_smp_pgtable_change)
+UASM_L_LA(_r3000_write_probe_fail)
+
+/*
+ * For debug purposes.
+ */
+static inline void dump_handler(const u32 *handler, int count)
+{
+ int i;
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+
+ for (i = 0; i < count; i++)
+ pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
+
+ pr_debug("\t.set pop\n");
+}
+
+/* The only general purpose registers allowed in TLB handlers. */
+#define K0 26
+#define K1 27
+
+/* Some CP0 registers */
+#define C0_INDEX 0, 0
+#define C0_ENTRYLO0 2, 0
+#define C0_TCBIND 2, 2
+#define C0_ENTRYLO1 3, 0
+#define C0_CONTEXT 4, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_EPC 14, 0
+#define C0_XCONTEXT 20, 0
+
+#ifdef CONFIG_64BIT
+# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
+#else
+# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
+#endif
+
+/*
+ * The worst case length of the handler is around 18 instructions for
+ * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
+ * Maximum space available is 32 instructions for R3000 and 64
+ * instructions for R4000.
+ *
+ * We deliberately chose a buffer size of 128, so we won't scribble
+ * over anything important on overflow before we panic.
+ */
+static u32 tlb_handler[128] __cpuinitdata;
+
+/* simply assume worst case size for labels and relocs */
+static struct uasm_label labels[128] __cpuinitdata;
+static struct uasm_reloc relocs[128] __cpuinitdata;
+
+/*
+ * The R3000 TLB handler is simple.
+ */
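+/*
+ * For reference, the handler built below corresponds roughly to this
+ * sketch (one instruction per uasm_i_* call):
+ *
+ *	mfc0	k0, c0_badvaddr
+ *	lui	k1, %hi(pgd_current)
+ *	lw	k1, %lo(pgd_current)(k1)
+ *	srl	k0, k0, 22
+ *	sll	k0, k0, 2
+ *	addu	k1, k1, k0		# address of the pgd entry
+ *	mfc0	k0, c0_context
+ *	lw	k1, 0(k1)		# page table base
+ *	andi	k0, k0, 0xffc		# pte offset from BadVPN
+ *	addu	k1, k1, k0
+ *	lw	k0, 0(k1)		# the pte itself
+ *	nop
+ *	mtc0	k0, c0_entrylo0
+ *	mfc0	k1, c0_epc
+ *	tlbwr
+ *	jr	k1
+ *	rfe
+ */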
+static void __cpuinit build_r3000_tlb_refill_handler(void)
+{
+ long pgdc = (long)pgd_current;
+ u32 *p;
+
+ memset(tlb_handler, 0, sizeof(tlb_handler));
+ p = tlb_handler;
+
+ uasm_i_mfc0(&p, K0, C0_BADVADDR);
+ uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
+ uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
+ uasm_i_srl(&p, K0, K0, 22); /* load delay */
+ uasm_i_sll(&p, K0, K0, 2);
+ uasm_i_addu(&p, K1, K1, K0);
+ uasm_i_mfc0(&p, K0, C0_CONTEXT);
+ uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
+ uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
+ uasm_i_addu(&p, K1, K1, K0);
+ uasm_i_lw(&p, K0, 0, K1);
+ uasm_i_nop(&p); /* load delay */
+ uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
+ uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
+ uasm_i_tlbwr(&p); /* cp0 delay */
+ uasm_i_jr(&p, K1);
+ uasm_i_rfe(&p); /* branch delay */
+
+ if (p > tlb_handler + 32)
+ panic("TLB refill handler space exceeded");
+
+ pr_debug("Wrote TLB refill handler (%u instructions).\n",
+ (unsigned int)(p - tlb_handler));
+
+ memcpy((void *)ebase, tlb_handler, 0x80);
+
+ dump_handler((u32 *)ebase, 32);
+}
+
+/*
+ * The R4000 TLB handler is much more complicated. We have two
+ * consecutive handler areas with space for 32 instructions each.
+ * Since they aren't used at the same time, we can overflow into the
+ * other one. To keep things simple, we first assume linear space,
+ * then we relocate it to the final handler layout as needed.
+ */
+static u32 final_handler[64] __cpuinitdata;
+
+/*
+ * Hazards
+ *
+ * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
+ * 2. A timing hazard exists for the TLBP instruction.
+ *
+ * stalling_instruction
+ * TLBP
+ *
+ * The JTLB is being read for the TLBP throughout the stall generated by the
+ * previous instruction. This is not really correct as the stalling instruction
+ * can modify the address used to access the JTLB. The failure symptom is that
+ * the TLBP instruction will use an address created for the stalling instruction
+ * and not the address held in C0_ENHI and thus report the wrong results.
+ *
+ * The software work-around is to not allow the instruction preceding the TLBP
+ * to stall - make it an NOP or some other instruction guaranteed not to stall.
+ *
+ * Errata 2 will not be fixed. This errata is also on the R5000.
+ *
+ * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
+ */
+static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+{
+ switch (current_cpu_type()) {
+ /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
+ case CPU_R4600:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5000A:
+ case CPU_NEVADA:
+ uasm_i_nop(p);
+ uasm_i_tlbp(p);
+ break;
+
+ default:
+ uasm_i_tlbp(p);
+ break;
+ }
+}
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry { tlb_random, tlb_indexed };
+
+static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ enum tlb_write_entry wmode)
+{
+ void(*tlbw)(u32 **) = NULL;
+
+ switch (wmode) {
+ case tlb_random: tlbw = uasm_i_tlbwr; break;
+ case tlb_indexed: tlbw = uasm_i_tlbwi; break;
+ }
+
+ if (cpu_has_mips_r2) {
+ uasm_i_ehb(p);
+ tlbw(p);
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ /*
+ * This branch uses up a mtc0 hazard nop slot and saves
+ * two nops after the tlbw instruction.
+ */
+ uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+ tlbw(p);
+ uasm_l_tlbw_hazard(l, *p);
+ uasm_i_nop(p);
+ break;
+
+ case CPU_R4600:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5000A:
+ uasm_i_nop(p);
+ tlbw(p);
+ uasm_i_nop(p);
+ break;
+
+ case CPU_R4300:
+ case CPU_5KC:
+ case CPU_TX49XX:
+ case CPU_AU1000:
+ case CPU_AU1100:
+ case CPU_AU1500:
+ case CPU_AU1550:
+ case CPU_AU1200:
+ case CPU_AU1210:
+ case CPU_AU1250:
+ case CPU_PR4450:
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_SB1:
+ case CPU_SB1A:
+ case CPU_4KSC:
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_BCM3302:
+ case CPU_BCM4710:
+ case CPU_LOONGSON2:
+ if (m4kc_tlbp_war())
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ case CPU_NEVADA:
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ /*
+ * This branch uses up a mtc0 hazard nop slot and saves
+ * a nop after the tlbw instruction.
+ */
+ uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+ tlbw(p);
+ uasm_l_tlbw_hazard(l, *p);
+ break;
+
+ case CPU_RM7000:
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ case CPU_RM9000:
+ /*
+ * When the JTLB is updated by tlbwi or tlbwr, a subsequent
+ * use of the JTLB for instructions should not occur for 4
+ * cpu cycles and use for data translations should not occur
+ * for 3 cpu cycles.
+ */
+ uasm_i_ssnop(p);
+ uasm_i_ssnop(p);
+ uasm_i_ssnop(p);
+ uasm_i_ssnop(p);
+ tlbw(p);
+ uasm_i_ssnop(p);
+ uasm_i_ssnop(p);
+ uasm_i_ssnop(p);
+ uasm_i_ssnop(p);
+ break;
+
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ tlbw(p);
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ break;
+
+ case CPU_VR4131:
+ case CPU_VR4133:
+ case CPU_R5432:
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ default:
+ panic("No TLB refill handler yet (CPU type: %d)",
+ current_cpu_data.cputype);
+ break;
+ }
+}
+
+#ifdef CONFIG_64BIT
+/*
+ * TMP and PTR are scratch.
+ * TMP will be clobbered, PTR will hold the pmd entry.
+ */
+static void __cpuinit
+build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int tmp, unsigned int ptr)
+{
+ long pgdc = (long)pgd_current;
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+#ifdef MODULE_START
+ uasm_il_bltz(p, r, tmp, label_module_alloc);
+#else
+ uasm_il_bltz(p, r, tmp, label_vmalloc);
+#endif
+ /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
+
+#ifdef CONFIG_SMP
+# ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC uses TCBind value as "CPU" index
+ */
+ uasm_i_mfc0(p, ptr, C0_TCBIND);
+ uasm_i_dsrl(p, ptr, ptr, 19);
+# else
+ /*
+ * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
+ * stored in CONTEXT.
+ */
+ uasm_i_dmfc0(p, ptr, C0_CONTEXT);
+ uasm_i_dsrl(p, ptr, ptr, 23);
+# endif
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_daddu(p, ptr, ptr, tmp);
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+ uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+#else
+ UASM_i_LA_mostly(p, ptr, pgdc);
+ uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+#endif
+
+ uasm_l_vmalloc_done(l, *p);
+
+ if (PGDIR_SHIFT - 3 < 32) /* get pgd offset in bytes */
+ uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
+ else
+ uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
+ uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+}
+
+/*
+ * BVADDR is the faulting address, PTR is scratch.
+ * PTR will hold the pgd for vmalloc.
+ */
+static void __cpuinit
+build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int bvaddr, unsigned int ptr)
+{
+ long swpd = (long)swapper_pg_dir;
+
+#ifdef MODULE_START
+ long modd = (long)module_pg_dir;
+
+ uasm_l_module_alloc(l, *p);
+ /*
+ * Assumption:
+ * VMALLOC_START >= 0xc000000000000000UL
+ * MODULE_START >= 0xe000000000000000UL
+ */
+ UASM_i_SLL(p, ptr, bvaddr, 2);
+ uasm_il_bgez(p, r, ptr, label_vmalloc);
+
+ if (uasm_in_compat_space_p(MODULE_START) &&
+ !uasm_rel_lo(MODULE_START)) {
+ uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
+ } else {
+ /* unlikely configuration */
+ uasm_i_nop(p); /* delay slot */
+ UASM_i_LA(p, ptr, MODULE_START);
+ }
+ uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
+
+ if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
+ uasm_il_b(p, r, label_vmalloc_done);
+ uasm_i_lui(p, ptr, uasm_rel_hi(modd));
+ } else {
+ UASM_i_LA_mostly(p, ptr, modd);
+ uasm_il_b(p, r, label_vmalloc_done);
+ if (uasm_in_compat_space_p(modd))
+ uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
+ else
+ uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
+ }
+
+ uasm_l_vmalloc(l, *p);
+ if (uasm_in_compat_space_p(MODULE_START) &&
+ !uasm_rel_lo(MODULE_START) &&
+ MODULE_START << 32 == VMALLOC_START)
+ uasm_i_dsll32(p, ptr, ptr, 0); /* typical case */
+ else
+ UASM_i_LA(p, ptr, VMALLOC_START);
+#else
+ uasm_l_vmalloc(l, *p);
+ UASM_i_LA(p, ptr, VMALLOC_START);
+#endif
+ uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
+
+ if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+ uasm_il_b(p, r, label_vmalloc_done);
+ uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+ } else {
+ UASM_i_LA_mostly(p, ptr, swpd);
+ uasm_il_b(p, r, label_vmalloc_done);
+ if (uasm_in_compat_space_p(swpd))
+ uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ else
+ uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ }
+}
+
+#else /* !CONFIG_64BIT */
+
+/*
+ * TMP and PTR are scratch.
+ * TMP will be clobbered, PTR will hold the pgd entry.
+ */
+static void __cpuinit __maybe_unused
+build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+{
+ long pgdc = (long)pgd_current;
+
+ /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC uses TCBind value as "CPU" index
+ */
+ uasm_i_mfc0(p, ptr, C0_TCBIND);
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_srl(p, ptr, ptr, 19);
+#else
+ /*
+ * smp_processor_id() << 3 is stored in CONTEXT.
+ */
+ uasm_i_mfc0(p, ptr, C0_CONTEXT);
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_srl(p, ptr, ptr, 23);
+#endif
+ uasm_i_addu(p, ptr, tmp, ptr);
+#else
+ UASM_i_LA_mostly(p, ptr, pgdc);
+#endif
+ uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+ uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
+ uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
+ uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
+}
+
+#endif /* !CONFIG_64BIT */
+
+static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+{
+ unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
+ unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
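+ /*
+ * Worked example (assuming 4KB pages and 4-byte PTEs, PTE_T_LOG2 == 2):
+ * shift is 1 and mask is 0xff8, so the BadVPN2 field of c0_context
+ * becomes the byte offset of the even/odd pte pair in the page table.
+ */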
+
+ switch (current_cpu_type()) {
+ case CPU_VR41XX:
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ case CPU_VR4133:
+ shift += 2;
+ break;
+
+ default:
+ break;
+ }
+
+ if (shift)
+ UASM_i_SRL(p, ctx, ctx, shift);
+ uasm_i_andi(p, ctx, ctx, mask);
+}
+
+static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+{
+ /*
+ * Bug workaround for the Nevada. It seems as if under certain
+ * circumstances the move from cp0_context might produce a bogus
+ * result when the mfc0 instruction and its consumer are in
+ * different cachelines, or when a load instruction (probably any
+ * memory reference) comes between them.
+ */
+ switch (current_cpu_type()) {
+ case CPU_NEVADA:
+ UASM_i_LW(p, ptr, 0, ptr);
+ GET_CONTEXT(p, tmp); /* get context reg */
+ break;
+
+ default:
+ GET_CONTEXT(p, tmp); /* get context reg */
+ UASM_i_LW(p, ptr, 0, ptr);
+ break;
+ }
+
+ build_adjust_context(p, tmp);
+ UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
+}
+
+static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
+ unsigned int ptep)
+{
+ /*
+ * 64bit address support (36bit on a 32bit CPU) in a 32bit
+ * Kernel is a special case. Only a few CPUs use it.
+ */
+#ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits) {
+ uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
+ uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+ uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
+ uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+ uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
+ uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+ } else {
+ int pte_off_even = sizeof(pte_t) / 2;
+ int pte_off_odd = pte_off_even + sizeof(pte_t);
+
+ /* The pte entries are pre-shifted */
+ uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
+ uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+ uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
+ uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+ }
+#else
+ UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
+ UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+ if (r45k_bvahwbug())
+ build_tlb_probe_entry(p);
+ UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
+ if (r4k_250MHZhwbug())
+ uasm_i_mtc0(p, 0, C0_ENTRYLO0);
+ uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
+ if (r45k_bvahwbug())
+ uasm_i_mfc0(p, tmp, C0_INDEX);
+ if (r4k_250MHZhwbug())
+ uasm_i_mtc0(p, 0, C0_ENTRYLO1);
+ uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+#endif
+}
+
+static void __cpuinit build_r4000_tlb_refill_handler(void)
+{
+ u32 *p = tlb_handler;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ u32 *f;
+ unsigned int final_len;
+
+ memset(tlb_handler, 0, sizeof(tlb_handler));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+ memset(final_handler, 0, sizeof(final_handler));
+
+ /*
+ * create the plain linear handler
+ */
+ if (bcm1250_m3_war()) {
+ UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+ uasm_i_xor(&p, K0, K0, K1);
+ UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ uasm_il_bnez(&p, &r, K0, label_leave);
+ /* No need for uasm_i_nop */
+ }
+
+#ifdef CONFIG_64BIT
+ build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+#else
+ build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+#endif
+
+ build_get_ptep(&p, K0, K1);
+ build_update_entries(&p, K0, K1);
+ build_tlb_write_entry(&p, &l, &r, tlb_random);
+ uasm_l_leave(&l, p);
+ uasm_i_eret(&p); /* return from trap */
+
+#ifdef CONFIG_64BIT
+ build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+#endif
+
+ /*
+ * Overflow check: For the 64bit handler, we need at least one
+ * free instruction slot for the wrap-around branch. In the worst
+ * case, if the intended insertion point is a delay slot, we
+ * need three, with the second nop'ed and the third being
+ * unused.
+ */
+ /* Loongson2 ebase is different from r4k, so we have more space */
+#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
+ if ((p - tlb_handler) > 64)
+ panic("TLB refill handler space exceeded");
+#else
+ if (((p - tlb_handler) > 63)
+ || (((p - tlb_handler) > 61)
+ && uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
+ panic("TLB refill handler space exceeded");
+#endif
+
+ /*
+ * Now fold the handler in the TLB refill handler space.
+ */
+#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
+ f = final_handler;
+ /* Simplest case, just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+#else /* CONFIG_64BIT */
+ f = final_handler + 32;
+ if ((p - tlb_handler) <= 32) {
+ /* Just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ } else {
+ u32 *split = tlb_handler + 30;
+
+ /*
+ * Find the split point.
+ */
+ if (uasm_insn_has_bdelay(relocs, split - 1))
+ split--;
+
+ /* Copy first part of the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+ f += split - tlb_handler;
+
+ /* Insert branch. */
+ uasm_l_split(&l, final_handler);
+ uasm_il_b(&f, &r, label_split);
+ if (uasm_insn_has_bdelay(relocs, split))
+ uasm_i_nop(&f);
+ else {
+ uasm_copy_handler(relocs, labels, split, split + 1, f);
+ uasm_move_labels(labels, f, f + 1, -1);
+ f++;
+ split++;
+ }
+
+ /* Copy the rest of the handler. */
+ uasm_copy_handler(relocs, labels, split, p, final_handler);
+ final_len = (f - (final_handler + 32)) + (p - split);
+ }
+#endif /* CONFIG_64BIT */
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB refill handler (%u instructions).\n",
+ final_len);
+
+ memcpy((void *)ebase, final_handler, 0x100);
+
+ dump_handler((u32 *)ebase, 64);
+}
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
+/*
+ * 128 instructions for the fastpath handler is generous and should
+ * never be exceeded.
+ */
+#define FASTPATH_SIZE 128
+
+u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
+u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
+u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
+
+static void __cpuinit
+iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ uasm_i_lld(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_LL(p, pte, 0, ptr);
+#else
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ uasm_i_ld(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_LW(p, pte, 0, ptr);
+#endif
+}
+
+static void __cpuinit
+iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
+ unsigned int mode)
+{
+#ifdef CONFIG_64BIT_PHYS_ADDR
+ unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+#endif
+
+ uasm_i_ori(p, pte, pte, mode);
+#ifdef CONFIG_SMP
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ uasm_i_scd(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_SC(p, pte, 0, ptr);
+
+ if (r10000_llsc_war())
+ uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
+ else
+ uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
+
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (!cpu_has_64bits) {
+ /* no uasm_i_nop needed */
+ uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_i_ori(p, pte, pte, hwmode);
+ uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
+ /* no uasm_i_nop needed */
+ uasm_i_lw(p, pte, 0, ptr);
+ } else
+ uasm_i_nop(p);
+# else
+ uasm_i_nop(p);
+# endif
+#else
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (cpu_has_64bits)
+ uasm_i_sd(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_SW(p, pte, 0, ptr);
+
+# ifdef CONFIG_64BIT_PHYS_ADDR
+ if (!cpu_has_64bits) {
+ uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_i_ori(p, pte, pte, hwmode);
+ uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_i_lw(p, pte, 0, ptr);
+ }
+# endif
+#endif
+}
+
+/*
+ * Check if PTE is present; if not, jump to LABEL. PTR points to
+ * the page table where this PTE is located; PTE will be re-loaded
+ * with its original value.
+ */
+static void __cpuinit
+build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
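+ /* pte is now non-zero iff _PAGE_PRESENT or _PAGE_READ was clear. */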
+ uasm_il_bnez(p, r, pte, lid);
+ iPTE_LW(p, l, pte, ptr);
+}
+
+/* Make PTE valid, store result in PTR. */
+static void __cpuinit
+build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+ unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
+
+ iPTE_SW(p, r, pte, ptr, mode);
+}
+
+/*
+ * Check if PTE can be written to; if not, branch to LABEL. Regardless,
+ * restore PTE with the value from PTR when done.
+ */
+static void __cpuinit
+build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_il_bnez(p, r, pte, lid);
+ iPTE_LW(p, l, pte, ptr);
+}
+
+/*
+ * Make PTE writable, update software status bits as well, then store
+ * at PTR.
+ */
+static void __cpuinit
+build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+ unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
+ | _PAGE_DIRTY);
+
+ iPTE_SW(p, r, pte, ptr, mode);
+}
+
+/*
+ * Check if PTE can be modified; if not, branch to LABEL. Regardless,
+ * restore PTE with the value from PTR when done.
+ */
+static void __cpuinit
+build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, enum label_id lid)
+{
+ uasm_i_andi(p, pte, pte, _PAGE_WRITE);
+ uasm_il_beqz(p, r, pte, lid);
+ iPTE_LW(p, l, pte, ptr);
+}
+
+/*
+ * R3000 style TLB load/store/modify handlers.
+ */
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi.
+ * Then it returns.
+ */
+static void __cpuinit
+build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
+{
+ uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
+ uasm_i_tlbwi(p);
+ uasm_i_jr(p, tmp);
+ uasm_i_rfe(p); /* branch delay */
+}
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi
+ * or tlbwr as appropriate. This is because the index register
+ * may have the probe fail bit set as a result of a trap on a
+ * kseg2 access, i.e. without refill. Then it returns.
+ */
+static void __cpuinit
+build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int pte,
+ unsigned int tmp)
+{
+ uasm_i_mfc0(p, tmp, C0_INDEX);
+ uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
+ uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
+ uasm_i_tlbwi(p); /* cp0 delay */
+ uasm_i_jr(p, tmp);
+ uasm_i_rfe(p); /* branch delay */
+ uasm_l_r3000_write_probe_fail(l, *p);
+ uasm_i_tlbwr(p); /* cp0 delay */
+ uasm_i_jr(p, tmp);
+ uasm_i_rfe(p); /* branch delay */
+}
+
+static void __cpuinit
+build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
+ unsigned int ptr)
+{
+ long pgdc = (long)pgd_current;
+
+ uasm_i_mfc0(p, pte, C0_BADVADDR);
+ uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
+ uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+ uasm_i_srl(p, pte, pte, 22); /* load delay */
+ uasm_i_sll(p, pte, pte, 2);
+ uasm_i_addu(p, ptr, ptr, pte);
+ uasm_i_mfc0(p, pte, C0_CONTEXT);
+ uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
+ uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
+ uasm_i_addu(p, ptr, ptr, pte);
+ uasm_i_lw(p, pte, 0, ptr);
+ uasm_i_tlbp(p); /* load delay */
+}
+
+static void __cpuinit build_r3000_tlb_load_handler(void)
+{
+ u32 *p = handle_tlbl;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(handle_tlbl, 0, sizeof(handle_tlbl));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+ uasm_i_nop(&p); /* load delay */
+ build_make_valid(&p, &r, K0, K1);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbl(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if ((p - handle_tlbl) > FASTPATH_SIZE)
+ panic("TLB load handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbl));
+
+ dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+}
+
+static void __cpuinit build_r3000_tlb_store_handler(void)
+{
+ u32 *p = handle_tlbs;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(handle_tlbs, 0, sizeof(handle_tlbs));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+ uasm_i_nop(&p); /* load delay */
+ build_make_write(&p, &r, K0, K1);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbs(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if ((p - handle_tlbs) > FASTPATH_SIZE)
+ panic("TLB store handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbs));
+
+ dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+}
+
+static void __cpuinit build_r3000_tlb_modify_handler(void)
+{
+ u32 *p = handle_tlbm;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(handle_tlbm, 0, sizeof(handle_tlbm));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+ uasm_i_nop(&p); /* load delay */
+ build_make_write(&p, &r, K0, K1);
+ build_r3000_pte_reload_tlbwi(&p, K0, K1);
+
+ uasm_l_nopage_tlbm(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if ((p - handle_tlbm) > FASTPATH_SIZE)
+ panic("TLB modify handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbm));
+
+ dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+}
+
+/*
+ * R4000 style TLB load/store/modify handlers.
+ */
+static void __cpuinit
+build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr)
+{
+#ifdef CONFIG_64BIT
+ build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
+#else
+ build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
+#endif
+
+ UASM_i_MFC0(p, pte, C0_BADVADDR);
+ UASM_i_LW(p, ptr, 0, ptr);
+ UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+ UASM_i_ADDU(p, ptr, ptr, pte);
+
+#ifdef CONFIG_SMP
+ uasm_l_smp_pgtable_change(l, *p);
+#endif
+ iPTE_LW(p, l, pte, ptr); /* get even pte */
+ if (!m4kc_tlbp_war())
+ build_tlb_probe_entry(p);
+}
+
+static void __cpuinit
+build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int tmp,
+ unsigned int ptr)
+{
+ uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
+ uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
+ build_update_entries(p, tmp, ptr);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+ uasm_l_leave(l, *p);
+ uasm_i_eret(p); /* return from trap */
+
+#ifdef CONFIG_64BIT
+ build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+#endif
+}
+
+static void __cpuinit build_r4000_tlb_load_handler(void)
+{
+ u32 *p = handle_tlbl;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(handle_tlbl, 0, sizeof(handle_tlbl));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (bcm1250_m3_war()) {
+ UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+ uasm_i_xor(&p, K0, K0, K1);
+ UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+ uasm_il_bnez(&p, &r, K0, label_leave);
+ /* No need for uasm_i_nop */
+ }
+
+ build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
+ build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+ if (m4kc_tlbp_war())
+ build_tlb_probe_entry(&p);
+ build_make_valid(&p, &r, K0, K1);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbl(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if ((p - handle_tlbl) > FASTPATH_SIZE)
+ panic("TLB load handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbl));
+
+ dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+}
+
+static void __cpuinit build_r4000_tlb_store_handler(void)
+{
+ u32 *p = handle_tlbs;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(handle_tlbs, 0, sizeof(handle_tlbs));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
+ build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+ if (m4kc_tlbp_war())
+ build_tlb_probe_entry(&p);
+ build_make_write(&p, &r, K0, K1);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbs(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if ((p - handle_tlbs) > FASTPATH_SIZE)
+ panic("TLB store handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbs));
+
+ dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+}
+
+static void __cpuinit build_r4000_tlb_modify_handler(void)
+{
+ u32 *p = handle_tlbm;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(handle_tlbm, 0, sizeof(handle_tlbm));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
+ build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+ if (m4kc_tlbp_war())
+ build_tlb_probe_entry(&p);
+ /* Present and writable bits set, set accessed and dirty bits. */
+ build_make_write(&p, &r, K0, K1);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbm(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if ((p - handle_tlbm) > FASTPATH_SIZE)
+ panic("TLB modify handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int)(p - handle_tlbm));
+
+ dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+}
+
+void __cpuinit build_tlb_refill_handler(void)
+{
+ /*
+ * The refill handler is generated per-CPU; multi-node systems
+ * may have local storage for it. The other handlers are only
+ * needed once.
+ */
+ static int run_once = 0;
+
+ switch (current_cpu_type()) {
+ case CPU_R2000:
+ case CPU_R3000:
+ case CPU_R3000A:
+ case CPU_R3081E:
+ case CPU_TX3912:
+ case CPU_TX3922:
+ case CPU_TX3927:
+ build_r3000_tlb_refill_handler();
+ if (!run_once) {
+ build_r3000_tlb_load_handler();
+ build_r3000_tlb_store_handler();
+ build_r3000_tlb_modify_handler();
+ run_once++;
+ }
+ break;
+
+ case CPU_R6000:
+ case CPU_R6000A:
+ panic("No R6000 TLB refill handler yet");
+ break;
+
+ case CPU_R8000:
+ panic("No R8000 TLB refill handler yet");
+ break;
+
+ default:
+ build_r4000_tlb_refill_handler();
+ if (!run_once) {
+ build_r4000_tlb_load_handler();
+ build_r4000_tlb_store_handler();
+ build_r4000_tlb_modify_handler();
+ run_once++;
+ }
+ }
+}
+
+void __cpuinit flush_tlb_handlers(void)
+{
+ local_flush_icache_range((unsigned long)handle_tlbl,
+ (unsigned long)handle_tlbl + sizeof(handle_tlbl));
+ local_flush_icache_range((unsigned long)handle_tlbs,
+ (unsigned long)handle_tlbs + sizeof(handle_tlbs));
+ local_flush_icache_range((unsigned long)handle_tlbm,
+ (unsigned long)handle_tlbm + sizeof(handle_tlbm));
+}
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
new file mode 100644
index 0000000..f467199
--- /dev/null
+++ b/arch/mips/mm/uasm.c
@@ -0,0 +1,588 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+
+#include "uasm.h"
+
+enum fields {
+ RS = 0x001,
+ RT = 0x002,
+ RD = 0x004,
+ RE = 0x008,
+ SIMM = 0x010,
+ UIMM = 0x020,
+ BIMM = 0x040,
+ JIMM = 0x080,
+ FUNC = 0x100,
+ SET = 0x200
+};
+
+#define OP_MASK 0x3f
+#define OP_SH 26
+#define RS_MASK 0x1f
+#define RS_SH 21
+#define RT_MASK 0x1f
+#define RT_SH 16
+#define RD_MASK 0x1f
+#define RD_SH 11
+#define RE_MASK 0x1f
+#define RE_SH 6
+#define IMM_MASK 0xffff
+#define IMM_SH 0
+#define JIMM_MASK 0x3ffffff
+#define JIMM_SH 0
+#define FUNC_MASK 0x3f
+#define FUNC_SH 0
+#define SET_MASK 0x7
+#define SET_SH 0
+
+enum opcode {
+ insn_invalid,
+ insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
+ insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
+ insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
+ insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
+ insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr,
+ insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
+ insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+ insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+ insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
+};
+
+struct insn {
+ enum opcode opcode;
+ u32 match;
+ enum fields fields;
+};
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
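+/*
+ * For example (illustrative), M(spec_op, 0, 0, 0, 0, addu_op) encodes just
+ * the fixed bits of "addu" (SPECIAL opcode, function 0x21); the build_rs(),
+ * build_rt() and build_rd() helpers below OR the register fields into it.
+ */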
+static struct insn insn_table[] __cpuinitdata = {
+ { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+ { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+ { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+ { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+ { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+ { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+ { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+ { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+ { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+ { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+ { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+ { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+ { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
+ { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
+ { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
+ { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
+ { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
+ { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
+ { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
+ { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __cpuinit u32 build_rs(u32 arg)
+{
+ if (arg & ~RS_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RS_MASK) << RS_SH;
+}
+
+static inline __cpuinit u32 build_rt(u32 arg)
+{
+ if (arg & ~RT_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RT_MASK) << RT_SH;
+}
+
+static inline __cpuinit u32 build_rd(u32 arg)
+{
+ if (arg & ~RD_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RD_MASK) << RD_SH;
+}
+
+static inline __cpuinit u32 build_re(u32 arg)
+{
+ if (arg & ~RE_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RE_MASK) << RE_SH;
+}
+
+static inline __cpuinit u32 build_simm(s32 arg)
+{
+ if (arg > 0x7fff || arg < -0x8000)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & 0xffff;
+}
+
+static inline __cpuinit u32 build_uimm(u32 arg)
+{
+ if (arg & ~IMM_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & IMM_MASK;
+}
+
+static inline __cpuinit u32 build_bimm(s32 arg)
+{
+ if (arg > 0x1ffff || arg < -0x20000)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ if (arg & 0x3)
+ printk(KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __cpuinit u32 build_jimm(u32 arg)
+{
+ if (arg & ~((JIMM_MASK) << 2))
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 2) & JIMM_MASK;
+}
+
+static inline __cpuinit u32 build_func(u32 arg)
+{
+ if (arg & ~FUNC_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & FUNC_MASK;
+}
+
+static inline __cpuinit u32 build_set(u32 arg)
+{
+ if (arg & ~SET_MASK)
+ printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & SET_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
+{
+ struct insn *ip = NULL;
+ unsigned int i;
+ va_list ap;
+ u32 op;
+
+ for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+ if (insn_table[i].opcode == opc) {
+ ip = &insn_table[i];
+ break;
+ }
+
+ if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS)
+ op |= build_rs(va_arg(ap, u32));
+ if (ip->fields & RT)
+ op |= build_rt(va_arg(ap, u32));
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ va_end(ap);
+
+ **buf = op;
+ (*buf)++;
+}
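+
+/*
+ * Worked example (illustrative): for insn_addu the field mask is
+ * RS | RT | RD, so build_insn(buf, insn_addu, rs, rt, rd) consumes its
+ * variable arguments in that order and ORs build_rs(rs), build_rt(rt)
+ * and build_rd(rd) into the match template before storing the finished
+ * word at **buf and advancing the buffer pointer.
+ */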
+
+#define I_u1u2u3(op) \
+Ip_u1u2u3(op) \
+{ \
+ build_insn(buf, insn##op, a, b, c); \
+}
+
+#define I_u2u1u3(op) \
+Ip_u2u1u3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c); \
+}
+
+#define I_u3u1u2(op) \
+Ip_u3u1u2(op) \
+{ \
+ build_insn(buf, insn##op, b, c, a); \
+}
+
+#define I_u1u2s3(op) \
+Ip_u1u2s3(op) \
+{ \
+ build_insn(buf, insn##op, a, b, c); \
+}
+
+#define I_u2s3u1(op) \
+Ip_u2s3u1(op) \
+{ \
+ build_insn(buf, insn##op, c, a, b); \
+}
+
+#define I_u2u1s3(op) \
+Ip_u2u1s3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c); \
+}
+
+#define I_u1u2(op) \
+Ip_u1u2(op) \
+{ \
+ build_insn(buf, insn##op, a, b); \
+}
+
+#define I_u1s2(op) \
+Ip_u1s2(op) \
+{ \
+ build_insn(buf, insn##op, a, b); \
+}
+
+#define I_u1(op) \
+Ip_u1(op) \
+{ \
+ build_insn(buf, insn##op, a); \
+}
+
+#define I_0(op) \
+Ip_0(op) \
+{ \
+ build_insn(buf, insn##op); \
+}
+
+I_u2u1s3(_addiu)
+I_u3u1u2(_addu)
+I_u2u1u3(_andi)
+I_u3u1u2(_and)
+I_u1u2s3(_beq)
+I_u1u2s3(_beql)
+I_u1s2(_bgez)
+I_u1s2(_bgezl)
+I_u1s2(_bltz)
+I_u1s2(_bltzl)
+I_u1u2s3(_bne)
+I_u2s3u1(_cache)
+I_u1u2u3(_dmfc0)
+I_u1u2u3(_dmtc0)
+I_u2u1s3(_daddiu)
+I_u3u1u2(_daddu)
+I_u2u1u3(_dsll)
+I_u2u1u3(_dsll32)
+I_u2u1u3(_dsra)
+I_u2u1u3(_dsrl)
+I_u2u1u3(_dsrl32)
+I_u3u1u2(_dsubu)
+I_0(_eret)
+I_u1(_j)
+I_u1(_jal)
+I_u1(_jr)
+I_u2s3u1(_ld)
+I_u2s3u1(_ll)
+I_u2s3u1(_lld)
+I_u1s2(_lui)
+I_u2s3u1(_lw)
+I_u1u2u3(_mfc0)
+I_u1u2u3(_mtc0)
+I_u2u1u3(_ori)
+I_u2s3u1(_pref)
+I_0(_rfe)
+I_u2s3u1(_sc)
+I_u2s3u1(_scd)
+I_u2s3u1(_sd)
+I_u2u1u3(_sll)
+I_u2u1u3(_sra)
+I_u2u1u3(_srl)
+I_u3u1u2(_subu)
+I_u2s3u1(_sw)
+I_0(_tlbp)
+I_0(_tlbwi)
+I_0(_tlbwr)
+I_u3u1u2(_xor)
+I_u2u1u3(_xori)
+
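+/*
+ * Illustrative use of the emitters above (p is assumed to point into a
+ * writable instruction buffer; GPR numbers are plain unsigned ints):
+ *
+ *	uasm_i_lui(&p, 1, 0x1234);	emits lui $1, 0x1234
+ *	uasm_i_ori(&p, 1, 1, 0x5678);	emits ori $1, $1, 0x5678
+ *
+ * which leaves the constant 0x12345678 in $1; each call writes one word
+ * and advances p.
+ */
+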
+/* Handle labels. */
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+{
+ (*lab)->addr = addr;
+ (*lab)->lab = lid;
+ (*lab)++;
+}
+
+int __cpuinit uasm_in_compat_space_p(long addr)
+{
+ /* Is this address in 32bit compat space? */
+#ifdef CONFIG_64BIT
+ return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
+#else
+ return 1;
+#endif
+}
+
+static int __cpuinit uasm_rel_highest(long val)
+{
+#ifdef CONFIG_64BIT
+ return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
+#else
+ return 0;
+#endif
+}
+
+static int __cpuinit uasm_rel_higher(long val)
+{
+#ifdef CONFIG_64BIT
+ return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
+#else
+ return 0;
+#endif
+}
+
+int __cpuinit uasm_rel_hi(long val)
+{
+ return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
+}
+
+int __cpuinit uasm_rel_lo(long val)
+{
+ return ((val & 0xffff) ^ 0x8000) - 0x8000;
+}
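+
+/*
+ * Worked example: uasm_rel_hi() and uasm_rel_lo() split an address so
+ * that (uasm_rel_hi(a) << 16) + uasm_rel_lo(a) == a, with the low half
+ * treated as a signed 16-bit value.  For a == 0x12348000 this yields
+ * uasm_rel_hi(a) == 0x1235 and uasm_rel_lo(a) == -0x8000, which the
+ * load-address helpers below turn into lui/addiu (or lui/daddiu)
+ * sequences.
+ */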
+
+void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+{
+ if (!uasm_in_compat_space_p(addr)) {
+ uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+ if (uasm_rel_higher(addr))
+ uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
+ if (uasm_rel_hi(addr)) {
+ uasm_i_dsll(buf, rs, rs, 16);
+ uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
+ uasm_i_dsll(buf, rs, rs, 16);
+ } else
+ uasm_i_dsll32(buf, rs, rs, 0);
+ } else
+ uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+}
+
+void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+{
+ UASM_i_LA_mostly(buf, rs, addr);
+ if (uasm_rel_lo(addr)) {
+ if (!uasm_in_compat_space_p(addr))
+ uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+ else
+ uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+ }
+}
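+
+/*
+ * Illustrative result: for an address in 32-bit compat space (any
+ * address on 32-bit kernels, or a sign-extended one such as a KSEG0
+ * address on 64-bit), UASM_i_LA() emits
+ *
+ *	lui	rs, uasm_rel_hi(addr)
+ *	addiu	rs, rs, uasm_rel_lo(addr)
+ *
+ * with the addiu omitted when uasm_rel_lo(addr) is zero, while
+ * addresses outside compat space go through the longer
+ * lui/daddiu/dsll sequence built by UASM_i_LA_mostly().
+ */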
+
+/* Handle relocations. */
+void __cpuinit
+uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+{
+ (*rel)->addr = addr;
+ (*rel)->type = R_MIPS_PC16;
+ (*rel)->lab = lid;
+ (*rel)++;
+}
+
+static inline void __cpuinit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
+
+void __cpuinit
+uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ struct uasm_label *l;
+
+ for (; rel->lab != UASM_LABEL_INVALID; rel++)
+ for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
+ if (rel->lab == l->lab)
+ __resolve_relocs(rel, l);
+}
+
+void __cpuinit
+uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+{
+ for (; rel->lab != UASM_LABEL_INVALID; rel++)
+ if (rel->addr >= first && rel->addr < end)
+ rel->addr += off;
+}
+
+void __cpuinit
+uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+{
+ for (; lab->lab != UASM_LABEL_INVALID; lab++)
+ if (lab->addr >= first && lab->addr < end)
+ lab->addr += off;
+}
+
+void __cpuinit
+uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ u32 *end, u32 *target)
+{
+ long off = (long)(target - first);
+
+ memcpy(target, first, (end - first) * sizeof(u32));
+
+ uasm_move_relocs(rel, first, end, off);
+ uasm_move_labels(lab, first, end, off);
+}
+
+int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+{
+ for (; rel->lab != UASM_LABEL_INVALID; rel++) {
+ if (rel->addr == addr
+ && (rel->type == R_MIPS_PC16
+ || rel->type == R_MIPS_26))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Convenience functions for labeled branches. */
+void __cpuinit
+uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bltz(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_b(p, 0);
+}
+
+void __cpuinit
+uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beqz(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beqzl(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bne(p, reg1, reg2, 0);
+}
+
+void __cpuinit
+uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bnez(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bgezl(p, reg, 0);
+}
+
+void __cpuinit
+uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bgez(p, reg, 0);
+}
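+
+/*
+ * Typical (illustrative) calling sequence tying the helpers above
+ * together.  label_leave is an assumed caller-defined label id and
+ * uasm_l_leave() an assumed helper generated by UASM_L_LA(_leave); p, l
+ * and r point into the caller's instruction, label and relocation
+ * arrays:
+ *
+ *	uasm_il_beqz(&p, &r, reg, label_leave);	records an R_MIPS_PC16 reloc
+ *	uasm_i_nop(&p);				fills the branch delay slot
+ *	...
+ *	uasm_l_leave(&l, p);			binds label_leave here
+ *	...
+ *	uasm_resolve_relocs(relocs, labels);	patches the branch offset
+ */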
diff --git a/arch/mips/mm/uasm.h b/arch/mips/mm/uasm.h
new file mode 100644
index 0000000..c6d1e3d
--- /dev/null
+++ b/arch/mips/mm/uasm.h
@@ -0,0 +1,184 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#include <linux/types.h>
+
+#define Ip_u1u2u3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u2u1u3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u3u1u2(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u1u2s3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+
+#define Ip_u2s3u1(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+
+#define Ip_u2u1s3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+
+#define Ip_u1u2(op) \
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+
+#define Ip_u1s2(op) \
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+
+#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
+
+#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
+
+Ip_u2u1s3(_addiu);
+Ip_u3u1u2(_addu);
+Ip_u2u1u3(_andi);
+Ip_u3u1u2(_and);
+Ip_u1u2s3(_beq);
+Ip_u1u2s3(_beql);
+Ip_u1s2(_bgez);
+Ip_u1s2(_bgezl);
+Ip_u1s2(_bltz);
+Ip_u1s2(_bltzl);
+Ip_u1u2s3(_bne);
+Ip_u2s3u1(_cache);
+Ip_u1u2u3(_dmfc0);
+Ip_u1u2u3(_dmtc0);
+Ip_u2u1s3(_daddiu);
+Ip_u3u1u2(_daddu);
+Ip_u2u1u3(_dsll);
+Ip_u2u1u3(_dsll32);
+Ip_u2u1u3(_dsra);
+Ip_u2u1u3(_dsrl);
+Ip_u2u1u3(_dsrl32);
+Ip_u3u1u2(_dsubu);
+Ip_0(_eret);
+Ip_u1(_j);
+Ip_u1(_jal);
+Ip_u1(_jr);
+Ip_u2s3u1(_ld);
+Ip_u2s3u1(_ll);
+Ip_u2s3u1(_lld);
+Ip_u1s2(_lui);
+Ip_u2s3u1(_lw);
+Ip_u1u2u3(_mfc0);
+Ip_u1u2u3(_mtc0);
+Ip_u2u1u3(_ori);
+Ip_u2s3u1(_pref);
+Ip_0(_rfe);
+Ip_u2s3u1(_sc);
+Ip_u2s3u1(_scd);
+Ip_u2s3u1(_sd);
+Ip_u2u1u3(_sll);
+Ip_u2u1u3(_sra);
+Ip_u2u1u3(_srl);
+Ip_u3u1u2(_subu);
+Ip_u2s3u1(_sw);
+Ip_0(_tlbp);
+Ip_0(_tlbwi);
+Ip_0(_tlbwr);
+Ip_u3u1u2(_xor);
+Ip_u2u1u3(_xori);
+
+/* Handle labels. */
+struct uasm_label {
+ u32 *addr;
+ int lab;
+};
+
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+#ifdef CONFIG_64BIT
+int uasm_in_compat_space_p(long addr);
+#endif
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+
+#define UASM_L_LA(lb) \
+static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+{ \
+ uasm_build_label(lab, addr, label##lb); \
+}
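+
+/*
+ * Example (illustrative): given a caller-defined enum value label_leave,
+ * UASM_L_LA(_leave) generates a uasm_l_leave(&l, p) helper that records
+ * the current output address under that label id.
+ */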
+
+/* convenience macros for instructions */
+#ifdef CONFIG_64BIT
+# define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
+# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
+# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
+# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
+# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
+# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
+# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
+# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
+# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
+# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
+# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
+# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
+#else
+# define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
+# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
+# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
+# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
+# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
+# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
+# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
+# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
+# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
+# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
+# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
+# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
+#endif
+
+#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
+#define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off)
+#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
+#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
+#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
+#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
+#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
+#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
+#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
+
+/* Handle relocations. */
+struct uasm_reloc {
+ u32 *addr;
+ unsigned int type;
+ int lab;
+};
+
+/* This is zero so we can use zeroed label arrays. */
+#define UASM_LABEL_INVALID 0
+
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target);
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+
+/* Convenience functions for labeled branches. */
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid);
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);