author	imp <imp@FreeBSD.org>	2008-04-13 07:27:37 +0000
committer	imp <imp@FreeBSD.org>	2008-04-13 07:27:37 +0000
commit	352e51d169c4877beae82a6316142f70d0742025 (patch)
tree	afafd1ee9a9f4f112c1ec09045950573a591b1d1 /sys
parent	49872b4cfffb3ba15429b1d5c532d570bd29c0a5 (diff)
FreeBSD/mips port. The FreeBSD/mips port targets mips32, mips64,
mips32r2 and mips64r2 (and close relatives) processors. There is presently support for the ADMtek ADM5120, a MIPS 4Kc in a Malta board, the RB533 RouterBoard (based on the IDT RC32434), and some preliminary support for SiByte/Broadcom designs. Other hardware support will be forthcoming. This port boots multiuser under gxemul emulating the Malta board and also bootstraps on the hardware whose support is forthcoming.

Oleksandr Tymoshenko, Wojciech Koszek, Warner Losh, Olivier Houchard, Randall Stewart and others have contributed to the mips2 and/or mips2-jnpr Perforce branches. Juniper contributed a generic MIPS port late in the life cycle of the mips2 branch. Warner Losh merged the mips2 and Juniper code bases, and the others listed above have worked for the past several months to get to multiuser. In addition, the mips2 work owes a debt to the trail-blazing efforts of the original mips branch in Perforce done by Juli Mallett.
Diffstat (limited to 'sys')
-rw-r--r--  sys/mips/include/_bus.h | 49
-rw-r--r--  sys/mips/include/_bus_octeon.h | 46
-rw-r--r--  sys/mips/include/_inttypes.h | 221
-rw-r--r--  sys/mips/include/_limits.h | 100
-rw-r--r--  sys/mips/include/_stdint.h | 172
-rw-r--r--  sys/mips/include/_types.h | 169
-rw-r--r--  sys/mips/include/am29lv081b.h | 111
-rw-r--r--  sys/mips/include/archtype.h | 49
-rw-r--r--  sys/mips/include/asm.h | 553
-rw-r--r--  sys/mips/include/asmacros.h | 195
-rw-r--r--  sys/mips/include/atomic.h | 441
-rw-r--r--  sys/mips/include/bootinfo.h | 142
-rw-r--r--  sys/mips/include/bswap.h | 11
-rw-r--r--  sys/mips/include/bus.h | 909
-rw-r--r--  sys/mips/include/bus_dma.h | 34
-rw-r--r--  sys/mips/include/bus_octeon.h | 883
-rw-r--r--  sys/mips/include/cache.h | 261
-rw-r--r--  sys/mips/include/cache_mipsNN.h | 67
-rw-r--r--  sys/mips/include/cache_r4k.h | 383
-rw-r--r--  sys/mips/include/clock.h | 39
-rw-r--r--  sys/mips/include/clockvar.h | 55
-rw-r--r--  sys/mips/include/cp0.h | 310
-rw-r--r--  sys/mips/include/cpu.h | 564
-rw-r--r--  sys/mips/include/cpufunc.h | 346
-rw-r--r--  sys/mips/include/cpuinfo.h | 120
-rw-r--r--  sys/mips/include/cpuregs.h | 899
-rw-r--r--  sys/mips/include/cputypes.h | 38
-rw-r--r--  sys/mips/include/db_machdep.h | 99
-rw-r--r--  sys/mips/include/defs.h | 256
-rw-r--r--  sys/mips/include/elf.h | 215
-rw-r--r--  sys/mips/include/endian.h | 146
-rw-r--r--  sys/mips/include/exec.h | 40
-rw-r--r--  sys/mips/include/float.h | 81
-rw-r--r--  sys/mips/include/floatingpoint.h | 43
-rw-r--r--  sys/mips/include/fpu.h | 109
-rw-r--r--  sys/mips/include/frame.h | 138
-rw-r--r--  sys/mips/include/gdb_machdep.h | 56
-rw-r--r--  sys/mips/include/hwfunc.h | 42
-rw-r--r--  sys/mips/include/ieee.h | 154
-rw-r--r--  sys/mips/include/ieeefp.h | 32
-rw-r--r--  sys/mips/include/in_cksum.h | 77
-rw-r--r--  sys/mips/include/intr.h | 94
-rw-r--r--  sys/mips/include/intr_machdep.h | 43
-rw-r--r--  sys/mips/include/iodev.h | 33
-rw-r--r--  sys/mips/include/kdb.h | 50
-rw-r--r--  sys/mips/include/limits.h | 45
-rw-r--r--  sys/mips/include/locore.h | 70
-rw-r--r--  sys/mips/include/md_var.h | 72
-rw-r--r--  sys/mips/include/memdev.h | 39
-rw-r--r--  sys/mips/include/metadata.h | 34
-rw-r--r--  sys/mips/include/minidump.h | 46
-rw-r--r--  sys/mips/include/mips_opcode.h | 413
-rw-r--r--  sys/mips/include/mp_watchdog.h | 34
-rw-r--r--  sys/mips/include/mutex.h | 2
-rw-r--r--  sys/mips/include/ns16550.h | 194
-rw-r--r--  sys/mips/include/param.h | 196
-rw-r--r--  sys/mips/include/pcb.h | 82
-rw-r--r--  sys/mips/include/pcb_ext.h | 4
-rw-r--r--  sys/mips/include/pci_cfgreg.h | 47
-rw-r--r--  sys/mips/include/pcpu.h | 79
-rw-r--r--  sys/mips/include/pltfm.h | 29
-rw-r--r--  sys/mips/include/pmap.h | 231
-rw-r--r--  sys/mips/include/pmc_mdep.h | 24
-rw-r--r--  sys/mips/include/ppireg.h | 49
-rw-r--r--  sys/mips/include/proc.h | 71
-rw-r--r--  sys/mips/include/profile.h | 172
-rw-r--r--  sys/mips/include/psl.h | 54
-rw-r--r--  sys/mips/include/pte.h | 149
-rw-r--r--  sys/mips/include/ptrace.h | 37
-rw-r--r--  sys/mips/include/queue.h | 171
-rw-r--r--  sys/mips/include/reg.h | 78
-rw-r--r--  sys/mips/include/regdef.h | 53
-rw-r--r--  sys/mips/include/regnum.h | 203
-rw-r--r--  sys/mips/include/reloc.h | 35
-rw-r--r--  sys/mips/include/resource.h | 46
-rw-r--r--  sys/mips/include/rm7000.h | 95
-rw-r--r--  sys/mips/include/runq.h | 47
-rw-r--r--  sys/mips/include/segments.h | 40
-rw-r--r--  sys/mips/include/setjmp.h | 59
-rw-r--r--  sys/mips/include/sf_buf.h | 65
-rw-r--r--  sys/mips/include/sigframe.h | 49
-rw-r--r--  sys/mips/include/signal.h | 80
-rw-r--r--  sys/mips/include/smp.h | 43
-rw-r--r--  sys/mips/include/stdarg.h | 144
-rw-r--r--  sys/mips/include/sysarch.h | 53
-rw-r--r--  sys/mips/include/timerreg.h | 65
-rw-r--r--  sys/mips/include/trap.h | 117
-rw-r--r--  sys/mips/include/ucontext.h | 101
-rw-r--r--  sys/mips/include/varargs.h | 59
-rw-r--r--  sys/mips/include/vmparam.h | 201
-rw-r--r--  sys/mips/mips/autoconf.c | 112
-rw-r--r--  sys/mips/mips/busdma_machdep.c | 841
-rw-r--r--  sys/mips/mips/cache.c | 220
-rw-r--r--  sys/mips/mips/cache_mipsNN.c | 608
-rw-r--r--  sys/mips/mips/copystr.S | 148
-rw-r--r--  sys/mips/mips/cpu.c | 328
-rw-r--r--  sys/mips/mips/db_disasm.c | 392
-rw-r--r--  sys/mips/mips/db_interface.c | 339
-rw-r--r--  sys/mips/mips/db_trace.c | 77
-rw-r--r--  sys/mips/mips/dump_machdep.c | 35
-rw-r--r--  sys/mips/mips/elf_machdep.c | 268
-rw-r--r--  sys/mips/mips/exception.S | 1287
-rw-r--r--  sys/mips/mips/fp.S | 3608
-rw-r--r--  sys/mips/mips/gdb_machdep.c | 189
-rw-r--r--  sys/mips/mips/genassym.c | 99
-rw-r--r--  sys/mips/mips/in_cksum.c | 248
-rw-r--r--  sys/mips/mips/intr_machdep.c | 199
-rw-r--r--  sys/mips/mips/locore.S | 279
-rw-r--r--  sys/mips/mips/machdep.c | 557
-rw-r--r--  sys/mips/mips/mainbus.c | 343
-rw-r--r--  sys/mips/mips/mem.c | 185
-rw-r--r--  sys/mips/mips/mips_subr.c | 48
-rw-r--r--  sys/mips/mips/mp_machdep.c | 313
-rw-r--r--  sys/mips/mips/nexus.c | 474
-rw-r--r--  sys/mips/mips/pm_machdep.c | 541
-rw-r--r--  sys/mips/mips/pmap.c | 3229
-rw-r--r--  sys/mips/mips/psraccess.S | 196
-rw-r--r--  sys/mips/mips/stack_machdep.c | 153
-rw-r--r--  sys/mips/mips/support.S | 1537
-rw-r--r--  sys/mips/mips/swtch.S | 650
-rw-r--r--  sys/mips/mips/tick.c | 369
-rw-r--r--  sys/mips/mips/tlb.S | 509
-rw-r--r--  sys/mips/mips/trap.c | 1815
-rw-r--r--  sys/mips/mips/uio_machdep.c | 128
-rw-r--r--  sys/mips/mips/vm_machdep.c | 541
125 files changed, 33717 insertions, 0 deletions
diff --git a/sys/mips/include/_bus.h b/sys/mips/include/_bus.h
new file mode 100644
index 0000000..74865da
--- /dev/null
+++ b/sys/mips/include/_bus.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2005 M. Warner Losh.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/i386/include/_bus.h,v 1.1 2005/04/18 21:45:33 imp
+ * $FreeBSD$
+ */
+
+#ifndef MIPS_INCLUDE__BUS_H
+#define MIPS_INCLUDE__BUS_H
+#ifdef TARGET_OCTEON
+#include "_bus_octeon.h"
+#else
+/*
+ * Bus address and size types
+ */
+typedef uintptr_t bus_addr_t;
+typedef uintptr_t bus_size_t;
+
+/*
+ * Access methods for bus resources and address space.
+ */
+typedef long bus_space_tag_t;
+typedef u_long bus_space_handle_t;
+#endif
+#endif /* MIPS_INCLUDE__BUS_H */
diff --git a/sys/mips/include/_bus_octeon.h b/sys/mips/include/_bus_octeon.h
new file mode 100644
index 0000000..385e8e5
--- /dev/null
+++ b/sys/mips/include/_bus_octeon.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2005 M. Warner Losh.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef MIPS_INCLUDE__BUS_OCTEON_H
+#define MIPS_INCLUDE__BUS_OCTEON_H
+
+/*
+ * Bus address and size types
+ */
+typedef uint64_t bus_addr_t;
+typedef uint32_t bus_size_t;
+
+/*
+ * Access methods for bus resources and address space.
+ */
+typedef uint32_t bus_space_tag_t;
+typedef uint64_t bus_space_handle_t;
+
+#endif /* MIPS_INCLUDE__BUS_OCTEON_H */
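To make the size difference concrete, here is a minimal stand-alone sketch (not part of the commit) that mirrors the two sets of typedefs above; build with -DTARGET_OCTEON to take the Octeon-style 64-bit bus_addr_t, otherwise the generic pointer-sized types are used.

#include <stdint.h>
#include <stdio.h>

#ifdef TARGET_OCTEON			/* Octeon view: 64-bit bus addresses */
typedef uint64_t bus_addr_t;
typedef uint32_t bus_size_t;
typedef uint32_t bus_space_tag_t;
typedef uint64_t bus_space_handle_t;
#else					/* generic MIPS view: pointer-sized */
typedef uintptr_t bus_addr_t;
typedef uintptr_t bus_size_t;
typedef long bus_space_tag_t;
typedef unsigned long bus_space_handle_t;
#endif

int
main(void)
{
	printf("bus_addr_t:         %zu bytes\n", sizeof(bus_addr_t));
	printf("bus_size_t:         %zu bytes\n", sizeof(bus_size_t));
	printf("bus_space_handle_t: %zu bytes\n", sizeof(bus_space_handle_t));
	return (0);
}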
diff --git a/sys/mips/include/_inttypes.h b/sys/mips/include/_inttypes.h
new file mode 100644
index 0000000..e09f9de
--- /dev/null
+++ b/sys/mips/include/_inttypes.h
@@ -0,0 +1,221 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: $NetBSD: int_fmtio.h,v 1.2 2001/04/26 16:25:21 kleink Exp $
+ * from: src/sys/i386/include/_inttypes.h,v 1.2 2002/06/30 05:48:02 mike
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTTYPES_H_
+#define _MACHINE_INTTYPES_H_
+
+/*
+ * Macros for format specifiers.
+ */
+
+/* fprintf(3) macros for signed integers. */
+
+#define PRId8 "d" /* int8_t */
+#define PRId16 "d" /* int16_t */
+#define PRId32 "d" /* int32_t */
+#define PRId64 "lld" /* int64_t */
+#define PRIdLEAST8 "d" /* int_least8_t */
+#define PRIdLEAST16 "d" /* int_least16_t */
+#define PRIdLEAST32 "d" /* int_least32_t */
+#define PRIdLEAST64 "lld" /* int_least64_t */
+#define PRIdFAST8 "d" /* int_fast8_t */
+#define PRIdFAST16 "d" /* int_fast16_t */
+#define PRIdFAST32 "d" /* int_fast32_t */
+#define PRIdFAST64 "lld" /* int_fast64_t */
+#define PRIdMAX "jd" /* intmax_t */
+#define PRIdPTR "d" /* intptr_t */
+
+#define PRIi8 "i" /* int8_t */
+#define PRIi16 "i" /* int16_t */
+#define PRIi32 "i" /* int32_t */
+#define PRIi64 "lli" /* int64_t */
+#define PRIiLEAST8 "i" /* int_least8_t */
+#define PRIiLEAST16 "i" /* int_least16_t */
+#define PRIiLEAST32 "i" /* int_least32_t */
+#define PRIiLEAST64 "lli" /* int_least64_t */
+#define PRIiFAST8 "i" /* int_fast8_t */
+#define PRIiFAST16 "i" /* int_fast16_t */
+#define PRIiFAST32 "i" /* int_fast32_t */
+#define PRIiFAST64 "lli" /* int_fast64_t */
+#define PRIiMAX "ji" /* intmax_t */
+#define PRIiPTR "i" /* intptr_t */
+
+/* fprintf(3) macros for unsigned integers. */
+
+#define PRIo8 "o" /* uint8_t */
+#define PRIo16 "o" /* uint16_t */
+#define PRIo32 "o" /* uint32_t */
+#define PRIo64 "llo" /* uint64_t */
+#define PRIoLEAST8 "o" /* uint_least8_t */
+#define PRIoLEAST16 "o" /* uint_least16_t */
+#define PRIoLEAST32 "o" /* uint_least32_t */
+#define PRIoLEAST64 "llo" /* uint_least64_t */
+#define PRIoFAST8 "o" /* uint_fast8_t */
+#define PRIoFAST16 "o" /* uint_fast16_t */
+#define PRIoFAST32 "o" /* uint_fast32_t */
+#define PRIoFAST64 "llo" /* uint_fast64_t */
+#define PRIoMAX "jo" /* uintmax_t */
+#define PRIoPTR "o" /* uintptr_t */
+
+#define PRIu8 "u" /* uint8_t */
+#define PRIu16 "u" /* uint16_t */
+#define PRIu32 "u" /* uint32_t */
+#define PRIu64 "llu" /* uint64_t */
+#define PRIuLEAST8 "u" /* uint_least8_t */
+#define PRIuLEAST16 "u" /* uint_least16_t */
+#define PRIuLEAST32 "u" /* uint_least32_t */
+#define PRIuLEAST64 "llu" /* uint_least64_t */
+#define PRIuFAST8 "u" /* uint_fast8_t */
+#define PRIuFAST16 "u" /* uint_fast16_t */
+#define PRIuFAST32 "u" /* uint_fast32_t */
+#define PRIuFAST64 "llu" /* uint_fast64_t */
+#define PRIuMAX "ju" /* uintmax_t */
+#define PRIuPTR "u" /* uintptr_t */
+
+#define PRIx8 "x" /* uint8_t */
+#define PRIx16 "x" /* uint16_t */
+#define PRIx32 "x" /* uint32_t */
+#define PRIx64 "llx" /* uint64_t */
+#define PRIxLEAST8 "x" /* uint_least8_t */
+#define PRIxLEAST16 "x" /* uint_least16_t */
+#define PRIxLEAST32 "x" /* uint_least32_t */
+#define PRIxLEAST64 "llx" /* uint_least64_t */
+#define PRIxFAST8 "x" /* uint_fast8_t */
+#define PRIxFAST16 "x" /* uint_fast16_t */
+#define PRIxFAST32 "x" /* uint_fast32_t */
+#define PRIxFAST64 "llx" /* uint_fast64_t */
+#define PRIxMAX "jx" /* uintmax_t */
+#define PRIxPTR "x" /* uintptr_t */
+
+#define PRIX8 "X" /* uint8_t */
+#define PRIX16 "X" /* uint16_t */
+#define PRIX32 "X" /* uint32_t */
+#define PRIX64 "llX" /* uint64_t */
+#define PRIXLEAST8 "X" /* uint_least8_t */
+#define PRIXLEAST16 "X" /* uint_least16_t */
+#define PRIXLEAST32 "X" /* uint_least32_t */
+#define PRIXLEAST64 "llX" /* uint_least64_t */
+#define PRIXFAST8 "X" /* uint_fast8_t */
+#define PRIXFAST16 "X" /* uint_fast16_t */
+#define PRIXFAST32 "X" /* uint_fast32_t */
+#define PRIXFAST64 "llX" /* uint_fast64_t */
+#define PRIXMAX "jX" /* uintmax_t */
+#define PRIXPTR "X" /* uintptr_t */
+
+/* fscanf(3) macros for signed integers. */
+
+#define SCNd8 "hhd" /* int8_t */
+#define SCNd16 "hd" /* int16_t */
+#define SCNd32 "d" /* int32_t */
+#define SCNd64 "lld" /* int64_t */
+#define SCNdLEAST8 "hhd" /* int_least8_t */
+#define SCNdLEAST16 "hd" /* int_least16_t */
+#define SCNdLEAST32 "d" /* int_least32_t */
+#define SCNdLEAST64 "lld" /* int_least64_t */
+#define SCNdFAST8 "d" /* int_fast8_t */
+#define SCNdFAST16 "d" /* int_fast16_t */
+#define SCNdFAST32 "d" /* int_fast32_t */
+#define SCNdFAST64 "lld" /* int_fast64_t */
+#define SCNdMAX "jd" /* intmax_t */
+#define SCNdPTR "d" /* intptr_t */
+
+#define SCNi8 "hhi" /* int8_t */
+#define SCNi16 "hi" /* int16_t */
+#define SCNi32 "i" /* int32_t */
+#define SCNi64 "lli" /* int64_t */
+#define SCNiLEAST8 "hhi" /* int_least8_t */
+#define SCNiLEAST16 "hi" /* int_least16_t */
+#define SCNiLEAST32 "i" /* int_least32_t */
+#define SCNiLEAST64 "lli" /* int_least64_t */
+#define SCNiFAST8 "i" /* int_fast8_t */
+#define SCNiFAST16 "i" /* int_fast16_t */
+#define SCNiFAST32 "i" /* int_fast32_t */
+#define SCNiFAST64 "lli" /* int_fast64_t */
+#define SCNiMAX "ji" /* intmax_t */
+#define SCNiPTR "i" /* intptr_t */
+
+/* fscanf(3) macros for unsigned integers. */
+
+#define SCNo8 "hho" /* uint8_t */
+#define SCNo16 "ho" /* uint16_t */
+#define SCNo32 "o" /* uint32_t */
+#define SCNo64 "llo" /* uint64_t */
+#define SCNoLEAST8 "hho" /* uint_least8_t */
+#define SCNoLEAST16 "ho" /* uint_least16_t */
+#define SCNoLEAST32 "o" /* uint_least32_t */
+#define SCNoLEAST64 "llo" /* uint_least64_t */
+#define SCNoFAST8 "o" /* uint_fast8_t */
+#define SCNoFAST16 "o" /* uint_fast16_t */
+#define SCNoFAST32 "o" /* uint_fast32_t */
+#define SCNoFAST64 "llo" /* uint_fast64_t */
+#define SCNoMAX "jo" /* uintmax_t */
+#define SCNoPTR "o" /* uintptr_t */
+
+#define SCNu8 "hhu" /* uint8_t */
+#define SCNu16 "hu" /* uint16_t */
+#define SCNu32 "u" /* uint32_t */
+#define SCNu64 "llu" /* uint64_t */
+#define SCNuLEAST8 "hhu" /* uint_least8_t */
+#define SCNuLEAST16 "hu" /* uint_least16_t */
+#define SCNuLEAST32 "u" /* uint_least32_t */
+#define SCNuLEAST64 "llu" /* uint_least64_t */
+#define SCNuFAST8 "u" /* uint_fast8_t */
+#define SCNuFAST16 "u" /* uint_fast16_t */
+#define SCNuFAST32 "u" /* uint_fast32_t */
+#define SCNuFAST64 "llu" /* uint_fast64_t */
+#define SCNuMAX "ju" /* uintmax_t */
+#define SCNuPTR "u" /* uintptr_t */
+
+#define SCNx8 "hhx" /* uint8_t */
+#define SCNx16 "hx" /* uint16_t */
+#define SCNx32 "x" /* uint32_t */
+#define SCNx64 "llx" /* uint64_t */
+#define SCNxLEAST8 "hhx" /* uint_least8_t */
+#define SCNxLEAST16 "hx" /* uint_least16_t */
+#define SCNxLEAST32 "x" /* uint_least32_t */
+#define SCNxLEAST64 "llx" /* uint_least64_t */
+#define SCNxFAST8 "x" /* uint_fast8_t */
+#define SCNxFAST16 "x" /* uint_fast16_t */
+#define SCNxFAST32 "x" /* uint_fast32_t */
+#define SCNxFAST64 "llx" /* uint_fast64_t */
+#define SCNxMAX "jx" /* uintmax_t */
+#define SCNxPTR "x" /* uintptr_t */
+
+#endif /* !_MACHINE_INTTYPES_H_ */
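These macros supply only the conversion/length part of a format; they are meant to be string-concatenated into printf(3)/scanf(3) format strings. A minimal, self-contained sketch using the standard <inttypes.h> names, which resolve to the definitions above when building for FreeBSD/mips (and to the local platform's equivalents elsewhere):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	int64_t d = -42;
	uint32_t x = 0;

	/* "%" PRId64 concatenates to "%lld" with the definitions above. */
	printf("d = %" PRId64 "\n", d);

	/* SCNx32 selects the right conversion for scanning 32-bit hex. */
	if (sscanf("deadbeef", "%" SCNx32, &x) == 1)
		printf("x = 0x%" PRIx32 "\n", x);
	return (0);
}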
diff --git a/sys/mips/include/_limits.h b/sys/mips/include/_limits.h
new file mode 100644
index 0000000..d544305
--- /dev/null
+++ b/sys/mips/include/_limits.h
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * from: src/sys/i386/include/_limits.h,v 1.27 2005/01/06 22:18:15 imp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define _MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers are for the default configuration of gcc. They work for
+ * some other compilers as well, but this should not be depended on.
+ */
+
+#define __CHAR_BIT 8 /* number of bits in a char */
+
+#define __SCHAR_MAX 0x7f /* max value for a signed char */
+#define __SCHAR_MIN (-0x7f - 1) /* min value for a signed char */
+
+#define __UCHAR_MAX 0xff /* max value for an unsigned char */
+
+#define __USHRT_MAX 0xffff /* max value for an unsigned short */
+#define __SHRT_MAX 0x7fff /* max value for a short */
+#define __SHRT_MIN (-0x7fff - 1) /* min value for a short */
+
+#define __UINT_MAX 0xffffffffU /* max value for an unsigned int */
+#define __INT_MAX 0x7fffffff /* max value for an int */
+#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
+
+/* Bad hack for gcc configured to give 64-bit longs. */
+#ifdef _LARGE_LONG
+#define __ULONG_MAX 0xffffffffffffffffUL
+#define __LONG_MAX 0x7fffffffffffffffL
+#define __LONG_MIN (-0x7fffffffffffffffL - 1)
+#else
+#define __ULONG_MAX 0xffffffffUL /* max value for an unsigned long */
+#define __LONG_MAX 0x7fffffffL /* max value for a long */
+#define __LONG_MIN (-0x7fffffffL - 1) /* min value for a long */
+#endif
+
+ /* max value for an unsigned long long */
+#define __ULLONG_MAX 0xffffffffffffffffULL
+#define __LLONG_MAX 0x7fffffffffffffffLL /* max value for a long long */
+#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
+
+#define __SSIZE_MAX __INT_MAX /* max value for a ssize_t */
+
+#define __SIZE_T_MAX __UINT_MAX /* max value for a size_t */
+
+#define __OFF_MAX __LLONG_MAX /* max value for an off_t */
+#define __OFF_MIN __LLONG_MIN /* min value for an off_t */
+
+/* Quads and long longs are the same size. Ensure they stay in sync. */
+#define __UQUAD_MAX __ULLONG_MAX /* max value for a uquad_t */
+#define __QUAD_MAX __LLONG_MAX /* max value for a quad_t */
+#define __QUAD_MIN __LLONG_MIN /* min value for a quad_t */
+
+#ifdef _LARGE_LONG
+#define __LONG_BIT 64
+#else
+#define __LONG_BIT 32
+#endif
+#define __WORD_BIT 32
+
+#define __MINSIGSTKSZ (512 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
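As the comment above notes, these limits must be plain integer-constant expressions so that #if directives can use them. A small sketch of that property with the public <limits.h> names (the double-underscore values above are their machine-dependent backing definitions on FreeBSD); INT_IS_32BIT is only an illustrative local macro:

#include <limits.h>
#include <stdio.h>

/* The limits are usable by the preprocessor, not only by C code. */
#if INT_MAX == 0x7fffffff
#define INT_IS_32BIT 1
#else
#define INT_IS_32BIT 0
#endif

int
main(void)
{
	printf("INT_IS_32BIT = %d\n", INT_IS_32BIT);
	printf("LONG_MAX = %ld, CHAR_BIT = %d\n", (long)LONG_MAX, CHAR_BIT);
	return (0);
}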
diff --git a/sys/mips/include/_stdint.h b/sys/mips/include/_stdint.h
new file mode 100644
index 0000000..0abfdde
--- /dev/null
+++ b/sys/mips/include/_stdint.h
@@ -0,0 +1,172 @@
+/*-
+ * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/i386/include/_stdint.h,v 1.2 2004/05/18 16:04:57 stefanf
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__STDINT_H_
+#define _MACHINE__STDINT_H_
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
+
+#define INT8_C(c) (c)
+#define INT16_C(c) (c)
+#define INT32_C(c) (c)
+#define INT64_C(c) (c ## LL)
+
+#define UINT8_C(c) (c)
+#define UINT16_C(c) (c)
+#define UINT32_C(c) (c ## U)
+#define UINT64_C(c) (c ## ULL)
+
+#define INTMAX_C(c) (c ## LL)
+#define UINTMAX_C(c) (c ## ULL)
+
+#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.1 Limits of exact-width integer types
+ */
+/* Minimum values of exact-width signed integer types. */
+#define INT8_MIN (-0x7f-1)
+#define INT16_MIN (-0x7fff-1)
+#define INT32_MIN (-0x7fffffff-1)
+#define INT64_MIN (-0x7fffffffffffffffLL-1)
+
+/* Maximum values of exact-width signed integer types. */
+#define INT8_MAX 0x7f
+#define INT16_MAX 0x7fff
+#define INT32_MAX 0x7fffffff
+#define INT64_MAX 0x7fffffffffffffffLL
+
+/* Maximum values of exact-width unsigned integer types. */
+#define UINT8_MAX 0xff
+#define UINT16_MAX 0xffff
+#define UINT32_MAX 0xffffffffU
+#define UINT64_MAX 0xffffffffffffffffULL
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.2 Limits of minimum-width integer types
+ */
+/* Minimum values of minimum-width signed integer types. */
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST64_MIN INT64_MIN
+
+/* Maximum values of minimum-width signed integer types. */
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MAX INT64_MAX
+
+/* Maximum values of minimum-width unsigned integer types. */
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.3 Limits of fastest minimum-width integer types
+ */
+/* Minimum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MIN INT32_MIN
+#define INT_FAST16_MIN INT32_MIN
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST64_MIN INT64_MIN
+
+/* Maximum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MAX INT32_MAX
+#define INT_FAST16_MAX INT32_MAX
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MAX INT64_MAX
+
+/* Maximum values of fastest minimum-width unsigned integer types. */
+#define UINT_FAST8_MAX UINT32_MAX
+#define UINT_FAST16_MAX UINT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4 Limits of integer types capable of holding object pointers
+ */
+#define INTPTR_MIN INT32_MIN
+#define INTPTR_MAX INT32_MAX
+#define UINTPTR_MAX UINT32_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.5 Limits of greatest-width integer types
+ */
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3 Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#define PTRDIFF_MIN INT32_MIN
+#define PTRDIFF_MAX INT32_MAX
+
+/* Limits of sig_atomic_t. */
+#define SIG_ATOMIC_MIN INT32_MIN
+#define SIG_ATOMIC_MAX INT32_MAX
+
+/* Limit of size_t. */
+#define SIZE_MAX UINT32_MAX
+
+#ifndef WCHAR_MIN /* Also possibly defined in <wchar.h> */
+/* Limits of wchar_t. */
+#define WCHAR_MIN INT32_MIN
+#define WCHAR_MAX INT32_MAX
+#endif
+
+/* Limits of wint_t. */
+#define WINT_MIN INT32_MIN
+#define WINT_MAX INT32_MAX
+
+#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
+
+#endif /* !_MACHINE__STDINT_H_ */
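A short sketch of the constant and limit macros in use; with the 32-bit definitions above INTPTR_MAX equals INT32_MAX, while a 64-bit host's own <stdint.h> will report a wider pointer range. Only standard headers are used, so this builds anywhere:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* INT64_C() appends the suffix a 64-bit constant needs (LL above). */
	int64_t big = INT64_C(0x123456789);

	printf("big = %" PRId64 "\n", big);
	printf("UINT32_MAX = %" PRIu32 "\n", (uint32_t)UINT32_MAX);
	printf("pointers: %s\n",
	    INTPTR_MAX == INT32_MAX ? "32-bit" : "wider than 32-bit");
	return (0);
}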
diff --git a/sys/mips/include/_types.h b/sys/mips/include/_types.h
new file mode 100644
index 0000000..ec94439
--- /dev/null
+++ b/sys/mips/include/_types.h
@@ -0,0 +1,169 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ * From: @(#)types.h 8.3 (Berkeley) 1/5/94
+ * from: src/sys/i386/include/_types.h,v 1.12 2005/07/02 23:13:31 thompsa
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__TYPES_H_
+#define _MACHINE__TYPES_H_
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * Basic types upon which most other types are built.
+ */
+typedef __signed char __int8_t;
+typedef unsigned char __uint8_t;
+typedef short __int16_t;
+typedef unsigned short __uint16_t;
+typedef int __int32_t;
+typedef unsigned int __uint32_t;
+
+#ifdef __mips64
+typedef long __int64_t;
+typedef unsigned long __uint64_t;
+#else
+#if defined(lint)
+/* LONGLONG */
+typedef long long __int64_t;
+/* LONGLONG */
+typedef unsigned long long __uint64_t;
+#elif defined(__GNUCLIKE_ATTRIBUTE_MODE_DI)
+typedef int __attribute__((__mode__(__DI__))) __int64_t;
+typedef unsigned int __attribute__((__mode__(__DI__))) __uint64_t;
+#else
+/* LONGLONG */
+typedef long long __int64_t;
+/* LONGLONG */
+typedef unsigned long long __uint64_t;
+#endif
+#endif
+
+/*
+ * Standard type definitions.
+ */
+typedef __int32_t __clock_t; /* clock()... */
+typedef unsigned int __cpumask_t;
+#ifdef __mips64
+typedef __int64_t __critical_t;
+#else
+typedef __int32_t __critical_t;
+#endif
+typedef double __double_t;
+typedef double __float_t;
+#ifdef __mips64
+typedef __int64_t __intfptr_t;
+typedef __int64_t __intptr_t;
+#else
+typedef __int32_t __intfptr_t;
+typedef __int32_t __intptr_t;
+#endif
+typedef __int64_t __intmax_t;
+typedef __int32_t __int_fast8_t;
+typedef __int32_t __int_fast16_t;
+typedef __int32_t __int_fast32_t;
+typedef __int64_t __int_fast64_t;
+typedef __int8_t __int_least8_t;
+typedef __int16_t __int_least16_t;
+typedef __int32_t __int_least32_t;
+typedef __int64_t __int_least64_t;
+#if defined(__mips64) || defined(ISA_MIPS64)
+typedef __int64_t __register_t;
+typedef __int64_t f_register_t;
+#else
+typedef __int32_t __register_t;
+typedef __int32_t f_register_t;
+#endif
+#ifdef __mips64
+typedef __int64_t __ptrdiff_t;
+typedef __int64_t __segsz_t;
+typedef __uint64_t __size_t;
+typedef __int64_t __ssize_t;
+typedef __uint64_t __uintfptr_t;
+typedef __uint64_t __uintptr_t;
+#else
+typedef __int32_t __ptrdiff_t; /* ptr1 - ptr2 */
+typedef __int32_t __segsz_t; /* segment size (in pages) */
+typedef __uint32_t __size_t; /* sizeof() */
+typedef __int32_t __ssize_t; /* byte count or error */
+typedef __uint32_t __uintfptr_t;
+typedef __uint32_t __uintptr_t;
+#endif
+typedef __int64_t __time_t; /* time()... */
+typedef __uint64_t __uintmax_t;
+typedef __uint32_t __uint_fast8_t;
+typedef __uint32_t __uint_fast16_t;
+typedef __uint32_t __uint_fast32_t;
+typedef __uint64_t __uint_fast64_t;
+typedef __uint8_t __uint_least8_t;
+typedef __uint16_t __uint_least16_t;
+typedef __uint32_t __uint_least32_t;
+typedef __uint64_t __uint_least64_t;
+#if defined(__mips64) || defined(ISA_MIPS64)
+typedef __uint64_t __u_register_t;
+typedef __uint64_t __vm_offset_t;
+typedef __uint64_t __vm_paddr_t;
+typedef __uint64_t __vm_size_t;
+#else
+typedef __uint32_t __u_register_t;
+typedef __uint32_t __vm_offset_t;
+typedef __uint32_t __vm_paddr_t;
+typedef __uint32_t __vm_size_t;
+#endif
+typedef __int64_t __vm_ooffset_t;
+typedef __uint64_t __vm_pindex_t;
+
+/*
+ * Unusual type definitions.
+ */
+#ifdef __GNUCLIKE_BUILTIN_VARARGS
+typedef __builtin_va_list __va_list; /* internally known to gcc */
+#else
+typedef char * __va_list;
+#endif /* __GNUCLIKE_BUILTIN_VARARGS */
+#if defined(__GNUC_VA_LIST_COMPATIBILITY) && !defined(__GNUC_VA_LIST) \
+ && !defined(__NO_GNUC_VA_LIST)
+#define __GNUC_VA_LIST
+typedef __va_list __gnuc_va_list; /* compatibility w/GNU headers*/
+#endif
+
+typedef struct label_t {
+ __register_t val[13];
+} label_t;
+
+#endif /* !_MACHINE__TYPES_H_ */
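A stand-alone sketch (hypothetical my_* names, not kernel code) of the selection pattern used throughout this header: register-sized types follow the ABI via the __mips64 predefine, and label_t stores 13 such register-sized slots.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the __register_t selection above: 64-bit registers when the
 * compiler targets a 64-bit MIPS ABI, 32-bit otherwise.  Compilers for
 * other architectures simply take the 32-bit branch. */
#ifdef __mips64
typedef int64_t my_register_t;
#else
typedef int32_t my_register_t;
#endif

/* label_t above stores 13 register-sized slots. */
typedef struct my_label {
	my_register_t val[13];
} my_label_t;

int
main(void)
{
	printf("register width: %zu bits\n", sizeof(my_register_t) * 8);
	printf("label_t size:   %zu bytes\n", sizeof(my_label_t));
	return (0);
}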
diff --git a/sys/mips/include/am29lv081b.h b/sys/mips/include/am29lv081b.h
new file mode 100644
index 0000000..7ca481b
--- /dev/null
+++ b/sys/mips/include/am29lv081b.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2000-2003, 2005, Juniper Networks, Inc.
+ * All rights reserved.
+ * JNPR: am29lv081b.h,v 1.1 2006/08/07 05:38:57 katta
+ *
+ * am29lv081b.h -- am29lv081b definitions
+ *
+ * Chan Lee, May 2000
+ */
+// $FreeBSD$
+
+#ifndef __AM29LV081B_H__
+#define __AM29LV081B_H__
+
+/*
+ * Identifiers for the am29lv081b chip
+ */
+#define AM29L_MAN_ID 0x01
+#define AM29L_DEV_ID 0x38
+
+#define AM29L_DEV_ID_OFFSET 0x01
+
+#define AM29L_TIMEOUT 3000 /* 3 seconds in ms */
+#define AM29L_ERASE_TIME 30000 /* 30 seconds in ms */
+
+/*
+ * This is defined for human consumption.
+ */
+#define AM29L_BANNER "AMD29L081B 8Mb flash"
+
+/*
+ * Sector definitions.
+ */
+
+#define AM29L_SA0 0x00000
+#define AM29L_SA1 0x10000
+#define AM29L_SA2 0x20000
+#define AM29L_SA3 0x30000
+#define AM29L_SA4 0x40000
+#define AM29L_SA5 0x50000
+#define AM29L_SA6 0x60000
+#define AM29L_SA7 0x70000
+#define AM29L_SA8 0x80000
+#define AM29L_SA9 0x90000
+#define AM29L_SA10 0xA0000
+#define AM29L_SA11 0xB0000
+#define AM29L_SA12 0xC0000
+#define AM29L_SA13 0xD0000
+#define AM29L_SA14 0xE0000
+#define AM29L_SA15 0xF0000
+
+#define AM29L_BANK_MASK 0xFFF00000
+#define AM29L_SECTOR_MASK 0xFFFF0000
+#define AM29L_SECTOR_SIZE 0x10000
+#define AM29L_SECTOR_PER_BLK 4
+#define AM29L_TOTAL_SECTORS 16
+#define AM29L_PROTECT_OFFSET 0x2
+
+/*
+ * Definitions for the unlock sequence, both
+ * the address offset and the data definition.
+ */
+#define AM29L_ULCK_ADDR1 0x555
+#define AM29L_ULCK_ADDR2 0x2AA
+
+#define AM29L_ULCK_DATA1 0xAA
+#define AM29L_ULCK_DATA2 0x55
+
+/*
+ * Command definitions for the am29lv081b. Most
+ * of the following commands can only be issued
+ * after the unlock command sequence.
+ */
+
+#define AM29L_CMD_AUTO 0x90
+#define AM29L_CMD_BYTE_PROGRAM 0xA0
+#define AM29L_CMD_ERASE 0x80
+#define AM29L_CMD_ERASE_CHIP 0x10
+#define AM29L_CMD_ERASE_SECT 0x30
+#define AM29L_CMD_RESET 0xF0
+
+/*
+ * Masks to get the DQ2, DQ3, DQ5, DQ6 and DQ7 bits.
+ * All these bits signal the status of the
+ * command operations.
+ */
+
+#define AM29L_DQ2_MASK 0x04
+#define AM29L_DQ3_MASK 0x08
+#define AM29L_DQ5_MASK 0x20
+#define AM29L_DQ6_MASK 0x40
+#define AM29L_DQ7_MASK 0x80
+
+#define AM29L_GET_DQ2(data) ((data & AM29L_DQ2_MASK) >> 2)
+#define AM29L_GET_DQ3(data) ((data & AM29L_DQ3_MASK) >> 3)
+#define AM29L_GET_DQ5(data) ((data & AM29L_DQ5_MASK) >> 5)
+#define AM29L_GET_DQ6(data) ((data & AM29L_DQ6_MASK) >> 6)
+#define AM29L_GET_DQ7(data) ((data & AM29L_DQ7_MASK) >> 7)
+
+extern void flash_add_amd29l081b (flash_device_t *dev);
+
+static inline u_int32_t
+am29f_start_addr_flash(u_int8_t *ptr)
+{
+
+ return((u_int32_t)ptr & AM29L_SECTOR_MASK);
+}
+
+#endif /* __AM29LV081B_H__ */
+
+/* End of file */
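For context, a hypothetical sketch (not from the commit) of how a driver might drive the chip with these constants: every command is prefixed by the two unlock writes, then the command code is written at the first unlock offset. The constants are repeated here so the sketch stands alone; flash_base and the byte-wide MMIO accesses are assumptions, and a real driver must also poll DQ7/DQ6 via the mask macros above.

#include <stdint.h>

#define AM29L_ULCK_ADDR1	0x555
#define AM29L_ULCK_ADDR2	0x2AA
#define AM29L_ULCK_DATA1	0xAA
#define AM29L_ULCK_DATA2	0x55
#define AM29L_CMD_BYTE_PROGRAM	0xA0

static void
am29l_write(volatile uint8_t *flash_base, uint32_t offset, uint8_t val)
{
	flash_base[offset] = val;	/* single byte-wide MMIO store */
}

static void
am29l_program_byte(volatile uint8_t *flash_base, uint32_t offset, uint8_t val)
{
	/* Unlock sequence, then the program command, then the data byte. */
	am29l_write(flash_base, AM29L_ULCK_ADDR1, AM29L_ULCK_DATA1);
	am29l_write(flash_base, AM29L_ULCK_ADDR2, AM29L_ULCK_DATA2);
	am29l_write(flash_base, AM29L_ULCK_ADDR1, AM29L_CMD_BYTE_PROGRAM);
	am29l_write(flash_base, offset, val);
	/* A real driver would now poll DQ7/DQ6 (see the mask macros above)
	 * until the embedded program operation completes. */
}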
diff --git a/sys/mips/include/archtype.h b/sys/mips/include/archtype.h
new file mode 100644
index 0000000..ed1b5ea
--- /dev/null
+++ b/sys/mips/include/archtype.h
@@ -0,0 +1,49 @@
+/* $OpenBSD: archtype.h,v 1.6 1999/01/27 04:46:04 imp Exp $ */
+/*
+ * Copyright (c) 1997 Per Fogelstrom
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: archtype.h,v 1.6 2007/08/09 11:23:32 katta
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ARCHTYPE_H_
+#define _MACHINE_ARCHTYPE_H_
+/*
+ * Define architectural identities for the different MIPS machines.
+ */
+
+/*
+ * FREEBSD_DEVELOPERS_FIXME
+ * Define constants for the supported MIPS CPU's
+ */
+#define MIPS_CLASS_UNKNOWN 0x00
+
+#endif /* !_MACHINE_ARCHTYPE_H_ */
diff --git a/sys/mips/include/asm.h b/sys/mips/include/asm.h
new file mode 100644
index 0000000..0df221e
--- /dev/null
+++ b/sys/mips/include/asm.h
@@ -0,0 +1,553 @@
+/* $NetBSD: asm.h,v 1.29 2000/12/14 21:29:51 jeffs Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)machAsmDefs.h 8.1 (Berkeley) 6/10/93
+ * JNPR: asm.h,v 1.10 2007/08/09 11:23:32 katta
+ * $FreeBSD$
+ */
+
+/*
+ * machAsmDefs.h --
+ *
+ * Macros used when writing assembler programs.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
+ * v 1.2 89/08/15 18:28:24 rab Exp SPRITE (DECWRL)
+ */
+
+#ifndef _MACHINE_ASM_H_
+#define _MACHINE_ASM_H_
+
+#ifndef NO_REG_DEFS
+#include <machine/regdef.h>
+#endif
+#include <machine/endian.h>
+
+#undef __FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s) .ident s
+#else
+#define __FBSDID(s) /* nothing */
+#endif
+
+/*
+ * Define -pg profile entry code.
+ * It must always be assembled noreorder and must never use a macro instruction.
+ * The final addiu to t9 must always equal the size of this _KERN_MCOUNT.
+ */
+#define _KERN_MCOUNT \
+ .set push; \
+ .set noreorder; \
+ .set noat; \
+ subu sp,sp,16; \
+ sw t9,12(sp); \
+ move AT,ra; \
+ lui t9,%hi(_mcount); \
+ addiu t9,t9,%lo(_mcount); \
+ jalr t9; \
+ nop; \
+ lw t9,4(sp); \
+ addiu sp,sp,8; \
+ addiu t9,t9,40; \
+ .set pop;
+
+#ifdef GPROF
+#define MCOUNT _KERN_MCOUNT
+#else
+#define MCOUNT
+#endif
+
+#define _C_LABEL(x) x
+
+/*
+ * Endian-independent assembly-code aliases for unaligned memory accesses.
+ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define LWLO lwl
+#define LWHI lwr
+#define SWLO swl
+#define SWHI swr
+#endif
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define LWLO lwr
+#define LWHI lwl
+#define SWLO swr
+#define SWHI swl
+#endif
+
+#ifdef USE_AENT
+#define AENT(x) \
+ .aent x, 0
+#else
+#define AENT(x)
+#endif
+
+/*
+ * WARN_REFERENCES: create a warning if the specified symbol is referenced
+ */
+#define WARN_REFERENCES(_sym,_msg) \
+ .section .gnu.warning. ## _sym ; .ascii _msg ; .text
+
+/*
+ * These are temp registers whose names can be used in either the old
+ * or new ABI, although they map to different physical registers. In
+ * the old ABI, they map to t4-t7, and in the new ABI, they map to a4-a7.
+ *
+ * Because they overlap with the last 4 arg regs in the new ABI, ta0-ta3
+ * should be used only when we need more than t0-t3.
+ */
+#if defined(__mips_n32) || defined(__mips_n64)
+#define ta0 $8
+#define ta1 $9
+#define ta2 $10
+#define ta3 $11
+#else
+#define ta0 $12
+#define ta1 $13
+#define ta2 $14
+#define ta3 $15
+#endif /* __mips_n32 || __mips_n64 */
+
+#ifdef __ELF__
+# define _C_LABEL(x) x
+#else
+# define _C_LABEL(x) _ ## x
+#endif
+
+/*
+ * WEAK_ALIAS: create a weak alias.
+ */
+#define WEAK_ALIAS(alias,sym) \
+ .weak alias; \
+ alias = sym
+
+/*
+ * STRONG_ALIAS: create a strong alias.
+ */
+#define STRONG_ALIAS(alias,sym) \
+ .globl alias; \
+ alias = sym
+
+#define GLOBAL(sym) \
+ .globl sym; sym:
+
+#define ENTRY(sym) \
+ .text; .globl sym; .ent sym; sym:
+
+#define ASM_ENTRY(sym) \
+ .text; .globl sym; .type sym,@function; sym:
+
+/*
+ * LEAF
+ * A leaf routine:
+ * - calls no other functions,
+ * - never uses any callee-saved register (S0-S8), and
+ * - does not use any local stack storage.
+ */
+#define LEAF(x) \
+ .globl _C_LABEL(x); \
+ .ent _C_LABEL(x), 0; \
+_C_LABEL(x): ; \
+ .frame sp, 0, ra; \
+ MCOUNT
+
+/*
+ * LEAF_NOPROFILE
+ * A non-profiled leaf routine.
+ */
+#define LEAF_NOPROFILE(x) \
+ .globl _C_LABEL(x); \
+ .ent _C_LABEL(x), 0; \
+_C_LABEL(x): ; \
+ .frame sp, 0, ra
+
+/*
+ * XLEAF
+ * declare alternate entry to leaf routine
+ */
+#define XLEAF(x) \
+ .globl _C_LABEL(x); \
+ AENT (_C_LABEL(x)); \
+_C_LABEL(x):
+
+/*
+ * NESTED
+ * A nested routine calls other functions and therefore needs
+ * stack space to save/restore registers.
+ */
+#define NESTED(x, fsize, retpc) \
+ .globl _C_LABEL(x); \
+ .ent _C_LABEL(x), 0; \
+_C_LABEL(x): ; \
+ .frame sp, fsize, retpc; \
+ MCOUNT
+
+/*
+ * NESTED_NOPROFILE(x)
+ * A non-profiled nested routine.
+ */
+#define NESTED_NOPROFILE(x, fsize, retpc) \
+ .globl _C_LABEL(x); \
+ .ent _C_LABEL(x), 0; \
+_C_LABEL(x): ; \
+ .frame sp, fsize, retpc
+
+/*
+ * XNESTED
+ * declare alternate entry point to nested routine.
+ */
+#define XNESTED(x) \
+ .globl _C_LABEL(x); \
+ AENT (_C_LABEL(x)); \
+_C_LABEL(x):
+
+/*
+ * END
+ * Mark end of a procedure.
+ */
+#define END(x) \
+ .end _C_LABEL(x)
+
+/*
+ * IMPORT -- import external symbol
+ */
+#define IMPORT(sym, size) \
+ .extern _C_LABEL(sym),size
+
+/*
+ * EXPORT -- export definition of symbol
+ */
+#define EXPORT(x) \
+ .globl _C_LABEL(x); \
+_C_LABEL(x):
+
+/*
+ * VECTOR
+ * exception vector entrypoint
+ * XXX: regmask should be used to generate .mask
+ */
+#define VECTOR(x, regmask) \
+ .ent _C_LABEL(x),0; \
+ EXPORT(x); \
+
+#define VECTOR_END(x) \
+ EXPORT(x ## End); \
+ END(x)
+
+#define KSEG0TEXT_START
+#define KSEG0TEXT_END
+#define KSEG0TEXT .text
+
+/*
+ * Macros to panic and printf from assembly language.
+ */
+#define PANIC(msg) \
+ la a0, 9f; \
+ jal _C_LABEL(panic); \
+ nop; \
+ MSG(msg)
+
+#define PANIC_KSEG0(msg, reg) PANIC(msg)
+
+#define PRINTF(msg) \
+ la a0, 9f; \
+ jal _C_LABEL(printf); \
+ nop; \
+ MSG(msg)
+
+#define MSG(msg) \
+ .rdata; \
+9: .asciiz msg; \
+ .text
+
+#define ASMSTR(str) \
+ .asciiz str; \
+ .align 3
+
+/*
+ * Call ast if required
+ */
+#define DO_AST \
+44: \
+ la s0, _C_LABEL(disableintr) ;\
+ jalr s0 ;\
+ nop ;\
+ GET_CPU_PCPU(s1) ;\
+ lw s3, PC_CURPCB(s1) ;\
+ lw s1, PC_CURTHREAD(s1) ;\
+ lw s2, TD_FLAGS(s1) ;\
+ li s0, TDF_ASTPENDING | TDF_NEEDRESCHED;\
+ and s2, s0 ;\
+ la s0, _C_LABEL(enableintr) ;\
+ jalr s0 ;\
+ nop ;\
+ beq s2, zero, 4f ;\
+ nop ;\
+ la s0, _C_LABEL(ast) ;\
+ jalr s0 ;\
+ addu a0, s3, U_PCB_REGS ;\
+ j 44b ;\
+ nop ;\
+4:
+
+
+/*
+ * XXX retain dialects XXX
+ */
+#define ALEAF(x) XLEAF(x)
+#define NLEAF(x) LEAF_NOPROFILE(x)
+#define NON_LEAF(x, fsize, retpc) NESTED(x, fsize, retpc)
+#define NNON_LEAF(x, fsize, retpc) NESTED_NOPROFILE(x, fsize, retpc)
+
+/*
+ * standard callframe {
+ * register_t cf_args[4]; arg0 - arg3
+ * register_t cf_sp; frame pointer
+ * register_t cf_ra; return address
+ * };
+ */
+#define CALLFRAME_SIZ (4 * (4 + 2))
+#define CALLFRAME_SP (4 * 4)
+#define CALLFRAME_RA (4 * 5)
+#define START_FRAME CALLFRAME_SIZ
+
+/*
+ * While it would be nice to be compatible with the SGI
+ * REG_L and REG_S macros, because they do not take parameters, it
+ * is impossible to use them with the _MIPS_SIM_ABIX32 model.
+ *
+ * These macros hide the use of mips3 instructions from the
+ * assembler to prevent the assembler from generating 64-bit style
+ * ABI calls.
+ */
+
+#if !defined(_MIPS_BSD_API) || _MIPS_BSD_API == _MIPS_BSD_API_LP32
+#define REG_L lw
+#define REG_S sw
+#define REG_LI li
+#define REG_PROLOGUE .set push
+#define REG_EPILOGUE .set pop
+#define SZREG 4
+#else
+#define REG_L ld
+#define REG_S sd
+#define REG_LI dli
+#define REG_PROLOGUE .set push ; .set mips3
+#define REG_EPILOGUE .set pop
+#define SZREG 8
+#endif /* _MIPS_BSD_API */
+
+#define mfc0_macro(data, spr) \
+ __asm __volatile ("mfc0 %0, $%1" \
+ : "=r" (data) /* outputs */ \
+ : "i" (spr)); /* inputs */
+
+#define mtc0_macro(data, spr) \
+ __asm __volatile ("mtc0 %0, $%1" \
+ : /* outputs */ \
+ : "r" (data), "i" (spr)); /* inputs */
+
+#define cfc0_macro(data, spr) \
+ __asm __volatile ("cfc0 %0, $%1" \
+ : "=r" (data) /* outputs */ \
+ : "i" (spr)); /* inputs */
+
+#define ctc0_macro(data, spr) \
+ __asm __volatile ("ctc0 %0, $%1" \
+ : /* outputs */ \
+ : "r" (data), "i" (spr)); /* inputs */
+
+
+#define lbu_macro(data, addr) \
+ __asm __volatile ("lbu %0, 0x0(%1)" \
+ : "=r" (data) /* outputs */ \
+ : "r" (addr)); /* inputs */
+
+#define lb_macro(data, addr) \
+ __asm __volatile ("lb %0, 0x0(%1)" \
+ : "=r" (data) /* outputs */ \
+ : "r" (addr)); /* inputs */
+
+#define lwl_macro(data, addr) \
+ __asm __volatile ("lwl %0, 0x0(%1)" \
+ : "=r" (data) /* outputs */ \
+ : "r" (addr)); /* inputs */
+
+#define lwr_macro(data, addr) \
+ __asm __volatile ("lwr %0, 0x0(%1)" \
+ : "=r" (data) /* outputs */ \
+ : "r" (addr)); /* inputs */
+
+#define ldl_macro(data, addr) \
+ __asm __volatile ("ldl %0, 0x0(%1)" \
+ : "=r" (data) /* outputs */ \
+ : "r" (addr)); /* inputs */
+
+#define ldr_macro(data, addr) \
+ __asm __volatile ("ldr %0, 0x0(%1)" \
+ : "=r" (data) /* outputs */ \
+ : "r" (addr)); /* inputs */
+
+#define sb_macro(data, addr) \
+ __asm __volatile ("sb %0, 0x0(%1)" \
+ : /* outputs */ \
+ : "r" (data), "r" (addr)); /* inputs */
+
+#define swl_macro(data, addr) \
+ __asm __volatile ("swl %0, 0x0(%1)" \
+ : /* outputs */ \
+ : "r" (data), "r" (addr)); /* inputs */
+
+#define swr_macro(data, addr) \
+ __asm __volatile ("swr %0, 0x0(%1)" \
+ : /* outputs */ \
+ : "r" (data), "r" (addr)); /* inputs */
+
+#define sdl_macro(data, addr) \
+ __asm __volatile ("sdl %0, 0x0(%1)" \
+ : /* outputs */ \
+ : "r" (data), "r" (addr)); /* inputs */
+
+#define sdr_macro(data, addr) \
+ __asm __volatile ("sdr %0, 0x0(%1)" \
+ : /* outputs */ \
+ : "r" (data), "r" (addr)); /* inputs */
+
+#define mfgr_macro(data, gr) \
+ __asm __volatile ("move %0, $%1" \
+ : "=r" (data) /* outputs */ \
+ : "i" (gr)); /* inputs */
+
+#define dmfc0_macro(data, spr) \
+ __asm __volatile ("dmfc0 %0, $%1" \
+ : "=r" (data) /* outputs */ \
+ : "i" (spr)); /* inputs */
+
+#define dmtc0_macro(data, spr, sel) \
+ __asm __volatile ("dmtc0 %0, $%1, %2" \
+ : /* no outputs */ \
+ : "r" (data), "i" (spr), "i" (sel)); /* inputs */
+
+/*
+ * The DYNAMIC_STATUS_MASK option adds an additional masking operation
+ * when updating the hardware interrupt mask in the status register.
+ *
+ * This is useful for platforms that need to mask interrupts at
+ * run time based on motherboard configuration, or to handle
+ * slowly clearing interrupts.
+ *
+ * XXX this is only currently implemented for mips3.
+ */
+#ifdef MIPS_DYNAMIC_STATUS_MASK
+#define DYNAMIC_STATUS_MASK(sr,scratch) \
+ lw scratch, mips_dynamic_status_mask; \
+ and sr, sr, scratch
+
+#define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1) \
+ ori sr, (MIPS_INT_MASK | MIPS_SR_INT_IE); \
+ DYNAMIC_STATUS_MASK(sr,scratch1)
+#else
+#define DYNAMIC_STATUS_MASK(sr,scratch)
+#define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
+#endif
+
+#ifdef SMP
+ /*
+ * FREEBSD_DEVELOPERS_FIXME
+ * In multiprocessor case, store/retrieve the pcpu structure
+ * address for current CPU in scratch register for fast access.
+ */
+#error "Write GET_CPU_PCPU for SMP"
+#else
+#define GET_CPU_PCPU(reg) \
+ lw reg, _C_LABEL(pcpup);
+#endif
+
+/*
+ * Description of the setjmp buffer
+ *
+ * word 0	magic number (dependent on creator)
+ * 1 RA
+ * 2 S0
+ * 3 S1
+ * 4 S2
+ * 5 S3
+ * 6 S4
+ * 7 S5
+ * 8 S6
+ * 9 S7
+ * 10 SP
+ * 11 S8
+ * 12	signal mask (dependent on magic)
+ * 13 (con't)
+ * 14 (con't)
+ * 15 (con't)
+ *
+ * The magic number identifies the jmp_buf and
+ * how the buffer was created, as well as providing
+ * a sanity check.
+ *
+ */
+
+#define _JB_MAGIC__SETJMP 0xBADFACED
+#define _JB_MAGIC_SETJMP 0xFACEDBAD
+
+/* Valid for all jmp_buf's */
+
+#define _JB_MAGIC 0
+#define _JB_REG_RA 1
+#define _JB_REG_S0 2
+#define _JB_REG_S1 3
+#define _JB_REG_S2 4
+#define _JB_REG_S3 5
+#define _JB_REG_S4 6
+#define _JB_REG_S5 7
+#define _JB_REG_S6 8
+#define _JB_REG_S7 9
+#define _JB_REG_SP 10
+#define _JB_REG_S8 11
+
+/* Only valid with the _JB_MAGIC_SETJMP magic */
+
+#define _JB_SIGMASK 12
+
+#endif /* !_MACHINE_ASM_H_ */
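A brief sketch of how the coprocessor-0 accessor macros above can be used from C; it needs a MIPS target compiler and this header, CP0 register 12 is the Status register on MIPS32, and MIPS_CP0_STATUS is only an illustrative local name. NO_REG_DEFS is defined so that the assembler register aliases from regdef.h are not pulled into C code.

#include <stdint.h>

#define NO_REG_DEFS	1		/* skip <machine/regdef.h> */
#include <machine/asm.h>		/* mfc0_macro()/mtc0_macro() above */

#define MIPS_CP0_STATUS	12		/* CP0 $12 is the Status register */

static inline uint32_t
mips_rd_status(void)
{
	uint32_t sr;

	/* Expands to a single "mfc0 %0, $12" with sr as the output. */
	mfc0_macro(sr, MIPS_CP0_STATUS);
	return (sr);
}

static inline void
mips_wr_status(uint32_t sr)
{
	/* Expands to a single "mtc0 %0, $12" with sr as the input. */
	mtc0_macro(sr, MIPS_CP0_STATUS);
}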
diff --git a/sys/mips/include/asmacros.h b/sys/mips/include/asmacros.h
new file mode 100644
index 0000000..c2cc13e
--- /dev/null
+++ b/sys/mips/include/asmacros.h
@@ -0,0 +1,195 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASMACROS_H_
+#define _MACHINE_ASMACROS_H_
+
+#include <sys/cdefs.h>
+
+#if 0
+/* XXX too much duplication in various asm*.h's. */
+
+/*
+ * CNAME and HIDENAME manage the relationship between symbol names in C
+ * and the equivalent assembly language names. CNAME is given a name as
+ * it would be used in a C program. It expands to the equivalent assembly
+ * language name. HIDENAME is given an assembly-language name, and expands
+ * to a possibly-modified form that will be invisible to C programs.
+ */
+#define CNAME(csym) csym
+#define HIDENAME(asmsym) .asmsym
+
+#define ALIGN_DATA .p2align 3 /* 8 byte alignment, zero filled */
+#ifdef GPROF
+#define ALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
+#else
+#define ALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
+#endif
+#define SUPERALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
+
+#define GEN_ENTRY(name) ALIGN_TEXT; .globl CNAME(name); \
+ .type CNAME(name),@function; CNAME(name):
+#define NON_GPROF_ENTRY(name) GEN_ENTRY(name)
+#define NON_GPROF_RET .byte 0xc3 /* opcode for `ret' */
+
+#ifdef GPROF
+/*
+ * __mcount is like [.]mcount except that doesn't require its caller to set
+ * up a frame pointer. It must be called before pushing anything onto the
+ * stack. gcc should eventually generate code to call __mcount in most
+ * cases. This would make -pg in combination with -fomit-frame-pointer
+ * useful. gcc has a configuration variable PROFILE_BEFORE_PROLOGUE to
+ * allow profiling before setting up the frame pointer, but this is
+ * inadequate for good handling of special cases, e.g., -fpic works best
+ * with profiling after the prologue.
+ *
+ * [.]mexitcount is a new function to support non-statistical profiling if an
+ * accurate clock is available. For C sources, calls to it are generated
+ * by the FreeBSD extension `-mprofiler-epilogue' to gcc. It is best to
+ * call [.]mexitcount at the end of a function like the MEXITCOUNT macro does,
+ * but gcc currently generates calls to it at the start of the epilogue to
+ * avoid problems with -fpic.
+ *
+ * [.]mcount and __mcount may clobber the call-used registers and %ef.
+ * [.]mexitcount may clobber %ecx and %ef.
+ *
+ * Cross-jumping makes non-statistical profiling timing more complicated.
+ * It is handled in many cases by calling [.]mexitcount before jumping. It
+ * is handled for conditional jumps using CROSSJUMP() and CROSSJUMP_LABEL().
+ * It is handled for some fault-handling jumps by not sharing the exit
+ * routine.
+ *
+ * ALTENTRY() must be before a corresponding ENTRY() so that it can jump to
+ * the main entry point. Note that alt entries are counted twice. They
+ * have to be counted as ordinary entries for gprof to get the call times
+ * right for the ordinary entries.
+ *
+ * High local labels are used in macros to avoid clashes with local labels
+ * in functions.
+ *
+ * Ordinary `ret' is used instead of a macro `RET' because there are a lot
+ * of `ret's. 0xc3 is the opcode for `ret' (`#define ret ... ret' can't
+ * be used because this file is sometimes preprocessed in traditional mode).
+ * `ret' clobbers eflags but this doesn't matter.
+ */
+#define ALTENTRY(name) GEN_ENTRY(name) ; MCOUNT ; MEXITCOUNT ; jmp 9f
+#define CROSSJUMP(jtrue, label, jfalse) \
+ jfalse 8f; MEXITCOUNT; jmp __CONCAT(to,label); 8:
+#define CROSSJUMPTARGET(label) \
+ ALIGN_TEXT; __CONCAT(to,label): ; MCOUNT; jmp label
+#define ENTRY(name) GEN_ENTRY(name) ; 9: ; MCOUNT
+#define FAKE_MCOUNT(caller) pushq caller ; call __mcount ; popq %rcx
+#define MCOUNT call __mcount
+#define MCOUNT_LABEL(name) GEN_ENTRY(name) ; nop ; ALIGN_TEXT
+#define MEXITCOUNT call HIDENAME(mexitcount)
+#define ret MEXITCOUNT ; NON_GPROF_RET
+
+#else /* !GPROF */
+/*
+ * ALTENTRY() has to align because it is before a corresponding ENTRY().
+ * ENTRY() has to align too because there may be no ALTENTRY() before it.
+ * If there is a previous ALTENTRY() then the alignment code for ENTRY()
+ * is empty.
+ */
+#define ALTENTRY(name) GEN_ENTRY(name)
+#define CROSSJUMP(jtrue, label, jfalse) jtrue label
+#define CROSSJUMPTARGET(label)
+#define ENTRY(name) GEN_ENTRY(name)
+#define FAKE_MCOUNT(caller)
+#define MCOUNT
+#define MCOUNT_LABEL(name)
+#define MEXITCOUNT
+#endif /* GPROF */
+
+#ifdef LOCORE
+/*
+ * Convenience macro for declaring interrupt entry points.
+ */
+#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(X,name); \
+ .type __CONCAT(X,name),@function; __CONCAT(X,name):
+
+/*
+ * Macros to create and destroy a trap frame.
+ */
+#define PUSH_FRAME \
+ subq $TF_RIP,%rsp ; /* skip dummy tf_err and tf_trapno */ \
+ testb $SEL_RPL_MASK,TF_CS(%rsp) ; /* come from kernel? */ \
+ jz 1f ; /* Yes, dont swapgs again */ \
+ swapgs ; \
+1: movq %rdi,TF_RDI(%rsp) ; \
+ movq %rsi,TF_RSI(%rsp) ; \
+ movq %rdx,TF_RDX(%rsp) ; \
+ movq %rcx,TF_RCX(%rsp) ; \
+ movq %r8,TF_R8(%rsp) ; \
+ movq %r9,TF_R9(%rsp) ; \
+ movq %rax,TF_RAX(%rsp) ; \
+ movq %rbx,TF_RBX(%rsp) ; \
+ movq %rbp,TF_RBP(%rsp) ; \
+ movq %r10,TF_R10(%rsp) ; \
+ movq %r11,TF_R11(%rsp) ; \
+ movq %r12,TF_R12(%rsp) ; \
+ movq %r13,TF_R13(%rsp) ; \
+ movq %r14,TF_R14(%rsp) ; \
+ movq %r15,TF_R15(%rsp)
+
+#define POP_FRAME \
+ movq TF_RDI(%rsp),%rdi ; \
+ movq TF_RSI(%rsp),%rsi ; \
+ movq TF_RDX(%rsp),%rdx ; \
+ movq TF_RCX(%rsp),%rcx ; \
+ movq TF_R8(%rsp),%r8 ; \
+ movq TF_R9(%rsp),%r9 ; \
+ movq TF_RAX(%rsp),%rax ; \
+ movq TF_RBX(%rsp),%rbx ; \
+ movq TF_RBP(%rsp),%rbp ; \
+ movq TF_R10(%rsp),%r10 ; \
+ movq TF_R11(%rsp),%r11 ; \
+ movq TF_R12(%rsp),%r12 ; \
+ movq TF_R13(%rsp),%r13 ; \
+ movq TF_R14(%rsp),%r14 ; \
+ movq TF_R15(%rsp),%r15 ; \
+ testb $SEL_RPL_MASK,TF_CS(%rsp) ; /* come from kernel? */ \
+ jz 1f ; /* keep kernel GS.base */ \
+ cli ; \
+ swapgs ; \
+1: addq $TF_RIP,%rsp /* skip over tf_err, tf_trapno */
+
+/*
+ * Access per-CPU data.
+ */
+#define PCPU(member) %gs:PC_ ## member
+#define PCPU_ADDR(member, reg) \
+ movq %gs:PC_PRVSPACE, reg ; \
+ addq $PC_ ## member, reg
+
+#endif /* LOCORE */
+#endif /* 0 */
+#endif /* !_MACHINE_ASMACROS_H_ */
diff --git a/sys/mips/include/atomic.h b/sys/mips/include/atomic.h
new file mode 100644
index 0000000..8f0190a
--- /dev/null
+++ b/sys/mips/include/atomic.h
@@ -0,0 +1,441 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+static __inline void
+mips_sync(void)
+{
+ __asm __volatile (".set noreorder\n\t"
+ "sync\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set reorder\n"
+ : : : "memory");
+}
+
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts and SMP safe.
+ */
+
+void atomic_set_8(__volatile uint8_t *, uint8_t);
+void atomic_clear_8(__volatile uint8_t *, uint8_t);
+void atomic_add_8(__volatile uint8_t *, uint8_t);
+void atomic_subtract_8(__volatile uint8_t *, uint8_t);
+
+void atomic_set_16(__volatile uint16_t *, uint16_t);
+void atomic_clear_16(__volatile uint16_t *, uint16_t);
+void atomic_add_16(__volatile uint16_t *, uint16_t);
+void atomic_subtract_16(__volatile uint16_t *, uint16_t);
+
+static __inline void
+atomic_set_32(__volatile uint32_t *p, uint32_t v)
+{
+ uint32_t temp;
+
+ __asm __volatile (
+ "1:\tll %0, %3\n\t" /* load old value */
+ "or %0, %2, %0\n\t" /* calculate new value */
+ "sc %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* spin if failed */
+ : "=&r" (temp), "=m" (*p)
+ : "r" (v), "m" (*p)
+ : "memory");
+
+}
+
+static __inline void
+atomic_clear_32(__volatile uint32_t *p, uint32_t v)
+{
+ uint32_t temp;
+ v = ~v;
+
+ __asm __volatile (
+ "1:\tll %0, %3\n\t" /* load old value */
+ "and %0, %2, %0\n\t" /* calculate new value */
+ "sc %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* spin if failed */
+ : "=&r" (temp), "=m" (*p)
+ : "r" (v), "m" (*p)
+ : "memory");
+}
+
+static __inline void
+atomic_add_32(__volatile uint32_t *p, uint32_t v)
+{
+ uint32_t temp;
+
+ __asm __volatile (
+ "1:\tll %0, %3\n\t" /* load old value */
+ "addu %0, %2, %0\n\t" /* calculate new value */
+ "sc %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* spin if failed */
+ : "=&r" (temp), "=m" (*p)
+ : "r" (v), "m" (*p)
+ : "memory");
+}
+
+static __inline void
+atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
+{
+ uint32_t temp;
+
+ __asm __volatile (
+ "1:\tll %0, %3\n\t" /* load old value */
+ "subu %0, %2\n\t" /* calculate new value */
+ "sc %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* spin if failed */
+ : "=&r" (temp), "=m" (*p)
+ : "r" (v), "m" (*p)
+ : "memory");
+}
+
+static __inline uint32_t
+atomic_readandclear_32(__volatile uint32_t *addr)
+{
+ uint32_t result,temp;
+
+ __asm __volatile (
+ "1:\tll %0,%3\n\t" /* load current value, asserting lock */
+ "li %1,0\n\t" /* value to store */
+ "sc %1,%2\n\t" /* attempt to store */
+ "beqz %1, 1b\n\t" /* if the store failed, spin */
+ : "=&r"(result), "=&r"(temp), "=m" (*addr)
+ : "m" (*addr)
+ : "memory");
+
+ return result;
+}
+
+static __inline uint32_t
+atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
+{
+ uint32_t result,temp;
+
+ __asm __volatile (
+ "1:\tll %0,%3\n\t" /* load current value, asserting lock */
+ "or %1,$0,%4\n\t"
+ "sc %1,%2\n\t" /* attempt to store */
+ "beqz %1, 1b\n\t" /* if the store failed, spin */
+ : "=&r"(result), "=&r"(temp), "=m" (*addr)
+ : "m" (*addr), "r" (value)
+ : "memory");
+
+ return result;
+}
+
+#define ATOMIC_ACQ_REL(NAME, WIDTH) \
+static __inline void \
+atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+{ \
+ atomic_##NAME##_##WIDTH(p, v); \
+ mips_sync(); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+{ \
+ mips_sync(); \
+ atomic_##NAME##_##WIDTH(p, v); \
+}
+
+/* Variants of simple arithmetic with memory barriers. */
+ATOMIC_ACQ_REL(set, 8)
+ATOMIC_ACQ_REL(clear, 8)
+ATOMIC_ACQ_REL(add, 8)
+ATOMIC_ACQ_REL(subtract, 8)
+ATOMIC_ACQ_REL(set, 16)
+ATOMIC_ACQ_REL(clear, 16)
+ATOMIC_ACQ_REL(add, 16)
+ATOMIC_ACQ_REL(subtract, 16)
+ATOMIC_ACQ_REL(set, 32)
+ATOMIC_ACQ_REL(clear, 32)
+ATOMIC_ACQ_REL(add, 32)
+ATOMIC_ACQ_REL(subtract, 32)
+#if 0
+ATOMIC_ACQ_REL(set, 64)
+ATOMIC_ACQ_REL(clear, 64)
+ATOMIC_ACQ_REL(add, 64)
+ATOMIC_ACQ_REL(subtract, 64)
+#endif
+
+#undef ATOMIC_ACQ_REL
+
+/*
+ * We assume that a = b will do atomic loads and stores.
+ */
+#define ATOMIC_STORE_LOAD(WIDTH) \
+static __inline uint##WIDTH##_t \
+atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p) \
+{ \
+ uint##WIDTH##_t v; \
+ \
+ v = *p; \
+ mips_sync(); \
+ return (v); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+{ \
+ mips_sync(); \
+ *p = v; \
+}
+
+ATOMIC_STORE_LOAD(32)
+ATOMIC_STORE_LOAD(64)
+void atomic_store_64 (__volatile uint64_t *, uint64_t *);
+void atomic_load_64 (__volatile uint64_t *, uint64_t *);
+
+#undef ATOMIC_STORE_LOAD
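As a usage illustration of the load-acquire/store-release pairing defined above, a one-word hand-off between two contexts might look like the following sketch; the payload/ready variables and the function names are hypothetical:

#include <sys/types.h>
#include <machine/atomic.h>

static volatile uint32_t payload;
static volatile uint32_t ready;

static void
publish(uint32_t v)
{
	payload = v;
	atomic_store_rel_32(&ready, 1);	/* sync runs before the flag store */
}

static uint32_t
consume(void)
{
	while (atomic_load_acq_32(&ready) == 0)
		continue;		/* sync runs after the flag load */
	return (payload);
}

With this ordering the consumer cannot observe ready == 1 without also observing the earlier write to payload.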
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline uint32_t
+atomic_cmpset_32(__volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+{
+ uint32_t ret;
+
+ __asm __volatile (
+ "1:\tll %0, %4\n\t" /* load old value */
+ "bne %0, %2, 2f\n\t" /* compare */
+ "move %0, %3\n\t" /* value to store */
+ "sc %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* if it failed, spin */
+ "j 3f\n\t"
+ "2:\n\t"
+ "li %0, 0\n\t"
+ "3:\n"
+ : "=&r" (ret), "=m" (*p)
+ : "r" (cmpval), "r" (newval), "m" (*p)
+ : "memory");
+
+ return ret;
+}
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline uint32_t
+atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+{
+ int retval;
+
+ retval = atomic_cmpset_32(p, cmpval, newval);
+ mips_sync();
+ return (retval);
+}
+
+static __inline uint32_t
+atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+{
+ mips_sync();
+ return (atomic_cmpset_32(p, cmpval, newval));
+}
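Since atomic_cmpset_32() returns nonzero only when it actually updated *p, the acquire/release variants above are enough to build a toy test-and-set spinlock; this is a sketch for illustration, not the kernel's real lock primitives:

#include <sys/types.h>
#include <machine/atomic.h>

static void
toy_lock(volatile uint32_t *lock)
{
	/* spin until this CPU is the one that flips the lock 0 -> 1 */
	while (atomic_cmpset_acq_32(lock, 0, 1) == 0)
		continue;
}

static void
toy_unlock(volatile uint32_t *lock)
{
	/* release: stores made while holding the lock drain first */
	atomic_store_rel_32(lock, 0);
}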
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline uint32_t
+atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
+{
+ uint32_t value, temp;
+
+ __asm __volatile (
+ "1:\tll %0, %1\n\t" /* load old value */
+ "addu %2, %3, %0\n\t" /* calculate new value */
+ "sc %2, %1\n\t" /* attempt to store */
+ "beqz %2, 1b\n\t" /* spin if failed */
+ : "=&r" (value), "=m" (*p), "=r" (temp)
+ : "r" (v), "m" (*p));
+ return (value);
+}
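Because the previous value is returned, atomic_fetchadd_32() can hand out unique numbers even under contention; for example, a simple ticket dispenser (illustrative only):

#include <sys/types.h>
#include <machine/atomic.h>

static volatile uint32_t next_ticket;

static uint32_t
take_ticket(void)
{
	/* every caller gets a distinct, monotonically increasing value */
	return (atomic_fetchadd_32(&next_ticket, 1));
}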
+
+/* Operations on chars. */
+#define atomic_set_char atomic_set_8
+#define atomic_set_acq_char atomic_set_acq_8
+#define atomic_set_rel_char atomic_set_rel_8
+#define atomic_clear_char atomic_clear_8
+#define atomic_clear_acq_char atomic_clear_acq_8
+#define atomic_clear_rel_char atomic_clear_rel_8
+#define atomic_add_char atomic_add_8
+#define atomic_add_acq_char atomic_add_acq_8
+#define atomic_add_rel_char atomic_add_rel_8
+#define atomic_subtract_char atomic_subtract_8
+#define atomic_subtract_acq_char atomic_subtract_acq_8
+#define atomic_subtract_rel_char atomic_subtract_rel_8
+
+/* Operations on shorts. */
+#define atomic_set_short atomic_set_16
+#define atomic_set_acq_short atomic_set_acq_16
+#define atomic_set_rel_short atomic_set_rel_16
+#define atomic_clear_short atomic_clear_16
+#define atomic_clear_acq_short atomic_clear_acq_16
+#define atomic_clear_rel_short atomic_clear_rel_16
+#define atomic_add_short atomic_add_16
+#define atomic_add_acq_short atomic_add_acq_16
+#define atomic_add_rel_short atomic_add_rel_16
+#define atomic_subtract_short atomic_subtract_16
+#define atomic_subtract_acq_short atomic_subtract_acq_16
+#define atomic_subtract_rel_short atomic_subtract_rel_16
+
+/* Operations on ints. */
+#define atomic_set_int atomic_set_32
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_set_rel_int atomic_set_rel_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_clear_rel_int atomic_clear_rel_32
+#define atomic_add_int atomic_add_32
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_add_rel_int atomic_add_rel_32
+#define atomic_subtract_int atomic_subtract_32
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+#define atomic_subtract_rel_int atomic_subtract_rel_32
+#define atomic_cmpset_int atomic_cmpset_32
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_load_acq_int atomic_load_acq_32
+#define atomic_store_rel_int atomic_store_rel_32
+#define atomic_readandclear_int atomic_readandclear_32
+#define atomic_readandset_int atomic_readandset_32
+#define atomic_fetchadd_int atomic_fetchadd_32
+
+#ifdef __mips64
+/* Operations on longs. */
+#define atomic_set_long atomic_set_64
+#define atomic_set_acq_long atomic_set_acq_64
+#define atomic_set_rel_long atomic_set_rel_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_clear_acq_long atomic_clear_acq_64
+#define atomic_clear_rel_long atomic_clear_rel_64
+#define atomic_add_long atomic_add_64
+#define atomic_add_acq_long atomic_add_acq_64
+#define atomic_add_rel_long atomic_add_rel_64
+#define atomic_subtract_long atomic_subtract_64
+#define atomic_subtract_acq_long atomic_subtract_acq_64
+#define atomic_subtract_rel_long atomic_subtract_rel_64
+#define atomic_cmpset_long atomic_cmpset_64
+#define atomic_cmpset_acq_long atomic_cmpset_acq_64
+#define atomic_cmpset_rel_long atomic_cmpset_rel_64
+#define atomic_load_acq_long atomic_load_acq_64
+#define atomic_store_rel_long atomic_store_rel_64
+#define atomic_fetchadd_long atomic_fetchadd_64
+#define atomic_readandclear_long atomic_readandclear_64
+
+/* Operations on pointers. */
+#define atomic_set_ptr atomic_set_64
+#define atomic_set_acq_ptr atomic_set_acq_64
+#define atomic_set_rel_ptr atomic_set_rel_64
+#define atomic_clear_ptr atomic_clear_64
+#define atomic_clear_acq_ptr atomic_clear_acq_64
+#define atomic_clear_rel_ptr atomic_clear_rel_64
+#define atomic_add_ptr atomic_add_64
+#define atomic_add_acq_ptr atomic_add_acq_64
+#define atomic_add_rel_ptr atomic_add_rel_64
+#define atomic_subtract_ptr atomic_subtract_64
+#define atomic_subtract_acq_ptr atomic_subtract_acq_64
+#define atomic_subtract_rel_ptr atomic_subtract_rel_64
+#define atomic_cmpset_ptr atomic_cmpset_64
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_64
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_64
+#define atomic_load_acq_ptr atomic_load_acq_64
+#define atomic_store_rel_ptr atomic_store_rel_64
+#define atomic_readandclear_ptr atomic_readandclear_64
+
+#else /* __mips64 */
+
+/* Operations on longs. */
+#define atomic_set_long atomic_set_32
+#define atomic_set_acq_long atomic_set_acq_32
+#define atomic_set_rel_long atomic_set_rel_32
+#define atomic_clear_long atomic_clear_32
+#define atomic_clear_acq_long atomic_clear_acq_32
+#define atomic_clear_rel_long atomic_clear_rel_32
+#define atomic_add_long(p, v) \
+ atomic_add_32((volatile u_int *)(p), (u_int)(v))
+#define atomic_add_acq_long atomic_add_acq_32
+#define atomic_add_rel_long atomic_add_rel_32
+#define atomic_subtract_long(p, v) \
+ atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
+#define atomic_subtract_acq_long atomic_subtract_acq_32
+#define atomic_subtract_rel_long atomic_subtract_rel_32
+#define atomic_cmpset_long atomic_cmpset_32
+#define atomic_cmpset_acq_long(p, cmpval, newval) \
+ atomic_cmpset_acq_32((volatile u_int *)(p), \
+ (u_int)(cmpval), (u_int)(newval))
+#define atomic_cmpset_rel_long(p, cmpval, newval) \
+ atomic_cmpset_rel_32((volatile u_int *)(p), \
+ (u_int)(cmpval), (u_int)(newval))
+#define atomic_load_acq_long atomic_load_acq_32
+#define atomic_store_rel_long atomic_store_rel_32
+#define atomic_fetchadd_long(p, v) \
+ atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
+#define atomic_readandclear_long atomic_readandclear_32
+
+/* Operations on pointers. */
+#define atomic_set_ptr atomic_set_32
+#define atomic_set_acq_ptr atomic_set_acq_32
+#define atomic_set_rel_ptr atomic_set_rel_32
+#define atomic_clear_ptr atomic_clear_32
+#define atomic_clear_acq_ptr atomic_clear_acq_32
+#define atomic_clear_rel_ptr atomic_clear_rel_32
+#define atomic_add_ptr atomic_add_32
+#define atomic_add_acq_ptr atomic_add_acq_32
+#define atomic_add_rel_ptr atomic_add_rel_32
+#define atomic_subtract_ptr atomic_subtract_32
+#define atomic_subtract_acq_ptr atomic_subtract_acq_32
+#define atomic_subtract_rel_ptr atomic_subtract_rel_32
+#define atomic_cmpset_ptr atomic_cmpset_32
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_32
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_32
+#define atomic_load_acq_ptr atomic_load_acq_32
+#define atomic_store_rel_ptr atomic_store_rel_32
+#define atomic_readandclear_ptr atomic_readandclear_32
+#endif /* __mips64 */
+
+#endif /* ! _MACHINE_ATOMIC_H_ */
diff --git a/sys/mips/include/bootinfo.h b/sys/mips/include/bootinfo.h
new file mode 100644
index 0000000..a710a38
--- /dev/null
+++ b/sys/mips/include/bootinfo.h
@@ -0,0 +1,142 @@
+/*-
+ * Copyright (C) 1994 by Rodney W. Grimes, Milwaukie, Oregon 97222
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Rodney W. Grimes.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RODNEY W. GRIMES ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL RODNEY W. GRIMES BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BOOTINFO_H_
+#define _MACHINE_BOOTINFO_H_
+
+/* Only change the version number if you break compatibility. */
+#define BOOTINFO_VERSION 1
+
+#define N_BIOS_GEOM 8
+
+#define MIPS_BOOTINFO_MAGIC 0xCDEACDEA
+
+/* Extended OLV bootinfo struct. The data area includes a list of named
+ OIDs and associated data values. The format is:
+
+ NUL-terminated dotted-string name
+ 2 byte length, in big-endian order
+ LENGTH bytes of data
+ [...]
+
+ The two magic fields are used to guard against other bootloaders that
+ may place other sorts of data here. */
+
+struct bootinfo_ext {
+#define BOOTINFO_EXT_MAGIC1 0x55aa00ff
+ unsigned int magic1;
+ unsigned char *data;
+ unsigned int size;
+#define BOOTINFO_EXT_MAGIC2 0x32719187
+ unsigned int magic2;
+};
+
+#define BOOTINFO_EXT_MAX_SIZE 16384
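A sketch of walking the record list described above; only the layout (NUL-terminated dotted name, 2-byte big-endian length, then that many data bytes, guarded by the two magic fields) comes from this header, while the walker function itself is hypothetical:

#include <sys/types.h>
#include <machine/bootinfo.h>

static void
bootinfo_ext_walk(struct bootinfo_ext *ext)
{
	unsigned char *p, *end;
	const char *name;
	unsigned int len;

	if (ext->magic1 != BOOTINFO_EXT_MAGIC1 ||
	    ext->magic2 != BOOTINFO_EXT_MAGIC2)
		return;				/* not laid out by our loader */
	p = ext->data;
	end = ext->data + ext->size;
	while (p < end && *p != '\0') {
		name = (const char *)p;
		while (*p != '\0')		/* skip the dotted-string name */
			p++;
		p++;				/* and its terminating NUL */
		len = (p[0] << 8) | p[1];	/* 2-byte big-endian length */
		p += 2;
		/* 'len' bytes of data for 'name' start here */
		p += len;
		(void)name;
	}
}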
+
+/*
+ * A zero bootinfo field often means that there is no info available.
+ * Flags are used to indicate the validity of fields where zero is a
+ * normal value.
+ */
+struct bootinfo {
+ u_int32_t bi_version;
+ u_int32_t bi_kernelname; /* represents a char * */
+ u_int32_t bi_nfs_diskless; /* struct nfs_diskless * */
+ /* End of fields that are always present. */
+#define bi_endcommon bi_n_bios_used
+ u_int32_t bi_n_bios_used;
+ u_int32_t bi_bios_geom[N_BIOS_GEOM];
+ u_int32_t bi_size;
+ u_int8_t bi_memsizes_valid;
+ u_int8_t bi_bios_dev; /* bootdev BIOS unit number */
+ u_int8_t bi_pad[2];
+ u_int32_t bi_basemem;
+ u_int32_t bi_extmem;
+ u_int32_t bi_symtab; /* struct symtab * */
+ u_int32_t bi_esymtab; /* struct symtab * */
+ /* Items below only from advanced bootloader */
+ u_int32_t bi_kernend; /* end of kernel space */
+ u_int32_t bi_envp; /* environment */
+ u_int32_t bi_modulep; /* preloaded modules */
+};
+
+#ifdef _KERNEL
+extern struct bootinfo bootinfo;
+#endif
+
+/*
+ * Constants for converting boot-style device number to type,
+ * adaptor (uba, mba, etc), unit number and partition number.
+ * Type (== major device number) is in the low byte
+ * for backward compatibility. Except for that of the "magic
+ * number", each mask applies to the shifted value.
+ * Format:
+ * (4) (4) (4) (4) (8) (8)
+ * --------------------------------
+ * |MA | AD| CT| UN| PART | TYPE |
+ * --------------------------------
+ */
+#define B_ADAPTORSHIFT 24
+#define B_ADAPTORMASK 0x0f
+#define B_ADAPTOR(val) (((val) >> B_ADAPTORSHIFT) & B_ADAPTORMASK)
+#define B_CONTROLLERSHIFT 20
+#define B_CONTROLLERMASK 0xf
+#define B_CONTROLLER(val) (((val)>>B_CONTROLLERSHIFT) & B_CONTROLLERMASK)
+#define B_SLICESHIFT 20
+#define B_SLICEMASK 0xff
+#define B_SLICE(val) (((val)>>B_SLICESHIFT) & B_SLICEMASK)
+#define B_UNITSHIFT 16
+#define B_UNITMASK 0xf
+#define B_UNIT(val) (((val) >> B_UNITSHIFT) & B_UNITMASK)
+#define B_PARTITIONSHIFT 8
+#define B_PARTITIONMASK 0xff
+#define B_PARTITION(val) (((val) >> B_PARTITIONSHIFT) & B_PARTITIONMASK)
+#define B_TYPESHIFT 0
+#define B_TYPEMASK 0xff
+#define B_TYPE(val) (((val) >> B_TYPESHIFT) & B_TYPEMASK)
+
+#define B_MAGICMASK 0xf0000000
+#define B_DEVMAGIC 0xa0000000
+
+#define MAKEBOOTDEV(type, adaptor, controller, unit, partition) \
+ (((type) << B_TYPESHIFT) | ((adaptor) << B_ADAPTORSHIFT) | \
+ ((controller) << B_CONTROLLERSHIFT) | ((unit) << B_UNITSHIFT) | \
+ ((partition) << B_PARTITIONSHIFT) | B_DEVMAGIC)
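For example, a hypothetical boot device of type 4, unit 1, partition 2 (adaptor and controller 0) encodes and decodes as follows; the field values are purely illustrative:

#include <sys/types.h>
#include <machine/bootinfo.h>

static int
bootdev_demo(void)
{
	u_int32_t bootdev;

	bootdev = MAKEBOOTDEV(4, 0, 0, 1, 2);	/* 0xa0010204 */
	if ((bootdev & B_MAGICMASK) != B_DEVMAGIC)
		return (0);			/* not produced by MAKEBOOTDEV */
	return (B_TYPE(bootdev) == 4 &&		/* type in the low byte */
	    B_UNIT(bootdev) == 1 &&
	    B_PARTITION(bootdev) == 2);
}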
+
+#define BASE_SLICE 2
+#define COMPATIBILITY_SLICE 0
+#define MAX_SLICES 32
+#define WHOLE_DISK_SLICE 1
+
+#endif /* !_MACHINE_BOOTINFO_H_ */
diff --git a/sys/mips/include/bswap.h b/sys/mips/include/bswap.h
new file mode 100644
index 0000000..61ace98
--- /dev/null
+++ b/sys/mips/include/bswap.h
@@ -0,0 +1,11 @@
+/* $NetBSD: bswap.h,v 1.2 1999/08/21 05:39:53 simonb Exp $ */
+/* JNPR: bswap.h,v 1.1 2006/08/07 05:38:57 katta */
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_BSWAP_H_
+#define _MACHINE_BSWAP_H_
+
+#define __BSWAP_RENAME
+#include <sys/bswap.h>
+
+#endif /* !_MACHINE_BSWAP_H_ */
diff --git a/sys/mips/include/bus.h b/sys/mips/include/bus.h
new file mode 100644
index 0000000..42ac1df
--- /dev/null
+++ b/sys/mips/include/bus.h
@@ -0,0 +1,909 @@
+/* $NetBSD: bus.h,v 1.12 1997/10/01 08:25:15 fvdl Exp $ */
+/*-
+ * $Id: bus.h,v 1.6 2007/08/09 11:23:32 katta Exp $
+ *
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/bus.h,v 1.5 1999/08/28 00:38:40 peter
+ * $FreeBSD$
+*/
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+#ifdef TARGET_OCTEON
+#include <machine/bus_octeon.h>
+#else
+#include <machine/_bus.h>
+#include <machine/cpufunc.h>
+
+/*
+ * Values for the mips bus space tag, not to be used directly by MI code.
+ */
+#define MIPS_BUS_SPACE_IO 0 /* space is i/o space */
+#define MIPS_BUS_SPACE_MEM 1 /* space is mem space */
+
+
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE 0xFFFFFFFF /* Maximum supported size */
+#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR 0xFFFFFFFF
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+/*
+ * Map a region of device bus space into CPU virtual address space.
+ */
+
+static __inline int bus_space_map(bus_space_tag_t t, bus_addr_t addr,
+ bus_size_t size, int flags, bus_space_handle_t *bshp);
+
+static __inline int
+bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr,
+ bus_size_t size __unused, int flags __unused,
+ bus_space_handle_t *bshp)
+{
+
+ *bshp = addr;
+ return (0);
+}
+
+/*
+ * Unmap a region of device bus space.
+ */
+
+void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t size);
+
+/*
+ * Get a new handle for a subregion of an already-mapped area of bus space.
+ */
+
+int bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp);
+
+/*
+ * Allocate a region of memory that is accessible to devices in bus space.
+ */
+
+int bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart,
+ bus_addr_t rend, bus_size_t size, bus_size_t align,
+ bus_size_t boundary, int flags, bus_addr_t *addrp,
+ bus_space_handle_t *bshp);
+
+/*
+ * Free a region of bus space accessible memory.
+ */
+
+void bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t size);
+
+
+/*
+ * Read a 1, 2, 4, or 8 byte quantity from bus space
+ * described by tag/handle/offset.
+ */
+static __inline u_int8_t bus_space_read_1(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int16_t bus_space_read_2(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int32_t bus_space_read_4(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int8_t
+bus_space_read_1(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ return (inb(handle + offset));
+ return (readb(handle + offset));
+}
+
+static __inline u_int16_t
+bus_space_read_2(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ return (inw(handle + offset));
+ return (readw(handle + offset));
+}
+
+static __inline u_int32_t
+bus_space_read_4(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ return (inl(handle + offset));
+ return (readl(handle + offset));
+}
+
+#if 0 /* Cause a link error for bus_space_read_8 */
+#define bus_space_read_8(t, h, o) !!! bus_space_read_8 unimplemented !!!
+#endif
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle/offset and copy into buffer provided.
+ */
+static __inline void bus_space_read_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_read_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ *addr++ = inb(bsh + offset);
+ else
+ while (count--)
+ *addr++ = readb(bsh + offset);
+}
+
+static __inline void
+bus_space_read_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ *addr++ = inw(baddr);
+ else
+ while (count--)
+ *addr++ = readw(baddr);
+}
+
+static __inline void
+bus_space_read_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ *addr++ = inl(baddr);
+ else
+ while (count--)
+ *addr++ = readl(baddr);
+}
+
+#if 0 /* Cause a link error for bus_space_read_multi_8 */
+#define bus_space_read_multi_8 !!! bus_space_read_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle and starting at `offset' and copy into
+ * buffer provided.
+ */
+static __inline void bus_space_read_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr,
+ size_t count);
+
+
+static __inline void
+bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--) {
+ *addr++ = inb(baddr);
+ baddr += 1;
+ }
+ else
+ while (count--) {
+ *addr++ = readb(baddr);
+ baddr += 1;
+ }
+}
+
+static __inline void
+bus_space_read_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--) {
+ *addr++ = inw(baddr);
+ baddr += 2;
+ }
+ else
+ while (count--) {
+ *addr++ = readw(baddr);
+ baddr += 2;
+ }
+}
+
+static __inline void
+bus_space_read_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--) {
+ *addr++ = inl(baddr);
+ baddr += 4;
+ }
+ else
+ while (count--) {
+ *addr++ = readl(baddr);
+ baddr += 4;
+ }
+}
+
+#if 0 /* Cause a link error for bus_space_read_region_8 */
+#define bus_space_read_region_8 !!! bus_space_read_region_8 unimplemented !!!
+#endif
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `value' to bus space
+ * described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value);
+
+static __inline void bus_space_write_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value);
+
+static __inline void bus_space_write_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value);
+
+static __inline void
+bus_space_write_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ outb(bsh + offset, value);
+ else
+ writeb(bsh + offset, value);
+}
+
+static __inline void
+bus_space_write_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ outw(bsh + offset, value);
+ else
+ writew(bsh + offset, value);
+}
+
+static __inline void
+bus_space_write_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value)
+{
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ outl(bsh + offset, value);
+ else
+ writel(bsh + offset, value);
+}
+
+#if 0 /* Cause a link error for bus_space_write_8 */
+#define bus_space_write_8 !!! bus_space_write_8 not implemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
+ * provided to bus space described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int8_t *addr,
+ size_t count);
+static __inline void bus_space_write_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_write_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_write_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ outb(baddr, *addr++);
+ else
+ while (count--)
+ writeb(baddr, *addr++);
+}
+
+static __inline void
+bus_space_write_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ outw(baddr, *addr++);
+ else
+ while (count--)
+ writew(baddr, *addr++);
+}
+
+static __inline void
+bus_space_write_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ outl(baddr, *addr++);
+ else
+ while (count--)
+ writel(baddr, *addr++);
+}
+
+#if 0 /* Cause a link error for bus_space_write_multi_8 */
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ !!! bus_space_write_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer provided
+ * to bus space described by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_write_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int8_t *addr,
+ size_t count);
+static __inline void bus_space_write_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int16_t *addr,
+ size_t count);
+static __inline void bus_space_write_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_write_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--) {
+ outb(baddr, *addr++);
+ baddr += 1;
+ }
+ else
+ while (count--) {
+ writeb(baddr, *addr++);
+ baddr += 1;
+ }
+}
+
+static __inline void
+bus_space_write_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--) {
+ outw(baddr, *addr++);
+ baddr += 2;
+ }
+ else
+ while (count--) {
+ writew(baddr, *addr++);
+ baddr += 2;
+ }
+}
+
+static __inline void
+bus_space_write_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ bus_addr_t baddr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--) {
+ outl(baddr, *addr++);
+ baddr += 4;
+ }
+ else
+ while (count--) {
+ writel(baddr, *addr++);
+ baddr += 4;
+ }
+}
+
+#if 0 /* Cause a link error for bus_space_write_region_8 */
+#define bus_space_write_region_8 \
+ !!! bus_space_write_region_8 unimplemented !!!
+#endif
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle/offset `count' times.
+ */
+
+static __inline void bus_space_set_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int8_t value, size_t count);
+static __inline void bus_space_set_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int16_t value, size_t count);
+static __inline void bus_space_set_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int32_t value, size_t count);
+
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ outb(addr, value);
+ else
+ while (count--)
+ writeb(addr, value);
+}
+
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ outw(addr, value);
+ else
+ while (count--)
+ writew(addr, value);
+}
+
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ while (count--)
+ outl(addr, value);
+ else
+ while (count--)
+ writel(addr, value);
+}
+
+#if 0 /* Cause a link error for bus_space_set_multi_8 */
+#define bus_space_set_multi_8 !!! bus_space_set_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_set_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value,
+ size_t count);
+static __inline void bus_space_set_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value,
+ size_t count);
+static __inline void bus_space_set_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value,
+ size_t count);
+
+static __inline void
+bus_space_set_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ for (; count != 0; count--, addr++)
+ outb(addr, value);
+ else
+ for (; count != 0; count--, addr++)
+ writeb(addr, value);
+}
+
+static __inline void
+bus_space_set_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ for (; count != 0; count--, addr += 2)
+ outw(addr, value);
+ else
+ for (; count != 0; count--, addr += 2)
+ writew(addr, value);
+}
+
+static __inline void
+bus_space_set_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value, size_t count)
+{
+ bus_addr_t addr = bsh + offset;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ for (; count != 0; count--, addr += 4)
+ outl(addr, value);
+ else
+ for (; count != 0; count--, addr += 4)
+ writel(addr, value);
+}
+
+#if 0 /* Cause a link error for bus_space_set_region_8 */
+#define bus_space_set_region_8 !!! bus_space_set_region_8 unimplemented !!!
+#endif
+
+/*
+ * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
+ * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
+ */
+
+static __inline void bus_space_copy_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void
+bus_space_copy_region_1(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ bus_addr_t addr1 = bsh1 + off1;
+ bus_addr_t addr2 = bsh2 + off2;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1++, addr2++)
+ outb(addr2, inb(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += (count - 1), addr2 += (count - 1);
+ count != 0; count--, addr1--, addr2--)
+ outb(addr2, inb(addr1));
+ }
+ } else {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1++, addr2++)
+ writeb(addr2, readb(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += (count - 1), addr2 += (count - 1);
+ count != 0; count--, addr1--, addr2--)
+ writeb(addr2, readb(addr1));
+ }
+ }
+}
+
+static __inline void
+bus_space_copy_region_2(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ bus_addr_t addr1 = bsh1 + off1;
+ bus_addr_t addr2 = bsh2 + off2;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 2, addr2 += 2)
+ outw(addr2, inw(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 2 * (count - 1), addr2 += 2 * (count - 1);
+ count != 0; count--, addr1 -= 2, addr2 -= 2)
+ outw(addr2, inw(addr1));
+ }
+ } else {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 2, addr2 += 2)
+ writew(addr2, readw(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 2 * (count - 1), addr2 += 2 * (count - 1);
+ count != 0; count--, addr1 -= 2, addr2 -= 2)
+ writew(addr2, readw(addr1));
+ }
+ }
+}
+
+static __inline void
+bus_space_copy_region_4(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ bus_addr_t addr1 = bsh1 + off1;
+ bus_addr_t addr2 = bsh2 + off2;
+
+ if (tag == MIPS_BUS_SPACE_IO)
+ {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 4, addr2 += 4)
+ outl(addr2, inl(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 4 * (count - 1), addr2 += 4 * (count - 1);
+ count != 0; count--, addr1 -= 4, addr2 -= 4)
+ outl(addr2, inl(addr1));
+ }
+ } else {
+ if (addr1 >= addr2) {
+ /* src after dest: copy forward */
+ for (; count != 0; count--, addr1 += 4, addr2 += 4)
+ writel(addr2, readl(addr1));
+ } else {
+ /* dest after src: copy backwards */
+ for (addr1 += 4 * (count - 1), addr2 += 4 * (count - 1);
+ count != 0; count--, addr1 -= 4, addr2 -= 4)
+ writel(addr2, readl(addr1));
+ }
+ }
+}
+
+
+#if 0 /* Cause a link error for bus_space_copy_8 */
+#define bus_space_copy_region_8 !!! bus_space_copy_region_8 unimplemented !!!
+#endif
+
+
+/*
+ * Bus read/write barrier methods.
+ *
+ * void bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
+ * bus_size_t offset, bus_size_t len, int flags);
+ *
+ *
+ * Note that the implementation below is currently a no-op stub carried
+ * over from the x86 header: the inline asm is disabled, so it prevents
+ * neither compiler nor hardware reordering.
+ */
+#define BUS_SPACE_BARRIER_READ 0x01 /* force read barrier */
+#define BUS_SPACE_BARRIER_WRITE 0x02 /* force write barrier */
+
+static __inline void
+bus_space_barrier(bus_space_tag_t tag __unused, bus_space_handle_t bsh __unused,
+ bus_size_t offset __unused, bus_size_t len __unused, int flags)
+{
+#if 0
+#ifdef __GNUCLIKE_ASM
+ if (flags & BUS_SPACE_BARRIER_READ)
+ __asm __volatile("lock; addl $0,0(%%rsp)" : : : "memory");
+ else
+ __asm __volatile("" : : : "memory");
+#endif
+#endif
+}
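Putting the map, read/write and barrier pieces together, driver code maps a window once and then funnels every access through the tag/handle pair. In the sketch below the device base address and register offsets are hypothetical; note that because bus_space_map() above simply returns the address it was given, the caller must pass something the CPU can already dereference (e.g. a KSEG1 address):

#include <sys/types.h>
#include <machine/bus.h>

#define DEMO_DEV_BASE	0xbf000900UL	/* hypothetical KSEG1 device window */
#define DEMO_REG_CTRL	0x00		/* hypothetical register offsets */
#define DEMO_REG_STAT	0x08

static uint32_t
demo_poke(void)
{
	bus_space_tag_t t = MIPS_BUS_SPACE_MEM;
	bus_space_handle_t h;
	uint32_t status;

	if (bus_space_map(t, DEMO_DEV_BASE, 0x100, 0, &h) != 0)
		return (0);
	bus_space_write_4(t, h, DEMO_REG_CTRL, 0x1);	/* kick the device */
	bus_space_barrier(t, h, 0, 0x100,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	status = bus_space_read_4(t, h, DEMO_REG_STAT);
	bus_space_unmap(t, h, 0x100);
	return (status);
}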
+
+#ifdef BUS_SPACE_NO_LEGACY
+#undef inb
+#undef outb
+#define inb(a) compiler_error
+#define inw(a) compiler_error
+#define inl(a) compiler_error
+#define outb(a, b) compiler_error
+#define outw(a, b) compiler_error
+#define outl(a, b) compiler_error
+#endif
+
+#include <machine/bus_dma.h>
+
+/*
+ * Stream accesses are the same as normal accesses here; no supported
+ * bus system has an endianness different from the host's.
+ */
+#define bus_space_read_stream_1(t, h, o) bus_space_read_1((t), (h), (o))
+#define bus_space_read_stream_2(t, h, o) bus_space_read_2((t), (h), (o))
+#define bus_space_read_stream_4(t, h, o) bus_space_read_4((t), (h), (o))
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ bus_space_read_multi_1((t), (h), (o), (a), (c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ bus_space_read_multi_2((t), (h), (o), (a), (c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ bus_space_read_multi_4((t), (h), (o), (a), (c))
+
+#define bus_space_write_stream_1(t, h, o, v) \
+ bus_space_write_1((t), (h), (o), (v))
+#define bus_space_write_stream_2(t, h, o, v) \
+ bus_space_write_2((t), (h), (o), (v))
+#define bus_space_write_stream_4(t, h, o, v) \
+ bus_space_write_4((t), (h), (o), (v))
+
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ bus_space_write_multi_1((t), (h), (o), (a), (c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ bus_space_write_multi_2((t), (h), (o), (a), (c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ bus_space_write_multi_4((t), (h), (o), (a), (c))
+
+#define bus_space_set_multi_stream_1(t, h, o, v, c) \
+ bus_space_set_multi_1((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_2(t, h, o, v, c) \
+ bus_space_set_multi_2((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_4(t, h, o, v, c) \
+ bus_space_set_multi_4((t), (h), (o), (v), (c))
+
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ bus_space_read_region_1((t), (h), (o), (a), (c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ bus_space_read_region_2((t), (h), (o), (a), (c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ bus_space_read_region_4((t), (h), (o), (a), (c))
+
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ bus_space_write_region_1((t), (h), (o), (a), (c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ bus_space_write_region_2((t), (h), (o), (a), (c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ bus_space_write_region_4((t), (h), (o), (a), (c))
+
+#define bus_space_set_region_stream_1(t, h, o, v, c) \
+ bus_space_set_region_1((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_2(t, h, o, v, c) \
+ bus_space_set_region_2((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_4(t, h, o, v, c) \
+ bus_space_set_region_4((t), (h), (o), (v), (c))
+
+#define bus_space_copy_region_stream_1(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_1((t), (h1), (o1), (h2), (o2), (c))
+#define bus_space_copy_region_stream_2(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_2((t), (h1), (o1), (h2), (o2), (c))
+#define bus_space_copy_region_stream_4(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_4((t), (h1), (o1), (h2), (o2), (c))
+
+#endif /* !TARGET_OCTEON */
+#endif /* !_MACHINE_BUS_H_ */
diff --git a/sys/mips/include/bus_dma.h b/sys/mips/include/bus_dma.h
new file mode 100644
index 0000000..35dfba2
--- /dev/null
+++ b/sys/mips/include/bus_dma.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2005 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MIPS_BUS_DMA_H_
+#define _MIPS_BUS_DMA_H_
+
+#include <sys/bus_dma.h>
+
+#endif /* _MIPS_BUS_DMA_H_ */
diff --git a/sys/mips/include/bus_octeon.h b/sys/mips/include/bus_octeon.h
new file mode 100644
index 0000000..be538ba
--- /dev/null
+++ b/sys/mips/include/bus_octeon.h
@@ -0,0 +1,883 @@
+/*-
+ * Copyright (c) 2006 Oleksandr Tymoshenko.
+ * Copyright (c) KATO Takenori, 1999.
+ *
+ * All rights reserved. Unpublished rights reserved under the copyright
+ * laws of Japan.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* $NetBSD: bus.h,v 1.12 1997/10/01 08:25:15 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MIPS_BUS_OCTEON_H_
+#define _MIPS_BUS_OCTEON_H_
+
+#include "../../mips32/octeon32/octeon_pcmap_regs.h"
+#include <machine/_bus_octeon.h>
+#include <machine/cpufunc.h>
+
+/*
+ * Values for the mips64 bus space tag, not to be used directly by MI code.
+ */
+#define MIPS_BUS_SPACE_IO 0 /* space is i/o space */
+#define MIPS_BUS_SPACE_MEM 1 /* space is mem space */
+
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR 0xFFFFFFFF
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+/*
+ * Map a region of device bus space into CPU virtual address space.
+ */
+
+static __inline int bus_space_map(bus_space_tag_t t, bus_addr_t addr,
+ bus_size_t size, int flags,
+ bus_space_handle_t *bshp);
+
+static __inline int
+bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr,
+ bus_size_t size __unused, int flags __unused,
+ bus_space_handle_t *bshp)
+{
+
+ *bshp = addr;
+ return (0);
+}
+
+/*
+ * Unmap a region of device bus space.
+ */
+
+static __inline void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t size);
+
+static __inline void
+bus_space_unmap(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused,
+ bus_size_t size __unused)
+{
+}
+
+/*
+ * Get a new handle for a subregion of an already-mapped area of bus space.
+ */
+
+static __inline int bus_space_subregion(bus_space_tag_t t,
+ bus_space_handle_t bsh,
+ bus_size_t offset, bus_size_t size,
+ bus_space_handle_t *nbshp);
+
+static __inline int
+bus_space_subregion(bus_space_tag_t t __unused, bus_space_handle_t bsh,
+ bus_size_t offset, bus_size_t size __unused,
+ bus_space_handle_t *nbshp)
+{
+ *nbshp = bsh + offset;
+ return (0);
+}
+
+/*
+ * Allocate a region of memory that is accessible to devices in bus space.
+ */
+
+int bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart,
+ bus_addr_t rend, bus_size_t size, bus_size_t align,
+ bus_size_t boundary, int flags, bus_addr_t *addrp,
+ bus_space_handle_t *bshp);
+
+/*
+ * Free a region of bus space accessible memory.
+ */
+
+static __inline void bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh,
+ bus_size_t size);
+
+static __inline void
+bus_space_free(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused,
+ bus_size_t size __unused)
+{
+}
+
+
+/*
+ * Read a 1, 2, 4, or 8 byte quantity from bus space
+ * described by tag/handle/offset.
+ */
+static __inline u_int8_t bus_space_read_1(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int16_t bus_space_read_2(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int32_t bus_space_read_4(bus_space_tag_t tag,
+ bus_space_handle_t handle,
+ bus_size_t offset);
+
+static __inline u_int8_t
+bus_space_read_1(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+ uint64_t ret_val;
+ uint64_t oct64_addr;
+
+ oct64_addr = handle + offset;
+ ret_val = oct_read8(oct64_addr);
+ return ((u_int8_t) ret_val);
+}
+
+static __inline u_int16_t
+bus_space_read_2(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+ uint64_t ret_val;
+ uint64_t oct64_addr;
+
+ oct64_addr = handle + offset;
+ ret_val = oct_read16(oct64_addr);
+ return ((u_int16_t) ret_val);
+}
+
+static __inline u_int32_t
+bus_space_read_4(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+ uint64_t ret_val;
+ uint64_t oct64_addr;
+
+ oct64_addr = handle + offset;
+ ret_val = oct_read32(oct64_addr);
+ return ((u_int32_t) ret_val);
+}
+
+
+static __inline u_int64_t
+bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+ uint64_t ret_val;
+ uint64_t oct64_addr;
+
+ oct64_addr = handle + offset;
+ ret_val = oct_read64(oct64_addr);
+ return (ret_val);
+}
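+
+/*
+ * Usage sketch (editorial illustration, not part of the original header):
+ * given a tag/handle pair for a device's register window, a driver reads a
+ * 32-bit register with bus_space_read_4().  EXAMPLE_REG_BASE and
+ * EXAMPLE_REG_OFF are hypothetical constants, not names defined anywhere
+ * in this port.
+ *
+ *	bus_space_tag_t t = MIPS_BUS_SPACE_MEM;
+ *	bus_space_handle_t h;
+ *	uint32_t v;
+ *
+ *	if (bus_space_map(t, EXAMPLE_REG_BASE, 0x100, 0, &h) == 0) {
+ *		v = bus_space_read_4(t, h, EXAMPLE_REG_OFF);
+ *		bus_space_unmap(t, h, 0x100);
+ *	}
+ */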
+
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle/offset and copy into buffer provided.
+ */
+static __inline void bus_space_read_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++, ptr++) {
+ *addr = oct_read8(ptr);
+ }
+}
+
+static __inline void
+bus_space_read_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++, ptr+=2) {
+ *addr = oct_read16(ptr);
+ }
+}
+
+static __inline void
+bus_space_read_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++, ptr+=4) {
+ *addr = oct_read32(ptr);
+ }
+}
+
+static __inline void
+bus_space_read_region_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int64_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, addr++, ptr+=8) {
+ *addr = oct_read64(ptr);
+ }
+}
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle and starting at `offset' and copy into
+ * buffer provided.
+ */
+static __inline void bus_space_read_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_read_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr,
+ size_t count);
+
+
+static __inline void
+bus_space_read_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ *addr = oct_read8(ptr);
+ }
+}
+
+static __inline void
+bus_space_read_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ *addr = oct_read16(ptr);
+ }
+}
+
+static __inline void
+bus_space_read_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ *addr = oct_read32(ptr);
+ }
+}
+
+static __inline void
+bus_space_read_multi_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int64_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ *addr = oct_read64(ptr);
+ }
+}
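+
+/*
+ * Editorial note: the _multi_ routines above deliberately keep accessing
+ * the same bus address, which is what a FIFO-style data register needs,
+ * while the _region_ routines earlier in this file step through
+ * consecutive addresses.  A hypothetical sketch (EXAMPLE_FIFO_OFF and
+ * EXAMPLE_BUF_OFF are made-up offsets):
+ *
+ *	uint8_t buf[64];
+ *
+ *	bus_space_read_multi_1(t, h, EXAMPLE_FIFO_OFF, buf, sizeof(buf));
+ *	bus_space_read_region_1(t, h, EXAMPLE_BUF_OFF, buf, sizeof(buf));
+ */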
+
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `value' to bus space
+ * described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value);
+
+static __inline void bus_space_write_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value);
+
+static __inline void bus_space_write_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value);
+
+static __inline void
+bus_space_write_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value)
+{
+ oct_write8(bsh+offset, value);
+}
+
+static __inline void
+bus_space_write_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value)
+{
+ oct_write16(bsh+offset, value);
+}
+
+static __inline void
+bus_space_write_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value)
+{
+ oct_write32(bsh+offset, value);
+}
+
+static __inline void
+bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int64_t value)
+{
+ oct_write64(bsh+offset, value);
+}
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
+ * provided to bus space described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int8_t *addr,
+ size_t count);
+static __inline void bus_space_write_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int16_t *addr,
+ size_t count);
+
+static __inline void bus_space_write_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_write_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++, ptr++) {
+ oct_write8(ptr, *addr);
+ }
+}
+
+static __inline void
+bus_space_write_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, addr++, ptr+=2) {
+ oct_write16(ptr, *addr);
+ }
+}
+
+static __inline void
+bus_space_write_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, addr++, ptr+=4) {
+ oct_write32(ptr, *addr);
+ }
+}
+
+static __inline void
+bus_space_write_region_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int64_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, addr++, ptr+=8) {
+ oct_write64(ptr, *addr);
+ }
+}
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer provided
+ * to bus space described by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_write_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int8_t *addr,
+ size_t count);
+static __inline void bus_space_write_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int16_t *addr,
+ size_t count);
+static __inline void bus_space_write_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ const u_int32_t *addr,
+ size_t count);
+
+static __inline void
+bus_space_write_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ oct_write8(ptr, *addr);
+ }
+}
+
+static __inline void
+bus_space_write_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ oct_write16(ptr, *addr);
+ }
+}
+
+static __inline void
+bus_space_write_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ oct_write32(ptr, *addr);
+ }
+}
+
+static __inline void
+bus_space_write_multi_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, const u_int64_t *addr, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, addr++) {
+ oct_write64(ptr, *addr);
+ }
+}
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle/offset `count' times.
+ */
+
+static __inline void bus_space_set_multi_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int8_t value, size_t count);
+static __inline void bus_space_set_multi_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int16_t value, size_t count);
+static __inline void bus_space_set_multi_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset,
+ u_int32_t value, size_t count);
+
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--) {
+ oct_write8(ptr, value);
+ }
+}
+
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--) {
+ oct_write16(ptr, value);
+ }
+}
+
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--) {
+ oct_write32(ptr, value);
+ }
+}
+
+static __inline void
+bus_space_set_multi_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int64_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--) {
+ oct_write64(ptr, value);
+ }
+}
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_set_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value,
+ size_t count);
+static __inline void bus_space_set_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value,
+ size_t count);
+static __inline void bus_space_set_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value,
+ size_t count);
+
+static __inline void
+bus_space_set_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int8_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+ for(; count > 0; count--, ptr++) {
+ oct_write8(ptr, value);
+ }
+}
+
+static __inline void
+bus_space_set_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int16_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, ptr+=2) {
+ oct_write16(ptr, value);
+ }
+}
+
+static __inline void
+bus_space_set_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int32_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, ptr+=4) {
+ oct_write32(ptr, value);
+ }
+}
+
+static __inline void
+bus_space_set_region_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, u_int64_t value, size_t count)
+{
+ uint64_t ptr = ((uint64_t) bsh + (uint64_t) offset);
+
+	for(; count > 0; count--, ptr+=8) {
+ oct_write64(ptr, value);
+ }
+}
+
+/*
+ * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
+ * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
+ */
+
+static __inline void bus_space_copy_region_1(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_2(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_4(bus_space_tag_t tag,
+ bus_space_handle_t bsh1,
+ bus_size_t off1,
+ bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count);
+
+static __inline void
+bus_space_copy_region_1(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ uint64_t ptr1 = ((uint64_t) bsh1 + (uint64_t) off1);
+ uint64_t ptr2 = ((uint64_t) bsh2 + (uint64_t) off2);
+ uint8_t val;
+
+ for(; count > 0; count--, ptr1++, ptr2++) {
+ val = oct_read8(ptr1);
+ oct_write8(ptr2, val);
+ }
+}
+
+static __inline void
+bus_space_copy_region_2(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ uint64_t ptr1 = ((uint64_t) bsh1 + (uint64_t) off1);
+ uint64_t ptr2 = ((uint64_t) bsh2 + (uint64_t) off2);
+ uint16_t val;
+
+	for(; count > 0; count--, ptr1+=2, ptr2+=2) {
+ val = oct_read16(ptr1);
+ oct_write16(ptr2, val);
+ }
+}
+
+static __inline void
+bus_space_copy_region_4(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ uint64_t ptr1 = ((uint64_t) bsh1 + (uint64_t) off1);
+ uint64_t ptr2 = ((uint64_t) bsh2 + (uint64_t) off2);
+ uint32_t val;
+
+	for(; count > 0; count--, ptr1+=4, ptr2+=4) {
+ val = oct_read32(ptr1);
+ oct_write32(ptr2, val);
+ }
+}
+
+static __inline void
+bus_space_copy_region_8(bus_space_tag_t tag, bus_space_handle_t bsh1,
+ bus_size_t off1, bus_space_handle_t bsh2,
+ bus_size_t off2, size_t count)
+{
+ uint64_t ptr1 = ((uint64_t) bsh1 + (uint64_t) off1);
+ uint64_t ptr2 = ((uint64_t) bsh2 + (uint64_t) off2);
+ uint64_t val;
+
+	for(; count > 0; count--, ptr1+=8, ptr2+=8) {
+ val = oct_read64(ptr1);
+ oct_write64(ptr2, val);
+ }
+}
+
+/*
+ * Bus read/write barrier methods.
+ *
+ * void bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
+ * bus_size_t offset, bus_size_t len, int flags);
+ *
+ *
+ * On this port both flags are handled the same way: the function below
+ * issues an uncached read of OCTEON_MIO_BOOT_BIST_STAT to force
+ * previously issued I/O accesses to complete before the barrier returns.
+ */
+#define BUS_SPACE_BARRIER_READ 0x01 /* force read barrier */
+#define BUS_SPACE_BARRIER_WRITE 0x02 /* force write barrier */
+
+static __inline void
+bus_space_barrier(bus_space_tag_t tag __unused, bus_space_handle_t bsh __unused,
+ bus_size_t offset __unused, bus_size_t len __unused, int flags)
+{
+#if 0
+#ifdef __GNUCLIKE_ASM
+ if (flags & BUS_SPACE_BARRIER_READ)
+ __asm __volatile("lock; addl $0,0(%%rsp)" : : : "memory");
+ else
+ __asm __volatile("" : : : "memory");
+#endif
+#endif
+ oct_read64(OCTEON_MIO_BOOT_BIST_STAT);
+}
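+
+/*
+ * Editorial sketch (not part of the original header): a driver that needs
+ * a control-register write to reach the device before it proceeds follows
+ * the write with a barrier.  EXAMPLE_CTL_OFF and ctl_val are hypothetical.
+ *
+ *	bus_space_write_4(t, h, EXAMPLE_CTL_OFF, ctl_val);
+ *	bus_space_barrier(t, h, EXAMPLE_CTL_OFF, 4, BUS_SPACE_BARRIER_WRITE);
+ */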
+
+#ifdef BUS_SPACE_NO_LEGACY
+#undef inb
+#undef outb
+#define inb(a) compiler_error
+#define inw(a) compiler_error
+#define inl(a) compiler_error
+#define outb(a, b) compiler_error
+#define outw(a, b) compiler_error
+#define outl(a, b) compiler_error
+#endif
+
+#include <machine/bus_dma.h>
+
+/*
+ * Stream accesses are the same as normal accesses on this platform; there
+ * are no supported bus systems with an endianness different from that of
+ * the host.
+ */
+#define bus_space_read_stream_1(t, h, o) bus_space_read_1((t), (h), (o))
+#define bus_space_read_stream_2(t, h, o) bus_space_read_2((t), (h), (o))
+#define bus_space_read_stream_4(t, h, o) bus_space_read_4((t), (h), (o))
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ bus_space_read_multi_1((t), (h), (o), (a), (c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ bus_space_read_multi_2((t), (h), (o), (a), (c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ bus_space_read_multi_4((t), (h), (o), (a), (c))
+
+#define bus_space_write_stream_1(t, h, o, v) \
+ bus_space_write_1((t), (h), (o), (v))
+#define bus_space_write_stream_2(t, h, o, v) \
+ bus_space_write_2((t), (h), (o), (v))
+#define bus_space_write_stream_4(t, h, o, v) \
+ bus_space_write_4((t), (h), (o), (v))
+
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ bus_space_write_multi_1((t), (h), (o), (a), (c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ bus_space_write_multi_2((t), (h), (o), (a), (c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ bus_space_write_multi_4((t), (h), (o), (a), (c))
+
+#define bus_space_set_multi_stream_1(t, h, o, v, c) \
+ bus_space_set_multi_1((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_2(t, h, o, v, c) \
+ bus_space_set_multi_2((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_4(t, h, o, v, c) \
+ bus_space_set_multi_4((t), (h), (o), (v), (c))
+
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ bus_space_read_region_1((t), (h), (o), (a), (c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ bus_space_read_region_2((t), (h), (o), (a), (c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ bus_space_read_region_4((t), (h), (o), (a), (c))
+
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ bus_space_write_region_1((t), (h), (o), (a), (c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ bus_space_write_region_2((t), (h), (o), (a), (c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ bus_space_write_region_4((t), (h), (o), (a), (c))
+
+#define bus_space_set_region_stream_1(t, h, o, v, c) \
+ bus_space_set_region_1((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_2(t, h, o, v, c) \
+ bus_space_set_region_2((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_4(t, h, o, v, c) \
+ bus_space_set_region_4((t), (h), (o), (v), (c))
+
+#define bus_space_copy_region_stream_1(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_1((t), (h1), (o1), (h2), (o2), (c))
+#define bus_space_copy_region_stream_2(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_2((t), (h1), (o1), (h2), (o2), (c))
+#define bus_space_copy_region_stream_4(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_4((t), (h1), (o1), (h2), (o2), (c))
+
+#endif /* _MIPS_BUS_OCTEON_H_ */
diff --git a/sys/mips/include/cache.h b/sys/mips/include/cache.h
new file mode 100644
index 0000000..8f22cdb
--- /dev/null
+++ b/sys/mips/include/cache.h
@@ -0,0 +1,261 @@
+/* $NetBSD: cache.h,v 1.6 2003/02/17 11:35:01 simonb Exp $ */
+
+/*
+ * Copyright 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Cache operations.
+ *
+ * We define the following primitives:
+ *
+ * --- Instruction cache synchronization (mandatory):
+ *
+ * icache_sync_all Synchronize I-cache
+ *
+ * icache_sync_range Synchronize I-cache range
+ *
+ * icache_sync_range_index (index ops)
+ *
+ * --- Primary data cache (mandatory):
+ *
+ * pdcache_wbinv_all Write-back Invalidate primary D-cache
+ *
+ * pdcache_wbinv_range Write-back Invalidate primary D-cache range
+ *
+ * pdcache_wbinv_range_index (index ops)
+ *
+ * pdcache_inv_range Invalidate primary D-cache range
+ *
+ * pdcache_wb_range Write-back primary D-cache range
+ *
+ * --- Secondary data cache (optional):
+ *
+ * sdcache_wbinv_all Write-back Invalidate secondary D-cache
+ *
+ * sdcache_wbinv_range Write-back Invalidate secondary D-cache range
+ *
+ * sdcache_wbinv_range_index (index ops)
+ *
+ * sdcache_inv_range Invalidate secondary D-cache range
+ *
+ * sdcache_wb_range Write-back secondary D-cache range
+ *
+ * There are some rules that must be followed:
+ *
+ * I-cache Synch (all or range):
+ * The goal is to synchronize the instruction stream,
+ * so you may need to write-back dirty data cache
+ * blocks first. If a range is requested, and you
+ * can't synchronize just a range, you have to hit
+ * the whole thing.
+ *
+ * D-cache Write-back Invalidate range:
+ * If you can't WB-Inv a range, you must WB-Inv the
+ * entire D-cache.
+ *
+ * D-cache Invalidate:
+ * If you can't Inv the D-cache without doing a
+ * Write-back, YOU MUST PANIC. This is to catch
+ * errors in calling code. Callers must be aware
+ * of this scenario, and must handle it appropriately
+ * (consider the bus_dma(9) operations).
+ *
+ * D-cache Write-back:
+ * If you can't Write-back without doing an invalidate,
+ * that's fine. Then treat this as a WB-Inv. Skipping
+ * the invalidate is merely an optimization.
+ *
+ * All operations:
+ * Valid virtual addresses must be passed to the
+ * cache operation.
+ *
+ * Finally, these primitives are grouped together in reasonable
+ * ways. For all operations described here, first the primary
+ * cache is frobbed, then the secondary cache is frobbed, if the
+ * operation for the secondary cache exists.
+ *
+ * mips_icache_sync_all Synchronize I-cache
+ *
+ * mips_icache_sync_range Synchronize I-cache range
+ *
+ * mips_icache_sync_range_index (index ops)
+ *
+ * mips_dcache_wbinv_all Write-back Invalidate D-cache
+ *
+ * mips_dcache_wbinv_range Write-back Invalidate D-cache range
+ *
+ * mips_dcache_wbinv_range_index (index ops)
+ *
+ * mips_dcache_inv_range Invalidate D-cache range
+ *
+ * mips_dcache_wb_range Write-back D-cache range
+ */
+
+struct mips_cache_ops {
+ void (*mco_icache_sync_all)(void);
+ void (*mco_icache_sync_range)(vm_offset_t, vm_size_t);
+ void (*mco_icache_sync_range_index)(vm_offset_t, vm_size_t);
+
+ void (*mco_pdcache_wbinv_all)(void);
+ void (*mco_pdcache_wbinv_range)(vm_offset_t, vm_size_t);
+ void (*mco_pdcache_wbinv_range_index)(vm_offset_t, vm_size_t);
+ void (*mco_pdcache_inv_range)(vm_offset_t, vm_size_t);
+ void (*mco_pdcache_wb_range)(vm_offset_t, vm_size_t);
+
+ /* These are called only by the (mipsNN) icache functions. */
+ void (*mco_intern_pdcache_wbinv_all)(void);
+ void (*mco_intern_pdcache_wbinv_range_index)(vm_offset_t, vm_size_t);
+ void (*mco_intern_pdcache_wb_range)(vm_offset_t, vm_size_t);
+
+ void (*mco_sdcache_wbinv_all)(void);
+ void (*mco_sdcache_wbinv_range)(vm_offset_t, vm_size_t);
+ void (*mco_sdcache_wbinv_range_index)(vm_offset_t, vm_size_t);
+ void (*mco_sdcache_inv_range)(vm_offset_t, vm_size_t);
+ void (*mco_sdcache_wb_range)(vm_offset_t, vm_size_t);
+
+ /* These are called only by the (mipsNN) icache functions. */
+ void (*mco_intern_sdcache_wbinv_all)(void);
+ void (*mco_intern_sdcache_wbinv_range_index)(vm_offset_t, vm_size_t);
+ void (*mco_intern_sdcache_wb_range)(vm_offset_t, vm_size_t);
+};
+
+extern struct mips_cache_ops mips_cache_ops;
+
+/* PRIMARY CACHE VARIABLES */
+extern u_int mips_picache_size;
+extern u_int mips_picache_line_size;
+extern u_int mips_picache_ways;
+extern u_int mips_picache_way_size;
+extern u_int mips_picache_way_mask;
+
+extern u_int mips_pdcache_size; /* and unified */
+extern u_int mips_pdcache_line_size;
+extern u_int mips_pdcache_ways;
+extern u_int mips_pdcache_way_size;
+extern u_int mips_pdcache_way_mask;
+extern int mips_pdcache_write_through;
+
+extern int mips_pcache_unified;
+
+/* SECONDARY CACHE VARIABLES */
+extern u_int mips_sicache_size;
+extern u_int mips_sicache_line_size;
+extern u_int mips_sicache_ways;
+extern u_int mips_sicache_way_size;
+extern u_int mips_sicache_way_mask;
+
+extern u_int mips_sdcache_size; /* and unified */
+extern u_int mips_sdcache_line_size;
+extern u_int mips_sdcache_ways;
+extern u_int mips_sdcache_way_size;
+extern u_int mips_sdcache_way_mask;
+extern int mips_sdcache_write_through;
+
+extern int mips_scache_unified;
+
+/* TERTIARY CACHE VARIABLES */
+extern u_int mips_tcache_size; /* always unified */
+extern u_int mips_tcache_line_size;
+extern u_int mips_tcache_ways;
+extern u_int mips_tcache_way_size;
+extern u_int mips_tcache_way_mask;
+extern int mips_tcache_write_through;
+
+extern u_int mips_dcache_align;
+extern u_int mips_dcache_align_mask;
+
+extern u_int mips_cache_alias_mask;
+extern u_int mips_cache_prefer_mask;
+
+#define __mco_noargs(prefix, x) \
+do { \
+ (*mips_cache_ops.mco_ ## prefix ## p ## x )(); \
+ if (*mips_cache_ops.mco_ ## prefix ## s ## x ) \
+ (*mips_cache_ops.mco_ ## prefix ## s ## x )(); \
+} while (/*CONSTCOND*/0)
+
+#define __mco_2args(prefix, x, a, b) \
+do { \
+ (*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b)); \
+ if (*mips_cache_ops.mco_ ## prefix ## s ## x ) \
+ (*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b)); \
+} while (/*CONSTCOND*/0)
+
+#define mips_icache_sync_all() \
+ (*mips_cache_ops.mco_icache_sync_all)()
+
+#define mips_icache_sync_range(v, s) \
+ (*mips_cache_ops.mco_icache_sync_range)((v), (s))
+
+#define mips_icache_sync_range_index(v, s) \
+ (*mips_cache_ops.mco_icache_sync_range_index)((v), (s))
+
+#define mips_dcache_wbinv_all() \
+ __mco_noargs(, dcache_wbinv_all)
+
+#define mips_dcache_wbinv_range(v, s) \
+ __mco_2args(, dcache_wbinv_range, (v), (s))
+
+#define mips_dcache_wbinv_range_index(v, s) \
+ __mco_2args(, dcache_wbinv_range_index, (v), (s))
+
+#define mips_dcache_inv_range(v, s) \
+ __mco_2args(, dcache_inv_range, (v), (s))
+
+#define mips_dcache_wb_range(v, s) \
+ __mco_2args(, dcache_wb_range, (v), (s))
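+
+/*
+ * Editorial sketch of how the wrappers above line up with the DMA rules
+ * described earlier in this file (illustrative only; buf and len are
+ * hypothetical, and real drivers go through bus_dmamap_sync(9) rather
+ * than calling these directly):
+ *
+ *	mips_dcache_wb_range((vm_offset_t)buf, len);
+ *		before a device reads a buffer the CPU has written
+ *
+ *	mips_dcache_inv_range((vm_offset_t)buf, len);
+ *		before the CPU reads a buffer a device has written
+ */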
+
+/*
+ * Private D-cache functions only called from (currently only the
+ * mipsNN) I-cache functions.
+ */
+#define mips_intern_dcache_wbinv_all() \
+ __mco_noargs(intern_, dcache_wbinv_all)
+
+#define mips_intern_dcache_wbinv_range_index(v, s) \
+ __mco_2args(intern_, dcache_wbinv_range_index, (v), (s))
+
+#define mips_intern_dcache_wb_range(v, s) \
+ __mco_2args(intern_, dcache_wb_range, (v), (s))
+
+/* forward declaration */
+struct mips_cpuinfo;
+
+void mips_config_cache(struct mips_cpuinfo *);
+void mips_dcache_compute_align(void);
+
+#include <machine/cache_mipsNN.h>
diff --git a/sys/mips/include/cache_mipsNN.h b/sys/mips/include/cache_mipsNN.h
new file mode 100644
index 0000000..e44746a
--- /dev/null
+++ b/sys/mips/include/cache_mipsNN.h
@@ -0,0 +1,67 @@
+/* $NetBSD: cache_mipsNN.h,v 1.4 2003/02/17 11:35:02 simonb Exp $ */
+
+/*
+ * Copyright 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Simon Burge for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+void mipsNN_cache_init(struct mips_cpuinfo *);
+
+void mipsNN_icache_sync_all_16(void);
+void mipsNN_icache_sync_all_32(void);
+void mipsNN_icache_sync_range_16(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_32(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_index_16(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_index_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_all_16(void);
+void mipsNN_pdcache_wbinv_all_32(void);
+void mipsNN_pdcache_wbinv_range_16(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_index_16(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_inv_range_16(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
+#ifdef TARGET_OCTEON
+void mipsNN_icache_sync_all_128(void);
+void mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_all_128(void);
+void mipsNN_pdcache_wbinv_range_128(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
+#endif
diff --git a/sys/mips/include/cache_r4k.h b/sys/mips/include/cache_r4k.h
new file mode 100644
index 0000000..a3a9460
--- /dev/null
+++ b/sys/mips/include/cache_r4k.h
@@ -0,0 +1,383 @@
+/* $NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $ */
+
+/*
+ * Copyright 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Cache definitions/operations for R4000-style caches.
+ */
+
+#define CACHE_R4K_I 0
+#define CACHE_R4K_D 1
+#define CACHE_R4K_SI 2
+#define CACHE_R4K_SD 3
+
+#define CACHEOP_R4K_INDEX_INV (0 << 2) /* I, SI */
+#define CACHEOP_R4K_INDEX_WB_INV (0 << 2) /* D, SD */
+#define CACHEOP_R4K_INDEX_LOAD_TAG (1 << 2) /* all */
+#define CACHEOP_R4K_INDEX_STORE_TAG (2 << 2) /* all */
+#define CACHEOP_R4K_CREATE_DIRTY_EXCL (3 << 2) /* D, SD */
+#define CACHEOP_R4K_HIT_INV (4 << 2) /* all */
+#define CACHEOP_R4K_HIT_WB_INV (5 << 2) /* D, SD */
+#define CACHEOP_R4K_FILL (5 << 2) /* I */
+#define CACHEOP_R4K_HIT_WB (6 << 2) /* I, D, SD */
+#define CACHEOP_R4K_HIT_SET_VIRTUAL (7 << 2) /* SI, SD */
+
+#if !defined(LOCORE)
+
+/*
+ * cache_op_r4k_line:
+ *
+ * Perform the specified cache operation on a single line.
+ */
+#define cache_op_r4k_line(va, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
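+
+/*
+ * Editorial sketch (not from the original file): the single-line primitive
+ * above is enough to flush an arbitrary range one line at a time; the
+ * unrolled helpers below exist so that real code does not have to.  This
+ * example assumes a 32-byte D-cache line and is illustrative only.
+ *
+ *	static __inline void
+ *	example_dcache_wbinv_range_32(vm_offset_t va, vm_size_t size)
+ *	{
+ *		vm_offset_t eva = va + size;
+ *
+ *		va &= ~(vm_offset_t)31;
+ *		for (; va < eva; va += 32)
+ *			cache_op_r4k_line(va,
+ *			    CACHEOP_R4K_HIT_WB_INV | CACHE_R4K_D);
+ *	}
+ */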
+
+/*
+ * cache_r4k_op_8lines_16:
+ *
+ * Perform the specified cache operation on 8 16-byte cache lines.
+ */
+#define cache_r4k_op_8lines_16(va, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x00(%0); cache %1, 0x10(%0) \n\t" \
+ "cache %1, 0x20(%0); cache %1, 0x30(%0) \n\t" \
+ "cache %1, 0x40(%0); cache %1, 0x50(%0) \n\t" \
+ "cache %1, 0x60(%0); cache %1, 0x70(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_8lines_32:
+ *
+ * Perform the specified cache operation on 8 32-byte cache lines.
+ */
+#define cache_r4k_op_8lines_32(va, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x00(%0); cache %1, 0x20(%0) \n\t" \
+ "cache %1, 0x40(%0); cache %1, 0x60(%0) \n\t" \
+ "cache %1, 0x80(%0); cache %1, 0xa0(%0) \n\t" \
+ "cache %1, 0xc0(%0); cache %1, 0xe0(%0) \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_32lines_16:
+ *
+ * Perform the specified cache operation on 32 16-byte
+ * cache lines.
+ */
+#define cache_r4k_op_32lines_16(va, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t" \
+ "cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t" \
+ "cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t" \
+ "cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t" \
+ "cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t" \
+ "cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t" \
+ "cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t" \
+ "cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t" \
+ "cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t" \
+ "cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t" \
+ "cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t" \
+ "cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t" \
+ "cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t" \
+ "cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t" \
+ "cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t" \
+ "cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_32lines_32:
+ *
+ * Perform the specified cache operation on 32 32-byte
+ * cache lines.
+ */
+#define cache_r4k_op_32lines_32(va, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x000(%0); cache %1, 0x020(%0); \n\t" \
+ "cache %1, 0x040(%0); cache %1, 0x060(%0); \n\t" \
+ "cache %1, 0x080(%0); cache %1, 0x0a0(%0); \n\t" \
+ "cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \n\t" \
+ "cache %1, 0x100(%0); cache %1, 0x120(%0); \n\t" \
+ "cache %1, 0x140(%0); cache %1, 0x160(%0); \n\t" \
+ "cache %1, 0x180(%0); cache %1, 0x1a0(%0); \n\t" \
+ "cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \n\t" \
+ "cache %1, 0x200(%0); cache %1, 0x220(%0); \n\t" \
+ "cache %1, 0x240(%0); cache %1, 0x260(%0); \n\t" \
+ "cache %1, 0x280(%0); cache %1, 0x2a0(%0); \n\t" \
+ "cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \n\t" \
+ "cache %1, 0x300(%0); cache %1, 0x320(%0); \n\t" \
+ "cache %1, 0x340(%0); cache %1, 0x360(%0); \n\t" \
+ "cache %1, 0x380(%0); cache %1, 0x3a0(%0); \n\t" \
+ "cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_32lines_128:
+ *
+ * Perform the specified cache operation on 32 128-byte
+ * cache lines.
+ */
+#define cache_r4k_op_32lines_128(va, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %1, 0x0000(%0); cache %1, 0x0080(%0); \n\t" \
+ "cache %1, 0x0100(%0); cache %1, 0x0180(%0); \n\t" \
+ "cache %1, 0x0200(%0); cache %1, 0x0280(%0); \n\t" \
+ "cache %1, 0x0300(%0); cache %1, 0x0380(%0); \n\t" \
+ "cache %1, 0x0400(%0); cache %1, 0x0480(%0); \n\t" \
+ "cache %1, 0x0500(%0); cache %1, 0x0580(%0); \n\t" \
+ "cache %1, 0x0600(%0); cache %1, 0x0680(%0); \n\t" \
+ "cache %1, 0x0700(%0); cache %1, 0x0780(%0); \n\t" \
+ "cache %1, 0x0800(%0); cache %1, 0x0880(%0); \n\t" \
+ "cache %1, 0x0900(%0); cache %1, 0x0980(%0); \n\t" \
+ "cache %1, 0x0a00(%0); cache %1, 0x0a80(%0); \n\t" \
+ "cache %1, 0x0b00(%0); cache %1, 0x0b80(%0); \n\t" \
+ "cache %1, 0x0c00(%0); cache %1, 0x0c80(%0); \n\t" \
+ "cache %1, 0x0d00(%0); cache %1, 0x0d80(%0); \n\t" \
+ "cache %1, 0x0e00(%0); cache %1, 0x0e80(%0); \n\t" \
+ "cache %1, 0x0f00(%0); cache %1, 0x0f80(%0); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_16lines_16_2way:
+ *
+ * Perform the specified cache operation on 16 16-byte
+ * cache lines, 2-ways.
+ */
+#define cache_r4k_op_16lines_16_2way(va1, va2, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \
+ "cache %2, 0x010(%0); cache %2, 0x010(%1); \n\t" \
+ "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \
+ "cache %2, 0x030(%0); cache %2, 0x030(%1); \n\t" \
+ "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \
+ "cache %2, 0x050(%0); cache %2, 0x050(%1); \n\t" \
+ "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \
+ "cache %2, 0x070(%0); cache %2, 0x070(%1); \n\t" \
+ "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \
+ "cache %2, 0x090(%0); cache %2, 0x090(%1); \n\t" \
+ "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \
+ "cache %2, 0x0b0(%0); cache %2, 0x0b0(%1); \n\t" \
+ "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \
+ "cache %2, 0x0d0(%0); cache %2, 0x0d0(%1); \n\t" \
+ "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \
+ "cache %2, 0x0f0(%0); cache %2, 0x0f0(%1); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va1), "r" (va2), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_16lines_32_2way:
+ *
+ * Perform the specified cache operation on 16 32-byte
+ * cache lines, 2-ways.
+ */
+#define cache_r4k_op_16lines_32_2way(va1, va2, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \
+ "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \
+ "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \
+ "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \
+ "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \
+ "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \
+ "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \
+ "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \
+ "cache %2, 0x100(%0); cache %2, 0x100(%1); \n\t" \
+ "cache %2, 0x120(%0); cache %2, 0x120(%1); \n\t" \
+ "cache %2, 0x140(%0); cache %2, 0x140(%1); \n\t" \
+ "cache %2, 0x160(%0); cache %2, 0x160(%1); \n\t" \
+ "cache %2, 0x180(%0); cache %2, 0x180(%1); \n\t" \
+ "cache %2, 0x1a0(%0); cache %2, 0x1a0(%1); \n\t" \
+ "cache %2, 0x1c0(%0); cache %2, 0x1c0(%1); \n\t" \
+ "cache %2, 0x1e0(%0); cache %2, 0x1e0(%1); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va1), "r" (va2), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_8lines_16_4way:
+ *
+ * Perform the specified cache operation on 8 16-byte
+ * cache lines, 4-ways.
+ */
+#define cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \
+ "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \
+ "cache %4, 0x010(%0); cache %4, 0x010(%1); \n\t" \
+ "cache %4, 0x010(%2); cache %4, 0x010(%3); \n\t" \
+ "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \
+ "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \
+ "cache %4, 0x030(%0); cache %4, 0x030(%1); \n\t" \
+ "cache %4, 0x030(%2); cache %4, 0x030(%3); \n\t" \
+ "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \
+ "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \
+ "cache %4, 0x050(%0); cache %4, 0x050(%1); \n\t" \
+ "cache %4, 0x050(%2); cache %4, 0x050(%3); \n\t" \
+ "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \
+ "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \
+ "cache %4, 0x070(%0); cache %4, 0x070(%1); \n\t" \
+ "cache %4, 0x070(%2); cache %4, 0x070(%3); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+/*
+ * cache_r4k_op_8lines_32_4way:
+ *
+ * Perform the specified cache operation on 8 32-byte
+ * cache lines, 4-ways.
+ */
+#define cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op) \
+do { \
+ __asm __volatile( \
+ ".set noreorder \n\t" \
+ "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \
+ "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \
+ "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \
+ "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \
+ "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \
+ "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \
+ "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \
+ "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \
+ "cache %4, 0x080(%0); cache %4, 0x080(%1); \n\t" \
+ "cache %4, 0x080(%2); cache %4, 0x080(%3); \n\t" \
+ "cache %4, 0x0a0(%0); cache %4, 0x0a0(%1); \n\t" \
+ "cache %4, 0x0a0(%2); cache %4, 0x0a0(%3); \n\t" \
+ "cache %4, 0x0c0(%0); cache %4, 0x0c0(%1); \n\t" \
+ "cache %4, 0x0c0(%2); cache %4, 0x0c0(%3); \n\t" \
+ "cache %4, 0x0e0(%0); cache %4, 0x0e0(%1); \n\t" \
+ "cache %4, 0x0e0(%2); cache %4, 0x0e0(%3); \n\t" \
+ ".set reorder" \
+ : \
+ : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \
+ : "memory"); \
+} while (/*CONSTCOND*/0)
+
+void r4k_icache_sync_all_16(void);
+void r4k_icache_sync_range_16(vm_paddr_t, vm_size_t);
+void r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t);
+
+void r4k_icache_sync_all_32(void);
+void r4k_icache_sync_range_32(vm_paddr_t, vm_size_t);
+void r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t);
+
+void r4k_pdcache_wbinv_all_16(void);
+void r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t);
+void r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t);
+
+void r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t);
+void r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t);
+
+void r4k_pdcache_wbinv_all_32(void);
+void r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
+void r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);
+
+void r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t);
+void r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t);
+
+void r4k_sdcache_wbinv_all_32(void);
+void r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
+void r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);
+
+void r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t);
+void r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t);
+
+void r4k_sdcache_wbinv_all_128(void);
+void r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t);
+void r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t);
+
+void r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t);
+void r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t);
+
+void r4k_sdcache_wbinv_all_generic(void);
+void r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t);
+void r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t);
+
+void r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t);
+void r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t);
+
+#endif /* !LOCORE */
diff --git a/sys/mips/include/clock.h b/sys/mips/include/clock.h
new file mode 100644
index 0000000..62b5112
--- /dev/null
+++ b/sys/mips/include/clock.h
@@ -0,0 +1,39 @@
+/*
+ * Garrett Wollman, September 1994.
+ * This file is in the public domain.
+ * Kernel interface to machine-dependent clock driver.
+ *
+ * JNPR: clock.h,v 1.6.2.1 2007/08/29 09:36:05 girish
+ * from: src/sys/alpha/include/clock.h,v 1.5 1999/12/29 04:27:55 peter
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CLOCK_H_
+#define _MACHINE_CLOCK_H_
+
+#include <sys/bus.h>
+
+#ifdef _KERNEL
+
+extern int cpu_clock;
+
+extern uint32_t clockintr(uint32_t, struct clockframe *);
+
+#define wall_cmos_clock 0
+#define adjkerntz 0
+
+/*
+ * Default is to assume a CPU pipeline clock of 100 MHz and that
+ * CP0_COUNT increments once every 2 cycles.
+ */
+#define MIPS_DEFAULT_HZ (100 * 1000 * 1000)
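+
+/*
+ * Editorial illustration (not in the original header): with the default
+ * assumptions above the counter runs at half the pipeline clock, i.e.
+ * counter_freq = MIPS_DEFAULT_HZ / 2 = 50,000,000 ticks per second.
+ * Platform code that knows the real clock rate presumably supplies it
+ * through the mips_timer_* routines declared below rather than relying
+ * on this default.
+ */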
+
+void mips_timer_early_init(uint64_t clock_hz);
+void mips_timer_init_params(uint64_t, int);
+
+extern uint64_t counter_freq;
+extern int clocks_running;
+
+#endif
+
+#endif /* !_MACHINE_CLOCK_H_ */
diff --git a/sys/mips/include/clockvar.h b/sys/mips/include/clockvar.h
new file mode 100644
index 0000000..429beb7
--- /dev/null
+++ b/sys/mips/include/clockvar.h
@@ -0,0 +1,55 @@
+/* $OpenBSD: clockvar.h,v 1.1 1998/01/29 15:06:19 pefo Exp $ */
+/* $NetBSD: clockvar.h,v 1.1 1995/06/28 02:44:59 cgd Exp $ */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ * Adopted for r4400: Per Fogelstrom
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * JNPR: clockvar.h,v 1.3 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+/*
+ * Definitions for "cpu-independent" clock handling for the MIPS architecture.
+ */
+
+/*
+ * tod_time structure:
+ *
+ * Structure passed to TOY (time-of-year) clocks when setting them.
+ * Broken out this way so that the time_t -> field conversion can be
+ * shared.
+ */
+struct tod_time {
+ int year; /* year - 1900 */
+ int mon; /* month (1 - 12) */
+ int day; /* day (1 - 31) */
+ int hour; /* hour (0 - 23) */
+ int min; /* minute (0 - 59) */
+ int sec; /* second (0 - 59) */
+ int dow; /* day of week (0 - 6; 0 = Sunday) */
+};
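+
+/*
+ * Editorial example (not in the original header): 1 January 2008, a
+ * Tuesday, at 12:00:00 would be encoded as
+ *
+ *	struct tod_time tt = {
+ *		.year = 108,		(2008 - 1900)
+ *		.mon = 1,
+ *		.day = 1,
+ *		.hour = 12,
+ *		.min = 0,
+ *		.sec = 0,
+ *		.dow = 2,		(0 = Sunday, so Tuesday is 2)
+ *	};
+ */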
+
+int clockinitted;
diff --git a/sys/mips/include/cp0.h b/sys/mips/include/cp0.h
new file mode 100644
index 0000000..37e0fbb
--- /dev/null
+++ b/sys/mips/include/cp0.h
@@ -0,0 +1,310 @@
+/*-
+ * Copyright (c) 2001, 2005, Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Truman Joe, March 2001.
+ *
+ * cp0.h -- MIPS coprocessor 0 defines.
+ *
+ * JNPR: cp0.h,v 1.4 2006/12/02 09:53:40 katta
+ * $FreeBSD$
+ */
+
+/*
+ * This header file is updated from:
+ * pfe/include/mips/cp0.h
+ */
+
+/*
+ * Note: Registers and bit descriptions that do NOT adhere to
+ * the MIPS64 descriptions as defined in the "MIPS64
+ * Architecture for Programmers, Volume III: The MIPS64
+ * Privileged Resource Architecture" document (doc # MD00091)
+ * are considered to be processor specific and must have the
+ * processor type included in the constant name.
+ */
+
+#ifndef _MACHINE_CP0_H_
+#define _MACHINE_CP0_H_
+
+#ifndef ASMINCLUDE
+
+/* Coprocessor 0 set 0 */
+
+#define C0_INDEX 0
+#define C0_RANDOM 1
+#define C0_ENTRYLO0 2
+#define C0_ENTRYLO1 3
+#define C0_CONTEXT 4
+#define C0_PAGEMASK 5
+#define C0_WIRED 6
+#define R7K_C0_INFO 7
+#define R9K_C0_INFO 7
+#define C0_BADVADDR 8
+#define C0_COUNT 9
+#define C0_ENTRYHI 10
+#define C0_COMPARE 11
+#define C0_STATUS 12
+#define C0_CAUSE 13
+#define C0_EPC 14
+#define C0_PRID 15
+#define C0_CONFIG 16
+#define C0_LLADDR 17
+#define C0_WATCH1 18
+#define C0_WATCH2 19
+#define C0_XCONTEXT 20
+#define R7K_C0_PERFCTL 22
+#define C0_DEBUG 23
+#define R9K_C0_JTAG_DEBUG 23
+#define R7K_C0_WATCHMASK 24
+#define R9K_C0_JTAG_DEPC 24
+#define C0_PERFCOUNT 25
+#define C0_ECC 26
+#define C0_CACHEERR 27
+#define C0_TAGLO 28
+#define C0_TAGHI 29
+#define C0_ERROREPC 30
+#define R9K_C0_JTAG_DESAV 31
+
+/* Coprocessor 0 Set 1 */
+
+#define R7K_C0_1_IPLLO 18
+#define R7K_C0_1_IPLHI 19
+#define R7K_C0_1_INTCTL 20
+#define R9K_C0_1_TBCTL 22
+#define R9K_C0_1_TBIDX 24
+#define R9K_C0_1_TBOUT 25
+#define R7K_C0_1_DERRADDR0 26
+#define R7K_C0_1_DERRADDR1 27
+
+#else /* ASMINCLUDE */
+
+/* Coprocessor 0 set 0 */
+
+#define C0_INDEX $0
+#define C0_RANDOM $1
+#define C0_ENTRYLO0 $2
+#define C0_ENTRYLO1 $3
+#define C0_CONTEXT $4
+#define C0_PAGEMASK $5
+#define C0_WIRED $6
+#define C0_INFO $7
+#define C0_BADVADDR $8
+#define C0_COUNT $9
+#define C0_ENTRYHI $10
+#define C0_COMPARE $11
+#define C0_STATUS $12
+#define C0_CAUSE $13
+#define C0_EPC $14
+#define C0_PRID $15
+#define C0_CONFIG $16
+#define C0_LLADDR $17
+#define C0_WATCH1 $18
+#define C0_WATCH2 $19
+#define C0_XCONTEXT $20
+#define R7K_C0_PERFCTL $22
+#define C0_DEBUG $23
+#define R9K_C0_JTAG_DEBUG $23
+#define R7K_C0_WATCHMASK $24
+#define R9K_C0_JTAG_DEPC $24
+#define C0_PERFCOUNT $25
+#define C0_ECC $26
+#define C0_CACHEERR $27
+#define C0_TAGLO $28
+#define C0_TAGHI $29
+#define C0_ERROREPC $30
+#define R9K_C0_JTAG_DESAV $31
+
+/* Coprocessor 0 Set 1 */
+
+#define R7K_C0_1_IPLLO $18
+#define R7K_C0_1_IPLHI $19
+#define R7K_C0_1_INTCTL $20
+#define R7K_C0_1_DERRADDR0 $26
+#define R7K_C0_1_DERRADDR1 $27
+
+#endif /* ASMINCLUDE */
+
+/* CACHE INSTR OPERATIONS */
+
+#define CACHE_I 0
+#define CACHE_D 1
+#define CACHE_T 2
+#define CACHE_S 3
+
+#define INDEX_INVL_I ((0 << 2) | CACHE_I)
+#define INDEX_WB_INVL_D ((0 << 2) | CACHE_D)
+#define FLASH_INVL_T ((0 << 2) | CACHE_T)
+#define INDEX_WB_INVL_S ((0 << 2) | CACHE_S)
+#define INDEX_LD_TAG_I ((1 << 2) | CACHE_I)
+#define INDEX_LD_TAG_D ((1 << 2) | CACHE_D)
+#define INDEX_LD_TAG_T ((1 << 2) | CACHE_T)
+#define INDEX_LD_TAG_S ((1 << 2) | CACHE_S)
+#define INDEX_ST_TAG_I ((2 << 2) | CACHE_I)
+#define INDEX_ST_TAG_D ((2 << 2) | CACHE_D)
+#define INDEX_ST_TAG_T ((2 << 2) | CACHE_T)
+#define INDEX_ST_TAG_S ((2 << 2) | CACHE_S)
+#define CREATE_DRTY_EXCL_D ((3 << 2) | CACHE_D)
+#define HIT_INVL_I ((4 << 2) | CACHE_I)
+#define HIT_INVL_D ((4 << 2) | CACHE_D)
+#define HIT_INVL_S ((4 << 2) | CACHE_S)
+#define HIT_WB_INVL_D ((5 << 2) | CACHE_D)
+#define FILL_I ((5 << 2) | CACHE_I)
+#define HIT_WB_INVL_S ((5 << 2) | CACHE_S)
+#define PAGE_INVL_T ((5 << 2) | CACHE_T)
+#define HIT_WB_D ((6 << 2) | CACHE_D)
+#define HIT_WB_I ((6 << 2) | CACHE_I)
+#define HIT_WB_S ((6 << 2) | CACHE_S)
+
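The constants above pack the operation of the MIPS cache instruction into bits 4:2 and the target cache (I, D, tertiary, secondary) into bits 1:0. A standalone sketch of that packing; the CACHE_OP helper is hypothetical and not defined by this header.

#include <assert.h>

/* Cache selectors, values copied from the header above. */
#define CACHE_I     0
#define CACHE_D     1
#define CACHE_T     2
#define CACHE_S     3

/* Hypothetical encoder: operation in bits 4:2, target cache in bits 1:0. */
#define CACHE_OP(op, which)     (((op) << 2) | (which))

int
main(void)
{
    /* HIT_WB_INVL_D above is ((5 << 2) | CACHE_D), i.e. 0x15. */
    assert(CACHE_OP(5, CACHE_D) == 0x15);
    /* INDEX_WB_INVL_S above is ((0 << 2) | CACHE_S), i.e. 0x03. */
    assert(CACHE_OP(0, CACHE_S) == 0x03);
    return (0);
}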
+/* CO_CONFIG bit definitions */
+#define R7K_CFG_TE (0x1 << 12) /* diff from MIPS64 standard */
+#define R7K_CFG_SE (0x1 << 3) /* diff from MIPS64 standard */
+#define R9K_CFG_SE (0x1 << 3) /* diff from MIPS64 standard */
+#define R9K_CFG_SC (0x1 << 31) /* diff from MIPS64 standard */
+#define CFG_K0_MASK (0x7 << 0)
+#define CFG_K0_UNC (0x2 << 0)
+#define CFG_K0_WB (0x3 << 0)
+
+#define R9K_CFG_K0_WT 0x0 /* Write thru */
+#define R9K_CFG_K0_WTWA 0x1 /* Write thru with write alloc */
+#define R9K_CFG_K0_UNCB 0x2 /* Uncached, blocking */
+#define R9K_CFG_K0_WB 0x3 /* Write Back */
+#define R9K_CFG_K0_CWBEA 0x4 /* Coherent WB with exclusive alloc */
+#define R9K_CFG_K0_CWB 0x5 /* Coherent WB */
+#define R9K_CFG_K0_UNCNB 0x6 /* Uncached, nonblocking */
+#define R9K_CFG_K0_FPC 0x7 /* Fast Packet Cache (bypass 2nd cache) */
+
+/* Special C0_INFO bit descriptions for the R9K processor */
+#define R9K_INFO_AE (1 << 0) /* atomic SR_IE for R9K */
+#define R9K_INFO_64_TLB (1 << 29)/* R9K C0_INFO bit - chip has 64 TLB entries */
+
+/* CO_PAGEMASK bit definitions */
+
+/*
+ * These look weird because the 'size' used is twice what you
+ * think it is, but remember that the MIPS TLB maps even/odd
+ * page pairs, so you need to account for the 2x page size.
+ * The R9K supports 256M pages (it has a 16-bit Mask field in the
+ * PageMask register).
+ */
+#define PAGEMASK_256M ((0x20000000 - 1) & ~0x1fff) /* R9K only */
+#define PAGEMASK_64M ((0x08000000 - 1) & ~0x1fff) /* R9K only */
+#define PAGEMASK_16M ((0x02000000 - 1) & ~0x1fff)
+#define PAGEMASK_4M ((0x00800000 - 1) & ~0x1fff)
+#define PAGEMASK_1M ((0x00200000 - 1) & ~0x1fff)
+#define PAGEMASK_256K ((0x00080000 - 1) & ~0x1fff)
+#define PAGEMASK_64K ((0x00020000 - 1) & ~0x1fff)
+#define PAGEMASK_16K ((0x00008000 - 1) & ~0x1fff)
+#define PAGEMASK_4K ((0x00002000 - 1) & ~0x1fff)
+
+#define R9K_PAGEMASK 0xffff /* R9K has a 16-bit Mask field in PageMask */
+#define PAGEMASK_SHIFT 13
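As the comment above explains, the mask for a nominal page size N is (2*N - 1) with the low PAGEMASK_SHIFT bits cleared, because a single TLB entry maps an even/odd pair of pages. A standalone check; pagemask_for() is a hypothetical helper, not part of this header.

#include <assert.h>

/* Values copied from the header above. */
#define PAGEMASK_16K    ((0x00008000 - 1) & ~0x1fff)
#define PAGEMASK_16M    ((0x02000000 - 1) & ~0x1fff)

/* Hypothetical helper: size is the nominal page size in bytes. */
static unsigned
pagemask_for(unsigned size)
{
    /* The TLB maps an even/odd pair, so the covered span is 2 * size. */
    return ((size * 2 - 1) & ~0x1fffu);
}

int
main(void)
{
    assert(pagemask_for(16 * 1024) == PAGEMASK_16K);
    assert(pagemask_for(16 * 1024 * 1024) == PAGEMASK_16M);
    return (0);
}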
+
+/*
+ * Cache Coherency Attributes
+ * These are different for R7K and R9K
+ */
+#define R7K_TLB_COHERENCY_WTNA 0x0
+#define R7K_TLB_COHERENCY_WTWA 0x1
+#define R7K_TLB_COHERENCY_UNCBLK 0x2
+#define R7K_TLB_COHERENCY_WB 0x3
+#define R7K_TLB_COHERENCY_UNCNBLK 0x6
+#define R7K_TLB_COHERENCY_BYPASS 0x7
+
+#define ENTRYHI_ASID_MASK 0xff
+#define R9K_ENTRYHI_ASID_MASK 0xfff
+#define R7K_ENTRYHI_VPNMASK 0x7ffffff
+#define ENTRYHI_VPNSHIFT 13
+#define ENTRYHI_R_SHIFT 62
+#define R7K_ENTRYLO_PFNMASK 0xffffff
+#define ENTRYLO_PFNSHIFT 6
+#define ENTRYLO_C_SHIFT 3
+
+#define R9K_ENTRYHI_VPNMASK 0x7ffffff /* same as r7k */
+#define R9K_ENTRYLO_PFNMASK 0xffffff /* same as r7k */
+
+#define R9K_ENTRYLO_C_WTNWA (0x0 << 3) /* Cache NonCoher WriteThru No Alloc */
+#define R9K_ENTRYLO_C_WTWA (0x1 << 3) /* Cache NonCoher WriteThru Wr Alloc */
+#define R9K_ENTRYLO_C_UNCACHED (0x2 << 3) /* Uncached, blocking */
+#define R9K_ENTRYLO_C_CNONC_WB (0x3 << 3) /* Cacheable NonCoherent WriteBack */
+#define R9K_ENTRYLO_C_CCEXCLU (0x4 << 3) /* Cacheable Coherent Exclusive */
+#define R9K_ENTRYLO_C_CC_WB (0x5 << 3) /* Cacheable Coherent Write Back */
+#define R9K_ENTRYLO_C_UNCNBLK (0x6 << 3) /* Uncached, Nonblocking */
+#define R9K_ENTRYLO_C_FPC (0x7 << 3) /* Fast Packet Cache */
+
+#define R7K_ENTRYLO_C_WB (R7K_TLB_COHERENCY_WB << 3)
+#define R7K_ENTRYLO_C_UNCBLK (R7K_TLB_COHERENCY_UNCBLK << 3)
+#define R7K_ENTRYLO_C_UNCNBLK (R7K_TLB_COHERENCY_UNCNBLK << 3)
+#define R7K_ENTRYLO_C_BYPASS (R7K_TLB_COHERENCY_BYPASS << 3)
+#define ENTRYLO_D (0x1 << 2)
+#define ENTRYLO_V (0x1 << 1)
+#define ENTRYLO_G (0x1 << 0)
+
+/* C0_CAUSE bit definitions */
+
+#define CAUSE_BD (0x1 << 31)
+#define CAUSE_CE_SHIFT 28
+#define CAUSE_CE_MASK 3
+#define R7K_CAUSE_IV (0x1 << 24) /* different from MIPS64 standard */
+#define R9K_CAUSE_IV (0x1 << 24) /* different from MIPS64 standard */
+#define R9K_CAUSE_W1 (0x1 << 25) /* different from MIPS64 standard */
+#define R9K_CAUSE_W2 (0x1 << 26) /* different from MIPS64 standard */
+#define CAUSE_IV (0x1 << 23)
+#define CAUSE_WP (0x1 << 22)
+#define CAUSE_EXCCODE_MASK 0x1f
+#define CAUSE_EXCCODE_SHIFT 2
+#define CAUSE_IP_MASK 0xff
+#define R7K_CAUSE_IP_MASK 0xffff /* different from MIPS64 standard */
+#define R9K_CAUSE_IP_MASK 0xffff /* different from MIPS64 standard */
+#define CAUSE_IP_SHIFT 8
+#define CAUSE_IP(num) (0x1 << ((num) + CAUSE_IP_SHIFT))
+
+#define CAUSE_EXCCODE_INT (0 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_MOD (1 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_TLBL (2 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_TLBS (3 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_ADEL (4 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_ADES (5 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_IBE (6 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_DBE (7 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_SYS (8 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_BP (9 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_RI (10 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_CPU (11 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_OV (12 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_TR (13 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_FPE (15 << CAUSE_EXCCODE_SHIFT)
+#define R7K_CAUSE_EXCCODE_IWE (16 << CAUSE_EXCCODE_SHIFT) /* r7k implementation */
+#define CAUSE_EXCCODE_C2E (18 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_MDMX (22 << CAUSE_EXCCODE_SHIFT)
+#define R7K_CAUSE_EXCCODE_DWE (23 << CAUSE_EXCCODE_SHIFT) /* diff from standard */
+#define CAUSE_EXCCODE_WATCH (23 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_MACH_CHK (24 << CAUSE_EXCCODE_SHIFT)
+#define CAUSE_EXCCODE_CACHE_ERR (30 << CAUSE_EXCCODE_SHIFT)
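A quick standalone illustration of pulling ExcCode out of a raw Cause value with the masks above; the cause_exccode() helper and the sample value are illustrative only. Note that the CAUSE_EXCCODE_* constants are already shifted, so the mask must be shifted too before comparing.

#include <assert.h>

/* Values copied from the header above. */
#define CAUSE_EXCCODE_MASK      0x1f
#define CAUSE_EXCCODE_SHIFT     2
#define CAUSE_EXCCODE_TLBL      (2 << CAUSE_EXCCODE_SHIFT)

/* Hypothetical helper: isolate the (shifted) ExcCode field of C0_CAUSE. */
static unsigned
cause_exccode(unsigned cause)
{
    return (cause & (CAUSE_EXCCODE_MASK << CAUSE_EXCCODE_SHIFT));
}

int
main(void)
{
    /* A TLB load miss with interrupt 7 pending still decodes as TLBL. */
    unsigned cause = 0x00008000 | CAUSE_EXCCODE_TLBL;

    assert(cause_exccode(cause) == CAUSE_EXCCODE_TLBL);
    return (0);
}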
+
+/* C0_PRID bit definitions */
+#define PRID_GET_REV(val) ((val) & 0xff)
+#define PRID_GET_RPID(val) (((val) >> 8) & 0xff)
+#define R9K_PRID_GET_IMP(val) (((val) >> 8) & 0xff)
+#define PRID_GET_CID(val) (((val) >> 16) & 0xff)
+#define PRID_GET_OPT(val) (((val) >> 24) & 0xff)
+
+/* C0_PRID bit definitions for R9K multiprocessor */
+#define R9K_PRID_GET_PNUM(val) (((val) >> 24) & 0x07) /* only 0 & 1 are valid */
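And a standalone check of the PRID accessors above, applied to a made-up PRID value (company 0x01, processor 0x93, revision 0x2e); nothing here is a real part number.

#include <assert.h>

/* Accessors copied from the header above. */
#define PRID_GET_REV(val)       ((val) & 0xff)
#define PRID_GET_RPID(val)      (((val) >> 8) & 0xff)
#define PRID_GET_CID(val)       (((val) >> 16) & 0xff)

int
main(void)
{
    unsigned prid = 0x0001932e;     /* fabricated example value */

    assert(PRID_GET_CID(prid) == 0x01);
    assert(PRID_GET_RPID(prid) == 0x93);
    assert(PRID_GET_REV(prid) == 0x2e);
    return (0);
}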
+
+/* C0_1_INTCTL bit definitions for R7K and R9K */
+#define R7K_INTCTL_VS_MASK 0x1f
+#define R7K_INTCTL_VS_SHIFT 0
+#define R7K_INTCTL_IMASK 0xff00
+
+/* C0_Watch bit definitions */
+#define WATCHLO_STORE 0x00000001 /* watch stores */
+#define WATCHLO_LOAD 0x00000002 /* watch loads */
+#define WATCHLO_FETCH 0x00000003 /* watch fetches */
+#define WATCHLO_PADDR0_MASK 0xfffffff8 /* bits 31:3 of the paddr */
+
+#define WATCHHI_GLOBAL_BIT (1 << 30)
+
+#endif /* _MACHINE_CP0_H_ */
+
+/* end of file */
diff --git a/sys/mips/include/cpu.h b/sys/mips/include/cpu.h
new file mode 100644
index 0000000..20b41e2
--- /dev/null
+++ b/sys/mips/include/cpu.h
@@ -0,0 +1,564 @@
+/* $OpenBSD: cpu.h,v 1.4 1998/09/15 10:50:12 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell and Rick Macklem.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: @(#)cpu.h 8.4 (Berkeley) 1/4/94
+ * JNPR: cpu.h,v 1.9.2.2 2007/09/10 08:23:46 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+#include <machine/psl.h>
+#include <machine/endian.h>
+
+#define MIPS_CACHED_MEMORY_ADDR 0x80000000
+#define MIPS_UNCACHED_MEMORY_ADDR 0xa0000000
+#define MIPS_MAX_MEM_ADDR 0xbe000000
+#define MIPS_RESERVED_ADDR 0xbfc80000
+
+#define MIPS_KSEG0_LARGEST_PHYS 0x20000000
+#define MIPS_CACHED_TO_PHYS(x) ((unsigned)(x) & 0x1fffffff)
+#define MIPS_PHYS_TO_CACHED(x) ((unsigned)(x) | MIPS_CACHED_MEMORY_ADDR)
+#define MIPS_UNCACHED_TO_PHYS(x) ((unsigned)(x) & 0x1fffffff)
+#define MIPS_PHYS_TO_UNCACHED(x) ((unsigned)(x) | MIPS_UNCACHED_MEMORY_ADDR)
+
+#define MIPS_PHYS_MASK (0x1fffffff)
+#define MIPS_PA_2_K1VA(x) (MIPS_KSEG1_START | ((x) & MIPS_PHYS_MASK))
+
+#define MIPS_VA_TO_CINDEX(x) ((unsigned)(x) & 0xffffff | MIPS_CACHED_MEMORY_ADDR)
+#define MIPS_CACHED_TO_UNCACHED(x) (MIPS_PHYS_TO_UNCACHED(MIPS_CACHED_TO_PHYS(x)))
+
+#define MIPS_PHYS_TO_KSEG0(x) ((unsigned)(x) | MIPS_KSEG0_START)
+#define MIPS_PHYS_TO_KSEG1(x) ((unsigned)(x) | MIPS_KSEG1_START)
+#define MIPS_KSEG0_TO_PHYS(x) ((unsigned)(x) & MIPS_PHYS_MASK)
+#define MIPS_KSEG1_TO_PHYS(x) ((unsigned)(x) & MIPS_PHYS_MASK)
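A standalone sketch of the physical/KSEG translations above: KSEG0 and KSEG1 are direct windows onto the low 512MB of physical memory, so the conversion is a plain OR or AND. The sample physical address is arbitrary.

#include <assert.h>

/* Values copied from the header above. */
#define MIPS_CACHED_MEMORY_ADDR     0x80000000
#define MIPS_UNCACHED_MEMORY_ADDR   0xa0000000
#define MIPS_CACHED_TO_PHYS(x)      ((unsigned)(x) & 0x1fffffff)
#define MIPS_PHYS_TO_CACHED(x)      ((unsigned)(x) | MIPS_CACHED_MEMORY_ADDR)
#define MIPS_PHYS_TO_UNCACHED(x)    ((unsigned)(x) | MIPS_UNCACHED_MEMORY_ADDR)

int
main(void)
{
    unsigned pa = 0x01fc0000;       /* arbitrary physical address */

    assert(MIPS_PHYS_TO_CACHED(pa) == 0x81fc0000);
    assert(MIPS_PHYS_TO_UNCACHED(pa) == 0xa1fc0000);
    assert(MIPS_CACHED_TO_PHYS(MIPS_PHYS_TO_CACHED(pa)) == pa);
    return (0);
}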
+
+/*
+ * Status register.
+ */
+#define SR_COP_USABILITY 0xf0000000
+#define SR_COP_0_BIT 0x10000000
+#define SR_COP_1_BIT 0x20000000
+#define SR_COP_2_BIT 0x40000000
+#define SR_RP 0x08000000
+#define SR_FR_32 0x04000000
+#define SR_RE 0x02000000
+#define SR_PX 0x00800000
+#define SR_BOOT_EXC_VEC 0x00400000
+#define SR_TLB_SHUTDOWN 0x00200000
+#define SR_SOFT_RESET 0x00100000
+#define SR_DIAG_CH 0x00040000
+#define SR_DIAG_CE 0x00020000
+#define SR_DIAG_DE 0x00010000
+#define SR_KX 0x00000080
+#define SR_SX 0x00000040
+#define SR_UX 0x00000020
+#define SR_KSU_MASK 0x00000018
+#define SR_KSU_USER 0x00000010
+#define SR_KSU_SUPER 0x00000008
+#define SR_KSU_KERNEL 0x00000000
+#define SR_ERL 0x00000004
+#define SR_EXL 0x00000002
+#define SR_INT_ENAB 0x00000001
+
+#define SR_INT_MASK 0x0000ff00
+#define SOFT_INT_MASK_0 0x00000100
+#define SOFT_INT_MASK_1 0x00000200
+#define SR_INT_MASK_0 0x00000400
+#define SR_INT_MASK_1 0x00000800
+#define SR_INT_MASK_2 0x00001000
+#define SR_INT_MASK_3 0x00002000
+#define SR_INT_MASK_4 0x00004000
+#define SR_INT_MASK_5 0x00008000
+#define ALL_INT_MASK SR_INT_MASK
+#define SOFT_INT_MASK (SOFT_INT_MASK_0 | SOFT_INT_MASK_1)
+#define HW_INT_MASK (ALL_INT_MASK & ~SOFT_INT_MASK)
+
+
+/*
+ * The bits in the cause register.
+ *
+ * CR_BR_DELAY Exception happened in branch delay slot.
+ * CR_COP_ERR Coprocessor error.
+ * CR_IP Interrupt pending bits defined below.
+ * CR_EXC_CODE The exception type (see exception codes below).
+ */
+#define CR_BR_DELAY 0x80000000
+#define CR_COP_ERR 0x30000000
+#define CR_EXC_CODE 0x0000007c
+#define CR_EXC_CODE_SHIFT 2
+#define CR_IPEND 0x0000ff00
+
+/*
+ * Cause Register Format:
+ *
+ * 31 30 29 28 27 26 25 24 23 8 7 6 2 1 0
+ * ----------------------------------------------------------------------
+ * | BD | 0| CE | 0| W2| W1| IV| IP15 - IP0 | 0| Exc Code | 0|
+ * |______________________________________________________________________
+ */
+
+#define CR_INT_SOFT0 0x00000100
+#define CR_INT_SOFT1 0x00000200
+#define CR_INT_0 0x00000400
+#define CR_INT_1 0x00000800
+#define CR_INT_2 0x00001000
+#define CR_INT_3 0x00002000
+#define CR_INT_4 0x00004000
+#define CR_INT_5 0x00008000
+
+#define CR_INT_UART CR_INT_1
+#define CR_INT_IPI CR_INT_2
+#define CR_INT_CLOCK CR_INT_5
+
+/*
+ * The bits in the CONFIG register
+ */
+#define CFG_K0_UNCACHED 2
+#define CFG_K0_CACHED 3
+
+/*
+ * The bits in the context register.
+ */
+#define CNTXT_PTE_BASE 0xff800000
+#define CNTXT_BAD_VPN2 0x007ffff0
+
+/*
+ * Location of exception vectors.
+ */
+#define RESET_EXC_VEC 0xbfc00000
+#define TLB_MISS_EXC_VEC 0x80000000
+#define XTLB_MISS_EXC_VEC 0x80000080
+#define CACHE_ERR_EXC_VEC 0x80000100
+#define GEN_EXC_VEC 0x80000180
+
+/*
+ * Coprocessor 0 registers:
+ */
+#define COP_0_TLB_INDEX $0
+#define COP_0_TLB_RANDOM $1
+#define COP_0_TLB_LO0 $2
+#define COP_0_TLB_LO1 $3
+#define COP_0_TLB_CONTEXT $4
+#define COP_0_TLB_PG_MASK $5
+#define COP_0_TLB_WIRED $6
+#define COP_0_INFO $7
+#define COP_0_BAD_VADDR $8
+#define COP_0_COUNT $9
+#define COP_0_TLB_HI $10
+#define COP_0_COMPARE $11
+#define COP_0_STATUS_REG $12
+#define COP_0_CAUSE_REG $13
+#define COP_0_EXC_PC $14
+#define COP_0_PRID $15
+#define COP_0_CONFIG $16
+#define COP_0_LLADDR $17
+#define COP_0_WATCH_LO $18
+#define COP_0_WATCH_HI $19
+#define COP_0_TLB_XCONTEXT $20
+#define COP_0_ECC $26
+#define COP_0_CACHE_ERR $27
+#define COP_0_TAG_LO $28
+#define COP_0_TAG_HI $29
+#define COP_0_ERROR_PC $30
+
+/*
+ * Coprocessor 0 Set 1
+ */
+#define C0P_1_IPLLO $18
+#define C0P_1_IPLHI $19
+#define C0P_1_INTCTL $20
+#define C0P_1_DERRADDR0 $26
+#define C0P_1_DERRADDR1 $27
+
+/*
+ * Values for the code field in a break instruction.
+ */
+#define BREAK_INSTR 0x0000000d
+#define BREAK_VAL_MASK 0x03ffffc0
+#define BREAK_VAL_SHIFT 16
+#define BREAK_KDB_VAL 512
+#define BREAK_SSTEP_VAL 513
+#define BREAK_BRKPT_VAL 514
+#define BREAK_SOVER_VAL 515
+#define BREAK_DDB_VAL 516
+#define BREAK_KDB (BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
+#define BREAK_SSTEP (BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
+#define BREAK_BRKPT (BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
+#define BREAK_SOVER (BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
+#define BREAK_DDB (BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
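A standalone check that the break-code encoding above round-trips: the software code field sits at BREAK_VAL_SHIFT under BREAK_VAL_MASK, on top of the base break opcode. The constants are copied from this header; only main() is added.

#include <assert.h>

/* Values copied from the header above. */
#define BREAK_INSTR         0x0000000d
#define BREAK_VAL_MASK      0x03ffffc0
#define BREAK_VAL_SHIFT     16
#define BREAK_SSTEP_VAL     513
#define BREAK_SSTEP     (BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))

int
main(void)
{
    assert(BREAK_SSTEP == 0x0201000d);
    assert(((BREAK_SSTEP & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) ==
        BREAK_SSTEP_VAL);
    return (0);
}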
+
+/*
+ * Minimum and maximum cache sizes.
+ */
+#define MIN_CACHE_SIZE (16 * 1024)
+#define MAX_CACHE_SIZE (256 * 1024)
+
+/*
+ * The floating point version and status registers.
+ */
+#define FPC_ID $0
+#define FPC_CSR $31
+
+/*
+ * The floating point coprocessor status register bits.
+ */
+#define FPC_ROUNDING_BITS 0x00000003
+#define FPC_ROUND_RN 0x00000000
+#define FPC_ROUND_RZ 0x00000001
+#define FPC_ROUND_RP 0x00000002
+#define FPC_ROUND_RM 0x00000003
+#define FPC_STICKY_BITS 0x0000007c
+#define FPC_STICKY_INEXACT 0x00000004
+#define FPC_STICKY_UNDERFLOW 0x00000008
+#define FPC_STICKY_OVERFLOW 0x00000010
+#define FPC_STICKY_DIV0 0x00000020
+#define FPC_STICKY_INVALID 0x00000040
+#define FPC_ENABLE_BITS 0x00000f80
+#define FPC_ENABLE_INEXACT 0x00000080
+#define FPC_ENABLE_UNDERFLOW 0x00000100
+#define FPC_ENABLE_OVERFLOW 0x00000200
+#define FPC_ENABLE_DIV0 0x00000400
+#define FPC_ENABLE_INVALID 0x00000800
+#define FPC_EXCEPTION_BITS 0x0003f000
+#define FPC_EXCEPTION_INEXACT 0x00001000
+#define FPC_EXCEPTION_UNDERFLOW 0x00002000
+#define FPC_EXCEPTION_OVERFLOW 0x00004000
+#define FPC_EXCEPTION_DIV0 0x00008000
+#define FPC_EXCEPTION_INVALID 0x00010000
+#define FPC_EXCEPTION_UNIMPL 0x00020000
+#define FPC_COND_BIT 0x00800000
+#define FPC_FLUSH_BIT 0x01000000
+#define FPC_MBZ_BITS 0xfe7c0000
+
+/*
+ * Constants to determine if have a floating point instruction.
+ */
+#define OPCODE_SHIFT 26
+#define OPCODE_C1 0x11
+
+/*
+ * The low part of the TLB entry.
+ */
+#define VMTLB_PF_NUM 0x3fffffc0
+#define VMTLB_ATTR_MASK 0x00000038
+#define VMTLB_MOD_BIT 0x00000004
+#define VMTLB_VALID_BIT 0x00000002
+#define VMTLB_GLOBAL_BIT 0x00000001
+
+#define VMTLB_PHYS_PAGE_SHIFT 6
+
+/*
+ * The high part of the TLB entry.
+ */
+#define VMTLB_VIRT_PAGE_NUM 0xffffe000
+#define VMTLB_PID 0x000000ff
+#define VMTLB_PID_R9K 0x00000fff
+#define VMTLB_PID_SHIFT 0
+#define VMTLB_VIRT_PAGE_SHIFT 12
+#define VMTLB_VIRT_PAGE_SHIFT_R9K 13
+
+/*
+ * The first TLB entry that write random hits.
+ */
+#define VMWIRED_ENTRIES 1
+
+/*
+ * The number of process id entries.
+ */
+#define VMNUM_PIDS 256
+
+/*
+ * TLB probe return codes.
+ */
+#define VMTLB_NOT_FOUND 0
+#define VMTLB_FOUND 1
+#define VMTLB_FOUND_WITH_PATCH 2
+#define VMTLB_PROBE_ERROR 3
+
+/*
+ * Exported definitions unique to mips cpu support.
+ */
+
+/*
+ * definitions of cpu-dependent requirements
+ * referenced in generic code
+ */
+#define COPY_SIGCODE /* copy sigcode above user stack in exec */
+
+#define cpu_swapout(p) panic("cpu_swapout: can't get here");
+
+#ifndef _LOCORE
+#include <machine/frame.h>
+/*
+ * Arguments to hardclock and gatherstats encapsulate the previous
+ * machine state in an opaque clockframe.
+ */
+#define clockframe trapframe /* Use normal trap frame */
+
+#define CLKF_USERMODE(framep) ((framep)->sr & SR_KSU_USER)
+#define CLKF_BASEPRI(framep) ((framep)->cpl == 0)
+#define CLKF_PC(framep) ((framep)->pc)
+#define CLKF_INTR(framep) (0)
+#define MIPS_CLKF_INTR() (intr_nesting_level >= 1)
+#define TRAPF_USERMODE(framep) (((framep)->sr & SR_KSU_USER) != 0)
+#define TRAPF_PC(framep) ((framep)->pc)
+#define cpu_getstack(td) ((td)->td_frame->sp)
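As a rough model of how the clockframe macros above are consumed, here is a host-runnable sketch in which fake_frame stands in for the real trapframe that the macros actually alias; the field values are arbitrary.

#include <assert.h>

#define SR_KSU_USER     0x00000010      /* from the status bits above */

/* Minimal stand-in for the trapframe fields the macros consult. */
struct fake_frame {
    unsigned sr;
    unsigned pc;
};

#define CLKF_USERMODE(framep)   ((framep)->sr & SR_KSU_USER)
#define CLKF_PC(framep)         ((framep)->pc)

int
main(void)
{
    struct fake_frame f = { .sr = SR_KSU_USER | 0xff01, .pc = 0x400120 };

    assert(CLKF_USERMODE(&f));          /* KSU says the clock hit user mode */
    assert(CLKF_PC(&f) == 0x400120);
    return (0);
}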
+
+/*
+ * CPU identification, from PRID register.
+ */
+union cpuprid {
+ int cpuprid;
+ struct {
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int pad1:8; /* reserved */
+ u_int cp_vendor:8; /* company identifier */
+ u_int cp_imp:8; /* implementation identifier */
+ u_int cp_majrev:4; /* major revision identifier */
+ u_int cp_minrev:4; /* minor revision identifier */
+#else
+ u_int cp_minrev:4; /* minor revision identifier */
+ u_int cp_majrev:4; /* major revision identifier */
+ u_int cp_imp:8; /* implementation identifier */
+ u_int cp_vendor:8; /* company identifier */
+ u_int pad1:8; /* reserved */
+#endif
+ } cpu;
+};
+
+#endif /* !_LOCORE */
+
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CONSDEV 1 /* dev_t: console terminal device */
+#define CPU_ADJKERNTZ 2 /* int: timezone offset (seconds) */
+#define CPU_DISRTCSET 3 /* int: disable resettodr() call */
+#define CPU_BOOTINFO 4 /* struct: bootinfo */
+#define CPU_WALLCLOCK 5 /* int: indicates wall CMOS clock */
+#define CPU_MAXID 6 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+ { "adjkerntz", CTLTYPE_INT }, \
+ { "disable_rtc_set", CTLTYPE_INT }, \
+ { "bootinfo", CTLTYPE_STRUCT }, \
+ { "wall_cmos_clock", CTLTYPE_INT }, \
+}
+
+/*
+ * MIPS CPU types (cp_imp).
+ */
+#define MIPS_R2000 0x01 /* MIPS R2000 CPU ISA I */
+#define MIPS_R3000 0x02 /* MIPS R3000 CPU ISA I */
+#define MIPS_R6000 0x03 /* MIPS R6000 CPU ISA II */
+#define MIPS_R4000 0x04 /* MIPS R4000/4400 CPU ISA III */
+#define MIPS_R3LSI 0x05 /* LSI Logic R3000 derivate ISA I */
+#define MIPS_R6000A 0x06 /* MIPS R6000A CPU ISA II */
+#define MIPS_R3IDT 0x07 /* IDT R3000 derivate ISA I */
+#define MIPS_R10000 0x09 /* MIPS R10000/T5 CPU ISA IV */
+#define MIPS_R4200 0x0a /* MIPS R4200 CPU (ICE) ISA III */
+#define MIPS_R4300 0x0b /* NEC VR4300 CPU ISA III */
+#define MIPS_R4100 0x0c /* NEC VR41xx CPU MIPS-16 ISA III */
+#define MIPS_R8000 0x10 /* MIPS R8000 Blackbird/TFP ISA IV */
+#define MIPS_R4600 0x20 /* QED R4600 Orion ISA III */
+#define MIPS_R4700 0x21 /* QED R4700 Orion ISA III */
+#define MIPS_R3TOSH 0x22 /* Toshiba R3000 based CPU ISA I */
+#define MIPS_R5000 0x23 /* MIPS R5000 CPU ISA IV */
+#define MIPS_RM7000 0x27 /* QED RM7000 CPU ISA IV */
+#define MIPS_RM52X0 0x28 /* QED RM52X0 CPU ISA IV */
+#define MIPS_VR5400 0x54 /* NEC Vr5400 CPU ISA IV+ */
+#define MIPS_RM9000 0x34 /* E9000 CPU */
+
+/*
+ * MIPS FPU types
+ */
+#define MIPS_SOFT 0x00 /* Software emulation ISA I */
+#define MIPS_R2360 0x01 /* MIPS R2360 FPC ISA I */
+#define MIPS_R2010 0x02 /* MIPS R2010 FPC ISA I */
+#define MIPS_R3010 0x03 /* MIPS R3010 FPC ISA I */
+#define MIPS_R6010 0x04 /* MIPS R6010 FPC ISA II */
+#define MIPS_R4010 0x05 /* MIPS R4000/R4400 FPC ISA II */
+#define MIPS_R31LSI 0x06 /* LSI Logic derivate ISA I */
+#define MIPS_R10010 0x09 /* MIPS R10000/T5 FPU ISA IV */
+#define MIPS_R4210 0x0a /* MIPS R4200 FPC (ICE) ISA III */
+#define MIPS_UNKF1 0x0b /* unannounced product cpu ISA III */
+#define MIPS_R8000 0x10 /* MIPS R8000 Blackbird/TFP ISA IV */
+#define MIPS_R4600 0x20 /* QED R4600 Orion ISA III */
+#define MIPS_R3SONY 0x21 /* Sony R3000 based FPU ISA I */
+#define MIPS_R3TOSH 0x22 /* Toshiba R3000 based FPU ISA I */
+#define MIPS_R5010 0x23 /* MIPS R5000 based FPU ISA IV */
+#define MIPS_RM7000 0x27 /* QED RM7000 FPU ISA IV */
+#define MIPS_RM5230 0x28 /* QED RM52X0 based FPU ISA IV */
+#define MIPS_RM52XX 0x28 /* QED RM52X0 based FPU ISA IV */
+#define MIPS_VR5400 0x54 /* NEC Vr5400 FPU ISA IV+ */
+
+#ifndef _LOCORE
+extern union cpuprid cpu_id;
+
+#define mips_proc_type() ((cpu_id.cpu.cp_vendor << 8) | cpu_id.cpu.cp_imp)
+#define mips_set_proc_type(type) (cpu_id.cpu.cp_vendor = (type) >> 8, \
+ cpu_id.cpu.cp_imp = ((type) & 0x00ff))
+#endif /* !_LOCORE */
+
+#if defined(_KERNEL) && !defined(_LOCORE)
+extern union cpuprid fpu_id;
+
+struct tlb;
+struct user;
+
+u_int32_t mips_cp0_config1_read(void);
+int Mips_ConfigCache(void);
+void Mips_SetWIRED(int);
+void Mips_SetPID(int);
+u_int Mips_GetCOUNT(void);
+void Mips_SetCOMPARE(u_int);
+u_int Mips_GetCOMPARE(void);
+
+void Mips_SyncCache(void);
+void Mips_SyncDCache(vm_offset_t, int);
+void Mips_HitSyncDCache(vm_offset_t, int);
+void Mips_HitSyncSCache(vm_offset_t, int);
+void Mips_IOSyncDCache(vm_offset_t, int, int);
+void Mips_HitInvalidateDCache(vm_offset_t, int);
+void Mips_SyncICache(vm_offset_t, int);
+void Mips_InvalidateICache(vm_offset_t, int);
+
+void Mips_TLBFlush(int);
+void Mips_TLBFlushAddr(vm_offset_t);
+void Mips_TLBWriteIndexed(int, struct tlb *);
+void Mips_TLBUpdate(vm_offset_t, unsigned);
+void Mips_TLBRead(int, struct tlb *);
+void mips_TBIAP(int);
+void wbflush(void);
+
+extern u_int32_t cpu_counter_interval; /* Number of counter ticks/tick */
+extern u_int32_t cpu_counter_last; /* Last compare value loaded */
+extern int num_tlbentries;
+extern char btext[];
+extern char etext[];
+extern int intr_nesting_level;
+
+#define func_0args_asmmacro(func, in) \
+ __asm __volatile ( "jalr %0" \
+ : "=r" (in) /* outputs */ \
+ : "r" (func) /* inputs */ \
+ : "$31", "$4");
+
+#define func_1args_asmmacro(func, arg0) \
+ __asm __volatile ("move $4, %1;" \
+ "jalr %0" \
+ : /* outputs */ \
+ : "r" (func), "r" (arg0) /* inputs */ \
+ : "$31", "$4");
+
+#define func_2args_asmmacro(func, arg0, arg1) \
+ __asm __volatile ("move $4, %1;" \
+ "move $5, %2;" \
+ "jalr %0" \
+ : /* outputs */ \
+ : "r" (func), "r" (arg0), "r" (arg1) /* inputs */ \
+ : "$31", "$4", "$5");
+
+#define func_3args_asmmacro(func, arg0, arg1, arg2) \
+ __asm __volatile ( "move $4, %1;" \
+ "move $5, %2;" \
+ "move $6, %3;" \
+ "jalr %0" \
+ : /* outputs */ \
+ : "r" (func), "r" (arg0), "r" (arg1), "r" (arg2) /* inputs */ \
+ : "$31", "$4", "$5", "$6");
+
+#define MachSetPID Mips_SetPID
+#define MachTLBUpdate Mips_TLBUpdate
+#define mips_TBIS Mips_TLBFlushAddr
+#define MIPS_TBIAP() mips_TBIAP(num_tlbentries)
+#define MachSetWIRED(index) Mips_SetWIRED(index)
+#define MachTLBFlush(count) Mips_TLBFlush(count)
+#define MachTLBGetPID(pid) (pid = Mips_TLBGetPID())
+#define MachTLBRead(tlbno, tlbp) Mips_TLBRead(tlbno, tlbp)
+#define MachFPTrap(sr, cause, pc) MipsFPTrap(sr, cause, pc)
+
+/*
+ * Enable realtime clock (always enabled).
+ */
+#define enablertclock()
+
+/*
+ * Are we in an interrupt handler?  Required by JunOS.
+ */
+#define IN_INT_HANDLER() \
+ (curthread->td_intr_nesting_level != 0 || \
+ (curthread->td_pflags & TDP_ITHREAD))
+
+/*
+ * Low level access routines to CPU registers
+ */
+
+void setsoftintr0(void);
+void clearsoftintr0(void);
+void setsoftintr1(void);
+void clearsoftintr1(void);
+
+
+u_int32_t mips_cp0_status_read(void);
+void mips_cp0_status_write(u_int32_t);
+
+int disableintr(void);
+void restoreintr(int);
+int enableintr(void);
+int Mips_TLBGetPID(void);
+
+void swi_vm(void *);
+void cpu_halt(void);
+void cpu_reset(void);
+
+u_int32_t set_intr_mask(u_int32_t);
+u_int32_t get_intr_mask(void);
+u_int32_t get_cyclecount(void);
+
+#define cpu_spinwait() /* nothing */
+
+#endif /* _KERNEL */
+#endif /* !_MACHINE_CPU_H_ */
diff --git a/sys/mips/include/cpufunc.h b/sys/mips/include/cpufunc.h
new file mode 100644
index 0000000..f3aa5a4
--- /dev/null
+++ b/sys/mips/include/cpufunc.h
@@ -0,0 +1,346 @@
+/* $OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
+
+/*
+ * Copyright (c) 1995-1999 Per Fogelstrom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Per Fogelstrom.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+#include <sys/types.h>
+#include <machine/cpuregs.h>
+
+/*
+ * These functions are required by user-land atomic ops.
+ */
+
+static __inline void
+mips_barrier(void)
+{
+ __asm __volatile (".set noreorder\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set reorder\n\t"
+ : : : "memory");
+}
+
+static __inline void
+mips_wbflush(void)
+{
+ __asm __volatile ("sync" : : : "memory");
+ mips_barrier();
+#if 0
+ __asm __volatile("mtc0 %0, $12\n" /* MIPS_COP_0_STATUS */
+ : : "r" (flag));
+#endif
+}
+
+static __inline void
+mips_read_membar(void)
+{
+ /* Nil */
+}
+
+static __inline void
+mips_write_membar(void)
+{
+ mips_wbflush();
+}
+
+#ifdef _KERNEL
+
+static __inline void
+mips_tlbp(void)
+{
+ __asm __volatile ("tlbp");
+ mips_barrier();
+#if 0
+ register_t ret;
+ register_t tmp;
+
+ __asm __volatile("mfc0 %0, $12\n" /* MIPS_COP_0_STATUS */
+ "and %1, %0, $~1\n" /* MIPS_SR_INT_IE */
+ "mtc0 %1, $12\n" /* MIPS_COP_0_STATUS */
+ : "=r" (ret), "=r" (tmp));
+ return (ret);
+#endif
+}
+
+static __inline void
+mips_tlbr(void)
+{
+ __asm __volatile ("tlbr");
+ mips_barrier();
+}
+
+static __inline void
+mips_tlbwi(void)
+{
+ __asm __volatile ("tlbwi");
+ mips_barrier();
+#if 0
+ __asm __volatile("mfc %0, $12\n" /* MIPS_COP_0_STATUS */
+ "or %0, %0, $1\n" /* MIPS_SR_INT_IE */
+ "mtc0 %0, $12\n" /* MIPS_COP_0_STATUS */
+ : "=r" (tmp));
+#endif
+}
+
+static __inline void
+mips_tlbwr(void)
+{
+ __asm __volatile ("tlbwr");
+ mips_barrier();
+}
+
+
+#if 0 /* XXX mips64 */
+
+#define MIPS_RDRW64_COP0(n,r) \
+static __inline uint64_t \
+mips_rd_ ## n (void) \
+{ \
+ int v0; \
+ __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";" \
+ : [v0] "=&r"(v0)); \
+ mips_barrier(); \
+ return (v0); \
+} \
+static __inline void \
+mips_wr_ ## n (uint64_t a0) \
+{ \
+ __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";" \
+ __XSTRING(COP0_SYNC)";" \
+ "nop;" \
+ "nop;" \
+ : \
+ : [a0] "r"(a0)); \
+ mips_barrier(); \
+} struct __hack
+
+MIPS_RDRW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
+MIPS_RDRW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
+MIPS_RDRW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
+MIPS_RDRW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
+MIPS_RDRW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);
+
+#undef MIPS_RDRW64_COP0
+#endif
+
+#define MIPS_RDRW32_COP0(n,r) \
+static __inline uint32_t \
+mips_rd_ ## n (void) \
+{ \
+ int v0; \
+ __asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";" \
+ : [v0] "=&r"(v0)); \
+ mips_barrier(); \
+ return (v0); \
+} \
+static __inline void \
+mips_wr_ ## n (uint32_t a0) \
+{ \
+ __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";" \
+ __XSTRING(COP0_SYNC)";" \
+ "nop;" \
+ "nop;" \
+ : \
+ : [a0] "r"(a0)); \
+ mips_barrier(); \
+} struct __hack
+
+#ifdef TARGET_OCTEON
+static __inline void mips_sync_icache (void)
+{
+ __asm __volatile (
+ ".set mips64\n"
+ ".word 0x041f0000\n"
+ "nop\n"
+ ".set mips0\n"
+ : : );
+}
+#endif
+
+MIPS_RDRW32_COP0(compare, MIPS_COP_0_COMPARE);
+MIPS_RDRW32_COP0(config, MIPS_COP_0_CONFIG);
+MIPS_RDRW32_COP0(count, MIPS_COP_0_COUNT);
+MIPS_RDRW32_COP0(index, MIPS_COP_0_TLB_INDEX);
+MIPS_RDRW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
+MIPS_RDRW32_COP0(cause, MIPS_COP_0_CAUSE);
+MIPS_RDRW32_COP0(status, MIPS_COP_0_STATUS);
+
+/* XXX: Some of these registers are specific to MIPS32. */
+MIPS_RDRW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
+MIPS_RDRW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
+MIPS_RDRW32_COP0(entrylow, MIPS_COP_0_TLB_LOW);
+MIPS_RDRW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
+MIPS_RDRW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
+MIPS_RDRW32_COP0(prid, MIPS_COP_0_PRID);
+MIPS_RDRW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
+MIPS_RDRW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
+
+static __inline uint32_t
+mips_rd_config_sel1(void)
+{
+ int v0;
+ __asm __volatile("mfc0 %[v0], $16, 1 ;"
+ : [v0] "=&r" (v0));
+ mips_barrier();
+ return (v0);
+}
+
+#undef MIPS_RDRW32_COP0
+
+static __inline register_t
+intr_disable(void)
+{
+ register_t s;
+
+ s = mips_rd_status();
+ mips_wr_status(s & ~MIPS_SR_INT_IE);
+
+ return (s);
+}
+
+static __inline register_t
+intr_enable(void)
+{
+ register_t s;
+
+ s = mips_rd_status();
+ mips_wr_status(s | MIPS_SR_INT_IE);
+
+ return (s);
+}
+
+#define intr_restore(s) mips_wr_status((s))
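intr_disable() and intr_restore() above implement the usual save-mask/restore pattern on the CP0 status register. The sketch below models that pattern on the host, with a plain variable standing in for the real register; every name suffixed _model is hypothetical.

#include <assert.h>

#define MIPS_SR_INT_IE  0x00000001      /* from cpuregs.h in this commit */

/* Stand-in for the CP0 status register so the sketch runs anywhere. */
static unsigned status_model = MIPS_SR_INT_IE | 0x0000ff00;

static unsigned
intr_disable_model(void)
{
    unsigned s = status_model;

    status_model = s & ~MIPS_SR_INT_IE;
    return (s);
}

#define intr_restore_model(s)   (status_model = (s))

int
main(void)
{
    unsigned s = intr_disable_model();

    assert((status_model & MIPS_SR_INT_IE) == 0);   /* interrupts masked */
    /* ... critical section would go here ... */
    intr_restore_model(s);
    assert(status_model & MIPS_SR_INT_IE);          /* previous state back */
    return (0);
}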
+
+static __inline void
+breakpoint(void)
+{
+ __asm __volatile ("break");
+}
+
+#endif /* _KERNEL */
+
+#define readb(va) (*(volatile uint8_t *) (va))
+#define readw(va) (*(volatile uint16_t *) (va))
+#define readl(va) (*(volatile uint32_t *) (va))
+
+#define writeb(va, d) (*(volatile uint8_t *) (va) = (d))
+#define writew(va, d) (*(volatile uint16_t *) (va) = (d))
+#define writel(va, d) (*(volatile uint32_t *) (va) = (d))
+
+/*
+ * I/O macros.
+ */
+
+#define outb(a,v) (*(volatile unsigned char*)(a) = (v))
+#define out8(a,v) (*(volatile unsigned char*)(a) = (v))
+#define outw(a,v) (*(volatile unsigned short*)(a) = (v))
+#define out16(a,v) outw(a,v)
+#define outl(a,v) (*(volatile unsigned int*)(a) = (v))
+#define out32(a,v) outl(a,v)
+#define inb(a) (*(volatile unsigned char*)(a))
+#define in8(a) (*(volatile unsigned char*)(a))
+#define inw(a) (*(volatile unsigned short*)(a))
+#define in16(a) inw(a)
+#define inl(a) (*(volatile unsigned int*)(a))
+#define in32(a) inl(a)
+
+#define out8rb(a,v) (*(volatile unsigned char*)(a) = (v))
+#define out16rb(a,v) (__out16rb((volatile uint16_t *)(a), v))
+#define out32rb(a,v) (__out32rb((volatile uint32_t *)(a), v))
+#define in8rb(a) (*(volatile unsigned char*)(a))
+#define in16rb(a) (__in16rb((volatile uint16_t *)(a)))
+#define in32rb(a) (__in32rb((volatile uint32_t *)(a)))
+
+#define _swap_(x) (((x) >> 24) | ((x) << 24) | \
+ (((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8))
+
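_swap_ byte-reverses a 32-bit word and backs the *rb (reverse-byte) accessors that follow. A standalone sanity check of the macro, copied verbatim from above; only main() and the sample value are added.

#include <assert.h>
#include <stdint.h>

/* Copied from the header above. */
#define _swap_(x)   (((x) >> 24) | ((x) << 24) | \
                        (((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8))

int
main(void)
{
    uint32_t v = 0x11223344;

    assert(_swap_(v) == 0x44332211);
    assert(_swap_(_swap_(v)) == v);     /* the swap is its own inverse */
    return (0);
}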
+static __inline void __out32rb(volatile uint32_t *, uint32_t);
+static __inline void __out16rb(volatile uint16_t *, uint16_t);
+static __inline uint32_t __in32rb(volatile uint32_t *);
+static __inline uint16_t __in16rb(volatile uint16_t *);
+
+static __inline void
+__out32rb(volatile uint32_t *a, uint32_t v)
+{
+ uint32_t _v_ = v;
+
+ _v_ = _swap_(_v_);
+ out32(a, _v_);
+}
+
+static __inline void
+__out16rb(volatile uint16_t *a, uint16_t v)
+{
+ uint16_t _v_;
+
+ _v_ = ((v >> 8) & 0xff) | (v << 8);
+ out16(a, _v_);
+}
+
+static __inline uint32_t
+__in32rb(volatile uint32_t *a)
+{
+ uint32_t _v_;
+
+ _v_ = in32(a);
+ _v_ = _swap_(_v_);
+ return _v_;
+}
+
+static __inline uint16_t
+__in16rb(volatile uint16_t *a)
+{
+ uint16_t _v_;
+
+ _v_ = in16(a);
+ _v_ = ((_v_ >> 8) & 0xff) | (_v_ << 8);
+ return _v_;
+}
+
+void insb(uint8_t *, uint8_t *,int);
+void insw(uint16_t *, uint16_t *,int);
+void insl(uint32_t *, uint32_t *,int);
+void outsb(uint8_t *, const uint8_t *,int);
+void outsw(uint16_t *, const uint16_t *,int);
+void outsl(uint32_t *, const uint32_t *,int);
+u_int loadandclear(volatile u_int *addr);
+
+#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/mips/include/cpuinfo.h b/sys/mips/include/cpuinfo.h
new file mode 100644
index 0000000..bf32086
--- /dev/null
+++ b/sys/mips/include/cpuinfo.h
@@ -0,0 +1,120 @@
+/* $NetBSD: cpu.h,v 1.70 2003/01/17 23:36:08 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell and Rick Macklem.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ * @(#)cpu.h 8.4 (Berkeley) 1/4/94
+ */
+
+#ifndef _CPUINFO_H_
+#define _CPUINFO_H_
+
+/*
+ * Exported definitions unique to NetBSD/mips cpu support.
+ */
+
+#ifdef _KERNEL
+#ifndef LOCORE
+
+struct mips_cpuinfo {
+ u_int8_t cpu_vendor;
+ u_int8_t cpu_rev;
+ u_int8_t cpu_impl;
+ u_int8_t tlb_type;
+ u_int16_t tlb_nentries;
+ u_int8_t icache_virtual;
+ struct {
+ u_int8_t ic_size;
+ u_int8_t ic_linesize;
+ u_int8_t ic_nways;
+ u_int16_t ic_nsets;
+ u_int8_t dc_size;
+ u_int8_t dc_linesize;
+ u_int8_t dc_nways;
+ u_int16_t dc_nsets;
+ } l1;
+};
+
+/* TODO: Merge above structure with NetBSD's below. */
+
+struct cpu_info {
+#ifdef notyet
+ struct schedstate_percpu ci_schedstate; /* scheduler state */
+#endif
+ u_long ci_cpu_freq; /* CPU frequency */
+ u_long ci_cycles_per_hz; /* CPU freq / hz */
+ u_long ci_divisor_delay; /* for delay/DELAY */
+ u_long ci_divisor_recip; /* scaled reciprocal of previous;
+ see below */
+#if defined(DIAGNOSTIC) || defined(LOCKDEBUG)
+ u_long ci_spin_locks; /* # of spin locks held */
+ u_long ci_simple_locks; /* # of simple locks held */
+#endif
+};
+
+/*
+ * To implement a more accurate microtime using the CP0 COUNT register
+ * we need to divide that register by the number of cycles per MHz.
+ * But...
+ *
+ * DIV and DIVU are expensive on MIPS (e.g. 75 clocks on the R4000).
+ * MULT and MULTU are only 12 clocks on the same CPU.
+ *
+ * The strategy we use is to calculate the reciprocal of cycles per MHz,
+ * scaled by 1<<32.  Then we can simply issue a MULTU and pluck off the
+ * HI register to get the result of the division.
+ */
+#define MIPS_SET_CI_RECIPRICAL(cpu) \
+do { \
+ KASSERT((cpu)->ci_divisor_delay != 0, ("divisor delay")); \
+ (cpu)->ci_divisor_recip = 0x100000000ULL / (cpu)->ci_divisor_delay; \
+} while (0)
+
+#define MIPS_COUNT_TO_MHZ(cpu, count, res) \
+ __asm __volatile ("multu %1,%2 ; mfhi %0" \
+ : "=r"((res)) : "r"((count)), "r"((cpu)->ci_divisor_recip))
+
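A host-runnable model of the MULTU/MFHI trick described above: multiplying by the precomputed 2^32-scaled reciprocal and keeping the high 32 bits approximates the division. The divisor, count, and variable names below are arbitrary examples.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
    /* e.g. 100 Count ticks per microsecond. */
    uint32_t divisor_delay = 100;
    uint32_t recip = (uint32_t)(0x100000000ULL / divisor_delay);
    uint32_t count = 123456;

    /* MULTU + MFHI is modelled by a 64-bit multiply and a shift. */
    uint32_t hi = (uint32_t)(((uint64_t)count * recip) >> 32);

    /* The result matches the true quotient to within one tick. */
    assert(hi == count / divisor_delay || hi == count / divisor_delay - 1);
    return (0);
}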
+
+extern struct cpu_info cpu_info_store;
+
+#if 0
+#define curcpu() (&cpu_info_store)
+#define cpu_number() (0)
+#endif
+
+#endif /* !LOCORE */
+#endif /* _KERNEL */
+#endif /* _CPUINFO_H_ */
diff --git a/sys/mips/include/cpuregs.h b/sys/mips/include/cpuregs.h
new file mode 100644
index 0000000..e590c9d
--- /dev/null
+++ b/sys/mips/include/cpuregs.h
@@ -0,0 +1,899 @@
+/* $NetBSD: cpuregs.h,v 1.70 2006/05/15 02:26:54 simonb Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell and Rick Macklem.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)machConst.h 8.1 (Berkeley) 6/10/93
+ *
+ * machConst.h --
+ *
+ * Machine dependent constants.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machConst.h,
+ * v 9.2 89/10/21 15:55:22 jhh Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAddrs.h,
+ * v 1.2 89/08/15 18:28:21 rab Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/vm/ds3100.md/RCS/vmPmaxConst.h,
+ * v 9.1 89/09/18 17:33:00 shirriff Exp SPRITE (DECWRL)
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MIPS_CPUREGS_H_
+#define _MIPS_CPUREGS_H_
+
+#include <sys/cdefs.h> /* For __CONCAT() */
+
+#if defined(_KERNEL_OPT)
+#include "opt_cputype.h"
+#endif
+
+/*
+ * Address space.
+ * 32-bit mips CPUS partition their 32-bit address space into four segments:
+ *
+ * kuseg 0x00000000 - 0x7fffffff User virtual mem, mapped
+ * kseg0 0x80000000 - 0x9fffffff Physical memory, cached, unmapped
+ * kseg1 0xa0000000 - 0xbfffffff Physical memory, uncached, unmapped
+ * kseg2 0xc0000000 - 0xffffffff kernel-virtual, mapped
+ *
+ * mips1 physical memory is limited to 512Mbytes, which is
+ * doubly mapped in kseg0 (cached) and kseg1 (uncached).
+ * Caching of mapped addresses is controlled by bits in the TLB entry.
+ */
+
+#define MIPS_KUSEG_START 0x0
+#define MIPS_KSEG0_START 0x80000000
+#define MIPS_KSEG0_END 0x9fffffff
+#define MIPS_KSEG1_START 0xa0000000
+#define MIPS_KSEG1_END 0xbfffffff
+#define MIPS_KSSEG_START 0xc0000000
+#define MIPS_KSSEG_END 0xdfffffff
+#define MIPS_KSEG2_START MIPS_KSSEG_START
+#define MIPS_KSEG2_END MIPS_KSSEG_END
+#define MIPS_KSEG3_START 0xe0000000
+#define MIPS_KSEG3_END 0xffffffff
+#define MIPS_MAX_MEM_ADDR 0xbe000000
+#define MIPS_RESERVED_ADDR 0xbfc80000
+
+/* Map virtual address to index in mips3 r4k virtually-indexed cache */
+#define MIPS3_VA_TO_CINDEX(x) \
+ ((unsigned)(x) & 0xffffff | MIPS_KSEG0_START)
+
+#define MIPS_PHYS_TO_XKPHYS(cca,x) \
+ ((0x2ULL << 62) | ((unsigned long long)(cca) << 59) | (x))
+#define MIPS_XKPHYS_TO_PHYS(x) ((x) & 0x0effffffffffffffULL)
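For the 64-bit XKPHYS window, MIPS_PHYS_TO_XKPHYS() above places 0b10 in bits 63:62, the cache coherency attribute in bits 61:59, and the physical address in the low bits. A standalone check using CCA 3 (cacheable noncoherent); the sample address is arbitrary.

#include <assert.h>
#include <stdint.h>

/* Copied from the header above. */
#define MIPS_PHYS_TO_XKPHYS(cca, x) \
    ((0x2ULL << 62) | ((unsigned long long)(cca) << 59) | (x))

int
main(void)
{
    uint64_t va = MIPS_PHYS_TO_XKPHYS(3, 0x1000ULL);

    /* 0x9800000000000000 is the familiar cached XKPHYS base. */
    assert(va == 0x9800000000001000ULL);
    return (0);
}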
+
+/* CPU dependent mtc0 hazard hook */
+#ifdef TARGET_OCTEON
+#define COP0_SYNC nop; nop; nop; nop; nop;
+#else
+#define COP0_SYNC /* nothing */
+#endif
+#define COP0_HAZARD_FPUENABLE nop; nop; nop; nop;
+
+/*
+ * The bits in the cause register.
+ *
+ * Bits common to r3000 and r4000:
+ *
+ * MIPS_CR_BR_DELAY Exception happened in branch delay slot.
+ * MIPS_CR_COP_ERR Coprocessor error.
+ * MIPS_CR_IP Interrupt pending bits defined below.
+ * (same meaning as in CAUSE register).
+ * MIPS_CR_EXC_CODE The exception type (see exception codes below).
+ *
+ * Differences:
+ * r3k has 4 bits of exception type, r4k has 5 bits.
+ */
+#define MIPS_CR_BR_DELAY 0x80000000
+#define MIPS_CR_COP_ERR 0x30000000
+#define MIPS1_CR_EXC_CODE 0x0000003C /* four bits */
+#define MIPS3_CR_EXC_CODE 0x0000007C /* five bits */
+#define MIPS_CR_IP 0x0000FF00
+#define MIPS_CR_EXC_CODE_SHIFT 2
+
+/*
+ * The bits in the status register. All bits are active when set to 1.
+ *
+ * R3000 status register fields:
+ * MIPS_SR_COP_USABILITY Control the usability of the four coprocessors.
+ * MIPS_SR_TS TLB shutdown.
+ *
+ * MIPS_SR_INT_IE Master (current) interrupt enable bit.
+ *
+ * Differences:
+ * r3k cache control is via frobbing SR register bits, whereas
+ * r4k cache control is via explicit cache instructions.
+ * r3k has a 3-entry stack of kernel/user bits, whereas the
+ * r4k has kernel/supervisor/user.
+ */
+#define MIPS_SR_COP_USABILITY 0xf0000000
+#define MIPS_SR_COP_0_BIT 0x10000000
+#define MIPS_SR_COP_1_BIT 0x20000000
+#define MIPS_SR_COP_2_BIT 0x40000000
+
+ /* r4k and r3k differences, see below */
+
+#define MIPS_SR_MX 0x01000000 /* MIPS64 */
+#define MIPS_SR_PX 0x00800000 /* MIPS64 */
+#define MIPS_SR_BEV 0x00400000 /* Use boot exception vector */
+#define MIPS_SR_TS 0x00200000
+#define MIPS_SR_DE 0x00010000
+
+#define MIPS_SR_INT_IE 0x00000001
+/*#define MIPS_SR_MBZ 0x0f8000c0*/ /* Never used, true for r3k */
+/*#define MIPS_SR_INT_MASK 0x0000ff00*/
+
+/*
+ * The R2000/R3000-specific status register bit definitions.
+ * All bits are active when set to 1.
+ *
+ * MIPS_SR_PARITY_ERR Parity error.
+ * MIPS_SR_CACHE_MISS Most recent D-cache load resulted in a miss.
+ * MIPS_SR_PARITY_ZERO Zero replaces outgoing parity bits.
+ * MIPS_SR_SWAP_CACHES Swap I-cache and D-cache.
+ * MIPS_SR_ISOL_CACHES Isolate D-cache from main memory.
+ * Interrupt enable bits defined below.
+ * MIPS_SR_KU_OLD Old kernel/user mode bit. 1 => user mode.
+ * MIPS_SR_INT_ENA_OLD Old interrupt enable bit.
+ * MIPS_SR_KU_PREV Previous kernel/user mode bit. 1 => user mode.
+ * MIPS_SR_INT_ENA_PREV Previous interrupt enable bit.
+ * MIPS_SR_KU_CUR Current kernel/user mode bit. 1 => user mode.
+ */
+
+#define MIPS1_PARITY_ERR 0x00100000
+#define MIPS1_CACHE_MISS 0x00080000
+#define MIPS1_PARITY_ZERO 0x00040000
+#define MIPS1_SWAP_CACHES 0x00020000
+#define MIPS1_ISOL_CACHES 0x00010000
+
+#define MIPS1_SR_KU_OLD 0x00000020 /* 2nd stacked KU/IE*/
+#define MIPS1_SR_INT_ENA_OLD 0x00000010 /* 2nd stacked KU/IE*/
+#define MIPS1_SR_KU_PREV 0x00000008 /* 1st stacked KU/IE*/
+#define MIPS1_SR_INT_ENA_PREV 0x00000004 /* 1st stacked KU/IE*/
+#define MIPS1_SR_KU_CUR 0x00000002 /* current KU */
+
+/* backwards compatibility */
+#define MIPS_SR_PARITY_ERR MIPS1_PARITY_ERR
+#define MIPS_SR_CACHE_MISS MIPS1_CACHE_MISS
+#define MIPS_SR_PARITY_ZERO MIPS1_PARITY_ZERO
+#define MIPS_SR_SWAP_CACHES MIPS1_SWAP_CACHES
+#define MIPS_SR_ISOL_CACHES MIPS1_ISOL_CACHES
+
+#define MIPS_SR_KU_OLD MIPS1_SR_KU_OLD
+#define MIPS_SR_INT_ENA_OLD MIPS1_SR_INT_ENA_OLD
+#define MIPS_SR_KU_PREV MIPS1_SR_KU_PREV
+#define MIPS_SR_KU_CUR MIPS1_SR_KU_CUR
+#define MIPS_SR_INT_ENA_PREV MIPS1_SR_INT_ENA_PREV
+
+/*
+ * R4000 status register bit definitions,
+ * where different from r2000/r3000.
+ */
+#define MIPS3_SR_XX 0x80000000
+#define MIPS3_SR_RP 0x08000000
+#define MIPS3_SR_FR 0x04000000
+#define MIPS3_SR_RE 0x02000000
+
+#define MIPS3_SR_DIAG_DL 0x01000000 /* QED 52xx */
+#define MIPS3_SR_DIAG_IL 0x00800000 /* QED 52xx */
+#define MIPS3_SR_SR 0x00100000
+#define MIPS3_SR_NMI 0x00080000 /* MIPS32/64 */
+#define MIPS3_SR_DIAG_CH 0x00040000
+#define MIPS3_SR_DIAG_CE 0x00020000
+#define MIPS3_SR_DIAG_PE 0x00010000
+#define MIPS3_SR_EIE 0x00010000 /* TX79/R5900 */
+#define MIPS3_SR_KX 0x00000080
+#define MIPS3_SR_SX 0x00000040
+#define MIPS3_SR_UX 0x00000020
+#define MIPS3_SR_KSU_MASK 0x00000018
+#define MIPS3_SR_KSU_USER 0x00000010
+#define MIPS3_SR_KSU_SUPER 0x00000008
+#define MIPS3_SR_KSU_KERNEL 0x00000000
+#define MIPS3_SR_ERL 0x00000004
+#define MIPS3_SR_EXL 0x00000002
+
+#ifdef MIPS3_5900
+#undef MIPS_SR_INT_IE
+#define MIPS_SR_INT_IE 0x00010001 /* XXX */
+#endif
+
+/*
+ * These definitions are for MIPS32 processors.
+ */
+#define MIPS32_SR_RP 0x08000000 /* reduced power mode */
+#define MIPS32_SR_FR 0x04000000 /* 64-bit capable fpu */
+#define MIPS32_SR_RE 0x02000000 /* reverse user endian */
+#define MIPS32_SR_MX 0x01000000 /* MIPS64 */
+#define MIPS32_SR_PX 0x00800000 /* MIPS64 */
+#define MIPS32_SR_BEV 0x00400000 /* Use boot exception vector */
+#define MIPS32_SR_TS 0x00200000 /* TLB multiple match */
+#define MIPS32_SR_SOFT_RESET 0x00100000 /* soft reset occurred */
+#define MIPS32_SR_NMI 0x00080000 /* NMI occurred */
+#define MIPS32_SR_INT_MASK 0x0000ff00
+#define MIPS32_SR_KX 0x00000080 /* MIPS64 */
+#define MIPS32_SR_SX 0x00000040 /* MIPS64 */
+#define MIPS32_SR_UX 0x00000020 /* MIPS64 */
+#define MIPS32_SR_KSU_MASK 0x00000018 /* privilege mode */
+#define MIPS32_SR_KSU_USER 0x00000010
+#define MIPS32_SR_KSU_SUPER 0x00000008
+#define MIPS32_SR_KSU_KERNEL 0x00000000
+#define MIPS32_SR_ERL 0x00000004 /* error level */
+#define MIPS32_SR_EXL 0x00000002 /* exception level */
+
+#define MIPS_SR_SOFT_RESET MIPS3_SR_SR
+#define MIPS_SR_DIAG_CH MIPS3_SR_DIAG_CH
+#define MIPS_SR_DIAG_CE MIPS3_SR_DIAG_CE
+#define MIPS_SR_DIAG_PE MIPS3_SR_DIAG_PE
+#define MIPS_SR_KX MIPS3_SR_KX
+#define MIPS_SR_SX MIPS3_SR_SX
+#define MIPS_SR_UX MIPS3_SR_UX
+
+#define MIPS_SR_KSU_MASK MIPS3_SR_KSU_MASK
+#define MIPS_SR_KSU_USER MIPS3_SR_KSU_USER
+#define MIPS_SR_KSU_SUPER MIPS3_SR_KSU_SUPER
+#define MIPS_SR_KSU_KERNEL MIPS3_SR_KSU_KERNEL
+#define MIPS_SR_ERL MIPS3_SR_ERL
+#define MIPS_SR_EXL MIPS3_SR_EXL
+
+
+/*
+ * The interrupt masks.
+ * If a bit in the mask is 1 then the interrupt is enabled (or pending).
+ */
+#define MIPS_INT_MASK 0xff00
+#define MIPS_INT_MASK_5 0x8000
+#define MIPS_INT_MASK_4 0x4000
+#define MIPS_INT_MASK_3 0x2000
+#define MIPS_INT_MASK_2 0x1000
+#define MIPS_INT_MASK_1 0x0800
+#define MIPS_INT_MASK_0 0x0400
+#define MIPS_HARD_INT_MASK 0xfc00
+#define MIPS_SOFT_INT_MASK_1 0x0200
+#define MIPS_SOFT_INT_MASK_0 0x0100
+
+/*
+ * mips3 CPUs have an on-chip timer at INT_MASK_5. Each platform can
+ * choose to enable this interrupt.
+ */
+#if defined(MIPS3_ENABLE_CLOCK_INTR)
+#define MIPS3_INT_MASK MIPS_INT_MASK
+#define MIPS3_HARD_INT_MASK MIPS_HARD_INT_MASK
+#else
+#define MIPS3_INT_MASK (MIPS_INT_MASK & ~MIPS_INT_MASK_5)
+#define MIPS3_HARD_INT_MASK (MIPS_HARD_INT_MASK & ~MIPS_INT_MASK_5)
+#endif
+
+/*
+ * The bits in the context register.
+ */
+#define MIPS1_CNTXT_PTE_BASE 0xFFE00000
+#define MIPS1_CNTXT_BAD_VPN 0x001FFFFC
+
+#define MIPS3_CNTXT_PTE_BASE 0xFF800000
+#define MIPS3_CNTXT_BAD_VPN2 0x007FFFF0
+
+/*
+ * Location of MIPS32 exception vectors. Most are multiplexed in
+ * the sense that further decoding is necessary (e.g. reading the
+ * CAUSE register or NMI bits in STATUS).
+ * The INT vector is dedicated to hardware interrupts; it is
+ * only referenced if the IV bit in CAUSE is set to 1.
+ */
+#define MIPS_VEC_RESET 0xBFC00000 /* Hard, soft, or NMI */
+#define MIPS_VEC_EJTAG 0xBFC00480
+#define MIPS_VEC_TLB 0x80000000
+#define MIPS_VEC_XTLB 0x80000080
+#define MIPS_VEC_CACHE 0x80000100
+#define MIPS_VEC_GENERIC 0x80000180 /* Most exceptions */
+#define MIPS_VEC_INTERRUPT 0x80000200
+
+/*
+ * The bits in the MIPS3 config register.
+ *
+ * bit 0..5: R/W, Bit 6..31: R/O
+ */
+
+/* kseg0 coherency algorithm - see MIPS3_TLB_ATTR values */
+#define MIPS3_CONFIG_K0_MASK 0x00000007
+
+/*
+ * R/W Update on Store Conditional
+ * 0: Store Conditional uses coherency algorithm specified by TLB
+ * 1: Store Conditional uses cacheable coherent update on write
+ */
+#define MIPS3_CONFIG_CU 0x00000008
+
+#define MIPS3_CONFIG_DB 0x00000010 /* Primary D-cache line size */
+#define MIPS3_CONFIG_IB 0x00000020 /* Primary I-cache line size */
+#define MIPS3_CONFIG_CACHE_L1_LSIZE(config, bit) \
+ (((config) & (bit)) ? 32 : 16)
+
+#define MIPS3_CONFIG_DC_MASK 0x000001c0 /* Primary D-cache size */
+#define MIPS3_CONFIG_DC_SHIFT 6
+#define MIPS3_CONFIG_IC_MASK 0x00000e00 /* Primary I-cache size */
+#define MIPS3_CONFIG_IC_SHIFT 9
+#define MIPS3_CONFIG_C_DEFBASE 0x1000 /* default base 2^12 */
+
+/* Cache size mode indication: available only on Vr41xx CPUs */
+#define MIPS3_CONFIG_CS 0x00001000
+#define MIPS3_CONFIG_C_4100BASE 0x0400 /* base is 2^10 if CS=1 */
+#define MIPS3_CONFIG_CACHE_SIZE(config, mask, base, shift) \
+ ((base) << (((config) & (mask)) >> (shift)))
+
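The primary cache geometry encoded in the MIPS3 config register can be recovered with the two macros above. A standalone sketch using a config word whose DC field is 3 and whose DB bit is set (a 32KB primary D-cache with 32-byte lines); the config value is fabricated for illustration.

#include <assert.h>

/* Values copied from the header above. */
#define MIPS3_CONFIG_DB         0x00000010
#define MIPS3_CONFIG_DC_MASK    0x000001c0
#define MIPS3_CONFIG_DC_SHIFT   6
#define MIPS3_CONFIG_C_DEFBASE  0x1000
#define MIPS3_CONFIG_CACHE_L1_LSIZE(config, bit) \
    (((config) & (bit)) ? 32 : 16)
#define MIPS3_CONFIG_CACHE_SIZE(config, mask, base, shift) \
    ((base) << (((config) & (mask)) >> (shift)))

int
main(void)
{
    unsigned config = (3 << MIPS3_CONFIG_DC_SHIFT) | MIPS3_CONFIG_DB;

    assert(MIPS3_CONFIG_CACHE_SIZE(config, MIPS3_CONFIG_DC_MASK,
        MIPS3_CONFIG_C_DEFBASE, MIPS3_CONFIG_DC_SHIFT) == 32 * 1024);
    assert(MIPS3_CONFIG_CACHE_L1_LSIZE(config, MIPS3_CONFIG_DB) == 32);
    return (0);
}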
+/* External cache enable: Controls L2 for R5000/Rm527x and L3 for Rm7000 */
+#define MIPS3_CONFIG_SE 0x00001000
+
+/* Block ordering: 0: sequential, 1: sub-block */
+#define MIPS3_CONFIG_EB 0x00002000
+
+/* ECC mode - 0: ECC mode, 1: parity mode */
+#define MIPS3_CONFIG_EM 0x00004000
+
+/* BigEndianMem - 0: kernel and memory are little endian, 1: big endian */
+#define MIPS3_CONFIG_BE 0x00008000
+
+/* Dirty Shared coherency state - 0: enabled, 1: disabled */
+#define MIPS3_CONFIG_SM 0x00010000
+
+/* Secondary Cache - 0: present, 1: not present */
+#define MIPS3_CONFIG_SC 0x00020000
+
+/* System Port width - 0: 64-bit, 1: 32-bit (QED RM523x), 2,3: reserved */
+#define MIPS3_CONFIG_EW_MASK 0x000c0000
+#define MIPS3_CONFIG_EW_SHIFT 18
+
+/* Secondary Cache port width - 0: 128-bit data path to S-cache, 1: reserved */
+#define MIPS3_CONFIG_SW 0x00100000
+
+/* Split Secondary Cache Mode - 0: I/D mixed, 1: I/D separated by SCAddr(17) */
+#define MIPS3_CONFIG_SS 0x00200000
+
+/* Secondary Cache line size */
+#define MIPS3_CONFIG_SB_MASK 0x00c00000
+#define MIPS3_CONFIG_SB_SHIFT 22
+#define MIPS3_CONFIG_CACHE_L2_LSIZE(config) \
+ (0x10 << (((config) & MIPS3_CONFIG_SB_MASK) >> MIPS3_CONFIG_SB_SHIFT))
+
+/* Write back data rate */
+#define MIPS3_CONFIG_EP_MASK 0x0f000000
+#define MIPS3_CONFIG_EP_SHIFT 24
+
+/* System clock ratio - this value is CPU dependent */
+#define MIPS3_CONFIG_EC_MASK 0x70000000
+#define MIPS3_CONFIG_EC_SHIFT 28
+
+/* Master-Checker Mode - 1: enabled */
+#define MIPS3_CONFIG_CM 0x80000000
+
+/*
+ * The bits in the MIPS4 config register.
+ */
+
+/* kseg0 coherency algorithm - see MIPS3_TLB_ATTR values */
+#define MIPS4_CONFIG_K0_MASK MIPS3_CONFIG_K0_MASK
+#define MIPS4_CONFIG_DN_MASK 0x00000018 /* Device number */
+#define MIPS4_CONFIG_CT 0x00000020 /* CohPrcReqTar */
+#define MIPS4_CONFIG_PE 0x00000040 /* PreElmReq */
+#define MIPS4_CONFIG_PM_MASK 0x00000180 /* PreReqMax */
+#define MIPS4_CONFIG_EC_MASK 0x00001e00 /* SysClkDiv */
+#define MIPS4_CONFIG_SB 0x00002000 /* SCBlkSize */
+#define MIPS4_CONFIG_SK 0x00004000 /* SCColEn */
+#define MIPS4_CONFIG_BE 0x00008000 /* MemEnd */
+#define MIPS4_CONFIG_SS_MASK 0x00070000 /* SCSize */
+#define MIPS4_CONFIG_SC_MASK 0x00380000 /* SCClkDiv */
+#define MIPS4_CONFIG_RESERVED 0x03c00000 /* Reserved wired 0 */
+#define MIPS4_CONFIG_DC_MASK 0x1c000000 /* Primary D-Cache size */
+#define MIPS4_CONFIG_IC_MASK 0xe0000000 /* Primary I-Cache size */
+
+#define MIPS4_CONFIG_DC_SHIFT 26
+#define MIPS4_CONFIG_IC_SHIFT 29
+
+#define MIPS4_CONFIG_CACHE_SIZE(config, mask, base, shift) \
+ ((base) << (((config) & (mask)) >> (shift)))
+
+#define MIPS4_CONFIG_CACHE_L2_LSIZE(config) \
+ (((config) & MIPS4_CONFIG_SB) ? 128 : 64)
+
+/*
+ * Location of exception vectors.
+ *
+ * Common vectors: reset and UTLB miss.
+ */
+#define MIPS_RESET_EXC_VEC 0xBFC00000
+#define MIPS_UTLB_MISS_EXC_VEC 0x80000000
+
+/*
+ * MIPS-1 general exception vector (everything else)
+ */
+#define MIPS1_GEN_EXC_VEC 0x80000080
+
+/*
+ * MIPS-III exception vectors
+ */
+#define MIPS3_XTLB_MISS_EXC_VEC 0x80000080
+#define MIPS3_CACHE_ERR_EXC_VEC 0x80000100
+#define MIPS3_GEN_EXC_VEC 0x80000180
+
+/*
+ * TX79 (R5900) exception vectors
+ */
+#define MIPS_R5900_COUNTER_EXC_VEC 0x80000080
+#define MIPS_R5900_DEBUG_EXC_VEC 0x80000100
+
+/*
+ * MIPS32/MIPS64 (and some MIPS3) dedicated interrupt vector.
+ */
+#define MIPS3_INTR_EXC_VEC 0x80000200
+
+/*
+ * Coprocessor 0 registers:
+ *
+ * v--- width for mips I,III,32,64
+ * (3=32bit, 6=64bit, i=impl dep)
+ * 0 MIPS_COP_0_TLB_INDEX 3333 TLB Index.
+ * 1 MIPS_COP_0_TLB_RANDOM 3333 TLB Random.
+ * 2 MIPS_COP_0_TLB_LOW 3... r3k TLB entry low.
+ * 2 MIPS_COP_0_TLB_LO0 .636 r4k TLB entry low.
+ * 3 MIPS_COP_0_TLB_LO1 .636 r4k TLB entry low, extended.
+ * 4 MIPS_COP_0_TLB_CONTEXT 3636 TLB Context.
+ * 5 MIPS_COP_0_TLB_PG_MASK .333 TLB Page Mask register.
+ * 6 MIPS_COP_0_TLB_WIRED .333 Wired TLB number.
+ * 8 MIPS_COP_0_BAD_VADDR 3636 Bad virtual address.
+ * 9 MIPS_COP_0_COUNT .333 Count register.
+ * 10 MIPS_COP_0_TLB_HI 3636 TLB entry high.
+ * 11 MIPS_COP_0_COMPARE .333 Compare (against Count).
+ * 12 MIPS_COP_0_STATUS 3333 Status register.
+ * 13 MIPS_COP_0_CAUSE 3333 Exception cause register.
+ * 14 MIPS_COP_0_EXC_PC 3636 Exception PC.
+ * 15 MIPS_COP_0_PRID 3333 Processor revision identifier.
+ * 16 MIPS_COP_0_CONFIG 3333 Configuration register.
+ * 16/1 MIPS_COP_0_CONFIG1 ..33 Configuration register 1.
+ * 16/2 MIPS_COP_0_CONFIG2 ..33 Configuration register 2.
+ * 16/3 MIPS_COP_0_CONFIG3 ..33 Configuration register 3.
+ * 17 MIPS_COP_0_LLADDR .336 Load Linked Address.
+ * 18 MIPS_COP_0_WATCH_LO .336 WatchLo register.
+ * 19 MIPS_COP_0_WATCH_HI .333 WatchHi register.
+ * 20 MIPS_COP_0_TLB_XCONTEXT .6.6 TLB XContext register.
+ * 23 MIPS_COP_0_DEBUG .... Debug JTAG register.
+ * 24 MIPS_COP_0_DEPC .... DEPC JTAG register.
+ * 25 MIPS_COP_0_PERFCNT ..36 Performance Counter register.
+ * 26 MIPS_COP_0_ECC .3ii ECC / Error Control register.
+ * 27 MIPS_COP_0_CACHE_ERR .3ii Cache Error register.
+ * 28/0 MIPS_COP_0_TAG_LO .3ii Cache TagLo register (instr).
+ * 28/1 MIPS_COP_0_DATA_LO ..ii Cache DataLo register (instr).
+ * 28/2 MIPS_COP_0_TAG_LO ..ii Cache TagLo register (data).
+ * 28/3 MIPS_COP_0_DATA_LO ..ii Cache DataLo register (data).
+ * 29/0 MIPS_COP_0_TAG_HI .3ii Cache TagHi register (instr).
+ * 29/1 MIPS_COP_0_DATA_HI ..ii Cache DataHi register (instr).
+ * 29/2 MIPS_COP_0_TAG_HI ..ii Cache TagHi register (data).
+ * 29/3 MIPS_COP_0_DATA_HI ..ii Cache DataHi register (data).
+ * 30 MIPS_COP_0_ERROR_PC .636 Error EPC register.
+ * 31 MIPS_COP_0_DESAVE .... DESAVE JTAG register.
+ */
+
+/* Deal with inclusion from an assembly file. */
+#if defined(_LOCORE) || defined(LOCORE)
+#define _(n) $n
+#else
+#define _(n) n
+#endif
+
+
+#define MIPS_COP_0_TLB_INDEX _(0)
+#define MIPS_COP_0_TLB_RANDOM _(1)
+ /* Name and meaning of TLB bits for $2 differ on r3k and r4k. */
+
+#define MIPS_COP_0_TLB_CONTEXT _(4)
+ /* $5 and $6 new with MIPS-III */
+#define MIPS_COP_0_BAD_VADDR _(8)
+#define MIPS_COP_0_TLB_HI _(10)
+#define MIPS_COP_0_STATUS _(12)
+#define MIPS_COP_0_CAUSE _(13)
+#define MIPS_COP_0_EXC_PC _(14)
+#define MIPS_COP_0_PRID _(15)
+
+
+/* MIPS-I */
+#define MIPS_COP_0_TLB_LOW _(2)
+
+/* MIPS-III */
+#define MIPS_COP_0_TLB_LO0 _(2)
+#define MIPS_COP_0_TLB_LO1 _(3)
+
+#define MIPS_COP_0_TLB_PG_MASK _(5)
+#define MIPS_COP_0_TLB_WIRED _(6)
+
+#define MIPS_COP_0_COUNT _(9)
+#define MIPS_COP_0_COMPARE _(11)
+
+#define MIPS_COP_0_CONFIG _(16)
+#define MIPS_COP_0_LLADDR _(17)
+#define MIPS_COP_0_WATCH_LO _(18)
+#define MIPS_COP_0_WATCH_HI _(19)
+#define MIPS_COP_0_TLB_XCONTEXT _(20)
+#define MIPS_COP_0_ECC _(26)
+#define MIPS_COP_0_CACHE_ERR _(27)
+#define MIPS_COP_0_TAG_LO _(28)
+#define MIPS_COP_0_TAG_HI _(29)
+#define MIPS_COP_0_ERROR_PC _(30)
+
+/* MIPS32/64 */
+#define MIPS_COP_0_DEBUG _(23)
+#define MIPS_COP_0_DEPC _(24)
+#define MIPS_COP_0_PERFCNT _(25)
+#define MIPS_COP_0_DATA_LO _(28)
+#define MIPS_COP_0_DATA_HI _(29)
+#define MIPS_COP_0_DESAVE _(31)
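+
+/*
+ * Illustrative sketch: the _(n) wrapper above lets the same names serve
+ * assembly and C.  With LOCORE defined, MIPS_COP_0_STATUS expands to $12
+ * and can be used directly, e.g. "mfc0 t0, MIPS_COP_0_STATUS"; from C it
+ * expands to the bare number, which can be stringized into an inline-asm
+ * template.  The accessor below is a hypothetical example (it assumes
+ * __XSTRING() from <sys/cdefs.h> and uint32_t from <sys/types.h>), not
+ * something this header provides.
+ */
+#if 0 /* example only */
+#define MIPS_RDCP0(reg)						\
+({								\
+ uint32_t __v;							\
+ __asm __volatile("mfc0 %0, $" __XSTRING(reg) : "=r" (__v));	\
+ __v;								\
+})
+/* uint32_t status = MIPS_RDCP0(MIPS_COP_0_STATUS); */
+#endif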
+
+/* MIPS32 Config register definitions */
+#define MIPS_MMU_NONE 0x00 /* No MMU present */
+#define MIPS_MMU_TLB 0x01 /* Standard TLB */
+#define MIPS_MMU_BAT 0x02 /* Standard BAT */
+#define MIPS_MMU_FIXED 0x03 /* Standard fixed mapping */
+
+#define MIPS_CONFIG0_MT_MASK 0x00000380 /* bits 9..7 MMU Type */
+#define MIPS_CONFIG0_MT_SHIFT 7
+#define MIPS_CONFIG0_BE 0x00008000 /* data is big-endian */
+#define MIPS_CONFIG0_VI 0x00000004 /* instruction cache is virtual */
+
+#define MIPS_CONFIG1_TLBSZ_MASK 0x7E000000 /* bits 30..25 # tlb entries minus one */
+#define MIPS_CONFIG1_TLBSZ_SHIFT 25
+#define MIPS_CONFIG1_IS_MASK 0x01C00000 /* bits 24..22 icache sets per way */
+#define MIPS_CONFIG1_IS_SHIFT 22
+#define MIPS_CONFIG1_IL_MASK 0x00380000 /* bits 21..19 icache line size */
+#define MIPS_CONFIG1_IL_SHIFT 19
+#define MIPS_CONFIG1_IA_MASK 0x00070000 /* bits 18..16 icache associativity */
+#define MIPS_CONFIG1_IA_SHIFT 16
+#define MIPS_CONFIG1_DS_MASK 0x0000E000 /* bits 15..13 dcache sets per way */
+#define MIPS_CONFIG1_DS_SHIFT 13
+#define MIPS_CONFIG1_DL_MASK 0x00001C00 /* bits 12..10 dcache line size */
+#define MIPS_CONFIG1_DL_SHIFT 10
+#define MIPS_CONFIG1_DA_MASK 0x00000380 /* bits 9.. 7 dcache associativity */
+#define MIPS_CONFIG1_DA_SHIFT 7
+#define MIPS_CONFIG1_LOWBITS 0x0000007F
+#define MIPS_CONFIG1_C2 0x00000040 /* Coprocessor 2 implemented */
+#define MIPS_CONFIG1_MD 0x00000020 /* MDMX ASE implemented (MIPS64) */
+#define MIPS_CONFIG1_PC 0x00000010 /* Performance counters implemented */
+#define MIPS_CONFIG1_WR 0x00000008 /* Watch registers implemented */
+#define MIPS_CONFIG1_CA 0x00000004 /* MIPS16e ISA implemented */
+#define MIPS_CONFIG1_EP 0x00000002 /* EJTAG implemented */
+#define MIPS_CONFIG1_FP 0x00000001 /* FPU implemented */
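+
+/*
+ * Illustrative sketch of decoding the Config1 cache geometry fields,
+ * assuming the standard MIPS32 encodings: sets per way = 64 << IS,
+ * line size = 2 << IL bytes (IL == 0 means no cache) and
+ * associativity = IA + 1.  Hypothetical helper, not part of the header.
+ */
+#if 0 /* example only */
+static int
+mips_config1_icache_size(uint32_t cfg1)
+{
+ int sets, lsize, ways;
+
+ sets = 64 << ((cfg1 & MIPS_CONFIG1_IS_MASK) >> MIPS_CONFIG1_IS_SHIFT);
+ lsize = (cfg1 & MIPS_CONFIG1_IL_MASK) ?
+     2 << ((cfg1 & MIPS_CONFIG1_IL_MASK) >> MIPS_CONFIG1_IL_SHIFT) : 0;
+ ways = 1 + ((cfg1 & MIPS_CONFIG1_IA_MASK) >> MIPS_CONFIG1_IA_SHIFT);
+ return (sets * lsize * ways);
+}
+#endif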
+
+/*
+ * Values for the code field in a break instruction.
+ */
+#define MIPS_BREAK_INSTR 0x0000000d
+#define MIPS_BREAK_VAL_MASK 0x03ff0000
+#define MIPS_BREAK_VAL_SHIFT 16
+#define MIPS_BREAK_KDB_VAL 512
+#define MIPS_BREAK_SSTEP_VAL 513
+#define MIPS_BREAK_BRKPT_VAL 514
+#define MIPS_BREAK_SOVER_VAL 515
+#define MIPS_BREAK_KDB (MIPS_BREAK_INSTR | \
+ (MIPS_BREAK_KDB_VAL << MIPS_BREAK_VAL_SHIFT))
+#define MIPS_BREAK_SSTEP (MIPS_BREAK_INSTR | \
+ (MIPS_BREAK_SSTEP_VAL << MIPS_BREAK_VAL_SHIFT))
+#define MIPS_BREAK_BRKPT (MIPS_BREAK_INSTR | \
+ (MIPS_BREAK_BRKPT_VAL << MIPS_BREAK_VAL_SHIFT))
+#define MIPS_BREAK_SOVER (MIPS_BREAK_INSTR | \
+ (MIPS_BREAK_SOVER_VAL << MIPS_BREAK_VAL_SHIFT))
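+
+/*
+ * Illustrative sketch: the code values above sit in bits 25..16 of the
+ * break instruction word, so a trap handler can recover the code from
+ * the faulting instruction as below (hypothetical helper).  For example,
+ * mips_break_code(MIPS_BREAK_SSTEP) yields MIPS_BREAK_SSTEP_VAL (513).
+ */
+#if 0 /* example only */
+static int
+mips_break_code(uint32_t insn)
+{
+ return ((insn & MIPS_BREAK_VAL_MASK) >> MIPS_BREAK_VAL_SHIFT);
+}
+#endif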
+
+/*
+ * Minimum and maximum cache sizes.
+ */
+#define MIPS_MIN_CACHE_SIZE (16 * 1024)
+#define MIPS_MAX_CACHE_SIZE (256 * 1024)
+#define MIPS3_MAX_PCACHE_SIZE (32 * 1024) /* max. primary cache size */
+
+/*
+ * The floating point version and status registers.
+ */
+#define MIPS_FPU_ID $0
+#define MIPS_FPU_CSR $31
+
+/*
+ * The floating point coprocessor status register bits.
+ */
+#define MIPS_FPU_ROUNDING_BITS 0x00000003
+#define MIPS_FPU_ROUND_RN 0x00000000
+#define MIPS_FPU_ROUND_RZ 0x00000001
+#define MIPS_FPU_ROUND_RP 0x00000002
+#define MIPS_FPU_ROUND_RM 0x00000003
+#define MIPS_FPU_STICKY_BITS 0x0000007c
+#define MIPS_FPU_STICKY_INEXACT 0x00000004
+#define MIPS_FPU_STICKY_UNDERFLOW 0x00000008
+#define MIPS_FPU_STICKY_OVERFLOW 0x00000010
+#define MIPS_FPU_STICKY_DIV0 0x00000020
+#define MIPS_FPU_STICKY_INVALID 0x00000040
+#define MIPS_FPU_ENABLE_BITS 0x00000f80
+#define MIPS_FPU_ENABLE_INEXACT 0x00000080
+#define MIPS_FPU_ENABLE_UNDERFLOW 0x00000100
+#define MIPS_FPU_ENABLE_OVERFLOW 0x00000200
+#define MIPS_FPU_ENABLE_DIV0 0x00000400
+#define MIPS_FPU_ENABLE_INVALID 0x00000800
+#define MIPS_FPU_EXCEPTION_BITS 0x0003f000
+#define MIPS_FPU_EXCEPTION_INEXACT 0x00001000
+#define MIPS_FPU_EXCEPTION_UNDERFLOW 0x00002000
+#define MIPS_FPU_EXCEPTION_OVERFLOW 0x00004000
+#define MIPS_FPU_EXCEPTION_DIV0 0x00008000
+#define MIPS_FPU_EXCEPTION_INVALID 0x00010000
+#define MIPS_FPU_EXCEPTION_UNIMPL 0x00020000
+#define MIPS_FPU_COND_BIT 0x00800000
+#define MIPS_FPU_FLUSH_BIT 0x01000000 /* r4k, MBZ on r3k */
+#define MIPS1_FPC_MBZ_BITS 0xff7c0000
+#define MIPS3_FPC_MBZ_BITS 0xfe7c0000
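+
+/*
+ * Illustrative sketch: a couple of hypothetical convenience macros for
+ * picking apart an FCSR value read from $31 using the bits above.
+ */
+#if 0 /* example only */
+#define MIPS_FPU_ROUNDING_MODE(csr) ((csr) & MIPS_FPU_ROUNDING_BITS)
+#define MIPS_FPU_OVERFLOWED(csr) (((csr) & MIPS_FPU_STICKY_OVERFLOW) != 0)
+#endif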
+
+
+/*
+ * Constants used to determine whether an instruction is a floating point instruction.
+ */
+#define MIPS_OPCODE_SHIFT 26
+#define MIPS_OPCODE_C1 0x11
+
+
+/*
+ * The low part of the TLB entry.
+ */
+#define MIPS1_TLB_PFN 0xfffff000
+#define MIPS1_TLB_NON_CACHEABLE_BIT 0x00000800
+#define MIPS1_TLB_DIRTY_BIT 0x00000400
+#define MIPS1_TLB_VALID_BIT 0x00000200
+#define MIPS1_TLB_GLOBAL_BIT 0x00000100
+
+#define MIPS3_TLB_PFN 0x3fffffc0
+#define MIPS3_TLB_ATTR_MASK 0x00000038
+#define MIPS3_TLB_ATTR_SHIFT 3
+#define MIPS3_TLB_DIRTY_BIT 0x00000004
+#define MIPS3_TLB_VALID_BIT 0x00000002
+#define MIPS3_TLB_GLOBAL_BIT 0x00000001
+
+#define MIPS1_TLB_PHYS_PAGE_SHIFT 12
+#define MIPS3_TLB_PHYS_PAGE_SHIFT 6
+#define MIPS1_TLB_PF_NUM MIPS1_TLB_PFN
+#define MIPS3_TLB_PF_NUM MIPS3_TLB_PFN
+#define MIPS1_TLB_MOD_BIT MIPS1_TLB_DIRTY_BIT
+#define MIPS3_TLB_MOD_BIT MIPS3_TLB_DIRTY_BIT
+
+/*
+ * MIPS3_TLB_ATTR values - coherency algorithm:
+ * 0: cacheable, noncoherent, write-through, no write allocate
+ * 1: cacheable, noncoherent, write-through, write allocate
+ * 2: uncached
+ * 3: cacheable, noncoherent, write-back (noncoherent)
+ * 4: cacheable, coherent, write-back, exclusive (exclusive)
+ * 5: cacheable, coherent, write-back, exclusive on write (sharable)
+ * 6: cacheable, coherent, write-back, update on write (update)
+ * 7: uncached, accelerated (gather STORE operations)
+ */
+#define MIPS3_TLB_ATTR_WT 0 /* IDT */
+#define MIPS3_TLB_ATTR_WT_WRITEALLOCATE 1 /* IDT */
+#define MIPS3_TLB_ATTR_UNCACHED 2 /* R4000/R4400, IDT */
+#define MIPS3_TLB_ATTR_WB_NONCOHERENT 3 /* R4000/R4400, IDT */
+#define MIPS3_TLB_ATTR_WB_EXCLUSIVE 4 /* R4000/R4400 */
+#define MIPS3_TLB_ATTR_WB_SHARABLE 5 /* R4000/R4400 */
+#define MIPS3_TLB_ATTR_WB_UPDATE 6 /* R4000/R4400 */
+#define MIPS4_TLB_ATTR_UNCACHED_ACCELERATED 7 /* R10000 */
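+
+/*
+ * Illustrative sketch: composing an R4K-style EntryLo value for a 4 KB
+ * page from a page-aligned physical address, a coherency attribute from
+ * the list above and the dirty/global bits (hypothetical helper; a
+ * 32-bit physical address is assumed for brevity).
+ */
+#if 0 /* example only */
+static uint32_t
+mips3_tlb_entrylo(uint32_t pa, int attr, int dirty, int global)
+{
+ uint32_t lo;
+
+ lo = (pa >> MIPS3_TLB_PHYS_PAGE_SHIFT) & MIPS3_TLB_PFN;
+ lo |= ((attr << MIPS3_TLB_ATTR_SHIFT) & MIPS3_TLB_ATTR_MASK);
+ lo |= MIPS3_TLB_VALID_BIT;
+ if (dirty)
+  lo |= MIPS3_TLB_DIRTY_BIT;
+ if (global)
+  lo |= MIPS3_TLB_GLOBAL_BIT;
+ return (lo);
+}
+#endif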
+
+
+/*
+ * The high part of the TLB entry.
+ */
+#define MIPS1_TLB_VPN 0xfffff000
+#define MIPS1_TLB_PID 0x00000fc0
+#define MIPS1_TLB_PID_SHIFT 6
+
+#define MIPS3_TLB_VPN2 0xffffe000
+#define MIPS3_TLB_ASID 0x000000ff
+
+#define MIPS1_TLB_VIRT_PAGE_NUM MIPS1_TLB_VPN
+#define MIPS3_TLB_VIRT_PAGE_NUM MIPS3_TLB_VPN2
+#define MIPS3_TLB_PID MIPS3_TLB_ASID
+#define MIPS_TLB_VIRT_PAGE_SHIFT 12
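+
+/*
+ * Illustrative sketch: the matching EntryHi value pairs the VPN2 (an
+ * even/odd 4 KB page pair, hence the 8 KB granularity of the mask) with
+ * the address space ID (hypothetical helper).
+ */
+#if 0 /* example only */
+static uint32_t
+mips3_tlb_entryhi(uint32_t va, uint8_t asid)
+{
+ return ((va & MIPS3_TLB_VPN2) | ((uint32_t)asid & MIPS3_TLB_ASID));
+}
+#endif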
+
+/*
+ * r3000: shift count to put the index in the right spot.
+ */
+#define MIPS1_TLB_INDEX_SHIFT 8
+
+/*
+ * The first TLB entry that a TLB write-random operation can hit.
+ */
+#define MIPS1_TLB_FIRST_RAND_ENTRY 8
+#define MIPS3_TLB_WIRED_UPAGES 1
+
+/*
+ * The number of process id entries.
+ */
+#define MIPS1_TLB_NUM_PIDS 64
+#define MIPS3_TLB_NUM_ASIDS 256
+
+/*
+ * Patch codes to hide CPU design differences between MIPS1 and MIPS3.
+ */
+
+/* XXX simonb: this is before MIPS3_PLUS is defined (and is ugly!) */
+
+#if !(defined(MIPS3) || defined(MIPS4) || defined(MIPS32) || defined(MIPS64)) \
+ && defined(MIPS1) /* XXX simonb must be neater! */
+#define MIPS_TLB_PID_SHIFT MIPS1_TLB_PID_SHIFT
+#define MIPS_TLB_NUM_PIDS MIPS1_TLB_NUM_PIDS
+#endif
+
+#if (defined(MIPS3) || defined(MIPS4) || defined(MIPS32) || defined(MIPS64)) \
+ && !defined(MIPS1) /* XXX simonb must be neater! */
+#define MIPS_TLB_PID_SHIFT 0
+#define MIPS_TLB_NUM_PIDS MIPS3_TLB_NUM_ASIDS
+#endif
+
+
+#if !defined(MIPS_TLB_PID_SHIFT)
+#define MIPS_TLB_PID_SHIFT \
+ ((MIPS_HAS_R4K_MMU) ? 0 : MIPS1_TLB_PID_SHIFT)
+
+#define MIPS_TLB_NUM_PIDS \
+ ((MIPS_HAS_R4K_MMU) ? MIPS3_TLB_NUM_ASIDS : MIPS1_TLB_NUM_PIDS)
+#endif
+
+/*
+ * CPU processor revision IDs for company ID == 0 (non mips32/64 chips)
+ */
+#define MIPS_R2000 0x01 /* MIPS R2000 ISA I */
+#define MIPS_R3000 0x02 /* MIPS R3000 ISA I */
+#define MIPS_R6000 0x03 /* MIPS R6000 ISA II */
+#define MIPS_R4000 0x04 /* MIPS R4000/R4400 ISA III */
+#define MIPS_R3LSI 0x05 /* LSI Logic R3000 derivative ISA I */
+#define MIPS_R6000A 0x06 /* MIPS R6000A ISA II */
+#define MIPS_R3IDT 0x07 /* IDT R3041 or RC36100 ISA I */
+#define MIPS_R10000 0x09 /* MIPS R10000 ISA IV */
+#define MIPS_R4200 0x0a /* NEC VR4200 ISA III */
+#define MIPS_R4300 0x0b /* NEC VR4300 ISA III */
+#define MIPS_R4100 0x0c /* NEC VR4100 ISA III */
+#define MIPS_R12000 0x0e /* MIPS R12000 ISA IV */
+#define MIPS_R14000 0x0f /* MIPS R14000 ISA IV */
+#define MIPS_R8000 0x10 /* MIPS R8000 Blackbird/TFP ISA IV */
+#define MIPS_RC32300 0x18 /* IDT RC32334,332,355 ISA 32 */
+#define MIPS_R4600 0x20 /* QED R4600 Orion ISA III */
+#define MIPS_R4700 0x21 /* QED R4700 Orion ISA III */
+#define MIPS_R3SONY 0x21 /* Sony R3000 based ISA I */
+#define MIPS_R4650 0x22 /* QED R4650 ISA III */
+#define MIPS_TX3900 0x22 /* Toshiba TX39 family ISA I */
+#define MIPS_R5000 0x23 /* MIPS R5000 ISA IV */
+#define MIPS_R3NKK 0x23 /* NKK R3000 based ISA I */
+#define MIPS_RC32364 0x26 /* IDT RC32364 ISA 32 */
+#define MIPS_RM7000 0x27 /* QED RM7000 ISA IV */
+#define MIPS_RM5200 0x28 /* QED RM5200s ISA IV */
+#define MIPS_TX4900 0x2d /* Toshiba TX49 family ISA III */
+#define MIPS_R5900 0x2e /* Toshiba R5900 (EECore) ISA --- */
+#define MIPS_RC64470 0x30 /* IDT RC64474/RC64475 ISA III */
+#define MIPS_TX7900 0x38 /* Toshiba TX79 ISA III+ */
+#define MIPS_R5400 0x54 /* NEC VR5400 ISA IV */
+#define MIPS_R5500 0x55 /* NEC VR5500 ISA IV */
+
+/*
+ * CPU revision IDs for some prehistoric processors.
+ */
+
+/* For MIPS_R3000 */
+#define MIPS_REV_R3000 0x20
+#define MIPS_REV_R3000A 0x30
+
+/* For MIPS_TX3900 */
+#define MIPS_REV_TX3912 0x10
+#define MIPS_REV_TX3922 0x30
+#define MIPS_REV_TX3927 0x40
+
+/* For MIPS_R4000 */
+#define MIPS_REV_R4000_A 0x00
+#define MIPS_REV_R4000_B 0x22
+#define MIPS_REV_R4000_C 0x30
+#define MIPS_REV_R4400_A 0x40
+#define MIPS_REV_R4400_B 0x50
+#define MIPS_REV_R4400_C 0x60
+
+/* For MIPS_TX4900 */
+#define MIPS_REV_TX4927 0x22
+
+/*
+ * CPU processor revision IDs for company ID == 1 (MIPS)
+ */
+#define MIPS_4Kc 0x80 /* MIPS 4Kc ISA 32 */
+#define MIPS_5Kc 0x81 /* MIPS 5Kc ISA 64 */
+#define MIPS_20Kc 0x82 /* MIPS 20Kc ISA 64 */
+#define MIPS_4Kmp 0x83 /* MIPS 4Km/4Kp ISA 32 */
+#define MIPS_4KEc 0x84 /* MIPS 4KEc ISA 32 */
+#define MIPS_4KEmp 0x85 /* MIPS 4KEm/4KEp ISA 32 */
+#define MIPS_4KSc 0x86 /* MIPS 4KSc ISA 32 */
+#define MIPS_M4K 0x87 /* MIPS M4K ISA 32 Rel 2 */
+#define MIPS_25Kf 0x88 /* MIPS 25Kf ISA 64 */
+#define MIPS_5KE 0x89 /* MIPS 5KE ISA 64 Rel 2 */
+#define MIPS_4KEc_R2 0x90 /* MIPS 4KEc_R2 ISA 32 Rel 2 */
+#define MIPS_4KEmp_R2 0x91 /* MIPS 4KEm/4KEp_R2 ISA 32 Rel 2 */
+#define MIPS_4KSd 0x92 /* MIPS 4KSd ISA 32 Rel 2 */
+
+/*
+ * AMD (company ID 3) uses the processor ID field to denote the CPU core
+ * revision and the company options field to denote the SoC chip type.
+ */
+
+/* CPU processor revision IDs */
+#define MIPS_AU_REV1 0x01 /* Alchemy Au1000 (Rev 1) ISA 32 */
+#define MIPS_AU_REV2 0x02 /* Alchemy Au1000 (Rev 2) ISA 32 */
+
+/* CPU company options IDs */
+#define MIPS_AU1000 0x00
+#define MIPS_AU1500 0x01
+#define MIPS_AU1100 0x02
+#define MIPS_AU1550 0x03
+
+/*
+ * CPU processor revision IDs for company ID == 4 (Broadcom)
+ */
+#define MIPS_SB1 0x01 /* SiByte SB1 ISA 64 */
+
+/*
+ * CPU processor revision IDs for company ID == 5 (SandCraft)
+ */
+#define MIPS_SR7100 0x04 /* SandCraft SR7100 ISA 64 */
+
+/*
+ * FPU processor revision ID
+ */
+#define MIPS_SOFT 0x00 /* Software emulation ISA I */
+#define MIPS_R2360 0x01 /* MIPS R2360 FPC ISA I */
+#define MIPS_R2010 0x02 /* MIPS R2010 FPC ISA I */
+#define MIPS_R3010 0x03 /* MIPS R3010 FPC ISA I */
+#define MIPS_R6010 0x04 /* MIPS R6010 FPC ISA II */
+#define MIPS_R4010 0x05 /* MIPS R4010 FPC ISA II */
+#define MIPS_R31LSI 0x06 /* LSI Logic derivative ISA I */
+#define MIPS_R3TOSH 0x22 /* Toshiba R3000 based FPU ISA I */
+
+#ifdef ENABLE_MIPS_TX3900
+#include <mips/r3900regs.h>
+#endif
+#ifdef MIPS3_5900
+#include <mips/r5900regs.h>
+#endif
+#ifdef MIPS64_SB1
+#include <mips/sb1regs.h>
+#endif
+
+#endif /* _MIPS_CPUREGS_H_ */
diff --git a/sys/mips/include/cputypes.h b/sys/mips/include/cputypes.h
new file mode 100644
index 0000000..cb2b707
--- /dev/null
+++ b/sys/mips/include/cputypes.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 1993 Christopher G. Demetriou
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUTYPES_H_
+#define _MACHINE_CPUTYPES_H_
+
+#ifndef LOCORE
+extern int cpu;
+extern int cpu_class;
+#endif
+
+#endif /* !_MACHINE_CPUTYPES_H_ */
diff --git a/sys/mips/include/db_machdep.h b/sys/mips/include/db_machdep.h
new file mode 100644
index 0000000..989f05c
--- /dev/null
+++ b/sys/mips/include/db_machdep.h
@@ -0,0 +1,99 @@
+/* $OpenBSD: db_machdep.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom, Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: db_machdep.h,v 1.7 2006/10/16 12:30:34 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MIPS_DB_MACHDEP_H_
+#define _MIPS_DB_MACHDEP_H_
+
+#include <machine/frame.h>
+#include <machine/psl.h>
+#include <machine/trap.h>
+#include <machine/endian.h>
+
+typedef struct trapframe db_regs_t;
+extern db_regs_t ddb_regs; /* register state */
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef int db_expr_t; /* expression - signed */
+
+#if BYTE_ORDER == _BIG_ENDIAN
+#define BYTE_MSF (1)
+#endif
+
+#define SOFTWARE_SSTEP /* Need software single step */
+#define SOFTWARE_SSTEP_EMUL /* next_instr_address() emulates 100% */
+db_addr_t next_instr_address(db_addr_t, boolean_t);
+#define BKPT_SIZE (4)
+#define BKPT_SET(ins) (BREAK_DDB)
+#define DB_VALID_BREAKPOINT(addr) (((addr) & 3) == 0)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BREAK)
+#define IS_WATCHPOINT_TRAP(type, code) (0) /* XXX mips3 watchpoint */
+
+#define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_regs.pc)
+#define BKPT_SKIP \
+ do { \
+ if((db_get_value(kdb_frame->pc, sizeof(int), FALSE) & \
+ ~BREAK_VAL_MASK) == BREAK_INSTR) { \
+ kdb_frame->pc += BKPT_SIZE; \
+ kdb_thrctx->pcb_regs.pc += BKPT_SIZE; \
+ } \
+ } while (0)
+
+
+/*
+ * Tests on instructions to determine their class.
+ */
+#define IT_CALL 0x01
+#define IT_BRANCH 0x02
+#define IT_LOAD 0x03
+#define IT_STORE 0x04
+
+#define inst_branch(i) (db_inst_type(i) == IT_BRANCH)
+#define inst_trap_return(i) ((i) & 0)
+#define inst_call(i) (db_inst_type(i) == IT_CALL)
+#define inst_return(i) ((i) == 0x03e00008)
+#define inst_load(i) (db_inst_type(i) == IT_LOAD)
+#define inst_store(i) (db_inst_type(i) == IT_STORE)
+
+#define DB_SMALL_VALUE_MAX 0x7fffffff
+#define DB_SMALL_VALUE_MIN (-0x400001)
+
+int db_inst_type(int);
+void db_dump_tlb(int, int);
+db_addr_t branch_taken(int inst, db_addr_t pc);
+void stacktrace_subr(db_regs_t *, int (*)(const char *, ...));
+
+#endif /* !_MIPS_DB_MACHDEP_H_ */
diff --git a/sys/mips/include/defs.h b/sys/mips/include/defs.h
new file mode 100644
index 0000000..20d093e
--- /dev/null
+++ b/sys/mips/include/defs.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 1996, 2001-2003, 2005, Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * defs.h -- Simple universal types and definitions for use by the microkernel
+ * Jim Hayes, November 1996
+ *
+ * JNPR: defs.h,v 1.3.2.1 2007/09/10 08:16:32 girish
+ * $FreeBSD$
+ */
+
+#ifndef __DEFS_H__
+#define __DEFS_H__
+
+/*
+ * Paranoid compilation. If defined, the PARANOID flag will enable asserts,
+ * data structure magic stamping and a suite of other debug tools. To disable
+ * it, comment out its definition.
+ */
+#define PARANOID
+
+/*
+ * This is the ONLY place you should see hardware specific information
+ * encoded as #ifdefs. (Well, except for stdarg.h, perhaps.)
+ * I apologize in advance!
+ */
+#include <machine/defs_mips.h>
+#define CPU_GOT_ONE
+
+#if !defined(CPU_GOT_ONE)
+#error "YOU NEED TO SPECIFY ONE CPU TYPE TO USE THIS FILE"
+#endif
+
+#ifdef TRUE
+#undef TRUE
+#endif
+
+#ifdef FALSE
+#undef FALSE
+#endif
+
+typedef enum boolean_
+{
+ FALSE = 0,
+ TRUE = 1
+} boolean;
+
+/*
+ * Make NULL a pointer within the microkernel environment to catch
+ * pointer semantic miscreants.
+ *
+ * The reason it's conditional here is that some of the BSD includes
+ * define it multiple times as a straight integer and GCC barfs on
+ * the alternative prototypes.
+ */
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+
+/*
+ * Define some standard sized types. (Defined in cpu-specific type files
+ * included above.)
+ */
+
+#define MAX_U8 255
+#define MAX_S8 127
+#define MIN_S8 -128
+
+#define MAX_U16 0xffff
+#define MIN_S16 ((int16_t)(1 << 15))
+#define MAX_S16 ((int16_t)~MIN_S16)
+
+#define MAX_U32 0xffffffff
+#define MIN_S32 ((int32_t)(1 << 31))
+#define MAX_S32 ((int32_t)~MIN_S32)
+
+#define MAX_U64 ((u_int64_t)0 - 1)
+#define MAX_S64 ((int64_t)(MAX_U64 >> 1))
+#define MIN_S64 (-MAX_S64-1)
+
+/*
+ * Solaris uses _SIZE_T to mark the fact that "size_t" has already
+ * been defined. _SYS_TYPES_H_ is used by BSD.
+ *
+ */
+#if !defined(_SYS_TYPES_H_) && !defined(_SIZE_T)
+typedef UNSIGNED_32 size_t;
+#define _SIZE_T
+#endif
+
+#if !defined(_SYS_TYPES_H_)
+typedef char * caddr_t;
+
+typedef UNSIGNED_8 u_int8_t;
+typedef SIGNED_8 int8_t;
+
+typedef UNSIGNED_16 u_int16_t;
+typedef SIGNED_16 int16_t;
+
+typedef UNSIGNED_32 u_int32_t;
+typedef SIGNED_32 int32_t;
+
+typedef UNSIGNED_64 u_int64_t;
+typedef SIGNED_64 int64_t;
+
+typedef UNSIGNED_32 u_long;
+typedef UNSIGNED_16 u_short;
+typedef UNSIGNED_8 u_char;
+
+
+/*
+ * Define the standard terminology used in the diag software
+ * with regards to bytes, words, etc.
+ * BYTE = 8 bits
+ * HWORD (halfword) = 2 bytes or 16 bits
+ * WORD = 4 bytes or 32 bits
+ * QUAD = 8 bytes or 64 bits
+ *
+ * (The term QUAD seems less-than-intuitive here, but it is
+ * derived from BSD sources where it is defined as int64_t.)
+ *
+ * For consistency use the following defines wherever appropriate.
+ */
+
+typedef enum {
+ NBI_BYTE = (sizeof(u_int8_t) * 8),
+ NBI_HWORD = (sizeof(u_int16_t) * 8),
+ NBI_WORD = (sizeof(u_int32_t) * 8),
+ NBI_QUAD = (sizeof(u_int64_t) * 8)
+} num_bits_t;
+
+typedef enum {
+ NBY_BYTE = sizeof(u_int8_t),
+ NBY_HWORD = sizeof(u_int16_t),
+ NBY_WORD = sizeof(u_int32_t),
+ NBY_QUAD = sizeof(u_int64_t)
+} num_bytes_t;
+
+/*
+ * We assume that pid values are 16 bit integers
+ */
+
+typedef u_int16_t pid_t;
+
+#endif /* _SYS_TYPES_H_ */
+
+typedef UNSIGNED_32 magic_t;
+typedef int status_t;
+
+#define BITS_IN_BYTE 8
+
+/*
+ * Packed definition. We use this for fields in network frames where we
+ * don't want the compiler to pack out to even alignment
+ */
+
+#ifdef PACKED
+#undef PACKED
+#endif
+#define PACKED(x) x __attribute__ ((packed))
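+
+/*
+ * Illustrative sketch: PACKED() is applied per field, e.g. in an on-wire
+ * header layout where no padding may be inserted (hypothetical struct).
+ */
+#if 0 /* example only */
+typedef struct example_hdr_ {
+ PACKED(u_int16_t eh_type);
+ PACKED(u_int32_t eh_length); /* stays at offset 2, no padding */
+} example_hdr_t;
+#endif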
+
+/*
+ * __unused is a FreeBSDism that prevents the compiler from choking
+ * on function parameters that remain unused through the life of a
+ * function. This is not an issue for the Cygnus toolchain. In general
+ * it SHOULD NOT BE USED in the martini embedded software repository.
+ * It should only be used inside of shared code.
+ */
+#ifndef __unused
+#define __unused __attribute__ ((__unused__))
+#endif
+
+/*
+ * Basic memory multiples
+ */
+
+#define SIZE_1K 0x00000400
+#define SIZE_2K 0x00000800
+#define SIZE_4K 0x00001000
+#define SIZE_8K 0x00002000
+#define SIZE_16K 0x00004000
+#define SIZE_32K 0x00008000
+#define SIZE_64K 0x00010000
+#define SIZE_128K 0x00020000
+#define SIZE_256K 0x00040000
+#define SIZE_512K 0x00080000
+#define SIZE_1M 0x00100000
+#define SIZE_2M 0x00200000
+#define SIZE_4M 0x00400000
+#define SIZE_8M 0x00800000
+#define SIZE_16M 0x01000000
+#define SIZE_32M 0x02000000
+#define SIZE_64M 0x04000000
+#define SIZE_128M 0x08000000
+#define SIZE_256M 0x10000000
+#define SIZE_512M 0x20000000
+#define SIZE_1G 0x40000000
+#define SIZE_2G 0x80000000
+
+/*
+ * swap16_inline
+ * swap32_inline
+ *
+ * Byteswap 16-bit and 32-bit quantities.
+ */
+
+static inline u_int16_t
+swap16_inline(u_int16_t data)
+{
+ return(((data & 0x00ff) << 8) |
+ ((data & 0xff00) >> 8));
+}
+
+static inline u_int32_t
+swap32_inline(u_int32_t data)
+{
+ return(((data & 0x000000ff) << 24) |
+ ((data & 0x0000ff00) << 8) |
+ ((data & 0x00ff0000) >> 8) |
+ ((data & 0xff000000) >> 24));
+}
+
+/*
+ * Define errno_t here as it is needed by the rom and ukernel
+ */
+typedef u_int32_t errno_t;
+
+#define EOK 0
+
+/*
+ * Define the main communication structure used for passing
+ * information from the rom to the ukernel (done here as it is
+ * used by them both)
+ */
+typedef struct rom_info_ rom_info_t;
+
+/*
+ * Typedef the return code from the ukernel to the ROM
+ */
+typedef u_int32_t rom_return_t;
+
+/*
+ * Pull in the relevant global environment header file
+ *
+ * This file is shared by the uKernel and the system simulation effort.
+ */
+#if defined(ENV_UKERN) || defined (ENV_SYS_SIM)
+#include "ukern.h"
+#endif /* ENV_UKERN */
+
+#if defined(ENV_ROM)
+#include "rom.h"
+#endif
+
+#endif /* __DEFS_H__ */
diff --git a/sys/mips/include/elf.h b/sys/mips/include/elf.h
new file mode 100644
index 0000000..fc1035e
--- /dev/null
+++ b/sys/mips/include/elf.h
@@ -0,0 +1,215 @@
+/* $OpenBSD: elf_abi.h,v 1.1 1998/01/28 11:14:41 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1996 Per Fogelstrom
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: elf.h,v 1.4 2006/12/02 09:53:40 katta
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _MACHINE_ELF_H_
+#define _MACHINE_ELF_H_
+
+/* Information taken from MIPS ABI supplemental */
+
+#include <sys/elf32.h> /* Definitions common to all 32 bit architectures. */
+
+#define __ELF_WORD_SIZE 32 /* Used by <sys/elf_generic.h> */
+#include <sys/elf_generic.h>
+
+#define ELF_ARCH EM_MIPS
+#define ELF_MACHINE_OK(x) ((x) == EM_MIPS || (x) == EM_MIPS_RS4_BE)
+
+/* Architecture dependent Segment types - p_type */
+#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
+
+/* Architecture dependent d_tag field for Elf32_Dyn. */
+#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime Linker Interface ID */
+#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */
+#define DT_MIPS_ICHECKSUM 0x70000003 /* Cksum of ext str and com sizes */
+#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */
+#define DT_MIPS_FLAGS 0x70000005 /* Flags */
+#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Segment base address */
+#define DT_MIPS_CONFLICT 0x70000008 /* Adr of .conflict section */
+#define DT_MIPS_LIBLIST 0x70000009 /* Address of .liblist section */
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local .GOT entries */
+#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of .conflict entries */
+#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of .liblist entries */
+#define DT_MIPS_SYMTABNO 0x70000011 /* Number of .dynsym entries */
+#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */
+#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in .dynsym */
+#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */
+#define DT_MIPS_RLD_MAP 0x70000016 /* Address of debug map pointer */
+
+#define DT_PROCNUM (DT_MIPS_RLD_MAP - DT_LOPROC + 1)
+
+/*
+ * Legal values for e_flags field of Elf32_Ehdr.
+ */
+#define EF_MIPS_NOREORDER 1 /* .noreorder was used */
+#define EF_MIPS_PIC 2 /* Contains PIC code */
+#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */
+#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */
+
+/*
+ * Mips special sections.
+ */
+#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */
+#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */
+#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */
+
+/*
+ * Legal values for sh_type field of Elf32_Shdr.
+ */
+#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */
+#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */
+#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */
+#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */
+#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information */
+#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */
+
+/*
+ * Legal values for sh_flags field of Elf32_Shdr.
+ */
+#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */
+
+/*
+ * Entries found in sections of type SHT_MIPS_GPTAB.
+ */
+typedef union {
+ struct {
+ Elf32_Word gt_current_g_value; /* -G val used in compilation */
+ Elf32_Word gt_unused; /* Not used */
+ } gt_header; /* First entry in section */
+ struct {
+ Elf32_Word gt_g_value; /* If this val were used for -G */
+ Elf32_Word gt_bytes; /* This many bytes would be used */
+ } gt_entry; /* Subsequent entries in section */
+} Elf32_gptab;
+
+/*
+ * Entry found in sections of type SHT_MIPS_REGINFO.
+ */
+typedef struct {
+ Elf32_Word ri_gprmask; /* General registers used */
+ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */
+ Elf32_Sword ri_gp_value; /* $gp register value */
+} Elf32_RegInfo;
+
+
+/*
+ * Mips relocations.
+ */
+
+#define R_MIPS_NONE 0 /* No reloc */
+#define R_MIPS_16 1 /* Direct 16 bit */
+#define R_MIPS_32 2 /* Direct 32 bit */
+#define R_MIPS_REL32 3 /* PC relative 32 bit */
+#define R_MIPS_26 4 /* Direct 26 bit shifted */
+#define R_MIPS_HI16 5 /* High 16 bit */
+#define R_MIPS_LO16 6 /* Low 16 bit */
+#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
+#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
+#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
+#define R_MIPS_PC16 10 /* PC relative 16 bit */
+#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
+#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
+#define R_MIPS_GOTHI16 21 /* GOT HI 16 bit */
+#define R_MIPS_GOTLO16 22 /* GOT LO 16 bit */
+#define R_MIPS_CALLHI16 30 /* upper 16 bit GOT entry for function */
+#define R_MIPS_CALLLO16 31 /* lower 16 bit GOT entry for function */
+
+#define R_TYPE(name) __CONCAT(R_MIPS_,name)
+
+/* Define "machine" characteristics */
+#define ELF_TARG_CLASS ELFCLASS32
+#ifdef __MIPSEB__
+#define ELF_TARG_DATA ELFDATA2MSB
+#else
+#define ELF_TARG_DATA ELFDATA2LSB
+#endif
+#define ELF_TARG_MACH EM_MIPS
+#define ELF_TARG_VER 1
+
+
+/*
+ * Auxiliary vector entries for passing information to the interpreter.
+ *
+ * The i386 supplement to the SVR4 ABI specification names this "auxv_t",
+ * but POSIX lays claim to all symbols ending with "_t".
+ */
+
+typedef struct { /* Auxiliary vector entry on initial stack */
+ int a_type; /* Entry type. */
+ union {
+ long a_val; /* Integer value. */
+ void *a_ptr; /* Address. */
+ void (*a_fcn)(void); /* Function pointer (not used). */
+ } a_un;
+} Elf32_Auxinfo;
+
+__ElfType(Auxinfo);
+
+/* Values for a_type. */
+#define AT_NULL 0 /* Terminates the vector. */
+#define AT_IGNORE 1 /* Ignored entry. */
+#define AT_EXECFD 2 /* File descriptor of program to load. */
+#define AT_PHDR 3 /* Program header of program already loaded. */
+#define AT_PHENT 4 /* Size of each program header entry. */
+#define AT_PHNUM 5 /* Number of program header entries. */
+#define AT_PAGESZ 6 /* Page size in bytes. */
+#define AT_BASE 7 /* Interpreter's base address. */
+#define AT_FLAGS 8 /* Flags (unused for i386). */
+#define AT_ENTRY 9 /* Where interpreter should transfer control. */
+
+/*
+ * The following non-standard values are used for passing information
+ * from John Polstra's testbed program to the dynamic linker. These
+ * are expected to go away soon.
+ *
+ * Unfortunately, these overlap the Linux non-standard values, so they
+ * must not be used in the same context.
+ */
+#define AT_BRK 10 /* Starting point for sbrk and brk. */
+#define AT_DEBUG 11 /* Debugging level. */
+
+/*
+ * The following non-standard values are used in Linux ELF binaries.
+ */
+#define AT_NOTELF 10 /* Program is not ELF ?? */
+#define AT_UID 11 /* Real uid. */
+#define AT_EUID 12 /* Effective uid. */
+#define AT_GID 13 /* Real gid. */
+#define AT_EGID 14 /* Effective gid. */
+
+#define AT_COUNT 15 /* Count of defined aux entry types. */
+
+#endif /* !_MACHINE_ELF_H_ */
diff --git a/sys/mips/include/endian.h b/sys/mips/include/endian.h
new file mode 100644
index 0000000..1d2b4fe
--- /dev/null
+++ b/sys/mips/include/endian.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1987, 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 7.8 (Berkeley) 4/3/91
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ENDIAN_H_
+#define _MACHINE_ENDIAN_H_
+
+#include <sys/cdefs.h>
+#ifndef __ASSEMBLER__
+#include <sys/_types.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#ifdef __MIPSEB__
+#define _BYTE_ORDER _BIG_ENDIAN
+#else
+#define _BYTE_ORDER _LITTLE_ENDIAN
+#endif /* __MIPSEB__ */
+
+/*
+ * Deprecated variants that don't have enough underscores to be useful in more
+ * strict namespaces.
+ */
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#ifndef __ASSEMBLER__
+#if defined(__GNUCLIKE_BUILTIN_CONSTANT_P) && defined(__OPTIMIZE__)
+#define __is_constant(x) __builtin_constant_p(x)
+#else
+#define __is_constant(x) 0
+#endif
+
+#define __bswap16_const(x) (((x) >> 8) | (((x) << 8) & 0xff00))
+#define __bswap32_const(x) (((x) >> 24) | (((x) >> 8) & 0xff00) | \
+ (((x) << 8) & 0xff0000) | (((x) << 24) & 0xff000000))
+#define __bswap64_const(x) (((x) >> 56) | (((x) >> 40) & 0xff00) | \
+ (((x) >> 24) & 0xff0000) | (((x) >> 8) & 0xff000000) | \
+ (((x) << 8) & ((__uint64_t)0xff << 32)) | \
+ (((x) << 24) & ((__uint64_t)0xff << 40)) | \
+ (((x) << 40) & ((__uint64_t)0xff << 48)) | (((x) << 56)))
+
+static __inline __uint16_t
+__bswap16_var(__uint16_t _x)
+{
+
+ return ((_x >> 8) | ((_x << 8) & 0xff00));
+}
+
+static __inline __uint32_t
+__bswap32_var(__uint32_t _x)
+{
+
+ return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
+ ((_x << 24) & 0xff000000));
+}
+
+static __inline __uint64_t
+__bswap64_var(__uint64_t _x)
+{
+
+ return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+ ((_x >> 8) & 0xff000000) | ((_x << 8) & ((__uint64_t)0xff << 32)) |
+ ((_x << 24) & ((__uint64_t)0xff << 40)) |
+ ((_x << 40) & ((__uint64_t)0xff << 48)) | ((_x << 56)));
+}
+
+#define __bswap16(x) (__uint16_t)(__is_constant(x) ? __bswap16_const(x) : \
+ __bswap16_var(x))
+#define __bswap32(x) (__uint32_t)(__is_constant(x) ? __bswap32_const(x) : \
+ __bswap32_var(x))
+#define __bswap64(x) (__uint64_t)(__is_constant(x) ? __bswap64_const(x) : \
+ __bswap64_var(x))
+
+#ifdef __MIPSEB__
+#define __htonl(x) ((__uint32_t)(x))
+#define __htons(x) ((__uint16_t)(x))
+#define __ntohl(x) ((__uint32_t)(x))
+#define __ntohs(x) ((__uint16_t)(x))
+/*
+ * Define the order of 32-bit words in 64-bit words.
+ */
+/*
+ * XXXMIPS: Additional parentheses to keep gcc happy.
+ */
+#define _QUAD_HIGHWORD 0
+#define _QUAD_LOWWORD 1
+#else
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32((x)))
+#define __ntohs(x) (__bswap16((x)))
+#define __htonl(x) (__bswap32((x)))
+#define __htons(x) (__bswap16((x)))
+#endif /* __MIPSEB__ */
+
+#endif /* __ASSEMBLER__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_MACHINE_ENDIAN_H_ */
diff --git a/sys/mips/include/exec.h b/sys/mips/include/exec.h
new file mode 100644
index 0000000..4650090
--- /dev/null
+++ b/sys/mips/include/exec.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)exec.h 8.1 (Berkeley) 6/11/93
+ * from: src/sys/i386/include/exec.h,v 1.8 1999/08/28 00:44:11 peter
+ * JNPR: exec.h,v 1.3 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_EXEC_H_
+#define _MACHINE_EXEC_H_
+
+#define __LDPGSZ 4096
+
+#endif /* !_MACHINE_EXEC_H_ */
diff --git a/sys/mips/include/float.h b/sys/mips/include/float.h
new file mode 100644
index 0000000..407945d
--- /dev/null
+++ b/sys/mips/include/float.h
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)float.h 7.1 (Berkeley) 5/8/90
+ * from: src/sys/i386/include/float.h,v 1.8 1999/08/28 00:44:11 peter
+ * JNPR: float.h,v 1.4 2006/12/02 09:53:41 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_ 1
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#ifdef SOFTFLOAT
+#define FLT_ROUNDS -1
+#else
+#define FLT_ROUNDS __flt_rounds() /* FP addition rounds to nearest */
+#endif
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG DBL_MANT_DIG
+#define LDBL_EPSILON DBL_EPSILON
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN DBL_MIN
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX DBL_MAX
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
+
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/sys/mips/include/floatingpoint.h b/sys/mips/include/floatingpoint.h
new file mode 100644
index 0000000..cda9e46
--- /dev/null
+++ b/sys/mips/include/floatingpoint.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 1993 Andrew Moore, Talke Studio
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#) floatingpoint.h 1.0 (Berkeley) 9/23/93
+ * $FreeBSD$
+ */
+
+#ifndef _FLOATINGPOINT_H_
+#define _FLOATINGPOINT_H_
+
+#include <sys/cdefs.h>
+#include <machine/ieeefp.h>
+
+#endif /* !_FLOATINGPOINT_H_ */
diff --git a/sys/mips/include/fpu.h b/sys/mips/include/fpu.h
new file mode 100644
index 0000000..88932ed
--- /dev/null
+++ b/sys/mips/include/fpu.h
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)npx.h 5.3 (Berkeley) 1/18/91
+ * $FreeBSD$
+ */
+
+/*
+ * Floating Point Data Structures and Constants
+ * W. Jolitz 1/90
+ */
+
+#ifndef _MACHINE_FPU_H_
+#define _MACHINE_FPU_H_
+
+/* Contents of each x87 floating point accumulator */
+struct fpacc87 {
+ u_char fp_bytes[10];
+};
+
+/* Contents of each SSE extended accumulator */
+struct xmmacc {
+ u_char xmm_bytes[16];
+};
+
+struct envxmm {
+ u_int16_t en_cw; /* control word (16 bits) */
+ u_int16_t en_sw; /* status word (16 bits) */
+ u_int8_t en_tw; /* tag word (8 bits) */
+ u_int8_t en_zero;
+ u_int16_t en_opcode; /* opcode last executed (11 bits) */
+ u_int64_t en_rip; /* floating point instruction pointer */
+ u_int64_t en_rdp; /* floating operand pointer */
+ u_int32_t en_mxcsr; /* SSE control/status register */
+ u_int32_t en_mxcsr_mask; /* valid bits in mxcsr */
+};
+
+struct savefpu {
+ struct envxmm sv_env;
+ struct {
+ struct fpacc87 fp_acc;
+ u_char fp_pad[6]; /* padding */
+ } sv_fp[8];
+ struct xmmacc sv_xmm[16];
+ u_char sv_pad[96];
+} __aligned(16);
+
+/*
+ * The hardware default control word for i387's and later coprocessors is
+ * 0x37F, giving:
+ *
+ * round to nearest
+ * 64-bit precision
+ * all exceptions masked.
+ *
+ * FreeBSD/i386 uses 53 bit precision for things like fadd/fsub/fsqrt etc
+ * because of the difference between memory and fpu register stack arguments.
+ * If it's using an intermediate fpu register, it has 80/64 bits to work
+ * with. If it uses memory, it has 64/53 bits to work with. However,
+ * gcc is aware of this and goes to a fair bit of trouble to make the
+ * best use of it.
+ *
+ * This is mostly academic for AMD64, because the ABI prefers the use of
+ * SSE2-based math. For FreeBSD/amd64, we go with the default settings.
+ */
+#define __INITIAL_FPUCW__ 0x037F
+#define __INITIAL_MXCSR__ 0x1F80
+#define __INITIAL_MXCSR_MASK__ 0xFFBF
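+
+/*
+ * Illustrative decode of the default x87 control word above (standard
+ * x87 bit layout): bits 0..5 are the exception masks (all set in 0x37F),
+ * bits 8..9 the precision control (3 = 64-bit) and bits 10..11 the
+ * rounding control (0 = round to nearest).  Hypothetical macros.
+ */
+#if 0 /* example only */
+#define X87_CW_EXC_MASKS(cw) ((cw) & 0x3f) /* 0x37F -> 0x3f */
+#define X87_CW_PRECISION(cw) (((cw) >> 8) & 0x3) /* 0x37F -> 3 */
+#define X87_CW_ROUNDING(cw) (((cw) >> 10) & 0x3) /* 0x37F -> 0 */
+#endif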
+
+#ifdef _KERNEL
+int fpudna(void);
+void fpudrop(void);
+void fpuexit(struct thread *td);
+int fpuformat(void);
+int fpugetregs(struct thread *td, struct savefpu *addr);
+void fpuinit(void);
+void fpusetregs(struct thread *td, struct savefpu *addr);
+int fputrap(void);
+#endif
+
+#endif /* !_MACHINE_FPU_H_ */
diff --git a/sys/mips/include/frame.h b/sys/mips/include/frame.h
new file mode 100644
index 0000000..43661a8
--- /dev/null
+++ b/sys/mips/include/frame.h
@@ -0,0 +1,138 @@
+/* $OpenBSD: frame.h,v 1.3 1998/09/15 10:50:12 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom, Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: frame.h,v 1.6.2.1 2007/09/10 08:14:57 girish
+ * $FreeBSD$
+ *
+ */
+#ifndef _MACHINE_FRAME_H_
+#define _MACHINE_FRAME_H_
+
+struct trapframe {
+ register_t zero;
+ register_t ast;
+ register_t v0;
+ register_t v1;
+ register_t a0;
+ register_t a1;
+ register_t a2;
+ register_t a3;
+ register_t t0;
+ register_t t1;
+ register_t t2;
+ register_t t3;
+ register_t t4;
+ register_t t5;
+ register_t t6;
+ register_t t7;
+ register_t s0;
+ register_t s1;
+ register_t s2;
+ register_t s3;
+ register_t s4;
+ register_t s5;
+ register_t s6;
+ register_t s7;
+ register_t t8;
+ register_t t9;
+ register_t k0;
+ register_t k1;
+ register_t gp;
+ register_t sp;
+ register_t s8;
+ register_t ra;
+ register_t sr;
+ register_t mullo;
+ register_t mulhi;
+ register_t badvaddr;
+ register_t cause;
+ register_t pc;
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * Include any other registers which are CPU-Specific and
+ * need to be part of the frame here.
+ *
+ * Also, be sure this matches what is defined in regnum.h
+ */
+ register_t ic; /* RM7k and RM9k specific */
+ register_t dummy; /* Alignment for 32-bit case */
+
+/* From here on, these registers are saved only for user processes. */
+
+ f_register_t f0;
+ f_register_t f1;
+ f_register_t f2;
+ f_register_t f3;
+ f_register_t f4;
+ f_register_t f5;
+ f_register_t f6;
+ f_register_t f7;
+ f_register_t f8;
+ f_register_t f9;
+ f_register_t f10;
+ f_register_t f11;
+ f_register_t f12;
+ f_register_t f13;
+ f_register_t f14;
+ f_register_t f15;
+ f_register_t f16;
+ f_register_t f17;
+ f_register_t f18;
+ f_register_t f19;
+ f_register_t f20;
+ f_register_t f21;
+ f_register_t f22;
+ f_register_t f23;
+ f_register_t f24;
+ f_register_t f25;
+ f_register_t f26;
+ f_register_t f27;
+ f_register_t f28;
+ f_register_t f29;
+ f_register_t f30;
+ f_register_t f31;
+ register_t fsr;
+ register_t fdummy;
+ /*
+ * COP2 registers may need to be saved here based on the CPU, and those
+ * might need to be per process, or even for the kernel, so we need
+ * some thought here.
+ */
+};
+
+/* REVISIT */
+struct frame *get_current_fp __P((void));
+#define get_next_fp(fp) (0)
+#define get_return_ptr(fp) (0)
+void get_stack_trace(u_int32_t depth, u_int32_t *trace);
+
+#endif /* !_MACHINE_FRAME_H_ */
diff --git a/sys/mips/include/gdb_machdep.h b/sys/mips/include/gdb_machdep.h
new file mode 100644
index 0000000..a76a824
--- /dev/null
+++ b/sys/mips/include/gdb_machdep.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/gdb_machdep.h,v 1.3 2005/01/05 20:05:50 imp
+ * JNPR: gdb_machdep.h,v 1.1 2007/08/09 12:25:25 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_GDB_MACHDEP_H_
+#define _MACHINE_GDB_MACHDEP_H_
+
+#define GDB_BUFSZ 600
+#define GDB_NREGS 90
+#define GDB_REG_PC 37
+
+static __inline size_t
+gdb_cpu_regsz(int regnum)
+{
+
+ return (sizeof(long));
+}
+
+static __inline int
+gdb_cpu_query(void)
+{
+
+ return (0);
+}
+
+void *gdb_cpu_getreg(int, size_t *);
+void gdb_cpu_setreg(int, void *);
+int gdb_cpu_signal(int, int);
+
+#endif /* !_MACHINE_GDB_MACHDEP_H_ */
diff --git a/sys/mips/include/hwfunc.h b/sys/mips/include/hwfunc.h
new file mode 100644
index 0000000..ef5088c
--- /dev/null
+++ b/sys/mips/include/hwfunc.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2003-2004 Juli Mallett. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_HWFUNC_H_
+#define _MACHINE_HWFUNC_H_
+
+struct trapframe;
+
+/*
+ * Hooks downward into hardware functionality.
+ */
+
+void platform_halt(void);
+void platform_intr(struct trapframe *);
+void platform_reset(void);
+void platform_start(__register_t, __register_t, __register_t, __register_t);
+
+#endif /* !_MACHINE_HWFUNC_H_ */
diff --git a/sys/mips/include/ieee.h b/sys/mips/include/ieee.h
new file mode 100644
index 0000000..a7411dd
--- /dev/null
+++ b/sys/mips/include/ieee.h
@@ -0,0 +1,154 @@
+/* $NetBSD: ieee754.h,v 1.4 2003/10/27 02:30:26 simonb Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ieee.h 8.1 (Berkeley) 6/11/93
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * NOTICE: This is not a standalone file. To use it, #include it in
+ * your port's ieee.h header.
+ */
+
+#include <machine/endian.h>
+
+/*
+ * <sys/ieee754.h> defines the layout of IEEE 754 floating point types.
+ * Only single-precision and double-precision types are defined here;
+ * extended types, if available, are defined in the machine-dependent
+ * header.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ * k k+1
+ * Note that 1.0 x 2 == 0.1 x 2 and that denorms are represented
+ *
+ * (-exp_bias+1)
+ * as fractions that look like 0.fffff x 2 . This means that
+ *
+ * -126
+ * the number 0.10000 x 2 , for instance, is the same as the normalized
+ *
+ * -127 -128
+ * float 1.0 x 2 . Thus, to represent 2 , we need one leading zero
+ *
+ * -129
+ * in the fraction; to represent 2 , we need two, and so on. This
+ *
+ * (-exp_bias-fracbits+1)
+ * implies that the smallest denormalized number is 2
+ *
+ * for whichever format we are talking about: for single precision, for
+ *
+ * -126 -149
+ * instance, we get .00000000000000000000001 x 2 , or 1.0 x 2 , and
+ *
+ * -149 == -127 - 23 + 1.
+ */
+#define SNG_EXPBITS 8
+#define SNG_FRACBITS 23
+
+#define DBL_EXPBITS 11
+#define DBL_FRACBITS 52
+
+struct ieee_single {
+#if _BYTE_ORDER == _BIG_ENDIAN
+ u_int sng_sign:1;
+ u_int sng_exp:8;
+ u_int sng_frac:23;
+#else
+ u_int sng_frac:23;
+ u_int sng_exp:8;
+ u_int sng_sign:1;
+#endif
+};
+
+struct ieee_double {
+#if _BYTE_ORDER == _BIG_ENDIAN
+ u_int dbl_sign:1;
+ u_int dbl_exp:11;
+ u_int dbl_frach:20;
+ u_int dbl_fracl;
+#else
+ u_int dbl_fracl;
+ u_int dbl_frach:20;
+ u_int dbl_exp:11;
+ u_int dbl_sign:1;
+#endif
+};
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define SNG_EXP_INFNAN 255
+#define DBL_EXP_INFNAN 2047
+
+#if 0
+#define SNG_QUIETNAN (1 << 22)
+#define DBL_QUIETNAN (1 << 19)
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define SNG_EXP_BIAS 127
+#define DBL_EXP_BIAS 1023
+
+/*
+ * Convenience data structures.
+ */
+union ieee_single_u {
+ float sngu_f;
+ struct ieee_single sngu_sng;
+};
+
+union ieee_double_u {
+ double dblu_d;
+ struct ieee_double dblu_dbl;
+};
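Because the bit-fields above mirror the hardware layout, a float can be decomposed directly through these unions. A small user-space sketch (assuming a little-endian host, so the second branch of the #if applies; the *_demo names are local to the example, not part of this header):

#include <stdio.h>

struct ieee_single_demo {		/* little-endian layout from above */
	unsigned sng_frac:23;
	unsigned sng_exp:8;
	unsigned sng_sign:1;
};

union ieee_single_u_demo {
	float sngu_f;
	struct ieee_single_demo sngu_sng;
};

int
main(void)
{
	union ieee_single_u_demo u;

	u.sngu_f = -1.5f;
	/* -1.5 = -1.1(binary) x 2^0: sign 1, exponent 0 + SNG_EXP_BIAS = 127,
	 * fraction 0x400000. */
	printf("sign %u exp %u frac 0x%06x\n",
	    u.sngu_sng.sng_sign, u.sngu_sng.sng_exp, u.sngu_sng.sng_frac);
	return (0);
}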
diff --git a/sys/mips/include/ieeefp.h b/sys/mips/include/ieeefp.h
new file mode 100644
index 0000000..c7d9244
--- /dev/null
+++ b/sys/mips/include/ieeefp.h
@@ -0,0 +1,32 @@
+/* $OpenBSD: ieeefp.h,v 1.2 1999/01/27 04:46:05 imp Exp $ */
+
+/*-
+ * Written by J.T. Conklin, Apr 11, 1995
+ * Public domain.
+ *
+ * JNPR: ieeefp.h,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IEEEFP_H_
+#define _MACHINE_IEEEFP_H_
+
+typedef int fp_except;
+typedef int fp_except_t;
+
+#define FP_X_IMP 0x01 /* imprecise (loss of precision) */
+#define FP_X_UFL 0x02 /* underflow exception */
+#define FP_X_OFL 0x04 /* overflow exception */
+#define FP_X_DZ 0x08 /* divide-by-zero exception */
+#define FP_X_INV 0x10 /* invalid operation exception */
+
+typedef enum {
+ FP_RN=0, /* round to nearest representable number */
+ FP_RZ=1, /* round to zero (truncate) */
+ FP_RP=2, /* round toward positive infinity */
+ FP_RM=3 /* round toward negative infinity */
+} fp_rnd;
+
+typedef fp_rnd fp_rnd_t;
+
+#endif /* !_MACHINE_IEEEFP_H_ */
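Each FP_X_* value is a distinct bit, so an exception mask is built by OR-ing flags together, while fp_rnd names exactly one of the four rounding modes. A minimal user-space sketch with the constants copied from above:

#include <stdio.h>

typedef int fp_except_t;

#define FP_X_IMP	0x01	/* imprecise (loss of precision) */
#define FP_X_UFL	0x02	/* underflow exception */
#define FP_X_OFL	0x04	/* overflow exception */
#define FP_X_DZ		0x08	/* divide-by-zero exception */
#define FP_X_INV	0x10	/* invalid operation exception */

int
main(void)
{
	/* Select overflow, divide-by-zero and invalid-operation exceptions. */
	fp_except_t mask = FP_X_OFL | FP_X_DZ | FP_X_INV;

	printf("exception mask: 0x%02x\n", mask);	/* prints 0x1c */
	return (0);
}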
diff --git a/sys/mips/include/in_cksum.h b/sys/mips/include/in_cksum.h
new file mode 100644
index 0000000..37d88e2
--- /dev/null
+++ b/sys/mips/include/in_cksum.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * from: src/sys/alpha/include/in_cksum.h,v 1.7 2005/03/02 21:33:20 joerg
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine that is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
+#ifdef _KERNEL
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
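in_cksum_update() above is the standard incremental adjustment for a TTL decrement: ip_ttl is the high byte of one 16-bit header word, so lowering it by one lowers the header sum by 0x100, and the stored one's-complement checksum must be raised by 0x100 with the carry folded back in (cf. RFC 1141/1624). A user-space sketch of the same arithmetic on a host-order checksum value:

#include <stdio.h>
#include <stdint.h>

/* Adjust a host-order IP header checksum after ip_ttl was decremented by 1. */
static uint16_t
cksum_ttl_dec(uint16_t old_sum)
{
	uint32_t tmp;

	tmp = (uint32_t)old_sum + 0x100;		/* compensate for TTL - 1 */
	return ((uint16_t)(tmp + (tmp >> 16)));		/* fold the carry back in */
}

int
main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x1234, cksum_ttl_dec(0x1234));
	return (0);
}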
diff --git a/sys/mips/include/intr.h b/sys/mips/include/intr.h
new file mode 100644
index 0000000..c406379
--- /dev/null
+++ b/sys/mips/include/intr.h
@@ -0,0 +1,94 @@
+/* $NetBSD: intr.h,v 1.5 1996/05/13 06:11:28 mycroft Exp $ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles M. Hannum.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * JNPR: intr.h,v 1.4 2007/08/09 11:23:32 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+/* Interrupt sharing types. */
+#define IST_NONE 0 /* none */
+#define IST_PULSE 1 /* pulsed */
+#define IST_EDGE 2 /* edge-triggered */
+#define IST_LEVEL 3 /* level-triggered */
+
+#ifndef _LOCORE
+
+/*
+ * Index into intrcnt[], which is defined in exceptions.S
+ * Index # = irq # - 1
+ */
+#define INTRCNT_HARDCLOCK 0
+#define INTRCNT_RTC 1
+#define INTRCNT_SIO 2 /* irq 3 */
+#define INTRCNT_PE 3 /* irq 4 */
+#define INTRCNT_PICNIC 4 /* irq 5 */
+
+extern uint32_t idle_mask;
+extern void (*mips_ack_interrupt)(int, uint32_t);
+
+typedef int ih_func_t(void *);
+
+struct intr_event;
+
+struct mips_intr_handler {
+ int (*ih_func) (void *);
+ void *ih_arg;
+ struct intr_event *ih_event;
+ u_int ih_flags;
+ volatile long *ih_count;
+ int ih_level;
+ int ih_irq;
+ void *frame;
+};
+
+extern struct mips_intr_handler intr_handlers[];
+
+typedef void (*mask_fn)(void *);
+
+void mips_mask_irq(void);
+void mips_unmask_irq(void);
+
+struct trapframe;
+void mips_set_intr(int pri, uint32_t mask,
+ uint32_t (*int_hand)(uint32_t, struct trapframe *));
+uint32_t mips_handle_interrupts(uint32_t pending, struct trapframe *cf);
+void intr_enable_source(uintptr_t irq);
+struct trapframe * mips_get_trapframe(void *ih_arg);
+int inthand_add(const char *name, u_int irq, void (*handler)(void *),
+ void *arg, int flags, void **cookiep);
+int inthand_remove(u_int irq, void *cookie);
+void bvif_attach(void);
+
+#endif /* _LOCORE */
+
+#endif /* !_MACHINE_INTR_H_ */
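As a hedged sketch only (the device name, irq number, flags value and softc are invented for illustration and not taken from this header), a driver registers its handler through inthand_add() and keeps the cookie for a later inthand_remove():

#include <sys/param.h>
#include <sys/bus.h>

#include <machine/intr.h>

struct mydev_softc {
	int	md_unit;		/* placeholder per-device state */
};

static void	*mydev_ih_cookie;

/* Illustrative handler; a real one would acknowledge and service the device. */
static void
mydev_intr(void *arg)
{
	struct mydev_softc *sc = arg;

	(void)sc;
}

static int
mydev_setup_intr(struct mydev_softc *sc)
{

	return (inthand_add("mydev", 4, mydev_intr, sc, INTR_TYPE_MISC,
	    &mydev_ih_cookie));
}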
diff --git a/sys/mips/include/intr_machdep.h b/sys/mips/include/intr_machdep.h
new file mode 100644
index 0000000..d5f26d9
--- /dev/null
+++ b/sys/mips/include/intr_machdep.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2004 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTR_MACHDEP_H_
+#define _MACHINE_INTR_MACHDEP_H_
+
+#define NHARD_IRQS 6
+#define NSOFT_IRQS 2
+
+struct trapframe;
+
+void cpu_establish_hardintr(const char *, int (*)(void*), void (*)(void*),
+ void *, int, int, void **);
+void cpu_establish_softintr(const char *, int (*)(void*), void (*)(void*),
+ void *, int, int, void **);
+void cpu_intr(struct trapframe *);
+
+#endif /* !_MACHINE_INTR_MACHDEP_H_ */
diff --git a/sys/mips/include/iodev.h b/sys/mips/include/iodev.h
new file mode 100644
index 0000000..2273620
--- /dev/null
+++ b/sys/mips/include/iodev.h
@@ -0,0 +1,33 @@
+/*-
+ * Copyright (c) 2004 Mark R V Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define CDEV_MAJOR 2
+#define CDEV_MINOR_IO 14
+
+d_open_t ioopen;
+d_close_t ioclose;
diff --git a/sys/mips/include/kdb.h b/sys/mips/include/kdb.h
new file mode 100644
index 0000000..7be4ecb
--- /dev/null
+++ b/sys/mips/include/kdb.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/kdb.h,v 1.2 2005/01/05 20:05:50 imp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_KDB_H_
+#define _MACHINE_KDB_H_
+
+#include <machine/frame.h>
+
+static __inline void
+kdb_cpu_clear_singlestep(void)
+{
+}
+
+static __inline void
+kdb_cpu_set_singlestep(void)
+{
+}
+
+static __inline void
+kdb_cpu_trap(int vector, int _)
+{
+}
+
+#endif /* _MACHINE_KDB_H_ */
diff --git a/sys/mips/include/limits.h b/sys/mips/include/limits.h
new file mode 100644
index 0000000..2381553
--- /dev/null
+++ b/sys/mips/include/limits.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * from: src/sys/i386/include/limits.h,v 1.27 2005/03/02 21:33:26 joerg
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_LIMITS_H_
+#define _MACHINE_LIMITS_H_
+
+#include <sys/cdefs.h>
+
+#ifdef __CC_SUPPORTS_WARNING
+#warning "machine/limits.h is deprecated. Include sys/limits.h instead."
+#endif
+
+#include <sys/limits.h>
+
+#endif /* !_MACHINE_LIMITS_H_ */
diff --git a/sys/mips/include/locore.h b/sys/mips/include/locore.h
new file mode 100644
index 0000000..ce60353
--- /dev/null
+++ b/sys/mips/include/locore.h
@@ -0,0 +1,70 @@
+/* $NetBSD: locore.h,v 1.78 2007/10/17 19:55:36 garbled Exp $ */
+
+/*
+ * Copyright 1996 The Board of Trustees of The Leland Stanford
+ * Junior University. All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this
+ * software and its documentation for any purpose and without
+ * fee is hereby granted, provided that the above copyright
+ * notice appear in all copies. Stanford University
+ * makes no representations about the suitability of this
+ * software for any purpose. It is provided "as is" without
+ * express or implied warranty.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Jump table for MIPS CPU locore functions that are implemented
+ * differently on different generations, or instruction set
+ * architecture (ISA) levels, of the MIPS family.
+ *
+ * We currently provide support for MIPS I and MIPS III.
+ */
+
+#ifndef _MIPS_LOCORE_H
+#define _MIPS_LOCORE_H
+
+#include <machine/cpufunc.h>
+#include <machine/cpuregs.h>
+#include <machine/frame.h>
+#include <machine/md_var.h>
+
+struct tlb;
+
+/*
+ * CPU identification, from PRID register.
+ */
+typedef int mips_prid_t;
+
+#define MIPS_PRID_REV(x) (((x) >> 0) & 0x00ff)
+#define MIPS_PRID_IMPL(x) (((x) >> 8) & 0x00ff)
+
+/* pre-MIPS32/64 */
+#define MIPS_PRID_RSVD(x) (((x) >> 16) & 0xffff)
+#define MIPS_PRID_REV_MIN(x) ((MIPS_PRID_REV(x) >> 0) & 0x0f)
+#define MIPS_PRID_REV_MAJ(x) ((MIPS_PRID_REV(x) >> 4) & 0x0f)
+
+/* MIPS32/64 */
+#define MIPS_PRID_CID(x) (((x) >> 16) & 0x00ff) /* Company ID */
+#define MIPS_PRID_CID_PREHISTORIC 0x00 /* Not MIPS32/64 */
+#define MIPS_PRID_CID_MTI 0x01 /* MIPS Technologies, Inc. */
+#define MIPS_PRID_CID_BROADCOM 0x02 /* Broadcom */
+#define MIPS_PRID_CID_ALCHEMY 0x03 /* Alchemy Semiconductor */
+#define MIPS_PRID_CID_SIBYTE 0x04 /* SiByte */
+#define MIPS_PRID_CID_SANDCRAFT 0x05 /* SandCraft */
+#define MIPS_PRID_CID_PHILIPS 0x06 /* Philips */
+#define MIPS_PRID_CID_TOSHIBA 0x07 /* Toshiba */
+#define MIPS_PRID_CID_LSI 0x08 /* LSI */
+ /* 0x09 unannounced */
+ /* 0x0a unannounced */
+#define MIPS_PRID_CID_LEXRA 0x0b /* Lexra */
+#define MIPS_PRID_COPTS(x) (((x) >> 24) & 0x00ff) /* Company Options */
+
+#ifdef _KERNEL
+#ifdef __HAVE_MIPS_MACHDEP_CACHE_CONFIG
+void mips_machdep_cache_config(void);
+#endif
+#endif /* _KERNEL */
+#endif /* _MIPS_LOCORE_H */
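A quick user-space sketch of the PRID decoding macros above; the sample value 0x00019300 is illustrative only (company field 0x01, i.e. MIPS_PRID_CID_MTI, implementation 0x93, revision 0):

#include <stdio.h>

#define MIPS_PRID_REV(x)	(((x) >> 0) & 0x00ff)
#define MIPS_PRID_IMPL(x)	(((x) >> 8) & 0x00ff)
#define MIPS_PRID_CID(x)	(((x) >> 16) & 0x00ff)	/* Company ID */

int
main(void)
{
	unsigned prid = 0x00019300;

	printf("company 0x%02x impl 0x%02x rev 0x%02x\n",
	    MIPS_PRID_CID(prid), MIPS_PRID_IMPL(prid), MIPS_PRID_REV(prid));
	return (0);
}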
diff --git a/sys/mips/include/md_var.h b/sys/mips/include/md_var.h
new file mode 100644
index 0000000..3b8d0a7
--- /dev/null
+++ b/sys/mips/include/md_var.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 1995 Bruce D. Evans.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/i386/include/md_var.h,v 1.35 2000/02/20 20:51:23 bsd
+ * JNPR: md_var.h,v 1.4 2006/10/16 12:30:34 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+#include <machine/reg.h>
+
+/*
+ * Miscellaneous machine-dependent declarations.
+ */
+extern int Maxmem;
+extern char sigcode[];
+extern int szsigcode, szosigcode;
+
+extern vm_offset_t kstack0;
+
+void MipsSaveCurFPState(struct thread *);
+void fork_trampoline(void);
+void cpu_swapin(struct proc *);
+u_int MipsEmulateBranch(struct trapframe *, int, int, u_int);
+u_long kvtop(void *addr);
+int is_physical_memory(vm_offset_t addr);
+int is_cacheable_mem(vm_offset_t pa);
+
+#define MIPS_DEBUG 0
+
+#if MIPS_DEBUG
+#define MIPS_DEBUG_PRINT(fmt, args...) printf("%s: " fmt "\n" , __FUNCTION__ , ## args)
+#else
+#define MIPS_DEBUG_PRINT(fmt, args...)
+#endif
+
+void mips_vector_init(void);
+void cpu_identify(void);
+void mips_cpu_init(void);
+void mips_proc0_init(void);
+
+/* Platform call-downs. */
+void platform_identify(void);
+
+#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/mips/include/memdev.h b/sys/mips/include/memdev.h
new file mode 100644
index 0000000..6a7e9ed
--- /dev/null
+++ b/sys/mips/include/memdev.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2004 Mark R V Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/memdev.h,v 1.2 2004/08/01 18:51:44 markm
+ * $FreeBSD$
+ */
+
+#define CDEV_MAJOR 2
+#define CDEV_MINOR_MEM 0
+#define CDEV_MINOR_KMEM 1
+
+d_open_t memopen;
+d_read_t memrw;
+#define memioctl (d_ioctl_t *)NULL
+d_mmap_t memmmap;
+
+void dev_mem_md_init(void);
diff --git a/sys/mips/include/metadata.h b/sys/mips/include/metadata.h
new file mode 100644
index 0000000..84e6f87
--- /dev/null
+++ b/sys/mips/include/metadata.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2003 Peter Wemm <peter@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_METADATA_H_
+#define _MACHINE_METADATA_H_
+
+#define MODINFOMD_SMAP 0x1001
+
+#endif /* !_MACHINE_METADATA_H_ */
diff --git a/sys/mips/include/minidump.h b/sys/mips/include/minidump.h
new file mode 100644
index 0000000..b2d75d8
--- /dev/null
+++ b/sys/mips/include/minidump.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MINIDUMP_H_
+#define _MACHINE_MINIDUMP_H_ 1
+
+#define MINIDUMP_MAGIC "minidump FreeBSD/mips"
+#define MINIDUMP_VERSION 1
+
+struct minidumphdr {
+ char magic[24];
+ uint32_t version;
+ uint32_t msgbufsize;
+ uint32_t bitmapsize;
+ uint32_t ptesize;
+ uint64_t kernbase;
+ uint64_t dmapbase;
+ uint64_t dmapend;
+};
+
+#endif /* _MACHINE_MINIDUMP_H_ */
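A reader of this format would verify the magic string and version before trusting the remaining fields. The sketch below is a hedged, user-space illustration (the structure and constants are repeated locally so it stands alone; minidump_check is not an existing interface):

#include <stdint.h>
#include <string.h>

#define MINIDUMP_MAGIC		"minidump FreeBSD/mips"
#define MINIDUMP_VERSION	1

struct minidumphdr {
	char magic[24];
	uint32_t version;
	uint32_t msgbufsize;
	uint32_t bitmapsize;
	uint32_t ptesize;
	uint64_t kernbase;
	uint64_t dmapbase;
	uint64_t dmapend;
};

/* Return non-zero if the header looks like a version-1 MIPS minidump. */
static int
minidump_check(const struct minidumphdr *mdh)
{

	if (strncmp(mdh->magic, MINIDUMP_MAGIC, sizeof(mdh->magic)) != 0)
		return (0);
	return (mdh->version == MINIDUMP_VERSION);
}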
diff --git a/sys/mips/include/mips_opcode.h b/sys/mips/include/mips_opcode.h
new file mode 100644
index 0000000..72f281a
--- /dev/null
+++ b/sys/mips/include/mips_opcode.h
@@ -0,0 +1,413 @@
+/* $OpenBSD: mips_opcode.h,v 1.2 1999/01/27 04:46:05 imp Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)mips_opcode.h 8.1 (Berkeley) 6/10/93
+ * JNPR: mips_opcode.h,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MIPS_OPCODE_H_
+#define _MACHINE_MIPS_OPCODE_H_
+
+/*
+ * Define the instruction formats and opcode values for the
+ * MIPS instruction set.
+ */
+#include <machine/endian.h>
+
+/*
+ * Define the instruction formats.
+ */
+typedef union {
+ unsigned word;
+
+#if BYTE_ORDER == BIG_ENDIAN
+ struct {
+ unsigned op: 6;
+ unsigned rs: 5;
+ unsigned rt: 5;
+ unsigned imm: 16;
+ } IType;
+
+ struct {
+ unsigned op: 6;
+ unsigned target: 26;
+ } JType;
+
+ struct {
+ unsigned op: 6;
+ unsigned rs: 5;
+ unsigned rt: 5;
+ unsigned rd: 5;
+ unsigned shamt: 5;
+ unsigned func: 6;
+ } RType;
+
+ struct {
+ unsigned op: 6; /* always '0x11' */
+ unsigned : 1; /* always '1' */
+ unsigned fmt: 4;
+ unsigned ft: 5;
+ unsigned fs: 5;
+ unsigned fd: 5;
+ unsigned func: 6;
+ } FRType;
+#endif
+#if BYTE_ORDER == LITTLE_ENDIAN
+ struct {
+ unsigned imm: 16;
+ unsigned rt: 5;
+ unsigned rs: 5;
+ unsigned op: 6;
+ } IType;
+
+ struct {
+ unsigned target: 26;
+ unsigned op: 6;
+ } JType;
+
+ struct {
+ unsigned func: 6;
+ unsigned shamt: 5;
+ unsigned rd: 5;
+ unsigned rt: 5;
+ unsigned rs: 5;
+ unsigned op: 6;
+ } RType;
+
+ struct {
+ unsigned func: 6;
+ unsigned fd: 5;
+ unsigned fs: 5;
+ unsigned ft: 5;
+ unsigned fmt: 4;
+ unsigned : 1; /* always '1' */
+ unsigned op: 6; /* always '0x11' */
+ } FRType;
+#endif
+} InstFmt;
+
+/* instruction field decoding macros */
+#define MIPS_INST_OPCODE(val) (val >> 26)
+#define MIPS_INST_RS(val) ((val & 0x03e00000) >> 21)
+#define MIPS_INST_RT(val) ((val & 0x001f0000) >> 16)
+#define MIPS_INST_IMM(val) ((val & 0x0000ffff))
+
+#define MIPS_INST_RD(val) ((val & 0x0000f800) >> 11)
+#define MIPS_INST_SA(val) ((val & 0x000007c0) >> 6)
+#define MIPS_INST_FUNC(val) (val & 0x0000003f)
+
+#define MIPS_INST_INDEX(val) (val & 0x03ffffff)
+
+/*
+ * The MIPS opcode and function tables use a 3-bit row and a 3-bit
+ * column number; we define the following macro for easy transcribing.
+ */
+
+#define MIPS_OPCODE(r, c) (((r & 0x07) << 3) | (c & 0x07))
+
+
+/*
+ * Values for the 'op' field.
+ */
+#define OP_SPECIAL 000
+#define OP_BCOND 001
+#define OP_J 002
+#define OP_JAL 003
+#define OP_BEQ 004
+#define OP_BNE 005
+#define OP_BLEZ 006
+#define OP_BGTZ 007
+
+#define OP_REGIMM OP_BCOND
+
+#define OP_ADDI 010
+#define OP_ADDIU 011
+#define OP_SLTI 012
+#define OP_SLTIU 013
+#define OP_ANDI 014
+#define OP_ORI 015
+#define OP_XORI 016
+#define OP_LUI 017
+
+#define OP_COP0 020
+#define OP_COP1 021
+#define OP_COP2 022
+#define OP_COP3 023
+#define OP_BEQL 024
+#define OP_BNEL 025
+#define OP_BLEZL 026
+#define OP_BGTZL 027
+
+#define OP_COP1X OP_COP3
+
+#define OP_DADDI 030
+#define OP_DADDIU 031
+#define OP_LDL 032
+#define OP_LDR 033
+
+#define OP_LB 040
+#define OP_LH 041
+#define OP_LWL 042
+#define OP_LW 043
+#define OP_LBU 044
+#define OP_LHU 045
+#define OP_LWR 046
+#define OP_LWU 047
+
+#define OP_SB 050
+#define OP_SH 051
+#define OP_SWL 052
+#define OP_SW 053
+#define OP_SDL 054
+#define OP_SDR 055
+#define OP_SWR 056
+#define OP_CACHE 057
+
+#define OP_LL 060
+#define OP_LWC1 061
+#define OP_LWC2 062
+#define OP_LWC3 063
+#define OP_LLD 064
+#define OP_LDC1 065
+#define OP_LDC2 066
+#define OP_LD 067
+
+#define OP_PREF OP_LWC3
+
+#define OP_SC 070
+#define OP_SWC1 071
+#define OP_SWC2 072
+#define OP_SWC3 073
+#define OP_SCD 074
+#define OP_SDC1 075
+#define OP_SDC2 076
+#define OP_SD 077
+
+/*
+ * Values for the 'func' field when 'op' == OP_SPECIAL.
+ */
+#define OP_SLL 000
+#define OP_MOVCI 001
+#define OP_SRL 002
+#define OP_SRA 003
+#define OP_SLLV 004
+#define OP_SRLV 006
+#define OP_SRAV 007
+
+#define OP_F_SLL OP_SLL
+#define OP_F_MOVCI OP_MOVCI
+#define OP_F_SRL OP_SRL
+#define OP_F_SRA OP_SRA
+#define OP_F_SLLV OP_SLLV
+#define OP_F_SRLV OP_SRLV
+#define OP_F_SRAV OP_SRAV
+
+#define OP_JR 010
+#define OP_JALR 011
+#define OP_MOVZ 012
+#define OP_MOVN 013
+#define OP_SYSCALL 014
+#define OP_BREAK 015
+#define OP_SYNC 017
+
+#define OP_F_JR OP_JR
+#define OP_F_JALR OP_JALR
+#define OP_F_MOVZ OP_MOVZ
+#define OP_F_MOVN OP_MOVN
+#define OP_F_SYSCALL OP_SYSCALL
+#define OP_F_BREAK OP_BREAK
+#define OP_F_SYNC OP_SYNC
+
+#define OP_MFHI 020
+#define OP_MTHI 021
+#define OP_MFLO 022
+#define OP_MTLO 023
+#define OP_DSLLV 024
+#define OP_DSRLV 026
+#define OP_DSRAV 027
+
+#define OP_F_MFHI OP_MFHI
+#define OP_F_MTHI OP_MTHI
+#define OP_F_MFLO OP_MFLO
+#define OP_F_MTLO OP_MTLO
+#define OP_F_DSLLV OP_DSLLV
+#define OP_F_DSRLV OP_DSRLV
+#define OP_F_DSRAV OP_DSRAV
+
+#define OP_MULT 030
+#define OP_MULTU 031
+#define OP_DIV 032
+#define OP_DIVU 033
+#define OP_DMULT 034
+#define OP_DMULTU 035
+#define OP_DDIV 036
+#define OP_DDIVU 037
+
+#define OP_F_MULT OP_MULT
+#define OP_F_MULTU OP_MULTU
+#define OP_F_DIV OP_DIV
+#define OP_F_DIVU OP_DIVU
+#define OP_F_DMULT OP_DMULT
+#define OP_F_DMULTU OP_DMULTU
+#define OP_F_DDIV OP_DDIV
+#define OP_F_DDIVU OP_DDIVU
+
+#define OP_ADD 040
+#define OP_ADDU 041
+#define OP_SUB 042
+#define OP_SUBU 043
+#define OP_AND 044
+#define OP_OR 045
+#define OP_XOR 046
+#define OP_NOR 047
+
+#define OP_F_ADD OP_ADD
+#define OP_F_ADDU OP_ADDU
+#define OP_F_SUB OP_SUB
+#define OP_F_SUBU OP_SUBU
+#define OP_F_AND OP_AND
+#define OP_F_OR OP_OR
+#define OP_F_XOR OP_XOR
+#define OP_F_NOR OP_NOR
+
+#define OP_SLT 052
+#define OP_SLTU 053
+#define OP_DADD 054
+#define OP_DADDU 055
+#define OP_DSUB 056
+#define OP_DSUBU 057
+
+#define OP_F_SLT OP_SLT
+#define OP_F_SLTU OP_SLTU
+#define OP_F_DADD OP_DADD
+#define OP_F_DADDU OP_DADDU
+#define OP_F_DSUB OP_DSUB
+#define OP_F_DSUBU OP_DSUBU
+
+#define OP_TGE 060
+#define OP_TGEU 061
+#define OP_TLT 062
+#define OP_TLTU 063
+#define OP_TEQ 064
+#define OP_TNE 066
+
+#define OP_F_TGE OP_TGE
+#define OP_F_TGEU OP_TGEU
+#define OP_F_TLT OP_TLT
+#define OP_F_TLTU OP_TLTU
+#define OP_F_TEQ OP_TEQ
+#define OP_F_TNE OP_TNE
+
+#define OP_DSLL 070
+#define OP_DSRL 072
+#define OP_DSRA 073
+#define OP_DSLL32 074
+#define OP_DSRL32 076
+#define OP_DSRA32 077
+
+#define OP_F_DSLL OP_DSLL
+#define OP_F_DSRL OP_DSRL
+#define OP_F_DSRA OP_DSRA
+#define OP_F_DSLL32 OP_DSLL32
+#define OP_F_DSRL32 OP_DSRL32
+#define OP_F_DSRA32 OP_DSRA32
+
+/*
+ * The REGIMM (register immediate) instructions are further decoded
+ * using a table with 2-bit row numbers, hence the need for a new
+ * helper macro.
+ */
+
+#define MIPS_ROP(r, c) (((r & 0x03) << 3) | (c & 0x07))
+
+/*
+ * Values for the 'func' field when 'op' == OP_BCOND.
+ */
+#define OP_BLTZ 000
+#define OP_BGEZ 001
+#define OP_BLTZL 002
+#define OP_BGEZL 003
+
+#define OP_R_BLTZ OP_BLTZ
+#define OP_R_BGEZ OP_BGEZ
+#define OP_R_BLTZL OP_BLTZL
+#define OP_R_BGEZL OP_BGEZL
+
+#define OP_TGEI 010
+#define OP_TGEIU 011
+#define OP_TLTI 012
+#define OP_TLTIU 013
+#define OP_TEQI 014
+#define OP_TNEI 016
+
+#define OP_R_TGEI OP_TGEI
+#define OP_R_TGEIU OP_TGEIU
+#define OP_R_TLTI OP_TLTI
+#define OP_R_TLTIU OP_TLTIU
+#define OP_R_TEQI OP_TEQI
+#define OP_R_TNEI OP_TNEI
+
+#define OP_BLTZAL 020
+#define OP_BGEZAL 021
+#define OP_BLTZALL 022
+#define OP_BGEZALL 023
+
+#define OP_R_BLTZAL OP_BLTZAL
+#define OP_R_BGEZAL OP_BGEZAL
+#define OP_R_BLTZALL OP_BLTZALL
+#define OP_R_BGEZALL OP_BGEZALL
+
+/*
+ * Values for the 'rs' field when 'op' == OP_COPz.
+ */
+#define OP_MF 000
+#define OP_DMF 001
+#define OP_MT 004
+#define OP_DMT 005
+#define OP_BCx 010
+#define OP_BCy 014
+#define OP_CF 002
+#define OP_CT 006
+
+/*
+ * Values for the 'rt' field when 'op' == OP_COPz.
+ */
+#define COPz_BC_TF_MASK 0x01
+#define COPz_BC_TRUE 0x01
+#define COPz_BC_FALSE 0x00
+#define COPz_BCL_TF_MASK 0x02
+#define COPz_BCL_TRUE 0x02
+#define COPz_BCL_FALSE 0x00
+
+#endif /* !_MACHINE_MIPS_OPCODE_H_ */
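The field-extraction macros above can be exercised directly. As a user-space sketch, the word 0x00851021 encodes the R-type instruction "addu v0, a0, a1" and decodes to op 0 (OP_SPECIAL), rs 4, rt 5, rd 2, func 041 (OP_ADDU):

#include <stdio.h>

#define MIPS_INST_OPCODE(val)	(val >> 26)
#define MIPS_INST_RS(val)	((val & 0x03e00000) >> 21)
#define MIPS_INST_RT(val)	((val & 0x001f0000) >> 16)
#define MIPS_INST_RD(val)	((val & 0x0000f800) >> 11)
#define MIPS_INST_FUNC(val)	(val & 0x0000003f)

int
main(void)
{
	unsigned insn = 0x00851021;	/* addu v0, a0, a1 */

	printf("op %02o rs %u rt %u rd %u func %02o\n",
	    MIPS_INST_OPCODE(insn), MIPS_INST_RS(insn), MIPS_INST_RT(insn),
	    MIPS_INST_RD(insn), MIPS_INST_FUNC(insn));
	return (0);
}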
diff --git a/sys/mips/include/mp_watchdog.h b/sys/mips/include/mp_watchdog.h
new file mode 100644
index 0000000..bcec051
--- /dev/null
+++ b/sys/mips/include/mp_watchdog.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2004 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MP_WATCHDOG_H_
+#define _MACHINE_MP_WATCHDOG_H_
+
+void ap_watchdog(u_int cpuid);
+
+#endif /* !_MACHINE_MP_WATCHDOG_H_ */
diff --git a/sys/mips/include/mutex.h b/sys/mips/include/mutex.h
new file mode 100644
index 0000000..cbc066d
--- /dev/null
+++ b/sys/mips/include/mutex.h
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+/* Empty file */
diff --git a/sys/mips/include/ns16550.h b/sys/mips/include/ns16550.h
new file mode 100644
index 0000000..69c6bd7
--- /dev/null
+++ b/sys/mips/include/ns16550.h
@@ -0,0 +1,194 @@
+/*-
+ * Copyright (c) 1997-2001, 2005, Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * ns16550.h -- NS16550 DUART Device Driver, used on Atlas, SCB and NIC
+ *
+ * Michael Beesley, April 1997
+ * Highly leveraged from the Atlas device driver written by Jim Hayes
+ *
+ * JNPR: ns16550.h,v 1.2.4.1 2007/09/10 07:51:14 girish
+ * $FreeBSD$
+ */
+
+#ifndef __NS16550_H__
+#define __NS16550_H__
+
+/* speed to initialize to during chip tests */
+#define SIO_TEST_SPEED 9600
+
+/* default serial console speed if not set with sysctl or probed from boot */
+#ifndef CONSPEED
+#define CONSPEED 9600
+#endif
+
+/* default serial gdb speed if not set with sysctl or probed from boot */
+#ifndef GDBSPEED
+#define GDBSPEED CONSPEED
+#endif
+
+#define IO_COMSIZE 8 /* 8250, 16x50 com controllers */
+
+/*
+ * NS16550 UART registers
+ */
+
+/* 8250 registers #[0-6]. */
+
+#define IER_ERXRDY 0x1
+#define IER_ETXRDY 0x2
+#define IER_ERLS 0x4
+#define IER_EMSC 0x8
+
+#define IIR_IMASK 0xf
+#define IIR_RXTOUT 0xc
+#define IIR_RLS 0x6
+#define IIR_RXRDY 0x4
+#define IIR_TXRDY 0x2
+#define IIR_NOPEND 0x1
+#define IIR_MLSC 0x0
+#define IIR_FIFO_MASK 0xc0 /* set if FIFOs are enabled */
+
+#define LCR_DLAB 0x80
+#define CFCR_DLAB LCR_DLAB
+#define LCR_EFR_ENABLE 0xbf /* magic to enable EFR on 16650 up */
+#define CFCR_EFR_ENABLE LCR_EFR_ENABLE
+#define LCR_SBREAK 0x40
+#define CFCR_SBREAK LCR_SBREAK
+#define LCR_PZERO 0x30
+#define CFCR_PZERO LCR_PZERO
+#define LCR_PONE 0x20
+#define CFCR_PONE LCR_PONE
+#define LCR_PEVEN 0x10
+#define CFCR_PEVEN LCR_PEVEN
+#define LCR_PODD 0x00
+#define CFCR_PODD LCR_PODD
+#define LCR_PENAB 0x08
+#define CFCR_PENAB LCR_PENAB
+#define LCR_STOPB 0x04
+#define CFCR_STOPB LCR_STOPB
+#define LCR_8BITS 0x03
+#define CFCR_8BITS LCR_8BITS
+#define LCR_7BITS 0x02
+#define CFCR_7BITS LCR_7BITS
+#define LCR_6BITS 0x01
+#define CFCR_6BITS LCR_6BITS
+#define LCR_5BITS 0x00
+#define CFCR_5BITS LCR_5BITS
+
+#define MCR_PRESCALE 0x80 /* only available on 16650 up */
+#define MCR_LOOPBACK 0x10
+#define MCR_IE 0x08
+#define MCR_IENABLE MCR_IE
+#define MCR_DRS 0x04
+#define MCR_RTS 0x02
+#define MCR_DTR 0x01
+
+#define LSR_RCV_FIFO 0x80
+#define LSR_TEMT 0x40
+#define LSR_TSRE LSR_TEMT
+#define LSR_THRE 0x20
+#define LSR_TXRDY LSR_THRE
+#define LSR_BI 0x10
+#define LSR_FE 0x08
+#define LSR_PE 0x04
+#define LSR_OE 0x02
+#define LSR_RXRDY 0x01
+#define LSR_RCV_MASK 0x1f
+
+#define MSR_DCD 0x80
+#define MSR_RI 0x40
+#define MSR_DSR 0x20
+#define MSR_CTS 0x10
+#define MSR_DDCD 0x08
+#define MSR_TERI 0x04
+#define MSR_DDSR 0x02
+#define MSR_DCTS 0x01
+
+#define FCR_ENABLE 0x01
+#define FIFO_ENABLE FCR_ENABLE
+#define FCR_RCV_RST 0x02
+#define FIFO_RCV_RST FCR_RCV_RST
+#define FCR_XMT_RST 0x04
+#define FIFO_XMT_RST FCR_XMT_RST
+#define FCR_DMA 0x08
+#define FIFO_DMA_MODE FCR_DMA
+#define FCR_RX_LOW 0x00
+#define FIFO_RX_LOW FCR_RX_LOW
+#define FCR_RX_MEDL 0x40
+#define FIFO_RX_MEDL FCR_RX_MEDL
+#define FCR_RX_MEDH 0x80
+#define FIFO_RX_MEDH FCR_RX_MEDH
+#define FCR_RX_HIGH 0xc0
+#define FIFO_RX_HIGH FCR_RX_HIGH
+
+/* 16650 registers #2,[4-7]. Access enabled by LCR_EFR_ENABLE. */
+
+#define EFR_CTS 0x80
+#define EFR_AUTOCTS EFR_CTS
+#define EFR_RTS 0x40
+#define EFR_AUTORTS EFR_RTS
+#define EFR_EFE 0x10 /* enhanced functions enable */
+
+#define com_data 0 /* data register (R) */
+#define com_rdata 0 /* data register (R) */
+#define com_tdata 0 /* data register (W) */
+#define com_dlbl 0 /* divisor latch low (W) */
+#define com_dlbh 0x4 /* divisor latch high (W) */
+#define com_ier 0x4 /* interrupt enable (W) */
+#define com_iir 0x8 /* interrupt identification (R) */
+#define com_fifo 0x8 /* FIFO control (W) */
+#define com_lctl 0xc /* line control register (R/W) */
+#define com_cfcr 0xc /* line control register (R/W) */
+#define com_mcr 0x10 /* modem control register (R/W) */
+#define com_lsr 0x14 /* line status register (R/W) */
+#define com_msr 0x18 /* modem status register (R/W) */
+
+#define NS16550_HZ (33300000) /* 33.3 MHz */
+#define DEFAULT_RCLK (33300000)
+#define NS16550_PAD(x)
+
+/*
+ * ns16550_device: Structure to lay down over the device registers
+ * Note: all accesses are 8-bit reads and writes
+ */
+typedef struct {
+ volatile u_int32_t data; /* data register (R/W) */
+ volatile u_int32_t ier; /* interrupt enable (W) */
+ volatile u_int32_t iir; /* interrupt identification (R) */
+ volatile u_int32_t cfcr; /* line control register (R/W) */
+ volatile u_int32_t mcr; /* modem control register (R/W) */
+ volatile u_int32_t lsr; /* line status register (R/W) */
+ volatile u_int32_t msr; /* modem status register (R/W) */
+ volatile u_int32_t scr; /* scratch register (R/W) */
+} ns16550_device;
+
+
+#define com_lcr com_cfcr
+#define com_efr com_fifo
+
+
+#define NS16550_SYNC __asm __volatile ("sync")
+
+
+#define NS16550_DEVICE (1<<0)
+#define TI16C752B_DEVICE (1<<1)
+#define fifo iir /* 16550 fifo control (W) */
+
+/* 16 bit baud rate divisor (lower byte in dca_data, upper in dca_ier) */
+#define BRTC(x) (NS16550_HZ / (16*(x)))
+
+#define PA_2_K1VA(a) (MIPS_UNCACHED_MEMORY_ADDR | (a))
+
+#ifdef COMBRD
+#undef COMBRD
+#define COMBRD(x) (NS16550_HZ / (16*(x)))
+#endif
+
+void uart_post_init(u_int32_t addr);
+void puts_post(u_int32_t addr, const char *char_p);
+void hexout_post(u_int32_t addr, u_int32_t value, int num_chars);
+
+#endif /* __NS16550_H__ */
+
+/* end of file */
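The BRTC() macro above computes the 16-bit divisor that is split across the two divisor-latch bytes. Worked for the default CONSPEED, 33300000 / (16 * 9600) = 216 by integer division; a user-space sketch of the same arithmetic:

#include <stdio.h>

#define NS16550_HZ	33300000	/* 33.3 MHz input clock */
#define BRTC(x)		(NS16550_HZ / (16 * (x)))

int
main(void)
{
	int divisor = BRTC(9600);

	printf("divisor %d: low byte 0x%02x, high byte 0x%02x\n",
	    divisor, divisor & 0xff, (divisor >> 8) & 0xff);
	return (0);
}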
diff --git a/sys/mips/include/param.h b/sys/mips/include/param.h
new file mode 100644
index 0000000..2e8c425
--- /dev/null
+++ b/sys/mips/include/param.h
@@ -0,0 +1,196 @@
+/* $OpenBSD: param.h,v 1.11 1998/08/30 22:05:35 millert Exp $ */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: machparam.h 1.11 89/08/14
+ * from: @(#)param.h 8.1 (Berkeley) 6/10/93
+ * JNPR: param.h,v 1.6.2.1 2007/09/10 07:49:36 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PARAM_H_
+#define _MACHINE_PARAM_H_
+
+#include <sys/cdefs.h>
+#ifdef _KERNEL
+#ifdef _LOCORE
+#include <machine/psl.h>
+#else
+#include <machine/cpu.h>
+#endif
+#endif
+
+#define __PCI_REROUTE_INTERRUPT
+
+#ifndef MACHINE
+#define MACHINE "mips"
+#endif
+#ifndef MACHINE_ARCH
+#define MACHINE_ARCH "mips"
+#endif
+
+/*
+ * OBJFORMAT_NAMES is a comma-separated list of the object formats
+ * that are supported on the architecture.
+ */
+#define OBJFORMAT_NAMES "elf"
+#define OBJFORMAT_DEFAULT "elf"
+
+#define MID_MACHINE 0 /* None but has to be defined */
+
+#ifdef SMP
+#define MAXSMPCPU 16
+#define MAXCPU MAXSMPCPU
+#else
+#define MAXSMPCPU 1
+#define MAXCPU 1
+#endif
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value for all
+ * data types (int, long, ...). The result is u_int and must be cast to
+ * any desired pointer type.
+ */
+#define _ALIGNBYTES 7
+#define _ALIGN(p) (((u_int)(p) + _ALIGNBYTES) &~ _ALIGNBYTES)
+#define ALIGNED_POINTER(p, t) ((((u_int32_t)(p)) & (sizeof (t) - 1)) == 0)
+
+#define ALIGNBYTES _ALIGNBYTES
+#define ALIGN(p) _ALIGN(p)
+
+#define NBPG 4096 /* bytes/page */
+#define PGOFSET (NBPG-1) /* byte offset into page */
+#define PGSHIFT 12 /* LOG2(NBPG) */
+
+#define PAGE_SHIFT 12 /* LOG2(PAGE_SIZE) */
+#define PAGE_SIZE (1<<PAGE_SHIFT) /* bytes/page */
+#define PAGE_MASK (PAGE_SIZE-1)
+#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
+
+#define NBSEG 0x400000 /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+
+/* XXXimp: This has moved to vmparam.h */
+/* Also, this differs from the mips2 definition, but likely is better */
+/* since this means the kernel won't chew up TLBs when it is executing */
+/* code */
+#define KERNBASE 0x80000000 /* start of kernel virtual */
+#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
+
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#define BLKDEV_IOSIZE 2048
+#define DFLTPHYS (64 * 1024) /* default max raw I/O transfer size */
+
+#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */
+
+#define MAXDUMPPGS 1
+
+#define CLSIZE 1
+#define CLBYTES (CLSIZE * NBPG)
+#define CLSIZELOG2 0
+
+/*
+ * NOTE: In FreeBSD, U-areas don't have a fixed address.
+ * Therefore, any code imported from OpenBSD which depends on
+ * UADDR, UVPN and KERNELSTACK requires porting.
+ */
+#define KSTACK_PAGES 3 /* kernel stack*/
+#define KSTACK_GUARD_PAGES 0 /* pages of kstack guard; 0 disables */
+
+#define UPAGES 2
+
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than CLBYTES (the software page size), and,
+ * on machines that exchange pages of input or output buffers with mbuf
+ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
+ * of the hardware page size.
+ */
+#ifndef MSIZE
+#define MSIZE 256 /* size of an mbuf */
+#endif /* MSIZE */
+
+#ifndef MCLSHIFT
+#define MCLSHIFT 11
+#endif /* MCLSHIFT */
+#define MCLBYTES (1 << MCLSHIFT) /* enough for whole Ethernet packet */
+#define MCLOFSET (MCLBYTES - 1)
+
+/*
+ * Size of kernel malloc arena in CLBYTES-sized logical pages
+ */
+#ifndef NKMEMCLUSTERS
+#define NKMEMCLUSTERS (4096*1024/CLBYTES)
+#endif
+
+/* pages ("clicks") (4096 bytes) to disk blocks */
+#define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT))
+#define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT))
+
+/* pages to bytes */
+#define ctob(x) ((x) << PGSHIFT)
+#define btoc(x) (((x) + PGOFSET) >> PGSHIFT)
+
+/* bytes to disk blocks */
+#define btodb(x) ((x) >> DEV_BSHIFT)
+#define dbtob(x) ((x) << DEV_BSHIFT)
+
+/*
+ * Map a ``block device block'' to a file system block.
+ * This should be device dependent, and should use the bsize
+ * field from the disk label.
+ * For now though just use DEV_BSIZE.
+ */
+#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE))
+
+/*
+ * Conversion macros
+ */
+#define mips_round_page(x) ((((unsigned)(x)) + NBPG - 1) & ~(NBPG-1))
+#define mips_trunc_page(x) ((unsigned)(x) & ~(NBPG-1))
+#define mips_btop(x) ((unsigned)(x) >> PGSHIFT)
+#define mips_ptob(x) ((unsigned)(x) << PGSHIFT)
+#define round_page mips_round_page
+#define trunc_page mips_trunc_page
+#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
+#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
+
+#define pgtok(x) ((x) * (PAGE_SIZE / 1024))
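+
+/*
+ * Worked example, illustrative only: with 4 KB pages and 512-byte disk
+ * blocks the conversions above give ctob(3) == 12288, btoc(5000) == 2,
+ * btodb(8192) == 16 and pgtok(10) == 40 (KB).
+ */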
+
+#ifndef _KERNEL
+#define DELAY(n) { register int N = (n); while (--N > 0); }
+#endif /* !_KERNEL */
+
+#endif /* !_MACHINE_PARAM_H_ */
diff --git a/sys/mips/include/pcb.h b/sys/mips/include/pcb.h
new file mode 100644
index 0000000..16d274d
--- /dev/null
+++ b/sys/mips/include/pcb.h
@@ -0,0 +1,82 @@
+/* $OpenBSD: pcb.h,v 1.3 1998/09/15 10:50:12 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: pcb.h 1.13 89/04/23
+ * from: @(#)pcb.h 8.1 (Berkeley) 6/10/93
+ * JNPR: pcb.h,v 1.2 2006/08/07 11:51:17 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+#include <machine/frame.h>
+
+/*
+ * MIPS process control block
+ */
+struct pcb
+{
+ struct trapframe pcb_regs; /* saved CPU state and registers */
+ label_t pcb_context; /* kernel context for resume */
+ int pcb_onfault; /* for copyin/copyout faults */
+};
+
+/*
+ * These match the register numbers in regnum.h
+ * and are used by switch.S.
+ */
+#define PCB_REG_S0 0
+#define PCB_REG_S1 1
+#define PCB_REG_S2 2
+#define PCB_REG_S3 3
+#define PCB_REG_S4 4
+#define PCB_REG_S5 5
+#define PCB_REG_S6 6
+#define PCB_REG_S7 7
+#define PCB_REG_SP 8
+#define PCB_REG_S8 9
+#define PCB_REG_RA 10
+#define PCB_REG_SR 11
+#define PCB_REG_GP 12
+
+
+#ifdef _KERNEL
+extern struct pcb *curpcb; /* the current running pcb */
+
+void makectx(struct trapframe *, struct pcb *);
+int savectx(struct pcb *);
+#endif
+
+#endif /* !_MACHINE_PCB_H_ */
diff --git a/sys/mips/include/pcb_ext.h b/sys/mips/include/pcb_ext.h
new file mode 100644
index 0000000..9340db5
--- /dev/null
+++ b/sys/mips/include/pcb_ext.h
@@ -0,0 +1,4 @@
+/*-
+ * EMPTY FILE -- needed?
+ * $FreeBSD$
+ */
diff --git a/sys/mips/include/pci_cfgreg.h b/sys/mips/include/pci_cfgreg.h
new file mode 100644
index 0000000..1df8972
--- /dev/null
+++ b/sys/mips/include/pci_cfgreg.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#define CONF1_ADDR_PORT 0x0cf8
+#define CONF1_DATA_PORT 0x0cfc
+
+#define CONF1_ENABLE 0x80000000ul
+#define CONF1_ENABLE_CHK 0x80000000ul
+#define CONF1_ENABLE_MSK 0x7f000000ul
+#define CONF1_ENABLE_CHK1 0xff000001ul
+#define CONF1_ENABLE_MSK1 0x80000001ul
+#define CONF1_ENABLE_RES1 0x80000000ul
+
+#define CONF2_ENABLE_PORT 0x0cf8
+#define CONF2_FORWARD_PORT 0x0cfa
+#define CONF2_ENABLE_CHK 0x0e
+#define CONF2_ENABLE_RES 0x0e
+
+int pci_cfgregopen(void);
+u_int32_t pci_cfgregread(int bus, int slot, int func, int reg, int bytes);
+void pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data, int bytes);
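+
+/*
+ * Usage sketch, illustrative and not part of the original commit: after
+ * a successful pci_cfgregopen(), the 32-bit vendor/device word of
+ * bus 0, slot 0, function 0 could be fetched with
+ *
+ *	if (pci_cfgregopen() != 0)
+ *		id = pci_cfgregread(0, 0, 0, PCIR_DEVVENDOR, 4);
+ *
+ * where PCIR_DEVVENDOR (offset 0) comes from <dev/pci/pcireg.h> and
+ * "id" is an assumed u_int32_t variable.
+ */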
diff --git a/sys/mips/include/pcpu.h b/sys/mips/include/pcpu.h
new file mode 100644
index 0000000..fb2951b
--- /dev/null
+++ b/sys/mips/include/pcpu.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * Copyright (c) Peter Wemm <peter@netplex.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/pcpu.h,v 1.15 2004/11/05 19:16:44 jhb
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCPU_H_
+#define _MACHINE_PCPU_H_
+
+#ifdef _KERNEL
+
+#include <machine/pte.h>
+#define PCPU_MD_FIELDS \
+ pd_entry_t *pc_segbase; /* curthread segbase */ \
+ struct pmap *pc_curpmap; /* pmap of curthread */ \
+ u_int32_t pc_next_asid; /* next ASID to alloc */ \
+ u_int32_t pc_asid_generation; /* current ASID generation */ \
+ u_int pc_pending_ipis; /* the IPIs pending to this CPU */ \
+ void *pc_boot_stack;
+
+#ifdef SMP
+static __inline struct pcpu*
+get_pcpup(void)
+{
+ /*
+ * FREEBSD_DEVELOPERS_FIXME
+ * In multiprocessor case, store/retrieve the pcpu structure
+ * address for current CPU in scratch register for fast access.
+ *
+ * In this routine, read the scratch register to retrieve the PCPU
+ * structure for this CPU
+ */
+ struct pcpu *ret;
+
+ /* ret should contain the pointer to the PCPU structure for this CPU */
+ return(ret);
+}
+
+#define PCPUP ((struct pcpu *)get_pcpup())
+#else
+/* Uniprocessor systems */
+extern struct pcpu *pcpup;
+#define PCPUP pcpup
+#endif /* SMP */
+
+#define PCPU_ADD(member, value) (PCPUP->pc_ ## member += (value))
+#define PCPU_GET(member) (PCPUP->pc_ ## member)
+#define PCPU_INC(member) PCPU_ADD(member, 1)
+#define PCPU_PTR(member) (&PCPUP->pc_ ## member)
+#define PCPU_SET(member,value) (PCPUP->pc_ ## member = (value))
+#define PCPU_LAZY_INC(member) (++PCPUP->pc_ ## member)
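+
+/*
+ * Usage sketch, illustrative only: MD code is expected to reach the
+ * per-CPU fields through the accessors above, for example
+ *
+ *	struct pmap *pm = PCPU_GET(curpmap);
+ *	PCPU_SET(curpmap, kernel_pmap);
+ *	PCPU_INC(pending_ipis);
+ *
+ * kernel_pmap here is the global declared in <machine/pmap.h>.
+ */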
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PCPU_H_ */
diff --git a/sys/mips/include/pltfm.h b/sys/mips/include/pltfm.h
new file mode 100644
index 0000000..e3f118b
--- /dev/null
+++ b/sys/mips/include/pltfm.h
@@ -0,0 +1,29 @@
+/*-
+ * JNPR: pltfm.h,v 1.5.2.1 2007/09/10 05:56:11 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PLTFM_H_
+#define _MACHINE_PLTFM_H_
+
+/*
+ * This file contains platform-specific definitions.
+ */
+#define SDRAM_ADDR_START 0 /* SDRAM addr space */
+#define SDRAM_ADDR_END (SDRAM_ADDR_START + (1024*0x100000))
+#define SDRAM_MEM_SIZE (SDRAM_ADDR_END - SDRAM_ADDR_START)
+
+#define UART_ADDR_START 0x1ef14000 /* UART */
+#define UART_ADDR_END 0x1ef14fff
+#define UART_MEM_SIZE (UART_ADDR_END-UART_ADDR_START)
+
+/*
+ * NS16550 UART address
+ */
+#ifdef ADDR_NS16550_UART1
+#undef ADDR_NS16550_UART1
+#endif
+#define ADDR_NS16550_UART1 0x1ef14000 /* UART */
+#define VADDR_NS16550_UART1 0xbef14000 /* UART */
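+
+/*
+ * Note (illustrative, not in the original commit): VADDR_NS16550_UART1
+ * is simply the physical UART base viewed through the uncached KSEG1
+ * window, i.e. 0x1ef14000 + 0xa0000000 == 0xbef14000.
+ */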
+
+#endif /* !_MACHINE_PLTFM_H_ */
diff --git a/sys/mips/include/pmap.h b/sys/mips/include/pmap.h
new file mode 100644
index 0000000..046c514
--- /dev/null
+++ b/sys/mips/include/pmap.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
+ * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
+ * from: src/sys/i386/include/pmap.h,v 1.65.2.2 2000/11/30 01:54:42 peter
+ * JNPR: pmap.h,v 1.7.2.1 2007/09/10 07:44:12 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#include <machine/vmparam.h>
+
+/*
+ * Pte related macros
+ */
+#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
+
+#define NKPT 120 /* actual number of kernel page tables */
+
+#ifndef NKPDE
+#define NKPDE 255 /* addressable number of page tables/pde's */
+#endif
+
+#define KPTDI (VM_MIN_KERNEL_ADDRESS >> SEGSHIFT)
+#define NUSERPGTBLS (VM_MAXUSER_ADDRESS >> SEGSHIFT)
+
+#ifndef LOCORE
+
+#include <sys/queue.h>
+#include <machine/pte.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+
+
+/*
+ * Pmap stuff
+ */
+struct pv_entry;
+
+struct md_page {
+ int pv_list_count;
+ int pv_flags;
+ TAILQ_HEAD(, pv_entry)pv_list;
+};
+
+#define PV_TABLE_MOD 0x01 /* modified */
+#define PV_TABLE_REF 0x02 /* referenced */
+
+#define ASID_BITS 8
+#define ASIDGEN_BITS (32 - ASID_BITS)
+#define ASIDGEN_MASK ((1 << ASIDGEN_BITS) - 1)
+
+struct pmap {
+ pd_entry_t *pm_segtab; /* KVA of segment table */
+ TAILQ_HEAD(, pv_entry)pm_pvlist; /* list of mappings in
+ * pmap */
+ int pm_count; /* reference count */
+ int pm_active; /* active on cpus */
+ struct {
+ u_int32_t asid:ASID_BITS; /* TLB address space tag */
+ u_int32_t gen:ASIDGEN_BITS; /* its generation number */
+ } pm_asid[MAXSMPCPU];
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ struct vm_page *pm_ptphint; /* pmap ptp hint */
+ struct mtx pm_mtx;
+};
+
+typedef struct pmap *pmap_t;
+
+#ifdef _KERNEL
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <vm/vm_map.h>
+
+pt_entry_t *pmap_pte(pmap_t, vm_offset_t);
+pd_entry_t pmap_segmap(pmap_t pmap, vm_offset_t va);
+vm_offset_t pmap_kextract(vm_offset_t va);
+extern pmap_t kernel_pmap;
+
+#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+
+#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
+#define PMAP_LOCK_ASSERT(pmap, type) mtx_assert(&(pmap)->pm_mtx, (type))
+#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
+#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
+ NULL, MTX_DEF)
+#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
+#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
+#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
+#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
+
+#define PMAP_LGMEM_LOCK_INIT(sysmap) mtx_init(&(sysmap)->lock, "pmap-lgmem", \
+ "per-cpu-map", (MTX_DEF| MTX_DUPOK))
+#define PMAP_LGMEM_LOCK(sysmap) mtx_lock(&(sysmap)->lock)
+#define PMAP_LGMEM_UNLOCK(sysmap) mtx_unlock(&(sysmap)->lock)
+#define PMAP_LGMEM_DESTROY(sysmap) mtx_destroy(&(sysmap)->lock)
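+
+/*
+ * Usage sketch, illustrative only: the pmap mutex macros bracket pmap
+ * modifications in the usual way, e.g.
+ *
+ *	PMAP_LOCK(pmap);
+ *	pte = pmap_pte(pmap, va);
+ *	...
+ *	PMAP_UNLOCK(pmap);
+ *
+ * with PMAP_LOCK_ASSERT(pmap, MA_OWNED) available for diagnostics.
+ */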
+
+#endif /* _KERNEL */
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ pmap_t pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+ TAILQ_ENTRY(pv_entry)pv_list;
+ TAILQ_ENTRY(pv_entry)pv_plist;
+ vm_page_t pv_ptem; /* VM page for pte */
+ boolean_t pv_wired; /* whether this entry is wired */
+} *pv_entry_t;
+
+
+#ifdef _KERNEL
+
+#if defined(DIAGNOSTIC)
+#define PMAP_DIAGNOSTIC
+#endif
+
+#if !defined(PMAP_DIAGNOSTIC)
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
+extern vm_offset_t avail_end;
+extern vm_offset_t avail_start;
+extern vm_offset_t clean_eva;
+extern vm_offset_t clean_sva;
+extern vm_offset_t phys_avail[];
+extern char *ptvmmap; /* poor name! */
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+extern pd_entry_t *segbase;
+
+extern vm_paddr_t mips_wired_tlb_physmem_start;
+extern vm_paddr_t mips_wired_tlb_physmem_end;
+extern u_int need_wired_tlb_page_pool;
+
+#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define pmap_kernel() kernel_pmap
+
+void pmap_bootstrap(void);
+void *pmap_mapdev(vm_offset_t, vm_size_t);
+void pmap_unmapdev(vm_offset_t, vm_size_t);
+vm_offset_t pmap_steal_memory(vm_size_t size);
+void pmap_set_modified(vm_offset_t pa);
+int page_is_managed(vm_offset_t pa);
+void pmap_page_is_free(vm_page_t m);
+void pmap_kushmem_reattach(struct proc *);
+ /* PMAP_INLINE */ void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+ /* PMAP_INLINE */ void pmap_kremove(vm_offset_t va);
+void *pmap_kenter_temporary(vm_paddr_t pa, int i);
+void pmap_kenter_temporary_free(vm_paddr_t pa);
+int pmap_compute_pages_to_dump(void);
+void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
+
+/*
+ * floating virtual pages (FPAGES)
+ *
+ * These are the reserved virtual memory areas which can be
+ * mapped to any physical memory.
+ */
+#define FPAGES 2
+#define FPAGES_SHARED 2
+#define FSPACE ((FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE)
+#define PMAP_FPAGE1 0x00 /* Used by pmap_zero_page &
+ * pmap_copy_page */
+#define PMAP_FPAGE2 0x01 /* Used by pmap_copy_page */
+
+#define PMAP_FPAGE3 0x00 /* Used by pmap_zero_page_idle */
+#define PMAP_FPAGE_KENTER_TEMP 0x01 /* Used by coredump */
+
+struct fpage {
+ vm_offset_t kva;
+ u_int state;
+};
+
+struct sysmaps {
+ struct mtx lock;
+ struct fpage fp[FPAGES];
+};
+
+vm_offset_t
+pmap_map_fpage(vm_paddr_t pa, struct fpage *fp,
+ boolean_t check_unmaped);
+void pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp);
+
+#endif /* _KERNEL */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/mips/include/pmc_mdep.h b/sys/mips/include/pmc_mdep.h
new file mode 100644
index 0000000..46639544
--- /dev/null
+++ b/sys/mips/include/pmc_mdep.h
@@ -0,0 +1,24 @@
+/*-
+ * This file is in the public domain.
+ *
+ * from: src/sys/alpha/include/pmc_mdep.h,v 1.2 2005/06/09 19:45:06 jkoshy
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMC_MDEP_H_
+#define _MACHINE_PMC_MDEP_H_
+
+union pmc_md_op_pmcallocate {
+ uint64_t __pad[4];
+};
+
+/* Logging */
+#define PMCLOG_READADDR PMCLOG_READ64
+#define PMCLOG_EMITADDR PMCLOG_EMIT64
+
+#ifdef _KERNEL
+union pmc_md_pmc {
+};
+
+#endif
+#endif /* !_MACHINE_PMC_MDEP_H_ */
diff --git a/sys/mips/include/ppireg.h b/sys/mips/include/ppireg.h
new file mode 100644
index 0000000..5774757
--- /dev/null
+++ b/sys/mips/include/ppireg.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (C) 2005 TAKAHASHI Yoshihiro. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PPIREG_H_
+#define _MACHINE_PPIREG_H_
+
+#ifdef _KERNEL
+
+#define IO_PPI 0x61 /* Programmable Peripheral Interface */
+
+/*
+ * PPI speaker control values
+ */
+
+#define PIT_ENABLETMR2 0x01 /* Enable timer/counter 2 */
+#define PIT_SPKRDATA 0x02 /* Direct to speaker */
+
+#define PIT_SPKR (PIT_ENABLETMR2 | PIT_SPKRDATA)
+
+#define ppi_spkr_on() outb(IO_PPI, inb(IO_PPI) | PIT_SPKR)
+#define ppi_spkr_off() outb(IO_PPI, inb(IO_PPI) & ~PIT_SPKR)
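+
+/*
+ * Usage sketch, illustrative only: a console beep would bracket timer 2
+ * speaker output with these helpers, e.g.
+ *
+ *	ppi_spkr_on();
+ *	DELAY(10000);
+ *	ppi_spkr_off();
+ *
+ * assuming inb()/outb() are provided by the platform's cpufunc code.
+ */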
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_PPIREG_H_ */
diff --git a/sys/mips/include/proc.h b/sys/mips/include/proc.h
new file mode 100644
index 0000000..6a0ce7d
--- /dev/null
+++ b/sys/mips/include/proc.h
@@ -0,0 +1,71 @@
+/* $OpenBSD: proc.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)proc.h 8.1 (Berkeley) 6/10/93
+ * JNPR: proc.h,v 1.7.2.1 2007/09/10 06:25:24 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_
+
+/*
+ * Machine-dependent part of the proc structure.
+ */
+struct mdthread {
+ int md_flags; /* machine-dependent flags */
+ int md_upte[KSTACK_PAGES]; /* ptes for mapping u pcb */
+ int md_ss_addr; /* single step address for ptrace */
+ int md_ss_instr; /* single step instruction for ptrace */
+ register_t md_saved_intr;
+ u_int md_spinlock_count;
+/* The following is CPU dependent, but kept in for compatibility */
+ int md_pc_ctrl; /* performance counter control */
+ int md_pc_count; /* performance counter */
+ int md_pc_spill; /* performance counter spill */
+ vm_offset_t md_realstack;
+};
+
+/* md_flags */
+#define MDTD_FPUSED 0x0001 /* Process used the FPU */
+
+struct mdproc {
+ /* empty */
+};
+
+struct thread;
+
+void mips_cpu_switch(struct thread *, struct thread *, struct mtx *);
+void mips_cpu_throw(struct thread *, struct thread *);
+
+#endif /* !_MACHINE_PROC_H_ */
diff --git a/sys/mips/include/profile.h b/sys/mips/include/profile.h
new file mode 100644
index 0000000..9659d1f
--- /dev/null
+++ b/sys/mips/include/profile.h
@@ -0,0 +1,172 @@
+/* $OpenBSD: profile.h,v 1.2 1999/01/27 04:46:05 imp Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)profile.h 8.1 (Berkeley) 6/10/93
+ * JNPR: profile.h,v 1.4 2006/12/02 09:53:41 katta
+ * $FreeBSD$
+ */
+#ifndef _MACHINE_PROFILE_H_
+#define _MACHINE_PROFILE_H_
+
+#define _MCOUNT_DECL void ___mcount
+
+/* XXX The cprestore instruction is a "dummy" to shut up as(1). */
+
+#define MCOUNT \
+ __asm(".globl _mcount;" \
+ ".type _mcount,@function;" \
+ "_mcount:;" \
+ ".set noreorder;" \
+ ".set noat;" \
+ ".cpload $25;" \
+ ".cprestore 4;" \
+ "sw $4,8($29);" \
+ "sw $5,12($29);" \
+ "sw $6,16($29);" \
+ "sw $7,20($29);" \
+ "sw $1,0($29);" \
+ "sw $31,4($29);" \
+ "move $5,$31;" \
+ "jal ___mcount;" \
+ "move $4,$1;" \
+ "lw $4,8($29);" \
+ "lw $5,12($29);" \
+ "lw $6,16($29);" \
+ "lw $7,20($29);" \
+ "lw $31,4($29);" \
+ "lw $1,0($29);" \
+ "addu $29,$29,8;" \
+ "j $31;" \
+ "move $31,$1;" \
+ ".set reorder;" \
+ ".set at");
+
+#ifdef _KERNEL
+/*
+ * The following two macros do splhigh and splx respectively.
+ * They have to be defined this way because these are real
+ * functions on the MIPS, and we do not want to invoke mcount
+ * recursively.
+ */
+
+#define MCOUNT_DECL(s) u_long s;
+#ifdef SMP
+extern int mcount_lock;
+#define MCOUNT_ENTER(s) { \
+ s = disable_intr(); \
+ while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
+ /* nothing */ ; \
+}
+#define MCOUNT_EXIT(s) { \
+ atomic_store_rel_int(&mcount_lock, 0); \
+ enableintr(s); \
+}
+#else
+#define MCOUNT_ENTER(s) { s = disable_intr(); }
+#define MCOUNT_EXIT(s) (enableintr(s))
+#endif
+
+/* REVISIT for mips */
+/*
+ * Config generates something to tell the compiler to align functions on 16
+ * byte boundaries. A strict alignment is good for keeping the tables small.
+ */
+#define FUNCTION_ALIGNMENT 16
+
+#ifdef GUPROF
+struct gmonparam;
+void stopguprof __P((struct gmonparam *p));
+#else
+#define stopguprof(p)
+#endif /* GUPROF */
+
+#else /* !_KERNEL */
+
+#define FUNCTION_ALIGNMENT 4
+
+typedef unsigned int uintfptr_t;
+
+#endif /* _KERNEL */
+
+/*
+ * An unsigned integral type that can hold non-negative difference between
+ * function pointers.
+ */
+typedef u_int fptrdiff_t;
+
+#ifdef _KERNEL
+
+void mcount(uintfptr_t frompc, uintfptr_t selfpc);
+
+#ifdef GUPROF
+struct gmonparam;
+
+void nullfunc_loop_profiled(void);
+void nullfunc_profiled(void);
+void startguprof(struct gmonparam *p);
+void stopguprof(struct gmonparam *p);
+#else
+#define startguprof(p)
+#define stopguprof(p)
+#endif /* GUPROF */
+
+#else /* !_KERNEL */
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+#ifdef __GNUC__
+#ifdef __ELF__
+void mcount(void) __asm(".mcount");
+#else
+void mcount(void) __asm("mcount");
+#endif
+#endif
+void _mcount(uintfptr_t frompc, uintfptr_t selfpc);
+__END_DECLS
+
+#endif /* _KERNEL */
+
+#ifdef GUPROF
+/* XXX doesn't quite work outside kernel yet. */
+extern int cputime_bias;
+
+__BEGIN_DECLS
+int cputime(void);
+void empty_loop(void);
+void mexitcount(uintfptr_t selfpc);
+void nullfunc(void);
+void nullfunc_loop(void);
+__END_DECLS
+#endif
+
+#endif /* !_MACHINE_PROFILE_H_ */
diff --git a/sys/mips/include/psl.h b/sys/mips/include/psl.h
new file mode 100644
index 0000000..9d05d13
--- /dev/null
+++ b/sys/mips/include/psl.h
@@ -0,0 +1,54 @@
+/* $OpenBSD: psl.h,v 1.2 1998/01/28 13:46:25 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)psl.h 8.1 (Berkeley) 6/10/93
+ * JNPR: psl.h,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PSL_H_
+#define _MACHINE_PSL_H_
+
+#include <machine/cpu.h>
+
+/*
+ * Macros to decode processor status word.
+ */
+#define USERMODE(ps) (((ps) & SR_KSU_MASK) == SR_KSU_USER)
+#define BASEPRI(ps) (((ps) & (INT_MASK | SR_INT_ENA_PREV)) \
+ == (INT_MASK | SR_INT_ENA_PREV))
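+
+/*
+ * Usage sketch, illustrative only: trap handlers typically apply these
+ * to the status register saved in the trapframe, e.g.
+ *
+ *	if (USERMODE(trapframe->sr))
+ *		... the exception came from user mode ...
+ *
+ * where "trapframe" and its sr field are assumed from <machine/frame.h>.
+ */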
+
+#ifdef _KERNEL
+#include <machine/intr.h>
+#endif
+#endif /* _MACHINE_PSL_H_ */
diff --git a/sys/mips/include/pte.h b/sys/mips/include/pte.h
new file mode 100644
index 0000000..809a71f
--- /dev/null
+++ b/sys/mips/include/pte.h
@@ -0,0 +1,149 @@
+/* $OpenBSD: pte.h,v 1.4 1998/01/28 13:46:25 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: pte.h 1.11 89/09/03
+ * from: @(#)pte.h 8.1 (Berkeley) 6/10/93
+ * JNPR: pte.h,v 1.1.4.1 2007/09/10 06:20:19 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PTE_H_
+#define _MACHINE_PTE_H_
+
+#include <machine/endian.h>
+
+/*
+ * MIPS hardware page table entry
+ */
+
+#ifndef _LOCORE
+struct pte {
+#if BYTE_ORDER == BIG_ENDIAN
+unsigned int pg_prot:2, /* SW: access control */
+ pg_pfnum:24, /* HW: core page frame number or 0 */
+ pg_attr:3, /* HW: cache attribute */
+ pg_m:1, /* HW: modified (dirty) bit */
+ pg_v:1, /* HW: valid bit */
+ pg_g:1; /* HW: ignore pid bit */
+#endif
+#if BYTE_ORDER == LITTLE_ENDIAN
+unsigned int pg_g:1, /* HW: ignore pid bit */
+ pg_v:1, /* HW: valid bit */
+ pg_m:1, /* HW: modified (dirty) bit */
+ pg_attr:3, /* HW: cache attribute */
+ pg_pfnum:24, /* HW: core page frame number or 0 */
+ pg_prot:2; /* SW: access control */
+#endif
+};
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+struct tlb {
+ int tlb_mask;
+ int tlb_hi;
+ int tlb_lo0;
+ int tlb_lo1;
+};
+
+typedef unsigned long pt_entry_t;
+typedef pt_entry_t *pd_entry_t;
+
+#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
+#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
+
+#endif /* _LOCORE */
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#define PTE_WIRED 0x80000000 /* SW */
+#define PTE_W PTE_WIRED
+#define PTE_RO 0x40000000 /* SW */
+
+#define PTE_G 0x00000001 /* HW */
+#define PTE_V 0x00000002
+/*#define PTE_NV 0x00000000 Not Used */
+#define PTE_M 0x00000004
+#define PTE_RW PTE_M
+#define PTE_ODDPG 0x00001000
+/*#define PG_ATTR 0x0000003f Not Used */
+#define PTE_UNCACHED 0x00000010
+#define PTE_CACHE 0x00000018
+/*#define PG_CACHEMODE 0x00000038 Not Used*/
+#define PTE_ROPAGE (PTE_V | PTE_RO | PTE_CACHE) /* Write protected */
+#define PTE_RWPAGE (PTE_V | PTE_M | PTE_CACHE) /* Not wr-prot not clean */
+#define PTE_CWPAGE (PTE_V | PTE_CACHE) /* Not wr-prot but clean */
+#define PTE_IOPAGE (PTE_G | PTE_V | PTE_M | PTE_UNCACHED)
+#define PTE_FRAME 0x3fffffc0
+#define PTE_HVPN 0xffffe000 /* Hardware page no mask */
+#define PTE_ASID 0x000000ff /* Address space ID */
+
+#define PTE_SHIFT 6
+#define pfn_is_ext(x) ((x) & 0x3c000000)
+#define vad_to_pfn(x) (((unsigned)(x) >> PTE_SHIFT) & PTE_FRAME)
+#define vad_to_pfn64(x) (((quad_t)(x) >> PTE_SHIFT) & PTE_FRAME)
+#define pfn_to_vad(x) (((x) & PTE_FRAME) << PTE_SHIFT)
+
+/* User virtual to pte offset in page table */
+#define vad_to_pte_offset(adr) (((adr) >> PGSHIFT) & (NPTEPG -1))
+
+#define mips_pg_v(entry) ((entry) & PTE_V)
+#define mips_pg_wired(entry) ((entry) & PTE_WIRED)
+#define mips_pg_m_bit() (PTE_M)
+#define mips_pg_rw_bit() (PTE_M)
+#define mips_pg_ro_bit() (PTE_RO)
+#define mips_pg_ropage_bit() (PTE_ROPAGE)
+#define mips_pg_rwpage_bit() (PTE_RWPAGE)
+#define mips_pg_cwpage_bit() (PTE_CWPAGE)
+#define mips_pg_global_bit() (PTE_G)
+#define mips_pg_wired_bit() (PTE_WIRED)
+#define mips_tlbpfn_to_paddr(x) pfn_to_vad((x))
+#define mips_paddr_to_tlbpfn(x) vad_to_pfn((x))
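+
+/*
+ * Worked example, illustrative only: with PTE_SHIFT == 6, a page at
+ * physical address 0x00a34000 gives vad_to_pfn(0x00a34000) ==
+ * (0x00a34000 >> 6) & PTE_FRAME == 0x00028d00, and pfn_to_vad() of
+ * that value recovers 0x00a34000.
+ */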
+
+/* These are not used */
+#define PTE_SIZE_4K 0x00000000
+#define PTE_SIZE_16K 0x00006000
+#define PTE_SIZE_64K 0x0001e000
+#define PTE_SIZE_256K 0x0007e000
+#define PTE_SIZE_1M 0x001fe000
+#define PTE_SIZE_4M 0x007fe000
+#define PTE_SIZE_16M 0x01ffe000
+
+#endif /* !_MACHINE_PTE_H_ */
diff --git a/sys/mips/include/ptrace.h b/sys/mips/include/ptrace.h
new file mode 100644
index 0000000..a34b6f9
--- /dev/null
+++ b/sys/mips/include/ptrace.h
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ptrace.h 8.1 (Berkeley) 6/11/93
+ * from: src/sys/i386/include/ptrace.h,v 1.14 2005/05/31 09:43:04 dfr
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PTRACE_H_
+#define _MACHINE_PTRACE_H_
+
+#endif
diff --git a/sys/mips/include/queue.h b/sys/mips/include/queue.h
new file mode 100644
index 0000000..d992332
--- /dev/null
+++ b/sys/mips/include/queue.h
@@ -0,0 +1,171 @@
+/*-
+ * Copyright (c) 1996-1997, 2001, 2005, Juniper Networks, Inc.
+ * All rights reserved.
+ * Jim Hayes, November 1996
+ *
+ * queue.h - Description of uKernel queues, for the Juniper Kernel
+ *
+ * JNPR: queue.h,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QUEUE_H__
+#define __QUEUE_H__
+
+/*---------------------------------------------------------------------------
+ * QUEUE MANAGEMENT DOCUMENTATION
+ */
+
+/*
+ --------
+ Q_INIT()
+ --------
+
+ void q_init(void)
+
+ Initialize the queue management system for the microkernel.
+ This initializes the debugging flags and sets up accounting.
+
+ ---------
+ Q_ALLOC()
+ ---------
+
+ queue_t *q_alloc()
+
+ Allocates a queue from kernel memory, and initializes it for you.
+
+ The default initialization provides a queue that is unbounded.
+
+ If you want a bounded queue or other special behavior, use
+ q_control() after initialization.
+
+ q_alloc() returns NULL in the face of peril or low memory.
+
+ --------
+ Q_FREE()
+ --------
+
+ void *q_free(queue_t *queue_pointer)
+
+ Returns a queue to kernel memory, frees the queue contents for
+ you using free(), and complains (with a traceback) if you tried
+ to kill off a non-empty queue.
+
+ If any threads are waiting on the queue, wake them up.
+
+ -----------
+ Q_CONTROL()
+ -----------
+ void q_control(queue_t *queue_pointer, queue_size_t max_queue_size);
+
+ For now, allows you to limit queue growth.
+
+ ----------------
+ Q_DEQUEUE_WAIT() ** MAY CAUSE THREAD TO BLOCK/CANNOT BE CALLED FROM ISRs **
+ ----------------
+
+ void *q_dequeue_wait(queue_t *queue_pointer, wakeup_mask_t *mask)
+
+ Removes and returns a pointer to the next message in the specified
+ queue. If the queue is empty, the calling thread goes to sleep
+ until something is queued to the queue. If this call returns NULL,
+ then an extraordinary event requires this thread's attention--
+ check errno in this case.
+
+ ---------
+ Q_DEQUEUE ** CAN BE CALLED FROM ISRs **
+ ---------
+
+ void *q_dequeue(queue_t *queue_pointer)
+
+ Just like q_dequeue_wait(), but instead of blocking, return NULL.
+
+ -----------
+ Q_ENQUEUE() ** CAN BE CALLED FROM ISRs **
+ -----------
+
+ boolean q_enqueue(queue_t *queue_pointer, void *element_pointer)
+
+ Add the element to the end of the named queue. If the add fails
+ because a limit has been reached, return TRUE; otherwise return
+ FALSE.
+
+ ----------
+ Q_URGENT()
+ ----------
+
+ boolean q_urgent(queue_t *queue_pointer, void *element_pointer)
+
+ Same as q_enqueue(), except this element will be placed at the top
+ of the queue, and will be picked off at the next q_dequeue_wait()
+ operation.
+
+ --------
+ Q_PEEK() ** CAN BE CALLED FROM ISRs **
+ --------
+
+ void *q_peek(queue_t *queue_pointer)
+
+ Returns a pointer to the top element of the queue without actually
+ dequeuing it. Returns NULL if the queue is empty.
+
+ This routine will never block.
+
+ ----------
+ Q_DELETE()
+ ----------
+
+ void q_delete(queue_t *queue_pointer, void *element_pointer)
+
+ Delete the element_pointer from the queue, if it exists. This
+ isn't speedy, and isn't meant for tasks requiring performance.
+ Its primary use is to pull something off the queue when you know
+ that in the common case it is going to be at or near the top of
+ the list (e.g., waking a thread from a wake list when extraordinary
+ conditions exist, and you have to pluck it from the middle of the
+ list).
+
+ This routine does not block or return anything.
+
+ --------
+ Q_SIZE()
+ --------
+
+ queue_size_t q_size(queue_t *queue_pointer)
+
+ Returns the number of elements in the queue.
+
+ ------------
+ Q_MAX_SIZE()
+ ------------
+
+ queue_size_t q_max_size(queue_t *queue_pointer);
+
+ Returns the maximum size of this queue, or 0 if this queue is
+ unbounded.
+
+*/
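+
+/*
+ * Usage sketch, illustrative and not part of the original commit:
+ *
+ *	queue_t *q = q_alloc();
+ *	if (q != NULL && !q_enqueue(q, msg))
+ *		msg = q_dequeue(q);
+ *
+ * where "msg" is an assumed pointer-sized element; q_enqueue() returns
+ * TRUE only when the queue's growth limit has been reached.
+ */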
+
+/*-------------------------------------------------------------------------
+ * Basic queue management structures.
+ */
+
+/*
+ * Typedefs
+ */
+
+typedef u_int32_t queue_size_t;
+
+/*
+ * Prototypes
+ */
+
+void q_init(void);
+queue_t *q_alloc(void);
+void *q_peek(queue_t *queue);
+void *q_dequeue(queue_t *queue);
+boolean q_enqueue(queue_t *queue, void *item);
+boolean q_urgent(queue_t *queue, void *item);
+
+#endif /* __QUEUE_H__ */
diff --git a/sys/mips/include/reg.h b/sys/mips/include/reg.h
new file mode 100644
index 0000000..6510db6
--- /dev/null
+++ b/sys/mips/include/reg.h
@@ -0,0 +1,78 @@
+/* $OpenBSD: reg.h,v 1.1 1998/01/28 11:14:53 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: reg.h 1.1 90/07/09
+ * @(#)reg.h 8.2 (Berkeley) 1/11/94
+ * JNPR: reg.h,v 1.6 2006/09/15 12:52:34 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_
+
+/*
+ * Location of the user's stored registers relative to ZERO.
+ * Must be visible to assembly code.
+ */
+#include <machine/regnum.h>
+
+/*
+ * Register set accessible via /proc/$pid/reg
+ */
+struct reg {
+ register_t r_regs[NUMSAVEREGS]; /* numbered as above */
+};
+
+struct fpreg {
+ f_register_t r_regs[NUMFPREGS];
+};
+
+/*
+ * Placeholder.
+ */
+struct dbreg {
+ unsigned long junk;
+};
+
+#ifdef _KERNEL
+int fill_fpregs(struct thread *, struct fpreg *);
+int fill_regs(struct thread *, struct reg *);
+int set_fpregs(struct thread *, struct fpreg *);
+int set_regs(struct thread *, struct reg *);
+int fill_dbregs(struct thread *, struct dbreg *);
+int set_dbregs(struct thread *, struct dbreg *);
+#endif
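+
+/*
+ * Usage sketch, illustrative only: a debugger or ptrace back end could
+ * read a thread's saved stack pointer as
+ *
+ *	struct reg r;
+ *	if (fill_regs(td, &r) == 0)
+ *		sp = r.r_regs[SP];
+ *
+ * where SP is the regnum.h index included above and "td" is an assumed
+ * struct thread pointer.
+ */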
+
+#endif /* !_MACHINE_REG_H_ */
diff --git a/sys/mips/include/regdef.h b/sys/mips/include/regdef.h
new file mode 100644
index 0000000..bb9eb3d
--- /dev/null
+++ b/sys/mips/include/regdef.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2001, Juniper Networks, Inc.
+ * All rights reserved.
+ * Truman Joe, February 2001.
+ *
+ * regdef.h -- MIPS register definitions.
+ *
+ * JNPR: regdef.h,v 1.3 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_REGDEF_H_
+#define _MACHINE_REGDEF_H_
+
+#if defined(__ASSEMBLER__)
+/* General purpose CPU register names */
+#define zero $0 /* wired zero */
+#define AT $at /* assembler temp */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8 /* caller saved */
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12 /* caller saved (32-bit ABI); argument reg (64-bit ABI) */
+#define t5 $13
+#define t6 $14
+#define t7 $15
+#define s0 $16 /* callee saved */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* code generator */
+#define t9 $25
+#define k0 $26 /* kernel temporary */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define fp $30 /* frame pointer */
+#define s8 $30 /* callee saved */
+#define ra $31 /* return address */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* !_MACHINE_REGDEF_H_ */
diff --git a/sys/mips/include/regnum.h b/sys/mips/include/regnum.h
new file mode 100644
index 0000000..1e3f2c8
--- /dev/null
+++ b/sys/mips/include/regnum.h
@@ -0,0 +1,203 @@
+/* $OpenBSD: regnum.h,v 1.3 1999/01/27 04:46:06 imp Exp $ */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: reg.h 1.1 90/07/09
+ * @(#)reg.h 8.2 (Berkeley) 1/11/94
+ * JNPR: regnum.h,v 1.6 2007/08/09 11:23:32 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_REGNUM_H_
+#define _MACHINE_REGNUM_H_
+
+#define STAND_ARG_SIZE 16
+#define STAND_FRAME_SIZE 24
+#define STAND_RA_OFFSET 20
+
+/*
+ * These must match the numbers in pcb.h
+ * and are used by swtch.S.
+ */
+#define PREG_S0 0
+#define PREG_S1 1
+#define PREG_S2 2
+#define PREG_S3 3
+#define PREG_S4 4
+#define PREG_S5 5
+#define PREG_S6 6
+#define PREG_S7 7
+#define PREG_SP 8
+#define PREG_S8 9
+#define PREG_RA 10
+#define PREG_SR 11
+#define PREG_GP 12
+
+
+
+/*
+ * Location of the saved registers relative to ZERO.
+ * This must match struct trapframe defined in frame.h exactly.
+ */
+#define ZERO 0
+#define AST 1
+#define V0 2
+#define V1 3
+#define A0 4
+#define A1 5
+#define A2 6
+#define A3 7
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#define T4 12
+#define T5 13
+#define T6 14
+#define T7 15
+#define S0 16
+#define S1 17
+#define S2 18
+#define S3 19
+#define S4 20
+#define S5 21
+#define S6 22
+#define S7 23
+#define T8 24
+#define T9 25
+#define K0 26
+#define K1 27
+#define GP 28
+#define SP 29
+#define S8 30
+#define RA 31
+#define SR 32
+#define PS SR /* alias for SR */
+#define MULLO 33
+#define MULHI 34
+#define BADVADDR 35
+#define CAUSE 36
+#define PC 37
+/*
+ * IC is valid only on RM7K and RM9K processors. Access to it is
+ * controlled by IC_INT_REG, which is defined in the kernel config.
+ */
+#define IC 38
+#define DUMMY 39 /* for 8 byte alignment */
+#define NUMSAVEREGS 40
+
+/*
+ * Index of FP registers in 'struct frame', counting from the beginning
+ * of the frame (i.e., including the general registers).
+ */
+#define FPBASE NUMSAVEREGS
+#define F0 (FPBASE+0)
+#define F1 (FPBASE+1)
+#define F2 (FPBASE+2)
+#define F3 (FPBASE+3)
+#define F4 (FPBASE+4)
+#define F5 (FPBASE+5)
+#define F6 (FPBASE+6)
+#define F7 (FPBASE+7)
+#define F8 (FPBASE+8)
+#define F9 (FPBASE+9)
+#define F10 (FPBASE+10)
+#define F11 (FPBASE+11)
+#define F12 (FPBASE+12)
+#define F13 (FPBASE+13)
+#define F14 (FPBASE+14)
+#define F15 (FPBASE+15)
+#define F16 (FPBASE+16)
+#define F17 (FPBASE+17)
+#define F18 (FPBASE+18)
+#define F19 (FPBASE+19)
+#define F20 (FPBASE+20)
+#define F21 (FPBASE+21)
+#define F22 (FPBASE+22)
+#define F23 (FPBASE+23)
+#define F24 (FPBASE+24)
+#define F25 (FPBASE+25)
+#define F26 (FPBASE+26)
+#define F27 (FPBASE+27)
+#define F28 (FPBASE+28)
+#define F29 (FPBASE+29)
+#define F30 (FPBASE+30)
+#define F31 (FPBASE+31)
+#define FSR (FPBASE+32)
+#define FSR_DUMMY (FPBASE+33) /* For 8 byte alignment */
+
+#define NUMFPREGS 34
+
+#define NREGS (NUMSAVEREGS + NUMFPREGS)
+
+/*
+ * Index of FP registers in 'struct frame', relative to the base
+ * of the FP registers in frame (i.e., *not* including the general
+ * registers).
+ */
+#define F0_NUM (0)
+#define F1_NUM (1)
+#define F2_NUM (2)
+#define F3_NUM (3)
+#define F4_NUM (4)
+#define F5_NUM (5)
+#define F6_NUM (6)
+#define F7_NUM (7)
+#define F8_NUM (8)
+#define F9_NUM (9)
+#define F10_NUM (10)
+#define F11_NUM (11)
+#define F12_NUM (12)
+#define F13_NUM (13)
+#define F14_NUM (14)
+#define F15_NUM (15)
+#define F16_NUM (16)
+#define F17_NUM (17)
+#define F18_NUM (18)
+#define F19_NUM (19)
+#define F20_NUM (20)
+#define F21_NUM (21)
+#define F22_NUM (22)
+#define F23_NUM (23)
+#define F24_NUM (24)
+#define F25_NUM (25)
+#define F26_NUM (26)
+#define F27_NUM (27)
+#define F28_NUM (28)
+#define F29_NUM (29)
+#define F30_NUM (30)
+#define F31_NUM (31)
+#define FSR_NUM (32)
+
+#endif /* !_MACHINE_REGNUM_H_ */
diff --git a/sys/mips/include/reloc.h b/sys/mips/include/reloc.h
new file mode 100644
index 0000000..113745f
--- /dev/null
+++ b/sys/mips/include/reloc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/reloc.h,v 1.1.1.1.6.1 2000/08/03 00:48:04 peter
+ * JNPR: reloc.h,v 1.3 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
diff --git a/sys/mips/include/resource.h b/sys/mips/include/resource.h
new file mode 100644
index 0000000..c5b4283
--- /dev/null
+++ b/sys/mips/include/resource.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * from: src/sys/i386/include/resource.h,v 1.3 1999/10/14 21:38:30 dfr
+ * JNPR: resource.h,v 1.3 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define _MACHINE_RESOURCE_H_ 1
+
+/*
+ * Definitions of resource types for MIPS machines
+ * with support for legacy ISA devices and drivers.
+ */
+
+#define SYS_RES_IRQ 1 /* interrupt lines */
+#define SYS_RES_DRQ 2 /* isa dma lines */
+#define SYS_RES_MEMORY 3 /* i/o memory */
+#define SYS_RES_IOPORT 4 /* i/o ports */
+
+#endif /* !_MACHINE_RESOURCE_H_ */
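
These are the standard newbus resource-type codes; a hedged sketch of how a driver attach routine might use them to allocate an interrupt line and a register window (the device name and rids are illustrative):

    /* Sketch only: allocate an IRQ and a memory window by resource type. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/bus.h>
    #include <sys/rman.h>
    #include <machine/resource.h>

    static int
    mydev_attach(device_t dev)
    {
        struct resource *irq, *mem;
        int irq_rid = 0, mem_rid = 0;

        irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid, RF_ACTIVE);
        mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, RF_ACTIVE);
        if (irq == NULL || mem == NULL)
            return (ENXIO);
        /* ... hook up the interrupt and map the registers ... */
        return (0);
    }
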
diff --git a/sys/mips/include/rm7000.h b/sys/mips/include/rm7000.h
new file mode 100644
index 0000000..f1c0c44
--- /dev/null
+++ b/sys/mips/include/rm7000.h
@@ -0,0 +1,95 @@
+/* $OpenBSD$ */
+
+/*
+ * Copyright (c) 2000 Opsycon Open System Consulting AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon Open System
+ * Consulting AB, Sweden under contract to QED, Inc.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: rm7000.h,v 1.2.4.1 2007/08/29 12:06:30 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RM7000_H_
+#define _MACHINE_RM7000_H_
+
+/*
+ * QED RM7000 specific defines.
+ */
+
+/*
+ * Performance counters.
+ */
+
+#define PCNT_SRC_CLOCKS 0x00 /* Clock cycles */
+#define PCNT_SRC_INSTR 0x01 /* Total instructions issued */
+#define PCNT_SRC_FPINSTR 0x02 /* Float instructions issued */
+#define PCNT_SRC_IINSTR 0x03 /* Integer instructions issued */
+#define PCNT_SRC_LOAD 0x04 /* Load instructions issued */
+#define PCNT_SRC_STORE 0x05 /* Store instructions issued */
+#define PCNT_SRC_DUAL 0x06 /* Dual issued pairs */
+#define PCNT_SRC_BRPREF 0x07 /* Branch prefetches */
+#define PCNT_SRC_EXTMISS 0x08 /* External cache misses */
+#define PCNT_SRC_STALL 0x09 /* Stall cycles */
+#define PCNT_SRC_SECMISS 0x0a /* Secondary cache misses */
+#define PCNT_SRC_INSMISS 0x0b /* Instruction cache misses */
+#define PCNT_SRC_DTAMISS 0x0c /* Data cache misses */
+#define PCNT_SRC_DTLBMISS 0x0d /* Data TLB misses */
+#define PCNT_SRC_ITLBMISS 0x0e /* Instruction TLB misses */
+#define PCNT_SRC_JTLBIMISS 0x0f /* Joint TLB instruction misses */
+#define PCNT_SRC_JTLBDMISS 0x10 /* Joint TLB data misses */
+#define PCNT_SRC_BRTAKEN 0x11 /* Branches taken */
+#define PCNT_SRC_BRISSUED 0x12 /* Branches issued */
+#define PCNT_SRC_SECWBACK 0x13 /* Secondary cache writebacks */
+#define PCNT_SRC_PRIWBACK 0x14 /* Primary cache writebacks */
+#define PCNT_SRC_DCSTALL 0x15 /* Dcache miss stall cycles */
+#define PCNT_SRC_MISS 0x16 /* Cache misses */
+#define	PCNT_SRC_FPEXC		0x17	/* FP possible exception cycles */
+#define PCNT_SRC_MULSLIP 0x18 /* Slip cycles due to mult. busy */
+#define PCNT_SRC_CP0SLIP 0x19 /* CP0 Slip cycles */
+#define PCNT_SRC_LDSLIP 0x1a /* Slip cycles due to pend. non-b ld */
+#define PCNT_SRC_WBFULL 0x1b /* Write buffer full stall cycles */
+#define PCNT_SRC_CISTALL 0x1c /* Cache instruction stall cycles */
+#define PCNT_SRC_MULSTALL 0x1d /* Multiplier stall cycles */
+#define	PCNT_SRC_ELDSTALL	0x1d	/* Exception stall due to non-b ld */
+#define PCNT_SRC_MAX 0x1d /* Maximum PCNT select code */
+
+/*
+ * Counter control bits.
+ */
+
+#define PCNT_CE 0x0400 /* Count enable */
+#define PCNT_UM 0x0200 /* Count in User mode */
+#define PCNT_KM 0x0100 /* Count in kernel mode */
+
+/*
+ * Performance counter system call function codes.
+ */
+#define PCNT_FNC_SELECT 0x0001 /* Select counter source */
+#define PCNT_FNC_READ 0x0002 /* Read current value of counter */
+
+#endif /* _MACHINE_RM7000_H_ */
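
The header gives select codes and enable bits but does not spell out the control-register layout, so the composition below is only an assumption for illustration (standalone, with the relevant defines copied in):

    /* Assumption: control word = a PCNT_SRC_* select code or'ed with enables. */
    #include <stdio.h>
    #include <stdint.h>

    #define PCNT_SRC_DCSTALL 0x15   /* Dcache miss stall cycles */
    #define PCNT_CE          0x0400 /* Count enable */
    #define PCNT_KM          0x0100 /* Count in kernel mode */

    int
    main(void)
    {
        uint32_t ctl = PCNT_SRC_DCSTALL | PCNT_CE | PCNT_KM;

        printf("performance counter control word: 0x%04x\n", ctl);
        return (0);
    }
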
diff --git a/sys/mips/include/runq.h b/sys/mips/include/runq.h
new file mode 100644
index 0000000..2e57301
--- /dev/null
+++ b/sys/mips/include/runq.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/i386/include/runq.h,v 1.3 2005/01/06 22:18:15 imp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RUNQ_H_
+#define _MACHINE_RUNQ_H_
+
+#define RQB_LEN (2) /* Number of priority status words. */
+#define RQB_L2BPW (5) /* Log2(sizeof(rqb_word_t) * NBBY)). */
+#define RQB_BPW (1<<RQB_L2BPW) /* Bits in an rqb_word_t. */
+
+#define RQB_BIT(pri) (1 << ((pri) & (RQB_BPW - 1)))
+#define RQB_WORD(pri) ((pri) >> RQB_L2BPW)
+
+#define RQB_FFS(word) (ffs(word) - 1)
+
+/*
+ * Type of run queue status word.
+ */
+typedef u_int32_t rqb_word_t;
+
+#endif
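
With RQB_L2BPW = 5 each status word covers 32 priorities, so a priority maps to word pri >> 5 and bit pri & 31, and RQB_FFS() recovers the lowest set bit; a standalone sketch with the macros copied so it builds outside the kernel:

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h>            /* ffs() */

    typedef uint32_t rqb_word_t;

    #define RQB_L2BPW       5
    #define RQB_BPW         (1 << RQB_L2BPW)
    #define RQB_BIT(pri)    (1 << ((pri) & (RQB_BPW - 1)))
    #define RQB_WORD(pri)   ((pri) >> RQB_L2BPW)
    #define RQB_FFS(word)   (ffs(word) - 1)

    int
    main(void)
    {
        rqb_word_t rqb[2] = { 0, 0 };
        int pri = 37;               /* lands in word 1, bit 5 */

        rqb[RQB_WORD(pri)] |= RQB_BIT(pri);
        printf("word %d, lowest set bit %d\n",
            RQB_WORD(pri), RQB_FFS(rqb[RQB_WORD(pri)]));
        return (0);
    }
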
diff --git a/sys/mips/include/segments.h b/sys/mips/include/segments.h
new file mode 100644
index 0000000..406b965
--- /dev/null
+++ b/sys/mips/include/segments.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)segments.h 7.1 (Berkeley) 5/9/91
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SEGMENTS_H_
+#define _MACHINE_SEGMENTS_H_
+
+#endif /* !_MACHINE_SEGMENTS_H_ */
diff --git a/sys/mips/include/setjmp.h b/sys/mips/include/setjmp.h
new file mode 100644
index 0000000..575efdc
--- /dev/null
+++ b/sys/mips/include/setjmp.h
@@ -0,0 +1,59 @@
+/* From: NetBSD: setjmp.h,v 1.2 1997/04/06 08:47:41 cgd Exp */
+
+/*-
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * JNPR: setjmp.h,v 1.2 2006/12/02 09:53:41 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SETJMP_H_
+#define _MACHINE_SETJMP_H_
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+#include <sys/cdefs.h>
+
+#define _JBLEN 95 /* size, in longs, of a jmp_buf */
+
+/*
+ * jmp_buf and sigjmp_buf are encapsulated in different structs to force
+ * compile-time diagnostics for mismatches. The structs are the same
+ * internally to avoid some run-time errors for mismatches.
+ */
+#ifndef _LOCORE
+#ifndef __ASSEMBLER__
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XSI_VISIBLE
+typedef struct _sigjmp_buf { long _sjb[_JBLEN + 1]; } sigjmp_buf[1];
+#endif
+
+typedef struct _jmp_buf { long _jb[_JBLEN + 1]; } jmp_buf[1];
+#endif /* __ASSEMBLER__ */
+#endif /* _LOCORE */
+
+#endif /* _MACHINE_SETJMP_H_ */
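
_JBLEN only sizes the register save area; callers use the ordinary setjmp(3)/longjmp(3) pattern, for example:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf env;

    static void
    fail(void)
    {
        longjmp(env, 42);           /* unwind back to the setjmp() call site */
    }

    int
    main(void)
    {
        int rv = setjmp(env);

        if (rv == 0) {
            fail();
            /* not reached */
        }
        printf("returned to setjmp with %d\n", rv);     /* prints 42 */
        return (0);
    }
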
diff --git a/sys/mips/include/sf_buf.h b/sys/mips/include/sf_buf.h
new file mode 100644
index 0000000..0a9980c
--- /dev/null
+++ b/sys/mips/include/sf_buf.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/i386/include/sf_buf.h,v 1.4 2005/02/13 06:23:13 alc
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SF_BUF_H_
+#define _MACHINE_SF_BUF_H_
+
+#include <sys/queue.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+
+struct vm_page;
+
+struct sf_buf {
+ LIST_ENTRY(sf_buf) list_entry; /* list of buffers */
+ TAILQ_ENTRY(sf_buf) free_entry; /* list of buffers */
+ struct vm_page *m; /* currently mapped page */
+ vm_offset_t kva; /* va of mapping */
+ int ref_count; /* usage of this mapping */
+#ifdef SMP
+ cpumask_t cpumask; /* cpus on which mapping is valid */
+#endif
+};
+
+static __inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+
+ return (sf->kva);
+}
+
+static __inline struct vm_page *
+sf_buf_page(struct sf_buf *sf)
+{
+
+ return (sf->m);
+}
+
+#endif /* !_MACHINE_SF_BUF_H_ */
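
sf_buf_kva() and sf_buf_page() are accessors for a temporary kernel mapping of a page; a hedged kernel-context sketch pairing them with sf_buf_alloc()/sf_buf_free(), which live elsewhere in the tree:

    /* Kernel-context sketch: copy data out of a page via a transient mapping. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/sf_buf.h>

    static void
    copy_from_page(struct vm_page *m, void *dst, size_t len)
    {
        struct sf_buf *sf;

        sf = sf_buf_alloc(m, 0);                /* map the page into KVA */
        bcopy((void *)sf_buf_kva(sf), dst, len);
        sf_buf_free(sf);                        /* drop the mapping */
    }
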
diff --git a/sys/mips/include/sigframe.h b/sys/mips/include/sigframe.h
new file mode 100644
index 0000000..6919882
--- /dev/null
+++ b/sys/mips/include/sigframe.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 1999 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/alpha/include/sigframe.h,v 1.1 1999/09/29 15:06:26 marcel
+ * from: sigframe.h,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+#ifndef _MACHINE_SIGFRAME_H_
+#define _MACHINE_SIGFRAME_H_
+
+/*
+ * WARNING: code in locore.s assumes the layout shown for sf_signum
+ * through sf_addr, so do not alter those fields!
+ */
+struct sigframe {
+ register_t sf_signum;
+ register_t sf_siginfo; /* code or pointer to sf_si */
+ register_t sf_ucontext; /* points to sf_uc */
+ register_t sf_addr; /* undocumented 4th arg */
+ ucontext_t sf_uc; /* = *sf_ucontext */
+ siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case) */
+ unsigned long __spare__[2];
+};
+
+#endif /* !_MACHINE_SIGFRAME_H_ */
diff --git a/sys/mips/include/signal.h b/sys/mips/include/signal.h
new file mode 100644
index 0000000..5107af0
--- /dev/null
+++ b/sys/mips/include/signal.h
@@ -0,0 +1,80 @@
+/* $OpenBSD: signal.h,v 1.2 1999/01/27 04:10:03 imp Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.1 (Berkeley) 6/10/93
+ * JNPR: signal.h,v 1.4 2007/01/08 04:58:37 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define _MACHINE_SIGNAL_H_
+
+#include <sys/cdefs.h>
+#include <sys/_sigset.h>
+
+/*
+ * Machine-dependent signal definitions
+ */
+
+typedef int sig_atomic_t;
+
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler. It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ */
+
+struct sigcontext {
+ /*
+ * The fields following 'sc_mask' must match the definition
+ * of struct __mcontext. That way we can support
+ * struct sigcontext and ucontext_t at the same
+ * time.
+ */
+ __sigset_t sc_mask; /* signal mask to restore */
+ int sc_onstack; /* sigstack state to restore */
+ __register_t sc_pc; /* pc at time of signal */
+ __register_t sc_regs[32]; /* processor regs 0 to 31 */
+ __register_t mullo, mulhi; /* mullo and mulhi registers... */
+ int sc_fpused; /* fp has been used */
+ f_register_t sc_fpregs[33]; /* fp regs 0 to 31 and csr */
+ __register_t sc_fpc_eir; /* fp exception instruction reg */
+ int xxx[8]; /* XXX reserved */
+};
+
+#endif /* !_ANSI_SOURCE && !_POSIX_SOURCE */
+
+#endif /* !_MACHINE_SIGNAL_H_ */
diff --git a/sys/mips/include/smp.h b/sys/mips/include/smp.h
new file mode 100644
index 0000000..d0ac25d
--- /dev/null
+++ b/sys/mips/include/smp.h
@@ -0,0 +1,43 @@
+/*-
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
+ *
+ * from: src/sys/alpha/include/smp.h,v 1.8 2005/01/05 20:05:50 imp
+ * JNPR: smp.h,v 1.3 2006/12/02 09:53:41 katta
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _MACHINE_SMP_H_
+#define _MACHINE_SMP_H_
+
+#ifdef _KERNEL
+
+/*
+ * Interprocessor interrupts for SMP.
+ */
+#define IPI_INVLTLB 0x0001
+#define IPI_RENDEZVOUS 0x0002
+#define IPI_AST 0x0004
+#define IPI_STOP 0x0008
+
+#ifndef LOCORE
+
+extern u_int32_t boot_cpu_id;
+
+void ipi_selected(u_int cpus, u_int32_t ipi);
+void ipi_all(u_int32_t ipi);
+void ipi_all_but_self(u_int32_t ipi);
+void ipi_self(u_int32_t ipi);
+intrmask_t smp_handle_ipi(struct trapframe *frame);
+void smp_init_secondary(u_int32_t cpuid);
+void mips_ipi_send(int thread_id);
+
+#endif /* !LOCORE */
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_SMP_H_ */
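
A hedged kernel-context sketch of posting these IPIs with the functions declared above (whether handlers accept combined masks is not specified here, so one flag is sent at a time):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <machine/smp.h>

    /* Sketch: poke the other CPUs. */
    static void
    ipi_example(void)
    {
        ipi_all_but_self(IPI_AST);      /* ask every other CPU to run ASTs */
        ipi_selected(1 << 2, IPI_STOP); /* stop CPU 2 only (mask of cpu ids) */
    }
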
diff --git a/sys/mips/include/stdarg.h b/sys/mips/include/stdarg.h
new file mode 100644
index 0000000..802ea73
--- /dev/null
+++ b/sys/mips/include/stdarg.h
@@ -0,0 +1,144 @@
+/*
+ * JNPR: stdarg.h,v 1.3 2006/09/15 12:52:34 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define _MACHINE_STDARG_H_
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+
+#if __GNUC__ >= 3
+
+#ifndef _VA_LIST_DECLARED
+#define _VA_LIST_DECLARED
+typedef __va_list va_list;
+#endif
+#define va_start(v,l) __builtin_va_start((v),l)
+#define va_end __builtin_va_end
+#define va_arg __builtin_va_arg
+#define va_copy __builtin_va_copy
+
+#else /* __GNUC__ */
+
+
+/* ---------------------------------------- */
+/* VARARGS for MIPS/GNU CC */
+/* ---------------------------------------- */
+
+#include <machine/endian.h>
+
+/* These macros implement varargs for GNU C--either traditional or ANSI. */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef char * __gnuc_va_list;
+typedef __gnuc_va_list va_list;
+
+#endif /* ! __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+
+#ifndef _VA_MIPS_H_ENUM
+#define _VA_MIPS_H_ENUM
+enum {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+#endif
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+
+#define va_start(__AP, __LASTARG) \
+ (__AP = (__gnuc_va_list) __builtin_next_arg (__LASTARG))
+
+#define va_end(__AP) ((void)0)
+
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+/* The __mips64 cases are reversed from the 32 bit cases, because the standard
+ 32 bit calling convention left-aligns all parameters smaller than a word,
+ whereas the __mips64 calling convention does not (and hence they are
+ right aligned). */
+
+#ifdef __mips64
+
+#define __va_rounded_size(__TYPE) (((sizeof (__TYPE) + 8 - 1) / 8) * 8)
+
+#define __va_reg_size 8
+
+#if defined(__MIPSEB__) || (BYTE_ORDER == BIG_ENDIAN)
+#define va_arg(__AP, __type) \
+ ((__type *) (void *) (__AP = (char *) \
+ ((((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8) \
+ + __va_rounded_size (__type))))[-1]
+#else /* ! __MIPSEB__ && !BYTE_ORDER == BIG_ENDIAN */
+#define va_arg(__AP, __type) \
+ ((__AP = (char *) ((((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8) \
+ + __va_rounded_size (__type))), \
+ *(__type *) (void *) (__AP - __va_rounded_size (__type)))
+#endif /* ! __MIPSEB__ && !BYTE_ORDER == BIG_ENDIAN */
+
+#else /* ! __mips64 */
+
+#define __va_rounded_size(__TYPE) \
+ (((sizeof (__TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+
+#define __va_reg_size 4
+
+#if defined(__MIPSEB__) || (BYTE_ORDER == BIG_ENDIAN)
+/* For big-endian machines. */
+#define va_arg(__AP, __type) \
+ ((__AP = (char *) ((__alignof__ (__type) > 4 \
+ ? ((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8 \
+ : ((__PTRDIFF_TYPE__)__AP + 4 - 1) & -4) \
+ + __va_rounded_size (__type))), \
+ *(__type *) (void *) (__AP - __va_rounded_size (__type)))
+#else /* ! __MIPSEB__ && !BYTE_ORDER == BIG_ENDIAN */
+/* For little-endian machines. */
+#define va_arg(__AP, __type) \
+ ((__type *) (void *) (__AP = (char *) ((__alignof__(__type) > 4 \
+ ? ((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8 \
+ : ((__PTRDIFF_TYPE__)__AP + 4 - 1) & -4) \
+ + __va_rounded_size(__type))))[-1]
+#endif /* ! __MIPSEB__ && !BYTE_ORDER == BIG_ENDIAN */
+#endif /* ! __mips64 */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+#define va_copy(dest, src) (dest) = (src)
+
+#endif /* __GNUC__ */
+#endif /* _MACHINE_STDARG_H_ */
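
Whichever branch of the header is in effect, callers see the standard va_list interface; a small runnable example:

    #include <stdarg.h>
    #include <stdio.h>

    /* Sum 'count' int arguments using the va_* interface defined above. */
    static int
    sum_ints(int count, ...)
    {
        va_list ap;
        int i, total = 0;

        va_start(ap, count);
        for (i = 0; i < count; i++)
            total += va_arg(ap, int);
        va_end(ap);
        return (total);
    }

    int
    main(void)
    {
        printf("%d\n", sum_ints(3, 1, 2, 3));   /* prints 6 */
        return (0);
    }
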
diff --git a/sys/mips/include/sysarch.h b/sys/mips/include/sysarch.h
new file mode 100644
index 0000000..acb3071
--- /dev/null
+++ b/sys/mips/include/sysarch.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Architecture specific syscalls (MIPS)
+ */
+#ifndef _MACHINE_SYSARCH_H_
+#define _MACHINE_SYSARCH_H_
+
+#ifndef _KERNEL
+#include <sys/cdefs.h>
+
+#if 0
+/* Something useful for each MIPS platform. */
+#else
+#define mips_tcb_set(tcb) do {} while (0)
+#define mips_tcb_get() NULL
+#endif /* 0 */
+
+__BEGIN_DECLS
+int sysarch(int, void *);
+__END_DECLS
+#endif
+
+#endif /* !_MACHINE_SYSARCH_H_ */
diff --git a/sys/mips/include/timerreg.h b/sys/mips/include/timerreg.h
new file mode 100644
index 0000000..0ab7d40
--- /dev/null
+++ b/sys/mips/include/timerreg.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (C) 2005 TAKAHASHI Yoshihiro. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * The outputs of the three timers are connected as follows:
+ *
+ * timer 0 -> irq 0
+ * timer 1 -> dma chan 0 (for dram refresh)
+ * timer 2 -> speaker (via keyboard controller)
+ *
+ * Timer 0 is used to call hardclock.
+ * Timer 2 is used to generate console beeps.
+ */
+
+#ifndef _MACHINE_TIMERREG_H_
+#define _MACHINE_TIMERREG_H_
+
+#ifdef _KERNEL
+
+#include <dev/ic/i8253reg.h>
+
+#define IO_TIMER1 0x40 /* 8253 Timer #1 */
+#define TIMER_CNTR0 (IO_TIMER1 + TIMER_REG_CNTR0)
+#define TIMER_CNTR1 (IO_TIMER1 + TIMER_REG_CNTR1)
+#define TIMER_CNTR2 (IO_TIMER1 + TIMER_REG_CNTR2)
+#define TIMER_MODE (IO_TIMER1 + TIMER_REG_MODE)
+
+#define timer_spkr_acquire() \
+ acquire_timer2(TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT)
+#define timer_spkr_release() \
+ release_timer2()
+
+#define spkr_set_pitch(pitch) \
+ do { \
+ outb(TIMER_CNTR2, (pitch) & 0xff); \
+ outb(TIMER_CNTR2, (pitch) >> 8); \
+ } while(0)
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_TIMERREG_H_ */
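
spkr_set_pitch() loads timer 2's 16-bit divisor low byte first; the divisor itself is derived from the i8254 input clock, conventionally 1193182 Hz (an assumption not stated in this header). A kernel-context sketch:

    #include <machine/timerreg.h>

    #define I8254_FREQ  1193182     /* assumption: canonical i8254 clock, Hz */

    /* Kernel-context sketch: start and stop a ~440 Hz tone on the speaker. */
    static void
    beep_start(void)
    {
        timer_spkr_acquire();
        spkr_set_pitch(I8254_FREQ / 440);
    }

    static void
    beep_stop(void)
    {
        timer_spkr_release();
    }
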
diff --git a/sys/mips/include/trap.h b/sys/mips/include/trap.h
new file mode 100644
index 0000000..a00ca90
--- /dev/null
+++ b/sys/mips/include/trap.h
@@ -0,0 +1,117 @@
+/* $OpenBSD: trap.h,v 1.3 1999/01/27 04:46:06 imp Exp $ */
+
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: trap.h 1.1 90/07/09
+ * from: @(#)trap.h 8.1 (Berkeley) 6/10/93
+ * JNPR: trap.h,v 1.3 2006/12/02 09:53:41 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TRAP_H_
+#define _MACHINE_TRAP_H_
+
+/*
+ * Trap codes also known in trap.c for name strings.
+ * Used for indexing so modify with care.
+ */
+
+#define T_INT 0 /* Interrupt pending */
+#define T_TLB_MOD 1 /* TLB modified fault */
+#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS 3 /* TLB miss on a store */
+#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST 5 /* Address error on a store */
+#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
+#define T_SYSCALL 8 /* System call */
+#define T_BREAK 9 /* Breakpoint */
+#define T_RES_INST 10 /* Reserved instruction exception */
+#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
+#define T_OVFLOW 12 /* Arithmetic overflow */
+#define T_TRAP 13 /* Trap instruction */
+#define T_VCEI 14 /* Virtual coherency instruction */
+#define T_FPE 15 /* Floating point exception */
+#define T_IWATCH 16 /* Inst. Watch address reference */
+#define T_C2E 18 /* Exception from coprocessor 2 */
+#define T_DWATCH 23 /* Data Watch address reference */
+#define T_MCHECK 24 /* Received an MCHECK */
+#define T_VCED 31 /* Virtual coherency data */
+
+#define T_USER 0x20 /* user-mode flag or'ed with type */
+
+#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
+
+struct trapdebug { /* trap history buffer for debugging */
+ u_int status;
+ u_int cause;
+ u_int vadr;
+ u_int pc;
+ u_int ra;
+ u_int sp;
+ u_int code;
+};
+
+#define trapdebug_enter(x, cd) { \
+ intrmask_t s = disableintr(); \
+ trp->status = x->sr; \
+ trp->cause = x->cause; \
+ trp->vadr = x->badvaddr; \
+ trp->pc = x->pc; \
+ trp->sp = x->sp; \
+ trp->ra = x->ra; \
+ trp->code = cd; \
+ if (++trp == &trapdebug[TRAPSIZE]) \
+ trp = trapdebug; \
+ restoreintr(s); \
+}
+
+#define TRAPSIZE 10 /* Trap log buffer length */
+extern struct trapdebug trapdebug[TRAPSIZE], *trp;
+
+void trapDump(char *msg);
+
+#else
+
+#define trapdebug_enter(x, cd)
+
+#endif
+
+#ifndef LOCORE /* XXX */
+int check_address(void *);
+void platform_trap_enter(void);
+void platform_trap_exit(void);
+#endif
+
+#endif /* !_MACHINE_TRAP_H_ */
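
trap.c keeps a parallel table of name strings indexed by these codes, which is why the comment warns against reordering them; a standalone sketch of that kind of lookup (the table below is illustrative, not the kernel's):

    #include <stdio.h>

    #define T_INT       0
    #define T_SYSCALL   8
    #define T_BREAK     9
    #define T_USER      0x20

    /* Illustrative only: a few of the name strings, indexed by trap code. */
    static const char *trap_names[] = {
        [T_INT]     = "interrupt",
        [T_SYSCALL] = "syscall",
        [T_BREAK]   = "breakpoint",
    };

    int
    main(void)
    {
        int type = T_SYSCALL | T_USER;

        printf("trap: %s (%s mode)\n",
            trap_names[type & ~T_USER],
            (type & T_USER) ? "user" : "kernel");
        return (0);
    }
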
diff --git a/sys/mips/include/ucontext.h b/sys/mips/include/ucontext.h
new file mode 100644
index 0000000..d9dfe4e
--- /dev/null
+++ b/sys/mips/include/ucontext.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ucontext.h 8.1 (Berkeley) 6/10/93
+ * JNPR: ucontext.h,v 1.2 2007/08/09 11:23:32 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_UCONTEXT_H_
+#define _MACHINE_UCONTEXT_H_
+
+#ifndef _LOCORE
+
+typedef struct __mcontext {
+ /*
+ * These fields must match the corresponding fields in struct
+ * sigcontext which follow 'sc_mask'. That way we can support
+ * struct sigcontext and ucontext_t at the same time.
+ */
+ int mc_onstack; /* sigstack state to restore */
+ register_t mc_pc; /* pc at time of signal */
+ register_t mc_regs[32]; /* processor regs 0 to 31 */
+ register_t sr; /* status register */
+ register_t mullo, mulhi; /* mullo and mulhi registers... */
+ int mc_fpused; /* fp has been used */
+ f_register_t mc_fpregs[33]; /* fp regs 0 to 31 and csr */
+ register_t mc_fpc_eir; /* fp exception instruction reg */
+ int __spare__[8]; /* XXX reserved */
+} mcontext_t;
+#endif
+
+#define SZREG 4
+
+/* offsets into mcontext_t */
+#define UCTX_REG(x) (8 + (x)*SZREG)
+
+#define UCR_ZERO UCTX_REG(0)
+#define UCR_AT UCTX_REG(1)
+#define UCR_V0 UCTX_REG(2)
+#define UCR_V1 UCTX_REG(3)
+#define UCR_A0 UCTX_REG(4)
+#define UCR_A1 UCTX_REG(5)
+#define UCR_A2 UCTX_REG(6)
+#define UCR_A3 UCTX_REG(7)
+#define UCR_T0 UCTX_REG(8)
+#define UCR_T1 UCTX_REG(9)
+#define UCR_T2 UCTX_REG(10)
+#define UCR_T3 UCTX_REG(11)
+#define UCR_T4 UCTX_REG(12)
+#define UCR_T5 UCTX_REG(13)
+#define UCR_T6 UCTX_REG(14)
+#define UCR_T7 UCTX_REG(15)
+#define UCR_S0 UCTX_REG(16)
+#define UCR_S1 UCTX_REG(17)
+#define UCR_S2 UCTX_REG(18)
+#define UCR_S3 UCTX_REG(19)
+#define UCR_S4 UCTX_REG(20)
+#define UCR_S5 UCTX_REG(21)
+#define UCR_S6 UCTX_REG(22)
+#define UCR_S7 UCTX_REG(23)
+#define UCR_T8 UCTX_REG(24)
+#define UCR_T9 UCTX_REG(25)
+#define UCR_K0 UCTX_REG(26)
+#define UCR_K1 UCTX_REG(27)
+#define UCR_GP UCTX_REG(28)
+#define UCR_SP UCTX_REG(29)
+#define UCR_S8 UCTX_REG(30)
+#define UCR_RA UCTX_REG(31)
+#define UCR_SR UCTX_REG(32)
+#define UCR_MDLO UCTX_REG(33)
+#define UCR_MDHI UCTX_REG(34)
+
+#endif /* !_MACHINE_UCONTEXT_H_ */
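
The UCTX_REG() offsets assume 4-byte registers and skip the leading mc_onstack and mc_pc words, so for example UCR_SP = 8 + 29 * 4 = 124 bytes into the mcontext; a standalone sketch checking a couple of offsets against an equivalent 32-bit layout:

    /* Sketch: sanity-check a few UCR_* offsets against a 32-bit mcontext. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef int register_t;         /* assumption: 32-bit ABI (SZREG == 4) */

    struct mcontext32 {
        int        mc_onstack;
        register_t mc_pc;
        register_t mc_regs[32];
        register_t sr;
        register_t mullo, mulhi;
    };

    #define SZREG       4
    #define UCTX_REG(x) (8 + (x) * SZREG)
    #define UCR_SP      UCTX_REG(29)
    #define UCR_SR      UCTX_REG(32)

    int
    main(void)
    {
        assert(offsetof(struct mcontext32, mc_regs[29]) == UCR_SP); /* 124 */
        assert(offsetof(struct mcontext32, sr) == UCR_SR);          /* 136 */
        printf("UCR_SP=%d UCR_SR=%d\n", UCR_SP, UCR_SR);
        return (0);
    }
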
diff --git a/sys/mips/include/varargs.h b/sys/mips/include/varargs.h
new file mode 100644
index 0000000..c916f33
--- /dev/null
+++ b/sys/mips/include/varargs.h
@@ -0,0 +1,59 @@
+/* $NetBSD: varargs.h,v 1.16 1999/01/22 14:19:54 mycroft Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)varargs.h 8.2 (Berkeley) 3/22/94
+ * JNPR: varargs.h,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+#ifndef _MIPS_VARARGS_H_
+#define _MIPS_VARARGS_H_
+
+#include <machine/stdarg.h>
+
+#if __GNUC__ == 1
+#define __va_ellipsis
+#else
+#define __va_ellipsis ...
+#endif
+
+#define va_alist __builtin_va_alist
+#define va_dcl long __builtin_va_alist; __va_ellipsis
+
+#undef va_start
+#define va_start(ap) \
+ ((ap) = (va_list)&__builtin_va_alist)
+
+#endif /* !_MIPS_VARARGS_H_ */
diff --git a/sys/mips/include/vmparam.h b/sys/mips/include/vmparam.h
new file mode 100644
index 0000000..a524293
--- /dev/null
+++ b/sys/mips/include/vmparam.h
@@ -0,0 +1,201 @@
+/* $OpenBSD: vmparam.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */
+/* $NetBSD: vmparam.h,v 1.5 1994/10/26 21:10:10 cgd Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: vmparam.h 1.16 91/01/18
+ * @(#)vmparam.h 8.2 (Berkeley) 4/22/94
+ * JNPR: vmparam.h,v 1.3.2.1 2007/09/10 06:01:28 girish
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+
+/*
+ * Machine dependent constants for MIPS processors.
+ */
+/*
+ * USRTEXT is the start of the user text/data space, while USRSTACK
+ * is the top (end) of the user stack.
+ */
+#define USRTEXT (1*PAGE_SIZE)
+/*
+ * USRSTACK needs to start a little below 0x80000000 because the R8000
+ * and some QED CPUs perform some virtual address checks before the
+ * offset is calculated.
+ */
+#define USRSTACK 0x7ffff000 /* Start of user stack */
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ (128UL*1024*1024) /* max text size */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ (128UL*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (1*1024UL*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (8UL*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (64UL*1024*1024) /* max stack size */
+#endif
+#ifndef SGROWSIZ
+#define SGROWSIZ (128UL*1024) /* amount to grow stack */
+#endif
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * Mach derived constants
+ */
+
+/* user/kernel map constants */
+#define VM_MIN_ADDRESS ((vm_offset_t)0x00000000)
+#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x80000000)
+#define VM_MAX_MMAP_ADDR VM_MAXUSER_ADDRESS
+#define VM_MAX_ADDRESS ((vm_offset_t)0x80000000)
+
+#ifndef VM_KERNEL_ALLOC_OFFSET
+#define VM_KERNEL_ALLOC_OFFSET ((vm_offset_t)0x00000000)
+#endif
+
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0xC0000000)
+#define VM_KERNEL_WIRED_ADDR_END (VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET)
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFFFFC000)
+
+/*
+ * Disable superpage reservations. (not sure if this is right
+ * I copied it from ARM)
+ */
+#ifndef VM_NRESERVLEVEL
+#define VM_NRESERVLEVEL 0
+#endif
+
+
+/* virtual sizes (bytes) for various kernel submaps */
+#ifndef VM_KMEM_SIZE
+#define VM_KMEM_SIZE (12 * 1024 * 1024)
+#endif
+
+/*
+ * How many physical pages per KVA page allocated.
+ * min(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE), VM_KMEM_SIZE_MAX)
+ * is the total KVA space allocated for kmem_map.
+ */
+#ifndef VM_KMEM_SIZE_SCALE
+#define VM_KMEM_SIZE_SCALE (3)
+#endif
+
+/*
+ * Ceiling on amount of kmem_map kva space.
+ */
+#ifndef VM_KMEM_SIZE_MAX
+#define VM_KMEM_SIZE_MAX (200 * 1024 * 1024)
+#endif
+
+/* initial pagein size of beginning of executable file */
+#ifndef VM_INITIAL_PAGEIN
+#define VM_INITIAL_PAGEIN 16
+#endif
+
+/*
+ * max number of non-contig chunks of physical RAM you can have
+ */
+#define VM_PHYSSEG_MAX 32
+
+/*
+ * The physical address space is densely populated.
+ */
+#define VM_PHYSSEG_DENSE
+
+/*
+ * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool
+ * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
+ * the pool from which physical pages for small UMA objects are
+ * allocated.
+ */
+#define VM_NFREEPOOL 3
+#define VM_FREEPOOL_CACHE 2
+#define VM_FREEPOOL_DEFAULT 0
+#define VM_FREEPOOL_DIRECT 1
+
+/*
+ * We support one free list:
+ *
+ * - DEFAULT for all systems
+ */
+
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
+/*
+ * The largest allocation size is 1MB.
+ */
+#define VM_NFREEORDER 9
+
+/*
+ * XXXMIPS: These values need to be changed!
+ */
+#if 0
+#define VM_MIN_ADDRESS ((vm_offset_t)0x0000000000010000)
+#define VM_MAXUSER_ADDRESS ((vm_offset_t)MIPS_KSEG0_START-1)
+#define VM_MAX_ADDRESS ((vm_offset_t)0x0000000100000000)
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)MIPS_KSEG3_START)
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)MIPS_KSEG3_END)
+#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+
+/* virtual sizes (bytes) for various kernel submaps */
+#define VM_KMEM_SIZE (16*1024*1024) /* XXX ??? */
+#endif
+
+#define NBSEG 0x400000 /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+
+#endif /* !_MACHINE_VMPARAM_H_ */
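
As a worked example of the kmem sizing comment above: with 256 MB of RAM and VM_KMEM_SIZE_SCALE = 3, physmem / 3 is about 85 MB, which clears the 12 MB floor and stays under the 200 MB ceiling, so the kmem map would get roughly 85 MB. A standalone sketch of the clamp:

    #include <stdio.h>

    #define MB                  (1024UL * 1024)
    #define VM_KMEM_SIZE        (12 * MB)
    #define VM_KMEM_SIZE_SCALE  3
    #define VM_KMEM_SIZE_MAX    (200 * MB)

    #define MIN(a, b)   ((a) < (b) ? (a) : (b))
    #define MAX(a, b)   ((a) > (b) ? (a) : (b))

    int
    main(void)
    {
        unsigned long physmem = 256 * MB;
        unsigned long kmem;

        /* min(max(VM_KMEM_SIZE, physmem / SCALE), VM_KMEM_SIZE_MAX) */
        kmem = MIN(MAX(VM_KMEM_SIZE, physmem / VM_KMEM_SIZE_SCALE),
            VM_KMEM_SIZE_MAX);
        printf("kmem map: %lu MB\n", kmem / MB);    /* ~85 MB */
        return (0);
    }
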
diff --git a/sys/mips/mips/autoconf.c b/sys/mips/mips/autoconf.c
new file mode 100644
index 0000000..99fd541
--- /dev/null
+++ b/sys/mips/mips/autoconf.c
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)autoconf.c 7.1 (Berkeley) 5/9/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Setup the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring. Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+#include "opt_bootp.h"
+#include "opt_bus.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/reboot.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/cons.h>
+
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/ethernet.h>
+#include <netinet/in.h>
+
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+
+static void configure_first(void *);
+static void configure(void *);
+static void configure_final(void *);
+
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+/* SI_ORDER_SECOND is hookable */
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+/* SI_ORDER_MIDDLE is hookable */
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+static void
+configure_first(dummy)
+ void *dummy;
+{
+
+ /* nexus0 is the top of the mips device tree */
+ device_add_child(root_bus, "nexus", 0);
+}
+
+static void
+configure(dummy)
+ void *dummy;
+{
+
+ /* initialize new bus architecture */
+ root_bus_configure();
+}
+
+static void
+configure_final(dummy)
+ void *dummy;
+{
+
+ cninit_finish();
+
+ if (bootverbose)
+ printf("Device configuration finished.\n");
+
+ cold = 0;
+}
diff --git a/sys/mips/mips/busdma_machdep.c b/sys/mips/mips/busdma_machdep.c
new file mode 100644
index 0000000..b51330f
--- /dev/null
+++ b/sys/mips/mips/busdma_machdep.c
@@ -0,0 +1,841 @@
+/*-
+ * Copyright (c) 2006 Fill this file and put your name here
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#define NO_DMA
+
+/*-
+ * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/uio.h>
+#include <sys/ktr.h>
+#include <sys/kernel.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/cache.h>
+#include <machine/cpufunc.h>
+
+struct bus_dma_tag {
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+ /* XXX: machine-dependent fields */
+ vm_offset_t _physbase;
+ vm_offset_t _wbase;
+ vm_offset_t _wsize;
+};
+
+#define DMAMAP_LINEAR 0x1
+#define DMAMAP_MBUF 0x2
+#define DMAMAP_UIO 0x4
+#define DMAMAP_ALLOCATED 0x10
+#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
+#define DMAMAP_COHERENT 0x8
+struct bus_dmamap {
+ bus_dma_tag_t dmat;
+ int flags;
+ void *buffer;
+ void *origbuffer;
+ void *allocbuffer;
+ TAILQ_ENTRY(bus_dmamap) freelist;
+ int len;
+};
+
+static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
+ TAILQ_HEAD_INITIALIZER(dmamap_freelist);
+
+#define BUSDMA_STATIC_MAPS 500
+static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];
+
+static struct mtx busdma_mtx;
+
+MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
+
+static void
+mips_dmamap_freelist_init(void *dummy)
+{
+ int i;
+
+ for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
+ TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
+}
+
+SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
+
+/*
+ * Check to see if the specified page is in an allowed DMA range.
+ */
+
+static __inline int
+bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
+ bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
+ int flags, vm_offset_t *lastaddrp, int *segp);
+
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example). Drivers that don't provide their own locks
+ * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+ struct mtx *dmtx;
+
+ dmtx = (struct mtx *)arg;
+ switch (op) {
+ case BUS_DMA_LOCK:
+ mtx_lock(dmtx);
+ break;
+ case BUS_DMA_UNLOCK:
+ mtx_unlock(dmtx);
+ break;
+ default:
+ panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+ }
+}
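+
+/*
+ * Usage sketch (hypothetical "mydev" names): a driver that serializes its DMA
+ * callbacks with its own mutex can either pass busdma_lock_mutex above with
+ * that mutex as lockfuncarg when creating its tag, or supply an equivalent
+ * callback of its own, e.g.:
+ */
+#if 0	/* example only */
+static void
+mydev_busdma_lock(void *arg, bus_dma_lock_op_t op)
+{
+	struct mtx *sc_mtx = arg;	/* the driver softc's mutex */
+
+	if (op == BUS_DMA_LOCK)
+		mtx_lock(sc_mtx);
+	else
+		mtx_unlock(sc_mtx);
+}
+#endif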
+
+/*
+ * dflt_lock should never get called. It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are meant to never be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+#ifndef NO_DMA
+static void
+dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+#ifdef INVARIANTS
+ panic("driver error: busdma dflt_lock called");
+#else
+ printf("DRIVER_ERROR: busdma dflt_lock called\n");
+#endif
+}
+#endif
+
+static __inline bus_dmamap_t
+_busdma_alloc_dmamap(void)
+{
+ bus_dmamap_t map;
+
+ mtx_lock(&busdma_mtx);
+ map = TAILQ_FIRST(&dmamap_freelist);
+ if (map)
+ TAILQ_REMOVE(&dmamap_freelist, map, freelist);
+ mtx_unlock(&busdma_mtx);
+ if (!map) {
+ map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (map)
+ map->flags = DMAMAP_ALLOCATED;
+ } else
+ map->flags = 0;
+ return (map);
+}
+
+static __inline void
+_busdma_free_dmamap(bus_dmamap_t map)
+{
+ if (map->flags & DMAMAP_ALLOCATED)
+ free(map, M_DEVBUF);
+ else {
+ mtx_lock(&busdma_mtx);
+ TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
+ mtx_unlock(&busdma_mtx);
+ }
+}
+
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter,
+ void *filterarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+#ifndef NO_DMA
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+ /* Basic sanity checking */
+ if (boundary != 0 && boundary < maxsegsz)
+ maxsegsz = boundary;
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+
+ newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
+ M_ZERO | M_NOWAIT);
+ if (newtag == NULL) {
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, 0, error);
+ return (ENOMEM);
+ }
+
+ newtag->parent = parent;
+ newtag->alignment = alignment;
+ newtag->boundary = boundary;
+ newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
+ newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
+ (PAGE_SIZE - 1);
+ newtag->filter = filter;
+ newtag->filterarg = filterarg;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
+ newtag->maxsegsz = maxsegsz;
+ newtag->flags = flags;
+ newtag->ref_count = 1; /* Count ourself */
+ newtag->map_count = 0;
+ newtag->_wbase = 0;
+ newtag->_physbase = 0;
+	/* XXXMIPS: Should we limit window size to amount of physical memory? */
+ newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START;
+ if (lockfunc != NULL) {
+ newtag->lockfunc = lockfunc;
+ newtag->lockfuncarg = lockfuncarg;
+ } else {
+ newtag->lockfunc = dflt_lock;
+ newtag->lockfuncarg = NULL;
+ }
+
+ /* Take into account any restrictions imposed by our parent tag */
+ if (parent != NULL) {
+ newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
+ if (newtag->boundary == 0)
+ newtag->boundary = parent->boundary;
+ else if (parent->boundary != 0)
+ newtag->boundary = MIN(parent->boundary,
+ newtag->boundary);
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
+ }
+ if (newtag->parent != NULL)
+ atomic_add_int(&parent->ref_count, 1);
+ }
+
+ if (error != 0) {
+ free(newtag, M_DEVBUF);
+ } else {
+ *dmat = newtag;
+ }
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
+ return (error);
+#else
+ return ENOSYS;
+#endif
+
+}
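+
+/*
+ * Usage sketch for the function above (hypothetical "mydev" driver; the
+ * BUS_SPACE_MAXADDR* constants are assumed to come from <machine/bus.h>):
+ * a tag for single-segment, 4-byte aligned buffers of up to MCLBYTES,
+ * serialized through busdma_lock_mutex and Giant.
+ */
+#if 0	/* example only */
+static bus_dma_tag_t mydev_dmat;
+
+static int
+mydev_create_dma_tag(void)
+{
+
+	return (bus_dma_tag_create(NULL,	/* parent */
+	    4, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    MCLBYTES, 1, MCLBYTES,		/* maxsize, nsegments, maxsegsz */
+	    0,					/* flags */
+	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
+	    &mydev_dmat));
+}
+#endif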
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+#ifdef KTR
+ bus_dma_tag_t dmat_copy = dmat;
+#endif
+
+ if (dmat != NULL) {
+
+ if (dmat->map_count != 0)
+ return (EBUSY);
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ atomic_subtract_int(&dmat->ref_count, 1);
+ if (dmat->ref_count == 0) {
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+ CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
+
+ return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ bus_dmamap_t newmap;
+#ifdef KTR
+ int error = 0;
+#endif
+
+ newmap = _busdma_alloc_dmamap();
+ if (newmap == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+ *mapp = newmap;
+ newmap->dmat = dmat;
+ dmat->map_count++;
+
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, error);
+
+ return (0);
+
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ _busdma_free_dmamap(map);
+ dmat->map_count--;
+ CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
+ return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ bus_dmamap_t newmap = NULL;
+
+ int mflags;
+
+ if (flags & BUS_DMA_NOWAIT)
+ mflags = M_NOWAIT;
+ else
+ mflags = M_WAITOK;
+ if (flags & BUS_DMA_ZERO)
+ mflags |= M_ZERO;
+
+ newmap = _busdma_alloc_dmamap();
+ if (newmap == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, ENOMEM);
+ return (ENOMEM);
+ }
+ dmat->map_count++;
+ *mapp = newmap;
+ newmap->dmat = dmat;
+
+ if (dmat->maxsize <= PAGE_SIZE) {
+ *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
+ } else {
+ /*
+ * XXX Use Contigmalloc until it is merged into this facility
+ * and handles multi-seg allocations. Nobody is doing
+ * multi-seg allocations yet though.
+ */
+ vm_paddr_t maxphys;
+ if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
+			/* Note: in the else case I just pass along what was
+			 * already being passed in dmat->lowaddr.  I am not
+			 * sure how this would have worked, since lowaddr is
+			 * in the max address position; I would have thought
+			 * the caller would have wanted dmat->highaddr,
+			 * presuming they are asking for physical addresses,
+			 * which is what contigmalloc takes. - RRS
+			 */
+ maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
+ } else {
+ maxphys = dmat->lowaddr;
+ }
+ *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
+ 0ul, maxphys, dmat->alignment? dmat->alignment : 1ul,
+ dmat->boundary);
+ }
+ if (*vaddr == NULL) {
+ if (newmap != NULL) {
+ _busdma_free_dmamap(newmap);
+ dmat->map_count--;
+ }
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+ if (flags & BUS_DMA_COHERENT) {
+ void *tmpaddr = (void *)*vaddr;
+
+ if (tmpaddr) {
+ tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
+ newmap->origbuffer = *vaddr;
+ newmap->allocbuffer = tmpaddr;
+ mips_dcache_wbinv_range((vm_offset_t)*vaddr,
+ dmat->maxsize);
+ *vaddr = tmpaddr;
+ } else
+ newmap->origbuffer = newmap->allocbuffer = NULL;
+ } else
+ newmap->origbuffer = newmap->allocbuffer = NULL;
+ return (0);
+
+}
+
+/*
+ * Free a piece of memory and its allocated dmamap that were allocated
+ * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ if (map->allocbuffer) {
+ KASSERT(map->allocbuffer == vaddr,
+		    ("Trying to free the wrong DMA buffer"));
+ vaddr = map->origbuffer;
+ }
+ if (dmat->maxsize <= PAGE_SIZE)
+ free(vaddr, M_DEVBUF);
+ else {
+ contigfree(vaddr, dmat->maxsize, M_DEVBUF);
+ }
+ dmat->map_count--;
+ _busdma_free_dmamap(map);
+ CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
+
+}
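+
+/*
+ * Usage sketch for bus_dmamem_alloc()/bus_dmamem_free() above (hypothetical
+ * "mydev" names): allocate a zeroed, coherent descriptor ring and release it
+ * again on detach.
+ */
+#if 0	/* example only */
+static void *mydev_ring;
+static bus_dmamap_t mydev_ring_map;
+
+static int
+mydev_alloc_ring(bus_dma_tag_t dmat)
+{
+	int error;
+
+	error = bus_dmamem_alloc(dmat, &mydev_ring,
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &mydev_ring_map);
+	if (error != 0)
+		return (error);
+	/* ... load the ring and program the device with its bus address ... */
+	return (0);
+}
+
+static void
+mydev_free_ring(bus_dma_tag_t dmat)
+{
+
+	bus_dmamem_free(dmat, mydev_ring, mydev_ring_map);
+}
+#endif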
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static __inline int
+bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
+ bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
+ int flags, vm_offset_t *lastaddrp, int *segp)
+{
+ bus_size_t sgsize;
+ bus_size_t bmask;
+ vm_offset_t curaddr, lastaddr;
+ vm_offset_t vaddr = (vm_offset_t)buf;
+ int seg;
+ int error = 0;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(dmat->boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
+ curaddr = pmap_kextract(vaddr);
+
+ /*
+ * If we're beyond the current DMA window, indicate
+ * that and try to fall back onto something else.
+ */
+ if (curaddr < dmat->_physbase ||
+ curaddr >= (dmat->_physbase + dmat->_wsize))
+ return (EINVAL);
+
+ /*
+ * In a valid DMA range. Translate the physical
+ * memory address to an address in the DMA window.
+ */
+ curaddr = (curaddr - dmat->_physbase) + dmat->_wbase;
+
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * the previous segment if possible.
+ */
+ if (seg >= 0 && curaddr == lastaddr &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) ==
+ (curaddr & bmask))) {
+ segs[seg].ds_len += sgsize;
+ goto segdone;
+ } else {
+ if (++seg >= dmat->nsegments)
+ break;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ if (error)
+ break;
+segdone:
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0)
+ error = EFBIG;
+
+ return error;
+}
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
+{
+ vm_offset_t lastaddr = 0;
+ int error, nsegs = -1;
+#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+
+ KASSERT(dmat != NULL, ("dmatag is NULL"));
+ KASSERT(map != NULL, ("dmamap is NULL"));
+ map->flags &= ~DMAMAP_TYPE_MASK;
+ map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
+ map->buffer = buf;
+ map->len = buflen;
+ error = bus_dmamap_load_buffer(dmat,
+ dm_segments, map, buf, buflen, kernel_pmap,
+ flags, &lastaddr, &nsegs);
+
+ if (error)
+ (*callback)(callback_arg, NULL, 0, error);
+ else
+ (*callback)(callback_arg, dm_segments, nsegs + 1, error);
+
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, nsegs + 1, error);
+
+ return (0);
+
+}
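+
+/*
+ * Usage sketch for bus_dmamap_load() above (hypothetical "mydev" names): the
+ * callback receives the segment array built by bus_dmamap_load_buffer() and
+ * typically just records the bus address for the driver.
+ */
+#if 0	/* example only */
+static void
+mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+
+	if (error != 0)
+		return;
+	KASSERT(nseg == 1, ("mydev: unexpected number of DMA segments"));
+	*(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+/*
+ * ... bus_dmamap_load(dmat, map, buf, len, mydev_load_cb, &busaddr,
+ *	BUS_DMA_NOWAIT); ...
+ */
+#endif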
+
+/*
+ * Like bus_dmamap_load(), but for mbufs.
+ */
+int
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ int nsegs = -1, error = 0;
+
+ M_ASSERTPKTHDR(m0);
+
+ map->flags &= ~DMAMAP_TYPE_MASK;
+ map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
+ map->buffer = m0;
+ map->len = 0;
+
+ if (m0->m_pkthdr.len <= dmat->maxsize) {
+ vm_offset_t lastaddr = 0;
+ struct mbuf *m;
+
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ if (m->m_len > 0) {
+ error = bus_dmamap_load_buffer(dmat,
+ dm_segments, map, m->m_data, m->m_len,
+ pmap_kernel(), flags, &lastaddr, &nsegs);
+ map->len += m->m_len;
+ }
+ }
+ } else {
+ error = EINVAL;
+ }
+
+ if (error) {
+ /*
+ * force "no valid mappings" on error in callback.
+ */
+ (*callback)(callback_arg, dm_segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dm_segments, nsegs + 1,
+ m0->m_pkthdr.len, error);
+ }
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs + 1);
+
+ return (error);
+}
+
+int
+bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
+ int flags)
+{
+ int error = 0;
+
+ M_ASSERTPKTHDR(m0);
+
+ flags |= BUS_DMA_NOWAIT;
+ *nsegs = -1;
+ map->flags &= ~DMAMAP_TYPE_MASK;
+ map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
+ map->buffer = m0;
+ map->len = 0;
+
+ if (m0->m_pkthdr.len <= dmat->maxsize) {
+ vm_offset_t lastaddr = 0;
+ struct mbuf *m;
+
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ if (m->m_len > 0) {
+ error = bus_dmamap_load_buffer(dmat, segs, map,
+ m->m_data, m->m_len,
+ pmap_kernel(), flags, &lastaddr, nsegs);
+ map->len += m->m_len;
+ }
+ }
+ } else {
+ error = EINVAL;
+ }
+
+ ++*nsegs;
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, *nsegs);
+
+ return (error);
+
+}
+
+/*
+ * Like bus_dmamap_load(), but for uios.
+ */
+int
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+
+ panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
+ return (0);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+ return;
+}
+
+static __inline void
+bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
+{
+
+ switch (op) {
+ case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
+ mips_dcache_wbinv_range((vm_offset_t)buf, len);
+ break;
+
+ case BUS_DMASYNC_PREREAD:
+#if 1
+ mips_dcache_wbinv_range((vm_offset_t)buf, len);
+#else
+ mips_dcache_inv_range((vm_offset_t)buf, len);
+#endif
+ break;
+
+ case BUS_DMASYNC_PREWRITE:
+ mips_dcache_wb_range((vm_offset_t)buf, len);
+ break;
+ }
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct mbuf *m;
+ struct uio *uio;
+ int resid;
+ struct iovec *iov;
+
+
+ /*
+ * Mixing PRE and POST operations is not allowed.
+ */
+ if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
+ (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
+ panic("_bus_dmamap_sync: mix PRE and POST");
+
+ /*
+ * Since we're dealing with a virtually-indexed, write-back
+ * cache, we need to do the following things:
+ *
+ * PREREAD -- Invalidate D-cache. Note we might have
+ * to also write-back here if we have to use an Index
+ * op, or if the buffer start/end is not cache-line aligned.
+ *
+ * PREWRITE -- Write-back the D-cache. If we have to use
+ * an Index op, we also have to invalidate. Note that if
+ * we are doing PREREAD|PREWRITE, we can collapse everything
+ * into a single op.
+ *
+ * POSTREAD -- Nothing.
+ *
+ * POSTWRITE -- Nothing.
+ */
+
+ /*
+ * Flush the write buffer.
+ * XXX Is this always necessary?
+ */
+ mips_wbflush();
+
+ op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ if (op == 0)
+ return;
+
+ CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
+ switch(map->flags & DMAMAP_TYPE_MASK) {
+ case DMAMAP_LINEAR:
+ bus_dmamap_sync_buf(map->buffer, map->len, op);
+ break;
+ case DMAMAP_MBUF:
+ m = map->buffer;
+ while (m) {
+ if (m->m_len > 0)
+ bus_dmamap_sync_buf(m->m_data, m->m_len, op);
+ m = m->m_next;
+ }
+ break;
+ case DMAMAP_UIO:
+ uio = map->buffer;
+ iov = uio->uio_iov;
+ resid = uio->uio_resid;
+ for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
+ bus_size_t minlen = resid < iov[i].iov_len ? resid :
+ iov[i].iov_len;
+ if (minlen > 0) {
+ bus_dmamap_sync_buf(iov[i].iov_base, minlen, op);
+ resid -= minlen;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
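+
+/*
+ * Usage sketch for _bus_dmamap_sync() above (hypothetical "mydev" names,
+ * called through the bus_dmamap_sync() wrapper): a receive buffer is synced
+ * with PREREAD before the device writes into it and with POSTREAD before the
+ * CPU reads the result.
+ */
+#if 0	/* example only */
+static void
+mydev_start_rx(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+	/* Write back/invalidate cached lines before the device DMAs in. */
+	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREREAD);
+	/* ... start the transfer ... */
+}
+
+static void
+mydev_rx_done(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+	/* Transfer complete; the CPU may now look at the data. */
+	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
+	/* ... hand the buffer up the stack ... */
+}
+#endif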
diff --git a/sys/mips/mips/cache.c b/sys/mips/mips/cache.c
new file mode 100644
index 0000000..57be726
--- /dev/null
+++ b/sys/mips/mips/cache.c
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 2006 Fill this file and put your name here
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+#include <machine/cpuinfo.h>
+#include <machine/cache.h>
+
+struct mips_cache_ops mips_cache_ops;
+
+void
+mips_config_cache(struct mips_cpuinfo * cpuinfo)
+{
+ switch (cpuinfo->l1.ic_linesize) {
+ case 16:
+ mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_16;
+ mips_cache_ops.mco_icache_sync_range =
+ mipsNN_icache_sync_range_16;
+ mips_cache_ops.mco_icache_sync_range_index =
+ mipsNN_icache_sync_range_index_16;
+ break;
+ case 32:
+ mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_32;
+ mips_cache_ops.mco_icache_sync_range =
+ mipsNN_icache_sync_range_32;
+ mips_cache_ops.mco_icache_sync_range_index =
+ mipsNN_icache_sync_range_index_32;
+ break;
+#ifdef TARGET_OCTEON
+ case 128:
+ mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
+ mips_cache_ops.mco_icache_sync_range =
+ mipsNN_icache_sync_range_128;
+ mips_cache_ops.mco_icache_sync_range_index =
+ mipsNN_icache_sync_range_index_128;
+ break;
+#endif
+
+#ifdef MIPS_DISABLE_L1_CACHE
+ case 0:
+ mips_cache_ops.mco_icache_sync_all = cache_noop;
+ mips_cache_ops.mco_icache_sync_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_icache_sync_range_index =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ break;
+#endif
+ default:
+ panic("no Icache ops for %d byte lines",
+ cpuinfo->l1.ic_linesize);
+ }
+
+ switch (cpuinfo->l1.dc_linesize) {
+ case 16:
+ mips_cache_ops.mco_pdcache_wbinv_all =
+ mips_cache_ops.mco_intern_pdcache_wbinv_all =
+ mipsNN_pdcache_wbinv_all_16;
+ mips_cache_ops.mco_pdcache_wbinv_range =
+ mipsNN_pdcache_wbinv_range_16;
+ mips_cache_ops.mco_pdcache_wbinv_range_index =
+ mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+ mipsNN_pdcache_wbinv_range_index_16;
+ mips_cache_ops.mco_pdcache_inv_range =
+ mipsNN_pdcache_inv_range_16;
+ mips_cache_ops.mco_pdcache_wb_range =
+ mips_cache_ops.mco_intern_pdcache_wb_range =
+ mipsNN_pdcache_wb_range_16;
+ break;
+ case 32:
+ mips_cache_ops.mco_pdcache_wbinv_all =
+ mips_cache_ops.mco_intern_pdcache_wbinv_all =
+ mipsNN_pdcache_wbinv_all_32;
+ mips_cache_ops.mco_pdcache_wbinv_range =
+ mipsNN_pdcache_wbinv_range_32;
+ mips_cache_ops.mco_pdcache_wbinv_range_index =
+ mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+ mipsNN_pdcache_wbinv_range_index_32;
+ mips_cache_ops.mco_pdcache_inv_range =
+ mipsNN_pdcache_inv_range_32;
+ mips_cache_ops.mco_pdcache_wb_range =
+ mips_cache_ops.mco_intern_pdcache_wb_range =
+ mipsNN_pdcache_wb_range_32;
+ break;
+#ifdef TARGET_OCTEON
+ case 128:
+ mips_cache_ops.mco_pdcache_wbinv_all =
+ mips_cache_ops.mco_intern_pdcache_wbinv_all =
+ mipsNN_pdcache_wbinv_all_128;
+ mips_cache_ops.mco_pdcache_wbinv_range =
+ mipsNN_pdcache_wbinv_range_128;
+ mips_cache_ops.mco_pdcache_wbinv_range_index =
+ mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+ mipsNN_pdcache_wbinv_range_index_128;
+ mips_cache_ops.mco_pdcache_inv_range =
+ mipsNN_pdcache_inv_range_128;
+ mips_cache_ops.mco_pdcache_wb_range =
+ mips_cache_ops.mco_intern_pdcache_wb_range =
+ mipsNN_pdcache_wb_range_128;
+ break;
+#endif
+#ifdef MIPS_DISABLE_L1_CACHE
+ case 0:
+ mips_cache_ops.mco_pdcache_wbinv_all = cache_noop;
+ mips_cache_ops.mco_intern_pdcache_wbinv_all = cache_noop;
+ mips_cache_ops.mco_pdcache_wbinv_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_pdcache_wbinv_range_index =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_pdcache_inv_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_pdcache_wb_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_intern_pdcache_wb_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ break;
+#endif
+ default:
+ panic("no Dcache ops for %d byte lines",
+ cpuinfo->l1.dc_linesize);
+ }
+
+ mipsNN_cache_init(cpuinfo);
+
+#if 0
+ if (mips_cpu_flags &
+ (CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_I_D_CACHE_COHERENT)) {
+#ifdef CACHE_DEBUG
+ printf(" Dcache is coherent\n");
+#endif
+ mips_cache_ops.mco_pdcache_wbinv_all = cache_noop;
+ mips_cache_ops.mco_pdcache_wbinv_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_pdcache_wbinv_range_index =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_pdcache_inv_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_pdcache_wb_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ }
+ if (mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT) {
+#ifdef CACHE_DEBUG
+ printf(" Icache is coherent against Dcache\n");
+#endif
+ mips_cache_ops.mco_intern_pdcache_wbinv_all =
+ cache_noop;
+ mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ mips_cache_ops.mco_intern_pdcache_wb_range =
+ (void (*)(vaddr_t, vsize_t))cache_noop;
+ }
+#endif
+
+ /* Check that all cache ops are set up. */
+ if (mips_picache_size || 1) { /* XXX- must have primary Icache */
+ if (!mips_cache_ops.mco_icache_sync_all)
+ panic("no icache_sync_all cache op");
+ if (!mips_cache_ops.mco_icache_sync_range)
+ panic("no icache_sync_range cache op");
+ if (!mips_cache_ops.mco_icache_sync_range_index)
+ panic("no icache_sync_range_index cache op");
+ }
+	if (mips_pdcache_size || 1) {	/* XXX- must have primary Dcache */
+ if (!mips_cache_ops.mco_pdcache_wbinv_all)
+ panic("no pdcache_wbinv_all");
+ if (!mips_cache_ops.mco_pdcache_wbinv_range)
+ panic("no pdcache_wbinv_range");
+ if (!mips_cache_ops.mco_pdcache_wbinv_range_index)
+ panic("no pdcache_wbinv_range_index");
+ if (!mips_cache_ops.mco_pdcache_inv_range)
+ panic("no pdcache_inv_range");
+ if (!mips_cache_ops.mco_pdcache_wb_range)
+ panic("no pdcache_wb_range");
+ }
+
+ /* XXXMIPS: No secondary cache handlers yet */
+#ifdef notyet
+ if (mips_sdcache_size) {
+ if (!mips_cache_ops.mco_sdcache_wbinv_all)
+ panic("no sdcache_wbinv_all");
+ if (!mips_cache_ops.mco_sdcache_wbinv_range)
+ panic("no sdcache_wbinv_range");
+ if (!mips_cache_ops.mco_sdcache_wbinv_range_index)
+ panic("no sdcache_wbinv_range_index");
+ if (!mips_cache_ops.mco_sdcache_inv_range)
+ panic("no sdcache_inv_range");
+ if (!mips_cache_ops.mco_sdcache_wb_range)
+ panic("no sdcache_wb_range");
+ }
+#endif
+}
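+
+/*
+ * Usage sketch: once mips_config_cache() has filled in mips_cache_ops,
+ * callers are expected to go through the wrapper macros from
+ * <machine/cache.h> (mips_icache_sync_range(), mips_dcache_wb_range(), ...)
+ * rather than the ops table itself, e.g. after writing instructions into a
+ * buffer:
+ */
+#if 0	/* example only */
+static void
+example_flush_new_code(vm_offset_t va, vm_size_t len)
+{
+
+	mips_dcache_wb_range(va, len);		/* push the data to memory */
+	mips_icache_sync_range(va, len);	/* then resync the I-cache */
+}
+#endif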
diff --git a/sys/mips/mips/cache_mipsNN.c b/sys/mips/mips/cache_mipsNN.c
new file mode 100644
index 0000000..4037885
--- /dev/null
+++ b/sys/mips/mips/cache_mipsNN.c
@@ -0,0 +1,608 @@
+/* $NetBSD: cache_mipsNN.c,v 1.10 2005/12/24 20:07:19 perry Exp $ */
+
+/*
+ * Copyright 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/param.h>
+
+#include <machine/cache.h>
+#include <machine/cache_r4k.h>
+#include <machine/cpuinfo.h>
+
+#define round_line16(x) (((x) + 15) & ~15)
+#define trunc_line16(x) ((x) & ~15)
+
+#define round_line32(x) (((x) + 31) & ~31)
+#define trunc_line32(x) ((x) & ~31)
+
+
+#ifdef SB1250_PASS1
+#define SYNC __asm volatile("sync; sync")
+#else
+#define SYNC __asm volatile("sync")
+#endif
+
+#ifdef TARGET_OCTEON
+#define SYNCI mips_sync_icache();
+#else
+#define SYNCI
+#endif
+
+
+__asm(".set mips32");
+
+static int picache_size;
+static int picache_stride;
+static int picache_loopcount;
+static int picache_way_mask;
+static int pdcache_size;
+static int pdcache_stride;
+static int pdcache_loopcount;
+static int pdcache_way_mask;
+
+void
+mipsNN_cache_init(struct mips_cpuinfo * cpuinfo)
+{
+ int flush_multiple_lines_per_way;
+
+ flush_multiple_lines_per_way = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize * cpuinfo->l1.ic_linesize > PAGE_SIZE;
+ if (cpuinfo->icache_virtual) {
+ /*
+ * With a virtual Icache we don't need to flush
+ * multiples of the page size with index ops; we just
+ * need to flush one pages' worth.
+		 * need to flush one page's worth.
+ flush_multiple_lines_per_way = 0;
+ }
+
+ if (flush_multiple_lines_per_way) {
+ picache_stride = PAGE_SIZE;
+ picache_loopcount = (cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize / PAGE_SIZE) *
+ cpuinfo->l1.ic_nways;
+ } else {
+ picache_stride = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize;
+ picache_loopcount = cpuinfo->l1.ic_nways;
+ }
+
+ if (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize < PAGE_SIZE) {
+ pdcache_stride = cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize;
+ pdcache_loopcount = cpuinfo->l1.dc_nways;
+ } else {
+ pdcache_stride = PAGE_SIZE;
+ pdcache_loopcount = (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize / PAGE_SIZE) *
+ cpuinfo->l1.dc_nways;
+ }
+ picache_size = cpuinfo->l1.ic_size;
+ picache_way_mask = cpuinfo->l1.ic_nways - 1;
+ pdcache_size = cpuinfo->l1.dc_size;
+ pdcache_way_mask = cpuinfo->l1.dc_nways - 1;
+#define CACHE_DEBUG
+#ifdef CACHE_DEBUG
+ if (cpuinfo->icache_virtual)
+ printf(" icache is virtual\n");
+ printf(" picache_stride = %d\n", picache_stride);
+ printf(" picache_loopcount = %d\n", picache_loopcount);
+ printf(" pdcache_stride = %d\n", pdcache_stride);
+ printf(" pdcache_loopcount = %d\n", pdcache_loopcount);
+#endif
+}
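+
+/*
+ * Worked example for the calculation above (hypothetical geometry, not read
+ * from any particular CPU): a 4-way, 32-byte-line, 16KB primary I-cache has
+ * 128 sets, so one way spans 128 * 32 = 4096 bytes = PAGE_SIZE.  The code
+ * above then selects picache_stride = 4096 and picache_loopcount = 4, i.e.
+ * index ops walk one page of KSEG0 addresses and repeat once per way.
+ */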
+
+void
+mipsNN_icache_sync_all_16(void)
+{
+ vm_offset_t va, eva;
+
+ va = MIPS_PHYS_TO_KSEG0(0);
+ eva = va + picache_size;
+
+ /*
+ * Since we're hitting the whole thing, we don't have to
+ * worry about the N different "ways".
+ */
+
+ mips_intern_dcache_wbinv_all();
+
+ while (va < eva) {
+ cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+ va += (32 * 16);
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_icache_sync_all_32(void)
+{
+ vm_offset_t va, eva;
+
+ va = MIPS_PHYS_TO_KSEG0(0);
+ eva = va + picache_size;
+
+ /*
+ * Since we're hitting the whole thing, we don't have to
+ * worry about the N different "ways".
+ */
+
+ mips_intern_dcache_wbinv_all();
+
+ while (va < eva) {
+ cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+ va += (32 * 32);
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_icache_sync_range_16(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line16(va + size);
+ va = trunc_line16(va);
+
+ mips_intern_dcache_wb_range(va, (eva - va));
+
+ while ((eva - va) >= (32 * 16)) {
+ cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+ va += (32 * 16);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+ va += 16;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_icache_sync_range_32(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line32(va + size);
+ va = trunc_line32(va);
+
+ mips_intern_dcache_wb_range(va, (eva - va));
+
+ while ((eva - va) >= (32 * 32)) {
+ cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+ va += (32 * 32);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+ va += 32;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size)
+{
+ unsigned int eva, tmpva;
+ int i, stride, loopcount;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+ va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+ eva = round_line16(va + size);
+ va = trunc_line16(va);
+
+ /*
+ * GCC generates better code in the loops if we reference local
+ * copies of these global variables.
+ */
+ stride = picache_stride;
+ loopcount = picache_loopcount;
+
+ mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+ while ((eva - va) >= (8 * 16)) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_r4k_op_8lines_16(tmpva,
+ CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+ va += 8 * 16;
+ }
+
+ while (va < eva) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_op_r4k_line(tmpva,
+ CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+ va += 16;
+ }
+}
+
+void
+mipsNN_icache_sync_range_index_32(vm_offset_t va, vm_size_t size)
+{
+ unsigned int eva, tmpva;
+ int i, stride, loopcount;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+ va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+ eva = round_line32(va + size);
+ va = trunc_line32(va);
+
+ /*
+ * GCC generates better code in the loops if we reference local
+ * copies of these global variables.
+ */
+ stride = picache_stride;
+ loopcount = picache_loopcount;
+
+ mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+ while ((eva - va) >= (8 * 32)) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_r4k_op_8lines_32(tmpva,
+ CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_op_r4k_line(tmpva,
+ CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+ va += 32;
+ }
+}
+
+void
+mipsNN_pdcache_wbinv_all_16(void)
+{
+ vm_offset_t va, eva;
+
+ va = MIPS_PHYS_TO_KSEG0(0);
+ eva = va + pdcache_size;
+
+ /*
+ * Since we're hitting the whole thing, we don't have to
+ * worry about the N different "ways".
+ */
+
+ while (va < eva) {
+ cache_r4k_op_32lines_16(va,
+ CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+ va += (32 * 16);
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_all_32(void)
+{
+ vm_offset_t va, eva;
+
+ va = MIPS_PHYS_TO_KSEG0(0);
+ eva = va + pdcache_size;
+
+ /*
+ * Since we're hitting the whole thing, we don't have to
+ * worry about the N different "ways".
+ */
+
+ while (va < eva) {
+ cache_r4k_op_32lines_32(va,
+ CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+ va += (32 * 32);
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_16(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line16(va + size);
+ va = trunc_line16(va);
+
+ while ((eva - va) >= (32 * 16)) {
+ cache_r4k_op_32lines_16(va,
+ CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+ va += (32 * 16);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+ va += 16;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_32(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line32(va + size);
+ va = trunc_line32(va);
+
+ while ((eva - va) >= (32 * 32)) {
+ cache_r4k_op_32lines_32(va,
+ CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+ va += (32 * 32);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+ va += 32;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_16(vm_offset_t va, vm_size_t size)
+{
+ unsigned int eva, tmpva;
+ int i, stride, loopcount;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+ va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+ eva = round_line16(va + size);
+ va = trunc_line16(va);
+
+ /*
+ * GCC generates better code in the loops if we reference local
+ * copies of these global variables.
+ */
+ stride = pdcache_stride;
+ loopcount = pdcache_loopcount;
+
+ while ((eva - va) >= (8 * 16)) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_r4k_op_8lines_16(tmpva,
+ CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+ va += 8 * 16;
+ }
+
+ while (va < eva) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_op_r4k_line(tmpva,
+ CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+ va += 16;
+ }
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size)
+{
+ unsigned int eva, tmpva;
+ int i, stride, loopcount;
+
+ /*
+ * Since we're doing Index ops, we expect to not be able
+ * to access the address we've been given. So, get the
+ * bits that determine the cache index, and make a KSEG0
+ * address out of them.
+ */
+ va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+ eva = round_line32(va + size);
+ va = trunc_line32(va);
+
+ /*
+ * GCC generates better code in the loops if we reference local
+ * copies of these global variables.
+ */
+ stride = pdcache_stride;
+ loopcount = pdcache_loopcount;
+
+ while ((eva - va) >= (8 * 32)) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_r4k_op_8lines_32(tmpva,
+ CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+ va += 8 * 32;
+ }
+
+ while (va < eva) {
+ tmpva = va;
+ for (i = 0; i < loopcount; i++, tmpva += stride)
+ cache_op_r4k_line(tmpva,
+ CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+ va += 32;
+ }
+}
+
+void
+mipsNN_pdcache_inv_range_16(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line16(va + size);
+ va = trunc_line16(va);
+
+ while ((eva - va) >= (32 * 16)) {
+ cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+ va += (32 * 16);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+ va += 16;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_inv_range_32(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line32(va + size);
+ va = trunc_line32(va);
+
+ while ((eva - va) >= (32 * 32)) {
+ cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+ va += (32 * 32);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+ va += 32;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line16(va + size);
+ va = trunc_line16(va);
+
+ while ((eva - va) >= (32 * 16)) {
+ cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+ va += (32 * 16);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+ va += 16;
+ }
+
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_32(vm_offset_t va, vm_size_t size)
+{
+ vm_offset_t eva;
+
+ eva = round_line32(va + size);
+ va = trunc_line32(va);
+
+ while ((eva - va) >= (32 * 32)) {
+ cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+ va += (32 * 32);
+ }
+
+ while (va < eva) {
+ cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+ va += 32;
+ }
+
+ SYNC;
+}
+
+
+#ifdef TARGET_OCTEON
+
+void
+mipsNN_icache_sync_all_128(void)
+{
+ SYNCI
+}
+
+void
+mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
+{
+ SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
+{
+}
+
+
+void
+mipsNN_pdcache_wbinv_all_128(void)
+{
+}
+
+
+void
+mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
+{
+ SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
+{
+}
+
+void
+mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
+{
+}
+
+void
+mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
+{
+ SYNC;
+}
+
+#endif
diff --git a/sys/mips/mips/copystr.S b/sys/mips/mips/copystr.S
new file mode 100644
index 0000000..473c980
--- /dev/null
+++ b/sys/mips/mips/copystr.S
@@ -0,0 +1,148 @@
+/*-
+ * Copyright (c) [year] [your name]
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include "assym.s"
+#include <machine/asm.h>
+#include <machine/asmacros.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/errno.h>
+
+/*
+ * copystr(9)
+ * <v0>int copystr(<a0>const void *src, <a1>void *dst, <a2>size_t len,
+ * <a3>size_t *done)
+ */
+ENTRY(copystr)
+ .set noreorder
+ .set noat
+ move v0, zero
+ beqz a2, 2f
+ move t1, zero
+1: subu a2, 1
+ lbu t0, 0(a0)
+ addu a0, 1
+ sb t0, 0(a1)
+ addu a1, 1
+ beqz t0, 3f /* NULL - end of string*/
+ addu t1, 1
+ bnez a2, 1b
+ nop
+2: /* ENAMETOOLONG */
+ li v0, ENAMETOOLONG
+3: /* done != NULL -> how many bytes were copied */
+ beqz a3, 4f
+ nop
+ sw t1, 0(a3)
+4: jr ra
+ nop
+ .set reorder
+ .set at
+END(copystr)
+
+/*
+ * int copyinstr(void *uaddr, void *kaddr, size_t maxlen, size_t *lencopied)
+ * Copy a NIL-terminated string, at most maxlen characters long, from the
+ * user's address space. Return the number of characters copied (including
+ * the NIL) in *lencopied. If the string is too long, return ENAMETOOLONG;
+ * else return 0 or EFAULT.
+ */
+LEAF(copyinstr)
+ .set noreorder
+ .set noat
+ lw t2, pcpup
+ lw v1, PC_CURPCB(t2)
+ la v0, _C_LABEL(copystrerr)
+ blt a0, zero, _C_LABEL(copystrerr)
+ sw v0, PCB_ONFAULT(v1)
+ move t0, a2
+ beq a2, zero, 4f
+1:
+ lbu v0, 0(a0)
+ subu a2, a2, 1
+ beq v0, zero, 2f
+ sb v0, 0(a1)
+ addu a0, a0, 1
+ bne a2, zero, 1b
+ addu a1, a1, 1
+4:
+ li v0, ENAMETOOLONG
+2:
+ beq a3, zero, 3f
+ subu a2, t0, a2
+ sw a2, 0(a3)
+3:
+ j ra # v0 is 0 or ENAMETOOLONG
+ sw zero, PCB_ONFAULT(v1)
+ .set reorder
+ .set at
+END(copyinstr)
+
+/*
+ * int copyoutstr(void *uaddr, void *kaddr, size_t maxlen, size_t *lencopied);
+ * Copy a NIL-terminated string, at most maxlen characters long, into the
+ * user's address space. Return the number of characters copied (including
+ * the NIL) in *lencopied. If the string is too long, return ENAMETOOLONG;
+ * else return 0 or EFAULT.
+ */
+LEAF(copyoutstr)
+ .set noreorder
+ .set noat
+ lw t2, pcpup
+ lw v1, PC_CURPCB(t2)
+ la v0, _C_LABEL(copystrerr)
+ blt a1, zero, _C_LABEL(copystrerr)
+ sw v0, PCB_ONFAULT(v1)
+ move t0, a2
+ beq a2, zero, 4f
+1:
+ lbu v0, 0(a0)
+ subu a2, a2, 1
+ beq v0, zero, 2f
+ sb v0, 0(a1)
+ addu a0, a0, 1
+ bne a2, zero, 1b
+ addu a1, a1, 1
+4:
+ li v0, ENAMETOOLONG
+2:
+ beq a3, zero, 3f
+ subu a2, t0, a2
+ sw a2, 0(a3)
+3:
+ j ra # v0 is 0 or ENAMETOOLONG
+ sw zero, PCB_ONFAULT(v1)
+ .set reorder
+ .set at
+END(copyoutstr)
+
+LEAF(copystrerr)
+ sw zero, PCB_ONFAULT(v1)
+ j ra
+ li v0, EFAULT # return EFAULT
+END(copystrerr)
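+
+/*
+ * Usage sketch (C, hypothetical names): how kernel code typically consumes
+ * copyinstr() above, e.g. fetching a path name from user space.  The return
+ * value is 0, EFAULT or ENAMETOOLONG, exactly as documented above.
+ */
+#if 0	/* example only */
+static int
+example_fetch_path(const char *upath, char *kpath, size_t *done)
+{
+
+	/* kpath must have room for MAXPATHLEN bytes. */
+	return (copyinstr(upath, kpath, MAXPATHLEN, done));
+}
+#endif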
diff --git a/sys/mips/mips/cpu.c b/sys/mips/mips/cpu.c
new file mode 100644
index 0000000..6eac8f7
--- /dev/null
+++ b/sys/mips/mips/cpu.c
@@ -0,0 +1,328 @@
+/*-
+ * Copyright (c) 2004 Juli Mallett. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/stdint.h>
+
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/cache.h>
+#include <machine/cpufunc.h>
+#include <machine/cpuinfo.h>
+#include <machine/cpuregs.h>
+#include <machine/intr_machdep.h>
+#include <machine/locore.h>
+#include <machine/pte.h>
+
+static struct mips_cpuinfo cpuinfo;
+
+union cpuprid cpu_id;
+union cpuprid fpu_id;
+
+/*
+ * Attempt to identify the MIPS CPU as much as possible.
+ *
+ * XXX: Assumes the CPU is MIPS32 compliant.
+ * XXX: For now, skip config register selections 2 and 3
+ * as we don't currently use L2/L3 cache or additional
+ * MIPS32 processor features.
+ */
+static void
+mips_get_identity(struct mips_cpuinfo *cpuinfo)
+{
+ u_int32_t prid;
+ u_int32_t cfg0;
+ u_int32_t cfg1;
+ u_int32_t tmp;
+
+ memset(cpuinfo, 0, sizeof(struct mips_cpuinfo));
+
+ /* Read and store the PrID ID for CPU identification. */
+ prid = mips_rd_prid();
+ cpuinfo->cpu_vendor = MIPS_PRID_CID(prid);
+ cpuinfo->cpu_rev = MIPS_PRID_REV(prid);
+ cpuinfo->cpu_impl = MIPS_PRID_IMPL(prid);
+
+ /* Read config register selection 0 to learn TLB type. */
+ cfg0 = mips_rd_config();
+
+ cpuinfo->tlb_type = ((cfg0 & MIPS_CONFIG0_MT_MASK) >> MIPS_CONFIG0_MT_SHIFT);
+ cpuinfo->icache_virtual = cfg0 & MIPS_CONFIG0_VI;
+
+ /* If config register selection 1 does not exist, exit. */
+ if (!(cfg0 & MIPS3_CONFIG_CM))
+ return;
+
+ /* Learn TLB size and L1 cache geometry. */
+ cfg1 = mips_rd_config_sel1();
+ cpuinfo->tlb_nentries = ((cfg1 & MIPS_CONFIG1_TLBSZ_MASK) >> MIPS_CONFIG1_TLBSZ_SHIFT) + 1;
+
+ /* L1 instruction cache. */
+ tmp = 1 << (((cfg1 & MIPS_CONFIG1_IL_MASK) >> MIPS_CONFIG1_IL_SHIFT) + 1);
+ if (tmp != 0) {
+ cpuinfo->l1.ic_linesize = tmp;
+ cpuinfo->l1.ic_nways = (((cfg1 & MIPS_CONFIG1_IA_MASK) >> MIPS_CONFIG1_IA_SHIFT)) + 1;
+ cpuinfo->l1.ic_nsets = 1 << (((cfg1 & MIPS_CONFIG1_IS_MASK) >> MIPS_CONFIG1_IS_SHIFT) + 6);
+ cpuinfo->l1.ic_size = cpuinfo->l1.ic_linesize * cpuinfo->l1.ic_nsets
+ * cpuinfo->l1.ic_nways;
+ }
+
+ /* L1 data cache. */
+ tmp = 1 << (((cfg1 & MIPS_CONFIG1_DL_MASK) >> MIPS_CONFIG1_DL_SHIFT) + 1);
+ if (tmp != 0) {
+ cpuinfo->l1.dc_linesize = tmp;
+ cpuinfo->l1.dc_nways = (((cfg1 & MIPS_CONFIG1_DA_MASK) >> MIPS_CONFIG1_DA_SHIFT)) + 1;
+ cpuinfo->l1.dc_nsets = 1 << (((cfg1 & MIPS_CONFIG1_DS_MASK) >> MIPS_CONFIG1_DS_SHIFT) + 6);
+#ifdef TARGET_OCTEON
+ /*
+		 * Octeon uses a 128-byte line size, but Config-Sel1 does not
+		 * report it; force 128-byte lines, 1 set, 64 ways here.
+ */
+ cpuinfo->l1.dc_linesize = 128;
+ cpuinfo->l1.dc_nsets = 1;
+ cpuinfo->l1.dc_nways = 64;
+#endif
+ cpuinfo->l1.dc_size = cpuinfo->l1.dc_linesize * cpuinfo->l1.dc_nsets
+ * cpuinfo->l1.dc_nways;
+ }
+}
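+
+/*
+ * Worked example for the decoding above (hypothetical Config1 contents): with
+ * IL = 4, IA = 3 and IS = 1 the L1 I-cache comes out as
+ * linesize = 1 << (4 + 1) = 32 bytes, nways = 3 + 1 = 4 and
+ * nsets = 1 << (1 + 6) = 128, for a total of 32 * 4 * 128 = 16KB.
+ */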
+
+void
+mips_cpu_init(void)
+{
+ mips_get_identity(&cpuinfo);
+ num_tlbentries = cpuinfo.tlb_nentries;
+ Mips_SetWIRED(0);
+ Mips_TLBFlush(num_tlbentries);
+ Mips_SetWIRED(VMWIRED_ENTRIES);
+ mips_config_cache(&cpuinfo);
+ mips_vector_init();
+
+ mips_icache_sync_all();
+ mips_dcache_wbinv_all();
+}
+
+void
+cpu_identify(void)
+{
+ printf("cpu%d: ", 0); /* XXX per-cpu */
+ switch (cpuinfo.cpu_vendor) {
+ case MIPS_PRID_CID_MTI:
+ printf("MIPS Technologies");
+ break;
+ case MIPS_PRID_CID_BROADCOM:
+ case MIPS_PRID_CID_SIBYTE:
+ printf("Broadcom");
+ break;
+ case MIPS_PRID_CID_ALCHEMY:
+ printf("AMD");
+ break;
+ case MIPS_PRID_CID_SANDCRAFT:
+ printf("Sandcraft");
+ break;
+ case MIPS_PRID_CID_PHILIPS:
+ printf("Philips");
+ break;
+ case MIPS_PRID_CID_TOSHIBA:
+ printf("Toshiba");
+ break;
+ case MIPS_PRID_CID_LSI:
+ printf("LSI");
+ break;
+ case MIPS_PRID_CID_LEXRA:
+ printf("Lexra");
+ break;
+ case MIPS_PRID_CID_PREHISTORIC:
+ default:
+ printf("Unknown");
+ break;
+ }
+ printf(" processor v%d.%d\n", cpuinfo.cpu_rev, cpuinfo.cpu_impl);
+
+ printf(" MMU: ");
+ if (cpuinfo.tlb_type == MIPS_MMU_NONE) {
+ printf("none present\n");
+ } else {
+ if (cpuinfo.tlb_type == MIPS_MMU_TLB) {
+ printf("Standard TLB");
+ } else if (cpuinfo.tlb_type == MIPS_MMU_BAT) {
+ printf("Standard BAT");
+ } else if (cpuinfo.tlb_type == MIPS_MMU_FIXED) {
+ printf("Fixed mapping");
+ }
+ printf(", %d entries\n", cpuinfo.tlb_nentries);
+ }
+
+ printf(" L1 i-cache: ");
+ if (cpuinfo.l1.ic_linesize == 0) {
+ printf("disabled");
+ } else {
+ if (cpuinfo.l1.ic_nways == 1) {
+ printf("direct-mapped with");
+ } else {
+ printf ("%d ways of", cpuinfo.l1.ic_nways);
+ }
+ printf(" %d sets, %d bytes per line\n", cpuinfo.l1.ic_nsets, cpuinfo.l1.ic_linesize);
+ }
+
+ printf(" L1 d-cache: ");
+ if (cpuinfo.l1.dc_linesize == 0) {
+ printf("disabled");
+ } else {
+ if (cpuinfo.l1.dc_nways == 1) {
+ printf("direct-mapped with");
+ } else {
+ printf ("%d ways of", cpuinfo.l1.dc_nways);
+ }
+ printf(" %d sets, %d bytes per line\n", cpuinfo.l1.dc_nsets, cpuinfo.l1.dc_linesize);
+ }
+}
+
+static struct rman cpu_hardirq_rman;
+
+static devclass_t cpu_devclass;
+
+/*
+ * Device methods
+ */
+static int cpu_probe(device_t);
+static int cpu_attach(device_t);
+static struct resource *cpu_alloc_resource(device_t, device_t, int, int *,
+ u_long, u_long, u_long, u_int);
+static int cpu_setup_intr(device_t, device_t, struct resource *, int,
+ driver_filter_t *f, driver_intr_t *, void *,
+ void **);
+
+static device_method_t cpu_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, cpu_probe),
+ DEVMETHOD(device_attach, cpu_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, cpu_alloc_resource),
+ DEVMETHOD(bus_setup_intr, cpu_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ { 0, 0 }
+};
+
+static driver_t cpu_driver = {
+ "cpu", cpu_methods, 1
+};
+
+static int
+cpu_probe(device_t dev)
+{
+ return (0);
+}
+
+static int
+cpu_attach(device_t dev)
+{
+ int error;
+#ifdef notyet
+ device_t clock;
+#endif
+
+ cpu_hardirq_rman.rm_start = 0;
+ cpu_hardirq_rman.rm_end = 5;
+ cpu_hardirq_rman.rm_type = RMAN_ARRAY;
+ cpu_hardirq_rman.rm_descr = "CPU Hard Interrupts";
+
+ error = rman_init(&cpu_hardirq_rman);
+ if (error != 0) {
+ device_printf(dev, "failed to initialize irq resources\n");
+ return (error);
+ }
+ /* XXX rman_manage_all. */
+ error = rman_manage_region(&cpu_hardirq_rman,
+ cpu_hardirq_rman.rm_start,
+ cpu_hardirq_rman.rm_end);
+ if (error != 0) {
+ device_printf(dev, "failed to manage irq resources\n");
+ return (error);
+ }
+
+ if (device_get_unit(dev) != 0)
+ panic("can't attach more cpus");
+ device_set_desc(dev, "MIPS32 processor");
+
+#ifdef notyet
+ clock = device_add_child(dev, "clock", device_get_unit(dev));
+ if (clock == NULL)
+ device_printf(dev, "clock failed to attach");
+#endif
+
+ return (bus_generic_attach(dev));
+}
+
+static struct resource *
+cpu_alloc_resource(device_t dev, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource *res;
+
+ if (type != SYS_RES_IRQ)
+ return (NULL);
+ res = rman_reserve_resource(&cpu_hardirq_rman, start, end, count, 0,
+ child);
+ return (res);
+}
+
+static int
+cpu_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
+ driver_filter_t *filt, driver_intr_t *handler, void *arg,
+ void **cookiep)
+{
+ int error;
+ int intr;
+
+ error = rman_activate_resource(res);
+ if (error != 0) {
+ device_printf(child, "could not activate irq\n");
+ return (error);
+ }
+
+ intr = rman_get_start(res);
+
+ cpu_establish_hardintr(device_get_nameunit(child), filt, handler, arg,
+ intr, flags, cookiep);
+ device_printf(child, "established CPU interrupt %d\n", intr);
+ return (0);
+}
+
+DRIVER_MODULE(cpu, root, cpu_driver, cpu_devclass, 0, 0);
diff --git a/sys/mips/mips/db_disasm.c b/sys/mips/mips/db_disasm.c
new file mode 100644
index 0000000..21e5c90
--- /dev/null
+++ b/sys/mips/mips/db_disasm.c
@@ -0,0 +1,392 @@
+/* $OpenBSD: db_disasm.c,v 1.1 1998/03/16 09:03:24 pefo Exp $ */
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)kadb.c 8.1 (Berkeley) 6/10/93
+ * Id: db_disasm.c,v 1.1 1998/03/16 09:03:24 pefo Exp
+ * JNPR: db_disasm.c,v 1.1 2006/08/07 05:38:57 katta
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <vm/vm_param.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <sys/systm.h>
+
+#include <machine/mips_opcode.h>
+#include <machine/db_machdep.h>
+#include <ddb/ddb.h>
+#include <ddb/db_output.h>
+
+static char *op_name[64] = {
+/* 0 */ "spec", "bcond","j", "jal", "beq", "bne", "blez", "bgtz",
+/* 8 */ "addi", "addiu","slti", "sltiu","andi", "ori", "xori", "lui",
+/*16 */ "cop0", "cop1", "cop2", "cop3", "beql", "bnel", "blezl","bgtzl",
+/*24 */ "daddi","daddiu","ldl", "ldr", "op34", "op35", "op36", "op37",
+/*32 */ "lb", "lh", "lwl", "lw", "lbu", "lhu", "lwr", "lwu",
+/*40 */ "sb", "sh", "swl", "sw", "sdl", "sdr", "swr", "cache",
+/*48 */ "ll", "lwc1", "lwc2", "lwc3", "lld", "ldc1", "ldc2", "ld",
+/*56 */ "sc", "swc1", "swc2", "swc3", "scd", "sdc1", "sdc2", "sd"
+};
+
+static char *spec_name[64] = {
+/* 0 */ "sll", "spec01","srl", "sra", "sllv", "spec05","srlv","srav",
+/* 8 */ "jr", "jalr", "spec12","spec13","syscall","break","spec16","sync",
+/*16 */ "mfhi", "mthi", "mflo", "mtlo", "dsllv","spec25","dsrlv","dsrav",
+/*24 */ "mult", "multu","div", "divu", "dmult","dmultu","ddiv","ddivu",
+/*32 */ "add", "addu", "sub", "subu", "and", "or", "xor", "nor",
+/*40 */ "spec50","spec51","slt","sltu", "dadd","daddu","dsub","dsubu",
+/*48 */ "tge","tgeu","tlt","tltu","teq","spec65","tne","spec67",
+/*56 */ "dsll","spec71","dsrl","dsra","dsll32","spec75","dsrl32","dsra32"
+};
+
+static char *bcond_name[32] = {
+/* 0 */ "bltz", "bgez", "bltzl", "bgezl", "?", "?", "?", "?",
+/* 8 */ "tgei", "tgeiu", "tlti", "tltiu", "teqi", "?", "tnei", "?",
+/*16 */ "bltzal", "bgezal", "bltzall", "bgezall", "?", "?", "?", "?",
+/*24 */ "?", "?", "?", "?", "?", "?", "?", "?",
+};
+
+static char *cop1_name[64] = {
+/* 0 */ "fadd", "fsub", "fmpy", "fdiv", "fsqrt","fabs", "fmov", "fneg",
+/* 8 */ "fop08","fop09","fop0a","fop0b","fop0c","fop0d","fop0e","fop0f",
+/*16 */ "fop10","fop11","fop12","fop13","fop14","fop15","fop16","fop17",
+/*24 */ "fop18","fop19","fop1a","fop1b","fop1c","fop1d","fop1e","fop1f",
+/*32 */ "fcvts","fcvtd","fcvte","fop23","fcvtw","fop25","fop26","fop27",
+/*40 */ "fop28","fop29","fop2a","fop2b","fop2c","fop2d","fop2e","fop2f",
+/*48 */ "fcmp.f","fcmp.un","fcmp.eq","fcmp.ueq","fcmp.olt","fcmp.ult",
+ "fcmp.ole","fcmp.ule",
+/*56 */ "fcmp.sf","fcmp.ngle","fcmp.seq","fcmp.ngl","fcmp.lt","fcmp.nge",
+ "fcmp.le","fcmp.ngt"
+};
+
+static char *fmt_name[16] = {
+ "s", "d", "e", "fmt3",
+ "w", "fmt5", "fmt6", "fmt7",
+ "fmt8", "fmt9", "fmta", "fmtb",
+ "fmtc", "fmtd", "fmte", "fmtf"
+};
+
+static char *reg_name[32] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra"
+};
+
+static char *c0_opname[64] = {
+ "c0op00","tlbr", "tlbwi", "c0op03","c0op04","c0op05","tlbwr", "c0op07",
+ "tlbp", "c0op11","c0op12","c0op13","c0op14","c0op15","c0op16","c0op17",
+ "rfe", "c0op21","c0op22","c0op23","c0op24","c0op25","c0op26","c0op27",
+ "eret","c0op31","c0op32","c0op33","c0op34","c0op35","c0op36","c0op37",
+ "c0op40","c0op41","c0op42","c0op43","c0op44","c0op45","c0op46","c0op47",
+ "c0op50","c0op51","c0op52","c0op53","c0op54","c0op55","c0op56","c0op57",
+ "c0op60","c0op61","c0op62","c0op63","c0op64","c0op65","c0op66","c0op67",
+ "c0op70","c0op71","c0op72","c0op73","c0op74","c0op75","c0op77","c0op77",
+};
+
+static char *c0_reg[32] = {
+ "index","random","tlblo0","tlblo1","context","tlbmask","wired","c0r7",
+ "badvaddr","count","tlbhi","c0r11","sr","cause","epc", "prid",
+ "config","lladr","watchlo","watchhi","xcontext","c0r21","c0r22","c0r23",
+ "c0r24","c0r25","ecc","cacheerr","taglo","taghi","errepc","c0r31"
+};
+
+static int md_printins(int ins, int mdbdot);
+
+db_addr_t
+db_disasm(db_addr_t loc, boolean_t altfmt)
+{
+ int ins;
+
+ if (vtophys((vm_offset_t)loc)) {
+ db_read_bytes((vm_offset_t)loc, (size_t)sizeof(int),
+ (char *)&ins);
+ md_printins(ins, loc);
+ }
+
+ return (loc + sizeof(int));
+}
+
+/* ARGSUSED */
+static int
+md_printins(int ins, int mdbdot)
+{
+ InstFmt i;
+ int delay = 0;
+
+ i.word = ins;
+
+ switch (i.JType.op) {
+ case OP_SPECIAL:
+ if (i.word == 0) {
+ db_printf("nop");
+ break;
+ }
+ if (i.RType.func == OP_ADDU && i.RType.rt == 0) {
+ db_printf("move\t%s,%s",
+ reg_name[i.RType.rd], reg_name[i.RType.rs]);
+ break;
+ }
+ db_printf("%s", spec_name[i.RType.func]);
+ switch (i.RType.func) {
+ case OP_SLL:
+ case OP_SRL:
+ case OP_SRA:
+ case OP_DSLL:
+ case OP_DSRL:
+ case OP_DSRA:
+ case OP_DSLL32:
+ case OP_DSRL32:
+ case OP_DSRA32:
+ db_printf("\t%s,%s,%d", reg_name[i.RType.rd],
+ reg_name[i.RType.rt], i.RType.shamt);
+ break;
+
+ case OP_SLLV:
+ case OP_SRLV:
+ case OP_SRAV:
+ case OP_DSLLV:
+ case OP_DSRLV:
+ case OP_DSRAV:
+ db_printf("\t%s,%s,%s", reg_name[i.RType.rd],
+ reg_name[i.RType.rt], reg_name[i.RType.rs]);
+ break;
+
+ case OP_MFHI:
+ case OP_MFLO:
+ db_printf("\t%s", reg_name[i.RType.rd]);
+ break;
+
+ case OP_JR:
+ case OP_JALR:
+ delay = 1;
+ /* FALLTHROUGH */
+ case OP_MTLO:
+ case OP_MTHI:
+ db_printf("\t%s", reg_name[i.RType.rs]);
+ break;
+
+ case OP_MULT:
+ case OP_MULTU:
+ case OP_DMULT:
+ case OP_DMULTU:
+ case OP_DIV:
+ case OP_DIVU:
+ case OP_DDIV:
+ case OP_DDIVU:
+ db_printf("\t%s,%s",
+ reg_name[i.RType.rs], reg_name[i.RType.rt]);
+ break;
+
+ case OP_SYSCALL:
+ case OP_SYNC:
+ break;
+
+ case OP_BREAK:
+ db_printf("\t%d", (i.RType.rs << 5) | i.RType.rt);
+ break;
+
+ default:
+ db_printf("\t%s,%s,%s", reg_name[i.RType.rd],
+ reg_name[i.RType.rs], reg_name[i.RType.rt]);
+ };
+ break;
+
+ case OP_BCOND:
+ db_printf("%s\t%s,", bcond_name[i.IType.rt],
+ reg_name[i.IType.rs]);
+ goto pr_displ;
+
+ case OP_BLEZ:
+ case OP_BLEZL:
+ case OP_BGTZ:
+ case OP_BGTZL:
+ db_printf("%s\t%s,", op_name[i.IType.op],
+ reg_name[i.IType.rs]);
+ goto pr_displ;
+
+ case OP_BEQ:
+ case OP_BEQL:
+ if (i.IType.rs == 0 && i.IType.rt == 0) {
+ db_printf("b\t");
+ goto pr_displ;
+ }
+ /* FALLTHROUGH */
+ case OP_BNE:
+ case OP_BNEL:
+ db_printf("%s\t%s,%s,", op_name[i.IType.op],
+ reg_name[i.IType.rs], reg_name[i.IType.rt]);
+ pr_displ:
+ delay = 1;
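+		/*
+		 * The branch target is relative to the delay slot: it is
+		 * pc + 4 + (sign-extended 16-bit offset << 2).  For example,
+		 * a branch at 0x80001000 with imm 0x0010 prints 0x80001044.
+		 */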
+ db_printf("0x%08x", mdbdot + 4 + ((short)i.IType.imm << 2));
+ break;
+
+ case OP_COP0:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ db_printf("bc0%c\t",
+ "ft"[i.RType.rt & COPz_BC_TF_MASK]);
+ goto pr_displ;
+
+ case OP_MT:
+ db_printf("mtc0\t%s,%s",
+ reg_name[i.RType.rt], c0_reg[i.RType.rd]);
+ break;
+
+ case OP_DMT:
+ db_printf("dmtc0\t%s,%s",
+ reg_name[i.RType.rt], c0_reg[i.RType.rd]);
+ break;
+
+ case OP_MF:
+ db_printf("mfc0\t%s,%s",
+ reg_name[i.RType.rt], c0_reg[i.RType.rd]);
+ break;
+
+ case OP_DMF:
+ db_printf("dmfc0\t%s,%s",
+ reg_name[i.RType.rt], c0_reg[i.RType.rd]);
+ break;
+
+ default:
+ db_printf("%s", c0_opname[i.FRType.func]);
+ };
+ break;
+
+ case OP_COP1:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ db_printf("bc1%c\t",
+ "ft"[i.RType.rt & COPz_BC_TF_MASK]);
+ goto pr_displ;
+
+ case OP_MT:
+ db_printf("mtc1\t%s,f%d",
+ reg_name[i.RType.rt], i.RType.rd);
+ break;
+
+ case OP_MF:
+ db_printf("mfc1\t%s,f%d",
+ reg_name[i.RType.rt], i.RType.rd);
+ break;
+
+ case OP_CT:
+ db_printf("ctc1\t%s,f%d",
+ reg_name[i.RType.rt], i.RType.rd);
+ break;
+
+ case OP_CF:
+ db_printf("cfc1\t%s,f%d",
+ reg_name[i.RType.rt], i.RType.rd);
+ break;
+
+ default:
+ db_printf("%s.%s\tf%d,f%d,f%d",
+ cop1_name[i.FRType.func], fmt_name[i.FRType.fmt],
+ i.FRType.fd, i.FRType.fs, i.FRType.ft);
+ };
+ break;
+
+ case OP_J:
+ case OP_JAL:
+ db_printf("%s\t", op_name[i.JType.op]);
+ db_printf("0x%8x",(mdbdot & 0xF0000000) | (i.JType.target << 2));
+ delay = 1;
+ break;
+
+ case OP_LWC1:
+ case OP_SWC1:
+ db_printf("%s\tf%d,", op_name[i.IType.op], i.IType.rt);
+ goto loadstore;
+
+ case OP_LB:
+ case OP_LH:
+ case OP_LW:
+ case OP_LD:
+ case OP_LBU:
+ case OP_LHU:
+ case OP_LWU:
+ case OP_SB:
+ case OP_SH:
+ case OP_SW:
+ case OP_SD:
+ db_printf("%s\t%s,", op_name[i.IType.op],
+ reg_name[i.IType.rt]);
+ loadstore:
+ db_printf("%d(%s)", (short)i.IType.imm, reg_name[i.IType.rs]);
+ break;
+
+ case OP_ORI:
+ case OP_XORI:
+ if (i.IType.rs == 0) {
+ db_printf("li\t%s,0x%x",
+ reg_name[i.IType.rt], i.IType.imm);
+ break;
+ }
+ /* FALLTHROUGH */
+ case OP_ANDI:
+ db_printf("%s\t%s,%s,0x%x", op_name[i.IType.op],
+ reg_name[i.IType.rt], reg_name[i.IType.rs], i.IType.imm);
+ break;
+
+ case OP_LUI:
+ db_printf("%s\t%s,0x%x", op_name[i.IType.op],
+ reg_name[i.IType.rt], i.IType.imm);
+ break;
+
+ case OP_ADDI:
+ case OP_DADDI:
+ case OP_ADDIU:
+ case OP_DADDIU:
+ if (i.IType.rs == 0) {
+ db_printf("li\t%s,%d", reg_name[i.IType.rt],
+ (short)i.IType.imm);
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ db_printf("%s\t%s,%s,%d", op_name[i.IType.op],
+ reg_name[i.IType.rt], reg_name[i.IType.rs],
+ (short)i.IType.imm);
+ }
+ return (delay);
+}
diff --git a/sys/mips/mips/db_interface.c b/sys/mips/mips/db_interface.c
new file mode 100644
index 0000000..455c03e
--- /dev/null
+++ b/sys/mips/mips/db_interface.c
@@ -0,0 +1,339 @@
+/* $OpenBSD: db_machdep.c,v 1.2 1998/09/15 10:50:13 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom, Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: db_interface.c,v 1.6.2.1 2007/08/29 12:24:49 girish
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/cons.h>
+#include <sys/lock.h>
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+
+#include <machine/cache.h>
+#include <machine/db_machdep.h>
+#include <machine/mips_opcode.h>
+#include <machine/vmparam.h>
+#include <machine/md_var.h>
+#define NO_REG_DEFS 1 /* Prevent asm.h from including regdef.h */
+#include <machine/asm.h>
+#include <machine/setjmp.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_access.h>
+#include <ddb/db_output.h>
+#include <ddb/db_variables.h>
+#include <sys/kdb.h>
+
+static db_varfcn_t db_frame;
+
+#define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x)
+struct db_variable db_regs[] = {
+ { "at", DB_OFFSET(ast), db_frame },
+ { "v0", DB_OFFSET(v0), db_frame },
+ { "v1", DB_OFFSET(v1), db_frame },
+ { "a0", DB_OFFSET(a0), db_frame },
+ { "a1", DB_OFFSET(a1), db_frame },
+ { "a2", DB_OFFSET(a2), db_frame },
+ { "a3", DB_OFFSET(a3), db_frame },
+ { "t0", DB_OFFSET(t0), db_frame },
+ { "t1", DB_OFFSET(t1), db_frame },
+ { "t2", DB_OFFSET(t2), db_frame },
+ { "t3", DB_OFFSET(t3), db_frame },
+ { "t4", DB_OFFSET(t4), db_frame },
+ { "t5", DB_OFFSET(t5), db_frame },
+ { "t6", DB_OFFSET(t6), db_frame },
+ { "t7", DB_OFFSET(t7), db_frame },
+ { "s0", DB_OFFSET(s0), db_frame },
+ { "s1", DB_OFFSET(s1), db_frame },
+ { "s2", DB_OFFSET(s2), db_frame },
+ { "s3", DB_OFFSET(s3), db_frame },
+ { "s4", DB_OFFSET(s4), db_frame },
+ { "s5", DB_OFFSET(s5), db_frame },
+ { "s6", DB_OFFSET(s6), db_frame },
+ { "s7", DB_OFFSET(s7), db_frame },
+ { "t8", DB_OFFSET(t8), db_frame },
+ { "t9", DB_OFFSET(t9), db_frame },
+ { "k0", DB_OFFSET(k0), db_frame },
+ { "k1", DB_OFFSET(k1), db_frame },
+ { "gp", DB_OFFSET(gp), db_frame },
+ { "sp", DB_OFFSET(sp), db_frame },
+ { "s8", DB_OFFSET(s8), db_frame },
+ { "ra", DB_OFFSET(ra), db_frame },
+ { "sr", DB_OFFSET(sr), db_frame },
+ { "lo", DB_OFFSET(mullo), db_frame },
+ { "hi", DB_OFFSET(mulhi), db_frame },
+ { "bad", DB_OFFSET(badvaddr), db_frame },
+ { "cs", DB_OFFSET(cause), db_frame },
+ { "pc", DB_OFFSET(pc), db_frame },
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+int (*do_db_log_stack_trace_cmd)(char *);
+
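+/*
+ * Fetch or store one register slot of the current trapframe.  The db_regs[]
+ * entries above encode the trapframe offset in vp->valuep (see DB_OFFSET),
+ * which is added to kdb_frame here.
+ */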
+static int
+db_frame(struct db_variable *vp, db_expr_t *valuep, int op)
+{
+ int *reg;
+
+ if (kdb_frame == NULL)
+ return (0);
+
+ reg = (int *)((uintptr_t)kdb_frame + (db_expr_t)vp->valuep);
+ if (op == DB_VAR_GET)
+ *valuep = *reg;
+ else
+ *reg = *valuep;
+ return (1);
+}
+
+int
+db_read_bytes(vm_offset_t addr, size_t size, char *data)
+{
+ jmp_buf jb;
+ void *prev_jb;
+ int ret;
+
+ prev_jb = kdb_jmpbuf(jb);
+ ret = setjmp(jb);
+ if (ret == 0) {
+ /*
+		 * 'addr' could be a memory-mapped I/O address.  Try to
+		 * access it atomically, in the unit size requested.
+ */
+ if ((size == 2 || size == 4 || size == 8) &&
+ ((addr & (size -1)) == 0) &&
+ (((vm_offset_t)data & (size -1)) == 0)) {
+ switch (size) {
+ case 2:
+ *(uint16_t *)data = *(uint16_t *)addr;
+ break;
+ case 4:
+ *(uint32_t *)data = *(uint32_t *)addr;
+ break;
+ case 8:
+ atomic_load_64((volatile u_int64_t *)addr,
+ (u_int64_t *)data);
+ break;
+ }
+ } else {
+ char *src;
+
+ src = (char *)addr;
+ while (size-- > 0)
+ *data++ = *src++;
+ }
+ }
+
+ (void)kdb_jmpbuf(prev_jb);
+ return (ret);
+}
+
+int
+db_write_bytes(vm_offset_t addr, size_t size, char *data)
+{
+ int ret;
+ jmp_buf jb;
+ void *prev_jb;
+
+ prev_jb = kdb_jmpbuf(jb);
+ ret = setjmp(jb);
+
+ if (ret == 0) {
+ /*
+		 * 'addr' could be a memory-mapped I/O address.  Try to
+		 * access it atomically, in the unit size requested.
+ */
+ if ((size == 2 || size == 4 || size == 8) &&
+ ((addr & (size -1)) == 0) &&
+ (((vm_offset_t)data & (size -1)) == 0)) {
+ switch (size) {
+ case 2:
+ *(uint16_t *)addr = *(uint16_t *)data;
+ break;
+ case 4:
+ *(uint32_t *)addr = *(uint32_t *)data;
+ break;
+ case 8:
+ atomic_store_64((volatile u_int64_t *)addr,
+ (u_int64_t *)data);
+ break;
+ }
+ } else {
+ char *dst;
+ size_t len = size;
+
+ dst = (char *)addr;
+ while (len-- > 0)
+ *dst++ = *data++;
+ }
+
+ mips_icache_sync_range((db_addr_t) addr, size);
+ mips_dcache_wbinv_range((db_addr_t) addr, size);
+ }
+ (void)kdb_jmpbuf(prev_jb);
+ return (ret);
+}
+
+/*
+ * To single step, ddb needs to know the address of the next instruction
+ * that will be executed.  In principle that means finding both the
+ * branch-taken and the branch-not-taken address, but MipsEmulateBranch
+ * works out _exactly_ where we will end up, so the 'dual breakpoint'
+ * method is not required.
+ */
+db_addr_t
+next_instr_address(db_addr_t pc, boolean_t bd)
+{
+ db_addr_t next;
+
+ next = (db_addr_t)MipsEmulateBranch(kdb_frame, pc, 0, 0);
+ return (next);
+}
+
+
+/*
+ * Decode instruction and figure out type.
+ */
+int
+db_inst_type(int ins)
+{
+ InstFmt inst;
+ int ityp = 0;
+
+ inst.word = ins;
+ switch ((int)inst.JType.op) {
+ case OP_SPECIAL:
+ switch ((int)inst.RType.func) {
+ case OP_JR:
+ ityp = IT_BRANCH;
+ break;
+ case OP_JALR:
+ case OP_SYSCALL:
+ ityp = IT_CALL;
+ break;
+ }
+ break;
+
+ case OP_BCOND:
+ switch ((int)inst.IType.rt) {
+ case OP_BLTZ:
+ case OP_BLTZL:
+ case OP_BGEZ:
+ case OP_BGEZL:
+ ityp = IT_BRANCH;
+ break;
+
+ case OP_BLTZAL:
+ case OP_BLTZALL:
+ case OP_BGEZAL:
+ case OP_BGEZALL:
+ ityp = IT_CALL;
+ break;
+ }
+ break;
+
+ case OP_JAL:
+ ityp = IT_CALL;
+ break;
+
+ case OP_J:
+ case OP_BEQ:
+ case OP_BEQL:
+ case OP_BNE:
+ case OP_BNEL:
+ case OP_BLEZ:
+ case OP_BLEZL:
+ case OP_BGTZ:
+ case OP_BGTZL:
+ ityp = IT_BRANCH;
+ break;
+
+ case OP_COP1:
+ switch (inst.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ ityp = IT_BRANCH;
+ break;
+ }
+ break;
+
+ case OP_LB:
+ case OP_LH:
+ case OP_LW:
+ case OP_LD:
+ case OP_LBU:
+ case OP_LHU:
+ case OP_LWU:
+ case OP_LWC1:
+ ityp = IT_LOAD;
+ break;
+
+ case OP_SB:
+ case OP_SH:
+ case OP_SW:
+ case OP_SD:
+ case OP_SWC1:
+ ityp = IT_STORE;
+ break;
+ }
+ return (ityp);
+}
+
+/*
+ * Return the next pc if the given branch is taken.
+ * MipsEmulateBranch() does the branch (and delay slot) analysis for us.
+ */
+db_addr_t
+branch_taken(int inst, db_addr_t pc)
+{
+ db_addr_t ra;
+ register_t fpucsr;
+
+ /* TBD: when is fsr set */
+ fpucsr = (curthread) ? curthread->td_pcb->pcb_regs.fsr : 0;
+ ra = (db_addr_t)MipsEmulateBranch(kdb_frame, pc, fpucsr, 0);
+ return (ra);
+}
diff --git a/sys/mips/mips/db_trace.c b/sys/mips/mips/db_trace.c
new file mode 100644
index 0000000..fe2aa6e
--- /dev/null
+++ b/sys/mips/mips/db_trace.c
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2004-2005, Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * JNPR: db_trace.c,v 1.8 2007/08/09 11:23:32 katta
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kdb.h>
+#include <sys/proc.h>
+#include <sys/stack.h>
+#include <sys/sysent.h>
+
+#include <machine/db_machdep.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+
+#include <ddb/ddb.h>
+
+int
+db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
+{
+
+ return(0);
+}
+
+
+int
+db_md_clr_watchpoint(db_expr_t addr, db_expr_t size)
+{
+
+ return(0);
+}
+
+
+void
+db_md_list_watchpoints()
+{
+}
+
+static int
+db_backtrace(struct thread *td, db_addr_t frame, int count)
+{
+ stacktrace_subr((struct trapframe *)frame,
+ (int (*) (const char *, ...))db_printf);
+ return (0);
+}
+
+void
+db_trace_self(void)
+{
+ db_trace_thread (curthread, -1);
+ return;
+}
+
+int
+db_trace_thread(struct thread *thr, int count)
+{
+ struct pcb *ctx;
+
+ ctx = kdb_thr_ctx(thr);
+ return (db_backtrace(thr, (db_addr_t) &ctx->pcb_regs, count));
+}
+
+void
+db_show_mdpcpu(struct pcpu *pc)
+{
+
+ db_printf("ipis = 0x%x\n", pc->pc_pending_ipis);
+ db_printf("next ASID = %d\n", pc->pc_next_asid);
+ db_printf("GENID = %d\n", pc->pc_asid_generation);
+ return;
+}
diff --git a/sys/mips/mips/dump_machdep.c b/sys/mips/mips/dump_machdep.c
new file mode 100644
index 0000000..ec7a91d
--- /dev/null
+++ b/sys/mips/mips/dump_machdep.c
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2006 Fill this file and put your name here
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Note to the implementor: when using pmap_kenter_temporary() you must
+ * call pmap_kenter_temporary_free() after using the VA to write out the
+ * page.  You should probably also pin the dump thread to its CPU with
+ * sched_pin().
+ */
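+
+/*
+ * Illustrative sketch only, not part of this port: roughly the shape the
+ * per-page loop of a future dumpsys() could take, following the note above.
+ * The dumper interface details (struct dumperinfo, dump_write(), the offset
+ * bookkeeping, plus the extra headers a real version would include) are
+ * assumptions here; pmap_kenter_temporary(), pmap_kenter_temporary_free()
+ * and sched_pin() are the calls the note names.
+ */
+#if 0
+static void
+dump_one_page(struct dumperinfo *di, vm_paddr_t pa, off_t off)
+{
+	void *va;
+
+	sched_pin();				/* keep the dump on one CPU */
+	va = pmap_kenter_temporary(pa, 0);	/* temporarily map the page */
+	(void)dump_write(di, va, 0, off, PAGE_SIZE);
+	pmap_kenter_temporary_free(pa);		/* per the note above */
+	sched_unpin();
+}
+#endif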
diff --git a/sys/mips/mips/elf_machdep.c b/sys/mips/mips/elf_machdep.c
new file mode 100644
index 0000000..4a062bd
--- /dev/null
+++ b/sys/mips/mips/elf_machdep.c
@@ -0,0 +1,268 @@
+/*-
+ * Copyright 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: src/sys/i386/i386/elf_machdep.c,v 1.20 2004/08/11 02:35:05 marcel
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/linker.h>
+#include <sys/sysent.h>
+#include <sys/imgact_elf.h>
+#include <sys/syscall.h>
+#include <sys/signalvar.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_param.h>
+
+#include <machine/elf.h>
+#include <machine/md_var.h>
+
+struct sysentvec elf32_freebsd_sysvec = {
+ SYS_MAXSYSCALL,
+ sysent,
+ 0,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ __elfN(freebsd_fixup),
+ sendsig,
+ sigcode,
+ &szsigcode,
+ NULL,
+ "FreeBSD ELF32",
+ __elfN(coredump),
+ NULL,
+ MINSIGSTKSZ,
+ PAGE_SIZE,
+ VM_MIN_ADDRESS,
+ VM_MAXUSER_ADDRESS,
+ USRSTACK,
+ PS_STRINGS,
+ VM_PROT_ALL,
+ exec_copyout_strings,
+ exec_setregs,
+ NULL
+};
+
+static Elf32_Brandinfo freebsd_brand_info = {
+ ELFOSABI_FREEBSD,
+ EM_MIPS,
+ "FreeBSD",
+ NULL,
+ "/libexec/ld-elf.so.1",
+ &elf32_freebsd_sysvec,
+ NULL,
+ };
+
+SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t) elf32_insert_brand_entry,
+ &freebsd_brand_info);
+
+static Elf32_Brandinfo freebsd_brand_oinfo = {
+ ELFOSABI_FREEBSD,
+ EM_MIPS,
+ "FreeBSD",
+ NULL,
+ "/usr/libexec/ld-elf.so.1",
+ &elf32_freebsd_sysvec,
+ NULL,
+ };
+
+SYSINIT(oelf32, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t) elf32_insert_brand_entry,
+ &freebsd_brand_oinfo);
+
+
+void
+elf32_dump_thread(struct thread *td __unused, void *dst __unused,
+ size_t *off __unused)
+{
+}
+
+/* Process one elf relocation with addend. */
+static int
+elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
+ int type, int local, elf_lookup_fn lookup)
+{
+	Elf_Addr *where = (Elf_Addr *)NULL;
+ Elf_Addr addr;
+ Elf_Addr addend = (Elf_Addr)0;
+ Elf_Word rtype = (Elf_Word)0, symidx;
+ const Elf_Rel *rel;
+ const Elf_Rela *rela;
+
+ switch (type) {
+ case ELF_RELOC_REL:
+ rel = (const Elf_Rel *)data;
+ where = (Elf_Addr *) (relocbase + rel->r_offset);
+ addend = *where;
+ rtype = ELF_R_TYPE(rel->r_info);
+ symidx = ELF_R_SYM(rel->r_info);
+ break;
+ case ELF_RELOC_RELA:
+ rela = (const Elf_Rela *)data;
+ where = (Elf_Addr *) (relocbase + rela->r_offset);
+ addend = rela->r_addend;
+ rtype = ELF_R_TYPE(rela->r_info);
+ symidx = ELF_R_SYM(rela->r_info);
+ break;
+ default:
+ panic("unknown reloc type %d\n", type);
+ }
+
+ if (local) {
+#if 0 /* TBD */
+ if (rtype == R_386_RELATIVE) { /* A + B */
+ addr = relocbase + addend;
+ if (*where != addr)
+ *where = addr;
+ }
+ return (0);
+#endif
+ }
+
+ switch (rtype) {
+
+ case R_MIPS_NONE: /* none */
+ break;
+
+ case R_MIPS_16: /* S + sign-extend(A) */
+ /*
+ * There shouldn't be R_MIPS_16 relocs in kernel objects.
+ */
+ printf("kldload: unexpected R_MIPS_16 relocation\n");
+ return -1;
+ break;
+
+ case R_MIPS_32: /* S + A - P */
+ addr = lookup(lf, symidx, 1);
+ if (addr == 0)
+ return -1;
+ addr += addend;
+ if (*where != addr)
+ *where = addr;
+ break;
+
+ case R_MIPS_REL32: /* A - EA + S */
+ /*
+ * There shouldn't be R_MIPS_REL32 relocs in kernel objects?
+ */
+ printf("kldload: unexpected R_MIPS_REL32 relocation\n");
+ return -1;
+ break;
+
+ case R_MIPS_26: /* ((A << 2) | (P & 0xf0000000) + S) >> 2 */
+ break;
+
+ case R_MIPS_HI16:
+		/* extern/local: ((AHL + S) - (short)(AHL + S)) >> 16 */
+ /* _gp_disp: ((AHL + GP - P) - (short)(AHL + GP - P)) >> 16 */
+ break;
+
+ case R_MIPS_LO16:
+ /* extern/local: AHL + S */
+ /* _gp_disp: AHL + GP - P + 4 */
+ break;
+
+ case R_MIPS_GPREL16:
+		/* extern/local: ((AHL + S) - (short)(AHL + S)) >> 16 */
+ /* _gp_disp: ((AHL + GP - P) - (short)(AHL + GP - P)) >> 16 */
+ break;
+
+ case R_MIPS_LITERAL: /* sign-extend(A) + L */
+ break;
+
+ case R_MIPS_GOT16: /* external: G */
+ /* local: tbd */
+ break;
+
+ case R_MIPS_PC16: /* sign-extend(A) + S - P */
+ break;
+
+ case R_MIPS_CALL16: /* G */
+ break;
+
+ case R_MIPS_GPREL32: /* A + S + GP0 - GP */
+ break;
+
+ case R_MIPS_GOTHI16: /* (G - (short)G) >> 16 + A */
+ break;
+
+ case R_MIPS_GOTLO16: /* G & 0xffff */
+ break;
+
+ case R_MIPS_CALLHI16: /* (G - (short)G) >> 16 + A */
+ break;
+
+ case R_MIPS_CALLLO16: /* G & 0xffff */
+ break;
+
+ default:
+ printf("kldload: unexpected relocation type %d\n",
+ rtype);
+ return (-1);
+ }
+ return(0);
+}
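+
+/*
+ * Illustrative sketch only, not part of this change: how a %hi/%lo pair
+ * such as R_MIPS_HI16/R_MIPS_LO16 composes.  The high half has to compensate
+ * for the sign extension of the low half, i.e. (hi << 16) + (int16_t)lo == v,
+ * which is where the "(x - (short)x) >> 16" form in the comments above
+ * comes from.
+ */
+#if 0
+static __inline void
+mips_split_hi_lo(uint32_t v, uint16_t *hi, uint16_t *lo)
+{
+
+	*lo = v & 0xffff;			/* low half, sign-extended when used */
+	*hi = (v - (int16_t)*lo) >> 16;		/* compensated high half */
+}
+#endif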
+
+int
+elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
+ elf_lookup_fn lookup)
+{
+
+ return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
+}
+
+int
+elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
+ int type, elf_lookup_fn lookup)
+{
+
+ return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
+}
+
+int
+elf_cpu_load_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
+
+int
+elf_cpu_unload_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
diff --git a/sys/mips/mips/exception.S b/sys/mips/mips/exception.S
new file mode 100644
index 0000000..fb7614d
--- /dev/null
+++ b/sys/mips/mips/exception.S
@@ -0,0 +1,1287 @@
+/* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Digital Equipment Corporation and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
+ * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
+ * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
+ * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
+ * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
+ * JNPR: exception.S,v 1.5 2007/01/08 04:58:37 katta
+ * $FreeBSD$
+ */
+
+/*
+ * Contains the code that is executed first at boot time, plus
+ * assembly-language support routines.
+ */
+
+#include "opt_cputype.h"
+#include "opt_ddb.h"
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpuregs.h>
+#include <machine/pte.h>
+
+#include "assym.s"
+
+#if defined(ISA_MIPS32)
+#undef WITH_64BIT_CP0
+#elif defined(ISA_MIPS64)
+#define WITH_64BIT_CP0
+#elif defined(ISA_MIPS3)
+#define WITH_64BIT_CP0
+#else
+#error "Please write the code for this ISA"
+#endif
+
+#ifdef WITH_64BIT_CP0
+#define _SLL dsll
+#define _SRL dsrl
+#define _MFC0 dmfc0
+#define _MTC0 dmtc0
+#define WIRED_SHIFT 34
+#else
+#define _SLL sll
+#define _SRL srl
+#define _MFC0 mfc0
+#define _MTC0 mtc0
+#define WIRED_SHIFT 2
+#endif
+ .set noreorder # Noreorder is default style!
+#if defined(ISA_MIPS32)
+ .set mips32
+#elif defined(ISA_MIPS64)
+ .set mips64
+#elif defined(ISA_MIPS3)
+ .set mips3
+#endif
+
+/*
+ * Assume that we always need nops to escape the CP0 hazard.
+ * TODO: Make hazard delays configurable.  Stuck with 5 cycles at the moment.
+ * For more info on CP0 hazards see Chapter 7 (p.99) of "MIPS32 Architecture
+ * For Programmers Volume III: The MIPS32 Privileged Resource Architecture"
+ */
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+#define HAZARD_DELAY nop;nop;nop;nop;nop;
+
+/*
+ *----------------------------------------------------------------------------
+ *
+ * MipsTLBMiss --
+ *
+ * Vector code for the TLB-miss exception vector 0x80000000.
+ *
+ * This code is copied to the TLB exception vector address to
+ * which the CPU jumps in response to an exception or a TLB miss.
+ * NOTE: This code must be position independent!!!
+ *
+ *
+ */
+
+ .set noat
+VECTOR(MipsTLBMiss, unknown)
+ j _C_LABEL(MipsDoTLBMiss)
+ mfc0 k0, COP_0_BAD_VADDR # get the fault address
+ nop
+VECTOR_END(MipsTLBMiss)
+
+/*
+ *----------------------------------------------------------------------------
+ *
+ * MipsDoTLBMiss --
+ *
+ * This is the real TLB Miss Handler code.
+ * 'segbase' points to the base of the segment table for user processes.
+ *
+ * Don't check for invalid pte's here. We load them as well and
+ * let the processor trap to load the correct value after service.
+ *----------------------------------------------------------------------------
+ */
+MipsDoTLBMiss:
+#ifndef SMP
+ lui k1, %hi(_C_LABEL(pcpup))
+#endif
+ #k0 already has BadVA
+ bltz k0, 1f #02: k0<0 -> 1f (kernel fault)
+ srl k0, k0, SEGSHIFT - 2 #03: k0=seg offset (almost)
+#ifdef SMP
+ GET_CPU_PCPU(k1)
+#else
+ lw k1, %lo(_C_LABEL(pcpup))(k1)
+#endif
+ lw k1, PC_SEGBASE(k1)
+ beqz k1, 2f #05: make sure segbase is not null
+ andi k0, k0, 0x7fc #06: k0=seg offset (mask 0x3)
+ addu k1, k0, k1 #07: k1=seg entry address
+ lw k1, 0(k1) #08: k1=seg entry
+ mfc0 k0, COP_0_BAD_VADDR #09: k0=bad address (again)
+ beq k1, zero, 2f #0a: ==0 -- no page table
+ srl k0, PGSHIFT - 2 #0b: k0=VPN (aka va>>10)
+
+ andi k0, k0, ((NPTEPG/2) - 1) << 3 #0c: k0=page tab offset
+ addu k1, k1, k0 #0d: k1=pte address
+ lw k0, 0(k1) #0e: k0=lo0 pte
+ lw k1, 4(k1) #0f: k1=lo1 pte
+ _SLL k0, k0, WIRED_SHIFT #10: keep bottom 30 bits
+ _SRL k0, k0, WIRED_SHIFT #11: keep bottom 30 bits
+ _MTC0 k0, COP_0_TLB_LO0 #12: lo0 is loaded
+ _SLL k1, k1, WIRED_SHIFT #13: keep bottom 30 bits
+ _SRL k1, k1, WIRED_SHIFT #14: keep bottom 30 bits
+ _MTC0 k1, COP_0_TLB_LO1 #15: lo1 is loaded
+ HAZARD_DELAY
+ tlbwr #1a: write to tlb
+ HAZARD_DELAY
+	eret					#1f: return from exception
+1: j _C_LABEL(MipsTLBMissException) #20: kernel exception
+ nop #21: branch delay slot
+2: j SlowFault #22: no page table present
+ nop #23: branch delay slot
+
+ .set at
+
+/*
+ * This code is copied to the general exception vector address to
+ * handle all exceptions except RESET and TLBMiss.
+ * NOTE: This code must be position independent!!!
+ */
+VECTOR(MipsException, unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+ .set noat
+ mfc0 k0, COP_0_STATUS_REG # Get the status register
+ mfc0 k1, COP_0_CAUSE_REG # Get the cause register value.
+ and k0, k0, SR_KSU_USER # test for user mode
+ # sneaky but the bits are
+ # with us........
+ sll k0, k0, 3 # shift user bit for cause index
+ and k1, k1, CR_EXC_CODE # Mask out the cause bits.
+ or k1, k1, k0 # change index to user table
+1:
+ la k0, _C_LABEL(machExceptionTable) # get base of the jump table
+ addu k0, k0, k1 # Get the address of the
+ # function entry. Note that
+ # the cause is already
+ # shifted left by 2 bits so
+ # we dont have to shift.
+ lw k0, 0(k0) # Get the function address
+ nop
+ j k0 # Jump to the function.
+ nop
+ .set at
+VECTOR_END(MipsException)
+
+/*
+ * We couldn't find a TLB entry.
+ * Find out what mode we came from and call the appropriate handler.
+ */
+SlowFault:
+ .set noat
+ mfc0 k0, COP_0_STATUS_REG
+ nop
+ and k0, k0, SR_KSU_USER
+ bne k0, zero, _C_LABEL(MipsUserGenException)
+ nop
+ .set at
+/*
+ * Fall through ...
+ */
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsKernGenException --
+ *
+ * Handle an exception from kernel mode.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+#if defined(ISA_MIPS32)
+#define STORE sw /* 32 bit mode regsave instruction */
+#define LOAD lw /* 32 bit mode regload instruction */
+#define RSIZE 4 /* 32 bit mode register size */
+#elif defined(ISA_MIPS64)
+#define STORE sd /* 64 bit mode regsave instruction */
+#define LOAD ld /* 64 bit mode regload instruction */
+#define RSIZE 8 /* 64 bit mode register size */
+#else
+#error "Please write code for this isa."
+#endif
+
+#define SAVE_REG(reg, offs, base) \
+ STORE reg, STAND_ARG_SIZE + (RSIZE * offs) (base)
+
+#ifdef TARGET_OCTEON
+#define CLEAR_STATUS \
+ mfc0 a0, COP_0_STATUS_REG ;\
+ li a2, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX) ; \
+ or a0, a0, a2 ; \
+ li a2, ~(MIPS_SR_INT_IE|MIPS_SR_EXL) ; \
+ and a0, a0, a2 ; \
+ mtc0 a0, COP_0_STATUS_REG
+#else
+#define CLEAR_STATUS \
+ mfc0 a0, COP_0_STATUS_REG ;\
+ li a2, ~(MIPS_SR_INT_IE|MIPS_SR_EXL) ; \
+ and a0, a0, a2 ; \
+ mtc0 a0, COP_0_STATUS_REG
+#endif
+
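+/*
+ * SAVE_CPU builds the exception frame on the kernel stack: it stores the
+ * general registers, LO/HI and the relevant CP0 registers, masks off
+ * interrupt enable and the exception level via CLEAR_STATUS, and leaves
+ * a0 pointing at the saved frame for the C handler.
+ */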
+#define SAVE_CPU \
+ SAVE_REG(AT, AST, sp) ;\
+ .set at ; \
+ SAVE_REG(v0, V0, sp) ;\
+ SAVE_REG(v1, V1, sp) ;\
+ SAVE_REG(a0, A0, sp) ;\
+ SAVE_REG(a1, A1, sp) ;\
+ SAVE_REG(a2, A2, sp) ;\
+ SAVE_REG(a3, A3, sp) ;\
+ SAVE_REG(t0, T0, sp) ;\
+ SAVE_REG(t1, T1, sp) ;\
+ SAVE_REG(t2, T2, sp) ;\
+ SAVE_REG(t3, T3, sp) ;\
+ SAVE_REG(t4, T4, sp) ;\
+ SAVE_REG(t5, T5, sp) ;\
+ SAVE_REG(t6, T6, sp) ;\
+ SAVE_REG(t7, T7, sp) ;\
+ SAVE_REG(t8, T8, sp) ;\
+ SAVE_REG(t9, T9, sp) ;\
+ SAVE_REG(gp, GP, sp) ;\
+ SAVE_REG(s0, S0, sp) ;\
+ SAVE_REG(s1, S1, sp) ;\
+ SAVE_REG(s2, S2, sp) ;\
+ SAVE_REG(s3, S3, sp) ;\
+ SAVE_REG(s4, S4, sp) ;\
+ SAVE_REG(s5, S5, sp) ;\
+ SAVE_REG(s6, S6, sp) ;\
+ SAVE_REG(s7, S7, sp) ;\
+ SAVE_REG(s8, S8, sp) ;\
+ mflo v0 ;\
+ mfhi v1 ;\
+ mfc0 a0, COP_0_STATUS_REG ;\
+ mfc0 a1, COP_0_CAUSE_REG ;\
+ mfc0 a2, COP_0_BAD_VADDR ;\
+ mfc0 a3, COP_0_EXC_PC ;\
+ SAVE_REG(v0, MULLO, sp) ;\
+ SAVE_REG(v1, MULHI, sp) ;\
+ SAVE_REG(a0, SR, sp) ;\
+ SAVE_REG(a1, CAUSE, sp) ;\
+ SAVE_REG(ra, RA, sp) ;\
+ SAVE_REG(a2, BADVADDR, sp) ;\
+ SAVE_REG(a3, PC, sp) ;\
+ addu v0, sp, KERN_EXC_FRAME_SIZE ;\
+ SAVE_REG(v0, SP, sp) ;\
+ CLEAR_STATUS ;\
+ addu a0, sp, STAND_ARG_SIZE ;\
+ ITLBNOPFIX
+
+#define RESTORE_REG(reg, offs, base) \
+ LOAD reg, STAND_ARG_SIZE + (RSIZE * offs) (base)
+
+#define RESTORE_CPU \
+ mtc0 zero,COP_0_STATUS_REG ;\
+ RESTORE_REG(a0, SR, sp) ;\
+ RESTORE_REG(t0, MULLO, sp) ;\
+ RESTORE_REG(t1, MULHI, sp) ;\
+ mtc0 a0, COP_0_STATUS_REG ;\
+ mtlo t0 ;\
+ mthi t1 ;\
+ _MTC0 v0, COP_0_EXC_PC ;\
+ .set noat ; \
+ RESTORE_REG(AT, AST, sp) ;\
+ RESTORE_REG(v0, V0, sp) ;\
+ RESTORE_REG(v1, V1, sp) ;\
+ RESTORE_REG(a0, A0, sp) ;\
+ RESTORE_REG(a1, A1, sp) ;\
+ RESTORE_REG(a2, A2, sp) ;\
+ RESTORE_REG(a3, A3, sp) ;\
+ RESTORE_REG(t0, T0, sp) ;\
+ RESTORE_REG(t1, T1, sp) ;\
+ RESTORE_REG(t2, T2, sp) ;\
+ RESTORE_REG(t3, T3, sp) ;\
+ RESTORE_REG(t4, T4, sp) ;\
+ RESTORE_REG(t5, T5, sp) ;\
+ RESTORE_REG(t6, T6, sp) ;\
+ RESTORE_REG(t7, T7, sp) ;\
+ RESTORE_REG(t8, T8, sp) ;\
+ RESTORE_REG(t9, T9, sp) ;\
+ RESTORE_REG(s0, S0, sp) ;\
+ RESTORE_REG(s1, S1, sp) ;\
+ RESTORE_REG(s2, S2, sp) ;\
+ RESTORE_REG(s3, S3, sp) ;\
+ RESTORE_REG(s4, S4, sp) ;\
+ RESTORE_REG(s5, S5, sp) ;\
+ RESTORE_REG(s6, S6, sp) ;\
+ RESTORE_REG(s7, S7, sp) ;\
+ RESTORE_REG(s8, S8, sp) ;\
+ RESTORE_REG(gp, GP, sp) ;\
+ RESTORE_REG(ra, RA, sp) ;\
+ addu sp, sp, KERN_EXC_FRAME_SIZE
+
+
+/*
+ * The kernel exception stack contains 18 saved general registers,
+ * the status register and the multiply lo and high registers.
+ * In addition, we set this up for linkage conventions.
+ */
+#define KERN_REG_SIZE (NUMSAVEREGS * RSIZE)
+#define KERN_EXC_FRAME_SIZE (STAND_FRAME_SIZE + KERN_REG_SIZE + 16)
+
+NNON_LEAF(MipsKernGenException, KERN_EXC_FRAME_SIZE, ra)
+ .set noat
+ subu sp, sp, KERN_EXC_FRAME_SIZE
+ .mask 0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE)
+/*
+ * Save CPU state, building 'frame'.
+ */
+ SAVE_CPU
+/*
+ * Call the exception handler. a0 points at the saved frame.
+ */
+ la gp, _C_LABEL(_gp)
+ la k0, _C_LABEL(trap)
+ jalr k0
+ sw a3, STAND_RA_OFFSET + KERN_REG_SIZE(sp) # for debugging
+
+ RESTORE_CPU # v0 contains the return address.
+ sync
+ eret
+ .set at
+END(MipsKernGenException)
+
+
+#define SAVE_U_PCB_REG(reg, offs, base) \
+ STORE reg, U_PCB_REGS + (RSIZE * offs) (base)
+
+#define RESTORE_U_PCB_REG(reg, offs, base) \
+ LOAD reg, U_PCB_REGS + (RSIZE * offs) (base)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsUserGenException --
+ *
+ * Handle an exception from user mode.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+NNON_LEAF(MipsUserGenException, STAND_FRAME_SIZE, ra)
+ .set noat
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+/*
+ * Save all of the registers except for the kernel temporaries in u.u_pcb.
+ */
+ GET_CPU_PCPU(k1)
+ lw k1, PC_CURPCB(k1)
+ SAVE_U_PCB_REG(AT, AST, k1)
+ .set at
+ SAVE_U_PCB_REG(v0, V0, k1)
+ SAVE_U_PCB_REG(v1, V1, k1)
+ SAVE_U_PCB_REG(a0, A0, k1)
+ mflo v0
+ SAVE_U_PCB_REG(a1, A1, k1)
+ SAVE_U_PCB_REG(a2, A2, k1)
+ SAVE_U_PCB_REG(a3, A3, k1)
+ SAVE_U_PCB_REG(t0, T0, k1)
+ mfhi v1
+ SAVE_U_PCB_REG(t1, T1, k1)
+ SAVE_U_PCB_REG(t2, T2, k1)
+ SAVE_U_PCB_REG(t3, T3, k1)
+ SAVE_U_PCB_REG(t4, T4, k1)
+ mfc0 a0, COP_0_STATUS_REG # First arg is the status reg.
+ SAVE_U_PCB_REG(t5, T5, k1)
+ SAVE_U_PCB_REG(t6, T6, k1)
+ SAVE_U_PCB_REG(t7, T7, k1)
+ SAVE_U_PCB_REG(s0, S0, k1)
+ mfc0 a1, COP_0_CAUSE_REG # Second arg is the cause reg.
+ SAVE_U_PCB_REG(s1, S1, k1)
+ SAVE_U_PCB_REG(s2, S2, k1)
+ SAVE_U_PCB_REG(s3, S3, k1)
+ SAVE_U_PCB_REG(s4, S4, k1)
+ mfc0 a2, COP_0_BAD_VADDR # Third arg is the fault addr
+ SAVE_U_PCB_REG(s5, S5, k1)
+ SAVE_U_PCB_REG(s6, S6, k1)
+ SAVE_U_PCB_REG(s7, S7, k1)
+ SAVE_U_PCB_REG(t8, T8, k1)
+ mfc0 a3, COP_0_EXC_PC # Fourth arg is the pc.
+ SAVE_U_PCB_REG(t9, T9, k1)
+ SAVE_U_PCB_REG(gp, GP, k1)
+ SAVE_U_PCB_REG(sp, SP, k1)
+ SAVE_U_PCB_REG(s8, S8, k1)
+ subu sp, k1, STAND_FRAME_SIZE # switch to kernel SP
+ SAVE_U_PCB_REG(ra, RA, k1)
+ SAVE_U_PCB_REG(v0, MULLO, k1)
+ SAVE_U_PCB_REG(v1, MULHI, k1)
+ SAVE_U_PCB_REG(a0, SR, k1)
+ SAVE_U_PCB_REG(a1, CAUSE, k1)
+ SAVE_U_PCB_REG(a2, BADVADDR, k1)
+ SAVE_U_PCB_REG(a3, PC, k1)
+ sw a3, STAND_RA_OFFSET(sp) # for debugging
+ la gp, _C_LABEL(_gp) # switch to kernel GP
+# Turn off fpu and enter kernel mode
+ and t0, a0, ~(SR_COP_1_BIT | SR_EXL | SR_KSU_MASK | SR_INT_ENAB)
+#ifdef TARGET_OCTEON
+ or t0, t0, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
+#endif
+ mtc0 t0, COP_0_STATUS_REG
+ addu a0, k1, U_PCB_REGS
+ ITLBNOPFIX
+
+/*
+ * Call the exception handler.
+ */
+ la k0, _C_LABEL(trap)
+ jalr k0
+ nop
+/*
+ * Restore user registers and return.
+ * First disable interrupts and set the exception level.
+ */
+ DO_AST
+
+ mtc0 zero, COP_0_STATUS_REG # disable int
+ ITLBNOPFIX
+ li v0, SR_EXL
+	mtc0	v0, COP_0_STATUS_REG	# set exception level
+ ITLBNOPFIX
+
+/*
+ * The use of k1 for storing the PCB pointer must be done only
+ * after interrupts are disabled. Otherwise it will get overwritten
+ * by the interrupt code.
+ */
+ GET_CPU_PCPU(k1)
+ lw k1, PC_CURPCB(k1)
+
+ RESTORE_U_PCB_REG(t0, MULLO, k1)
+ RESTORE_U_PCB_REG(t1, MULHI, k1)
+ mtlo t0
+ mthi t1
+ RESTORE_U_PCB_REG(a0, PC, k1)
+ RESTORE_U_PCB_REG(v0, V0, k1)
+ _MTC0 a0, COP_0_EXC_PC # set return address
+ RESTORE_U_PCB_REG(v1, V1, k1)
+ RESTORE_U_PCB_REG(a0, A0, k1)
+ RESTORE_U_PCB_REG(a1, A1, k1)
+ RESTORE_U_PCB_REG(a2, A2, k1)
+ RESTORE_U_PCB_REG(a3, A3, k1)
+ RESTORE_U_PCB_REG(t0, T0, k1)
+ RESTORE_U_PCB_REG(t1, T1, k1)
+ RESTORE_U_PCB_REG(t2, T2, k1)
+ RESTORE_U_PCB_REG(t3, T3, k1)
+ RESTORE_U_PCB_REG(t4, T4, k1)
+ RESTORE_U_PCB_REG(t5, T5, k1)
+ RESTORE_U_PCB_REG(t6, T6, k1)
+ RESTORE_U_PCB_REG(t7, T7, k1)
+ RESTORE_U_PCB_REG(s0, S0, k1)
+ RESTORE_U_PCB_REG(s1, S1, k1)
+ RESTORE_U_PCB_REG(s2, S2, k1)
+ RESTORE_U_PCB_REG(s3, S3, k1)
+ RESTORE_U_PCB_REG(s4, S4, k1)
+ RESTORE_U_PCB_REG(s5, S5, k1)
+ RESTORE_U_PCB_REG(s6, S6, k1)
+ RESTORE_U_PCB_REG(s7, S7, k1)
+ RESTORE_U_PCB_REG(t8, T8, k1)
+ RESTORE_U_PCB_REG(t9, T9, k1)
+ RESTORE_U_PCB_REG(gp, GP, k1)
+ RESTORE_U_PCB_REG(sp, SP, k1)
+ RESTORE_U_PCB_REG(k0, SR, k1)
+ RESTORE_U_PCB_REG(s8, S8, k1)
+ RESTORE_U_PCB_REG(ra, RA, k1)
+#ifdef TARGET_OCTEON
+ and k0, k0, ~(MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
+#endif
+ or k0, k0, (MIPS_SR_INT_IE)
+ .set noat
+ RESTORE_U_PCB_REG(AT, AST, k1)
+
+/*
+ * The restoration of the user SR must be done only after
+ * k1 is no longer needed. Otherwise, k1 will get clobbered after
+ * interrupts are enabled.
+ */
+	mtc0	k0, COP_0_STATUS_REG	# still exception level
+ ITLBNOPFIX
+ sync
+ eret
+ .set at
+END(MipsUserGenException)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsKernIntr --
+ *
+ * Handle an interrupt from kernel mode.
+ * Interrupts use the standard kernel stack.
+ * switch_exit sets up a kernel stack after exit so interrupts won't fail.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+
+NNON_LEAF(MipsKernIntr, KERN_EXC_FRAME_SIZE, ra)
+ .set noat
+ subu sp, sp, KERN_EXC_FRAME_SIZE
+ .mask 0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE)
+/*
+ * Save the relevant kernel registers onto the stack.
+ */
+ SAVE_CPU
+
+/*
+ * Call the interrupt handler.
+ */
+ la gp, _C_LABEL(_gp)
+ addu a0, sp, STAND_ARG_SIZE
+ la k0, _C_LABEL(cpu_intr)
+ jalr k0
+ sw a3, STAND_RA_OFFSET + KERN_REG_SIZE(sp)
+ /* Why no AST processing here? */
+/*
+ * Restore registers and return from the interrupt.
+ */
+ lw v0, STAND_RA_OFFSET + KERN_REG_SIZE(sp)
+ RESTORE_CPU
+ sync
+ eret
+ .set at
+END(MipsKernIntr)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsUserIntr --
+ *
+ * Handle an interrupt from user mode.
+ * Note: we save minimal state in the u.u_pcb struct and use the standard
+ * kernel stack since there has to be a u page if we came from user mode.
+ * If there is a pending software interrupt, then save the remaining state
+ * and call softintr(). This is all because if we call switch() inside
+ * interrupt(), not all the user registers have been saved in u.u_pcb.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+NNON_LEAF(MipsUserIntr, STAND_FRAME_SIZE, ra)
+ .set noat
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+/*
+ * Save the relevant user registers into the u.u_pcb struct.
+ * We don't need to save s0 - s8 because the compiler does it for us.
+ */
+ GET_CPU_PCPU(k1)
+ lw k1, PC_CURPCB(k1)
+ SAVE_U_PCB_REG(AT, AST, k1)
+ .set at
+ SAVE_U_PCB_REG(v0, V0, k1)
+ SAVE_U_PCB_REG(v1, V1, k1)
+ SAVE_U_PCB_REG(a0, A0, k1)
+ SAVE_U_PCB_REG(a1, A1, k1)
+ SAVE_U_PCB_REG(a2, A2, k1)
+ SAVE_U_PCB_REG(a3, A3, k1)
+ SAVE_U_PCB_REG(t0, T0, k1)
+ SAVE_U_PCB_REG(t1, T1, k1)
+ SAVE_U_PCB_REG(t2, T2, k1)
+ SAVE_U_PCB_REG(t3, T3, k1)
+ SAVE_U_PCB_REG(t4, T4, k1)
+ SAVE_U_PCB_REG(t5, T5, k1)
+ SAVE_U_PCB_REG(t6, T6, k1)
+ SAVE_U_PCB_REG(t7, T7, k1)
+ SAVE_U_PCB_REG(t8, T8, k1)
+ SAVE_U_PCB_REG(t9, T9, k1)
+ SAVE_U_PCB_REG(gp, GP, k1)
+ SAVE_U_PCB_REG(sp, SP, k1)
+ SAVE_U_PCB_REG(ra, RA, k1)
+/*
+ * save remaining user state in u.u_pcb.
+ */
+ SAVE_U_PCB_REG(s0, S0, k1)
+ SAVE_U_PCB_REG(s1, S1, k1)
+ SAVE_U_PCB_REG(s2, S2, k1)
+ SAVE_U_PCB_REG(s3, S3, k1)
+ SAVE_U_PCB_REG(s4, S4, k1)
+ SAVE_U_PCB_REG(s5, S5, k1)
+ SAVE_U_PCB_REG(s6, S6, k1)
+ SAVE_U_PCB_REG(s7, S7, k1)
+ SAVE_U_PCB_REG(s8, S8, k1)
+
+ mflo v0 # get lo/hi late to avoid stall
+ mfhi v1
+ mfc0 a0, COP_0_STATUS_REG
+ mfc0 a1, COP_0_CAUSE_REG
+ mfc0 a3, COP_0_EXC_PC
+ SAVE_U_PCB_REG(v0, MULLO, k1)
+ SAVE_U_PCB_REG(v1, MULHI, k1)
+ SAVE_U_PCB_REG(a0, SR, k1)
+ SAVE_U_PCB_REG(a1, CAUSE, k1)
+ SAVE_U_PCB_REG(a3, PC, k1) # PC in a3, note used later!
+ subu sp, k1, STAND_FRAME_SIZE # switch to kernel SP
+ la gp, _C_LABEL(_gp) # switch to kernel GP
+
+# Turn off fpu, disable interrupts, set kernel mode, clear exception level.
+ and t0, a0, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
+#ifdef TARGET_OCTEON
+ or t0, t0, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
+#endif
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ addu a0, k1, U_PCB_REGS
+/*
+ * Call the interrupt handler.
+ */
+ la k0, _C_LABEL(cpu_intr)
+ jalr k0
+ sw a3, STAND_RA_OFFSET(sp) # for debugging
+/*
+ * Since interrupts are enabled at this point, we use a1 instead of
+ * k0 or k1 to store the PCB pointer. This is because k0 and k1
+ * are not preserved across interrupts.  ** RRS - And how did they
+ * get enabled?  cpu_intr clears the cause register but it does
+ * not touch the SR as far as I can see, so interrupts are still
+ * disabled.
+ */
+ DO_AST
+
+/*
+ * Restore user registers and return. NOTE: interrupts are enabled.
+ */
+
+ mtc0 zero, COP_0_STATUS_REG
+ ITLBNOPFIX
+ li v0, SR_EXL
+	mtc0	v0, COP_0_STATUS_REG	# set exception level bit.
+ ITLBNOPFIX
+
+ GET_CPU_PCPU(k1)
+ lw a1, PC_CURPCB(k1)
+ RESTORE_U_PCB_REG(s0, S0, k1)
+ RESTORE_U_PCB_REG(s1, S1, k1)
+ RESTORE_U_PCB_REG(s2, S2, k1)
+ RESTORE_U_PCB_REG(s3, S3, k1)
+ RESTORE_U_PCB_REG(s4, S4, k1)
+ RESTORE_U_PCB_REG(s5, S5, k1)
+ RESTORE_U_PCB_REG(s6, S6, k1)
+ RESTORE_U_PCB_REG(s7, S7, k1)
+ RESTORE_U_PCB_REG(s8, S8, k1)
+ RESTORE_U_PCB_REG(t0, MULLO, k1)
+ RESTORE_U_PCB_REG(t1, MULHI, k1)
+ RESTORE_U_PCB_REG(t2, PC, k1)
+ mtlo t0
+ mthi t1
+ _MTC0 t2, COP_0_EXC_PC # set return address
+ RESTORE_U_PCB_REG(v0, V0, k1)
+ RESTORE_U_PCB_REG(v1, V1, k1)
+ RESTORE_U_PCB_REG(a0, A0, k1)
+ RESTORE_U_PCB_REG(a1, A1, k1)
+ RESTORE_U_PCB_REG(a2, A2, k1)
+ RESTORE_U_PCB_REG(a3, A3, k1)
+ RESTORE_U_PCB_REG(t0, T0, k1)
+ RESTORE_U_PCB_REG(t1, T1, k1)
+ RESTORE_U_PCB_REG(t2, T2, k1)
+ RESTORE_U_PCB_REG(t3, T3, k1)
+ RESTORE_U_PCB_REG(t4, T4, k1)
+ RESTORE_U_PCB_REG(t5, T5, k1)
+ RESTORE_U_PCB_REG(t6, T6, k1)
+ RESTORE_U_PCB_REG(t7, T7, k1)
+ RESTORE_U_PCB_REG(t8, T8, k1)
+ RESTORE_U_PCB_REG(t9, T9, k1)
+ RESTORE_U_PCB_REG(gp, GP, k1)
+ RESTORE_U_PCB_REG(k0, SR, k1)
+ RESTORE_U_PCB_REG(sp, SP, k1)
+ RESTORE_U_PCB_REG(ra, RA, k1)
+#ifdef TARGET_OCTEON
+ and k0, k0, ~(MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
+#endif
+ or k0, k0, (MIPS_SR_INT_IE|SR_EXL)
+ .set noat
+ RESTORE_U_PCB_REG(AT, AST, k1)
+
+/*
+ * The restoration of the user SR must be done only after
+ * k1 is no longer needed. Otherwise, k1 will get clobbered after
+ * interrupts are enabled.
+ */
+ mtc0 k0, COP_0_STATUS_REG # SR with EXL set.
+ ITLBNOPFIX
+ sync
+ eret
+ .set at
+END(MipsUserIntr)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsTLBInvalidException --
+ *
+ * Handle a TLB invalid exception.
+ *	The BadVAddr, Context, and EntryHi registers contain the failed
+ * virtual address.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+NLEAF(MipsTLBInvalidException)
+ .set noat
+ mfc0 k0, COP_0_STATUS_REG
+ nop
+ and k0, k0, SR_KSU_USER
+ bne k0, zero, _C_LABEL(MipsUserTLBInvalidException)
+ nop
+ .set at
+END(MipsTLBInvalidException)
+/*
+ * Fall through ...
+ */
+
+NLEAF(MipsKernTLBInvalidException)
+ .set noat
+ mfc0 k0, COP_0_BAD_VADDR # get the fault address
+
+
+ li k1, VM_MAXUSER_ADDRESS
+ sltu k1, k0, k1
+ beqz k1, 1f
+ nop
+#ifdef SMP
+ GET_CPU_PCPU(k1)
+#else
+ lui k1, %hi(_C_LABEL(pcpup))
+ lw k1, %lo(_C_LABEL(pcpup))(k1)
+#endif
+ lw k1, PC_SEGBASE(k1) # works for single cpu????
+ beqz k1, _C_LABEL(MipsKernGenException) # seg tab is null
+ nop
+ b 2f
+ nop
+1:
+ li k1, (VM_MAX_KERNEL_ADDRESS)
+ bgez k0, _C_LABEL(MipsKernGenException) # full trap processing
+ sltu k1, k1, k0 # check fault address against
+ bnez k1, _C_LABEL(MipsKernGenException) # kernel_segmap upper bound
+ lui k1, %hi(_C_LABEL(kernel_segmap)) # k1=hi of segbase
+ lw k1, %lo(_C_LABEL(kernel_segmap))(k1) # k1=segment tab base
+ beqz k1, _C_LABEL(MipsKernGenException) # seg tab is null
+2:
+ srl k0, 20 # k0=seg offset (almost)
+ andi k0, k0, 0xffc # k0=seg offset (mask 0x3)
+ addu k1, k0, k1 # k1=seg entry address
+ lw k1, 0(k1) # k1=seg entry
+ mfc0 k0, COP_0_BAD_VADDR # k0=bad address (again)
+ beq k1, zero, _C_LABEL(MipsKernGenException) # ==0 -- no page table
+ srl k0, k0, PGSHIFT-2
+ andi k0, k0, 0xffc # compute offset from index
+ tlbp # Probe the invalid entry
+ addu k1, k1, k0
+ and k0, k0, 4 # check even/odd page
+ nop # required for QED 5230
+ bne k0, zero, KernTLBIOdd
+ nop
+
+ mfc0 k0, COP_0_TLB_INDEX
+ nop
+ bltz k0, sys_stk_chk
+
+ sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
+ bne k0, zero, sys_stk_chk
+ lw k0, 0(k1) # get PTE entry
+
+ _SLL k0, k0, WIRED_SHIFT # get rid of "wired" bit
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO0 # load PTE entry
+ and k0, k0, PTE_V # check for valid entry
+ nop # required for QED5230
+ beq k0, zero, _C_LABEL(MipsKernGenException) # PTE invalid
+ lw k0, 4(k1) # get odd PTE entry
+ _SLL k0, k0, WIRED_SHIFT
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO1 # load PTE entry
+ HAZARD_DELAY
+ tlbwi # write TLB
+ HAZARD_DELAY
+ eret
+
+KernTLBIOdd:
+ mfc0 k0, COP_0_TLB_INDEX
+ nop
+ bltz k0, sys_stk_chk
+
+ sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
+ bne k0, zero, sys_stk_chk
+ lw k0, 0(k1) # get PTE entry
+
+ _SLL k0, k0, WIRED_SHIFT # get rid of wired bit
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO1 # save PTE entry
+ and k0, k0, PTE_V # check for valid entry
+ nop # required for QED5230
+ beq k0, zero, _C_LABEL(MipsKernGenException) # PTE invalid
+ lw k0, -4(k1) # get even PTE entry
+ _SLL k0, k0, WIRED_SHIFT
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO0 # save PTE entry
+ HAZARD_DELAY
+ tlbwi # update TLB
+ HAZARD_DELAY
+ eret
+
+ .set at
+END(MipsKernTLBInvalidException)
+
+
+NLEAF(MipsUserTLBInvalidException)
+ .set noat
+ mfc0 k0, COP_0_BAD_VADDR # get the fault address
+
+ li k1, VM_MAXUSER_ADDRESS
+ sltu k1, k0, k1
+ beqz k1, _C_LABEL(MipsUserGenException)
+ nop
+#ifdef SMP
+ GET_CPU_PCPU(k1)
+#else
+ lui k1, %hi(_C_LABEL(pcpup))
+ lw k1, %lo(_C_LABEL(pcpup))(k1)
+#endif
+ lw k1, PC_SEGBASE(k1) # works for single cpu????
+ beqz k1, _C_LABEL(MipsUserGenException) # seg tab is null
+ nop
+2:
+ srl k0, 20 # k0=seg offset (almost)
+ andi k0, k0, 0xffc # k0=seg offset (mask 0x3)
+ addu k1, k0, k1 # k1=seg entry address
+ lw k1, 0(k1) # k1=seg entry
+ mfc0 k0, COP_0_BAD_VADDR # k0=bad address (again)
+ beq k1, zero, _C_LABEL(MipsUserGenException) # ==0 -- no page table
+ srl k0, k0, PGSHIFT-2
+ andi k0, k0, 0xffc # compute offset from index
+ tlbp # Probe the invalid entry
+ addu k1, k1, k0
+ and k0, k0, 4 # check even/odd page
+ nop # required for QED 5230
+ bne k0, zero, UserTLBIOdd
+ nop
+
+ mfc0 k0, COP_0_TLB_INDEX
+ nop
+ bltz k0, _C_LABEL(MipsUserGenException)
+
+ sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
+ bne k0, zero, _C_LABEL(MipsUserGenException)
+ lw k0, 0(k1) # get PTE entry
+
+ _SLL k0, k0, WIRED_SHIFT # get rid of "wired" bit
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO0 # load PTE entry
+ and k0, k0, PTE_V # check for valid entry
+ nop # required for QED5230
+ beq k0, zero, _C_LABEL(MipsUserGenException) # PTE invalid
+ lw k0, 4(k1) # get odd PTE entry
+ _SLL k0, k0, WIRED_SHIFT
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO1 # load PTE entry
+ HAZARD_DELAY
+ tlbwi # write TLB
+ HAZARD_DELAY
+ eret
+
+UserTLBIOdd:
+ mfc0 k0, COP_0_TLB_INDEX
+ nop
+ bltz k0, _C_LABEL(MipsUserGenException)
+ sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
+
+ bne k0, zero, _C_LABEL(MipsUserGenException)
+ lw k0, 0(k1) # get PTE entry
+
+ _SLL k0, k0, WIRED_SHIFT # get rid of wired bit
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO1 # save PTE entry
+ and k0, k0, PTE_V # check for valid entry
+ nop # required for QED5230
+ beq k0, zero, _C_LABEL(MipsUserGenException) # PTE invalid
+ lw k0, -4(k1) # get even PTE entry
+ _SLL k0, k0, WIRED_SHIFT
+ _SRL k0, k0, WIRED_SHIFT
+ _MTC0 k0, COP_0_TLB_LO0 # save PTE entry
+ HAZARD_DELAY
+ tlbwi # update TLB
+ HAZARD_DELAY
+ eret
+
+ .set at
+END(MipsUserTLBInvalidException)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsTLBMissException --
+ *
+ * Handle a TLB miss exception from kernel mode in kernel space.
+ *	The BadVAddr, Context, and EntryHi registers contain the failed
+ * virtual address.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+NLEAF(MipsTLBMissException)
+ .set noat
+ mfc0 k0, COP_0_BAD_VADDR # k0=bad address
+ li k1, (VM_MAX_KERNEL_ADDRESS) # check fault address against
+ sltu k1, k1, k0 # upper bound of kernel_segmap
+ bnez k1, _C_LABEL(MipsKernGenException) # out of bound
+ lui k1, %hi(_C_LABEL(kernel_segmap)) # k1=hi of segbase
+ srl k0, 20 # k0=seg offset (almost)
+ lw k1, %lo(_C_LABEL(kernel_segmap))(k1) # k1=segment tab base
+ beq k1, zero, _C_LABEL(MipsKernGenException) # ==0 -- no seg tab
+ andi k0, k0, 0xffc # k0=seg offset (mask 0x3)
+ addu k1, k0, k1 # k1=seg entry address
+ lw k1, 0(k1) # k1=seg entry
+ mfc0 k0, COP_0_BAD_VADDR # k0=bad address (again)
+ beq k1, zero, _C_LABEL(MipsKernGenException) # ==0 -- no page table
+ srl k0, 10 # k0=VPN (aka va>>10)
+ andi k0, k0, 0xff8 # k0=page tab offset
+ addu k1, k1, k0 # k1=pte address
+ lw k0, 0(k1) # k0=lo0 pte
+ lw k1, 4(k1) # k1=lo1 pte
+ _SLL k0, WIRED_SHIFT # chop bits [31..30]
+ _SRL k0, WIRED_SHIFT # chop bits [31..30]
+ _MTC0 k0, COP_0_TLB_LO0 # lo0 is loaded
+ _SLL k1, WIRED_SHIFT # chop bits [31..30]
+ _SRL k1, WIRED_SHIFT # chop bits [31..30]
+ _MTC0 k1, COP_0_TLB_LO1 # lo1 is loaded
+
+ HAZARD_DELAY
+ tlbwr # write to tlb
+ HAZARD_DELAY
+ eret # return from exception
+
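+/*
+ * Kernel stack overflow: if the faulting sp lies below the base of the
+ * current thread's kernel stack, switch to a known-good stack below _start,
+ * print some diagnostics, and either spin after dumping trap/TLB state
+ * (DDB, !SMP) or panic.
+ */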
+sys_stk_chk:
+ GET_CPU_PCPU(k0)
+ lw k0, PC_CURTHREAD(k0)
+ lw k0, TD_REALKSTACK(k0)
+ sltu k0, sp, k0 # check for stack overflow
+ beqz k0, _C_LABEL(MipsKernGenException) # not stack overflow
+ nop
+
+# stack overflow
+ la a0, _C_LABEL(_start) - START_FRAME - 8 # set sp to a valid place
+ sw sp, 24(a0)
+ move sp, a0
+ la a0, 1f
+ mfc0 a2, COP_0_STATUS_REG
+ mfc0 a3, COP_0_CAUSE_REG
+ _MFC0 a1, COP_0_EXC_PC
+ sw a2, 16(sp)
+ sw a3, 20(sp)
+ move a2, ra
+ la k0, _C_LABEL(printf)
+ jalr k0
+ mfc0 a3, COP_0_BAD_VADDR
+
+ la sp, _C_LABEL(_start) - START_FRAME # set sp to a valid place
+
+#if !defined(SMP) && defined(DDB)
+ la a0, 2f
+ la k0, _C_LABEL(trapDump)
+ jalr k0
+ nop
+
+ li a0, 0
+ lw a1, _C_LABEL(num_tlbentries)
+ la k0, _C_LABEL(db_dump_tlb)
+ jalr k0
+ addu a1, -1
+
+3:
+ b 3b
+ nop
+#endif
+
+ PANIC("kernel stack overflow")
+
+ .data
+ .globl lastktlbmiss
+lastktlbmiss:
+ .word 0
+lastktlbmisspc:
+ .word 0
+lastutlbmiss:
+ .word 0
+lastutlbmisspc:
+ .word 0
+
+1:
+ .asciiz "ktlbmiss: PC %x RA %x ADR %x\nSR %x CR %x SP %x\n"
+2:
+ .asciiz "stack ovf"
+ .text
+
+ .set at
+END(MipsTLBMissException)
+
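For reference, the refill above amounts to a two-level table walk: bound-check the faulting address, index kernel_segmap by segment, index the per-segment page table by virtual page number, and load the even/odd PTE pair into TLB_LO0/TLB_LO1 before tlbwr. A rough C sketch of that walk follows; the constants and the tlb_write_random() stand-in for the HAZARD_DELAY/tlbwr/eret tail are illustrative assumptions, not the kernel's actual pmap code.

/* Illustrative values; the real ones live in the machine/ headers. */
#define SEGSHIFT		22		/* one page table per 4 MB segment */
#define PAGE_SHIFT		12
#define PTE_HW_MASK		0x3fffffffu	/* what _SLL/_SRL by WIRED_SHIFT keeps */
#define VM_MAX_KERNEL_ADDRESS	0xc0000000u

typedef unsigned int pte_t;

/* First-level table; the real kernel_segmap only spans kernel VA space. */
static pte_t *kernel_segmap[1 << (32 - SEGSHIFT)];

/* Stand-in for loading TLB_LO0/TLB_LO1 and issuing tlbwr. */
static void tlb_write_random(pte_t lo0, pte_t lo1) { (void)lo0; (void)lo1; }

/* Returns 0 on a successful refill, -1 to punt to MipsKernGenException. */
static int
kernel_tlb_refill(unsigned int badvaddr)
{
	pte_t *ptbl;
	unsigned int idx;

	if (badvaddr > VM_MAX_KERNEL_ADDRESS)
		return (-1);
	ptbl = kernel_segmap[badvaddr >> SEGSHIFT];
	if (ptbl == NULL)
		return (-1);
	/* Even/odd pair: the "andi k0, k0, 0xff8" keeps a pair-aligned index. */
	idx = (badvaddr >> PAGE_SHIFT) & 0x3fe;
	tlb_write_random(ptbl[idx] & PTE_HW_MASK, ptbl[idx + 1] & PTE_HW_MASK);
	return (0);
}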
+/*----------------------------------------------------------------------------
+ *
+ * MipsFPTrap --
+ *
+ * Handle a floating point Trap.
+ *
+ * MipsFPTrap(statusReg, causeReg, pc)
+ * unsigned statusReg;
+ * unsigned causeReg;
+ * unsigned pc;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+NON_LEAF(MipsFPTrap, STAND_FRAME_SIZE, ra)
+ subu sp, sp, STAND_FRAME_SIZE
+ mfc0 t0, COP_0_STATUS_REG
+ sw ra, STAND_RA_OFFSET(sp)
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+
+ or t1, t0, SR_COP_1_BIT
+ mtc0 t1, COP_0_STATUS_REG
+ ITLBNOPFIX
+	cfc1	t1, FPC_CSR			# stall until FP done
+ cfc1 t1, FPC_CSR # now get status
+ nop
+ sll t2, t1, (31 - 17) # unimplemented operation?
+ bgez t2, 3f # no, normal trap
+ nop
+/*
+ * We got an unimplemented operation trap so
+ * fetch the instruction, compute the next PC and emulate the instruction.
+ */
+ bgez a1, 1f # Check the branch delay bit.
+ nop
+/*
+ * The instruction is in the branch delay slot so the branch will have to
+ * be emulated to get the resulting PC.
+ */
+ sw a2, STAND_FRAME_SIZE + 8(sp)
+ GET_CPU_PCPU(a0)
+ lw a0, PC_CURPCB(a0)
+ addu a0, a0, U_PCB_REGS # first arg is ptr to CPU registers
+ move a1, a2 # second arg is instruction PC
+ move a2, t1 # third arg is floating point CSR
+ la t3, _C_LABEL(MipsEmulateBranch) # compute PC after branch
+ jalr t3 # compute PC after branch
+ move a3, zero # fourth arg is FALSE
+/*
+ * Now load the floating-point instruction in the branch delay slot
+ * to be emulated.
+ */
+ lw a2, STAND_FRAME_SIZE + 8(sp) # restore EXC pc
+ b 2f
+ lw a0, 4(a2) # a0 = coproc instruction
+/*
+ * This is not in the branch delay slot so calculate the resulting
+ * PC (epc + 4) into v0 and continue to MipsEmulateFP().
+ */
+1:
+ lw a0, 0(a2) # a0 = coproc instruction
+ addu v0, a2, 4 # v0 = next pc
+2:
+ GET_CPU_PCPU(t2)
+ lw t2, PC_CURPCB(t2)
+ SAVE_U_PCB_REG(v0, PC, t2) # save new pc
+/*
+ * Check to see if the instruction to be emulated is a floating-point
+ * instruction.
+ */
+ srl a3, a0, OPCODE_SHIFT
+ beq a3, OPCODE_C1, 4f # this should never fail
+ nop
+/*
+ * Send a floating point exception signal to the current process.
+ */
+3:
+ GET_CPU_PCPU(a0)
+ lw a0, PC_CURTHREAD(a0) # get current thread
+	cfc1	a2, FPC_CSR			# code = FP exceptions
+ ctc1 zero, FPC_CSR # Clear exceptions
+ la t3, _C_LABEL(trapsignal)
+ jalr t3
+ li a1, SIGFPE
+ b FPReturn
+ nop
+
+/*
+ * Finally, we can call MipsEmulateFP() where a0 is the instruction to emulate.
+ */
+4:
+ la t3, _C_LABEL(MipsEmulateFP)
+ jalr t3
+ nop
+
+/*
+ * Turn off the floating point coprocessor and return.
+ */
+FPReturn:
+ mfc0 t0, COP_0_STATUS_REG
+ lw ra, STAND_RA_OFFSET(sp)
+ and t0, t0, ~SR_COP_1_BIT
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ j ra
+ addu sp, sp, STAND_FRAME_SIZE
+END(MipsFPTrap)
+
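In outline, the trap handler above turns the coprocessor back on, reads the FP control/status register (the doubled cfc1 is the usual stall idiom), and posts SIGFPE directly unless the cause was an "unimplemented operation". In the unimplemented case it fetches the faulting instruction, calling MipsEmulateBranch to compute the next PC when the instruction sits in a branch delay slot, stores the new PC into the PCB, and hands COP1 instructions to MipsEmulateFP. The pseudo-C outline below uses one-line stand-in helpers; the accessor names are illustrative, not kernel APIs.

#define FCSR_UNIMPL	(1u << 17)	/* "unimplemented operation" cause bit */
#define CAUSE_BD	(1u << 31)	/* faulting op was in a branch delay slot */
#define OP_COP1		0x11

/* One-line stand-ins so the outline is self-contained. */
static void enable_cp1(unsigned int s) { (void)s; }
static void disable_cp1(void) { }
static unsigned int read_fcsr(void) { return (0); }
static void deliver_sigfpe(unsigned int code) { (void)code; }
static void save_next_pc(unsigned int pc) { (void)pc; }
static void emulate_fp(unsigned int insn) { (void)insn; }
static unsigned int fetch_instr(unsigned int pc) { (void)pc; return (0); }
static unsigned int emulate_branch(unsigned int pc, unsigned int fcsr)
{ (void)fcsr; return (pc + 8); }

void
fp_trap(unsigned int status, unsigned int cause, unsigned int epc)
{
	unsigned int fcsr, insn, nextpc;

	enable_cp1(status);
	fcsr = read_fcsr();

	if ((fcsr & FCSR_UNIMPL) == 0) {
		deliver_sigfpe(fcsr);		/* ordinary FP exception */
	} else {
		if (cause & CAUSE_BD) {
			nextpc = emulate_branch(epc, fcsr);
			insn = fetch_instr(epc + 4);
		} else {
			insn = fetch_instr(epc);
			nextpc = epc + 4;
		}
		save_next_pc(nextpc);		/* SAVE_U_PCB_REG(v0, PC, ...) */
		if ((insn >> 26) == OP_COP1)
			emulate_fp(insn);	/* MipsEmulateFP */
		else
			deliver_sigfpe(fcsr);
	}
	disable_cp1();
}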
+
+#if 0
+/*
+ * Atomic ipending update
+ */
+LEAF(set_sint)
+ la v1, ipending
+1:
+ ll v0, 0(v1)
+ or v0, a0
+ sc v0, 0(v1)
+ beqz v0, 1b
+ j ra
+ nop
+END(set_sint)
+#endif
+
+/*
+ * Interrupt counters for vmstat.
+ */
+ .data
+ .globl intrcnt
+ .globl eintrcnt
+ .globl intrnames
+ .globl eintrnames
+intrnames:
+ .asciiz "clock"
+ .asciiz "rtc"
+ .asciiz "sio"
+ .asciiz "pe"
+ .asciiz "pic-nic"
+eintrnames:
+ .align 2
+intrcnt:
+ .word 0,0,0,0,0
+eintrcnt:
+
+
+/*
+ * Vector to real handler in KSEG1.
+ */
+ .text
+VECTOR(MipsCache, unknown)
+ la k0, _C_LABEL(MipsCacheException)
+ li k1, MIPS_PHYS_MASK
+ and k0, k1
+ li k1, MIPS_UNCACHED_MEMORY_ADDR
+ or k0, k1
+ j k0
+ nop
+VECTOR_END(MipsCache)
+
+ .set at
+
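The three useful instructions in the vector above simply re-base the handler's KSEG0 (cached) address into KSEG1 (uncached), so the cache-error handler never depends on the cache it is about to report on. In C terms, with the usual MIPS32 segment constants (assumed values here):

#define MIPS_PHYS_MASK			0x1fffffffu
#define MIPS_UNCACHED_MEMORY_ADDR	0xa0000000u	/* KSEG1 base */

#define MIPS_KSEG0_TO_KSEG1(va) \
	((((unsigned long)(va)) & MIPS_PHYS_MASK) | MIPS_UNCACHED_MEMORY_ADDR)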
+
+/*
+ * Panic on cache errors. A lot more could be done to recover
+ * from some types of errors but it is tricky.
+ */
+NESTED_NOPROFILE(MipsCacheException, KERN_EXC_FRAME_SIZE, ra)
+ .set noat
+ .mask 0x80000000, -4
+ la k0, _C_LABEL(panic) # return to panic
+ la a0, 9f # panicstr
+ _MFC0 a1, COP_0_ERROR_PC
+ mfc0 a2, COP_0_CACHE_ERR # 3rd arg cache error
+
+ _MTC0 k0, COP_0_ERROR_PC # set return address
+
+ mfc0 k0, COP_0_STATUS_REG # restore status
+ li k1, SR_DIAG_DE # ignore further errors
+ or k0, k1
+ mtc0 k0, COP_0_STATUS_REG # restore status
+ HAZARD_DELAY
+
+ eret
+
+ MSG("cache error @ EPC 0x%x CachErr 0x%x");
+ .set at
+END(MipsCacheException)
diff --git a/sys/mips/mips/fp.S b/sys/mips/mips/fp.S
new file mode 100644
index 0000000..b211c12
--- /dev/null
+++ b/sys/mips/mips/fp.S
@@ -0,0 +1,3608 @@
+/* $OpenBSD: fp.S,v 1.2 1998/03/16 09:03:31 pefo Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)fp.s 8.1 (Berkeley) 6/10/93
+ * JNPR: fp.S,v 1.1 2006/08/07 05:38:57 katta
+ * $FreeBSD$
+ */
+
+/*
+ * Standard header stuff.
+ */
+
+#include <machine/asm.h>
+#include <machine/regnum.h>
+#include <machine/cpu.h>
+
+#include "assym.s"
+
+#define SEXP_INF 0xff
+#define DEXP_INF 0x7ff
+#define SEXP_BIAS 127
+#define DEXP_BIAS 1023
+#define SEXP_MIN -126
+#define DEXP_MIN -1022
+#define SEXP_MAX 127
+#define DEXP_MAX 1023
+#define WEXP_MAX 30 /* maximum unbiased exponent for int */
+#define WEXP_MIN -1 /* minimum unbiased exponent for int */
+#define SFRAC_BITS 23
+#define DFRAC_BITS 52
+#define SIMPL_ONE 0x00800000
+#define DIMPL_ONE 0x00100000
+#define SLEAD_ZEROS 31 - 23
+#define DLEAD_ZEROS 31 - 20
+#define STICKYBIT 1
+#define GUARDBIT 0x80000000
+#define SSIGNAL_NAN 0x00400000
+#define DSIGNAL_NAN 0x00080000
+#define SQUIET_NAN 0x003fffff
+#define DQUIET_NAN0 0x0007ffff
+#define DQUIET_NAN1 0xffffffff
+#define INT_MIN 0x80000000
+#define INT_MAX 0x7fffffff
+
+#define COND_UNORDERED 0x1
+#define COND_EQUAL 0x2
+#define COND_LESS 0x4
+#define COND_SIGNAL 0x8
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsEmulateFP --
+ *
+ * Emulate unimplemented floating point operations.
+ * This routine should only be called by MipsFPInterrupt().
+ *
+ * MipsEmulateFP(instr)
+ * unsigned instr;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Floating point registers are modified according to instruction.
+ *
+ *----------------------------------------------------------------------------
+ */
+NON_LEAF(MipsEmulateFP, STAND_FRAME_SIZE, ra)
+ subu sp, sp, STAND_FRAME_SIZE
+ sw ra, STAND_RA_OFFSET(sp)
+/*
+ * Decode the FMT field (bits 24-21) and FUNCTION field (bits 5-0).
+ */
+ srl v0, a0, 21 - 2 # get FMT field
+ and v0, v0, 0xF << 2 # mask FMT field
+ and v1, a0, 0x3F # mask FUNC field
+ sll v1, v1, 5 # align for table lookup
+ bgt v0, 4 << 2, ill # illegal format
+
+ or v1, v1, v0
+ cfc1 a1, FPC_CSR # get exception register
+ lw a3, func_fmt_tbl(v1) # switch on FUNC & FMT
+ and a1, a1, ~FPC_EXCEPTION_UNIMPL # clear exception
+ ctc1 a1, FPC_CSR
+ j a3
+
+ .rdata
+func_fmt_tbl:
+ .word add_s # 0
+ .word add_d # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word sub_s # 1
+ .word sub_d # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word mul_s # 2
+ .word mul_d # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word div_s # 3
+ .word div_d # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word abs_s # 5
+ .word abs_d # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word mov_s # 6
+ .word mov_d # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word neg_s # 7
+ .word neg_d # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 32
+ .word cvt_s_d # 32
+ .word ill # 32
+ .word ill # 32
+ .word cvt_s_w # 32
+ .word ill # 32
+ .word ill # 32
+ .word ill # 32
+ .word cvt_d_s # 33
+ .word ill # 33
+ .word ill # 33
+ .word ill # 33
+ .word cvt_d_w # 33
+ .word ill # 33
+ .word ill # 33
+ .word ill # 33
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word cvt_w_s # 36
+ .word cvt_w_d # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word cmp_s # 48
+ .word cmp_d # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word cmp_s # 49
+ .word cmp_d # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word cmp_s # 50
+ .word cmp_d # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word cmp_s # 51
+ .word cmp_d # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word cmp_s # 52
+ .word cmp_d # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word cmp_s # 53
+ .word cmp_d # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word cmp_s # 54
+ .word cmp_d # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word cmp_s # 55
+ .word cmp_d # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word cmp_s # 56
+ .word cmp_d # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word cmp_s # 57
+ .word cmp_d # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word cmp_s # 58
+ .word cmp_d # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word cmp_s # 59
+ .word cmp_d # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word cmp_s # 60
+ .word cmp_d # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word cmp_s # 61
+ .word cmp_d # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word cmp_s # 62
+ .word cmp_d # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word cmp_s # 63
+ .word cmp_d # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .text
+
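The jump through func_fmt_tbl above is a 64x8 dispatch: each FUNCTION value owns a row of eight words and the low bits of the FMT field pick the column (S=16 maps to column 0, D=17 to 1, W=20 to 4; the earlier "bgt v0, 4 << 2, ill" check rejects anything past column 4). A hypothetical C restatement of the indexing, not part of the kernel:

#include <stddef.h>

typedef void (*fp_op_t)(void);

/*
 * Rows are FUNCTION (0..63), columns the low FMT bits; in the assembly the
 * table is filled with add_s, add_d, cvt_s_w, ..., and ill everywhere else.
 */
static fp_op_t func_fmt_tbl[64][8];

static fp_op_t
fp_dispatch(unsigned int insn)
{
	unsigned int fmt  = (insn >> 21) & 0xf;		/* FMT field, bits 24..21 */
	unsigned int func = insn & 0x3f;		/* FUNCTION field, bits 5..0 */

	if (fmt > 4)
		return (NULL);		/* the assembly branches to "ill" first */
	return (func_fmt_tbl[func][fmt]);
}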
+/*
+ * Single precision subtract.
+ */
+sub_s:
+ jal get_ft_fs_s
+ xor t4, t4, 1 # negate FT sign bit
+ b add_sub_s
+/*
+ * Single precision add.
+ */
+add_s:
+ jal get_ft_fs_s
+add_sub_s:
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t5, SEXP_INF, result_fs_s # if FT is not inf, result=FS
+ bne t2, zero, result_fs_s # if FS is NAN, result is FS
+ bne t6, zero, result_ft_s # if FT is NAN, result is FT
+ bne t0, t4, invalid_s # both infinities same sign?
+ b result_fs_s # result is in FS
+1:
+ beq t5, SEXP_INF, result_ft_s # if FT is inf, result=FT
+ bne t1, zero, 4f # is FS a denormalized num?
+ beq t2, zero, 3f # is FS zero?
+ bne t5, zero, 2f # is FT a denormalized num?
+ beq t6, zero, result_fs_s # FT is zero, result=FS
+ jal renorm_fs_s
+ jal renorm_ft_s
+ b 5f
+2:
+ jal renorm_fs_s
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+ b 5f
+3:
+ bne t5, zero, result_ft_s # if FT != 0, result=FT
+ bne t6, zero, result_ft_s
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_s
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_s
+4:
+ bne t5, zero, 2f # is FT a denormalized num?
+ beq t6, zero, result_fs_s # FT is zero, result=FS
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+ jal renorm_ft_s
+ b 5f
+2:
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+/*
+ * Perform the addition.
+ */
+5:
+ move t8, zero # no shifted bits (sticky reg)
+ beq t1, t5, 4f # no shift needed
+ subu v0, t1, t5 # v0 = difference of exponents
+ move v1, v0 # v1 = abs(difference)
+ bge v0, zero, 1f
+ negu v1
+1:
+ ble v1, SFRAC_BITS+2, 2f # is difference too great?
+ li t8, STICKYBIT # set the sticky bit
+ bge v0, zero, 1f # check which exp is larger
+ move t1, t5 # result exp is FTs
+ move t2, zero # FSs fraction shifted is zero
+ b 4f
+1:
+ move t6, zero # FTs fraction shifted is zero
+ b 4f
+2:
+ li t9, 32 # compute 32 - abs(exp diff)
+ subu t9, t9, v1
+ bgt v0, zero, 3f # if FS > FT, shift FTs frac
+ move t1, t5 # FT > FS, result exp is FTs
+ sll t8, t2, t9 # save bits shifted out
+ srl t2, t2, v1 # shift FSs fraction
+ b 4f
+3:
+ sll t8, t6, t9 # save bits shifted out
+ srl t6, t6, v1 # shift FTs fraction
+4:
+ bne t0, t4, 1f # if signs differ, subtract
+ addu t2, t2, t6 # add fractions
+ b norm_s
+1:
+ blt t2, t6, 3f # subtract larger from smaller
+ bne t2, t6, 2f # if same, result=0
+ move t1, zero # result=0
+ move t2, zero
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_s
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_s
+2:
+ sltu t9, zero, t8 # compute t2:zero - t6:t8
+ subu t8, zero, t8
+ subu t2, t2, t6 # subtract fractions
+	subu	t2, t2, t9			# subtract borrow
+ b norm_s
+3:
+ move t0, t4 # sign of result = FTs
+ sltu t9, zero, t8 # compute t6:zero - t2:t8
+ subu t8, zero, t8
+ subu t2, t6, t2 # subtract fractions
+	subu	t2, t2, t9			# subtract borrow
+ b norm_s
+
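Stripped of the special cases (infinities, NaNs, zeros, denormals) and of the final rounding, the single-precision add/subtract above reduces to: align the smaller operand by shifting its fraction right while folding the lost bits into a sticky value (the role of t8), then add or subtract magnitudes according to the signs and let norm_s renormalize. A simplified C model of just that alignment-and-combine step; the sticky-borrow adjustment, the SFRAC_BITS+2 cutoff, and the signed-zero rounding cases are deliberately omitted.

struct sfp { int sign; int exp; unsigned int frac; };	/* frac carries the implied 1 */

static struct sfp
fp_add_magnitudes(struct sfp a, struct sfp b, unsigned int *sticky)
{
	struct sfp r, tmp;
	unsigned int diff;

	/* Make "a" the operand with the larger exponent. */
	if (b.exp > a.exp) {
		tmp = a; a = b; b = tmp;
	}
	diff = a.exp - b.exp;

	/* Align b; bits shifted off the end become the sticky value (t8). */
	if (diff == 0)
		*sticky = 0;
	else if (diff < 32) {
		*sticky = b.frac << (32 - diff);
		b.frac >>= diff;
	} else {
		*sticky = (b.frac != 0);
		b.frac = 0;
	}

	r.exp = a.exp;
	if (a.sign == b.sign) {			/* same sign: add fractions */
		r.sign = a.sign;
		r.frac = a.frac + b.frac;
	} else if (a.frac >= b.frac) {		/* else subtract the smaller */
		r.sign = a.sign;
		r.frac = a.frac - b.frac;
	} else {
		r.sign = b.sign;
		r.frac = b.frac - a.frac;
	}
	return (r);				/* norm_s normalizes and rounds */
}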
+/*
+ * Double precision subtract.
+ */
+sub_d:
+ jal get_ft_fs_d
+ xor t4, t4, 1 # negate sign bit
+ b add_sub_d
+/*
+ * Double precision add.
+ */
+add_d:
+ jal get_ft_fs_d
+add_sub_d:
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t5, DEXP_INF, result_fs_d # if FT is not inf, result=FS
+ bne t2, zero, result_fs_d # if FS is NAN, result is FS
+ bne t3, zero, result_fs_d
+ bne t6, zero, result_ft_d # if FT is NAN, result is FT
+ bne t7, zero, result_ft_d
+ bne t0, t4, invalid_d # both infinities same sign?
+ b result_fs_d # result is in FS
+1:
+ beq t5, DEXP_INF, result_ft_d # if FT is inf, result=FT
+ bne t1, zero, 4f # is FS a denormalized num?
+ bne t2, zero, 1f # is FS zero?
+ beq t3, zero, 3f
+1:
+ bne t5, zero, 2f # is FT a denormalized num?
+ bne t6, zero, 1f
+ beq t7, zero, result_fs_d # FT is zero, result=FS
+1:
+ jal renorm_fs_d
+ jal renorm_ft_d
+ b 5f
+2:
+ jal renorm_fs_d
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+ b 5f
+3:
+ bne t5, zero, result_ft_d # if FT != 0, result=FT
+ bne t6, zero, result_ft_d
+ bne t7, zero, result_ft_d
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_d
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_d
+4:
+ bne t5, zero, 2f # is FT a denormalized num?
+ bne t6, zero, 1f
+ beq t7, zero, result_fs_d # FT is zero, result=FS
+1:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+ jal renorm_ft_d
+ b 5f
+2:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+/*
+ * Perform the addition.
+ */
+5:
+ move t8, zero # no shifted bits (sticky reg)
+ beq t1, t5, 4f # no shift needed
+ subu v0, t1, t5 # v0 = difference of exponents
+ move v1, v0 # v1 = abs(difference)
+ bge v0, zero, 1f
+ negu v1
+1:
+ ble v1, DFRAC_BITS+2, 2f # is difference too great?
+ li t8, STICKYBIT # set the sticky bit
+ bge v0, zero, 1f # check which exp is larger
+ move t1, t5 # result exp is FTs
+ move t2, zero # FSs fraction shifted is zero
+ move t3, zero
+ b 4f
+1:
+ move t6, zero # FTs fraction shifted is zero
+ move t7, zero
+ b 4f
+2:
+ li t9, 32
+ bge v0, zero, 3f # if FS > FT, shift FTs frac
+ move t1, t5 # FT > FS, result exp is FTs
+ blt v1, t9, 1f # shift right by < 32?
+ subu v1, v1, t9
+ subu t9, t9, v1
+ sll t8, t2, t9 # save bits shifted out
+	sltu	t9, zero, t3			# don't lose any one bits
+ or t8, t8, t9 # save sticky bit
+ srl t3, t2, v1 # shift FSs fraction
+ move t2, zero
+ b 4f
+1:
+ subu t9, t9, v1
+ sll t8, t3, t9 # save bits shifted out
+ srl t3, t3, v1 # shift FSs fraction
+ sll t9, t2, t9 # save bits shifted out of t2
+ or t3, t3, t9 # and put into t3
+ srl t2, t2, v1
+ b 4f
+3:
+ blt v1, t9, 1f # shift right by < 32?
+ subu v1, v1, t9
+ subu t9, t9, v1
+ sll t8, t6, t9 # save bits shifted out
+ srl t7, t6, v1 # shift FTs fraction
+ move t6, zero
+ b 4f
+1:
+ subu t9, t9, v1
+ sll t8, t7, t9 # save bits shifted out
+ srl t7, t7, v1 # shift FTs fraction
+	sll	t9, t6, t9			# save bits shifted out of t6
+	or	t7, t7, t9			# and put into t7
+ srl t6, t6, v1
+4:
+ bne t0, t4, 1f # if signs differ, subtract
+ addu t3, t3, t7 # add fractions
+ sltu t9, t3, t7 # compute carry
+ addu t2, t2, t6 # add fractions
+ addu t2, t2, t9 # add carry
+ b norm_d
+1:
+ blt t2, t6, 3f # subtract larger from smaller
+ bne t2, t6, 2f
+ bltu t3, t7, 3f
+ bne t3, t7, 2f # if same, result=0
+ move t1, zero # result=0
+ move t2, zero
+ move t3, zero
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_d
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_d
+2:
+ beq t8, zero, 1f # compute t2:t3:zero - t6:t7:t8
+ subu t8, zero, t8
+	sltu	v0, t3, 1			# compute borrow out
+	subu	t3, t3, 1			# subtract borrow
+ subu t2, t2, v0
+1:
+ sltu v0, t3, t7
+ subu t3, t3, t7 # subtract fractions
+ subu t2, t2, t6 # subtract fractions
+	subu	t2, t2, v0			# subtract borrow
+ b norm_d
+3:
+ move t0, t4 # sign of result = FTs
+ beq t8, zero, 1f # compute t6:t7:zero - t2:t3:t8
+ subu t8, zero, t8
+	sltu	v0, t7, 1			# compute borrow out
+	subu	t7, t7, 1			# subtract borrow
+ subu t6, t6, v0
+1:
+ sltu v0, t7, t3
+ subu t3, t7, t3 # subtract fractions
+ subu t2, t6, t2 # subtract fractions
+	subu	t2, t2, v0			# subtract borrow
+ b norm_d
+
+/*
+ * Single precision multiply.
+ */
+mul_s:
+ jal get_ft_fs_s
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, SEXP_INF, 2f # is FS an infinity?
+ bne t2, zero, result_fs_s # if FS is a NAN, result=FS
+ bne t5, SEXP_INF, 1f # FS is inf, is FT an infinity?
+ bne t6, zero, result_ft_s # if FT is a NAN, result=FT
+ b result_fs_s # result is infinity
+1:
+ bne t5, zero, result_fs_s # inf * zero? if no, result=FS
+ bne t6, zero, result_fs_s
+ b invalid_s # infinity * zero is invalid
+2:
+ bne t5, SEXP_INF, 1f # FS != inf, is FT an infinity?
+ bne t1, zero, result_ft_s # zero * inf? if no, result=FT
+ bne t2, zero, result_ft_s
+ bne t6, zero, result_ft_s # if FT is a NAN, result=FT
+ b invalid_s # zero * infinity is invalid
+1:
+ bne t1, zero, 1f # is FS zero?
+ beq t2, zero, result_fs_s # result is zero
+ jal renorm_fs_s
+ b 2f
+1:
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+2:
+ bne t5, zero, 1f # is FT zero?
+ beq t6, zero, result_ft_s # result is zero
+ jal renorm_ft_s
+ b 2f
+1:
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+2:
+ addu t1, t1, t5 # compute result exponent
+ addu t1, t1, 9 # account for binary point
+ multu t2, t6 # multiply fractions
+ mflo t8
+ mfhi t2
+ b norm_s
+
+/*
+ * Double precision multiply.
+ */
+mul_d:
+ jal get_ft_fs_d
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, DEXP_INF, 2f # is FS an infinity?
+ bne t2, zero, result_fs_d # if FS is a NAN, result=FS
+ bne t3, zero, result_fs_d
+ bne t5, DEXP_INF, 1f # FS is inf, is FT an infinity?
+ bne t6, zero, result_ft_d # if FT is a NAN, result=FT
+ bne t7, zero, result_ft_d
+ b result_fs_d # result is infinity
+1:
+ bne t5, zero, result_fs_d # inf * zero? if no, result=FS
+ bne t6, zero, result_fs_d
+ bne t7, zero, result_fs_d
+ b invalid_d # infinity * zero is invalid
+2:
+ bne t5, DEXP_INF, 1f # FS != inf, is FT an infinity?
+ bne t1, zero, result_ft_d # zero * inf? if no, result=FT
+ bne t2, zero, result_ft_d # if FS is a NAN, result=FS
+ bne t3, zero, result_ft_d
+ bne t6, zero, result_ft_d # if FT is a NAN, result=FT
+ bne t7, zero, result_ft_d
+ b invalid_d # zero * infinity is invalid
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ beq t3, zero, result_fs_d # result is zero
+1:
+ jal renorm_fs_d
+ b 3f
+2:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+3:
+ bne t5, zero, 2f # is FT zero?
+ bne t6, zero, 1f
+ beq t7, zero, result_ft_d # result is zero
+1:
+ jal renorm_ft_d
+ b 3f
+2:
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+3:
+ addu t1, t1, t5 # compute result exponent
+ addu t1, t1, 12 # ???
+ multu t3, t7 # multiply fractions (low * low)
+ move t4, t2 # free up t2,t3 for result
+ move t5, t3
+ mflo a3 # save low order bits
+ mfhi t8
+ not v0, t8
+ multu t4, t7 # multiply FS(high) * FT(low)
+ mflo v1
+ mfhi t3 # init low result
+ sltu v0, v0, v1 # compute carry
+ addu t8, v1
+ multu t5, t6 # multiply FS(low) * FT(high)
+ addu t3, t3, v0 # add carry
+ not v0, t8
+ mflo v1
+ mfhi t2
+ sltu v0, v0, v1
+ addu t8, v1
+ multu t4, t6 # multiply FS(high) * FT(high)
+ addu t3, v0
+ not v1, t3
+ sltu v1, v1, t2
+ addu t3, t2
+ not v0, t3
+ mfhi t2
+ addu t2, v1
+ mflo v1
+ sltu v0, v0, v1
+ addu t2, v0
+ addu t3, v1
+ sltu a3, zero, a3 # reduce t8,a3 to just t8
+ or t8, a3
+ b norm_d
+
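The chain of multu/mflo/mfhi and carry fix-ups in mul_d above builds a 128-bit product of the two 64-bit fractions out of four 32-bit multiplies: the top 64 bits end up in t2:t3 and everything below is folded into the sticky register t8. The same computation written with 64-bit arithmetic, as a hypothetical helper shown only for clarity:

#include <stdint.h>

static void
frac_mul_64(uint32_t fs_hi, uint32_t fs_lo, uint32_t ft_hi, uint32_t ft_lo,
    uint32_t *hi, uint32_t *lo, uint32_t *sticky)
{
	uint64_t ll = (uint64_t)fs_lo * ft_lo;
	uint64_t lh = (uint64_t)fs_lo * ft_hi;
	uint64_t hl = (uint64_t)fs_hi * ft_lo;
	uint64_t hh = (uint64_t)fs_hi * ft_hi;

	/* Sum the partial products at their proper bit positions. */
	uint64_t mid = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
	uint64_t top = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);

	*hi = (uint32_t)(top >> 32);	/* ends up in t2 */
	*lo = (uint32_t)top;		/* ends up in t3 */
	/* Bits below the kept 64 collapse into a sticky indicator (t8). */
	*sticky = (uint32_t)mid | ((uint32_t)ll != 0);
}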
+/*
+ * Single precision divide.
+ */
+div_s:
+ jal get_ft_fs_s
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, result_fs_s # if FS is NAN, result is FS
+ bne t5, SEXP_INF, result_fs_s # is FT an infinity?
+ bne t6, zero, result_ft_s # if FT is NAN, result is FT
+ b invalid_s # infinity/infinity is invalid
+1:
+ bne t5, SEXP_INF, 1f # is FT an infinity?
+ bne t6, zero, result_ft_s # if FT is NAN, result is FT
+ move t1, zero # x / infinity is zero
+ move t2, zero
+ b result_fs_s
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ bne t5, zero, result_fs_s # FS=zero, is FT zero?
+ beq t6, zero, invalid_s # 0 / 0
+ b result_fs_s # result = zero
+1:
+ jal renorm_fs_s
+ b 3f
+2:
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+3:
+ bne t5, zero, 2f # is FT zero?
+ bne t6, zero, 1f
+ or a1, a1, FPC_EXCEPTION_DIV0 | FPC_STICKY_DIV0
+ and v0, a1, FPC_ENABLE_DIV0 # trap enabled?
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ li t1, SEXP_INF # result is infinity
+ move t2, zero
+ b result_fs_s
+1:
+ jal renorm_ft_s
+ b 3f
+2:
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+3:
+ subu t1, t1, t5 # compute exponent
+ subu t1, t1, 3 # compensate for result position
+ li v0, SFRAC_BITS+3 # number of bits to divide
+ move t8, t2 # init dividend
+ move t2, zero # init result
+1:
+ bltu t8, t6, 3f # is dividend >= divisor?
+2:
+ subu t8, t8, t6 # subtract divisor from dividend
+ or t2, t2, 1 # remember that we did
+ bne t8, zero, 3f # if not done, continue
+ sll t2, t2, v0 # shift result to final position
+ b norm_s
+3:
+ sll t8, t8, 1 # shift dividend
+ sll t2, t2, 1 # shift result
+ subu v0, v0, 1 # are we done?
+ bne v0, zero, 1b # no, continue
+ b norm_s
+
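The loop in div_s above is plain shift-and-subtract (restoring) division on the fractions, producing SFRAC_BITS+3 quotient bits and left-justifying the result if the remainder hits zero early. The same loop as a hypothetical C helper; div_s runs it with bits = SFRAC_BITS + 3 and then falls into norm_s.

static unsigned int
frac_div_32(unsigned int dividend, unsigned int divisor, int bits)
{
	unsigned int quotient = 0;

	while (bits > 0) {
		if (dividend >= divisor) {
			dividend -= divisor;
			quotient |= 1;
			if (dividend == 0) {		/* exact: done early */
				quotient <<= bits;	/* left-justify result */
				return (quotient);
			}
		}
		dividend <<= 1;
		quotient <<= 1;
		bits--;
	}
	return (quotient);
}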
+/*
+ * Double precision divide.
+ */
+div_d:
+ jal get_ft_fs_d
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, result_fs_d # if FS is NAN, result is FS
+ bne t3, zero, result_fs_d
+ bne t5, DEXP_INF, result_fs_d # is FT an infinity?
+ bne t6, zero, result_ft_d # if FT is NAN, result is FT
+ bne t7, zero, result_ft_d
+ b invalid_d # infinity/infinity is invalid
+1:
+ bne t5, DEXP_INF, 1f # is FT an infinity?
+ bne t6, zero, result_ft_d # if FT is NAN, result is FT
+ bne t7, zero, result_ft_d
+ move t1, zero # x / infinity is zero
+ move t2, zero
+ move t3, zero
+ b result_fs_d
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ bne t3, zero, 1f
+ bne t5, zero, result_fs_d # FS=zero, is FT zero?
+ bne t6, zero, result_fs_d
+ beq t7, zero, invalid_d # 0 / 0
+ b result_fs_d # result = zero
+1:
+ jal renorm_fs_d
+ b 3f
+2:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+3:
+ bne t5, zero, 2f # is FT zero?
+ bne t6, zero, 1f
+ bne t7, zero, 1f
+ or a1, a1, FPC_EXCEPTION_DIV0 | FPC_STICKY_DIV0
+ and v0, a1, FPC_ENABLE_DIV0 # trap enabled?
+ bne v0, zero, fpe_trap
+	ctc1	a1, FPC_CSR			# save exceptions
+ li t1, DEXP_INF # result is infinity
+ move t2, zero
+ move t3, zero
+ b result_fs_d
+1:
+ jal renorm_ft_d
+ b 3f
+2:
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+3:
+ subu t1, t1, t5 # compute exponent
+ subu t1, t1, 3 # compensate for result position
+ li v0, DFRAC_BITS+3 # number of bits to divide
+ move t8, t2 # init dividend
+ move t9, t3
+ move t2, zero # init result
+ move t3, zero
+1:
+ bltu t8, t6, 3f # is dividend >= divisor?
+ bne t8, t6, 2f
+ bltu t9, t7, 3f
+2:
+ sltu v1, t9, t7 # subtract divisor from dividend
+ subu t9, t9, t7
+ subu t8, t8, t6
+ subu t8, t8, v1
+ or t3, t3, 1 # remember that we did
+ bne t8, zero, 3f # if not done, continue
+ bne t9, zero, 3f
+ li v1, 32 # shift result to final position
+ blt v0, v1, 2f # shift < 32 bits?
+ subu v0, v0, v1 # shift by > 32 bits
+ sll t2, t3, v0 # shift upper part
+ move t3, zero
+ b norm_d
+2:
+ subu v1, v1, v0 # shift by < 32 bits
+ sll t2, t2, v0 # shift upper part
+ srl t9, t3, v1 # save bits shifted out
+ or t2, t2, t9 # and put into upper part
+ sll t3, t3, v0
+ b norm_d
+3:
+ sll t8, t8, 1 # shift dividend
+ srl v1, t9, 31 # save bit shifted out
+ or t8, t8, v1 # and put into upper part
+ sll t9, t9, 1
+ sll t2, t2, 1 # shift result
+ srl v1, t3, 31 # save bit shifted out
+ or t2, t2, v1 # and put into upper part
+ sll t3, t3, 1
+ subu v0, v0, 1 # are we done?
+ bne v0, zero, 1b # no, continue
+ sltu v0, zero, t9 # be sure to save any one bits
+ or t8, t8, v0 # from the lower remainder
+ b norm_d
+
+/*
+ * Single precision absolute value.
+ */
+abs_s:
+ jal get_fs_s
+ move t0, zero # set sign positive
+ b result_fs_s
+
+/*
+ * Double precision absolute value.
+ */
+abs_d:
+ jal get_fs_d
+ move t0, zero # set sign positive
+ b result_fs_d
+
+/*
+ * Single precision move.
+ */
+mov_s:
+ jal get_fs_s
+ b result_fs_s
+
+/*
+ * Double precision move.
+ */
+mov_d:
+ jal get_fs_d
+ b result_fs_d
+
+/*
+ * Single precision negate.
+ */
+neg_s:
+ jal get_fs_s
+ xor t0, t0, 1 # reverse sign
+ b result_fs_s
+
+/*
+ * Double precision negate.
+ */
+neg_d:
+ jal get_fs_d
+ xor t0, t0, 1 # reverse sign
+ b result_fs_d
+
+/*
+ * Convert double to single.
+ */
+cvt_s_d:
+ jal get_fs_d
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ li t1, SEXP_INF # convert to single
+ sll t2, t2, 3 # convert D fraction to S
+ srl t8, t3, 32 - 3
+ or t2, t2, t8
+ b result_fs_s
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ beq t3, zero, result_fs_s # result=0
+1:
+ jal renorm_fs_d
+ subu t1, t1, 3 # correct exp for shift below
+ b 3f
+2:
+ subu t1, t1, DEXP_BIAS # unbias exponent
+ or t2, t2, DIMPL_ONE # add implied one bit
+3:
+ sll t2, t2, 3 # convert D fraction to S
+ srl t8, t3, 32 - 3
+ or t2, t2, t8
+ sll t8, t3, 3
+ b norm_noshift_s
+
+/*
+ * Convert integer to single.
+ */
+cvt_s_w:
+ jal get_fs_int
+ bne t2, zero, 1f # check for zero
+ move t1, zero
+ b result_fs_s
+/*
+ * Find out how many leading zero bits are in t2 and put in t9.
+ */
+1:
+ move v0, t2
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2 the correct number of bits.
+ */
+1:
+	subu	t9, t9, SLEAD_ZEROS		# don't count leading zeros
+ li t1, 23 # init exponent
+ subu t1, t1, t9 # compute exponent
+ beq t9, zero, 1f
+ li v0, 32
+ blt t9, zero, 2f # if shift < 0, shift right
+ subu v0, v0, t9
+ sll t2, t2, t9 # shift left
+1:
+ add t1, t1, SEXP_BIAS # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ b result_fs_s
+2:
+ negu t9 # shift right by t9
+ subu v0, v0, t9
+ sll t8, t2, v0 # save bits shifted out
+ srl t2, t2, t9
+ b norm_noshift_s
+
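The five test-and-shift steps used here (and again in cvt_d_w, norm_s and norm_d) are a binary-search count of leading zeros. The equivalent C helper, shown only to make the pattern explicit; like the assembly sequence it yields 31 for a zero input, which the callers rule out first.

static int
count_leading_zeros(unsigned int v)
{
	int n = 0;

	if ((v >> 16) == 0) { n += 16; v <<= 16; }
	if ((v >> 24) == 0) { n += 8;  v <<= 8; }
	if ((v >> 28) == 0) { n += 4;  v <<= 4; }
	if ((v >> 30) == 0) { n += 2;  v <<= 2; }
	if ((v >> 31) == 0) { n += 1; }
	return (n);
}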
+/*
+ * Convert single to double.
+ */
+cvt_d_s:
+ jal get_fs_s
+ move t3, zero
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ li t1, DEXP_INF # convert to double
+ b result_fs_d
+1:
+ bne t1, zero, 2f # is FS denormalized or zero?
+ beq t2, zero, result_fs_d # is FS zero?
+ jal renorm_fs_s
+ move t8, zero
+ b norm_d
+2:
+ addu t1, t1, DEXP_BIAS - SEXP_BIAS # bias exponent correctly
+ sll t3, t2, 32 - 3 # convert S fraction to D
+ srl t2, t2, 3
+ b result_fs_d
+
+/*
+ * Convert integer to double.
+ */
+cvt_d_w:
+ jal get_fs_int
+ bne t2, zero, 1f # check for zero
+ move t1, zero # result=0
+ move t3, zero
+ b result_fs_d
+/*
+ * Find out how many leading zero bits are in t2 and put in t9.
+ */
+1:
+ move v0, t2
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2 the correct number of bits.
+ */
+1:
+	subu	t9, t9, DLEAD_ZEROS		# don't count leading zeros
+ li t1, DEXP_BIAS + 20 # init exponent
+ subu t1, t1, t9 # compute exponent
+ beq t9, zero, 1f
+ li v0, 32
+ blt t9, zero, 2f # if shift < 0, shift right
+ subu v0, v0, t9
+ sll t2, t2, t9 # shift left
+1:
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ move t3, zero
+ b result_fs_d
+2:
+ negu t9 # shift right by t9
+ subu v0, v0, t9
+ sll t3, t2, v0
+ srl t2, t2, t9
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ b result_fs_d
+
+/*
+ * Convert single to integer.
+ */
+cvt_w_s:
+ jal get_fs_s
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, invalid_w # invalid conversion
+1:
+ bne t1, zero, 1f # is FS zero?
+ beq t2, zero, result_fs_w # result is zero
+ move t2, zero # result is an inexact zero
+ b inexact_w
+1:
+ subu t1, t1, SEXP_BIAS # unbias exponent
+ or t2, t2, SIMPL_ONE # add implied one bit
+ sll t3, t2, 32 - 3 # convert S fraction to D
+ srl t2, t2, 3
+ b cvt_w
+
+/*
+ * Convert double to integer.
+ */
+cvt_w_d:
+ jal get_fs_d
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, invalid_w # invalid conversion
+ bne t3, zero, invalid_w # invalid conversion
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ beq t3, zero, result_fs_w # result is zero
+1:
+ move t2, zero # result is an inexact zero
+ b inexact_w
+2:
+ subu t1, t1, DEXP_BIAS # unbias exponent
+ or t2, t2, DIMPL_ONE # add implied one bit
+cvt_w:
+ blt t1, WEXP_MIN, underflow_w # is exponent too small?
+ li v0, WEXP_MAX+1
+ bgt t1, v0, overflow_w # is exponent too large?
+ bne t1, v0, 1f # special check for INT_MIN
+ beq t0, zero, overflow_w # if positive, overflow
+ bne t2, DIMPL_ONE, overflow_w
+ bne t3, zero, overflow_w
+ li t2, INT_MIN # result is INT_MIN
+ b result_fs_w
+1:
+ subu v0, t1, 20 # compute amount to shift
+ beq v0, zero, 2f # is shift needed?
+ li v1, 32
+ blt v0, zero, 1f # if shift < 0, shift right
+ subu v1, v1, v0 # shift left
+ sll t2, t2, v0
+ srl t9, t3, v1 # save bits shifted out of t3
+ or t2, t2, t9 # and put into t2
+ sll t3, t3, v0 # shift FSs fraction
+ b 2f
+1:
+ negu v0 # shift right by v0
+ subu v1, v1, v0
+ sll t8, t3, v1 # save bits shifted out
+	sltu	t8, zero, t8			# don't lose any ones
+ srl t3, t3, v0 # shift FSs fraction
+ or t3, t3, t8
+ sll t9, t2, v1 # save bits shifted out of t2
+ or t3, t3, t9 # and put into t3
+ srl t2, t2, v0
+/*
+ * round result (t0 is sign, t2 is integer part, t3 is fractional part).
+ */
+2:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t3, zero, 5f # if no fraction bits, continue
+ addu t2, t2, 1 # add rounding bit
+ blt t2, zero, overflow_w # overflow?
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t3 # add remainder
+ sltu v1, v0, t3 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry to result
+ blt t2, zero, overflow_w # overflow?
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t2, t2, ~1 # clear LSB (round to nearest)
+5:
+ beq t0, zero, 1f # result positive?
+ negu t2 # convert to negative integer
+1:
+ beq t3, zero, result_fs_w # is result exact?
+/*
+ * Handle inexact exception.
+ */
+inexact_w:
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b result_fs_w
+
+/*
+ * Conversions to integer which overflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an invalid exception.
+ */
+overflow_w:
+ or a1, a1, FPC_EXCEPTION_OVERFLOW | FPC_STICKY_OVERFLOW
+ and v0, a1, FPC_ENABLE_OVERFLOW
+ bne v0, zero, fpe_trap
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, inexact_w # inexact traps enabled?
+ b invalid_w
+
+/*
+ * Conversions to integer which underflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an invalid exception.
+ */
+underflow_w:
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ and v0, a1, FPC_ENABLE_UNDERFLOW
+ bne v0, zero, fpe_trap
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, inexact_w # inexact traps enabled?
+ b invalid_w
+
+/*
+ * Compare single.
+ */
+cmp_s:
+ jal get_cmp_s
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, unordered # FS is a NAN
+1:
+ bne t5, SEXP_INF, 2f # is FT an infinity?
+ bne t6, zero, unordered # FT is a NAN
+2:
+ sll t1, t1, 23 # reassemble exp & frac
+ or t1, t1, t2
+ sll t5, t5, 23 # reassemble exp & frac
+ or t5, t5, t6
+ beq t0, zero, 1f # is FS positive?
+ negu t1
+1:
+ beq t4, zero, 1f # is FT positive?
+ negu t5
+1:
+ li v0, COND_LESS
+ blt t1, t5, test_cond # is FS < FT?
+ li v0, COND_EQUAL
+ beq t1, t5, test_cond # is FS == FT?
+ move v0, zero # FS > FT
+ b test_cond
+
+/*
+ * Compare double.
+ */
+cmp_d:
+ jal get_cmp_d
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, unordered
+ bne t3, zero, unordered # FS is a NAN
+1:
+ bne t5, DEXP_INF, 2f # is FT an infinity?
+ bne t6, zero, unordered
+ bne t7, zero, unordered # FT is a NAN
+2:
+ sll t1, t1, 20 # reassemble exp & frac
+ or t1, t1, t2
+ sll t5, t5, 20 # reassemble exp & frac
+ or t5, t5, t6
+ beq t0, zero, 1f # is FS positive?
+ not t3 # negate t1,t3
+ not t1
+ addu t3, t3, 1
+ seq v0, t3, zero # compute carry
+ addu t1, t1, v0
+1:
+ beq t4, zero, 1f # is FT positive?
+ not t7 # negate t5,t7
+ not t5
+ addu t7, t7, 1
+ seq v0, t7, zero # compute carry
+ addu t5, t5, v0
+1:
+ li v0, COND_LESS
+ blt t1, t5, test_cond # is FS(MSW) < FT(MSW)?
+ move v0, zero
+ bne t1, t5, test_cond # is FS(MSW) > FT(MSW)?
+ li v0, COND_LESS
+ bltu t3, t7, test_cond # is FS(LSW) < FT(LSW)?
+ li v0, COND_EQUAL
+ beq t3, t7, test_cond # is FS(LSW) == FT(LSW)?
+ move v0, zero # FS > FT
+test_cond:
+ and v0, v0, a0 # condition match instruction?
+set_cond:
+ bne v0, zero, 1f
+ and a1, a1, ~FPC_COND_BIT # clear condition bit
+ b 2f
+1:
+ or a1, a1, FPC_COND_BIT # set condition bit
+2:
+ ctc1 a1, FPC_CSR # save condition bit
+ b done
+
+unordered:
+ and v0, a0, COND_UNORDERED # this cmp match unordered?
+ bne v0, zero, 1f
+ and a1, a1, ~FPC_COND_BIT # clear condition bit
+ b 2f
+1:
+ or a1, a1, FPC_COND_BIT # set condition bit
+2:
+ and v0, a0, COND_SIGNAL
+ beq v0, zero, 1f # is this a signaling cmp?
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+1:
+ ctc1 a1, FPC_CSR # save condition bit
+ b done
+
+/*
+ * Determine the amount to shift the fraction in order to restore the
+ * normalized position. After that, round and handle exceptions.
+ */
+norm_s:
+ move v0, t2
+ move t9, zero # t9 = num of leading zeros
+ bne t2, zero, 1f
+ move v0, t8
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2,t8 the correct number of bits.
+ */
+1:
+	subu	t9, t9, SLEAD_ZEROS		# don't count leading zeros
+ subu t1, t1, t9 # adjust the exponent
+ beq t9, zero, norm_noshift_s
+ li v1, 32
+ blt t9, zero, 1f # if shift < 0, shift right
+ subu v1, v1, t9
+ sll t2, t2, t9 # shift t2,t8 left
+ srl v0, t8, v1 # save bits shifted out
+ or t2, t2, v0
+ sll t8, t8, t9
+ b norm_noshift_s
+1:
+ negu t9 # shift t2,t8 right by t9
+ subu v1, v1, t9
+ sll v0, t8, v1 # save bits shifted out
+ sltu v0, zero, v0 # be sure to save any one bits
+ srl t8, t8, t9
+ or t8, t8, v0
+ sll v0, t2, v1 # save bits shifted out
+ or t8, t8, v0
+ srl t2, t2, t9
+norm_noshift_s:
+ move t5, t1 # save unrounded exponent
+ move t6, t2 # save unrounded fraction
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t2, t2, 1 # add rounding bit
+ bne t2, SIMPL_ONE<<1, 5f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry to result
+ bne t2, SIMPL_ONE<<1, 4f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t2, t2, ~1 # clear LSB (round to nearest)
+5:
+ bgt t1, SEXP_MAX, overflow_s # overflow?
+ blt t1, SEXP_MIN, underflow_s # underflow?
+ bne t8, zero, inexact_s # is result inexact?
+ addu t1, t1, SEXP_BIAS # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ b result_fs_s
+
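The rounding block of norm_noshift_s above implements the four IEEE rounding modes with the guard-bit trick: adding GUARDBIT (0x80000000) to the sticky/remainder word carries out exactly when the discarded bits are at or above one half, and a resulting remainder of zero marks the halfway tie, which is then rounded to even by clearing the LSB. A hypothetical C restatement of that decision; the exception flags and the overflow/underflow checks that follow in the assembly are left out. "frac" carries the implied-one bit, "rem" plays the role of t8, and a nonzero "sign" means negative.

#define SIMPL_ONE	0x00800000u
#define GUARDBIT	0x80000000u

enum round_mode { RN, RZ, RP, RM };	/* nearest, toward zero, +inf, -inf */

static unsigned int
round_frac_s(unsigned int frac, unsigned int rem, int sign, enum round_mode rm,
    int *exp)
{
	int round_up;

	switch (rm) {
	case RN:
		round_up = rem > GUARDBIT;	/* above the halfway point */
		if (rem == GUARDBIT)		/* exactly halfway: */
			round_up = frac & 1;	/* round to even */
		break;
	case RZ:
		round_up = 0;			/* truncate */
		break;
	case RP:
		round_up = (sign == 0) && rem != 0;
		break;
	default: /* RM */
		round_up = (sign != 0) && rem != 0;
		break;
	}
	if (round_up) {
		frac++;
		if (frac == (SIMPL_ONE << 1)) {	/* rounding carried out of the */
			frac >>= 1;		/* fraction: renormalize */
			(*exp)++;
		}
	}
	return (frac);
}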
+/*
+ * Handle inexact exception.
+ */
+inexact_s:
+ addu t1, t1, SEXP_BIAS # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+inexact_nobias_s:
+ jal set_fd_s # save result
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Overflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an infinity.
+ */
+overflow_s:
+ or a1, a1, FPC_EXCEPTION_OVERFLOW | FPC_STICKY_OVERFLOW
+ and v0, a1, FPC_ENABLE_OVERFLOW
+ beq v0, zero, 1f
+ subu t1, t1, 192 # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ jal set_fd_s # save result
+ b fpe_trap
+1:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 1f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 2f # round to +infinity
+ bne t0, zero, 3f
+1:
+ li t1, SEXP_MAX # result is max finite
+ li t2, 0x007fffff
+ b inexact_s
+2:
+ bne t0, zero, 1b
+3:
+ li t1, SEXP_MAX + 1 # result is infinity
+ move t2, zero
+ b inexact_s
+
+/*
+ * In this implementation, "tininess" is detected "after rounding" and
+ * "loss of accuracy" is detected as "an inexact result".
+ */
+underflow_s:
+ and v0, a1, FPC_ENABLE_UNDERFLOW
+ beq v0, zero, 1f
+/*
+ * Underflow is enabled so compute the result and trap.
+ */
+ addu t1, t1, 192 # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ jal set_fd_s # save result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ b fpe_trap
+/*
+ * Underflow is not enabled so compute the result,
+ * signal inexact result (if it is) and trap (if enabled).
+ */
+1:
+ move t1, t5 # get unrounded exponent
+ move t2, t6 # get unrounded fraction
+ li t9, SEXP_MIN # compute shift amount
+ subu t9, t9, t1 # shift t2,t8 right by t9
+ blt t9, SFRAC_BITS+2, 3f # shift all the bits out?
+ move t1, zero # result is inexact zero
+ move t2, zero
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+/*
+ * Now round the zero result.
+ * Only need to worry about rounding to +- infinity when the sign matches.
+ */
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, inexact_nobias_s # round to nearest
+ beq v0, FPC_ROUND_RZ, inexact_nobias_s # round to zero
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, inexact_nobias_s # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, inexact_nobias_s # if sign is negative, truncate
+2:
+ addu t2, t2, 1 # add rounding bit
+ b inexact_nobias_s
+3:
+ li v1, 32
+ subu v1, v1, t9
+ sltu v0, zero, t8 # be sure to save any one bits
+ sll t8, t2, v1 # save bits shifted out
+ or t8, t8, v0 # include sticky bits
+ srl t2, t2, t9
+/*
+ * Now round the denormalized result.
+ */
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t2, t2, 1 # add rounding bit
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry to result
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t2, t2, ~1 # clear LSB (round to nearest)
+5:
+ move t1, zero # denorm or zero exponent
+ jal set_fd_s # save result
+ beq t8, zero, done # check for exact result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Determine the amount to shift the fraction in order to restore the
+ * normalized position. After that, round and handle exceptions.
+ */
+norm_d:
+ move v0, t2
+ move t9, zero # t9 = num of leading zeros
+ bne t2, zero, 1f
+ move v0, t3
+ addu t9, 32
+ bne t3, zero, 1f
+ move v0, t8
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2,t3,t8 the correct number of bits.
+ */
+1:
+	subu	t9, t9, DLEAD_ZEROS		# don't count leading zeros
+ subu t1, t1, t9 # adjust the exponent
+ beq t9, zero, norm_noshift_d
+ li v1, 32
+ blt t9, zero, 2f # if shift < 0, shift right
+ blt t9, v1, 1f # shift by < 32?
+ subu t9, t9, v1 # shift by >= 32
+ subu v1, v1, t9
+ sll t2, t3, t9 # shift left by t9
+ srl v0, t8, v1 # save bits shifted out
+ or t2, t2, v0
+ sll t3, t8, t9
+ move t8, zero
+ b norm_noshift_d
+1:
+ subu v1, v1, t9
+ sll t2, t2, t9 # shift left by t9
+ srl v0, t3, v1 # save bits shifted out
+ or t2, t2, v0
+ sll t3, t3, t9
+ srl v0, t8, v1 # save bits shifted out
+ or t3, t3, v0
+ sll t8, t8, t9
+ b norm_noshift_d
+2:
+ negu t9 # shift right by t9
+ subu v1, v1, t9 # (known to be < 32 bits)
+ sll v0, t8, v1 # save bits shifted out
+ sltu v0, zero, v0 # be sure to save any one bits
+ srl t8, t8, t9
+ or t8, t8, v0
+ sll v0, t3, v1 # save bits shifted out
+ or t8, t8, v0
+ srl t3, t3, t9
+ sll v0, t2, v1 # save bits shifted out
+ or t3, t3, v0
+ srl t2, t2, t9
+norm_noshift_d:
+ move t5, t1 # save unrounded exponent
+ move t6, t2 # save unrounded fraction (MS)
+ move t7, t3 # save unrounded fraction (LS)
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t3, t3, 1 # add rounding bit
+ bne t3, zero, 5f # branch if no carry
+ addu t2, t2, 1 # add carry
+ bne t2, DIMPL_ONE<<1, 5f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # branch if no carry
+ addu t3, t3, 1 # add carry
+ bne t3, zero, 4f # branch if no carry
+ addu t2, t2, 1 # add carry to result
+ bne t2, DIMPL_ONE<<1, 4f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t3, t3, ~1 # clear LSB (round to nearest)
+5:
+ bgt t1, DEXP_MAX, overflow_d # overflow?
+ blt t1, DEXP_MIN, underflow_d # underflow?
+ bne t8, zero, inexact_d # is result inexact?
+ addu t1, t1, DEXP_BIAS # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ b result_fs_d
+
+/*
+ * Handle inexact exception.
+ */
+inexact_d:
+ addu t1, t1, DEXP_BIAS # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+inexact_nobias_d:
+ jal set_fd_d # save result
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Overflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an infinity.
+ */
+overflow_d:
+ or a1, a1, FPC_EXCEPTION_OVERFLOW | FPC_STICKY_OVERFLOW
+ and v0, a1, FPC_ENABLE_OVERFLOW
+ beq v0, zero, 1f
+ subu t1, t1, 1536 # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ jal set_fd_d # save result
+ b fpe_trap
+1:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 1f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 2f # round to +infinity
+ bne t0, zero, 3f
+1:
+ li t1, DEXP_MAX # result is max finite
+ li t2, 0x000fffff
+ li t3, 0xffffffff
+ b inexact_d
+2:
+ bne t0, zero, 1b
+3:
+ li t1, DEXP_MAX + 1 # result is infinity
+ move t2, zero
+ move t3, zero
+ b inexact_d
+
+/*
+ * In this implementation, "tininess" is detected "after rounding" and
+ * "loss of accuracy" is detected as "an inexact result".
+ */
+underflow_d:
+ and v0, a1, FPC_ENABLE_UNDERFLOW
+ beq v0, zero, 1f
+/*
+ * Underflow is enabled so compute the result and trap.
+ */
+ addu t1, t1, 1536 # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ jal set_fd_d # save result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ b fpe_trap
+/*
+ * Underflow is not enabled so compute the result,
+ * signal inexact result (if it is) and trap (if enabled).
+ */
+1:
+ move t1, t5 # get unrounded exponent
+ move t2, t6 # get unrounded fraction (MS)
+ move t3, t7 # get unrounded fraction (LS)
+ li t9, DEXP_MIN # compute shift amount
+ subu t9, t9, t1 # shift t2,t8 right by t9
+ blt t9, DFRAC_BITS+2, 3f # shift all the bits out?
+ move t1, zero # result is inexact zero
+ move t2, zero
+ move t3, zero
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+/*
+ * Now round the zero result.
+ * Only need to worry about rounding to +- infinity when the sign matches.
+ */
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, inexact_nobias_d # round to nearest
+ beq v0, FPC_ROUND_RZ, inexact_nobias_d # round to zero
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, inexact_nobias_d # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, inexact_nobias_d # if sign is negative, truncate
+2:
+ addu t3, t3, 1 # add rounding bit
+ b inexact_nobias_d
+3:
+ li v1, 32
+ blt t9, v1, 1f # shift by < 32?
+ subu t9, t9, v1 # shift right by >= 32
+ subu v1, v1, t9
+ sltu v0, zero, t8 # be sure to save any one bits
+ sll t8, t2, v1 # save bits shifted out
+ or t8, t8, v0 # include sticky bits
+ srl t3, t2, t9
+ move t2, zero
+ b 2f
+1:
+ subu v1, v1, t9 # shift right by t9
+ sltu v0, zero, t8 # be sure to save any one bits
+ sll t8, t3, v1 # save bits shifted out
+ or t8, t8, v0 # include sticky bits
+ srl t3, t3, t9
+ sll v0, t2, v1 # save bits shifted out
+ or t3, t3, v0
+ srl t2, t2, t9
+/*
+ * Now round the denormalized result.
+ */
+2:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t3, t3, 1 # add rounding bit
+ bne t3, zero, 5f # if no carry, continue
+ addu t2, t2, 1 # add carry
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t3, t3, 1 # add rounding bit
+ bne t3, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t3, t3, ~1 # clear LSB (round to nearest)
+5:
+ move t1, zero # denorm or zero exponent
+ jal set_fd_d # save result
+ beq t8, zero, done # check for exact result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
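The underflow path above follows the policy stated in the comment: tininess is judged on the rounded result, and any bits lost in the process are reported as an inexact result. As a hedged illustration only (standard C99 fenv.h, not part of this change; volatile keeps the compiler from folding the computation, and libm may be needed at link time), a hardware FPU raises the same pair of flags when a result goes subnormal and loses bits:

#include <fenv.h>
#include <float.h>
#include <stdio.h>

/* Illustrative only: a result that goes subnormal and loses bits sets
 * both the underflow and the inexact flag. */
int
main(void)
{
	volatile double tiny = DBL_MIN;		/* smallest normal double */
	volatile double r;

	feclearexcept(FE_ALL_EXCEPT);
	r = tiny / 3.0;				/* subnormal, not exact */
	printf("underflow=%d inexact=%d r=%g\n",
	    fetestexcept(FE_UNDERFLOW) != 0,
	    fetestexcept(FE_INEXACT) != 0, r);
	return (0);
}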
+/*
+ * Signal an invalid operation if the trap is enabled; otherwise,
+ * the result is a quiet NAN.
+ */
+invalid_s: # trap invalid operation
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ move t0, zero # result is a quiet NAN
+ li t1, SEXP_INF
+ li t2, SQUIET_NAN
+ jal set_fd_s # save result (in t0,t1,t2)
+ b done
+
+/*
+ * Signal an invalid operation if the trap is enabled; otherwise,
+ * the result is a quiet NAN.
+ */
+invalid_d: # trap invalid operation
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ move t0, zero # result is a quiet NAN
+ li t1, DEXP_INF
+ li t2, DQUIET_NAN0
+ li t3, DQUIET_NAN1
+ jal set_fd_d # save result (in t0,t1,t2,t3)
+ b done
+
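When the invalid-operation trap is disabled, invalid_s and invalid_d above substitute a quiet NaN built from an all-ones exponent and the SQUIET_NAN / DQUIET_NAN0 / DQUIET_NAN1 fraction patterns. A hedged sketch of the idea (the exact fraction bits depend on those macros; the constants below are simply the common quiet-NaN encodings with only the top fraction bit set):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: typical quiet-NaN encodings - exponent all ones,
 * most significant fraction bit set, remaining fraction bits clear. */
int
main(void)
{
	uint32_t qnan_s = 0x7FC00000u;
	uint64_t qnan_d = 0x7FF8000000000000ull;
	float f;
	double d;

	memcpy(&f, &qnan_s, sizeof(f));
	memcpy(&d, &qnan_d, sizeof(d));
	printf("%f %f\n", f, d);	/* prints "nan nan" */
	return (0);
}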
+/*
+ * Signal an invalid operation if the trap is enabled; otherwise,
+ * the result is INT_MAX or INT_MIN.
+ */
+invalid_w: # trap invalid operation
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ bne t0, zero, 1f
+ li t2, INT_MAX # result is INT_MAX
+ b result_fs_w
+1:
+ li t2, INT_MIN # result is INT_MIN
+ b result_fs_w
+
+/*
+ * Trap if the hardware should have handled this case.
+ */
+fpe_trap:
+ move a2, a1 # code = FP CSR
+ ctc1 a1, FPC_CSR # save exceptions
+ break 0
+
+/*
+ * Send an illegal instruction signal to the current process.
+ */
+ill:
+ ctc1 a1, FPC_CSR # save exceptions
+ move a2, a0 # code = FP instruction
+ break 0
+
+result_ft_s:
+ move t0, t4 # result is FT
+ move t1, t5
+ move t2, t6
+result_fs_s: # result is FS
+ jal set_fd_s # save result (in t0,t1,t2)
+ b done
+
+result_fs_w:
+ jal set_fd_word # save result (in t2)
+ b done
+
+result_ft_d:
+ move t0, t4 # result is FT
+ move t1, t5
+ move t2, t6
+ move t3, t7
+result_fs_d: # result is FS
+ jal set_fd_d # save result (in t0,t1,t2,t3)
+
+done:
+ lw ra, STAND_RA_OFFSET(sp)
+ addu sp, sp, STAND_FRAME_SIZE
+ j ra
+END(MipsEmulateFP)
+
+/*----------------------------------------------------------------------------
+ * get_fs_int --
+ *
+ * Read (integer) the FS register (bits 15-11).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the sign
+ * t2 contains the fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_fs_int)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, get_fs_int_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_fs_int_tbl:
+ .word get_fs_int_f0
+ .word get_fs_int_f2
+ .word get_fs_int_f4
+ .word get_fs_int_f6
+ .word get_fs_int_f8
+ .word get_fs_int_f10
+ .word get_fs_int_f12
+ .word get_fs_int_f14
+ .word get_fs_int_f16
+ .word get_fs_int_f18
+ .word get_fs_int_f20
+ .word get_fs_int_f22
+ .word get_fs_int_f24
+ .word get_fs_int_f26
+ .word get_fs_int_f28
+ .word get_fs_int_f30
+ .text
+
+get_fs_int_f0:
+ mfc1 t2, $f0
+ b get_fs_int_done
+get_fs_int_f2:
+ mfc1 t2, $f2
+ b get_fs_int_done
+get_fs_int_f4:
+ mfc1 t2, $f4
+ b get_fs_int_done
+get_fs_int_f6:
+ mfc1 t2, $f6
+ b get_fs_int_done
+get_fs_int_f8:
+ mfc1 t2, $f8
+ b get_fs_int_done
+get_fs_int_f10:
+ mfc1 t2, $f10
+ b get_fs_int_done
+get_fs_int_f12:
+ mfc1 t2, $f12
+ b get_fs_int_done
+get_fs_int_f14:
+ mfc1 t2, $f14
+ b get_fs_int_done
+get_fs_int_f16:
+ mfc1 t2, $f16
+ b get_fs_int_done
+get_fs_int_f18:
+ mfc1 t2, $f18
+ b get_fs_int_done
+get_fs_int_f20:
+ mfc1 t2, $f20
+ b get_fs_int_done
+get_fs_int_f22:
+ mfc1 t2, $f22
+ b get_fs_int_done
+get_fs_int_f24:
+ mfc1 t2, $f24
+ b get_fs_int_done
+get_fs_int_f26:
+ mfc1 t2, $f26
+ b get_fs_int_done
+get_fs_int_f28:
+ mfc1 t2, $f28
+ b get_fs_int_done
+get_fs_int_f30:
+ mfc1 t2, $f30
+get_fs_int_done:
+ srl t0, t2, 31 # init the sign bit
+ bge t2, zero, 1f
+ negu t2
+1:
+ j ra
+END(get_fs_int)
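get_fs_int and the accessors that follow all decode a register field out of the COP1 instruction word and then dispatch through a 16-entry jump table, one entry per even FP register. The shift amounts look odd ("12 - 2", "17 - 2", "7 - 2") because they produce a ready-made byte offset into that table rather than the raw register number. A hedged C sketch of the same arithmetic (the example instruction word is just an add.s encoding chosen for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: COP1 field decode. FT = bits 20-16, FS = bits 15-11,
 * FD = bits 10-6. */
int
main(void)
{
	uint32_t insn = 0x46022100;	/* add.s $f4, $f4, $f2 */
	unsigned ft = (insn >> 16) & 0x1F;
	unsigned fs = (insn >> 11) & 0x1F;
	unsigned fd = (insn >> 6) & 0x1F;

	/*
	 * The assembly's "srl a3, a0, 12 - 2; and a3, a3, 0xF << 2" yields
	 * (register / 2) * 4: the odd bit is dropped and the result is a
	 * byte offset into a table of 16 one-word entries.
	 */
	unsigned fs_off = (insn >> (12 - 2)) & (0xF << 2);

	printf("ft=$f%u fs=$f%u fd=$f%u fs table offset=%u\n",
	    ft, fs, fd, fs_off);
	return (0);
}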
+
+/*----------------------------------------------------------------------------
+ * get_ft_fs_s --
+ *
+ * Read (single precision) the FT register (bits 20-16) and
+ * the FS register (bits 15-11) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the FS sign
+ * t1 contains the FS (biased) exponent
+ * t2 contains the FS fraction
+ * t4 contains the FT sign
+ * t5 contains the FT (biased) exponent
+ * t6 contains the FT fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_ft_fs_s)
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, get_ft_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_ft_s_tbl:
+ .word get_ft_s_f0
+ .word get_ft_s_f2
+ .word get_ft_s_f4
+ .word get_ft_s_f6
+ .word get_ft_s_f8
+ .word get_ft_s_f10
+ .word get_ft_s_f12
+ .word get_ft_s_f14
+ .word get_ft_s_f16
+ .word get_ft_s_f18
+ .word get_ft_s_f20
+ .word get_ft_s_f22
+ .word get_ft_s_f24
+ .word get_ft_s_f26
+ .word get_ft_s_f28
+ .word get_ft_s_f30
+ .text
+
+get_ft_s_f0:
+ mfc1 t4, $f0
+ b get_ft_s_done
+get_ft_s_f2:
+ mfc1 t4, $f2
+ b get_ft_s_done
+get_ft_s_f4:
+ mfc1 t4, $f4
+ b get_ft_s_done
+get_ft_s_f6:
+ mfc1 t4, $f6
+ b get_ft_s_done
+get_ft_s_f8:
+ mfc1 t4, $f8
+ b get_ft_s_done
+get_ft_s_f10:
+ mfc1 t4, $f10
+ b get_ft_s_done
+get_ft_s_f12:
+ mfc1 t4, $f12
+ b get_ft_s_done
+get_ft_s_f14:
+ mfc1 t4, $f14
+ b get_ft_s_done
+get_ft_s_f16:
+ mfc1 t4, $f16
+ b get_ft_s_done
+get_ft_s_f18:
+ mfc1 t4, $f18
+ b get_ft_s_done
+get_ft_s_f20:
+ mfc1 t4, $f20
+ b get_ft_s_done
+get_ft_s_f22:
+ mfc1 t4, $f22
+ b get_ft_s_done
+get_ft_s_f24:
+ mfc1 t4, $f24
+ b get_ft_s_done
+get_ft_s_f26:
+ mfc1 t4, $f26
+ b get_ft_s_done
+get_ft_s_f28:
+ mfc1 t4, $f28
+ b get_ft_s_done
+get_ft_s_f30:
+ mfc1 t4, $f30
+get_ft_s_done:
+ srl t5, t4, 23 # get exponent
+ and t5, t5, 0xFF
+ and t6, t4, 0x7FFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ bne t5, SEXP_INF, 1f # is it a signaling NAN?
+ and v0, t6, SSIGNAL_NAN
+ bne v0, zero, invalid_s
+1:
+ /* fall through to get FS */
+
+/*----------------------------------------------------------------------------
+ * get_fs_s --
+ *
+ * Read (single precision) the FS register (bits 15-11) and
+ * break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+ALEAF(get_fs_s)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, get_fs_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_fs_s_tbl:
+ .word get_fs_s_f0
+ .word get_fs_s_f2
+ .word get_fs_s_f4
+ .word get_fs_s_f6
+ .word get_fs_s_f8
+ .word get_fs_s_f10
+ .word get_fs_s_f12
+ .word get_fs_s_f14
+ .word get_fs_s_f16
+ .word get_fs_s_f18
+ .word get_fs_s_f20
+ .word get_fs_s_f22
+ .word get_fs_s_f24
+ .word get_fs_s_f26
+ .word get_fs_s_f28
+ .word get_fs_s_f30
+ .text
+
+get_fs_s_f0:
+ mfc1 t0, $f0
+ b get_fs_s_done
+get_fs_s_f2:
+ mfc1 t0, $f2
+ b get_fs_s_done
+get_fs_s_f4:
+ mfc1 t0, $f4
+ b get_fs_s_done
+get_fs_s_f6:
+ mfc1 t0, $f6
+ b get_fs_s_done
+get_fs_s_f8:
+ mfc1 t0, $f8
+ b get_fs_s_done
+get_fs_s_f10:
+ mfc1 t0, $f10
+ b get_fs_s_done
+get_fs_s_f12:
+ mfc1 t0, $f12
+ b get_fs_s_done
+get_fs_s_f14:
+ mfc1 t0, $f14
+ b get_fs_s_done
+get_fs_s_f16:
+ mfc1 t0, $f16
+ b get_fs_s_done
+get_fs_s_f18:
+ mfc1 t0, $f18
+ b get_fs_s_done
+get_fs_s_f20:
+ mfc1 t0, $f20
+ b get_fs_s_done
+get_fs_s_f22:
+ mfc1 t0, $f22
+ b get_fs_s_done
+get_fs_s_f24:
+ mfc1 t0, $f24
+ b get_fs_s_done
+get_fs_s_f26:
+ mfc1 t0, $f26
+ b get_fs_s_done
+get_fs_s_f28:
+ mfc1 t0, $f28
+ b get_fs_s_done
+get_fs_s_f30:
+ mfc1 t0, $f30
+get_fs_s_done:
+ srl t1, t0, 23 # get exponent
+ and t1, t1, 0xFF
+ and t2, t0, 0x7FFFFF # get fraction
+ srl t0, t0, 31 # get sign
+ bne t1, SEXP_INF, 1f # is it a signaling NAN?
+ and v0, t2, SSIGNAL_NAN
+ bne v0, zero, invalid_s
+1:
+ j ra
+END(get_ft_fs_s)
+
+/*----------------------------------------------------------------------------
+ * get_ft_fs_d --
+ *
+ * Read (double precision) the FT register (bits 20-16) and
+ * the FS register (bits 15-11) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the FS sign
+ * t1 contains the FS (biased) exponent
+ * t2 contains the FS fraction
+ * t3 contains the FS remaining fraction
+ * t4 contains the FT sign
+ * t5 contains the FT (biased) exponent
+ * t6 contains the FT fraction
+ * t7 contains the FT remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_ft_fs_d)
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, get_ft_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_ft_d_tbl:
+ .word get_ft_d_f0
+ .word get_ft_d_f2
+ .word get_ft_d_f4
+ .word get_ft_d_f6
+ .word get_ft_d_f8
+ .word get_ft_d_f10
+ .word get_ft_d_f12
+ .word get_ft_d_f14
+ .word get_ft_d_f16
+ .word get_ft_d_f18
+ .word get_ft_d_f20
+ .word get_ft_d_f22
+ .word get_ft_d_f24
+ .word get_ft_d_f26
+ .word get_ft_d_f28
+ .word get_ft_d_f30
+ .text
+
+get_ft_d_f0:
+ mfc1 t7, $f0
+ mfc1 t4, $f1
+ b get_ft_d_done
+get_ft_d_f2:
+ mfc1 t7, $f2
+ mfc1 t4, $f3
+ b get_ft_d_done
+get_ft_d_f4:
+ mfc1 t7, $f4
+ mfc1 t4, $f5
+ b get_ft_d_done
+get_ft_d_f6:
+ mfc1 t7, $f6
+ mfc1 t4, $f7
+ b get_ft_d_done
+get_ft_d_f8:
+ mfc1 t7, $f8
+ mfc1 t4, $f9
+ b get_ft_d_done
+get_ft_d_f10:
+ mfc1 t7, $f10
+ mfc1 t4, $f11
+ b get_ft_d_done
+get_ft_d_f12:
+ mfc1 t7, $f12
+ mfc1 t4, $f13
+ b get_ft_d_done
+get_ft_d_f14:
+ mfc1 t7, $f14
+ mfc1 t4, $f15
+ b get_ft_d_done
+get_ft_d_f16:
+ mfc1 t7, $f16
+ mfc1 t4, $f17
+ b get_ft_d_done
+get_ft_d_f18:
+ mfc1 t7, $f18
+ mfc1 t4, $f19
+ b get_ft_d_done
+get_ft_d_f20:
+ mfc1 t7, $f20
+ mfc1 t4, $f21
+ b get_ft_d_done
+get_ft_d_f22:
+ mfc1 t7, $f22
+ mfc1 t4, $f23
+ b get_ft_d_done
+get_ft_d_f24:
+ mfc1 t7, $f24
+ mfc1 t4, $f25
+ b get_ft_d_done
+get_ft_d_f26:
+ mfc1 t7, $f26
+ mfc1 t4, $f27
+ b get_ft_d_done
+get_ft_d_f28:
+ mfc1 t7, $f28
+ mfc1 t4, $f29
+ b get_ft_d_done
+get_ft_d_f30:
+ mfc1 t7, $f30
+ mfc1 t4, $f31
+get_ft_d_done:
+ srl t5, t4, 20 # get exponent
+ and t5, t5, 0x7FF
+ and t6, t4, 0xFFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ bne t5, DEXP_INF, 1f # is it a signaling NAN?
+ and v0, t6, DSIGNAL_NAN
+ bne v0, zero, invalid_d
+1:
+ /* fall through to get FS */
+
+/*----------------------------------------------------------------------------
+ * get_fs_d --
+ *
+ * Read (double precision) the FS register (bits 15-11) and
+ * break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ * t3 contains the remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+ALEAF(get_fs_d)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, get_fs_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_fs_d_tbl:
+ .word get_fs_d_f0
+ .word get_fs_d_f2
+ .word get_fs_d_f4
+ .word get_fs_d_f6
+ .word get_fs_d_f8
+ .word get_fs_d_f10
+ .word get_fs_d_f12
+ .word get_fs_d_f14
+ .word get_fs_d_f16
+ .word get_fs_d_f18
+ .word get_fs_d_f20
+ .word get_fs_d_f22
+ .word get_fs_d_f24
+ .word get_fs_d_f26
+ .word get_fs_d_f28
+ .word get_fs_d_f30
+ .text
+
+get_fs_d_f0:
+ mfc1 t3, $f0
+ mfc1 t0, $f1
+ b get_fs_d_done
+get_fs_d_f2:
+ mfc1 t3, $f2
+ mfc1 t0, $f3
+ b get_fs_d_done
+get_fs_d_f4:
+ mfc1 t3, $f4
+ mfc1 t0, $f5
+ b get_fs_d_done
+get_fs_d_f6:
+ mfc1 t3, $f6
+ mfc1 t0, $f7
+ b get_fs_d_done
+get_fs_d_f8:
+ mfc1 t3, $f8
+ mfc1 t0, $f9
+ b get_fs_d_done
+get_fs_d_f10:
+ mfc1 t3, $f10
+ mfc1 t0, $f11
+ b get_fs_d_done
+get_fs_d_f12:
+ mfc1 t3, $f12
+ mfc1 t0, $f13
+ b get_fs_d_done
+get_fs_d_f14:
+ mfc1 t3, $f14
+ mfc1 t0, $f15
+ b get_fs_d_done
+get_fs_d_f16:
+ mfc1 t3, $f16
+ mfc1 t0, $f17
+ b get_fs_d_done
+get_fs_d_f18:
+ mfc1 t3, $f18
+ mfc1 t0, $f19
+ b get_fs_d_done
+get_fs_d_f20:
+ mfc1 t3, $f20
+ mfc1 t0, $f21
+ b get_fs_d_done
+get_fs_d_f22:
+ mfc1 t3, $f22
+ mfc1 t0, $f23
+ b get_fs_d_done
+get_fs_d_f24:
+ mfc1 t3, $f24
+ mfc1 t0, $f25
+ b get_fs_d_done
+get_fs_d_f26:
+ mfc1 t3, $f26
+ mfc1 t0, $f27
+ b get_fs_d_done
+get_fs_d_f28:
+ mfc1 t3, $f28
+ mfc1 t0, $f29
+ b get_fs_d_done
+get_fs_d_f30:
+ mfc1 t3, $f30
+ mfc1 t0, $f31
+get_fs_d_done:
+ srl t1, t0, 20 # get exponent
+ and t1, t1, 0x7FF
+ and t2, t0, 0xFFFFF # get fraction
+ srl t0, t0, 31 # get sign
+ bne t1, DEXP_INF, 1f # is it a signaling NAN?
+ and v0, t2, DSIGNAL_NAN
+ bne v0, zero, invalid_d
+1:
+ j ra
+END(get_ft_fs_d)
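The double-precision accessors keep the two 32-bit halves of each operand in separate registers and split the upper half into a sign bit, an 11-bit biased exponent and 20 high fraction bits, exactly as in get_fs_d_done above. A hedged C sketch of the same unpacking (assumes the usual IEEE-754 binary64 layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: split a double into the fields the get_*_d
 * accessors produce. */
int
main(void)
{
	double d = -0.15625;			/* -1.25 * 2^-3 */
	uint64_t bits;
	uint32_t hi, lo;

	memcpy(&bits, &d, sizeof(bits));
	hi = (uint32_t)(bits >> 32);		/* sign/exponent/fraction-high word */
	lo = (uint32_t)bits;			/* low 32 fraction bits */

	unsigned sign = hi >> 31;		/* srl t0, t0, 31 */
	unsigned exp  = (hi >> 20) & 0x7FF;	/* srl/and as in get_fs_d_done */
	unsigned fhi  = hi & 0xFFFFF;

	printf("sign=%u exp=%u (unbiased %d) frac=0x%05X%08X\n",
	    sign, exp, (int)exp - 1023, fhi, (unsigned)lo);
	return (0);
}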
+
+/*----------------------------------------------------------------------------
+ * get_cmp_s --
+ *
+ * Read (single precision) the FS register (bits 15-11) and
+ * the FT register (bits 20-16) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ *	t0 contains the FS sign
+ *	t1 contains the FS (biased) exponent
+ *	t2 contains the FS fraction
+ *	t4 contains the FT sign
+ *	t5 contains the FT (biased) exponent
+ *	t6 contains the FT fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_cmp_s)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, cmp_fs_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_fs_s_tbl:
+ .word cmp_fs_s_f0
+ .word cmp_fs_s_f2
+ .word cmp_fs_s_f4
+ .word cmp_fs_s_f6
+ .word cmp_fs_s_f8
+ .word cmp_fs_s_f10
+ .word cmp_fs_s_f12
+ .word cmp_fs_s_f14
+ .word cmp_fs_s_f16
+ .word cmp_fs_s_f18
+ .word cmp_fs_s_f20
+ .word cmp_fs_s_f22
+ .word cmp_fs_s_f24
+ .word cmp_fs_s_f26
+ .word cmp_fs_s_f28
+ .word cmp_fs_s_f30
+ .text
+
+cmp_fs_s_f0:
+ mfc1 t0, $f0
+ b cmp_fs_s_done
+cmp_fs_s_f2:
+ mfc1 t0, $f2
+ b cmp_fs_s_done
+cmp_fs_s_f4:
+ mfc1 t0, $f4
+ b cmp_fs_s_done
+cmp_fs_s_f6:
+ mfc1 t0, $f6
+ b cmp_fs_s_done
+cmp_fs_s_f8:
+ mfc1 t0, $f8
+ b cmp_fs_s_done
+cmp_fs_s_f10:
+ mfc1 t0, $f10
+ b cmp_fs_s_done
+cmp_fs_s_f12:
+ mfc1 t0, $f12
+ b cmp_fs_s_done
+cmp_fs_s_f14:
+ mfc1 t0, $f14
+ b cmp_fs_s_done
+cmp_fs_s_f16:
+ mfc1 t0, $f16
+ b cmp_fs_s_done
+cmp_fs_s_f18:
+ mfc1 t0, $f18
+ b cmp_fs_s_done
+cmp_fs_s_f20:
+ mfc1 t0, $f20
+ b cmp_fs_s_done
+cmp_fs_s_f22:
+ mfc1 t0, $f22
+ b cmp_fs_s_done
+cmp_fs_s_f24:
+ mfc1 t0, $f24
+ b cmp_fs_s_done
+cmp_fs_s_f26:
+ mfc1 t0, $f26
+ b cmp_fs_s_done
+cmp_fs_s_f28:
+ mfc1 t0, $f28
+ b cmp_fs_s_done
+cmp_fs_s_f30:
+ mfc1 t0, $f30
+cmp_fs_s_done:
+ srl t1, t0, 23 # get exponent
+ and t1, t1, 0xFF
+ and t2, t0, 0x7FFFFF # get fraction
+ srl t0, t0, 31 # get sign
+
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, cmp_ft_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_ft_s_tbl:
+ .word cmp_ft_s_f0
+ .word cmp_ft_s_f2
+ .word cmp_ft_s_f4
+ .word cmp_ft_s_f6
+ .word cmp_ft_s_f8
+ .word cmp_ft_s_f10
+ .word cmp_ft_s_f12
+ .word cmp_ft_s_f14
+ .word cmp_ft_s_f16
+ .word cmp_ft_s_f18
+ .word cmp_ft_s_f20
+ .word cmp_ft_s_f22
+ .word cmp_ft_s_f24
+ .word cmp_ft_s_f26
+ .word cmp_ft_s_f28
+ .word cmp_ft_s_f30
+ .text
+
+cmp_ft_s_f0:
+ mfc1 t4, $f0
+ b cmp_ft_s_done
+cmp_ft_s_f2:
+ mfc1 t4, $f2
+ b cmp_ft_s_done
+cmp_ft_s_f4:
+ mfc1 t4, $f4
+ b cmp_ft_s_done
+cmp_ft_s_f6:
+ mfc1 t4, $f6
+ b cmp_ft_s_done
+cmp_ft_s_f8:
+ mfc1 t4, $f8
+ b cmp_ft_s_done
+cmp_ft_s_f10:
+ mfc1 t4, $f10
+ b cmp_ft_s_done
+cmp_ft_s_f12:
+ mfc1 t4, $f12
+ b cmp_ft_s_done
+cmp_ft_s_f14:
+ mfc1 t4, $f14
+ b cmp_ft_s_done
+cmp_ft_s_f16:
+ mfc1 t4, $f16
+ b cmp_ft_s_done
+cmp_ft_s_f18:
+ mfc1 t4, $f18
+ b cmp_ft_s_done
+cmp_ft_s_f20:
+ mfc1 t4, $f20
+ b cmp_ft_s_done
+cmp_ft_s_f22:
+ mfc1 t4, $f22
+ b cmp_ft_s_done
+cmp_ft_s_f24:
+ mfc1 t4, $f24
+ b cmp_ft_s_done
+cmp_ft_s_f26:
+ mfc1 t4, $f26
+ b cmp_ft_s_done
+cmp_ft_s_f28:
+ mfc1 t4, $f28
+ b cmp_ft_s_done
+cmp_ft_s_f30:
+ mfc1 t4, $f30
+cmp_ft_s_done:
+ srl t5, t4, 23 # get exponent
+ and t5, t5, 0xFF
+ and t6, t4, 0x7FFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ j ra
+END(get_cmp_s)
+
+/*----------------------------------------------------------------------------
+ * get_cmp_d --
+ *
+ * Read (double precision) the FS register (bits 15-11) and
+ * the FT register (bits 20-16) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ *	t0 contains the FS sign
+ *	t1 contains the FS (biased) exponent
+ *	t2 contains the FS fraction
+ *	t3 contains the FS remaining fraction
+ *	t4 contains the FT sign
+ *	t5 contains the FT (biased) exponent
+ *	t6 contains the FT fraction
+ *	t7 contains the FT remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_cmp_d)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, cmp_fs_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_fs_d_tbl:
+ .word cmp_fs_d_f0
+ .word cmp_fs_d_f2
+ .word cmp_fs_d_f4
+ .word cmp_fs_d_f6
+ .word cmp_fs_d_f8
+ .word cmp_fs_d_f10
+ .word cmp_fs_d_f12
+ .word cmp_fs_d_f14
+ .word cmp_fs_d_f16
+ .word cmp_fs_d_f18
+ .word cmp_fs_d_f20
+ .word cmp_fs_d_f22
+ .word cmp_fs_d_f24
+ .word cmp_fs_d_f26
+ .word cmp_fs_d_f28
+ .word cmp_fs_d_f30
+ .text
+
+cmp_fs_d_f0:
+ mfc1 t3, $f0
+ mfc1 t0, $f1
+ b cmp_fs_d_done
+cmp_fs_d_f2:
+ mfc1 t3, $f2
+ mfc1 t0, $f3
+ b cmp_fs_d_done
+cmp_fs_d_f4:
+ mfc1 t3, $f4
+ mfc1 t0, $f5
+ b cmp_fs_d_done
+cmp_fs_d_f6:
+ mfc1 t3, $f6
+ mfc1 t0, $f7
+ b cmp_fs_d_done
+cmp_fs_d_f8:
+ mfc1 t3, $f8
+ mfc1 t0, $f9
+ b cmp_fs_d_done
+cmp_fs_d_f10:
+ mfc1 t3, $f10
+ mfc1 t0, $f11
+ b cmp_fs_d_done
+cmp_fs_d_f12:
+ mfc1 t3, $f12
+ mfc1 t0, $f13
+ b cmp_fs_d_done
+cmp_fs_d_f14:
+ mfc1 t3, $f14
+ mfc1 t0, $f15
+ b cmp_fs_d_done
+cmp_fs_d_f16:
+ mfc1 t3, $f16
+ mfc1 t0, $f17
+ b cmp_fs_d_done
+cmp_fs_d_f18:
+ mfc1 t3, $f18
+ mfc1 t0, $f19
+ b cmp_fs_d_done
+cmp_fs_d_f20:
+ mfc1 t3, $f20
+ mfc1 t0, $f21
+ b cmp_fs_d_done
+cmp_fs_d_f22:
+ mfc1 t3, $f22
+ mfc1 t0, $f23
+ b cmp_fs_d_done
+cmp_fs_d_f24:
+ mfc1 t3, $f24
+ mfc1 t0, $f25
+ b cmp_fs_d_done
+cmp_fs_d_f26:
+ mfc1 t3, $f26
+ mfc1 t0, $f27
+ b cmp_fs_d_done
+cmp_fs_d_f28:
+ mfc1 t3, $f28
+ mfc1 t0, $f29
+ b cmp_fs_d_done
+cmp_fs_d_f30:
+ mfc1 t3, $f30
+ mfc1 t0, $f31
+cmp_fs_d_done:
+ srl t1, t0, 20 # get exponent
+ and t1, t1, 0x7FF
+ and t2, t0, 0xFFFFF # get fraction
+ srl t0, t0, 31 # get sign
+
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, cmp_ft_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_ft_d_tbl:
+ .word cmp_ft_d_f0
+ .word cmp_ft_d_f2
+ .word cmp_ft_d_f4
+ .word cmp_ft_d_f6
+ .word cmp_ft_d_f8
+ .word cmp_ft_d_f10
+ .word cmp_ft_d_f12
+ .word cmp_ft_d_f14
+ .word cmp_ft_d_f16
+ .word cmp_ft_d_f18
+ .word cmp_ft_d_f20
+ .word cmp_ft_d_f22
+ .word cmp_ft_d_f24
+ .word cmp_ft_d_f26
+ .word cmp_ft_d_f28
+ .word cmp_ft_d_f30
+ .text
+
+cmp_ft_d_f0:
+ mfc1 t7, $f0
+ mfc1 t4, $f1
+ b cmp_ft_d_done
+cmp_ft_d_f2:
+ mfc1 t7, $f2
+ mfc1 t4, $f3
+ b cmp_ft_d_done
+cmp_ft_d_f4:
+ mfc1 t7, $f4
+ mfc1 t4, $f5
+ b cmp_ft_d_done
+cmp_ft_d_f6:
+ mfc1 t7, $f6
+ mfc1 t4, $f7
+ b cmp_ft_d_done
+cmp_ft_d_f8:
+ mfc1 t7, $f8
+ mfc1 t4, $f9
+ b cmp_ft_d_done
+cmp_ft_d_f10:
+ mfc1 t7, $f10
+ mfc1 t4, $f11
+ b cmp_ft_d_done
+cmp_ft_d_f12:
+ mfc1 t7, $f12
+ mfc1 t4, $f13
+ b cmp_ft_d_done
+cmp_ft_d_f14:
+ mfc1 t7, $f14
+ mfc1 t4, $f15
+ b cmp_ft_d_done
+cmp_ft_d_f16:
+ mfc1 t7, $f16
+ mfc1 t4, $f17
+ b cmp_ft_d_done
+cmp_ft_d_f18:
+ mfc1 t7, $f18
+ mfc1 t4, $f19
+ b cmp_ft_d_done
+cmp_ft_d_f20:
+ mfc1 t7, $f20
+ mfc1 t4, $f21
+ b cmp_ft_d_done
+cmp_ft_d_f22:
+ mfc1 t7, $f22
+ mfc1 t4, $f23
+ b cmp_ft_d_done
+cmp_ft_d_f24:
+ mfc1 t7, $f24
+ mfc1 t4, $f25
+ b cmp_ft_d_done
+cmp_ft_d_f26:
+ mfc1 t7, $f26
+ mfc1 t4, $f27
+ b cmp_ft_d_done
+cmp_ft_d_f28:
+ mfc1 t7, $f28
+ mfc1 t4, $f29
+ b cmp_ft_d_done
+cmp_ft_d_f30:
+ mfc1 t7, $f30
+ mfc1 t4, $f31
+cmp_ft_d_done:
+ srl t5, t4, 20 # get exponent
+ and t5, t5, 0x7FF
+ and t6, t4, 0xFFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ j ra
+END(get_cmp_d)
+
+/*----------------------------------------------------------------------------
+ * set_fd_s --
+ *
+ * Write (single precision) the FD register (bits 10-6).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Arguments:
+ * a0 contains the FP instruction
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ *
+ * set_fd_word --
+ *
+ * Write (integer) the FD register (bits 10-6).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Arguments:
+ * a0 contains the FP instruction
+ * t2 contains the integer
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(set_fd_s)
+ sll t0, t0, 31 # position sign
+ sll t1, t1, 23 # position exponent
+ or t2, t2, t0
+ or t2, t2, t1
+ALEAF(set_fd_word)
+ srl a3, a0, 7 - 2 # get FD field (even regs only)
+	and	a3, a3, 0xF << 2	# mask FD field
+ lw a3, set_fd_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+set_fd_s_tbl:
+ .word set_fd_s_f0
+ .word set_fd_s_f2
+ .word set_fd_s_f4
+ .word set_fd_s_f6
+ .word set_fd_s_f8
+ .word set_fd_s_f10
+ .word set_fd_s_f12
+ .word set_fd_s_f14
+ .word set_fd_s_f16
+ .word set_fd_s_f18
+ .word set_fd_s_f20
+ .word set_fd_s_f22
+ .word set_fd_s_f24
+ .word set_fd_s_f26
+ .word set_fd_s_f28
+ .word set_fd_s_f30
+ .text
+
+set_fd_s_f0:
+ mtc1 t2, $f0
+ j ra
+set_fd_s_f2:
+ mtc1 t2, $f2
+ j ra
+set_fd_s_f4:
+ mtc1 t2, $f4
+ j ra
+set_fd_s_f6:
+ mtc1 t2, $f6
+ j ra
+set_fd_s_f8:
+ mtc1 t2, $f8
+ j ra
+set_fd_s_f10:
+ mtc1 t2, $f10
+ j ra
+set_fd_s_f12:
+ mtc1 t2, $f12
+ j ra
+set_fd_s_f14:
+ mtc1 t2, $f14
+ j ra
+set_fd_s_f16:
+ mtc1 t2, $f16
+ j ra
+set_fd_s_f18:
+ mtc1 t2, $f18
+ j ra
+set_fd_s_f20:
+ mtc1 t2, $f20
+ j ra
+set_fd_s_f22:
+ mtc1 t2, $f22
+ j ra
+set_fd_s_f24:
+ mtc1 t2, $f24
+ j ra
+set_fd_s_f26:
+ mtc1 t2, $f26
+ j ra
+set_fd_s_f28:
+ mtc1 t2, $f28
+ j ra
+set_fd_s_f30:
+ mtc1 t2, $f30
+ j ra
+END(set_fd_s)
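set_fd_s reverses the unpacking: it shifts the sign to bit 31 and the biased exponent to bits 30-23, ORs in the fraction, and writes the word back with mtc1. A minimal hedged sketch of that packing step in C:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: rebuild a single-precision word from its fields. */
int
main(void)
{
	uint32_t sign = 0, exp = 127 + 1, frac = 0x400000;	/* 1.5 * 2^1 */
	uint32_t word = (sign << 31) | (exp << 23) | frac;
	float f;

	memcpy(&f, &word, sizeof(f));
	printf("0x%08X -> %f\n", word, f);	/* prints 3.000000 */
	return (0);
}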
+
+/*----------------------------------------------------------------------------
+ * set_fd_d --
+ *
+ *	Write (double precision) the FD register (bits 10-6).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Arguments:
+ * a0 contains the FP instruction
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ * t3 contains the remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(set_fd_d)
+ sll t0, t0, 31 # set sign
+ sll t1, t1, 20 # set exponent
+ or t0, t0, t1
+ or t0, t0, t2 # set fraction
+ srl a3, a0, 7 - 2 # get FD field (even regs only)
+ and a3, a3, 0xF << 2 # mask FD field
+ lw a3, set_fd_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+set_fd_d_tbl:
+ .word set_fd_d_f0
+ .word set_fd_d_f2
+ .word set_fd_d_f4
+ .word set_fd_d_f6
+ .word set_fd_d_f8
+ .word set_fd_d_f10
+ .word set_fd_d_f12
+ .word set_fd_d_f14
+ .word set_fd_d_f16
+ .word set_fd_d_f18
+ .word set_fd_d_f20
+ .word set_fd_d_f22
+ .word set_fd_d_f24
+ .word set_fd_d_f26
+ .word set_fd_d_f28
+ .word set_fd_d_f30
+ .text
+
+set_fd_d_f0:
+ mtc1 t3, $f0
+ mtc1 t0, $f1
+ j ra
+set_fd_d_f2:
+ mtc1 t3, $f2
+ mtc1 t0, $f3
+ j ra
+set_fd_d_f4:
+ mtc1 t3, $f4
+ mtc1 t0, $f5
+ j ra
+set_fd_d_f6:
+ mtc1 t3, $f6
+ mtc1 t0, $f7
+ j ra
+set_fd_d_f8:
+ mtc1 t3, $f8
+ mtc1 t0, $f9
+ j ra
+set_fd_d_f10:
+ mtc1 t3, $f10
+ mtc1 t0, $f11
+ j ra
+set_fd_d_f12:
+ mtc1 t3, $f12
+ mtc1 t0, $f13
+ j ra
+set_fd_d_f14:
+ mtc1 t3, $f14
+ mtc1 t0, $f15
+ j ra
+set_fd_d_f16:
+ mtc1 t3, $f16
+ mtc1 t0, $f17
+ j ra
+set_fd_d_f18:
+ mtc1 t3, $f18
+ mtc1 t0, $f19
+ j ra
+set_fd_d_f20:
+ mtc1 t3, $f20
+ mtc1 t0, $f21
+ j ra
+set_fd_d_f22:
+ mtc1 t3, $f22
+ mtc1 t0, $f23
+ j ra
+set_fd_d_f24:
+ mtc1 t3, $f24
+ mtc1 t0, $f25
+ j ra
+set_fd_d_f26:
+ mtc1 t3, $f26
+ mtc1 t0, $f27
+ j ra
+set_fd_d_f28:
+ mtc1 t3, $f28
+ mtc1 t0, $f29
+ j ra
+set_fd_d_f30:
+ mtc1 t3, $f30
+ mtc1 t0, $f31
+ j ra
+END(set_fd_d)
+
+/*----------------------------------------------------------------------------
+ * renorm_fs_s --
+ *
+ * Results:
+ * t1 unbiased exponent
+ * t2 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_fs_s)
+/*
+ * Find out how many leading zero bits are in t2 and put in t9.
+ */
+ move v0, t2
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2 the correct number of bits.
+ */
+1:
+	subu	t9, t9, SLEAD_ZEROS	# don't count normal leading zeros
+ li t1, SEXP_MIN
+ subu t1, t1, t9 # adjust exponent
+ sll t2, t2, t9
+ j ra
+END(renorm_fs_s)
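The renorm_* routines (this one and the three that follow) normalize a denormal operand by counting leading zeros with a shift-and-test binary search (16, then 8, 4, 2, 1 bits) and adjusting the exponent by the same amount. A hedged C sketch of the counting step; the standalone helper below adds an explicit zero check that the assembly does not need, since its operand is known to be nonzero:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: binary-search count of leading zero bits. */
static unsigned
clz32(uint32_t v)
{
	unsigned n = 0;

	if (v == 0)
		return (32);
	if ((v >> 16) == 0) { n += 16; v <<= 16; }
	if ((v >> 24) == 0) { n += 8;  v <<= 8; }
	if ((v >> 28) == 0) { n += 4;  v <<= 4; }
	if ((v >> 30) == 0) { n += 2;  v <<= 2; }
	if ((v >> 31) == 0) { n += 1; }
	return (n);
}

int
main(void)
{
	printf("%u %u %u\n", clz32(1), clz32(0x00400000), clz32(0x80000000));
	/* prints 31 9 0 */
	return (0);
}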
+
+/*----------------------------------------------------------------------------
+ * renorm_fs_d --
+ *
+ * Results:
+ * t1 unbiased exponent
+ * t2,t3 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_fs_d)
+/*
+ * Find out how many leading zero bits are in t2,t3 and put in t9.
+ */
+ move v0, t2
+ move t9, zero
+ bne t2, zero, 1f
+ move v0, t3
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2,t3 the correct number of bits.
+ */
+1:
+	subu	t9, t9, DLEAD_ZEROS	# don't count normal leading zeros
+ li t1, DEXP_MIN
+ subu t1, t1, t9 # adjust exponent
+ li v0, 32
+ blt t9, v0, 1f
+ subu t9, t9, v0 # shift fraction left >= 32 bits
+ sll t2, t3, t9
+ move t3, zero
+ j ra
+1:
+ subu v0, v0, t9 # shift fraction left < 32 bits
+ sll t2, t2, t9
+ srl v1, t3, v0
+ or t2, t2, v1
+ sll t3, t3, t9
+ j ra
+END(renorm_fs_d)
+
+/*----------------------------------------------------------------------------
+ * renorm_ft_s --
+ *
+ * Results:
+ * t5 unbiased exponent
+ * t6 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_ft_s)
+/*
+ * Find out how many leading zero bits are in t6 and put in t9.
+ */
+ move v0, t6
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t6 the correct number of bits.
+ */
+1:
+	subu	t9, t9, SLEAD_ZEROS	# don't count normal leading zeros
+ li t5, SEXP_MIN
+ subu t5, t5, t9 # adjust exponent
+ sll t6, t6, t9
+ j ra
+END(renorm_ft_s)
+
+/*----------------------------------------------------------------------------
+ * renorm_ft_d --
+ *
+ * Results:
+ * t5 unbiased exponent
+ * t6,t7 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_ft_d)
+/*
+ * Find out how many leading zero bits are in t6,t7 and put in t9.
+ */
+ move v0, t6
+ move t9, zero
+ bne t6, zero, 1f
+ move v0, t7
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t6,t7 the correct number of bits.
+ */
+1:
+	subu	t9, t9, DLEAD_ZEROS	# don't count normal leading zeros
+ li t5, DEXP_MIN
+ subu t5, t5, t9 # adjust exponent
+ li v0, 32
+ blt t9, v0, 1f
+ subu t9, t9, v0 # shift fraction left >= 32 bits
+ sll t6, t7, t9
+ move t7, zero
+ j ra
+1:
+ subu v0, v0, t9 # shift fraction left < 32 bits
+ sll t6, t6, t9
+ srl v1, t7, v0
+ or t6, t6, v1
+ sll t7, t7, t9
+ j ra
+END(renorm_ft_d)
diff --git a/sys/mips/mips/gdb_machdep.c b/sys/mips/mips/gdb_machdep.c
new file mode 100644
index 0000000..ae77e6b
--- /dev/null
+++ b/sys/mips/mips/gdb_machdep.c
@@ -0,0 +1,189 @@
+/* $NetBSD: kgdb_machdep.c,v 1.11 2005/12/24 22:45:35 perry Exp $ */
+
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Matthias Pfaller.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Matthias Pfaller.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * JNPR: gdb_machdep.c,v 1.1 2007/08/09 12:25:25 katta
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/signal.h>
+#include <sys/pcpu.h>
+
+#include <machine/gdb_machdep.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+#include <machine/trap.h>
+
+#include <gdb/gdb.h>
+
+void *
+gdb_cpu_getreg(int regnum, size_t *regsz)
+{
+
+ *regsz = gdb_cpu_regsz(regnum);
+ if (kdb_thread == PCPU_GET(curthread)) {
+ switch (regnum) {
+ /*
+ * XXX: May need to add more registers
+ */
+ case 2: return (&kdb_frame->v0);
+ case 3: return (&kdb_frame->v1);
+ }
+ }
+ switch (regnum) {
+ case 16: return (&kdb_thrctx->pcb_context.val[0]);
+ case 17: return (&kdb_thrctx->pcb_context.val[1]);
+ case 18: return (&kdb_thrctx->pcb_context.val[2]);
+ case 19: return (&kdb_thrctx->pcb_context.val[3]);
+ case 20: return (&kdb_thrctx->pcb_context.val[4]);
+ case 21: return (&kdb_thrctx->pcb_context.val[5]);
+ case 22: return (&kdb_thrctx->pcb_context.val[6]);
+ case 23: return (&kdb_thrctx->pcb_context.val[7]);
+ case 29: return (&kdb_thrctx->pcb_context.val[8]);
+ case 30: return (&kdb_thrctx->pcb_context.val[9]);
+ case 31: return (&kdb_thrctx->pcb_context.val[10]);
+ }
+ return (NULL);
+}
+
+void
+gdb_cpu_setreg(int regnum, void *val)
+{
+ switch (regnum) {
+ case GDB_REG_PC:
+ kdb_thrctx->pcb_context.val[10] = *(register_t *)val;
+ if (kdb_thread == PCPU_GET(curthread))
+ kdb_frame->pc = *(register_t *)val;
+ }
+}
+
+int
+gdb_cpu_signal(int entry, int code)
+{
+ switch (entry) {
+ case T_TLB_MOD:
+ case T_TLB_MOD+T_USER:
+ case T_TLB_LD_MISS:
+ case T_TLB_ST_MISS:
+ case T_TLB_LD_MISS+T_USER:
+ case T_TLB_ST_MISS+T_USER:
+ case T_ADDR_ERR_LD: /* misaligned access */
+ case T_ADDR_ERR_ST: /* misaligned access */
+ case T_BUS_ERR_LD_ST: /* BERR asserted to CPU */
+ case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */
+ case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */
+ case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to CPU */
+ case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to CPU */
+ return (SIGSEGV);
+
+ case T_BREAK:
+ case T_BREAK+T_USER:
+ return (SIGTRAP);
+
+ case T_RES_INST+T_USER:
+ case T_COP_UNUSABLE+T_USER:
+ return (SIGILL);
+
+ case T_FPE+T_USER:
+ case T_OVFLOW+T_USER:
+ return (SIGFPE);
+
+ default:
+ return (SIGEMT);
+ }
+}
diff --git a/sys/mips/mips/genassym.c b/sys/mips/mips/genassym.c
new file mode 100644
index 0000000..90974c7
--- /dev/null
+++ b/sys/mips/mips/genassym.c
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
+ * from: src/sys/i386/i386/genassym.c,v 1.86.2.1 2000/05/16 06:58:06 dillon
+ * JNPR: genassym.c,v 1.4 2007/08/09 11:23:32 katta
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/assym.h>
+#include <machine/pte.h>
+#include <sys/proc.h>
+#include <sys/errno.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/resourcevar.h>
+#include <sys/ucontext.h>
+#include <sys/vmmeter.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <sys/proc.h>
+#include <machine/pcb.h>
+#include <machine/sigframe.h>
+#include <machine/proc.h>
+
+#ifndef offsetof
+#define offsetof(t,m) (int)((&((t *)0L)->m))
+#endif
+
+
+ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_UPTE, offsetof(struct thread, td_md.md_upte));
+ASSYM(TD_REALKSTACK, offsetof(struct thread, td_md.md_realstack));
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
+ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
+
+ASSYM(TF_REG_SR, offsetof(struct trapframe, sr));
+
+ASSYM(U_PCB_REGS, offsetof(struct pcb, pcb_regs.zero));
+ASSYM(U_PCB_CONTEXT, offsetof(struct pcb, pcb_context));
+ASSYM(U_PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+ASSYM(U_PCB_FPREGS, offsetof(struct pcb, pcb_regs.f0));
+
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_SEGBASE, offsetof(struct pcpu, pc_segbase));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread));
+ASSYM(PC_BOOT_STACK, offsetof(struct pcpu, pc_boot_stack));
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
+ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
+
+ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
+ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
+ASSYM(VM_KERNEL_ALLOC_OFFSET, VM_KERNEL_ALLOC_OFFSET);
+ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc));
+ASSYM(SIGFPE, SIGFPE);
+ASSYM(PGSHIFT, PGSHIFT);
+ASSYM(NBPG, NBPG);
+ASSYM(SEGSHIFT, SEGSHIFT);
+ASSYM(NPTEPG, NPTEPG);
+ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(PCPU_SIZE, sizeof(struct pcpu));
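genassym.c never runs on the target: it is compiled and post-processed into assym.s so that assembly sources such as locore.S can refer to C structure offsets by symbolic names (TD_PCB, PC_CURTHREAD, and so on). A hedged sketch of the underlying idea, with a hypothetical two-field structure standing in for the real struct thread and plain printf standing in for the ASSYM machinery:

#include <stddef.h>
#include <stdio.h>

/* Illustrative only: a hypothetical layout, not the kernel's struct thread. */
struct thread {
	void	*td_lock;
	void	*td_pcb;
};

int
main(void)
{
	/* The real build emits this constant into assym.s via ASSYM(). */
	printf("#define TD_PCB %zu\n", offsetof(struct thread, td_pcb));
	return (0);
}

The assembly then writes `lw a0, TD_PCB(sp)` (as locore.S does further down) instead of hard-coding the byte offset.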
diff --git a/sys/mips/mips/in_cksum.c b/sys/mips/mips/in_cksum.c
new file mode 100644
index 0000000..19d3d3c
--- /dev/null
+++ b/sys/mips/mips/in_cksum.c
@@ -0,0 +1,248 @@
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
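ADDCARRY, REDUCE32 and REDUCE16 fold the wide partial sum kept in a 64-bit accumulator down to the final 16-bit ones-complement checksum by repeatedly adding the upper bits back into the lower ones. A hedged sketch of an equivalent fold, using the textbook 16-bit-word formulation rather than the optimized 32/64-bit path taken by in_cksumdata() below:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: fold a wide partial sum to the 16-bit
 * ones-complement Internet checksum. */
static uint16_t
fold(uint64_t sum)
{
	/* 64 -> 32 -> 16 bits, adding the carries back in each time. */
	sum = (sum & 0xFFFFFFFFu) + (sum >> 32);
	sum = (sum & 0xFFFFFFFFu) + (sum >> 32);
	sum = (sum & 0xFFFFu) + (sum >> 16);
	sum = (sum & 0xFFFFu) + (sum >> 16);
	return ((uint16_t)~sum);
}

int
main(void)
{
	/* 16-bit words of a small sample IP header (checksum field zeroed). */
	uint16_t words[] = { 0x4500, 0x0073, 0x0000, 0x4000,
			     0x4011, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };
	uint64_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum += words[i];
	printf("checksum = 0x%04x\n", fold(sum));	/* 0xb861 */
	return (0);
}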
+
+static const u_int32_t in_masks[] = {
+#ifndef _MISEB
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+#else
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0xFF000000, 0xFFFF0000, 0xFFFFFF00, /* offset 0 */
+ 0x00000000, 0x00FF0000, 0x00FFFF00, 0x00FFFFFF, /* offset 1 */
+ 0x00000000, 0x0000FF00, 0x0000FFFF, 0x0000FFFF, /* offset 2 */
+ 0x00000000, 0x000000FF, 0x000000FF, 0x000000FF, /* offset 3 */
+#endif
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+ const u_int32_t *lw = (const u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+	 * Access the prefill word to start the load of the next cache line,
+	 * then add the current cache line and save the prefill result for
+	 * the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+
+ if ((clen ^ (int) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
diff --git a/sys/mips/mips/intr_machdep.c b/sys/mips/mips/intr_machdep.c
new file mode 100644
index 0000000..445d83e
--- /dev/null
+++ b/sys/mips/mips/intr_machdep.c
@@ -0,0 +1,199 @@
+/*-
+ * Copyright (c) 2006 Fill this file and put your name here
+ * Copyright (c) 2002-2004 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+
+#include <machine/clock.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/cpuinfo.h>
+#include <machine/cpuregs.h>
+#include <machine/frame.h>
+#include <machine/intr_machdep.h>
+#include <machine/md_var.h>
+#include <machine/trap.h>
+
+static struct intr_event *hardintr_events[NHARD_IRQS];
+static struct intr_event *softintr_events[NSOFT_IRQS];
+
+#ifdef notyet
+static int intrcnt_tab[NHARD_IRQS + NSOFT_IRQS];
+static int intrcnt_index = 0;
+static int last_printed = 0;
+#endif
+
+void
+mips_mask_irq(void)
+{
+
+ printf("Unimplemented: %s\n", __func__);
+}
+
+void
+mips_unmask_irq(void)
+{
+
+ printf("Unimplemented: %s\n", __func__);
+}
+
+void
+cpu_establish_hardintr(const char *name, driver_filter_t *filt,
+ void (*handler)(void*), void *arg, int irq, int flags,
+ void **cookiep)
+{
+ struct intr_event *event;
+ int error;
+
+ printf("Establish HARD IRQ %d: filt %p handler %p arg %p\n", irq, filt, handler, arg);
+ /*
+	 * We have 6 hard interrupt levels, numbered 0 through 5.
+ */
+ if (irq < 0 || irq >= NHARD_IRQS)
+ panic("%s called for unknown hard intr %d", __func__, irq);
+
+ event = hardintr_events[irq];
+ if (event == NULL) {
+ error = intr_event_create(&event, (void *)irq, 0,
+ (mask_fn)mips_mask_irq, (mask_fn)mips_unmask_irq,
+ (mask_fn)mips_unmask_irq, NULL, "hard intr%d:", irq);
+ if (error)
+ return;
+ hardintr_events[irq] = event;
+#ifdef notyet
+ last_printed +=
+ snprintf(intrnames + last_printed,
+ MAXCOMLEN + 1,
+ "hard irq%d: %s", irq, name);
+ last_printed++;
+ intrcnt_tab[irq] = intrcnt_index;
+ intrcnt_index++;
+#endif
+
+ }
+
+ intr_event_add_handler(event, name, filt, handler, arg,
+ intr_priority(flags), flags, cookiep);
+
+ mips_wr_status(mips_rd_status() | (((1<< irq) << 8) << 2));
+}
+
+void
+cpu_establish_softintr(const char *name, driver_filter_t *filt,
+ void (*handler)(void*), void *arg, int irq, int flags,
+ void **cookiep)
+{
+ struct intr_event *event;
+ int error;
+
+ printf("Establish SOFT IRQ %d: filt %p handler %p arg %p\n", irq, filt, handler, arg);
+	if (irq < 0 || irq >= NSOFT_IRQS)
+		panic("%s called for unknown soft intr %d", __func__, irq);
+
+ event = softintr_events[irq];
+ if (event == NULL) {
+ error = intr_event_create(&event, (void *)irq, 0,
+ (mask_fn)mips_mask_irq, (mask_fn)mips_unmask_irq,
+ (mask_fn)mips_unmask_irq, NULL, "intr%d:", irq);
+ if (error)
+ return;
+ softintr_events[irq] = event;
+ }
+
+ intr_event_add_handler(event, name, filt, handler, arg,
+ intr_priority(flags), flags, cookiep);
+
+ mips_wr_status(mips_rd_status() | (((1<< irq) << 8)));
+}
+
+void
+cpu_intr(struct trapframe *tf)
+{
+ struct intr_handler *ih;
+ struct intr_event *event;
+ register_t cause;
+ int hard;
+ int intr;
+ int i;
+ int thread;
+
+ critical_enter();
+
+ cause = mips_rd_cause();
+ intr = (cause & MIPS_INT_MASK) >> 8;
+ cause &= ~MIPS_INT_MASK;
+ mips_wr_cause(cause);
+ while ((i = fls(intr)) != 0) {
+ intr &= ~(1 << (i - 1));
+ switch (i) {
+ case 1: case 2:
+ /* Software interrupt. */
+ i--; /* Get a 0-offset interrupt. */
+ hard = 0;
+ event = softintr_events[i];
+ break;
+ default:
+ /* Hardware interrupt. */
+ i -= 2; /* Trim software interrupt bits. */
+ i--; /* Get a 0-offset interrupt. */
+ hard = 1;
+ event = hardintr_events[i];
+ break;
+ }
+
+		if (!event || TAILQ_EMPTY(&event->ie_handlers)) {
+ printf("stray %s interrupt %d\n",
+ hard ? "hard" : "soft", i);
+ continue;
+ }
+
+ /* Execute fast handlers. */
+ thread = 0;
+ TAILQ_FOREACH(ih, &event->ie_handlers, ih_next) {
+ if (ih->ih_filter == NULL)
+ thread = 1;
+ else
+ ih->ih_filter(ih->ih_argument ?
+ ih->ih_argument : tf);
+ }
+
+ /* Schedule thread if needed. */
+ if (thread)
+ intr_event_schedule_thread(event);
+ }
+
+ KASSERT(i == 0, ("all interrupts handled"));
+
+ critical_exit();
+}
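cpu_intr() above pulls the pending IP bits out of the cause register and services the highest-numbered one first: IP0/IP1 are the two software interrupts and IP2-IP7 the six hardware interrupts. A hedged C sketch of the same decode loop (the MIPS_INT_MASK value is taken to be the eight IP bits as in cpuregs.h, and fls32 is a stand-in for the kernel's fls()):

#include <stdio.h>

#define	MIPS_INT_MASK	0x0000ff00	/* assumed: IP7..IP0 in the cause register */

/* Illustrative only: stand-in for the kernel's fls(). */
static int
fls32(unsigned v)
{
	int i;

	for (i = 32; i > 0; i--)
		if (v & (1u << (i - 1)))
			return (i);
	return (0);
}

int
main(void)
{
	unsigned cause = 0x00008400;	/* hypothetical: IP7 and IP2 pending */
	unsigned intr = (cause & MIPS_INT_MASK) >> 8;
	int i;

	while ((i = fls32(intr)) != 0) {
		intr &= ~(1u << (i - 1));
		if (i <= 2)
			printf("soft irq %d\n", i - 1);
		else
			printf("hard irq %d\n", i - 2 - 1);
	}
	/* prints "hard irq 5" then "hard irq 0" */
	return (0);
}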
diff --git a/sys/mips/mips/locore.S b/sys/mips/mips/locore.S
new file mode 100644
index 0000000..afcabd4
--- /dev/null
+++ b/sys/mips/mips/locore.S
@@ -0,0 +1,279 @@
+/* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Digital Equipment Corporation and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
+ * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
+ * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
+ * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
+ *
+ * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
+ * JNPR: locore.S,v 1.6.2.1 2007/08/29 12:24:49 girish
+ * $FreeBSD$
+ */
+
+/*
+ * FREEBSD_DEVELOPERS_FIXME
+ * The start routine below was written for a multi-core CPU
+ * with each core being hyperthreaded. This serves as an example
+ * for a complex CPU architecture. For a different CPU complex
+ * please make necessary changes to read CPU-ID etc.
+ * A clean solution would be to have a different locore file for
+ * each CPU type.
+ */
+
+/*
+ * Contains code that is the first executed at boot time plus
+ * assembly language support routines.
+ */
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/cpuregs.h>
+#include <machine/regnum.h>
+
+#include "assym.s"
+
+ .data
+#ifdef YAMON
+GLOBAL(fenvp)
+ .space 4 # Assumes mips32? Is that OK?
+#endif
+#ifdef CFE /* Assumes MIPS32, bad? */
+GLOBAL(cfe_handle)
+ .space 4
+GLOBAL(cfe_vector)
+ .space 4
+#endif
+#if defined(TARGET_OCTEON)
+GLOBAL(app_descriptor_addr)
+ .space 8
+#endif
+GLOBAL(stackspace)
+ .space NBPG /* Smaller than it should be since it's temp. */
+ .align 8
+GLOBAL(topstack)
+
+
+ .set noreorder
+
+ .text
+
+GLOBAL(btext)
+ASM_ENTRY(_start)
+VECTOR(_locore, unknown)
+	/* UNSAFE TO USE a0..a3, since some bootloaders pass arguments to us in them. */
+ mtc0 zero, COP_0_CAUSE_REG # Clear soft interrupts
+
+#if defined(TARGET_OCTEON)
+ /*
+ * t1: Bits to set explicitly:
+	 *	COP0/COP2 access, 64-bit addressing (KX/SX/UX/PX), BEV
+ */
+
+ /* Set these bits */
+ li t1, (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT | MIPS_SR_PX | MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_SX | MIPS_SR_BEV)
+
+ /* Reset these bits */
+ li t0, ~(MIPS_SR_DE | MIPS_SR_SOFT_RESET | MIPS_SR_ERL | MIPS_SR_EXL | MIPS_SR_INT_IE)
+#else
+ /*
+ * t0: Bits to preserve if set:
+ * Soft reset
+ * Boot exception vectors (firmware-provided)
+ */
+ li t0, (MIPS_SR_BEV | MIPS_SR_SOFT_RESET)
+ /*
+ * t1: Bits to set explicitly:
+ * Enable FPU
+ */
+ li t1, MIPS_SR_COP_1_BIT
+#endif
+ /*
+ * Read coprocessor 0 status register, clear bits not
+ * preserved (namely, clearing interrupt bits), and set
+ * bits we want to explicitly set.
+ */
+ mfc0 t2, COP_0_STATUS_REG
+ and t2, t0
+ or t2, t1
+ mtc0 t2, COP_0_STATUS_REG
+ COP0_SYNC
+ /* Make sure KSEG0 is cached */
+ li t0, CFG_K0_CACHED
+ mtc0 t0, MIPS_COP_0_CONFIG
+ COP0_SYNC
+
+	/* Read and store the PrID and FPU ID for CPU identification, if any. */
+ mfc0 t2, COP_0_STATUS_REG
+ mfc0 t0, MIPS_COP_0_PRID
+#ifndef CPU_NOFPU
+ and t2, MIPS_SR_COP_1_BIT
+ beqz t2, 1f
+ move t1, zero
+ cfc1 t1, MIPS_FPU_ID
+1:
+#else
+ /*
+ * This platform has no FPU, and attempting to detect one
+ * using the official method causes an exception.
+ */
+ move t1, zero
+#endif
+ sw t0, _C_LABEL(cpu_id)
+ sw t1, _C_LABEL(fpu_id)
+
+/*
+ * Initialize stack and call machine startup.
+ */
+ la sp, _C_LABEL(topstack) - START_FRAME
+ la gp, _C_LABEL(_gp)
+ sw zero, START_FRAME - 4(sp) # Zero out old ra for debugger
+
+ /*xxximp
+ * now that we pass a0...a3 to the platform_init routine, do we need
+ * to stash this stuff here?
+ */
+#ifdef YAMON
+ /* Save YAMON boot environment pointer */
+ sw a2, _C_LABEL(fenvp)
+#endif
+#ifdef CFE
+ /*
+ * Save the CFE context passed to us by the loader.
+ */
+ li t1, 0x43464531
+ bne a3, t1, no_cfe /* Check for "CFE1" signature */
+ sw a0, _C_LABEL(cfe_handle)/* Firmware data segment */
+ sw a2, _C_LABEL(cfe_vector)/* Firmware entry vector */
+no_cfe:
+#endif
+#if defined(TARGET_OCTEON)
+ la a0, app_descriptor_addr
+ sw a3, 0(a0) /* Store app descriptor ptr */
+#endif
+
+ /*
+ * The following needs to be done differently for each platform and
+ * there needs to be a good way to plug this in.
+ */
+#if defined(SMP) && defined(CPU_XLR)
+/*
+ * Block all the slave CPUs
+ */
+ /*
+	 * Read the cpu id from the cp0 config register:
+	 * cpuid[9:4], thrid[3:0]
+ */
+ mfc0 a0, COP_0_CONFIG, 7
+ srl a1, a0, 4
+ andi a1, a1, 0x3f
+ andi a0, a0, 0xf
+
+ /* calculate linear cpuid */
+ sll t0, a1, 2
+ addu a2, t0, a0
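+	/*
+	 * a2 now holds a linear cpu id computed as (coreid << 2) + thrid,
+	 * i.e. four hardware thread slots per core are assumed here.
+	 */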
+/* Initially, disable all hardware threads on each core except thread0 */
+ li t1, VCPU_ID_0
+ li t2, XLR_THREAD_ENABLE_IND
+ mtcr t1, t2
+#endif
+
+
+#if defined(TARGET_OCTEON) /* Maybe this is mips32/64 generic? */
+ .set push
+ .set mips32r2
+ rdhwr t0, $0
+ .set pop
+#else
+ move t0, zero
+#endif
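+	/*
+	 * t0 now holds this CPU's number (hardware register $0, CPUNum, on
+	 * MIPS32r2-style parts; forced to zero above otherwise), so a
+	 * non-zero value identifies a secondary CPU.
+	 */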
+
+ /* Stage the secondary cpu start until later */
+ bne t0, zero, start_secondary
+ nop
+
+#ifdef SMP
+ la t0, _C_LABEL(__pcpu)
+ SET_CPU_PCPU(t0)
+ /* If not master cpu, jump... */
+	/*
+	 * XXX This assumes the SMP && CPU_XLR block above ran and left the
+	 * linear cpu id in a2.
+	 */
+ bne a2, zero, start_secondary
+ nop
+#endif
+
+ /* Call the platform-specific startup code. */
+ jal _C_LABEL(platform_start)
+ sw zero, START_FRAME - 8(sp) # Zero out old fp for debugger
+
+ la sp, _C_LABEL(thread0)
+ lw a0, TD_PCB(sp)
+ li t0, ~7
+ and a0, a0, t0
+ subu sp, a0, START_FRAME
+
+ jal _C_LABEL(mi_startup) # mi_startup(frame)
+ sw zero, START_FRAME - 8(sp) # Zero out old fp for debugger
+
+ PANIC("Startup failed!")
+
+#ifdef SMP
+start_secondary:
+ move a0, a1
+2:
+ addiu t0, PCPU_SIZE
+ subu a1, 1
+ bne a1, zero, 2b
+ nop
+ SET_CPU_PCPU(t0)
+smp_wait:
+ lw sp, PC_BOOT_STACK(t0)
+ beqz sp, smp_wait
+ nop
+ jal _C_LABEL(smp_init_secondary)
+ nop
+#else
+start_secondary:
+ b start_secondary
+ nop
+#endif
+
+VECTOR_END(_locore)
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
new file mode 100644
index 0000000..84ac979
--- /dev/null
+++ b/sys/mips/mips/machdep.c
@@ -0,0 +1,557 @@
+ /* $OpenBSD: machdep.c,v 1.33 1998/09/15 10:58:54 pefo Exp $ */
+/* tracked to 1.38 */
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, The Mach Operating System project at
+ * Carnegie-Mellon University and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)machdep.c 8.3 (Berkeley) 1/12/94
+ * Id: machdep.c,v 1.33 1998/09/15 10:58:54 pefo Exp
+ * JNPR: machdep.c,v 1.11.2.3 2007/08/29 12:24:49 girish
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_md.h"
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/reboot.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#include <sys/sysproto.h>
+#include <sys/vmmeter.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_extern.h>
+#include <sys/socket.h>
+
+#include <sys/user.h>
+#include <sys/cons.h>
+#include <sys/syslog.h>
+#include <machine/cache.h>
+#include <machine/cpu.h>
+#include <machine/pltfm.h>
+#include <net/netisr.h>
+#include <machine/md_var.h>
+#if 0
+#include <machine/defs.h>
+#endif
+#include <machine/clock.h>
+#include <machine/asm.h>
+#include <machine/bootinfo.h>
+#ifdef DDB
+#include <sys/kdb.h>
+#include <ddb/ddb.h>
+#endif
+
+#include <sys/random.h>
+#include <machine/ns16550.h>
+#include <net/if.h>
+
+#define BOOTINFO_DEBUG 0
+
+char machine[] = "mips";
+SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "Machine class");
+
+static char cpu_model[30];
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, "Machine model");
+
+#if 0 /* see comment below */
+static void getmemsize(void);
+#endif
+
+int cold = 1;
+int Maxmem;
+long realmem = 0;
+int cpu_clock = MIPS_DEFAULT_HZ;
+SYSCTL_INT(_hw, OID_AUTO, clockrate, CTLFLAG_RD,
+ &cpu_clock, 0, "CPU instruction clock rate");
+int clocks_running = 0;
+
+vm_offset_t kstack0;
+
+#ifdef SMP
+struct pcpu __pcpu[32];
+char pcpu_boot_stack[KSTACK_PAGES * PAGE_SIZE * (MAXCPU-1)];
+#else
+struct pcpu pcpu;
+struct pcpu *pcpup = &pcpu;
+#endif
+
+vm_offset_t phys_avail[10];
+#ifdef UNIMPLEMENTED
+struct platform platform;
+#endif
+
+vm_paddr_t mips_wired_tlb_physmem_start;
+vm_paddr_t mips_wired_tlb_physmem_end;
+u_int need_wired_tlb_page_pool;
+
+static void cpu_startup(void *);
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+struct kva_md_info kmi;
+
+int cpucfg; /* Value of processor config register */
+int num_tlbentries = 64; /* Size of the CPU tlb */
+int cputype;
+
+extern char MipsException[], MipsExceptionEnd[];
+
+/* TLB miss handler address and end */
+extern char MipsTLBMiss[], MipsTLBMissEnd[];
+
+/* Cache error handler */
+extern char MipsCache[], MipsCacheEnd[];
+
+extern char edata[], end[];
+
+u_int32_t bootdev;
+struct bootinfo bootinfo;
+
+
+static void
+cpu_startup(void *dummy)
+{
+
+ if (boothowto & RB_VERBOSE)
+ bootverbose++;
+
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ printf("%s", version);
+
+ printf("real memory = %lu (%luK bytes)\n", ptoa(Maxmem),
+ ptoa(Maxmem) / 1024);
+ realmem = Maxmem;
+ /*
+ * Display any holes after the first chunk of extended memory.
+ */
+ if (bootverbose) {
+ int indx;
+
+ printf("Physical memory chunk(s):\n");
+ for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
+ int size1 = phys_avail[indx + 1] - phys_avail[indx];
+
+ printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
+ phys_avail[indx], phys_avail[indx + 1] - 1, size1,
+ size1 / PAGE_SIZE);
+ }
+ }
+
+ vm_ksubmap_init(&kmi);
+
+ printf("avail memory = %lu (%luMB)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1048576);
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+ vm_pager_bufferinit();
+}
+
+/*
+ * Shutdown the CPU as much as possible
+ */
+void
+cpu_reset(void)
+{
+ for (;;)
+ ;
+}
+
+/* Get current clock frequency for the given cpu id. */
+int
+cpu_est_clockrate(int cpu_id, uint64_t *rate)
+{
+
+ return (ENXIO);
+}
+
+/*
+ * Shutdown the CPU as much as possible
+ */
+void
+cpu_halt(void)
+{
+ for (;;)
+ ;
+}
+
+#ifdef PORT_TO_JMIPS
+
+static int
+sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
+{
+}
+
+SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT | CTLFLAG_RW,
+ &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
+#endif /* PORT_TO_JMIPS */
+
+#ifdef PORT_TO_JMIPS
+/* art */
+SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, CTLFLAG_RW,
+ &disable_rtc_set, 0, "");
+#endif /* PORT_TO_JMIPS */
+
+SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, CTLFLAG_RD, &bootinfo,
+ bootinfo, "");
+
+#ifdef PORT_TO_JMIPS
+/* dchu */
+SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, CTLFLAG_RW,
+ &wall_cmos_clock, 0, "");
+#endif /* PORT_TO_JMIPS */
+
+/*
+ * Initialize mips and configure to run kernel
+ */
+
+void
+mips_proc0_init(void)
+{
+ proc_linkup(&proc0, &thread0);
+ thread0.td_kstack = kstack0;
+ thread0.td_kstack_pages = KSTACK_PAGES - 1;
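+	/*
+	 * If the kstack starts on an odd page, skip the first page so that
+	 * the usable stack begins on an even page boundary (presumably so
+	 * it can be covered by a single even/odd wired TLB entry pair).
+	 */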
+ if (thread0.td_kstack & (1 << PAGE_SHIFT))
+ thread0.td_md.md_realstack = thread0.td_kstack + PAGE_SIZE;
+ else
+ thread0.td_md.md_realstack = thread0.td_kstack;
+ /* Initialize pcpu info of cpu-zero */
+#ifdef SMP
+ pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
+#else
+ pcpu_init(pcpup, 0, sizeof(struct pcpu));
+#endif
+ /*
+	 * Do not use cpu_thread_alloc to initialize these fields:
+	 * thread0 is the only thread whose kstack is located in KSEG0,
+	 * while cpu_thread_alloc handles kstacks allocated in KSEG2.
+ */
+ thread0.td_pcb = (struct pcb *)(thread0.td_md.md_realstack +
+ (thread0.td_kstack_pages - 1) * PAGE_SIZE) - 1;
+ thread0.td_frame = &thread0.td_pcb->pcb_regs;
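+	/*
+	 * The pcb is carved out of the high end of thread0's kstack, and
+	 * td_frame points at the register save area (pcb_regs) inside it.
+	 */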
+ /*
+	 * There is no need to initialize the md_upte array for thread0 as it
+	 * is located in the .bss section, which is zeroed during kernel
+	 * initialization.
+ */
+
+ PCPU_SET(curthread, &thread0);
+ PCPU_SET(curpcb, thread0.td_pcb);
+}
+
+#ifdef DEBUG_UART_POLLED
+void
+init_bootstrap_console(void)
+{
+ /*
+	 * Initialize the (temporary) bootstrap console interface, so
+	 * we can use printf until the VM system starts being set up.
+ * The real console is initialized before then.
+ */
+ uart_post_init(PA_2_K1VA(ADDR_NS16550_UART1));
+}
+#endif
+
+struct msgbuf *msgbufp=0;
+
+#if 0
+/*
+ * This code has been moved to the platform_init code. The only
+ * thing being done here that has not been moved is the wired tlb
+ * pool stuff. I'm still trying to understand that feature, since
+ * it maps from the end of the kernel to 0x08000000 somehow, but the
+ * code was stripped out, so it is hard to say what is going on.
+ */
+u_int32_t freemem_start;
+
+static void
+getmemsize()
+{
+ vm_offset_t kern_start, kern_end;
+ vm_offset_t AllowMem, memsize;
+ const char *cp;
+ size_t sz;
+ int phys_avail_cnt;
+
+ /* Determine memory layout */
+ phys_avail_cnt = 0;
+ kern_start = mips_trunc_page(MIPS_CACHED_TO_PHYS(btext));
+ if (kern_start < freemem_start)
+		panic("kernel load address too low, overlapping with memory "
+		    "reserved for FPC IPC\n");
+
+ if (kern_start > freemem_start) {
+ phys_avail[phys_avail_cnt++] = freemem_start;
+ /*
+ * Since the stack is setup just before kern_start,
+ * leave some space for stack to grow
+ */
+ phys_avail[phys_avail_cnt++] = kern_start - PAGE_SIZE * 3;
+ MIPS_DEBUG_PRINT("phys_avail : %p - %p", \
+ phys_avail[phys_avail_cnt-2], phys_avail[phys_avail_cnt-1]);
+ }
+
+ kern_end = (vm_offset_t) end;
+ kern_end = (vm_offset_t) mips_round_page(kern_end);
+ MIPS_DEBUG_PRINT("kern_start : 0x%x, kern_end : 0x%x", btext, kern_end);
+ phys_avail[phys_avail_cnt++] = MIPS_CACHED_TO_PHYS(kern_end);
+
+ if (need_wired_tlb_page_pool) {
+ mips_wired_tlb_physmem_start = MIPS_CACHED_TO_PHYS(kern_end);
+ mips_wired_tlb_physmem_end = 0x08000000;
+ MIPS_DEBUG_PRINT("%s: unmapped page start [0x%x] end[0x%x]\n",\
+ __FUNCTION__, mips_wired_tlb_physmem_start, \
+ mips_wired_tlb_physmem_end);
+ if (mips_wired_tlb_physmem_start > mips_wired_tlb_physmem_end)
+ panic("Error in Page table page physical address assignment\n");
+ }
+
+ if (bootinfo.bi_memsizes_valid)
+ memsize = bootinfo.bi_basemem * 1024;
+ else {
+ memsize = SDRAM_MEM_SIZE;
+ }
+
+ /*
+ * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
+ * for the appropriate modifiers.
+ */
+ if ((cp = getenv("hw.physmem")) != NULL) {
+ vm_offset_t sanity;
+ char *ep;
+
+ sanity = AllowMem = strtouq(cp, &ep, 0);
+ if ((ep != cp) && (*ep != 0)) {
+			switch (*ep) {
+			case 'g':
+			case 'G':
+				AllowMem <<= 10;
+				/* FALLTHROUGH */
+			case 'm':
+			case 'M':
+				AllowMem <<= 10;
+				/* FALLTHROUGH */
+			case 'k':
+			case 'K':
+				AllowMem <<= 10;
+				break;
+ default:
+ AllowMem = sanity = 0;
+ }
+ if (AllowMem < sanity)
+ AllowMem = 0;
+ }
+ if (!AllowMem || (AllowMem < (kern_end - KERNBASE)))
+ printf("Ignoring invalid hw.physmem size of '%s'\n", cp);
+ } else
+ AllowMem = 0;
+
+ if (AllowMem)
+ memsize = (memsize > AllowMem) ? AllowMem : memsize;
+
+ phys_avail[phys_avail_cnt++] = SDRAM_ADDR_START + memsize;
+ MIPS_DEBUG_PRINT("phys_avail : 0x%x - 0x%x", \
+ phys_avail[phys_avail_cnt-2], phys_avail[phys_avail_cnt-1]);
+ phys_avail[phys_avail_cnt] = 0;
+
+ physmem = btoc(memsize);
+ Maxmem = physmem;
+
+ /*
+ * Initialize error message buffer (at high end of memory).
+ */
+ sz = round_page(MSGBUF_SIZE);
+ msgbufp = (struct msgbuf *) pmap_steal_memory(sz);
+ msgbufinit(msgbufp, sz);
+ printf("%s: msgbufp[size=%d] = 0x%p\n", __FUNCTION__, sz, msgbufp);
+}
+#endif
+
+/*
+ * Initialize the hardware exception vectors, and the jump table used to
+ * call locore cache and TLB management functions, based on the kind
+ * of CPU the kernel is running on.
+ */
+void
+mips_vector_init(void)
+{
+ /*
+ * Copy down exception vector code.
+ */
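+	/*
+	 * The hardware vector slots are 0x80 bytes (32 instructions) apart,
+	 * hence the size checks below.
+	 */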
+ if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
+ panic("startup: UTLB code too large");
+
+ if (MipsCacheEnd - MipsCache > 0x80)
+ panic("startup: Cache error code too large");
+
+ bcopy(MipsTLBMiss, (void *)TLB_MISS_EXC_VEC,
+ MipsTLBMissEnd - MipsTLBMiss);
+
+#ifdef TARGET_OCTEON
+	/* Fake, but sufficient, for 32-bit kernels with 64-bit hardware addresses. */
+ bcopy(MipsTLBMiss, (void *)XTLB_MISS_EXC_VEC,
+ MipsTLBMissEnd - MipsTLBMiss);
+#endif
+
+ bcopy(MipsException, (void *)GEN_EXC_VEC,
+ MipsExceptionEnd - MipsException);
+
+ bcopy(MipsCache, (void *)CACHE_ERR_EXC_VEC,
+ MipsCacheEnd - MipsCache);
+
+ /*
+ * Clear out the I and D caches.
+ */
+ mips_icache_sync_all();
+ mips_dcache_wbinv_all();
+
+ /*
+	 * Mask all interrupts. Each interrupt will be enabled
+	 * when a handler is installed for it.
+	 */
+	set_intr_mask(ALL_INT_MASK);
+ /* Clear BEV in SR so we start handling our own exceptions */
+ mips_cp0_status_write(mips_cp0_status_read() & ~SR_BOOT_EXC_VEC);
+
+}
+
+/*
+ * Initialise a struct pcpu.
+ */
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+#ifdef SMP
+ if (cpuid != 0)
+ pcpu->pc_boot_stack = (void *)(pcpu_boot_stack + cpuid *
+ (KSTACK_PAGES * PAGE_SIZE));
+#endif
+ pcpu->pc_next_asid = 1;
+ pcpu->pc_asid_generation = 1;
+}
+
+int
+sysarch(struct thread *td, register struct sysarch_args *uap)
+{
+ return (ENOSYS);
+}
+
+int
+fill_dbregs(struct thread *td, struct dbreg *dbregs)
+{
+
+ /* No debug registers on mips */
+ return (ENOSYS);
+}
+
+int
+set_dbregs(struct thread *td, struct dbreg *dbregs)
+{
+
+ /* No debug registers on mips */
+ return (ENOSYS);
+}
+
+int spinco;
+void
+spinlock_enter(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ if (td->td_md.md_spinlock_count == 0)
+ td->td_md.md_saved_intr = disableintr();
+ td->td_md.md_spinlock_count++;
+ critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ critical_exit();
+ td->td_md.md_spinlock_count--;
+ if (td->td_md.md_spinlock_count == 0)
+ restoreintr(td->td_md.md_saved_intr);
+}
+
+u_int32_t
+get_cyclecount(void)
+{
+ u_int32_t count;
+
+ mfc0_macro(count, 9);
+ return (count);
+}
+
+/*
+ * Call platform-specific code to halt (until the next interrupt) for the
+ * idle loop.
+ */
+void
+cpu_idle(void)
+{
+ if (mips_cp0_status_read() & SR_INT_ENAB)
+ __asm __volatile ("wait");
+ else
+ panic("ints disabled in idleproc!");
+}
+
+void
+dumpsys(struct dumperinfo *di __unused)
+{
+
+ printf("Kernel dumps not implemented on this architecture\n");
+}
diff --git a/sys/mips/mips/mainbus.c b/sys/mips/mips/mainbus.c
new file mode 100644
index 0000000..54163f2
--- /dev/null
+++ b/sys/mips/mips/mainbus.c
@@ -0,0 +1,343 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: src/sys/i386/i386/nexus.c,v 1.26.2.5 2000/11/16 09:30:57 nyan
+ * JNPR: mainbus.c,v 1.2.4.1 2007/08/16 13:02:11 girish
+ */
+
+/*
+ * This code implements a `root mainbus' for MIPS architecture
+ * machines. The function of the root mainbus is to serve as an
+ * attachment point for both processors and buses, and to manage
+ * resources which are common to all of them. In particular,
+ * this code implements the core resource managers for interrupt
+ * requests, DMA requests (which rightfully should be a part of the
+ * ISA code but it's easier to do it here for now), I/O port addresses,
+ * and I/O memory address space.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+
+#include <machine/vmparam.h>
+#include <vm/vm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#include <machine/resource.h>
+
+#ifdef DCHU_DEBUG_UART
+#include <machine/pltfm.h>
+#include <machine/ns16550.h>
+#endif
+
+static struct rman irq_rman, port_rman, mem_rman;
+
+static int mainbus_probe(device_t);
+static int mainbus_attach(device_t);
+static int mainbus_print_child(device_t, device_t);
+static device_t mainbus_add_child(device_t bus, int order, const char *name,
+ int unit);
+static struct resource *mainbus_alloc_resource(device_t, device_t, int, int *,
+ u_long, u_long, u_long, u_int);
+static int mainbus_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int mainbus_deactivate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int mainbus_release_resource(device_t, device_t, int, int,
+ struct resource *);
+static int mainbus_setup_intr(device_t, device_t, struct resource *,
+ int flags, int (*)(void *), void *, void **);
+static int mainbus_teardown_intr(device_t, device_t, struct resource *,
+ void *);
+
+static device_method_t mainbus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, mainbus_probe),
+ DEVMETHOD(device_attach, mainbus_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, mainbus_print_child),
+ DEVMETHOD(bus_add_child, mainbus_add_child),
+ DEVMETHOD(bus_read_ivar, bus_generic_read_ivar),
+ DEVMETHOD(bus_write_ivar, bus_generic_write_ivar),
+ DEVMETHOD(bus_alloc_resource, mainbus_alloc_resource),
+ DEVMETHOD(bus_release_resource, mainbus_release_resource),
+ DEVMETHOD(bus_activate_resource, mainbus_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, mainbus_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, mainbus_setup_intr),
+ DEVMETHOD(bus_teardown_intr, mainbus_teardown_intr),
+
+ { 0, 0 }
+};
+
+static driver_t mainbus_driver = {
+ "mainbus",
+ mainbus_methods,
+ 1, /* no softc */
+};
+static devclass_t mainbus_devclass;
+
+#ifdef DEBUG_UART
+#define printf(s) puts_post(PA_2_K1VA(ADDR_NS16550_UART1), s)
+#endif
+
+DRIVER_MODULE(mainbus, root, mainbus_driver, mainbus_devclass, 0, 0);
+
+static int
+mainbus_probe(device_t dev)
+{
+
+#ifdef DEBUG_BRINGUP
+ device_verbose(dev); /* print attach message */
+#else
+ device_quiet(dev); /* suppress attach message for neatness */
+#endif
+
+ irq_rman.rm_start = 0;
+ irq_rman.rm_type = RMAN_ARRAY;
+ irq_rman.rm_descr = "Interrupt request lines";
+ irq_rman.rm_end = 15;
+ if (rman_init(&irq_rman) ||
+ rman_manage_region(&irq_rman, irq_rman.rm_start, irq_rman.rm_end))
+ panic("mainbus_probe irq_rman");
+
+ /*
+	 * I/O ports and memory truly are global at this level,
+	 * as are APIC interrupts (however many I/O APICs there turn out
+	 * to be on large systems).
+ */
+ port_rman.rm_start = 0;
+ port_rman.rm_end = 0xffff;
+ port_rman.rm_type = RMAN_ARRAY;
+ port_rman.rm_descr = "I/O ports";
+ if (rman_init(&port_rman) || rman_manage_region(&port_rman, 0, 0xffff))
+ panic("mainbus_probe port_rman");
+
+ mem_rman.rm_start = 0;
+ mem_rman.rm_end = ~0u;
+ mem_rman.rm_type = RMAN_ARRAY;
+ mem_rman.rm_descr = "I/O memory addresses";
+ if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, ~0))
+ panic("mainbus_probe mem_rman");
+
+ return bus_generic_probe(dev);
+}
+
+static int
+mainbus_attach(device_t dev)
+{
+ /*
+ * First, deal with the children we know about already
+ */
+ bus_generic_attach(dev);
+
+ return 0;
+}
+
+static int
+mainbus_print_child(device_t bus, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(bus, child);
+#ifndef DEBUG_UART
+ retval += printf(" on motherboard\n");
+#endif
+
+ return (retval);
+}
+
+static device_t
+mainbus_add_child(device_t bus, int order, const char *name, int unit)
+{
+ return device_add_child_ordered(bus, order, name, unit);
+}
+
+/*
+ * Allocate a resource on behalf of child. NB: child is usually going to be a
+ * child of one of our descendants, not a direct child of mainbus0.
+ * (Exceptions include npx.)
+ */
+static struct resource *
+mainbus_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource *rv;
+ struct rman *rm;
+ int needactivate = flags & RF_ACTIVE;
+
+ flags &= ~RF_ACTIVE;
+
+ switch (type) {
+ case SYS_RES_IRQ:
+ rm = &irq_rman;
+ break;
+
+ case SYS_RES_DRQ:
+ return 0;
+
+ case SYS_RES_IOPORT:
+ rm = &port_rman;
+ break;
+
+ case SYS_RES_MEMORY:
+ rm = &mem_rman;
+ break;
+
+ default:
+ return 0;
+ }
+
+ rv = rman_reserve_resource(rm, start, end, count, flags, child);
+
+ if (rv == 0) {
+ printf("mainbus_alloc_resource: no resource is available\n");
+ return 0;
+ }
+
+ if (type == SYS_RES_MEMORY) {
+ rman_set_bustag(rv, MIPS_BUS_SPACE_MEM);
+
+ } else if (type == SYS_RES_IOPORT) {
+ rman_set_bustag(rv, MIPS_BUS_SPACE_IO);
+ /* IBM-PC: the type of bus_space_handle_t is u_int */
+ rman_set_bushandle(rv, rman_get_start(rv));
+ }
+
+ if (needactivate) {
+ if (bus_activate_resource(child, type, *rid, rv)) {
+ rman_release_resource(rv);
+ return 0;
+ }
+ }
+
+ return rv;
+}
+
+static int
+mainbus_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ /*
+ * If this is a memory resource, map it into the kernel.
+ */
+#ifdef TARGET_OCTEON
+ uint64_t temp;
+#endif
+ if (rman_get_bustag(r) == MIPS_BUS_SPACE_MEM) {
+ caddr_t vaddr = 0;
+ {
+ u_int32_t paddr, psize, poffs;
+
+ paddr = rman_get_start(r);
+ psize = rman_get_size(r);
+
+ poffs = paddr - trunc_page(paddr);
+ vaddr = (caddr_t) pmap_mapdev(paddr-poffs, psize+poffs)
+ + poffs;
+ }
+ rman_set_virtual(r, vaddr);
+ /* IBM-PC: the type of bus_space_handle_t is u_int */
+#ifdef TARGET_OCTEON
+ temp = 0x0000000000000000;
+ temp |= (uint32_t)vaddr;
+ rman_set_bushandle(r, (bus_space_handle_t) temp);
+#else
+ rman_set_bushandle(r, (bus_space_handle_t) vaddr);
+#endif
+ }
+ return (rman_activate_resource(r));
+}
+
+static int
+mainbus_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ /*
+ * If this is a memory resource, unmap it.
+ */
+ if ((rman_get_bustag(r) == MIPS_BUS_SPACE_MEM) && (rman_get_end(r) >=
+ 1024 * 1024)) {
+ u_int32_t psize;
+
+ psize = rman_get_size(r);
+ pmap_unmapdev((vm_offset_t)rman_get_virtual(r), psize);
+ }
+
+ return (rman_deactivate_resource(r));
+}
+
+static int
+mainbus_release_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ if (rman_get_flags(r) & RF_ACTIVE) {
+ int error = bus_deactivate_resource(child, type, rid, r);
+ if (error)
+ return error;
+ }
+ return (rman_release_resource(r));
+}
+
+/*
+ * Currently this uses the really grody interface from kern/kern_intr.c
+ * (which really doesn't belong in kern/anything.c). Eventually, all of
+ * the code in kern_intr.c and machdep_intr.c should get moved here, since
+ * this is going to be the official interface.
+ *
+ * Set up handler for external interrupt events.
+ * Use CR_INT_<n> to select the proper interrupt
+ * condition to dispatch on.
+ */
+static int
+mainbus_setup_intr(device_t bus, device_t child, struct resource *irq,
+ int flags, int (*ihand)(void *), void *arg, void **cookiep)
+{
+ panic("can never mainbus_setup_intr");
+}
+
+static int
+mainbus_teardown_intr(device_t dev, device_t child, struct resource *r,
+ void *ih)
+{
+ panic("can never mainbus_teardown_intr");
+}
diff --git a/sys/mips/mips/mem.c b/sys/mips/mips/mem.c
new file mode 100644
index 0000000..04cc937
--- /dev/null
+++ b/sys/mips/mips/mem.c
@@ -0,0 +1,185 @@
+/* $OpenBSD: mem.c,v 1.2 1998/08/31 17:42:34 millert Exp $ */
+/* $NetBSD: mem.c,v 1.6 1995/04/10 11:55:03 mycroft Exp $ */
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mem.c 8.3 (Berkeley) 1/12/94
+ * JNPR: mem.c,v 1.3 2007/08/09 11:23:32 katta Exp $
+ */
+
+/*
+ * Memory special file
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <sys/user.h>
+#include <sys/msgbuf.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/uio.h>
+#include <sys/sched.h>
+#include <sys/malloc.h>
+#include <machine/pte.h>
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/atomic.h>
+#include <machine/pltfm.h>
+#include <machine/memdev.h>
+
+
+extern struct sysmaps sysmaps_pcpu[];
+/*ARGSUSED*/
+int
+memrw(struct cdev *dev, struct uio *uio, int flags)
+{
+ register vm_offset_t v;
+ register int c;
+ register struct iovec *iov;
+ int error = 0;
+
+ while (uio->uio_resid > 0 && error == 0) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("mmrw");
+ continue;
+ }
+
+ /* minor device 0 is physical memory */
+ if (minor(dev) == CDEV_MINOR_MEM) {
+ v = uio->uio_offset;
+ c = iov->iov_len;
+
+ vm_offset_t va;
+ vm_paddr_t pa;
+ register int o;
+
+ if (v + c > (SDRAM_ADDR_START + ctob(physmem)))
+ return (EFAULT);
+
+ if (is_cacheable_mem(v) && is_cacheable_mem(v + c)) {
+ struct fpage *fp;
+ struct sysmaps *sysmaps;
+
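+				/*
+				 * Physical memory is accessed through a
+				 * per-CPU fpage mapping window: pin to this
+				 * CPU, map the target page at the window,
+				 * copy at most up to the end of that page,
+				 * then tear the mapping down again.
+				 */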
+ sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+ mtx_lock(&sysmaps->lock);
+ sched_pin();
+
+ fp = &sysmaps->fp[PMAP_FPAGE1];
+ pa = uio->uio_offset & ~PAGE_MASK;
+ va = pmap_map_fpage(pa, fp, FALSE);
+ o = (int)uio->uio_offset & PAGE_MASK;
+ c = (u_int)(PAGE_SIZE -
+ ((int)iov->iov_base & PAGE_MASK));
+ c = min(c, (u_int)(PAGE_SIZE - o));
+ c = min(c, (u_int)iov->iov_len);
+ error = uiomove((caddr_t)(va + o), (int)c, uio);
+ pmap_unmap_fpage(pa, fp);
+ sched_unpin();
+ mtx_unlock(&sysmaps->lock);
+ } else
+ return (EFAULT);
+ continue;
+ }
+
+ /* minor device 1 is kernel memory */
+ else if (minor(dev) == CDEV_MINOR_KMEM) {
+ v = uio->uio_offset;
+ c = min(iov->iov_len, MAXPHYS);
+ vm_offset_t addr, eaddr;
+ vm_offset_t wired_tlb_virtmem_end;
+
+ /*
+ * Make sure that all of the pages are currently
+ * resident so that we don't create any zero-fill pages.
+ */
+ addr = trunc_page(uio->uio_offset);
+ eaddr = round_page(uio->uio_offset + c);
+
+ if (addr < (vm_offset_t) VM_MIN_KERNEL_ADDRESS)
+ return EFAULT;
+
+ wired_tlb_virtmem_end = VM_MIN_KERNEL_ADDRESS +
+ VM_KERNEL_ALLOC_OFFSET;
+ if ((addr < wired_tlb_virtmem_end) &&
+ (eaddr >= wired_tlb_virtmem_end))
+ addr = wired_tlb_virtmem_end;
+
+ if (addr >= wired_tlb_virtmem_end) {
+ for (; addr < eaddr; addr += PAGE_SIZE)
+ if (pmap_extract(kernel_pmap,addr) == 0)
+ return EFAULT;
+
+ if (!kernacc((caddr_t)(int)uio->uio_offset, c,
+ uio->uio_rw == UIO_READ ?
+ VM_PROT_READ : VM_PROT_WRITE))
+ return (EFAULT);
+ }
+
+ error = uiomove((caddr_t)v, c, uio);
+ continue;
+ }
+
+ }
+ return (error);
+}
+
+/*ARGSUSED*/
+int
+memmmap(struct cdev *dev, vm_offset_t off, vm_paddr_t *paddr, int prot)
+{
+
+ return (EOPNOTSUPP);
+}
+
+void
+dev_mem_md_init(void)
+{
+}
diff --git a/sys/mips/mips/mips_subr.c b/sys/mips/mips/mips_subr.c
new file mode 100644
index 0000000..f5bed4e
--- /dev/null
+++ b/sys/mips/mips/mips_subr.c
@@ -0,0 +1,48 @@
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/cp0.h>
+
+static void
+mips_setwatchlo(u_int32_t watchlo)
+{
+
+ __asm __volatile ("mtc0 %0, $18, 0" : : "r" (watchlo));
+}
+
+static void
+mips_setwatchhi(u_int32_t watchhi)
+{
+
+ __asm __volatile ("mtc0 %0, $19, 0" : : "r" (watchhi));
+}
+
+
+/*
+ * mips_watchpoint -- set/clear a watchpoint
+ */
+void mips_watchpoint(void *addr, int access);	/* XXX kludge */
+
+void
+mips_watchpoint(void *addr, int access)
+{
+ u_int32_t watchlo = 0;
+ u_int32_t watchhi = 0;
+
+ if (addr != NULL) {
+ /*
+ * Set a new watchpoint.
+ * Parameter addr points to the address we'd like to monitor.
+ */
+ watchhi = WATCHHI_GLOBAL_BIT;
+ watchlo = (u_int32_t)addr & WATCHLO_PADDR0_MASK;
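+		/*
+		 * The global bit in WatchHi makes the watchpoint match in
+		 * any address space (ASID); WatchLo carries the physical
+		 * address bits plus the load/store/fetch enables OR'd in
+		 * below.
+		 */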
+
+ access &= WATCHLO_STORE | WATCHLO_LOAD | WATCHLO_FETCH;
+
+ watchlo |= access;
+ }
+ mips_setwatchlo(watchlo);
+ mips_setwatchhi(watchhi);
+}
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
new file mode 100644
index 0000000..897ab07
--- /dev/null
+++ b/sys/mips/mips/mp_machdep.c
@@ -0,0 +1,313 @@
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_kstack_pages.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ktr.h>
+#include <sys/proc.h>
+#include <sys/cons.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/bus.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/clock.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/pmap.h>
+#include <machine/smp.h>
+
+static struct mtx ap_boot_mtx;
+extern struct pcpu __pcpu[];
+extern int num_tlbentries;
+void mips_start_timer(void);
+static volatile int aps_ready = 0;
+
+u_int32_t boot_cpu_id;
+
+
+void
+cpu_mp_announce(void)
+{
+}
+
+/*
+ * To implement IPIs on MIPS CPUs, we use interrupt line 2 (bit 4 of the
+ * cause register) and a per-CPU bitmap to avoid redundant IPI interrupts.
+ * To interrupt a set of CPUs, the sender routine loops, sending an
+ * interrupt to each of the specified CPUs. A single mutex (smp_ipi_mtx) is
+ * used for all IPIs that spinwait for delivery. This includes the
+ * following IPIs:
+ * IPI_RENDEZVOUS
+ * IPI_INVLPG
+ * IPI_INVLTLB
+ * IPI_INVLRNG
+ */
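+/*
+ * Delivery sketch: the sender ORs the IPI bit into the target CPU's
+ * pc_pending_ipis word with a compare-and-set loop and only raises the
+ * hardware interrupt when the word was previously empty; the receiver
+ * atomically clears the word in smp_handle_ipi() and dispatches each
+ * pending bit in turn.
+ */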
+
+/*
+ * Send an IPI to a set of CPUs.
+ */
+void
+ipi_selected(u_int32_t cpus, u_int ipi)
+{
+ struct pcpu *pcpu;
+ u_int cpuid, new_pending, old_pending;
+
+ CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
+
+ while ((cpuid = ffs(cpus)) != 0) {
+ cpuid--;
+ cpus &= ~(1 << cpuid);
+ pcpu = pcpu_find(cpuid);
+
+ if (pcpu) {
+ do {
+ old_pending = pcpu->pc_pending_ipis;
+ new_pending = old_pending | ipi;
+ } while (!atomic_cmpset_int(&pcpu->pc_pending_ipis,
+ old_pending, new_pending));
+
+ if (old_pending)
+ continue;
+
+ mips_ipi_send (cpuid);
+ }
+ }
+}
+
+/*
+ * Send an IPI to all CPUs, including this one.
+ */
+void
+ipi_all(u_int ipi)
+{
+
+ ipi_selected(all_cpus, ipi);
+}
+
+/*
+ * Send an IPI to all CPUs except this one.
+ */
+void
+ipi_all_but_self(u_int ipi)
+{
+
+ ipi_selected(PCPU_GET(other_cpus), ipi);
+}
+
+/*
+ * Send an IPI to this CPU.
+ */
+void
+ipi_self(u_int ipi)
+{
+
+ ipi_selected(PCPU_GET(cpumask), ipi);
+}
+
+/*
+ * Handle an IPI sent to this processor.
+ */
+intrmask_t
+smp_handle_ipi(struct trapframe *frame)
+{
+ cpumask_t cpumask; /* This cpu mask */
+ u_int ipi, ipi_bitmap;
+
+ ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
+ cpumask = PCPU_GET(cpumask);
+
+ CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
+ while (ipi_bitmap) {
+ /*
+ * Find the lowest set bit.
+ */
+ ipi = ipi_bitmap & ~(ipi_bitmap - 1);
+ ipi_bitmap &= ~ipi;
+ switch (ipi) {
+ case IPI_INVLTLB:
+ CTR0(KTR_SMP, "IPI_INVLTLB");
+ break;
+
+ case IPI_RENDEZVOUS:
+ CTR0(KTR_SMP, "IPI_RENDEZVOUS");
+ smp_rendezvous_action();
+ break;
+
+ case IPI_AST:
+ CTR0(KTR_SMP, "IPI_AST");
+ break;
+
+ case IPI_STOP:
+ CTR0(KTR_SMP, "IPI_STOP");
+ atomic_set_int(&stopped_cpus, cpumask);
+
+ while ((started_cpus & cpumask) == 0)
+ ;
+ atomic_clear_int(&started_cpus, cpumask);
+ atomic_clear_int(&stopped_cpus, cpumask);
+ break;
+ }
+ }
+ return CR_INT_IPI;
+}
+
+void
+cpu_mp_setmaxid(void)
+{
+
+ mp_maxid = MAXCPU - 1;
+}
+
+void
+smp_init_secondary(u_int32_t cpuid)
+{
+
+ if (cpuid >= MAXCPU)
+ panic ("cpu id exceeds MAXCPU\n");
+
+ /* tlb init */
+ R4K_SetWIRED(0);
+ R4K_TLBFlush(num_tlbentries);
+ R4K_SetWIRED(VMWIRED_ENTRIES);
+ MachSetPID(0);
+
+ Mips_SyncCache();
+
+ mips_cp0_status_write(0);
+ while (!aps_ready)
+ ;
+
+ mips_sync(); mips_sync();
+ /* Initialize curthread. */
+ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+ PCPU_SET(curthread, PCPU_GET(idlethread));
+
+ mtx_lock_spin(&ap_boot_mtx);
+
+ smp_cpus++;
+
+ CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
+
+ /* Build our map of 'other' CPUs. */
+ PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+
+ printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
+
+ if (smp_cpus == mp_ncpus) {
+ smp_started = 1;
+ smp_active = 1;
+ }
+
+ mtx_unlock_spin(&ap_boot_mtx);
+
+ while (smp_started == 0)
+ ; /* nothing */
+ /* Enable Interrupt */
+ mips_cp0_status_write(SR_INT_ENAB);
+ /* ok, now grab sched_lock and enter the scheduler */
+ mtx_lock_spin(&sched_lock);
+
+ /*
+ * Correct spinlock nesting. The idle thread context that we are
+ * borrowing was created so that it would start out with a single
+ * spin lock (sched_lock) held in fork_trampoline(). Since we've
+ * explicitly acquired locks in this function, the nesting count
+ * is now 2 rather than 1. Since we are nested, calling
+ * spinlock_exit() will simply adjust the counts without allowing
+ * spin lock using code to interrupt us.
+ */
+ spinlock_exit();
+ KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+
+ binuptime(PCPU_PTR(switchtime));
+ PCPU_SET(switchticks, ticks);
+
+ /* kick off the clock on this cpu */
+ mips_start_timer();
+ cpu_throw(NULL, choosethread()); /* doesn't return */
+
+ panic("scheduler returned us to %s", __func__);
+}
+
+static int
+smp_start_secondary(int cpuid)
+{
+ struct pcpu *pcpu;
+ int i;
+
+ if (bootverbose)
+ printf("smp_start_secondary: starting cpu %d\n", cpuid);
+
+ pcpu_init(&__pcpu[cpuid], cpuid, sizeof(struct pcpu));
+
+ if (bootverbose)
+ printf("smp_start_secondary: cpu %d started\n", cpuid);
+
+ return 1;
+}
+
+int
+cpu_mp_probe(void)
+{
+ int i, cpus;
+
+ /* XXX: Need to check for valid platforms here. */
+
+ boot_cpu_id = PCPU_GET(cpuid);
+ KASSERT(boot_cpu_id == 0, ("cpu_mp_probe() called on non-primary CPU"));
+ all_cpus = PCPU_GET(cpumask);
+ mp_ncpus = 1;
+
+ /* Make sure we have at least one secondary CPU. */
+ cpus = 0;
+ for (i = 0; i < MAXCPU; i++) {
+ cpus++;
+ }
+ return (cpus);
+}
+
+void
+cpu_mp_start(void)
+{
+ int i, cpuid;
+
+ mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+
+ cpuid = 1;
+ for (i = 0; i < MAXCPU; i++) {
+
+ if (i == boot_cpu_id)
+ continue;
+ if (smp_start_secondary(i)) {
+ all_cpus |= (1 << cpuid);
+ mp_ncpus++;
+ cpuid++;
+ }
+ }
+ idle_mask |= CR_INT_IPI;
+ PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+}
+
+static void
+release_aps(void *dummy __unused)
+{
+ if (bootverbose && mp_ncpus > 1)
+ printf("%s: releasing secondary CPUs\n", __func__);
+ atomic_store_rel_int(&aps_ready, 1);
+
+ while (mp_ncpus > 1 && smp_started == 0)
+ ; /* nothing */
+}
+
+SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
diff --git a/sys/mips/mips/nexus.c b/sys/mips/mips/nexus.c
new file mode 100644
index 0000000..8ee4bcf
--- /dev/null
+++ b/sys/mips/mips/nexus.c
@@ -0,0 +1,474 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code implements a `root nexus' for MIPS Architecture
+ * machines. The function of the root nexus is to serve as an
+ * attachment point for both processors and buses, and to manage
+ * resources which are common to all of them. In particular,
+ * this code implements the core resource managers for interrupt
+ * requests and memory address space.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/interrupt.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/intr_machdep.h>
+#include <machine/pmap.h>
+#include <machine/resource.h>
+#include <machine/vmparam.h>
+
+static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
+
+struct nexus_device {
+ struct resource_list nx_resources;
+};
+
+#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev))
+#define NUM_MIPS_IRQS 6
+#define MIPS_MEM_RID 0x20
+
+static struct rman irq_rman;
+static struct rman mem_rman;
+
+#ifdef notyet
+/*
+ * XXX: TODO: Implement bus space barrier functions.
+ * Currently tag and handle are set when memory resources
+ * are activated.
+ */
+struct bus_space_tag nexus_bustag = {
+ NULL, /* cookie */
+ NULL, /* parent bus tag */
+ NEXUS_BUS_SPACE, /* type */
+ nexus_bus_barrier, /* bus_space_barrier */
+};
+#endif
+
+static struct resource *
+ nexus_alloc_resource(device_t, device_t, int, int *, u_long,
+ u_long, u_long, u_int);
+static int nexus_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static device_t nexus_add_child(device_t, int, const char *, int);
+static int nexus_attach(device_t);
+static int nexus_deactivate_resource(device_t, device_t, int, int,
+ struct resource *);
+static void nexus_delete_resource(device_t, device_t, int, int);
+static struct resource_list *
+ nexus_get_reslist(device_t, device_t);
+static int nexus_get_resource(device_t, device_t, int, int, u_long *,
+ u_long *);
+static void nexus_hinted_child(device_t, const char *, int);
+static int nexus_print_child(device_t, device_t);
+static int nexus_print_all_resources(device_t dev);
+static int nexus_probe(device_t);
+static int nexus_release_resource(device_t, device_t, int, int,
+ struct resource *);
+static int nexus_set_resource(device_t, device_t, int, int, u_long,
+ u_long);
+static int nexus_setup_intr(device_t dev, device_t child,
+ struct resource *res, int flags, driver_filter_t *filt,
+ driver_intr_t *intr, void *arg, void **cookiep);
+static int nexus_teardown_intr(device_t, device_t, struct resource *,
+ void *);
+
+static device_method_t nexus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_probe),
+ DEVMETHOD(device_attach, nexus_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, nexus_add_child),
+ DEVMETHOD(bus_activate_resource,nexus_activate_resource),
+ DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
+ DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
+ DEVMETHOD(bus_delete_resource, nexus_delete_resource),
+ DEVMETHOD(bus_get_resource, nexus_get_resource),
+ DEVMETHOD(bus_get_resource_list, nexus_get_reslist),
+ DEVMETHOD(bus_hinted_child, nexus_hinted_child),
+ DEVMETHOD(bus_print_child, nexus_print_child),
+ DEVMETHOD(bus_release_resource, nexus_release_resource),
+ DEVMETHOD(bus_set_resource, nexus_set_resource),
+ DEVMETHOD(bus_setup_intr, nexus_setup_intr),
+ DEVMETHOD(bus_teardown_intr, nexus_teardown_intr),
+
+ { 0, 0 }
+};
+
+static driver_t nexus_driver = {
+ "nexus",
+ nexus_methods,
+ 1 /* no softc */
+};
+static devclass_t nexus_devclass;
+
+static int
+nexus_probe(device_t dev)
+{
+
+ device_set_desc(dev, "MIPS32 root nexus");
+
+ irq_rman.rm_start = 0;
+ irq_rman.rm_end = NUM_MIPS_IRQS - 1;
+ irq_rman.rm_type = RMAN_ARRAY;
+ irq_rman.rm_descr = "Hardware IRQs";
+ if (rman_init(&irq_rman) != 0 ||
+ rman_manage_region(&irq_rman, 0, NUM_MIPS_IRQS - 1) != 0) {
+ panic("%s: irq_rman", __func__);
+ }
+
+ mem_rman.rm_start = 0;
+ mem_rman.rm_end = ~0u;
+ mem_rman.rm_type = RMAN_ARRAY;
+ mem_rman.rm_descr = "Memory addresses";
+ if (rman_init(&mem_rman) != 0 ||
+ rman_manage_region(&mem_rman, 0, ~0) != 0) {
+ panic("%s: mem_rman", __func__);
+ }
+
+ return (0);
+}
+
+static int
+nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
+ driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep)
+{
+ int irq;
+
+ register_t sr = intr_disable();
+ irq = rman_get_start(res);
+	if (irq >= NUM_MIPS_IRQS) {
+		intr_restore(sr);
+		return (0);
+	}
+
+ cpu_establish_hardintr(device_get_nameunit(child), filt, intr, arg,
+ irq, flags, cookiep);
+ intr_restore(sr);
+ return (0);
+}
+
+static int
+nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
+{
+
+ printf("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
+ return (0);
+}
+
+static int
+nexus_attach(device_t dev)
+{
+
+ bus_generic_probe(dev);
+ bus_enumerate_hinted_children(dev);
+ bus_generic_attach(dev);
+
+ return (0);
+}
+
+static int
+nexus_print_child(device_t bus, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(bus, child);
+ retval += nexus_print_all_resources(child);
+ if (device_get_flags(child))
+ retval += printf(" flags %#x", device_get_flags(child));
+ retval += printf(" on %s\n", device_get_nameunit(bus));
+
+ return (retval);
+}
+
+static int
+nexus_print_all_resources(device_t dev)
+{
+ struct nexus_device *ndev = DEVTONX(dev);
+ struct resource_list *rl = &ndev->nx_resources;
+ int retval = 0;
+
+ if (STAILQ_FIRST(rl))
+ retval += printf(" at");
+
+ retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
+ retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
+
+ return (retval);
+}
+
+static void
+nexus_hinted_child(device_t bus, const char *dname, int dunit)
+{
+ device_t child;
+ long maddr;
+ int msize;
+ int result;
+
+ child = BUS_ADD_CHILD(bus, 0, dname, dunit);
+
+ /*
+ * Set hard-wired resources for hinted child using
+ * specific RIDs.
+ */
+ resource_long_value(dname, dunit, "maddr", &maddr);
+ resource_int_value(dname, dunit, "msize", &msize);
+
+ printf("%s: discovered hinted child %s at maddr %p(%d)\n",
+ __func__, device_get_nameunit(child),
+ (void *)(intptr_t)maddr, msize);
+
+ result = bus_set_resource(child, SYS_RES_MEMORY, MIPS_MEM_RID,
+ maddr, msize);
+ if (result != 0) {
+ device_printf(bus, "warning: bus_set_resource() failed\n");
+ }
+}
+
+static device_t
+nexus_add_child(device_t bus, int order, const char *name, int unit)
+{
+ device_t child;
+ struct nexus_device *ndev;
+
+ ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO);
+ if (!ndev)
+ return (0);
+ resource_list_init(&ndev->nx_resources);
+
+ child = device_add_child_ordered(bus, order, name, unit);
+
+ /* should we free this in nexus_child_detached? */
+ device_set_ivars(child, ndev);
+
+ return (child);
+}
+
+/*
+ * Allocate a resource on behalf of child. NB: child is usually going to be a
+ * child of one of our descendants, not a direct child of nexus0.
+ * (Exceptions include footbridge.)
+ */
+static struct resource *
+nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+ struct resource *rv;
+ struct resource_list_entry *rle;
+ struct rman *rm;
+ int isdefault, needactivate, passthrough;
+
+ printf("%s: entry (%p, %p, %d, %p, %p, %p, %ld, %d)\n",
+ __func__, bus, child, type, rid, (void *)(intptr_t)start,
+ (void *)(intptr_t)end, count, flags);
+ printf("%s: requested rid is %d\n", __func__, *rid);
+
+ isdefault = (start == 0UL && end == ~0UL && count == 1);
+ needactivate = flags & RF_ACTIVE;
+ passthrough = (device_get_parent(child) != bus);
+ rle = NULL;
+
+ /*
+ * If this is an allocation of the "default" range for a given RID,
+	 * and we know what the resources for this device are (i.e., they aren't
+ * maintained by a child bus), then work out the start/end values.
+ */
+ if (isdefault) {
+ rle = resource_list_find(&ndev->nx_resources, type, *rid);
+ if (rle == NULL)
+ return (NULL);
+ if (rle->res != NULL) {
+ panic("%s: resource entry is busy", __func__);
+ }
+ start = rle->start;
+ end = rle->end;
+ count = rle->count;
+ }
+
+ switch (type) {
+ case SYS_RES_IRQ:
+ rm = &irq_rman;
+ break;
+ case SYS_RES_MEMORY:
+ rm = &mem_rman;
+ break;
+ default:
+ printf("%s: unknown resource type %d\n", __func__, type);
+ return (0);
+ }
+
+ rv = rman_reserve_resource(rm, start, end, count, flags, child);
+ if (rv == 0) {
+ printf("%s: could not reserve resource\n", __func__);
+ return (0);
+ }
+
+ rman_set_rid(rv, *rid);
+
+ if (needactivate) {
+ if (bus_activate_resource(child, type, *rid, rv)) {
+ printf("%s: could not activate resource\n", __func__);
+ rman_release_resource(rv);
+ return (0);
+ }
+ }
+
+ return (rv);
+}
+
+static int
+nexus_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+#ifdef TARGET_OCTEON
+ uint64_t temp;
+#endif
+ /*
+ * If this is a memory resource, track the direct mapping
+ * in the uncached MIPS KSEG1 segment.
+ */
+ if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) {
+ caddr_t vaddr = 0;
+ u_int32_t paddr;
+ u_int32_t psize;
+ u_int32_t poffs;
+
+ paddr = rman_get_start(r);
+ psize = rman_get_size(r);
+ poffs = paddr - trunc_page(paddr);
+ vaddr = (caddr_t) pmap_mapdev(paddr-poffs, psize+poffs) + poffs;
+
+ rman_set_virtual(r, vaddr);
+ rman_set_bustag(r, MIPS_BUS_SPACE_MEM);
+#ifdef TARGET_OCTEON
+ temp = 0x0000000000000000;
+ temp |= (uint32_t)vaddr;
+ rman_set_bushandle(r, (bus_space_handle_t)temp);
+#else
+ rman_set_bushandle(r, (bus_space_handle_t)vaddr);
+#endif
+ }
+
+ return (rman_activate_resource(r));
+}
+
+static struct resource_list *
+nexus_get_reslist(device_t dev, device_t child)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+
+ return (&ndev->nx_resources);
+}
+
+static int
+nexus_set_resource(device_t dev, device_t child, int type, int rid,
+ u_long start, u_long count)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+ struct resource_list *rl = &ndev->nx_resources;
+ struct resource_list_entry *rle;
+
+ printf("%s: entry (%p, %p, %d, %d, %p, %ld)\n",
+ __func__, dev, child, type, rid, (void *)(intptr_t)start, count);
+
+ rle = resource_list_add(rl, type, rid, start, start + count - 1,
+ count);
+ if (rle == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+nexus_get_resource(device_t dev, device_t child, int type, int rid,
+ u_long *startp, u_long *countp)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+ struct resource_list *rl = &ndev->nx_resources;
+ struct resource_list_entry *rle;
+
+ rle = resource_list_find(rl, type, rid);
+ if (!rle)
+ return(ENOENT);
+ if (startp)
+ *startp = rle->start;
+ if (countp)
+ *countp = rle->count;
+ return (0);
+}
+
+static void
+nexus_delete_resource(device_t dev, device_t child, int type, int rid)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+ struct resource_list *rl = &ndev->nx_resources;
+
+ printf("%s: entry\n", __func__);
+
+ resource_list_delete(rl, type, rid);
+}
+
+static int
+nexus_release_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ printf("%s: entry\n", __func__);
+
+ if (rman_get_flags(r) & RF_ACTIVE) {
+ int error = bus_deactivate_resource(child, type, rid, r);
+ if (error)
+ return error;
+ }
+
+ return (rman_release_resource(r));
+}
+
+static int
+nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+
+ return (rman_deactivate_resource(r));
+}
+
+DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
diff --git a/sys/mips/mips/pm_machdep.c b/sys/mips/mips/pm_machdep.c
new file mode 100644
index 0000000..e9e6ea2
--- /dev/null
+++ b/sys/mips/mips/pm_machdep.c
@@ -0,0 +1,541 @@
+/*-
+ * Copyright (c) 1992 Terrence R. Lambert.
+ * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
+ * from: src/sys/i386/i386/machdep.c,v 1.385.2.3 2000/05/10 02:04:46 obrien
+ * JNPR: pm_machdep.c,v 1.9.2.1 2007/08/16 15:59:10 girish
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_compat.h"
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysent.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/ucontext.h>
+#include <sys/lock.h>
+#include <sys/sysproto.h>
+#include <sys/ptrace.h>
+#include <sys/syslog.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <sys/user.h>
+#include <sys/uio.h>
+#include <machine/reg.h>
+#include <machine/md_var.h>
+#include <machine/sigframe.h>
+#include <machine/vmparam.h>
+#include <sys/vnode.h>
+#include <fs/pseudofs/pseudofs.h>
+#include <fs/procfs/procfs.h>
+
+#define UCONTEXT_MAGIC 0xACEDBADE
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * at top to call routine, followed by kcall
+ * to sigreturn routine below. After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user
+ * specified pc, psl.
+ */
+void
+sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+ struct proc *p;
+ struct thread *td;
+ struct trapframe *regs;
+ struct sigacts *psp;
+ struct sigframe sf, *sfp;
+ int sig;
+ int oonstack;
+
+ td = curthread;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ sig = ksi->ksi_signo;
+ psp = p->p_sigacts;
+ mtx_assert(&psp->ps_mtx, MA_OWNED);
+
+ regs = td->td_frame;
+ oonstack = sigonstack(regs->sp);
+
+ /* save user context */
+ bzero(&sf, sizeof(struct sigframe));
+ sf.sf_uc.uc_sigmask = *mask;
+ sf.sf_uc.uc_stack = td->td_sigstk;
+ sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
+ sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
+ sf.sf_uc.uc_mcontext.mullo = regs->mullo;
+ sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
+ sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC; /* magic number */
+ bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
+ sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
+ sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
+ if (sf.sf_uc.uc_mcontext.mc_fpused) {
+ /* if FPU has current state, save it first */
+ if (td == PCPU_GET(fpcurthread))
+ MipsSaveCurFPState(td);
+ bcopy((void *)&td->td_frame->f0,
+ (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
+ sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
+ }
+
+ /* Allocate and validate space for the signal handler context. */
+ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
+ SIGISMEMBER(psp->ps_sigonstack, sig)) {
+ sfp = (struct sigframe *)((vm_offset_t)(td->td_sigstk.ss_sp +
+ td->td_sigstk.ss_size - sizeof(struct sigframe))
+ & ~(sizeof(__int64_t) - 1));
+ } else
+ sfp = (struct sigframe *)((vm_offset_t)(regs->sp -
+ sizeof(struct sigframe)) & ~(sizeof(__int64_t) - 1));
+
+	/* Translate the signal if appropriate */
+ if (p->p_sysent->sv_sigtbl) {
+ if (sig <= p->p_sysent->sv_sigsize)
+ sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
+ }
+
+ /* Build the argument list for the signal handler. */
+ regs->a0 = sig;
+ regs->a2 = (register_t)&sfp->sf_uc;
+ if (SIGISMEMBER(psp->ps_siginfo, sig)) {
+ /* Signal handler installed with SA_SIGINFO. */
+ regs->a1 = (register_t)&sfp->sf_si;
+ /* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */
+
+ /* fill siginfo structure */
+ sf.sf_si.si_signo = sig;
+ sf.sf_si.si_code = ksi->ksi_code;
+ sf.sf_si.si_addr = (void*)regs->badvaddr;
+ } else {
+ /* Old FreeBSD-style arguments. */
+ regs->a1 = ksi->ksi_code;
+ regs->a3 = regs->badvaddr;
+ /* sf.sf_ahu.sf_handler = catcher; */
+ }
+
+ mtx_unlock(&psp->ps_mtx);
+ PROC_UNLOCK(p);
+
+ /*
+ * Copy the sigframe out to the user's stack.
+ */
+ if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
+ /*
+ * Something is wrong with the stack pointer.
+ * ...Kill the process.
+ */
+ PROC_LOCK(p);
+ sigexit(td, SIGILL);
+ }
+
+ regs->pc = (register_t) catcher;
+ regs->t9 = (register_t) catcher;
+ regs->sp = (register_t) sfp;
+ /*
+ * Signal trampoline code is at base of user stack.
+ */
+ regs->ra = (register_t) PS_STRINGS - *(p->p_sysent->sv_szsigcode);
+ PROC_LOCK(p);
+ mtx_lock(&psp->ps_mtx);
+}
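+
+/*
+ * A minimal worked example of the sigframe placement above (the numbers are
+ * assumptions for illustration, not guarantees): the frame is carved out
+ * below the chosen stack pointer and rounded down to an 8 byte boundary by
+ * the ~(sizeof(__int64_t) - 1) mask.  E.g., with a user sp of 0x7fffee0c and
+ * a sizeof(struct sigframe) of 0x2a8, the frame would start at
+ * (0x7fffee0c - 0x2a8) & ~7 == 0x7fffeb60.
+ */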
+
+#ifdef GONE_IN_7
+/*
+ * Build siginfo_t for SA thread
+ */
+void
+cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
+{
+ struct proc *p;
+ struct thread *td;
+
+ td = curthread;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+ bzero(si, sizeof(*si));
+ si->si_signo = sig;
+ si->si_code = code;
+ /* XXXKSE fill other fields */
+}
+#endif
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc as specified by
+ * context left by sendsig.
+ */
+int
+sigreturn(struct thread *td, struct sigreturn_args *uap)
+{
+ struct trapframe *regs;
+ const ucontext_t *ucp;
+ struct proc *p;
+ ucontext_t uc;
+ int error;
+
+ ucp = &uc;
+ p = td->td_proc;
+
+ error = copyin(uap->sigcntxp, &uc, sizeof(uc));
+ if (error != 0)
+ return (error);
+
+ regs = td->td_frame;
+
+/* #ifdef DEBUG */
+ if (ucp->uc_mcontext.mc_regs[ZERO] != UCONTEXT_MAGIC) {
+ printf("sigreturn: pid %d, ucp %p\n", p->p_pid, ucp);
+ printf(" old sp %x ra %x pc %x\n",
+ regs->sp, regs->ra, regs->pc);
+ printf(" new sp %x ra %x pc %x z %x\n",
+ ucp->uc_mcontext.mc_regs[SP],
+ ucp->uc_mcontext.mc_regs[RA],
+ ucp->uc_mcontext.mc_regs[PC],
+ ucp->uc_mcontext.mc_regs[ZERO]);
+ return EINVAL;
+ }
+/* #endif */
+
+ bcopy((const void *)&ucp->uc_mcontext.mc_regs[1], (void *)&regs->ast,
+ sizeof(ucp->uc_mcontext.mc_regs) - sizeof(register_t));
+
+ if (ucp->uc_mcontext.mc_fpused)
+ bcopy((const void *)ucp->uc_mcontext.mc_fpregs,
+ (void *)&td->td_frame->f0,
+ sizeof(ucp->uc_mcontext.mc_fpregs));
+
+ regs->pc = ucp->uc_mcontext.mc_pc;
+ regs->mullo = ucp->uc_mcontext.mullo;
+ regs->mulhi = ucp->uc_mcontext.mulhi;
+
+ PROC_LOCK(p);
+ td->td_sigmask = ucp->uc_sigmask;
+ SIG_CANTMASK(td->td_sigmask);
+ signotify(td);
+ PROC_UNLOCK(p);
+ return(EJUSTRETURN);
+}
+
+
+int
+ptrace_set_pc(struct thread *td, unsigned long addr)
+{
+ td->td_frame->pc = (register_t) addr;
+ return 0;
+}
+
+static int
+ptrace_read_int(struct thread *td, off_t addr, int *v)
+{
+ struct iovec iov;
+ struct uio uio;
+
+ PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
+ iov.iov_base = (caddr_t) v;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)addr;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_td = td;
+ return proc_rwmem(td->td_proc, &uio);
+}
+
+static int
+ptrace_write_int(struct thread *td, off_t addr, int v)
+{
+ struct iovec iov;
+ struct uio uio;
+
+ PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
+ iov.iov_base = (caddr_t) &v;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)addr;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_td = td;
+ return proc_rwmem(td->td_proc, &uio);
+}
+
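+/*
+ * Single stepping is implemented by planting a breakpoint: fetch the
+ * instruction at the current pc, compute the address of the next
+ * instruction (emulating the branch if the current instruction is one),
+ * save the original contents of that location in td_md.md_ss_instr, and
+ * write a BREAK_SSTEP breakpoint there.  ptrace_clear_single_step() later
+ * puts the saved instruction back.
+ */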
+int
+ptrace_single_step(struct thread *td)
+{
+ unsigned va;
+ struct trapframe *locr0 = td->td_frame;
+ int i;
+ int bpinstr = BREAK_SSTEP;
+ int curinstr;
+ struct proc *p;
+
+ p = td->td_proc;
+ PROC_UNLOCK(p);
+ /*
+ * Fetch what's at the current location.
+ */
+ ptrace_read_int(td, (off_t)locr0->pc, &curinstr);
+
+ /* compute next address after current location */
+ if(curinstr != 0) {
+ va = MipsEmulateBranch(locr0, locr0->pc, locr0->fsr,
+ (u_int)&curinstr);
+ } else {
+ va = locr0->pc + 4;
+ }
+ if (td->td_md.md_ss_addr) {
+ printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
+ p->p_comm, p->p_pid, td->td_md.md_ss_addr, va); /* XXX */
+ return (EFAULT);
+ }
+ td->td_md.md_ss_addr = va;
+ /*
+ * Fetch what's at the current location.
+ */
+ ptrace_read_int(td, (off_t)va, &td->td_md.md_ss_instr);
+
+ /*
+ * Store breakpoint instruction at the "next" location now.
+ */
+ i = ptrace_write_int (td, va, bpinstr);
+
+ /*
+ * The sync'ing of I & D caches is done by procfs_domem()
+ * through procfs_rwmem().
+ */
+
+ PROC_LOCK(p);
+ if (i < 0)
+ return (EFAULT);
+#if 0
+ printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
+ p->p_comm, p->p_pid, p->p_md.md_ss_addr,
+ p->p_md.md_ss_instr, locr0->pc, curinstr); /* XXX */
+#endif
+ return (0);
+}
+
+
+void
+makectx(struct trapframe *tf, struct pcb *pcb)
+{
+
+ pcb->pcb_regs.ra = tf->ra;
+ pcb->pcb_regs.pc = tf->pc;
+ pcb->pcb_regs.sp = tf->sp;
+}
+
+int
+fill_regs(struct thread *td, struct reg *regs)
+{
+ memcpy(regs, td->td_frame, sizeof(struct reg));
+ return (0);
+}
+
+int
+set_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *f;
+ register_t sr;
+
+ f = (struct trapframe *) td->td_frame;
+ /*
+ * Don't allow the user to change SR
+ */
+ sr = f->sr;
+ memcpy(td->td_frame, regs, sizeof(struct reg));
+ f->sr = sr;
+ return (0);
+}
+
+int
+get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
+{
+ struct trapframe *tp;
+
+ tp = td->td_frame;
+ PROC_LOCK(curthread->td_proc);
+ mcp->mc_onstack = sigonstack(tp->sp);
+ PROC_UNLOCK(curthread->td_proc);
+ bcopy((void *)&td->td_frame->zero, (void *)&mcp->mc_regs,
+ sizeof(mcp->mc_regs));
+
+ mcp->mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
+ if (mcp->mc_fpused) {
+ bcopy((void *)&td->td_frame->f0, (void *)&mcp->mc_fpregs,
+ sizeof(mcp->mc_fpregs));
+ }
+ mcp->mc_pc = td->td_frame->pc;
+ mcp->mullo = td->td_frame->mullo;
+ mcp->mulhi = td->td_frame->mulhi;
+ return (0);
+}
+
+int
+set_mcontext(struct thread *td, const mcontext_t *mcp)
+{
+ struct trapframe *tp;
+
+ tp = td->td_frame;
+ bcopy((void *)&mcp->mc_regs, (void *)&td->td_frame->zero,
+ sizeof(mcp->mc_regs));
+
+ td->td_md.md_flags = mcp->mc_fpused & MDTD_FPUSED;
+ if (mcp->mc_fpused) {
+ bcopy((void *)&mcp->mc_fpregs, (void *)&td->td_frame->f0,
+ sizeof(mcp->mc_fpregs));
+ }
+ td->td_frame->pc = mcp->mc_pc;
+ td->td_frame->mullo = mcp->mullo;
+ td->td_frame->mulhi = mcp->mulhi;
+	/* Don't let the user set any bits in the Status and Cause registers. */
+
+ return (0);
+}
+
+int
+fill_fpregs(struct thread *td, struct fpreg *fpregs)
+{
+ if (td == PCPU_GET(fpcurthread))
+ MipsSaveCurFPState(td);
+ memcpy(fpregs, &td->td_frame->f0, sizeof(struct fpreg));
+ return 0;
+}
+
+int
+set_fpregs(struct thread *td, struct fpreg *fpregs)
+{
+ if (PCPU_GET(fpcurthread) == td)
+ PCPU_SET(fpcurthread, (struct thread *)0);
+ memcpy(&td->td_frame->f0, fpregs, sizeof(struct fpreg));
+ return 0;
+}
+
+
+/*
+ * Clear registers on exec
+ * $sp is set to the stack pointer passed in. $pc is set to the entry
+ * point given by the exec_package passed in, as is $t9 (used for PIC
+ * code by the MIPS ELF ABI).
+ */
+void
+exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
+{
+
+ bzero((caddr_t)td->td_frame, sizeof(struct trapframe));
+
+ /*
+ * Make sp 64-bit aligned.
+ */
+ td->td_frame->sp = ((register_t) stack) & ~(sizeof(__int64_t) - 1);
+ td->td_frame->pc = entry & ~3;
+ td->td_frame->t9 = entry & ~3; /* abicall req */
+#if 0
+// td->td_frame->sr = SR_KSU_USER | SR_EXL | SR_INT_ENAB;
+//? td->td_frame->sr |= idle_mask & ALL_INT_MASK;
+#else
+ td->td_frame->sr = SR_KSU_USER | SR_EXL;// mips2 also did COP_0_BIT
+#endif
+#ifdef TARGET_OCTEON
+ td->td_frame->sr |= MIPS_SR_COP_2_BIT | MIPS32_SR_PX | MIPS_SR_UX |
+ MIPS_SR_KX | MIPS_SR_SX;
+#endif
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * Setup any other CPU-Specific registers (Not MIPS Standard)
+ * and/or bits in other standard MIPS registers (if CPU-Specific)
+ * that are needed.
+ */
+
+ /*
+ * Set up arguments for the rtld-capable crt0:
+ * a0 stack pointer
+ * a1 rtld cleanup (filled in by dynamic loader)
+ * a2 rtld object (filled in by dynamic loader)
+ * a3 ps_strings
+ */
+ td->td_frame->a0 = (register_t) stack;
+ td->td_frame->a1 = 0;
+ td->td_frame->a2 = 0;
+ td->td_frame->a3 = (register_t)ps_strings;
+
+ td->td_md.md_flags &= ~MDTD_FPUSED;
+ if (PCPU_GET(fpcurthread) == td)
+ PCPU_SET(fpcurthread, (struct thread *)0);
+ td->td_md.md_ss_addr = 0;
+}
+
+int
+ptrace_clear_single_step(struct thread *td)
+{
+ int i;
+ struct proc *p;
+
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ if (!td->td_md.md_ss_addr)
+ return EINVAL;
+
+ /*
+ * Restore original instruction and clear BP
+ */
+ i = ptrace_write_int (td, td->td_md.md_ss_addr, td->td_md.md_ss_instr);
+
+ /* The sync'ing of I & D caches is done by procfs_domem(). */
+
+ if (i < 0) {
+ log(LOG_ERR, "SS %s %d: can't restore instruction at %x: %x\n",
+ p->p_comm, p->p_pid, td->td_md.md_ss_addr,
+ td->td_md.md_ss_instr);
+ }
+ td->td_md.md_ss_addr = 0;
+ return 0;
+}
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
new file mode 100644
index 0000000..36a4728
--- /dev/null
+++ b/sys/mips/mips/pmap.c
@@ -0,0 +1,3229 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
+ * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+/* XXXimp
+ * mips2 has a pmap_initialized, but we don't use it here. Why?
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/msgbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/uma.h>
+#include <sys/pcpu.h>
+#include <sys/sched.h>
+#ifdef SMP
+#include <sys/smp.h>
+#endif
+
+#include <machine/cache.h>
+#include <machine/pltfm.h>
+#include <machine/md_var.h>
+
+#if defined(DIAGNOSTIC)
+#define PMAP_DIAGNOSTIC
+#endif
+
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
+
+#if !defined(PMAP_DIAGNOSTIC)
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
+/*
+ * Get PDEs and PTEs for user/kernel address space
+ */
+#define pmap_pde(m, v) (&((m)->pm_segtab[(vm_offset_t)(v) >> SEGSHIFT]))
+#define segtab_pde(m, v) (m[(vm_offset_t)(v) >> SEGSHIFT])
+
+#define pmap_pte_w(pte) ((*(int *)pte & PTE_W) != 0)
+#define pmap_pde_v(pte) ((*(int *)pte) != 0)
+#define pmap_pte_m(pte) ((*(int *)pte & PTE_M) != 0)
+#define pmap_pte_v(pte) ((*(int *)pte & PTE_V) != 0)
+
+#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PTE_W):(*(int *)pte &= ~PTE_W))
+#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
+
+#define MIPS_SEGSIZE (1L << SEGSHIFT)
+#define mips_segtrunc(va) ((va) & ~(MIPS_SEGSIZE-1))
+#define pmap_TLB_invalidate_all() MIPS_TBIAP()
+#define pmap_va_asid(pmap, va) ((va) | ((pmap)->pm_asid[PCPU_GET(cpuid)].asid << VMTLB_PID_SHIFT))
+#define is_kernel_pmap(x) ((x) == kernel_pmap)
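+
+/*
+ * Worked example for the macros above (values are assumptions for
+ * illustration): with 4K pages and a SEGSHIFT of 22, each segment table
+ * entry covers 4 Meg of address space, so a va of 0x00452000 selects
+ * segment table index 0x00452000 >> 22 == 1.  pmap_va_asid() then ORs the
+ * pmap's ASID for this CPU into the va (shifted by VMTLB_PID_SHIFT) so
+ * that TLB operations hit only this address space.
+ */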
+
+static struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+pd_entry_t *kernel_segmap;
+
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+
+static int nkpt;
+unsigned pmap_max_asid; /* max ASID supported by the system */
+
+
+#define PMAP_ASID_RESERVED 0
+
+
+vm_offset_t kernel_vm_end;
+
+static void pmap_asid_alloc(pmap_t pmap);
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static uma_zone_t pvzone;
+static struct vm_object pvzone_obj;
+static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+int pmap_pagedaemon_waken = 0;
+
+struct fpage fpages_shared[FPAGES_SHARED];
+
+struct sysmaps sysmaps_pcpu[MAXCPU];
+
+static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
+static pv_entry_t get_pv_entry(void);
+static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
+
+static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
+static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
+static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
+static boolean_t pmap_testbit(vm_page_t m, int bit);
+static void
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
+ vm_page_t m, boolean_t wired);
+
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+
+static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
+static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
+static void pmap_TLB_invalidate_kernel(vm_offset_t);
+static void pmap_TLB_update_kernel(vm_offset_t, pt_entry_t);
+static void pmap_init_fpage(void);
+
+#ifdef SMP
+static void pmap_invalidate_page_action(void *arg);
+static void pmap_invalidate_all_action(void *arg);
+static void pmap_update_page_action(void *arg);
+
+#endif
+
+struct local_sysmaps {
+ struct mtx lock;
+ pt_entry_t CMAP1;
+ pt_entry_t CMAP2;
+ caddr_t CADDR1;
+ caddr_t CADDR2;
+ uint16_t valid1, valid2;
+};
+
+/*
+ * This structure is for large memory above 512Meg.  We can't (in 32 bit
+ * mode) just use the direct mapped MIPS_CACHED_TO_PHYS() macros since we
+ * can't see the memory and must map it in when we need to access it.  In
+ * 64 bit mode this goes away.
+ */
+static struct local_sysmaps sysmap_lmem[MAXCPU];
+caddr_t virtual_sys_start = (caddr_t)0;
+
+pd_entry_t
+pmap_segmap(pmap_t pmap, vm_offset_t va)
+{
+ if (pmap->pm_segtab)
+ return (pmap->pm_segtab[((vm_offset_t)(va) >> SEGSHIFT)]);
+ else
+ return ((pd_entry_t)0);
+}
+
+/*
+ * Routine: pmap_pte
+ * Function:
+ * Extract the page table entry associated
+ * with the given map/virtual_address pair.
+ */
+pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t va)
+{
+ pt_entry_t *pdeaddr;
+
+ if (pmap) {
+ pdeaddr = (pt_entry_t *)pmap_segmap(pmap, va);
+ if (pdeaddr) {
+ return pdeaddr + vad_to_pte_offset(va);
+ }
+ }
+ return ((pt_entry_t *)0);
+}
+
+
+vm_offset_t
+pmap_steal_memory(vm_size_t size)
+{
+ vm_size_t bank_size;
+ vm_offset_t pa, va;
+
+ size = round_page(size);
+
+ bank_size = phys_avail[1] - phys_avail[0];
+ while (size > bank_size) {
+ int i;
+
+ for (i = 0; phys_avail[i + 2]; i += 2) {
+ phys_avail[i] = phys_avail[i + 2];
+ phys_avail[i + 1] = phys_avail[i + 3];
+ }
+ phys_avail[i] = 0;
+ phys_avail[i + 1] = 0;
+ if (!phys_avail[0])
+ panic("pmap_steal_memory: out of memory");
+ bank_size = phys_avail[1] - phys_avail[0];
+ }
+
+ pa = phys_avail[0];
+ phys_avail[0] += size;
+ if (pa >= MIPS_KSEG0_LARGEST_PHYS) {
+ panic("Out of memory below 512Meg?");
+ }
+ va = MIPS_PHYS_TO_CACHED(pa);
+ bzero((caddr_t)va, size);
+ return va;
+}
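+
+/*
+ * For illustration (the addresses are assumptions, not guarantees): if
+ * phys_avail[0] is 0x00200000 and 0x4000 bytes are stolen, phys_avail[0]
+ * advances to 0x00204000 and the caller gets back the KSEG0 (cached,
+ * direct-mapped) alias of the old start, e.g. 0x80200000 on a 32 bit
+ * kernel.
+ */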
+
+/*
+ * Bootstrap the system enough to run with virtual memory. This
+ * assumes that the phys_avail array has been initialized.
+ */
+void
+pmap_bootstrap(void)
+{
+ pt_entry_t *pgtab;
+ pt_entry_t *pte;
+ int i, j;
+ int memory_larger_than_512meg = 0;
+
+ /* Sort. */
+again:
+ for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ if (phys_avail[i + 1] >= MIPS_KSEG0_LARGEST_PHYS) {
+ memory_larger_than_512meg++;
+ }
+ if (i < 2)
+ continue;
+ if (phys_avail[i - 2] > phys_avail[i]) {
+ vm_paddr_t ptemp[2];
+
+
+ ptemp[0] = phys_avail[i + 0];
+ ptemp[1] = phys_avail[i + 1];
+
+ phys_avail[i + 0] = phys_avail[i - 2];
+ phys_avail[i + 1] = phys_avail[i - 1];
+
+ phys_avail[i - 2] = ptemp[0];
+ phys_avail[i - 1] = ptemp[1];
+ goto again;
+ }
+ }
+
+ if (bootverbose) {
+ printf("Physical memory chunk(s):\n");
+ for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ vm_paddr_t size;
+
+ size = phys_avail[i + 1] - phys_avail[i];
+ printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
+ (uintmax_t) phys_avail[i],
+ (uintmax_t) phys_avail[i + 1] - 1,
+ (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
+ }
+ }
+ /*
+ * Steal the message buffer from the beginning of memory.
+ */
+ msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
+ msgbufinit(msgbufp, MSGBUF_SIZE);
+
+ /*
+ * Steal thread0 kstack.
+ */
+ kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
+
+
+ virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+ /*
+ * Steal some virtual space that will not be in kernel_segmap. This
+ * va memory space will be used to map in kernel pages that are
+ * outside the 512Meg region. Note that we only do this steal when
+ * we do have memory in this region, that way for systems with
+ * smaller memory we don't "steal" any va ranges :-)
+ */
+ if (memory_larger_than_512meg) {
+ for (i = 0; i < MAXCPU; i++) {
+ sysmap_lmem[i].CMAP1 = PTE_G;
+ sysmap_lmem[i].CMAP2 = PTE_G;
+ sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
+ PMAP_LGMEM_LOCK_INIT(&sysmap_lmem[i]);
+ }
+ }
+ virtual_sys_start = (caddr_t)virtual_avail;
+ /*
+ * Allocate segment table for the kernel
+ */
+ kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
+
+ /*
+ * Allocate second level page tables for the kernel
+ */
+ nkpt = NKPT;
+ if (memory_larger_than_512meg) {
+ /*
+		 * If we have a large memory system we CANNOT afford to hit
+		 * pmap_growkernel() and allocate memory, since we may end
+		 * up with a page that is NOT mappable.  For that reason we
+		 * grab more up front.  Normally NKPT is 120 (YMMV, see
+		 * pmap.h); this gives us 480 Meg of kernel virtual addresses
+		 * at the cost of 120 pages (each page maps 4 Meg).  Since
+		 * the kernel starts at virtual_avail, we can use this to
+		 * calculate how many entries are left from there to the end
+		 * of the segmap.  We want to allocate all of it, which
+		 * covers roughly 0xC0000000 - 0xFFFFFFFF and results in
+		 * about 256 entries instead of the 120.
+ */
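+		/*
+		 * For illustration (these numbers are assumptions, not
+		 * guarantees): with 4K pages and 4 byte segment table
+		 * entries, PAGE_SIZE / sizeof(pd_entry_t) is 1024, and a
+		 * virtual_avail of about 0xC0100000 shifted right by
+		 * SEGSHIFT (22) is 768, leaving nkpt at roughly 256.
+		 */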
+ nkpt = (PAGE_SIZE / sizeof(pd_entry_t)) - (virtual_avail >> SEGSHIFT);
+ }
+ pgtab = (pt_entry_t *)pmap_steal_memory(PAGE_SIZE * nkpt);
+
+ /*
+ * The R[4-7]?00 stores only one copy of the Global bit in the
+ * translation lookaside buffer for each 2 page entry. Thus invalid
+	 * entries must have the Global bit set so that when the EntryLo and
+	 * EntryHi G bits are ANDed together they produce a global bit to
+	 * store in the TLB.
+ */
+ for (i = 0, pte = pgtab; i < (nkpt * NPTEPG); i++, pte++)
+ *pte = PTE_G;
+
+ printf("Va=0x%x Ve=%x\n", virtual_avail, virtual_end);
+ /*
+ * The segment table contains the KVA of the pages in the second
+ * level page table.
+ */
+ printf("init kernel_segmap va >> = %d nkpt:%d\n",
+ (virtual_avail >> SEGSHIFT),
+ nkpt);
+ for (i = 0, j = (virtual_avail >> SEGSHIFT); i < nkpt; i++, j++)
+ kernel_segmap[j] = (pd_entry_t)(pgtab + (i * NPTEPG));
+
+ avail_start = phys_avail[0];
+ for (i = 0; phys_avail[i + 2]; i += 2);
+ avail_end = phys_avail[i + 1];
+
+ /*
+ * The kernel's pmap is statically allocated so we don't have to use
+ * pmap_create, which is unlikely to work correctly at this part of
+ * the boot sequence (XXX and which no longer exists).
+ */
+ kernel_pmap = &kernel_pmap_store;
+
+ PMAP_LOCK_INIT(kernel_pmap);
+ kernel_pmap->pm_segtab = kernel_segmap;
+ kernel_pmap->pm_active = ~0;
+ TAILQ_INIT(&kernel_pmap->pm_pvlist);
+ printf("avail_start:0x%x avail_end:0x%x\n",
+ avail_start, avail_end);
+
+ kernel_pmap->pm_asid[PCPU_GET(cpuid)].asid = PMAP_ASID_RESERVED;
+ kernel_pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
+ pmap_max_asid = VMNUM_PIDS;
+ MachSetPID(0);
+}
+
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ m->md.pv_flags = 0;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ * pmap_init has been enhanced to support, in a fairly consistent
+ * way, discontiguous physical memory.
+ */
+void
+pmap_init(void)
+{
+
+ if (need_wired_tlb_page_pool) {
+ pmap_init_fpage();
+ }
+ /*
+ * Initialize the address space (zone) for the pv entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
+ NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+ pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+}
+
+/***************************************************
+ * Low level helper routines.....
+ ***************************************************/
+
+#if defined(PMAP_DIAGNOSTIC)
+
+/*
+ * This code checks for non-writeable/modified pages.
+ * This should be an invalid condition.
+ */
+static int
+pmap_nw_modified(pt_entry_t pte)
+{
+ if ((pte & (PTE_M | PTE_RO)) == (PTE_M | PTE_RO))
+ return (1);
+ else
+ return (0);
+}
+
+#endif
+
+
+/*
+ * this routine defines the region(s) of memory that should
+ * not be tested for the modified bit.
+ */
+static PMAP_INLINE int
+pmap_track_modified(vm_offset_t va)
+{
+ /*
+	 * Kernel submap initialization has been moved from MD to MI code,
+	 * i.e. from cpu_startup() to vm_ksubmap_init().  clean_sva and clean_eva
+ * are part of the kmi structure.
+ */
+ if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
+ return (1);
+ else
+ return (0);
+}
+
+static void
+pmap_invalidate_all(pmap_t pmap)
+{
+#ifdef SMP
+ smp_rendezvous(0, pmap_invalidate_all_action, 0, (void *)pmap);
+}
+
+static void
+pmap_invalidate_all_action(void *arg)
+{
+ pmap_t pmap = (pmap_t)arg;
+
+#endif
+
+ if (pmap->pm_active & PCPU_GET(cpumask)) {
+ pmap_TLB_invalidate_all();
+ } else
+ pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
+}
+
+struct pmap_invalidate_page_arg {
+ pmap_t pmap;
+ vm_offset_t va;
+};
+
+static __inline void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+#ifdef SMP
+ struct pmap_invalidate_page_arg arg;
+
+ arg.pmap = pmap;
+ arg.va = va;
+
+ smp_rendezvous(0, pmap_invalidate_page_action, 0, (void *)&arg);
+}
+
+static void
+pmap_invalidate_page_action(void *arg)
+{
+ pmap_t pmap = ((struct pmap_invalidate_page_arg *)arg)->pmap;
+ vm_offset_t va = ((struct pmap_invalidate_page_arg *)arg)->va;
+
+#endif
+
+ if (is_kernel_pmap(pmap)) {
+ pmap_TLB_invalidate_kernel(va);
+ return;
+ }
+ if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
+ return;
+ else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
+ pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
+ return;
+ }
+ va = pmap_va_asid(pmap, (va & ~PGOFSET));
+ mips_TBIS(va);
+}
+
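+/*
+ * pmap_TLB_invalidate_kernel() and pmap_TLB_update_kernel() below form the
+ * TLB lookup value by OR'ing the current TLB PID (ASID) into the virtual
+ * address, roughly va | (pid << VMTLB_PID_SHIFT), before issuing the
+ * invalidate or update.  User pmaps get the same treatment via
+ * pmap_va_asid(), using the ASID cached in pm_asid[] for this CPU.
+ */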
+static void
+pmap_TLB_invalidate_kernel(vm_offset_t va)
+{
+ u_int32_t pid;
+
+ MachTLBGetPID(pid);
+ va = va | (pid << VMTLB_PID_SHIFT);
+ mips_TBIS(va);
+}
+
+struct pmap_update_page_arg {
+ pmap_t pmap;
+ vm_offset_t va;
+ pt_entry_t pte;
+};
+
+void
+pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
+{
+#ifdef SMP
+ struct pmap_update_page_arg arg;
+
+ arg.pmap = pmap;
+ arg.va = va;
+ arg.pte = pte;
+
+ smp_rendezvous(0, pmap_update_page_action, 0, (void *)&arg);
+}
+
+static void
+pmap_update_page_action(void *arg)
+{
+ pmap_t pmap = ((struct pmap_update_page_arg *)arg)->pmap;
+ vm_offset_t va = ((struct pmap_update_page_arg *)arg)->va;
+ pt_entry_t pte = ((struct pmap_update_page_arg *)arg)->pte;
+
+#endif
+ if (is_kernel_pmap(pmap)) {
+ pmap_TLB_update_kernel(va, pte);
+ return;
+ }
+ if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
+ return;
+ else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
+ pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
+ return;
+ }
+ va = pmap_va_asid(pmap, va);
+ MachTLBUpdate(va, pte);
+}
+
+static void
+pmap_TLB_update_kernel(vm_offset_t va, pt_entry_t pte)
+{
+ u_int32_t pid;
+
+ MachTLBGetPID(pid);
+ va = va | (pid << VMTLB_PID_SHIFT);
+
+ MachTLBUpdate(va, pte);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_paddr_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+ pt_entry_t *pte;
+ vm_offset_t retval = 0;
+
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, va);
+ if (pte) {
+ retval = mips_tlbpfn_to_paddr(*pte) | (va & PAGE_MASK);
+ }
+ PMAP_UNLOCK(pmap);
+ return retval;
+}
+
+/*
+ * Routine: pmap_extract_and_hold
+ * Function:
+ * Atomically extract and hold the physical page
+ * with the given pmap and virtual address pair
+ * if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ pt_entry_t pte;
+ vm_page_t m;
+
+ m = NULL;
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+
+ pte = *pmap_pte(pmap, va);
+ if (pte != 0 && pmap_pte_v(&pte) &&
+ ((pte & PTE_RW) || (prot & VM_PROT_WRITE) == 0)) {
+ m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
+ vm_page_hold(m);
+ }
+ vm_page_unlock_queues();
+ PMAP_UNLOCK(pmap);
+ return (m);
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * add a wired page to the kva
+ */
+ /* PMAP_INLINE */ void
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+{
+ register pt_entry_t *pte;
+ pt_entry_t npte, opte;
+
+ npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;
+
+ if (is_cacheable_mem(pa))
+ npte |= PTE_CACHE;
+ else
+ npte |= PTE_UNCACHED;
+
+ pte = pmap_pte(kernel_pmap, va);
+ opte = *pte;
+ *pte = npte;
+
+ pmap_update_page(kernel_pmap, va, npte);
+}
+
+/*
+ * remove a page from the kernel pagetables
+ */
+ /* PMAP_INLINE */ void
+pmap_kremove(vm_offset_t va)
+{
+ register pt_entry_t *pte;
+
+ pte = pmap_pte(kernel_pmap, va);
+ *pte = PTE_G;
+ pmap_invalidate_page(kernel_pmap, va);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ vm_offset_t va, sva;
+
+ va = sva = *virt;
+ while (start < end) {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ *virt = va;
+ return (sva);
+}
+
+/*
+ * Add a list of wired pages to the kva
+ * this routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
+ va += PAGE_SIZE;
+ }
+}
+
+/*
+ * this routine jerks page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t va, int count)
+{
+ while (count-- > 0) {
+ pmap_kremove(va);
+ va += PAGE_SIZE;
+ }
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+
+/*
+ * floating pages (FPAGES) management routines
+ *
+ * FPAGES are the reserved virtual memory areas which can be
+ * mapped to any physical memory. This gets used typically
+ * in the following functions:
+ *
+ * pmap_zero_page
+ * pmap_copy_page
+ */
+
+/*
+ * Create the floating pages, aka FPAGES!
+ */
+static void
+pmap_init_fpage()
+{
+ vm_offset_t kva;
+ int i, j;
+ struct sysmaps *sysmaps;
+
+ /*
+ * We allocate a total of (FPAGES*MAXCPU + FPAGES_SHARED + 1) pages
+	 * at first.  FPAGES & FPAGES_SHARED should be EVEN.  Then we'll adjust
+ * 'kva' to be even-page aligned so that the fpage area can be wired
+ * in the TLB with a single TLB entry.
+ */
+ kva = kmem_alloc_nofault(kernel_map,
+ (FPAGES * MAXCPU + 1 + FPAGES_SHARED) * PAGE_SIZE);
+ if ((void *)kva == NULL)
+ panic("pmap_init_fpage: fpage allocation failed");
+
+ /*
+	 * Make kva start at an even page number so we can wire down the
+	 * fpage area in the TLB with a single TLB entry.
+ */
+ if ((((vm_offset_t)kva) >> PGSHIFT) & 1) {
+ /*
+ * 'kva' is not even-page aligned. Adjust it and free the
+ * first page which is unused.
+ */
+ kmem_free(kernel_map, (vm_offset_t)kva, NBPG);
+ kva = ((vm_offset_t)kva) + NBPG;
+ } else {
+ /*
+ * 'kva' is even page aligned. We don't need the last page,
+ * free it.
+ */
+ kmem_free(kernel_map, ((vm_offset_t)kva) + FSPACE, NBPG);
+ }
+
+ for (i = 0; i < MAXCPU; i++) {
+ sysmaps = &sysmaps_pcpu[i];
+ mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
+
+ /* Assign FPAGES pages to the CPU */
+ for (j = 0; j < FPAGES; j++)
+ sysmaps->fp[j].kva = kva + (j) * PAGE_SIZE;
+ kva = ((vm_offset_t)kva) + (FPAGES * PAGE_SIZE);
+ }
+
+ /*
+ * An additional 2 pages are needed, one for pmap_zero_page_idle()
+ * and one for coredump. These pages are shared by all cpu's
+ */
+ fpages_shared[PMAP_FPAGE3].kva = kva;
+ fpages_shared[PMAP_FPAGE_KENTER_TEMP].kva = kva + PAGE_SIZE;
+}
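+
+/*
+ * A minimal usage sketch of the fpage mechanism (the PMAP_FPAGE1 slot name
+ * used here is an illustrative assumption): a caller pins itself to a CPU,
+ * takes that CPU's sysmaps lock, maps the physical page at one of its
+ * fpage KVAs, operates on it through the temporary mapping, and unmaps it.
+ */
+#if 0
+static void
+example_zero_phys_page(vm_paddr_t pa)
+{
+	struct sysmaps *sysmaps;
+	vm_offset_t va;
+
+	sched_pin();				/* stay on this CPU */
+	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+	mtx_lock(&sysmaps->lock);
+	va = pmap_map_fpage(pa, &sysmaps->fp[PMAP_FPAGE1], FALSE);
+	bzero((caddr_t)va, PAGE_SIZE);		/* use the temporary mapping */
+	pmap_unmap_fpage(pa, &sysmaps->fp[PMAP_FPAGE1]);
+	mtx_unlock(&sysmaps->lock);
+	sched_unpin();
+}
+#endif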
+
+/*
+ * Map the page to the fpage virtual address specified through the fpage id
+ */
+vm_offset_t
+pmap_map_fpage(vm_paddr_t pa, struct fpage *fp, boolean_t check_unmaped)
+{
+ vm_offset_t kva;
+ register pt_entry_t *pte;
+ pt_entry_t npte;
+
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+ /*
+ * Check if the fpage is free
+ */
+ if (fp->state) {
+ if (check_unmaped == TRUE)
+ pmap_unmap_fpage(pa, fp);
+ else
+ panic("pmap_map_fpage: fpage is busy");
+ }
+ fp->state = TRUE;
+ kva = fp->kva;
+
+ npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pte = pmap_pte(kernel_pmap, kva);
+ *pte = npte;
+
+ pmap_TLB_update_kernel(kva, npte);
+
+ return (kva);
+}
+
+/*
+ * Unmap the page from the fpage virtual address specified through the fpage id
+ */
+void
+pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp)
+{
+ vm_offset_t kva;
+ register pt_entry_t *pte;
+
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+ /*
+ * Check if the fpage is busy
+ */
+ if (!(fp->state)) {
+ panic("pmap_unmap_fpage: fpage is free");
+ }
+ kva = fp->kva;
+
+ pte = pmap_pte(kernel_pmap, kva);
+ *pte = PTE_G;
+ pmap_TLB_invalidate_kernel(kva);
+
+ fp->state = FALSE;
+
+ /*
+ * Should there be any flush operation at the end?
+ */
+}
+
+/* Revision 1.507
+ *
+ * Simplify the reference counting of page table pages. Specifically, use
+ * the page table page's wired count rather than its hold count to contain
+ * the reference count.
+ */
+
+/*
+ * This routine drops a reference on a page table page; once its wire
+ * count reaches zero, the page is unmapped from the segtab and freed.
+ */
+static int
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+{
+
+ /*
+ * unmap the page table page
+ */
+ pmap->pm_segtab[m->pindex] = 0;
+ --pmap->pm_stats.resident_count;
+
+ if (pmap->pm_ptphint == m)
+ pmap->pm_ptphint = NULL;
+
+ /*
+ * If the page is finally unwired, simply free it.
+ */
+ vm_page_free_zero(m);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
+ return (1);
+}
+
+static PMAP_INLINE int
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+{
+ --m->wire_count;
+ if (m->wire_count == 0)
+ return (_pmap_unwire_pte_hold(pmap, m));
+ else
+ return (0);
+}
+
+/*
+ * After removing a page table entry, this routine is used to
+ * conditionally free the page, and manage the hold/wire counts.
+ */
+static int
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
+{
+ unsigned ptepindex;
+ pd_entry_t pteva;
+
+ if (va >= VM_MAXUSER_ADDRESS)
+ return (0);
+
+ if (mpte == NULL) {
+ ptepindex = (va >> SEGSHIFT);
+ if (pmap->pm_ptphint &&
+ (pmap->pm_ptphint->pindex == ptepindex)) {
+ mpte = pmap->pm_ptphint;
+ } else {
+ pteva = *pmap_pde(pmap, va);
+ mpte = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pteva));
+ pmap->pm_ptphint = mpte;
+ }
+ }
+ return pmap_unwire_pte_hold(pmap, mpte);
+}
+
+void
+pmap_pinit0(pmap_t pmap)
+{
+ int i;
+
+ PMAP_LOCK_INIT(pmap);
+ pmap->pm_segtab = kernel_segmap;
+ pmap->pm_active = 0;
+ pmap->pm_ptphint = NULL;
+ for (i = 0; i < MAXCPU; i++) {
+ pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
+ pmap->pm_asid[i].gen = 0;
+ }
+ PCPU_SET(curpmap, pmap);
+ TAILQ_INIT(&pmap->pm_pvlist);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+int
+pmap_pinit(pmap_t pmap)
+{
+ vm_page_t ptdpg;
+ int i;
+ int req;
+
+ PMAP_LOCK_INIT(pmap);
+
+ req = VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO;
+
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool)
+ req |= VM_ALLOC_WIRED_TLB_PG_POOL;
+#endif
+ /*
+ * allocate the page directory page
+ */
+ ptdpg = vm_page_alloc(NULL, NUSERPGTBLS, req);
+
+#if 0
+ /* I think we can just delete these, now that PG_BUSY is gone */
+ vm_page_lock_queues();
+ vm_page_flag_clear(ptdpg, PTE_BUSY); /* not usually mapped */
+#endif
+ ptdpg->valid = VM_PAGE_BITS_ALL;
+
+#if 0
+ vm_page_unlock_queues();
+#endif
+
+ pmap->pm_segtab = (pd_entry_t *)
+ MIPS_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(ptdpg));
+ if ((ptdpg->flags & PG_ZERO) == 0)
+ bzero(pmap->pm_segtab, PAGE_SIZE);
+
+ pmap->pm_active = 0;
+ pmap->pm_ptphint = NULL;
+ for (i = 0; i < MAXCPU; i++) {
+ pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
+ pmap->pm_asid[i].gen = 0;
+ }
+ TAILQ_INIT(&pmap->pm_pvlist);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+
+ return (1);
+}
+
+/*
+ * this routine is called if the page table page is not
+ * mapped correctly.
+ */
+static vm_page_t
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
+{
+ vm_offset_t pteva, ptepa;
+ vm_page_t m;
+ int req;
+
+ KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
+ (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
+ ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
+
+ req = VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ;
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool)
+ req |= VM_ALLOC_WIRED_TLB_PG_POOL;
+#endif
+ /*
+ * Find or fabricate a new pagetable page
+ */
+ if ((m = vm_page_alloc(NULL, ptepindex, req)) == NULL) {
+ if (flags & M_WAITOK) {
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ VM_WAIT;
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+ }
+ /*
+ * Indicate the need to retry. While waiting, the page
+ * table page may have been allocated.
+ */
+ return (NULL);
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+
+ KASSERT(m->queue == PQ_NONE,
+ ("_pmap_allocpte: %p->queue != PQ_NONE", m));
+
+ /*
+ * Map the pagetable page into the process address space, if it
+ * isn't already there.
+ */
+
+ pmap->pm_stats.resident_count++;
+
+ ptepa = VM_PAGE_TO_PHYS(m);
+ pteva = MIPS_PHYS_TO_CACHED(ptepa);
+ pmap->pm_segtab[ptepindex] = (pd_entry_t)pteva;
+
+ /*
+ * Set the page table hint
+ */
+ pmap->pm_ptphint = m;
+
+ /*
+ * Kernel page tables are allocated in pmap_bootstrap() or
+ * pmap_growkernel().
+ */
+ if (is_kernel_pmap(pmap))
+ panic("_pmap_allocpte() called for kernel pmap\n");
+
+ m->valid = VM_PAGE_BITS_ALL;
+ vm_page_flag_clear(m, PG_ZERO);
+
+ return (m);
+}
+
+static vm_page_t
+pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+{
+ unsigned ptepindex;
+ vm_offset_t pteva;
+ vm_page_t m;
+
+ KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
+ (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
+ ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = va >> SEGSHIFT;
+retry:
+ /*
+ * Get the page directory entry
+ */
+ pteva = (vm_offset_t)pmap->pm_segtab[ptepindex];
+
+ /*
+ * If the page table page is mapped, we just increment the hold
+ * count, and activate it.
+ */
+ if (pteva) {
+ /*
+ * In order to get the page table page, try the hint first.
+ */
+ if (pmap->pm_ptphint &&
+ (pmap->pm_ptphint->pindex == ptepindex)) {
+ m = pmap->pm_ptphint;
+ } else {
+ m = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pteva));
+ pmap->pm_ptphint = m;
+ }
+ m->wire_count++;
+ } else {
+ /*
+ * Here if the pte page isn't mapped, or if it has been
+ * deallocated.
+ */
+ m = _pmap_allocpte(pmap, ptepindex, flags);
+ if (m == NULL && (flags & M_WAITOK))
+ goto retry;
+ }
+ return m;
+}
+
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+/*
+ * Revision 1.397
+ * - Merged pmap_release and pmap_release_free_page. When pmap_release is
+ * called only the page directory page(s) can be left in the pmap pte
+ * object, since all page table pages will have been freed by
+ * pmap_remove_pages and pmap_remove. In addition, there can only be one
+ * reference to the pmap and the page directory is wired, so the page(s)
+ * can never be busy. So all there is to do is clear the magic mappings
+ * from the page directory and free the page(s).
+ */
+
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+ vm_page_t ptdpg;
+
+ KASSERT(pmap->pm_stats.resident_count == 0,
+ ("pmap_release: pmap resident count %ld != 0",
+ pmap->pm_stats.resident_count));
+
+ ptdpg = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pmap->pm_segtab));
+
+ vm_page_lock_queues();
+ ptdpg->wire_count--;
+ atomic_subtract_int(&cnt.v_wire_count, 1);
+ vm_page_free_zero(ptdpg);
+ vm_page_unlock_queues();
+}
+
+/*
+ * Changes:
+ * * Replace splhigh()/splx() with critical_enter()/critical_exit()
+ * * Use the VM_ALLOC_WIRED flag for allocating the new page.
+ */
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+ vm_offset_t ptppaddr;
+ vm_page_t nkpg;
+ pt_entry_t *pte;
+ int i, req;
+
+ critical_enter();
+ if (kernel_vm_end == 0) {
+ kernel_vm_end = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
+ nkpt = 0;
+ while (segtab_pde(kernel_segmap, kernel_vm_end)) {
+ kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
+ ~(PAGE_SIZE * NPTEPG - 1);
+ nkpt++;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ }
+ }
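+	/*
+	 * The arithmetic below advances addr to the next second-level page
+	 * table (segment) boundary.  For illustration (these numbers are
+	 * assumptions): with 4K pages and NPTEPG of 1024 each step covers
+	 * 4 Meg, so an addr of 0xc0501000 rounds up to 0xc0800000.
+	 */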
+ addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
+ if (addr - 1 >= kernel_map->max_offset)
+ addr = kernel_map->max_offset;
+ while (kernel_vm_end < addr) {
+ if (segtab_pde(kernel_segmap, kernel_vm_end)) {
+ kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
+ ~(PAGE_SIZE * NPTEPG - 1);
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ continue;
+ }
+ /*
+ * This index is bogus, but out of the way
+ */
+ req = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool)
+ req |= VM_ALLOC_WIRED_TLB_PG_POOL;
+#endif
+ nkpg = vm_page_alloc(NULL, nkpt, req);
+ if (!nkpg)
+ panic("pmap_growkernel: no memory to grow kernel");
+
+ nkpt++;
+
+ ptppaddr = VM_PAGE_TO_PHYS(nkpg);
+ if (ptppaddr >= MIPS_KSEG0_LARGEST_PHYS) {
+ /*
+ * We need to do something here, but I am not sure
+ * what. We can access anything in the 0 - 512Meg
+ * region, but if we get a page to go in the kernel
+			 * segmap that is outside of that we really need
+ * to have another mapping beyond the temporary ones
+ * I have. Not sure how to do this yet. FIXME FIXME.
+ */
+ panic("Gak, can't handle a k-page table outside of lower 512Meg");
+ }
+ pte = (pt_entry_t *)MIPS_PHYS_TO_CACHED(ptppaddr);
+ segtab_pde(kernel_segmap, kernel_vm_end) = (pd_entry_t)pte;
+
+ /*
+ * The R[4-7]?00 stores only one copy of the Global bit in
+ * the translation lookaside buffer for each 2 page entry.
+		 * Thus invalid entries must have the Global bit set so that
+		 * when the EntryLo and EntryHi G bits are ANDed together
+		 * they produce a global bit to store in the TLB.
+ */
+ for (i = 0; i < NPTEPG; i++, pte++)
+ *pte = PTE_G;
+
+ kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
+ ~(PAGE_SIZE * NPTEPG - 1);
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ }
+ critical_exit();
+}
+
+/***************************************************
+ * Page management routines.
+ ***************************************************/
+
+/*
+ * free the pv_entry back to the free list
+ */
+static PMAP_INLINE void
+free_pv_entry(pv_entry_t pv)
+{
+
+ pv_entry_count--;
+ uma_zfree(pvzone, pv);
+}
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ * the memory allocation is performed bypassing the malloc code
+ * because of the possibility of allocations at interrupt time.
+ */
+static pv_entry_t
+get_pv_entry(void)
+{
+
+ pv_entry_count++;
+ if ((pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
+ pmap_pagedaemon_waken = 1;
+ wakeup(&vm_pages_needed);
+ }
+ return uma_zalloc(pvzone, M_NOWAIT);
+}
+
+/*
+ * Revision 1.370
+ *
+ * Move pmap_collect() out of the machine-dependent code, rename it
+ * to reflect its new location, and add page queue and flag locking.
+ *
+ * Notes: (1) alpha, i386, and ia64 had identical implementations
+ * of pmap_collect() in terms of machine-independent interfaces;
+ * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
+ *
+ * MIPS implementation was identical to alpha [Junos 8.2]
+ */
+
+/*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+
+static void
+pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ if (pmap == pv->pv_pmap && va == pv->pv_va)
+ break;
+ }
+ } else {
+ TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
+ if (va == pv->pv_va)
+ break;
+ }
+ }
+
+ KASSERT(pv != NULL, ("pmap_remove_entry: pv not found"));
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ m->md.pv_list_count--;
+ if (TAILQ_FIRST(&m->md.pv_list) == NULL)
+ vm_page_flag_clear(m, PG_WRITEABLE);
+
+ TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
+ free_pv_entry(pv);
+}
+
+/*
+ * Create a pv entry for page at pa for
+ * (pmap, va).
+ */
+static void
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m,
+ boolean_t wired)
+{
+
+ pv_entry_t pv;
+
+ pv = get_pv_entry();
+ if (pv == NULL)
+ panic("no pv entries: increase vm.pmap.shpgperproc");
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_ptem = mpte;
+ pv->pv_wired = wired;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+ m->md.pv_list_count++;
+
+}
+
+/*
+ * pmap_remove_pte: do the things to unmap a page in a process
+ */
+static int
+pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
+{
+ pt_entry_t oldpte;
+ vm_page_t m;
+ vm_offset_t pa;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ oldpte = loadandclear((u_int *)ptq);
+ if (is_kernel_pmap(pmap))
+ *ptq = PTE_G;
+
+ if (oldpte & PTE_W)
+ pmap->pm_stats.wired_count -= 1;
+
+ pmap->pm_stats.resident_count -= 1;
+ pa = mips_tlbpfn_to_paddr(oldpte);
+
+ if (page_is_managed(pa)) {
+ m = PHYS_TO_VM_PAGE(pa);
+ if (oldpte & PTE_M) {
+#if defined(PMAP_DIAGNOSTIC)
+ if (pmap_nw_modified(oldpte)) {
+ printf(
+ "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
+ va, oldpte);
+ }
+#endif
+ if (pmap_track_modified(va))
+ vm_page_dirty(m);
+ }
+ if (m->md.pv_flags & PV_TABLE_REF)
+ vm_page_flag_set(m, PG_REFERENCED);
+ m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
+
+ pmap_remove_entry(pmap, m, va);
+ }
+ return pmap_unuse_pt(pmap, va, NULL);
+
+}
+
+/*
+ * Remove a single page from a process address space
+ */
+static void
+pmap_remove_page(struct pmap *pmap, vm_offset_t va)
+{
+ register pt_entry_t *ptq;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ ptq = pmap_pte(pmap, va);
+
+ /*
+ * if there is no pte for this address, just skip it!!!
+ */
+ if (!ptq || !pmap_pte_v(ptq)) {
+ return;
+ }
+ /*
+ * get a local va for mappings for this pmap.
+ */
+ (void)pmap_remove_pte(pmap, ptq, va);
+ pmap_invalidate_page(pmap, va);
+
+ return;
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t va, nva;
+
+ if (pmap == NULL)
+ return;
+
+ if (pmap->pm_stats.resident_count == 0)
+ return;
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+
+ /*
+ * special handling of removing one page. a very common operation
+ * and easy to short circuit some code.
+ */
+ if ((sva + PAGE_SIZE) == eva) {
+ pmap_remove_page(pmap, sva);
+ goto out;
+ }
+ for (va = sva; va < eva; va = nva) {
+ if (!*pmap_pde(pmap, va)) {
+ nva = mips_segtrunc(va + MIPS_SEGSIZE);
+ continue;
+ }
+ pmap_remove_page(pmap, va);
+ nva = va + PAGE_SIZE;
+ }
+
+out:
+ vm_page_unlock_queues();
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ */
+
+void
+pmap_remove_all(vm_page_t m)
+{
+ register pv_entry_t pv;
+ register pt_entry_t *pte, tpte;
+
+#if defined(PMAP_DEBUG)
+ /*
+ * XXX This makes pmap_remove_all() illegal for non-managed pages!
+ */
+ if (m->flags & PG_FICTITIOUS) {
+ panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
+ }
+#endif
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
+ if (m->md.pv_flags & PV_TABLE_REF)
+ vm_page_flag_set(m, PG_REFERENCED);
+
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ PMAP_LOCK(pv->pv_pmap);
+ pv->pv_pmap->pm_stats.resident_count--;
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+
+ tpte = loadandclear((u_int *)pte);
+ if (is_kernel_pmap(pv->pv_pmap))
+ *pte = PTE_G;
+
+ if (tpte & PTE_W)
+ pv->pv_pmap->pm_stats.wired_count--;
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if (tpte & PTE_M) {
+#if defined(PMAP_DIAGNOSTIC)
+ if (pmap_nw_modified(tpte)) {
+ printf(
+ "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
+ pv->pv_va, tpte);
+ }
+#endif
+ if (pmap_track_modified(pv->pv_va))
+ vm_page_dirty(m);
+ }
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+
+ TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ m->md.pv_list_count--;
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+ PMAP_UNLOCK(pv->pv_pmap);
+ free_pv_entry(pv);
+ }
+
+ vm_page_flag_clear(m, PG_WRITEABLE);
+ m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ pt_entry_t *pte;
+
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+ while (sva < eva) {
+ pt_entry_t pbits, obits;
+ vm_page_t m;
+ vm_offset_t pa;
+
+ /*
+ * If segment table entry is empty, skip this segment.
+ */
+ if (!*pmap_pde(pmap, sva)) {
+ sva = mips_segtrunc(sva + MIPS_SEGSIZE);
+ continue;
+ }
+ /*
+ * If pte is invalid, skip this page
+ */
+ pte = pmap_pte(pmap, sva);
+ if (!pmap_pte_v(pte)) {
+ sva += PAGE_SIZE;
+ continue;
+ }
+retry:
+ obits = pbits = *pte;
+ pa = mips_tlbpfn_to_paddr(pbits);
+
+ if (page_is_managed(pa)) {
+ m = PHYS_TO_VM_PAGE(pa);
+ if (m->md.pv_flags & PV_TABLE_REF) {
+ vm_page_flag_set(m, PG_REFERENCED);
+ m->md.pv_flags &= ~PV_TABLE_REF;
+ }
+ if (pbits & PTE_M) {
+ if (pmap_track_modified(sva)) {
+ vm_page_dirty(m);
+ m->md.pv_flags &= ~PV_TABLE_MOD;
+ }
+ }
+ }
+ pbits = (pbits & ~PTE_M) | PTE_RO;
+
+ if (pbits != *pte) {
+ if (!atomic_cmpset_int((u_int *)pte, obits, pbits))
+ goto retry;
+ pmap_update_page(pmap, sva, pbits);
+ }
+ sva += PAGE_SIZE;
+ }
+ vm_page_unlock_queues();
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t fault_type, vm_page_t m, vm_prot_t prot,
+ boolean_t wired)
+{
+ vm_offset_t pa, opa;
+ register pt_entry_t *pte;
+ pt_entry_t origpte, newpte;
+ vm_page_t mpte, om;
+ int rw = 0;
+
+ if (pmap == NULL)
+ return;
+
+ va &= ~PAGE_MASK;
+#ifdef PMAP_DIAGNOSTIC
+ if (va > VM_MAX_KERNEL_ADDRESS)
+ panic("pmap_enter: toobig");
+#endif
+
+ mpte = NULL;
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+
+ /*
+ * In the case that a page table page is not resident, we are
+ * creating it here.
+ */
+ if (va < VM_MAXUSER_ADDRESS) {
+ mpte = pmap_allocpte(pmap, va, M_WAITOK);
+ }
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Page Directory table entry not valid, we need a new PT page
+ */
+ if (pte == NULL) {
+ panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
+ (void *)pmap->pm_segtab, va);
+ }
+ pa = VM_PAGE_TO_PHYS(m);
+ om = NULL;
+ origpte = *pte;
+ opa = mips_tlbpfn_to_paddr(origpte);
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if ((origpte & PTE_V) && (opa == pa)) {
+ /*
+ * Wiring change, just update stats. We don't worry about
+ * wiring PT pages as they remain resident as long as there
+ * are valid mappings in them. Hence, if a user page is
+ * wired, the PT page will be also.
+ */
+ if (wired && ((origpte & PTE_W) == 0))
+ pmap->pm_stats.wired_count++;
+ else if (!wired && (origpte & PTE_W))
+ pmap->pm_stats.wired_count--;
+
+#if defined(PMAP_DIAGNOSTIC)
+ if (pmap_nw_modified(origpte)) {
+ printf(
+ "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
+ va, origpte);
+ }
+#endif
+
+ /*
+ * Remove extra pte reference
+ */
+ if (mpte)
+ mpte->wire_count--;
+
+ /*
+ * We might be turning off write access to the page, so we
+ * go ahead and sense modify status.
+ */
+ if (page_is_managed(opa)) {
+ om = m;
+ }
+ goto validate;
+ }
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+ if (origpte & PTE_W)
+ pmap->pm_stats.wired_count--;
+
+ if (page_is_managed(opa)) {
+ om = PHYS_TO_VM_PAGE(opa);
+ pmap_remove_entry(pmap, om, va);
+ }
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ KASSERT(mpte->wire_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%x", va));
+ }
+ } else
+ pmap->pm_stats.resident_count++;
+
+ /*
+ * Enter on the PV list if part of our managed memory. Note that we
+ * raise IPL while manipulating pv_table since pmap_enter can be
+ * called at interrupt time.
+ */
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ pmap_insert_entry(pmap, va, mpte, m, wired);
+ }
+ /*
+ * Increment counters
+ */
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ rw = init_pte_prot(va, m, prot);
+
+ /*
+ * Now validate mapping with desired protection/wiring.
+ */
+ newpte = mips_paddr_to_tlbpfn(pa) | rw | PTE_V;
+
+ if (is_cacheable_mem(pa))
+ newpte |= PTE_CACHE;
+ else
+ newpte |= PTE_UNCACHED;
+
+ if (wired)
+ newpte |= PTE_W;
+
+ if (is_kernel_pmap(pmap)) {
+ newpte |= PTE_G;
+ }
+
+ /*
+ * if the mapping or permission bits are different, we need to
+ * update the pte.
+ */
+ if (origpte != newpte) {
+ if (origpte & PTE_V) {
+ *pte = newpte;
+ if (page_is_managed(opa) && (opa != pa)) {
+ if (om->md.pv_flags & PV_TABLE_REF)
+ vm_page_flag_set(om, PG_REFERENCED);
+ om->md.pv_flags &=
+ ~(PV_TABLE_REF | PV_TABLE_MOD);
+ }
+ if (origpte & PTE_M) {
+ KASSERT((origpte & PTE_RW),
+ ("pmap_enter: modified page not writable:"
+ " va: 0x%x, pte: 0x%lx", va, origpte));
+ if ((page_is_managed(opa)) &&
+ pmap_track_modified(va))
+ vm_page_dirty(om);
+ }
+ } else {
+ *pte = newpte;
+ }
+ }
+ pmap_update_page(pmap, va, newpte);
+
+ /*
+	 * Sync I & D caches for executable pages. Do this only if the
+ * target pmap belongs to the current process. Otherwise, an
+ * unresolvable TLB miss may occur.
+ */
+ if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
+ (prot & VM_PROT_EXECUTE)) {
+ mips_icache_sync_range(va, NBPG);
+ mips_dcache_wbinv_range(va, NBPG);
+ }
+ vm_page_unlock_queues();
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+ pt_entry_t *pte;
+ vm_offset_t pa;
+ vm_page_t mpte = NULL;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ PMAP_LOCK(pmap);
+ /*
+ * In the case that a page table page is not resident, we are
+ * creating it here.
+ */
+ if (va < VM_MAXUSER_ADDRESS) {
+ unsigned ptepindex;
+ vm_offset_t pteva;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = va >> SEGSHIFT;
+ if (mpte && (mpte->pindex == ptepindex)) {
+ mpte->wire_count++;
+ } else {
+ retry:
+ /*
+ * Get the page directory entry
+ */
+ pteva = (vm_offset_t)pmap->pm_segtab[ptepindex];
+
+ /*
+ * If the page table page is mapped, we just
+ * increment the hold count, and activate it.
+ */
+ if (pteva) {
+ if (pmap->pm_ptphint &&
+ (pmap->pm_ptphint->pindex == ptepindex)) {
+ mpte = pmap->pm_ptphint;
+ } else {
+ mpte = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pteva));
+ pmap->pm_ptphint = mpte;
+ }
+ mpte->wire_count++;
+ } else {
+ mpte = _pmap_allocpte(pmap, ptepindex, M_NOWAIT);
+ if (mpte == NULL) {
+ PMAP_UNLOCK(pmap);
+ vm_page_busy(m);
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(m->object);
+ VM_WAIT;
+ VM_OBJECT_LOCK(m->object);
+ vm_page_lock_queues();
+ vm_page_wakeup(m);
+ PMAP_LOCK(pmap);
+ goto retry;
+ }
+ }
+ }
+ } else {
+ mpte = NULL;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pmap_pte_v(pte)) {
+ if (mpte)
+ pmap_unwire_pte_hold(pmap, mpte);
+ PMAP_UNLOCK(pmap);
+ return;
+ }
+ /*
+ * Enter on the PV list if part of our managed memory. Note that we
+ * raise IPL while manipulating pv_table since pmap_enter can be
+ * called at interrupt time.
+ */
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+ pmap_insert_entry(pmap, va, mpte, m, FALSE);
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+
+ pa = VM_PAGE_TO_PHYS(m);
+
+ /*
+ * Now validate mapping with RO protection
+ */
+ *pte = mips_paddr_to_tlbpfn(pa) | PTE_V;
+
+ if (is_cacheable_mem(pa))
+ *pte |= PTE_CACHE;
+ else
+ *pte |= PTE_UNCACHED;
+
+ if (is_kernel_pmap(pmap))
+ *pte |= PTE_G;
+ else {
+ *pte |= PTE_RO;
+ /*
+		 * Sync I & D caches. Do this only if the target pmap
+ * belongs to the current process. Otherwise, an
+ * unresolvable TLB miss may occur. */
+ if (pmap == &curproc->p_vmspace->vm_pmap) {
+ va &= ~PAGE_MASK;
+ mips_icache_sync_range(va, NBPG);
+ mips_dcache_wbinv_range(va, NBPG);
+ }
+ }
+
+ PMAP_UNLOCK(pmap);
+ return;
+}
+
+/*
+ * Make a temporary mapping for a physical address. This is only intended
+ * to be used for panic dumps.
+ */
+void *
+pmap_kenter_temporary(vm_paddr_t pa, int i)
+{
+ vm_offset_t va;
+
+ if (i != 0)
+ printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
+ __func__);
+
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool) {
+ va = pmap_map_fpage(pa, &fpages_shared[PMAP_FPAGE_KENTER_TEMP],
+ TRUE);
+ } else
+#endif
+ if (pa < MIPS_KSEG0_LARGEST_PHYS) {
+ va = MIPS_PHYS_TO_CACHED(pa);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ /* Since this is for the debugger, no locks or any other fun */
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ va = (vm_offset_t)sysm->CADDR1;
+ }
+ return ((void *)va);
+}
+
+void
+pmap_kenter_temporary_free(vm_paddr_t pa)
+{
+ int cpu;
+ struct local_sysmaps *sysm;
+
+ if (pa < MIPS_KSEG0_LARGEST_PHYS) {
+ /* nothing to do for this case */
+ return;
+ }
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ if (sysm->valid1) {
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ }
+}
+
+/*
+ * The code here was moved to the machine-independent vm_map_pmap_enter().
+ */
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
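+/*
+ * An illustrative example of the interface described above; the numbers are
+ * made up purely to show the offset arithmetic.  If m_start has pindex 8 and
+ * the object has resident pages at pindexes 8, 9 and 11, then a call such as
+ *
+ *	pmap_enter_object(pmap, start, start + 4 * PAGE_SIZE, m_start,
+ *	    VM_PROT_READ);
+ *
+ * maps those pages at start, start + PAGE_SIZE and start + 3 * PAGE_SIZE
+ * respectively; the missing page at pindex 10 is simply skipped, and nothing
+ * is mapped at or beyond end.
+ */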
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ vm_page_t m;
+ vm_pindex_t diff, psize;
+
+ psize = atop(end - start);
+ m = m_start;
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+		/*
+		 * FIX ME - prot is passed both in the normal spot (m, prot)
+		 * and as the fault_type, which we don't use.  If we ever use
+		 * fault_type in pmap_enter() we will have to fix this.
+		 */
+ pmap_enter(pmap, start + ptoa(diff), prot, m, prot &
+ (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ m = TAILQ_NEXT(m, listq);
+ }
+}
+
+/*
+ * pmap_object_init_pt preloads the ptes for a given object
+ * into the specified pmap. This eliminates the blast of soft
+ * faults on process startup and immediately after an mmap.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
+ vm_object_t object, vm_pindex_t pindex, vm_size_t size)
+{
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE,
+ ("pmap_object_init_pt: non-device object"));
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+{
+ register pt_entry_t *pte;
+
+ if (pmap == NULL)
+ return;
+
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, va);
+
+ if (wired && !pmap_pte_w(pte))
+ pmap->pm_stats.wired_count++;
+ else if (!wired && pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+
+ /*
+ * Wiring is not a hardware characteristic so there is no need to
+ * invalidate TLB.
+ */
+ pmap_pte_set_w(pte, wired);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
+{
+}
+
+/*
+ * pmap_zero_page zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+ vm_offset_t va;
+ vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
+
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool) {
+ struct fpage *fp1;
+ struct sysmaps *sysmaps;
+
+ sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+ mtx_lock(&sysmaps->lock);
+ sched_pin();
+
+ fp1 = &sysmaps->fp[PMAP_FPAGE1];
+ va = pmap_map_fpage(phys, fp1, FALSE);
+ bzero((caddr_t)va, PAGE_SIZE);
+ pmap_unmap_fpage(phys, fp1);
+ sched_unpin();
+ mtx_unlock(&sysmaps->lock);
+ /*
+		 * Should we flush the cache here?
+ */
+ } else
+#endif
+ if (phys < MIPS_KSEG0_LARGEST_PHYS) {
+
+ va = MIPS_PHYS_TO_CACHED(phys);
+
+ bzero((caddr_t)va, PAGE_SIZE);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ bzero(sysm->CADDR1, PAGE_SIZE);
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
+ }
+
+}
+
+/*
+ * pmap_zero_page_area zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.
+ *
+ * off and size may not cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+ vm_offset_t va;
+ vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
+
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool) {
+ struct fpage *fp1;
+ struct sysmaps *sysmaps;
+
+ sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+ mtx_lock(&sysmaps->lock);
+ sched_pin();
+
+ fp1 = &sysmaps->fp[PMAP_FPAGE1];
+ va = pmap_map_fpage(phys, fp1, FALSE);
+ bzero((caddr_t)va + off, size);
+ pmap_unmap_fpage(phys, fp1);
+
+ sched_unpin();
+ mtx_unlock(&sysmaps->lock);
+ } else
+#endif
+ if (phys < MIPS_KSEG0_LARGEST_PHYS) {
+ va = MIPS_PHYS_TO_CACHED(phys);
+ bzero((char *)(caddr_t)va + off, size);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ bzero((char *)sysm->CADDR1 + off, size);
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
+ }
+}
+
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t va;
+ vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
+
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool) {
+ sched_pin();
+ va = pmap_map_fpage(phys, &fpages_shared[PMAP_FPAGE3], FALSE);
+ bzero((caddr_t)va, PAGE_SIZE);
+ pmap_unmap_fpage(phys, &fpages_shared[PMAP_FPAGE3]);
+ sched_unpin();
+ } else
+#endif
+ if (phys < MIPS_KSEG0_LARGEST_PHYS) {
+ va = MIPS_PHYS_TO_CACHED(phys);
+ bzero((caddr_t)va, PAGE_SIZE);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ bzero(sysm->CADDR1, PAGE_SIZE);
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
+ }
+
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ */
+void
+pmap_copy_page(vm_page_t src, vm_page_t dst)
+{
+ vm_offset_t va_src, va_dst;
+ vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
+ vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
+
+
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ if (need_wired_tlb_page_pool) {
+ struct fpage *fp1, *fp2;
+ struct sysmaps *sysmaps;
+
+ sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+ mtx_lock(&sysmaps->lock);
+ sched_pin();
+
+ fp1 = &sysmaps->fp[PMAP_FPAGE1];
+ fp2 = &sysmaps->fp[PMAP_FPAGE2];
+
+ va_src = pmap_map_fpage(phy_src, fp1, FALSE);
+ va_dst = pmap_map_fpage(phy_dst, fp2, FALSE);
+
+ bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
+
+ pmap_unmap_fpage(phy_src, fp1);
+ pmap_unmap_fpage(phy_dst, fp2);
+ sched_unpin();
+ mtx_unlock(&sysmaps->lock);
+
+ /*
+		 * Should we flush the cache here?
+ */
+ } else
+#endif
+ {
+ if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
+ /* easy case, all can be accessed via KSEG0 */
+ va_src = MIPS_PHYS_TO_CACHED(phy_src);
+ va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
+ bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ if (phy_src < MIPS_KSEG0_LARGEST_PHYS) {
+ /* one side needs mapping - dest */
+ va_src = MIPS_PHYS_TO_CACHED(phy_src);
+ sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
+				sysm->valid2 = 1;
+ va_dst = (vm_offset_t)sysm->CADDR2;
+ } else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
+ /* one side needs mapping - src */
+ va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ va_src = (vm_offset_t)sysm->CADDR1;
+ sysm->valid1 = 1;
+ } else {
+ /* all need mapping */
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
+ sysm->valid1 = sysm->valid2 = 1;
+ va_src = (vm_offset_t)sysm->CADDR1;
+ va_dst = (vm_offset_t)sysm->CADDR2;
+ }
+ bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
+ if (sysm->valid1) {
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ }
+ if (sysm->valid2) {
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR2);
+ sysm->CMAP2 = 0;
+ sysm->valid2 = 0;
+ }
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
+ }
+ }
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ pv_entry_t pv;
+ int loops = 0;
+
+ if (m->flags & PG_FICTITIOUS)
+ return FALSE;
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+
+ /*
+ * Not found, check current mappings returning immediately if found.
+ */
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ if (pv->pv_pmap == pmap) {
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ return TRUE;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ return (FALSE);
+}
+
+#define PMAP_REMOVE_PAGES_CURPROC_ONLY
+/*
+ * Remove all pages from specified address space
+ * this aids process exit speeds. Also, this code
+ * is special cased for current process only, but
+ * can have the more generic (and slightly slower)
+ * mode enabled. This is much faster than pmap_remove
+ * in the case of running down an entire address space.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+ pt_entry_t *pte, tpte;
+ pv_entry_t pv, npv;
+ vm_page_t m;
+
+#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
+ if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
+ printf("warning: pmap_remove_pages called with non-current pmap\n");
+ return;
+ }
+#endif
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+ sched_pin();
+	/* XXX: should this be TAILQ_FOREACH_SAFE? */
+ for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
+ pv;
+ pv = npv) {
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ if (!pmap_pte_v(pte))
+ panic("pmap_remove_pages: page on pm_pvlist has no pte\n");
+ tpte = *pte;
+
+		/*
+		 * We cannot remove wired pages from a process' mapping
+		 * at this time.
+		 */
+ if (tpte & PTE_W) {
+ npv = TAILQ_NEXT(pv, pv_plist);
+ continue;
+ }
+ *pte = is_kernel_pmap(pmap) ? PTE_G : 0;
+
+ m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(tpte));
+
+ KASSERT(m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad tpte %lx", tpte));
+
+ pv->pv_pmap->pm_stats.resident_count--;
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if (tpte & PTE_M) {
+ vm_page_dirty(m);
+ }
+ npv = TAILQ_NEXT(pv, pv_plist);
+ TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+
+ m->md.pv_list_count--;
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+ vm_page_flag_clear(m, PG_WRITEABLE);
+ }
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+ free_pv_entry(pv);
+ }
+ sched_unpin();
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+}
+
+/*
+ * pmap_testbit tests bits in pte's
+ * note that the testbit/changebit routines are inline,
+ * and a lot of things compile-time evaluate.
+ */
+static boolean_t
+pmap_testbit(vm_page_t m, int bit)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ boolean_t rv = FALSE;
+
+ if (m->flags & PG_FICTITIOUS)
+ return rv;
+
+ if (TAILQ_FIRST(&m->md.pv_list) == NULL)
+ return rv;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+
+ if (bit & PTE_M) {
+ if (!pmap_track_modified(pv->pv_va))
+ continue;
+ }
+#if defined(PMAP_DIAGNOSTIC)
+ if (!pv->pv_pmap) {
+ printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
+ continue;
+ }
+#endif
+ PMAP_LOCK(pv->pv_pmap);
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ rv = (*pte & bit) != 0;
+ PMAP_UNLOCK(pv->pv_pmap);
+ if (rv)
+ break;
+ }
+ return (rv);
+}
+
+/*
+ * this routine is used to modify bits in ptes
+ */
+static __inline void
+pmap_changebit(vm_page_t m, int bit, boolean_t setem)
+{
+ register pv_entry_t pv;
+ register pt_entry_t *pte;
+
+ if (m->flags & PG_FICTITIOUS)
+ return;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ /*
+	 * Loop over all current mappings, setting/clearing as appropriate.
+	 * If setting RO, do we need to clear the VAC?
+ */
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+
+ /*
+ * don't write protect pager mappings
+ */
+ if (!setem && (bit == PTE_RW)) {
+ if (!pmap_track_modified(pv->pv_va))
+ continue;
+ }
+#if defined(PMAP_DIAGNOSTIC)
+ if (!pv->pv_pmap) {
+ printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
+ continue;
+ }
+#endif
+
+ PMAP_LOCK(pv->pv_pmap);
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+
+ if (setem) {
+ *(int *)pte |= bit;
+ pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
+ } else {
+ vm_offset_t pbits = *(vm_offset_t *)pte;
+
+ if (pbits & bit) {
+ if (bit == PTE_RW) {
+ if (pbits & PTE_M) {
+ vm_page_dirty(m);
+ }
+ *(int *)pte = (pbits & ~(PTE_M | PTE_RW)) |
+ PTE_RO;
+ } else {
+ *(int *)pte = pbits & ~bit;
+ }
+ pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
+ }
+ }
+ PMAP_UNLOCK(pv->pv_pmap);
+ }
+ if (!setem && bit == PTE_RW)
+ vm_page_flag_clear(m, PG_WRITEABLE);
+}
+
+/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ pv_entry_t pv;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
+ if (pv->pv_wired)
+ count++;
+ return (count);
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+ pv_entry_t pv, npv;
+ vm_offset_t va;
+ pt_entry_t *pte;
+
+ if ((m->flags & PG_WRITEABLE) == 0)
+ return;
+
+ /*
+	 * Loop over all current mappings, setting/clearing as appropriate.
+ */
+ for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
+ npv = TAILQ_NEXT(pv, pv_plist);
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+
+ if ((pte == NULL) || !mips_pg_v(*pte))
+ panic("page on pm_pvlist has no pte\n");
+
+ va = pv->pv_va;
+ pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
+ VM_PROT_READ | VM_PROT_EXECUTE);
+ }
+ vm_page_flag_clear(m, PG_WRITEABLE);
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return the count of reference bits for a page, clearing all of them.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ if (m->flags & PG_FICTITIOUS)
+ return (0);
+
+ if (m->md.pv_flags & PV_TABLE_REF) {
+ m->md.pv_flags &= ~PV_TABLE_REF;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+ if (m->flags & PG_FICTITIOUS)
+ return FALSE;
+
+ if (m->md.pv_flags & PV_TABLE_MOD)
+ return TRUE;
+ else
+ return pmap_testbit(m, PTE_M);
+}
+
+/* N/C */
+
+/*
+ * pmap_is_prefaultable:
+ *
+ *	Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *pte;
+ boolean_t rv;
+
+ rv = FALSE;
+ PMAP_LOCK(pmap);
+ if (*pmap_pde(pmap, addr)) {
+ pte = pmap_pte(pmap, addr);
+ rv = (*pte == 0);
+ }
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+ if (m->flags & PG_FICTITIOUS)
+ return;
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if (m->md.pv_flags & PV_TABLE_MOD) {
+ pmap_changebit(m, PTE_M, FALSE);
+ m->md.pv_flags &= ~PV_TABLE_MOD;
+ }
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+void
+pmap_clear_reference(vm_page_t m)
+{
+ if (m->flags & PG_FICTITIOUS)
+ return;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if (m->md.pv_flags & PV_TABLE_REF) {
+ m->md.pv_flags &= ~PV_TABLE_REF;
+ }
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+/*
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev(vm_offset_t pa, vm_size_t size)
+{
+ vm_offset_t va, tmpva, offset;
+
+ /*
+	 * KSEG1 maps only the first 512M of the physical address space, so
+	 * for pa > 0x20000000 we should create a proper mapping using
+	 * pmap_kenter.
+ */
+ if (pa + size < MIPS_KSEG0_LARGEST_PHYS)
+ return (void *)MIPS_PHYS_TO_KSEG1(pa);
+ else {
+ offset = pa & PAGE_MASK;
+ size = roundup(size, PAGE_SIZE);
+
+ va = kmem_alloc_nofault(kernel_map, size);
+ if (!va)
+ panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
+ for (tmpva = va; size > 0;) {
+ pmap_kenter(tmpva, pa);
+ size -= PAGE_SIZE;
+ tmpva += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ }
+ }
+
+ return ((void *)(va + offset));
+}
+
+void
+pmap_unmapdev(vm_offset_t va, vm_size_t size)
+{
+}
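+
+/*
+ * A usage sketch; the physical address, the size and the register access
+ * below are hypothetical and only illustrate the calling convention:
+ *
+ *	volatile uint32_t *regs;
+ *	uint32_t val;
+ *
+ *	regs = pmap_mapdev(0x1f000000, PAGE_SIZE);
+ *	val = regs[0];
+ *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
+ */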
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr)
+{
+
+ pt_entry_t *ptep, pte;
+ vm_page_t m;
+ int val = 0;
+
+ PMAP_LOCK(pmap);
+ ptep = pmap_pte(pmap, addr);
+ pte = (ptep != NULL) ? *ptep : 0;
+ PMAP_UNLOCK(pmap);
+
+ if (mips_pg_v(pte)) {
+ vm_offset_t pa;
+
+ val = MINCORE_INCORE;
+ pa = mips_tlbpfn_to_paddr(pte);
+ if (!page_is_managed(pa))
+ return val;
+
+ m = PHYS_TO_VM_PAGE(pa);
+
+ /*
+ * Modified by us
+ */
+ if (pte & PTE_M)
+ val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+ /*
+ * Modified by someone
+ */
+ else {
+ vm_page_lock_queues();
+ if (m->dirty || pmap_is_modified(m))
+ val |= MINCORE_MODIFIED_OTHER;
+ vm_page_unlock_queues();
+ }
+ /*
+ * Referenced by us or someone
+ */
+ vm_page_lock_queues();
+ if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
+ val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+ vm_page_flag_set(m, PG_REFERENCED);
+ }
+ vm_page_unlock_queues();
+ }
+ return val;
+}
+
+void
+pmap_activate(struct thread *td)
+{
+ pmap_t pmap, oldpmap;
+ struct proc *p = td->td_proc;
+
+ critical_enter();
+
+ pmap = vmspace_pmap(p->p_vmspace);
+ oldpmap = PCPU_GET(curpmap);
+
+ if (oldpmap)
+ atomic_clear_32(&oldpmap->pm_active, PCPU_GET(cpumask));
+ atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask));
+ pmap_asid_alloc(pmap);
+ if (td == curthread) {
+ PCPU_SET(segbase, pmap->pm_segtab);
+ MachSetPID(pmap->pm_asid[PCPU_GET(cpuid)].asid);
+ }
+ PCPU_SET(curpmap, pmap);
+ critical_exit();
+}
+
+/* TBD */
+
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
+
+ if ((obj == NULL) || (size < NBSEG) || (obj->type != OBJT_DEVICE)) {
+ return addr;
+ }
+ addr = (addr + (NBSEG - 1)) & ~(NBSEG - 1);
+ return addr;
+}
+
+int pmap_pid_dump(int pid);
+
+int
+pmap_pid_dump(int pid)
+{
+ pmap_t pmap;
+ struct proc *p;
+ int npte = 0;
+ int index;
+
+ sx_slock(&allproc_lock);
+ LIST_FOREACH(p, &allproc, p_list) {
+ if (p->p_pid != pid)
+ continue;
+
+ if (p->p_vmspace) {
+ int i, j;
+
+ printf("vmspace is %p\n",
+ p->p_vmspace);
+ index = 0;
+ pmap = vmspace_pmap(p->p_vmspace);
+ printf("pmap asid:%x generation:%x\n",
+ pmap->pm_asid[0].asid,
+ pmap->pm_asid[0].gen);
+ for (i = 0; i < NUSERPGTBLS; i++) {
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ unsigned base = i << SEGSHIFT;
+
+ pde = &pmap->pm_segtab[i];
+ if (pde && pmap_pde_v(pde)) {
+ for (j = 0; j < 1024; j++) {
+ unsigned va = base +
+ (j << PAGE_SHIFT);
+
+ pte = pmap_pte(pmap, va);
+ if (pte && pmap_pte_v(pte)) {
+ vm_offset_t pa;
+ vm_page_t m;
+
+ pa = mips_tlbpfn_to_paddr(*pte);
+ m = PHYS_TO_VM_PAGE(pa);
+ printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
+ va, pa,
+ m->hold_count,
+ m->wire_count,
+ m->flags);
+ npte++;
+ index++;
+ if (index >= 2) {
+ index = 0;
+ printf("\n");
+ } else {
+ printf(" ");
+ }
+ }
+ }
+ }
+ }
+ } else {
+ printf("Process pid:%d has no vm_space\n", pid);
+ }
+ break;
+ }
+ sx_sunlock(&allproc_lock);
+ return npte;
+}
+
+
+#if defined(DEBUG)
+
+static void pads(pmap_t pm);
+void pmap_pvdump(vm_offset_t pa);
+
+/* print address space of pmap */
+static void
+pads(pmap_t pm)
+{
+ unsigned va, i, j;
+ pt_entry_t *ptep;
+
+ if (pm == kernel_pmap)
+ return;
+ for (i = 0; i < NPTEPG; i++)
+ if (pm->pm_segtab[i])
+ for (j = 0; j < NPTEPG; j++) {
+ va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
+ if (pm == kernel_pmap && va < KERNBASE)
+ continue;
+ if (pm != kernel_pmap &&
+ va >= VM_MAXUSER_ADDRESS)
+ continue;
+ ptep = pmap_pte(pm, va);
+ if (pmap_pte_v(ptep))
+ printf("%x:%x ", va, *(int *)ptep);
+ }
+
+}
+
+void
+pmap_pvdump(vm_offset_t pa)
+{
+ register pv_entry_t pv;
+ vm_page_t m;
+
+ printf("pa %x", pa);
+ m = PHYS_TO_VM_PAGE(pa);
+ for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
+ pads(pv->pv_pmap);
+ }
+ printf(" ");
+}
+
+/* N/C */
+#endif
+
+
+/*
+ * Allocate TLB address space tag (called ASID or TLBPID) and return it.
+ * It takes almost as much or more time to search the TLB for a
+ * specific ASID and flush those entries as it does to flush the entire TLB.
+ * Therefore, when we allocate a new ASID, we just take the next number. When
+ * we run out of numbers, we flush the TLB, increment the generation count
+ * and start over. ASID zero is reserved for kernel use.
+ */
+static void
+pmap_asid_alloc(pmap_t pmap)
+{
+	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
+	    pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
+ if (PCPU_GET(next_asid) == pmap_max_asid) {
+ MIPS_TBIAP();
+ PCPU_SET(asid_generation,
+ (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
+ if (PCPU_GET(asid_generation) == 0) {
+ PCPU_SET(asid_generation, 1);
+ }
+ PCPU_SET(next_asid, 1); /* 0 means invalid */
+ }
+ pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
+ pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
+ PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
+ }
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW | PDB_TLBPID)) {
+ if (curproc)
+ printf("pmap_asid_alloc: curproc %d '%s' ",
+ curproc->p_pid, curproc->p_comm);
+ else
+ printf("pmap_asid_alloc: curproc <none> ");
+ printf("segtab %p asid %d\n", pmap->pm_segtab,
+ pmap->pm_asid[PCPU_GET(cpuid)].asid);
+ }
+#endif
+}
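+
+/*
+ * A simplified, stand-alone model of the generation scheme described above.
+ * The structure and function names here are illustrative only, flush_tlb()
+ * stands in for MIPS_TBIAP(), the reserved ASID is assumed to be 0, and the
+ * generation counter simply wraps instead of being masked:
+ *
+ *	struct asid_state { unsigned next, gen; };	per-CPU state
+ *	struct asid_tag { unsigned asid, gen; };	per-pmap tag
+ *
+ *	static void
+ *	asid_alloc(struct asid_state *s, struct asid_tag *t, unsigned max)
+ *	{
+ *		if (t->asid != 0 && t->gen == s->gen)
+ *			return;			still valid, keep it
+ *		if (s->next == max) {		out of ASIDs: flush, new gen
+ *			flush_tlb();
+ *			if (++s->gen == 0)
+ *				s->gen = 1;
+ *			s->next = 1;		ASID 0 stays reserved
+ *		}
+ *		t->asid = s->next++;
+ *		t->gen = s->gen;
+ *	}
+ */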
+
+int
+page_is_managed(vm_offset_t pa)
+{
+ vm_offset_t pgnum = mips_btop(pa);
+
+ if (pgnum >= first_page && (pgnum < (first_page + vm_page_array_size))) {
+ vm_page_t m;
+
+ m = PHYS_TO_VM_PAGE(pa);
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+ int rw = 0;
+
+ if (!(prot & VM_PROT_WRITE))
+ rw = PTE_ROPAGE;
+ else {
+ if (va >= VM_MIN_KERNEL_ADDRESS) {
+ /*
+ * Don't bother to trap on kernel writes, just
+ * record page as dirty.
+ */
+ rw = PTE_RWPAGE;
+ vm_page_dirty(m);
+ } else if ((m->md.pv_flags & PV_TABLE_MOD) || m->dirty)
+ rw = PTE_RWPAGE;
+ else
+ rw = PTE_CWPAGE;
+ }
+ return rw;
+}
+
+/*
+ * pmap_page_is_free:
+ *
+ * Called when a page is freed to allow pmap to clean up
+ * any extra state associated with the page. In this case
+ * clear modified/referenced bits.
+ */
+void
+pmap_page_is_free(vm_page_t m)
+{
+
+ m->md.pv_flags = 0;
+}
+
+/*
+ * pmap_set_modified:
+ *
+ * Sets the page modified and reference bits for the specified page.
+ */
+void
+pmap_set_modified(vm_offset_t pa)
+{
+
+ PHYS_TO_VM_PAGE(pa)->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
+}
+
+#include <machine/db_machdep.h>
+
+/*
+ * Dump the translation buffer (TLB) in readable form.
+ */
+
+void
+db_dump_tlb(int first, int last)
+{
+ struct tlb tlb;
+ int tlbno;
+
+ tlbno = first;
+
+ while (tlbno <= last) {
+ MachTLBRead(tlbno, &tlb);
+ if (tlb.tlb_lo0 & PTE_V || tlb.tlb_lo1 & PTE_V) {
+ printf("TLB %2d vad 0x%08x ", tlbno, (tlb.tlb_hi & 0xffffff00));
+ } else {
+ printf("TLB*%2d vad 0x%08x ", tlbno, (tlb.tlb_hi & 0xffffff00));
+ }
+ printf("0=0x%08x ", pfn_to_vad(tlb.tlb_lo0));
+ printf("%c", tlb.tlb_lo0 & PTE_M ? 'M' : ' ');
+ printf("%c", tlb.tlb_lo0 & PTE_G ? 'G' : ' ');
+ printf(" atr %x ", (tlb.tlb_lo0 >> 3) & 7);
+ printf("1=0x%08x ", pfn_to_vad(tlb.tlb_lo1));
+ printf("%c", tlb.tlb_lo1 & PTE_M ? 'M' : ' ');
+ printf("%c", tlb.tlb_lo1 & PTE_G ? 'G' : ' ');
+ printf(" atr %x ", (tlb.tlb_lo1 >> 3) & 7);
+ printf(" sz=%x pid=%x\n", tlb.tlb_mask,
+ (tlb.tlb_hi & 0x000000ff)
+ );
+ tlbno++;
+ }
+}
+
+#ifdef DDB
+#include <sys/kernel.h>
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(tlb, ddb_dump_tlb)
+{
+ db_dump_tlb(0, num_tlbentries - 1);
+}
+
+#endif
+
+/*
+ * Routine: pmap_kextract
+ * Function:
+ *	Extract the physical page address associated with the
+ *	given virtual address.
+ */
+ /* PMAP_INLINE */ vm_offset_t
+pmap_kextract(vm_offset_t va)
+{
+ vm_offset_t pa = 0;
+
+ if (va < MIPS_CACHED_MEMORY_ADDR) {
+ /* user virtual address */
+ pt_entry_t *ptep;
+
+ if (curproc && curproc->p_vmspace) {
+ ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
+ if (ptep)
+ pa = mips_tlbpfn_to_paddr(*ptep) |
+ (va & PAGE_MASK);
+ }
+ } else if (va >= MIPS_CACHED_MEMORY_ADDR &&
+ va < MIPS_UNCACHED_MEMORY_ADDR)
+ pa = MIPS_CACHED_TO_PHYS(va);
+ else if (va >= MIPS_UNCACHED_MEMORY_ADDR &&
+ va < MIPS_KSEG2_START)
+ pa = MIPS_UNCACHED_TO_PHYS(va);
+#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
+ else if (need_wired_tlb_page_pool && ((va >= VM_MIN_KERNEL_ADDRESS) &&
+ (va < (VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET))))
+ pa = MIPS_CACHED_TO_PHYS(va);
+#endif
+ else if (va >= MIPS_KSEG2_START && va < VM_MAX_KERNEL_ADDRESS) {
+ pt_entry_t *ptep;
+
+ if (kernel_pmap) {
+ if (va >= (vm_offset_t)virtual_sys_start) {
+				/* It's inside the virtual address range. */
+ ptep = pmap_pte(kernel_pmap, va);
+ if (ptep)
+ pa = mips_tlbpfn_to_paddr(*ptep) |
+ (va & PAGE_MASK);
+ } else {
+ int i;
+
+ /*
+				 * It's inside the special mapping area.  I
+				 * don't think this should happen, but if it
+				 * does I want it to all work right :-)
+				 * Note that if it does happen, we assume the
+				 * caller holds the lock.  FIXME: this needs
+				 * to be checked - RRS.
+ */
+ for (i = 0; i < MAXCPU; i++) {
+ if ((sysmap_lmem[i].valid1) && ((vm_offset_t)sysmap_lmem[i].CADDR1 == va)) {
+ pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP1);
+ break;
+ }
+ if ((sysmap_lmem[i].valid2) && ((vm_offset_t)sysmap_lmem[i].CADDR2 == va)) {
+ pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP2);
+ break;
+ }
+ }
+ }
+ }
+ }
+ return pa;
+}
diff --git a/sys/mips/mips/psraccess.S b/sys/mips/mips/psraccess.S
new file mode 100644
index 0000000..003c1d5
--- /dev/null
+++ b/sys/mips/mips/psraccess.S
@@ -0,0 +1,196 @@
+/* $OpenBSD$ */
+/*
+ * Copyright (c) 2001 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * JNPR: psraccess.S,v 1.4.2.1 2007/09/10 10:36:50 girish
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * Low level code to manage processor specific registers.
+ */
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+
+#include "assym.s"
+
+/*
+ * FREEBSD_DEVELOPERS_FIXME
+ * Some MIPS CPUs may need delays (using nops) between executing CP0 instructions.
+ */
+#define MIPS_CPU_NOP_DELAY nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+ .set noreorder # Noreorder is default style!
+
+/*
+ * Set/clear software interrupt.
+ */
+
+LEAF(setsoftintr0)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ or v0, v0, SOFT_INT_MASK_0 # set soft clock interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(setsoftintr0)
+
+LEAF(clearsoftintr0)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ and v0, v0, ~SOFT_INT_MASK_0 # clear soft clock interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(clearsoftintr0)
+
+LEAF(setsoftintr1)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ or v0, v0, SOFT_INT_MASK_1 # set soft net interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(setsoftintr1)
+
+LEAF(clearsoftintr1)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ and v0, v0, ~SOFT_INT_MASK_1 # clear soft net interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(clearsoftintr1)
+
+/*
+ * Set/change interrupt priority routines.
+ * These routines return the previous state.
+ */
+LEAF(restoreintr)
+ mfc0 t0,COP_0_STATUS_REG
+ and t1,t0,SR_INT_ENAB
+ beq a0,t1,1f
+ xor t0,SR_INT_ENAB
+
+ .set noreorder
+
+ mtc0 t0,COP_0_STATUS_REG
+ nop
+ nop
+ nop
+ nop
+1:
+ j ra
+ nop
+END(restoreintr)
+
+/*
+ * Set/change interrupt priority routines.
+ * These routines return the previous state.
+ */
+
+LEAF(enableintr)
+#ifdef TARGET_OCTEON
+ .set mips64r2
+ ei v0
+ and v0, SR_INT_ENAB # return old interrupt enable bit
+ .set mips0
+#else
+ mfc0 v0, COP_0_STATUS_REG # read status register
+ nop
+ or v1, v0, SR_INT_ENAB
+ mtc0 v1, COP_0_STATUS_REG # enable all interrupts
+ and v0, SR_INT_ENAB # return old interrupt enable
+#endif
+ j ra
+ nop
+END(enableintr)
+
+
+LEAF(disableintr)
+#ifdef TARGET_OCTEON
+ .set mips64r2
+ di v0
+ and v0, SR_INT_ENAB # return old interrupt enable bit
+ .set mips0
+#else
+ mfc0 v0, COP_0_STATUS_REG # read status register
+ nop
+ and v1, v0, ~SR_INT_ENAB
+ mtc0 v1, COP_0_STATUS_REG # disable all interrupts
+ MIPS_CPU_NOP_DELAY
+ and v0, SR_INT_ENAB # return old interrupt enable
+#endif
+ j ra
+ nop
+END(disableintr)
+
+LEAF(set_intr_mask)
+	li	t0, SR_INT_MASK		# caller passes 1 for masked,
+	not	a0, a0			# but the IM bits use 1 for enabled,
+	and	a0, t0			# so invert and keep only the IM bits.
+ mfc0 v0, COP_0_STATUS_REG
+ li v1, ~SR_INT_MASK
+ and v1, v0
+ or v1, a0
+ mtc0 v1, COP_0_STATUS_REG
+ MIPS_CPU_NOP_DELAY
+ move v0, v1
+ jr ra
+ nop
+
+END(set_intr_mask)
+
+LEAF(get_intr_mask)
+ li a0, 0
+ mfc0 v0, COP_0_STATUS_REG
+ li v1, SR_INT_MASK
+ and v0, v1
+ or v0, a0
+ jr ra
+ nop
+
+END(get_intr_mask)
+
+/*
+ * u_int32_t mips_cp0_config1_read(void)
+ *
+ * Return the current value of the CP0 Config (Select 1) register.
+ */
+LEAF(mips_cp0_config1_read)
+ .set push
+ .set mips32
+ mfc0 v0, COP_0_CONFIG, 1
+ j ra
+ nop
+ .set pop
+END(mips_cp0_config1_read)
diff --git a/sys/mips/mips/stack_machdep.c b/sys/mips/mips/stack_machdep.c
new file mode 100644
index 0000000..85ee3ba
--- /dev/null
+++ b/sys/mips/mips/stack_machdep.c
@@ -0,0 +1,153 @@
+/*-
+ * Copyright (c) 2005 Antoine Brodin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stack.h>
+
+#include <machine/mips_opcode.h>
+
+#include <machine/param.h>
+#include <machine/pcb.h>
+#include <machine/regnum.h>
+
+static u_register_t
+stack_register_fetch(u_register_t sp, u_register_t stack_pos)
+{
+ u_register_t * stack =
+ ((u_register_t *)sp + stack_pos/sizeof(u_register_t));
+
+ return *stack;
+}
+
+static void
+stack_capture(struct stack *st, u_register_t pc, u_register_t sp)
+{
+ u_register_t ra = 0, i, stacksize;
+ short ra_stack_pos = 0;
+ InstFmt insn;
+
+ stack_zero(st);
+
+ for (;;) {
+ stacksize = 0;
+ if (pc <= (u_register_t)btext)
+ break;
+ for (i = pc; i >= (u_register_t)btext; i -= sizeof (insn)) {
+ bcopy((void *)i, &insn, sizeof insn);
+ switch (insn.IType.op) {
+ case OP_ADDI:
+ case OP_ADDIU:
+ if (insn.IType.rs != SP || insn.IType.rt != SP)
+ break;
+ stacksize = -(short)insn.IType.imm;
+ break;
+
+ case OP_SW:
+ if (insn.IType.rs != SP || insn.IType.rt != RA)
+ break;
+ ra_stack_pos = (short)insn.IType.imm;
+ break;
+ default:
+ break;
+ }
+
+ if (stacksize)
+ break;
+ }
+
+ if (stack_put(st, pc) == -1)
+ break;
+
+ for (i = pc; !ra; i += sizeof (insn)) {
+ bcopy((void *)i, &insn, sizeof insn);
+
+ switch (insn.IType.op) {
+ case OP_SPECIAL:
+ if((insn.RType.func == OP_JR))
+ {
+ if (ra >= (u_register_t)btext)
+ break;
+ if (insn.RType.rs != RA)
+ break;
+ ra = stack_register_fetch(sp,
+ ra_stack_pos);
+ if (!ra)
+ goto done;
+ ra -= 8;
+ }
+ break;
+ default:
+ break;
+ }
+ /* eret */
+ if (insn.word == 0x42000018)
+ goto done;
+ }
+
+ if (pc == ra && stacksize == 0)
+ break;
+
+ sp += stacksize;
+ pc = ra;
+ ra = 0;
+ }
+done:
+ return;
+}
+
+void
+stack_save_td(struct stack *st, struct thread *td)
+{
+ u_register_t pc, sp;
+
+ if (TD_IS_SWAPPED(td))
+ panic("stack_save_td: swapped");
+ if (TD_IS_RUNNING(td))
+ panic("stack_save_td: running");
+
+ pc = td->td_pcb->pcb_regs.pc;
+ sp = td->td_pcb->pcb_regs.sp;
+ stack_capture(st, pc, sp);
+}
+
+void
+stack_save(struct stack *st)
+{
+ u_register_t pc, sp;
+
+ if (curthread == NULL)
+		panic("stack_save: curthread == NULL");
+
+ pc = curthread->td_pcb->pcb_regs.pc;
+ sp = curthread->td_pcb->pcb_regs.sp;
+ stack_capture(st, pc, sp);
+}
diff --git a/sys/mips/mips/support.S b/sys/mips/mips/support.S
new file mode 100644
index 0000000..269042c
--- /dev/null
+++ b/sys/mips/mips/support.S
@@ -0,0 +1,1537 @@
+/* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Digital Equipment Corporation and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
+ * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
+ * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
+ * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
+ *
+ * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
+ * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
+ * $FreeBSD$
+ */
+
+/*
+ * Contains code that is the first executed at boot time plus
+ * assembly language support routines.
+ */
+
+#include "opt_ddb.h"
+#include <sys/errno.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+
+#include "assym.s"
+
+ .set noreorder # Noreorder is default style!
+
+/*
+ * Primitives
+ */
+
+/*
+ * This table is indexed by u.u_pcb.pcb_onfault in trap().
+ * The reason for using this table rather than storing an address in
+ * u.u_pcb.pcb_onfault is simply to make the code faster.
+ */
+ .globl onfault_table
+ .data
+ .align 3
+onfault_table:
+ .word 0 # invalid index number
+#define BADERR 1
+ .word baderr
+#define COPYERR 2
+ .word copyerr
+#define FSWBERR 3
+ .word fswberr
+#define FSWINTRBERR 4
+ .word fswintrberr
+#if defined(DDB) || defined(DEBUG)
+#define DDBERR 5
+ .word ddberr
+#else
+ .word 0
+#endif
+
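+/*
+ * A hypothetical sketch (not the trap handler from this port; the names are
+ * illustrative) of how a fault handler consults this table once a routine
+ * has recorded its recovery index in pcb_onfault:
+ *
+ *	extern register_t onfault_table[];
+ *
+ *	if (pcb->pcb_onfault != 0) {
+ *		frame->pc = onfault_table[pcb->pcb_onfault];
+ *		pcb->pcb_onfault = 0;
+ *		return;		resume at the recovery label, e.g. copyerr
+ *	}
+ */
+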
+ .text
+
+/*
+ * See if access to addr with a len type instruction causes a machine check.
+ * len is length of access (1=byte, 2=short, 4=long)
+ *
+ * badaddr(addr, len)
+ * char *addr;
+ * int len;
+ */
+LEAF(badaddr)
+ li v0, BADERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ bne a1, 1, 2f
+ sw v0, U_PCB_ONFAULT(v1)
+ b 5f
+ lbu v0, (a0)
+2:
+ bne a1, 2, 4f
+ nop
+ b 5f
+ lhu v0, (a0)
+4:
+ lw v0, (a0)
+5:
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ move v0, zero # made it w/o errors
+baderr:
+ j ra
+ li v0, 1 # trap sends us here
+END(badaddr)
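+
+/*
+ * A usage sketch; the probed physical address and the device_found flag
+ * below are hypothetical and only show the calling convention:
+ *
+ *	char *p = (char *)MIPS_PHYS_TO_KSEG1(0x1c000000);
+ *
+ *	if (badaddr(p, 4) == 0)
+ *		device_found = 1;	a 32-bit read did not fault
+ */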
+
+/*
+ * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
+ * Copy a NIL-terminated string, at most maxlen characters long. Return the
+ * number of characters copied (including the NIL) in *lencopied. If the
+ * string is too long, return ENAMETOOLONG; else return 0.
+ */
+LEAF(copystr)
+ move t0, a2
+ beq a2, zero, 4f
+1:
+ lbu v0, 0(a0)
+ subu a2, a2, 1
+ beq v0, zero, 2f
+ sb v0, 0(a1) # each byte until NIL
+ addu a0, a0, 1
+ bne a2, zero, 1b # less than maxlen
+ addu a1, a1, 1
+4:
+ li v0, ENAMETOOLONG # run out of space
+2:
+ beq a3, zero, 3f # return num. of copied bytes
+ subu a2, t0, a2 # if the 4th arg was non-NULL
+ sw a2, 0(a3)
+3:
+ j ra # v0 is 0 or ENAMETOOLONG
+ nop
+END(copystr)
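+
+/*
+ * The routine above implements roughly the following C; this is a reference
+ * sketch of its semantics, not the code that is assembled here:
+ *
+ *	int
+ *	copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
+ *	{
+ *		const char *from = kfaddr;
+ *		char *to = kdaddr;
+ *		size_t copied = 0;
+ *		int error = ENAMETOOLONG;
+ *
+ *		while (copied < maxlen) {
+ *			copied++;
+ *			if ((*to++ = *from++) == '\0') {
+ *				error = 0;	the NIL is copied and counted
+ *				break;
+ *			}
+ *		}
+ *		if (lencopied != NULL)
+ *			*lencopied = copied;
+ *		return (error);
+ *	}
+ */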
+
+
+/*
+ * fillw(pat, addr, count)
+ */
+LEAF(fillw)
+1:
+ addiu a2, a2, -1
+ sh a0, 0(a1)
+ bne a2,zero, 1b
+ addiu a1, a1, 2
+
+ jr ra
+ nop
+END(fillw)
+
+/*
+ * Optimized memory zero code.
+ * mem_zero_page(addr);
+ */
+LEAF(mem_zero_page)
+ li v0, NBPG
+1:
+ subu v0, 8
+ sd zero, 0(a0)
+ bne zero, v0, 1b
+ addu a0, 8
+ jr ra
+ nop
+END(mem_zero_page)
+
+/*
+ * Block I/O routines mainly used by I/O drivers.
+ *
+ * Args as: a0 = port
+ * a1 = memory address
+ * a2 = count
+ */
+LEAF(insb)
+ beq a2, zero, 2f
+ addu a2, a1
+1:
+ lbu v0, 0(a0)
+ addiu a1, 1
+ bne a1, a2, 1b
+ sb v0, -1(a1)
+2:
+ jr ra
+ nop
+END(insb)
+
+LEAF(insw)
+ beq a2, zero, 2f
+ addu a2, a2
+ addu a2, a1
+1:
+ lhu v0, 0(a0)
+ addiu a1, 2
+ bne a1, a2, 1b
+ sh v0, -2(a1)
+2:
+ jr ra
+ nop
+END(insw)
+
+LEAF(insl)
+ beq a2, zero, 2f
+ sll a2, 2
+ addu a2, a1
+1:
+ lw v0, 0(a0)
+ addiu a1, 4
+ bne a1, a2, 1b
+ sw v0, -4(a1)
+2:
+ jr ra
+ nop
+END(insl)
+
+LEAF(outsb)
+ beq a2, zero, 2f
+ addu a2, a1
+1:
+ lbu v0, 0(a1)
+ addiu a1, 1
+ bne a1, a2, 1b
+ sb v0, 0(a0)
+2:
+ jr ra
+ nop
+END(outsb)
+
+LEAF(outsw)
+ beq a2, zero, 2f
+ addu a2, a2
+ li v0, 1
+ and v0, a1
+ bne v0, zero, 3f # arghh, unaligned.
+ addu a2, a1
+1:
+ lhu v0, 0(a1)
+ addiu a1, 2
+ bne a1, a2, 1b
+ sh v0, 0(a0)
+2:
+ jr ra
+ nop
+3:
+ LWHI v0, 0(a1)
+ LWLO v0, 3(a1)
+ addiu a1, 2
+ bne a1, a2, 3b
+ sh v0, 0(a0)
+
+ jr ra
+ nop
+END(outsw)
+
+LEAF(outsl)
+ beq a2, zero, 2f
+ sll a2, 2
+ li v0, 3
+ and v0, a1
+ bne v0, zero, 3f # arghh, unaligned.
+ addu a2, a1
+1:
+ lw v0, 0(a1)
+ addiu a1, 4
+ bne a1, a2, 1b
+ sw v0, 0(a0)
+2:
+ jr ra
+ nop
+3:
+ LWHI v0, 0(a1)
+ LWLO v0, 3(a1)
+ addiu a1, 4
+ bne a1, a2, 3b
+ sw v0, 0(a0)
+
+ jr ra
+ nop
+END(outsl)
+
+/*
+ * Copy a null terminated string from the user address space into
+ * the kernel address space.
+ *
+ * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
+ * caddr_t fromaddr;
+ * caddr_t toaddr;
+ * u_int maxlength;
+ * u_int *lencopied;
+ */
+NON_LEAF(copyinstr, STAND_FRAME_SIZE, ra)
+ subu sp, sp, STAND_FRAME_SIZE
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+ sw ra, STAND_RA_OFFSET(sp)
+ blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
+ li v0, COPYERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ jal _C_LABEL(copystr)
+ sw v0, U_PCB_ONFAULT(v1)
+ lw ra, STAND_RA_OFFSET(sp)
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ addu sp, sp, STAND_FRAME_SIZE
+END(copyinstr)
+
+/*
+ * Copy a null terminated string from the kernel address space into
+ * the user address space.
+ *
+ * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
+ * caddr_t fromaddr;
+ * caddr_t toaddr;
+ * u_int maxlength;
+ * u_int *lencopied;
+ */
+NON_LEAF(copyoutstr, STAND_FRAME_SIZE, ra)
+ subu sp, sp, STAND_FRAME_SIZE
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+ sw ra, STAND_RA_OFFSET(sp)
+ blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
+ li v0, COPYERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ jal _C_LABEL(copystr)
+ sw v0, U_PCB_ONFAULT(v1)
+ lw ra, STAND_RA_OFFSET(sp)
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ addu sp, sp, STAND_FRAME_SIZE
+END(copyoutstr)
+
+/*
+ * Copy specified amount of data from user space into the kernel
+ * copyin(from, to, len)
+ * caddr_t *from; (user source address)
+ * caddr_t *to; (kernel destination address)
+ * unsigned len;
+ */
+NON_LEAF(copyin, STAND_FRAME_SIZE, ra)
+ subu sp, sp, STAND_FRAME_SIZE
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+ sw ra, STAND_RA_OFFSET(sp)
+ blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
+ li v0, COPYERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ jal _C_LABEL(bcopy)
+ sw v0, U_PCB_ONFAULT(v1)
+ lw ra, STAND_RA_OFFSET(sp)
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1) # bcopy modified v1, so reload
+ sw zero, U_PCB_ONFAULT(v1)
+ addu sp, sp, STAND_FRAME_SIZE
+ j ra
+ move v0, zero
+END(copyin)
+
+/*
+ * Copy specified amount of data from kernel to the user space
+ * copyout(from, to, len)
+ * caddr_t *from; (kernel source address)
+ * caddr_t *to; (user destination address)
+ * unsigned len;
+ */
+NON_LEAF(copyout, STAND_FRAME_SIZE, ra)
+ subu sp, sp, STAND_FRAME_SIZE
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+ sw ra, STAND_RA_OFFSET(sp)
+ blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
+ li v0, COPYERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ jal _C_LABEL(bcopy)
+ sw v0, U_PCB_ONFAULT(v1)
+ lw ra, STAND_RA_OFFSET(sp)
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1) # bcopy modified v1, so reload
+ sw zero, U_PCB_ONFAULT(v1)
+ addu sp, sp, STAND_FRAME_SIZE
+ j ra
+ move v0, zero
+END(copyout)
+
+LEAF(copyerr)
+ lw ra, STAND_RA_OFFSET(sp)
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw zero, U_PCB_ONFAULT(v1)
+ addu sp, sp, STAND_FRAME_SIZE
+ j ra
+ li v0, EFAULT # return error
+END(copyerr)
+
+/*
+ * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
+ * user text space.
+ * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
+ * user data space.
+ */
+LEAF(fuword)
+ALEAF(fuword32)
+ALEAF(fuiword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ lw v0, 0(a0) # fetch word
+ j ra
+ sw zero, U_PCB_ONFAULT(v1)
+END(fuword)
+
+LEAF(fusword)
+ALEAF(fuisword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ lhu v0, 0(a0) # fetch short
+ j ra
+ sw zero, U_PCB_ONFAULT(v1)
+END(fusword)
+
+LEAF(fubyte)
+ALEAF(fuibyte)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ lbu v0, 0(a0) # fetch byte
+ j ra
+ sw zero, U_PCB_ONFAULT(v1)
+END(fubyte)
+
+LEAF(suword)
+XLEAF(suword32)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ sw a1, 0(a0) # store word
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ move v0, zero
+END(suword)
+
+/*
+ * casuword(9)
+ * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
+ */
+ENTRY(casuword)
+ break
+ li v0, -1
+ jr ra
+ nop
+END(casuword)
+
+/*
+ * casuword32(9)
+ * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
+ * <a2>uint32_t newval)
+ */
+ENTRY(casuword32)
+ break
+ li v0, -1
+ jr ra
+ nop
+END(casuword32)
+
+#if 0
+ /* unused in FreeBSD */
+/*
+ * Have to flush instruction cache afterwards.
+ */
+LEAF(suiword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ sw a1, 0(a0) # store word
+ sw zero, U_PCB_ONFAULT(v1)
+ j _C_LABEL(Mips_SyncICache) # FlushICache sets v0 = 0. (Ugly)
+ li a1, 4 # size of word
+END(suiword)
+#endif
+
+/*
+ * Will have to flush the instruction cache if byte merging is done in hardware.
+ */
+LEAF(susword)
+ALEAF(suisword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ sh a1, 0(a0) # store short
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ move v0, zero
+END(susword)
+
+LEAF(subyte)
+ALEAF(suibyte)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, FSWBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ sb a1, 0(a0) # store byte
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ move v0, zero
+END(subyte)
+
+LEAF(fswberr)
+ j ra
+ li v0, -1
+END(fswberr)
+
+/*
+ * fuswintr and suswintr are just like fusword and susword except that if
+ * the page is not in memory or would cause a trap, then we return an error.
+ * The important thing is to prevent sleep() and switch().
+ */
+LEAF(fuswintr)
+ blt a0, zero, fswintrberr # make sure address is in user space
+ li v0, FSWINTRBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ lhu v0, 0(a0) # fetch short
+ j ra
+ sw zero, U_PCB_ONFAULT(v1)
+END(fuswintr)
+
+LEAF(suswintr)
+ blt a0, zero, fswintrberr # make sure address is in user space
+ li v0, FSWINTRBERR
+ GET_CPU_PCPU(v1)
+ lw v1, PC_CURPCB(v1)
+ sw v0, U_PCB_ONFAULT(v1)
+ sh a1, 0(a0) # store short
+ sw zero, U_PCB_ONFAULT(v1)
+ j ra
+ move v0, zero
+END(suswintr)
+
+LEAF(fswintrberr)
+ j ra
+ li v0, -1
+END(fswintrberr)
+
+/*
+ * Insert 'p' after 'q'.
+ * _insque(p, q)
+ * caddr_t p, q;
+ */
+LEAF(_insque)
+ lw v0, 0(a1) # v0 = q->next
+ sw a1, 4(a0) # p->prev = q
+ sw v0, 0(a0) # p->next = q->next
+ sw a0, 4(v0) # q->next->prev = p
+ j ra
+ sw a0, 0(a1) # q->next = p
+END(_insque)
+
+/*
+ * Remove item 'p' from queue.
+ * _remque(p)
+ * caddr_t p;
+ */
+LEAF(_remque)
+ lw v0, 0(a0) # v0 = p->next
+ lw v1, 4(a0) # v1 = p->prev
+ nop
+ sw v0, 0(v1) # p->prev->next = p->next
+ j ra
+ sw v1, 4(v0) # p->next->prev = p->prev
+END(_remque)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_GetCOUNT --
+ *
+ * Mips_GetCOUNT()
+ *
+ * Results:
+ * Returns the current COUNT reg.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_GetCOUNT)
+ mfc0 v0, COP_0_COUNT
+ nop #???
+ nop #???
+ j ra
+ nop
+END(Mips_GetCOUNT)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_SetCOMPARE --
+ *
+ * Mips_SetCOMPARE()
+ *
+ * Results:
+ * Sets a new value to the COMPARE register.
+ *
+ * Side effects:
+ * The COMPARE equal interrupt is acknowledged.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_SetCOMPARE)
+ mtc0 a0, COP_0_COMPARE
+ j ra
+ nop
+END(Mips_SetCOMPARE)
+
+LEAF(Mips_GetCOMPARE)
+ mfc0 v0, COP_0_COMPARE
+ j ra
+ nop
+END(Mips_GetCOMPARE)
+
+/*
+ * u_int32_t mips_cp0_status_read(void)
+ *
+ * Return the current value of the CP0 Status register.
+ */
+LEAF(mips_cp0_status_read)
+ mfc0 v0, COP_0_STATUS_REG
+ j ra
+ nop
+END(mips_cp0_status_read)
+
+/*
+ * void mips_cp0_status_write(u_int32_t)
+ *
+ * Set the value of the CP0 Status register.
+ *
+ * Note: This is almost certainly not the way you want to write a
+ * "permanent" value to to the CP0 Status register, since it gets
+ * saved in trap frames and restores.
+ */
+LEAF(mips_cp0_status_write)
+ mtc0 a0, COP_0_STATUS_REG
+ nop
+ nop
+ j ra
+ nop
+END(mips_cp0_status_write)
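
Editor's note: as the comment above warns, mips_cp0_status_write() stores an absolute value, and the trap code saves and restores Status itself. Callers therefore normally do a read-modify-write with these two accessors. A minimal sketch of that pattern; SR_INT_ENAB is the interrupt-enable mask used elsewhere in this file, and the prototypes are restated here only so the fragment is self-contained:

        #include <sys/types.h>
        #include <machine/cpuregs.h>            /* SR_INT_ENAB */

        extern uint32_t mips_cp0_status_read(void);
        extern void mips_cp0_status_write(uint32_t);

        /* Temporarily mask interrupts, then restore the previous Status value. */
        static void
        run_with_interrupts_masked(void (*fn)(void))
        {
                uint32_t s = mips_cp0_status_read();

                mips_cp0_status_write(s & ~SR_INT_ENAB);
                fn();                           /* short critical section */
                mips_cp0_status_write(s);
        }
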
+
+
+/*
+ * memcpy(to, from, len)
+ * {ov}bcopy(from, to, len)
+ */
+LEAF(memcpy)
+ .set noreorder
+ move v0, a0 # swap from and to
+ move a0, a1
+ move a1, v0
+ALEAF(bcopy)
+ALEAF(ovbcopy)
+ .set noreorder
+ addu t0, a0, a2 # t0 = end of s1 region
+ sltu t1, a1, t0
+ sltu t2, a0, a1
+ and t1, t1, t2 # t1 = true if from < to < (from+len)
+ beq t1, zero, forward # non overlapping, do forward copy
+ slt t2, a2, 12 # check for small copy
+
+ ble a2, zero, 2f
+ addu t1, a1, a2 # t1 = end of to region
+1:
+ lb v1, -1(t0) # copy bytes backwards,
+ subu t0, t0, 1 # doesn't happen often, so do it the slow way
+ subu t1, t1, 1
+ bne t0, a0, 1b
+ sb v1, 0(t1)
+2:
+ j ra
+ nop
+forward:
+ bne t2, zero, smallcpy # do a small bcopy
+ xor v1, a0, a1 # compare low two bits of addresses
+ and v1, v1, 3
+ subu a3, zero, a1 # compute # bytes to word align address
+ beq v1, zero, aligned # addresses can be word aligned
+ and a3, a3, 3
+
+ beq a3, zero, 1f
+ subu a2, a2, a3 # subtract from remaining count
+ LWHI v1, 0(a0) # get next 4 bytes (unaligned)
+ LWLO v1, 3(a0)
+ addu a0, a0, a3
+ SWHI v1, 0(a1) # store 1, 2, or 3 bytes to align a1
+ addu a1, a1, a3
+1:
+ and v1, a2, 3 # compute number of words left
+ subu a3, a2, v1
+ move a2, v1
+ addu a3, a3, a0 # compute ending address
+2:
+ LWHI v1, 0(a0) # copy words a0 unaligned, a1 aligned
+ LWLO v1, 3(a0)
+ addu a0, a0, 4
+ sw v1, 0(a1)
+ addu a1, a1, 4
+ bne a0, a3, 2b
+ nop # We have to do this because of an MMU bug.
+ b smallcpy
+ nop
+aligned:
+ beq a3, zero, 1f
+ subu a2, a2, a3 # subtract from remaining count
+ LWHI v1, 0(a0) # copy 1, 2, or 3 bytes to align
+ addu a0, a0, a3
+ SWHI v1, 0(a1)
+ addu a1, a1, a3
+1:
+ and v1, a2, 3 # compute number of whole words left
+ subu a3, a2, v1
+ move a2, v1
+ addu a3, a3, a0 # compute ending address
+2:
+ lw v1, 0(a0) # copy words
+ addu a0, a0, 4
+ sw v1, 0(a1)
+ bne a0, a3, 2b
+ addu a1, a1, 4
+smallcpy:
+ ble a2, zero, 2f
+ addu a3, a2, a0 # compute ending address
+1:
+ lbu v1, 0(a0) # copy bytes
+ addu a0, a0, 1
+ sb v1, 0(a1)
+ bne a0, a3, 1b
+ addu a1, a1, 1 # MMU BUG? cannot do -1(a1) at 0x80000000!!
+2:
+ j ra
+ nop
+END(memcpy)
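
Editor's note: the only subtle part of the bcopy() entry code is the direction test. It copies backwards only when the destination starts strictly inside the source region, i.e. from < to < from + len; every other case, including overlaps where the destination precedes the source, is safe to copy forward. The same test in C, with addresses treated as plain integers:

        #include <stddef.h>
        #include <stdint.h>

        /* Equivalent of the t0/t1/t2 overlap test at the top of bcopy above. */
        static int
        must_copy_backwards(uintptr_t from, uintptr_t to, size_t len)
        {
                return (from < to && to < from + len);
        }
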
+
+/*
+ * memset(void *s1, int c, int len)
+ * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
+ */
+LEAF(memset)
+ .set noreorder
+ blt a2, 12, memsetsmallclr # small amount to clear?
+ move v0, a0 # save s1 for result
+
+ sll t1, a1, 8 # compute c << 8 in t1
+ or t1, t1, a1 # compute c << 8 | c in t1
+ sll t2, t1, 16 # shift that left 16
+ or t1, t2, t1 # or together
+
+ subu t0, zero, a0 # compute # bytes to word align address
+ and t0, t0, 3
+ beq t0, zero, 1f # skip if word aligned
+ subu a2, a2, t0 # subtract from remaining count
+ SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
+ addu a0, a0, t0
+1:
+ and v1, a2, 3 # compute number of whole words left
+ subu t0, a2, v1
+ subu a2, a2, t0
+ addu t0, t0, a0 # compute ending address
+2:
+ addu a0, a0, 4 # clear words
+#ifdef MIPS3_5900
+ nop
+ nop
+ nop
+ nop
+#endif
+ bne a0, t0, 2b # unrolling loop does not help
+ sw t1, -4(a0) # since we are limited by memory speed
+
+memsetsmallclr:
+ ble a2, zero, 2f
+ addu t0, a2, a0 # compute ending address
+1:
+ addu a0, a0, 1 # clear bytes
+#ifdef MIPS3_5900
+ nop
+ nop
+ nop
+ nop
+#endif
+ bne a0, t0, 1b
+ sb a1, -1(a0)
+2:
+ j ra
+ nop
+ .set reorder
+END(memset)
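
Editor's note: for its aligned main loop, memset() first replicates the fill byte into a full 32-bit word (the sll/or pairs into t1/t2 above) so each sw clears four bytes at once. The equivalent computation in C:

        #include <stdint.h>

        /* Build the 32-bit fill word used by memset's word loop. */
        static uint32_t
        fill_word(uint8_t c)
        {
                uint32_t t1 = ((uint32_t)c << 8) | c;   /* c in the two low bytes */

                return ((t1 << 16) | t1);               /* replicated into all four bytes */
        }
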
+
+/*
+ * bzero(s1, n)
+ */
+LEAF(bzero)
+ALEAF(blkclr)
+ .set noreorder
+ blt a1, 12, smallclr # small amount to clear?
+ subu a3, zero, a0 # compute # bytes to word align address
+ and a3, a3, 3
+ beq a3, zero, 1f # skip if word aligned
+ subu a1, a1, a3 # subtract from remaining count
+ SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
+ addu a0, a0, a3
+1:
+ and v0, a1, 3 # compute number of words left
+ subu a3, a1, v0
+ move a1, v0
+ addu a3, a3, a0 # compute ending address
+2:
+ addu a0, a0, 4 # clear words
+ bne a0, a3, 2b # unrolling loop does not help
+ sw zero, -4(a0) # since we are limited by memory speed
+smallclr:
+ ble a1, zero, 2f
+ addu a3, a1, a0 # compute ending address
+1:
+ addu a0, a0, 1 # clear bytes
+ bne a0, a3, 1b
+ sb zero, -1(a0)
+2:
+ j ra
+ nop
+END(bzero)
+
+
+/*
+ * bcmp(s1, s2, n)
+ * memcmp(s1, s2, n)
+ */
+LEAF(bcmp)
+ALEAF(memcmp)
+ .set noreorder
+ blt a2, 16, smallcmp # is it worth any trouble?
+ xor v0, a0, a1 # compare low two bits of addresses
+ and v0, v0, 3
+ subu a3, zero, a1 # compute # bytes to word align address
+ bne v0, zero, unalignedcmp # not possible to align addresses
+ and a3, a3, 3
+
+ beq a3, zero, 1f
+ subu a2, a2, a3 # subtract from remaining count
+ move v0, v1 # init v0,v1 so unmodified bytes match
+ LWHI v0, 0(a0) # read 1, 2, or 3 bytes
+ LWHI v1, 0(a1)
+ addu a1, a1, a3
+ bne v0, v1, nomatch
+ addu a0, a0, a3
+1:
+ and a3, a2, ~3 # compute number of whole words left
+ subu a2, a2, a3 # which has to be >= (16-3) & ~3
+ addu a3, a3, a0 # compute ending address
+2:
+ lw v0, 0(a0) # compare words
+ lw v1, 0(a1)
+ addu a0, a0, 4
+ bne v0, v1, nomatch
+ addu a1, a1, 4
+ bne a0, a3, 2b
+ nop
+ b smallcmp # finish remainder
+ nop
+unalignedcmp:
+ beq a3, zero, 2f
+ subu a2, a2, a3 # subtract from remaining count
+ addu a3, a3, a0 # compute ending address
+1:
+ lbu v0, 0(a0) # compare bytes until a1 word aligned
+ lbu v1, 0(a1)
+ addu a0, a0, 1
+ bne v0, v1, nomatch
+ addu a1, a1, 1
+ bne a0, a3, 1b
+ nop
+2:
+ and a3, a2, ~3 # compute number of whole words left
+ subu a2, a2, a3 # which has to be >= (16-3) & ~3
+ addu a3, a3, a0 # compute ending address
+3:
+ LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
+ LWLO v0, 3(a0)
+ lw v1, 0(a1)
+ addu a0, a0, 4
+ bne v0, v1, nomatch
+ addu a1, a1, 4
+ bne a0, a3, 3b
+ nop
+smallcmp:
+ ble a2, zero, match
+ addu a3, a2, a0 # compute ending address
+1:
+ lbu v0, 0(a0)
+ lbu v1, 0(a1)
+ addu a0, a0, 1
+ bne v0, v1, nomatch
+ addu a1, a1, 1
+ bne a0, a3, 1b
+ nop
+match:
+ j ra
+ move v0, zero
+nomatch:
+ j ra
+ li v0, 1
+END(bcmp)
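
Editor's note: once both pointers are word aligned, bcmp() compares four bytes per iteration and only drops to the byte loop for the tail; it returns 0 on match and 1 on mismatch. The aligned word loop ('2:' above) corresponds to:

        #include <stddef.h>
        #include <stdint.h>

        /* Word-at-a-time comparison matching the aligned loop in bcmp above.
         * Returns 0 if all words match, 1 otherwise. */
        static int
        words_differ(const uint32_t *s1, const uint32_t *s2, size_t nwords)
        {
                while (nwords-- > 0)
                        if (*s1++ != *s2++)
                                return (1);
                return (0);
        }
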
+
+
+/*
+ * bit = ffs(value)
+ */
+LEAF(ffs)
+ .set noreorder
+ beq a0, zero, 2f
+ move v0, zero
+1:
+ and v1, a0, 1 # bit set?
+ addu v0, v0, 1
+ beq v1, zero, 1b # no, continue
+ srl a0, a0, 1
+2:
+ j ra
+ nop
+END(ffs)
+
+LEAF(get_current_fp)
+ j ra
+ move v0, s8
+END(get_current_fp)
+
+LEAF(loadandclear)
+ .set noreorder
+1:
+ ll v0, 0(a0)
+ move t0, zero
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(loadandclear)
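
Editor's note: loadandclear() is the canonical ll/sc retry loop used by the atomics that follow: load-linked the word, attempt to store-conditional a new value (here zero), and retry if another writer intervened. With a compiler that provides the __atomic builtins (an assumption, not something this file relies on), the same operation is an atomic exchange with zero:

        #include <stdint.h>

        /* C equivalent of loadandclear(): atomically fetch *p and set it to 0. */
        static uint32_t
        loadandclear_c(volatile uint32_t *p)
        {
                return (__atomic_exchange_n(p, 0, __ATOMIC_RELAXED));
        }
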
+
+#if 0
+/*
+ * u_int32_t atomic_cmpset_32(u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+ * Atomically compare the value stored at p with cmpval
+ * and if the two values are equal, update value *p with
+ * newval. Return zero if compare failed, non-zero otherwise
+ *
+ */
+
+LEAF(atomic_cmpset_32)
+ .set noreorder
+1:
+ ll t0, 0(a0)
+ move v0, zero
+ bne t0, a1, 2f
+ move t1, a2
+ sc t1, 0(a0)
+ beq t1, zero, 1b
+ or v0, v0, 1
+2:
+ j ra
+ nop
+END(atomic_cmpset_32)
+
+/**
+ * u_int32_t
+ * atomic_readandclear_32(u_int32_t *a)
+ * {
+ * u_int32_t retval;
+ * retval = *a;
+ * *a = 0;
+ * }
+ */
+LEAF(atomic_readandclear_32)
+ .set noreorder
+1:
+ ll t0, 0(a0)
+ move t1, zero
+ move v0, t0
+ sc t1, 0(a0)
+ beq t1, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_readandclear_32)
+
+/**
+ * void
+ * atomic_set_32(u_int32_t *a, u_int32_t b)
+ * {
+ * *a |= b;
+ * }
+ */
+LEAF(atomic_set_32)
+ .set noreorder
+1:
+ ll t0, 0(a0)
+ or t0, t0, a1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_set_32)
+
+/**
+ * void
+ * atomic_add_32(uint32_t *a, uint32_t b)
+ * {
+ * *a += b;
+ * }
+ */
+LEAF(atomic_add_32)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+1:
+ ll t0, 0(a0)
+ addu t0, t0, a1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_add_32)
+
+/**
+ * void
+ * atomic_clear_32(u_int32_t *a, u_int32_t b)
+ * {
+ * *a &= ~b;
+ * }
+ */
+LEAF(atomic_clear_32)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+ nor a1, zero, a1
+1:
+ ll t0, 0(a0)
+ and t0, t0, a1 # clear the requested bits (a1 holds ~b)
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_clear_32)
+
+/**
+ * void
+ * atomic_subtract_32(uint32_t *a, uint32_t b)
+ * {
+ * *a -= b;
+ * }
+ */
+LEAF(atomic_subtract_32)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+1:
+ ll t0, 0(a0)
+ subu t0, t0, a1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_subtract_32)
+
+#endif
+
+/**
+ * void
+ * atomic_set_16(u_int16_t *a, u_int16_t b)
+ * {
+ * *a |= b;
+ * }
+ */
+LEAF(atomic_set_16)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+ andi a1, a1, 0xffff
+1:
+ ll t0, 0(a0)
+ or t0, t0, a1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_set_16)
+
+/**
+ * void
+ * atomic_clear_16(u_int16_t *a, u_int16_t b)
+ * {
+ * *a &= ~b;
+ * }
+ */
+LEAF(atomic_clear_16)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+ nor a1, zero, a1
+1:
+ ll t0, 0(a0)
+ move t1, t0
+ andi t1, t1, 0xffff # t1 has the original lower 16 bits
+ and t1, t1, a1 # t1 has the new lower 16 bits
+ srl t0, t0, 16 # preserve original top 16 bits
+ sll t0, t0, 16
+ or t0, t0, t1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_clear_16)
+
+
+/**
+ * void
+ * atomic_subtract_16(uint16_t *a, uint16_t b)
+ * {
+ * *a -= b;
+ * }
+ */
+LEAF(atomic_subtract_16)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+1:
+ ll t0, 0(a0)
+ move t1, t0
+ andi t1, t1, 0xffff # t1 has the original lower 16 bits
+ subu t1, t1, a1
+ andi t1, t1, 0xffff # t1 has the new lower 16 bits
+ srl t0, t0, 16 # preserve original top 16 bits
+ sll t0, t0, 16
+ or t0, t0, t1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_subtract_16)
+
+/**
+ * void
+ * atomic_add_16(uint16_t *a, uint16_t b)
+ * {
+ * *a += b;
+ * }
+ */
+LEAF(atomic_add_16)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+1:
+ ll t0, 0(a0)
+ move t1, t0
+ andi t1, t1, 0xffff # t1 has the original lower 16 bits
+ addu t1, t1, a1
+ andi t1, t1, 0xffff # t1 has the new lower 16 bits
+ srl t0, t0, 16 # preserve original top 16 bits
+ sll t0, t0, 16
+ or t0, t0, t1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_add_16)
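
Editor's note: the 16-bit (and, below, 8-bit) atomics cannot use ll/sc on the narrow type directly, so they operate on the aligned 32-bit word containing the target: round the pointer down, ll the whole word, rebuild it from the preserved top bits plus the updated low 16 bits, and sc it back. The merge step inside the loop of atomic_add_16 is equivalent to:

        #include <stdint.h>

        /* Rebuild the containing word: keep the top 16 bits, add b into the low 16. */
        static uint32_t
        merge_add16(uint32_t word, uint16_t b)
        {
                uint32_t lo = ((word & 0xffffU) + b) & 0xffffU;

                return ((word & 0xffff0000U) | lo);
        }
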
+
+/**
+ * void
+ * atomic_add_8(uint8_t *a, uint8_t b)
+ * {
+ * *a += b;
+ * }
+ */
+LEAF(atomic_add_8)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+1:
+ ll t0, 0(a0)
+ move t1, t0
+ andi t1, t1, 0xff # t1 has the original lower 8 bits
+ addu t1, t1, a1
+ andi t1, t1, 0xff # t1 has the new lower 8 bits
+ srl t0, t0, 8 # preserve original top 24 bits
+ sll t0, t0, 8
+ or t0, t0, t1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_add_8)
+
+
+/**
+ * void
+ * atomic_subtract_8(uint8_t *a, uint8_t b)
+ * {
+ * *a -= b;
+ * }
+ */
+LEAF(atomic_subtract_8)
+ .set noreorder
+ srl a0, a0, 2 # round down address to be 32-bit aligned
+ sll a0, a0, 2
+1:
+ ll t0, 0(a0)
+ move t1, t0
+ andi t1, t1, 0xff # t1 has the original lower 8 bits
+ subu t1, t1, a1
+ andi t1, t1, 0xff # t1 has the new lower 8 bits
+ srl t0, t0, 8 # preserve original top 24 bits
+ sll t0, t0, 8
+ or t0, t0, t1
+ sc t0, 0(a0)
+ beq t0, zero, 1b
+ nop
+ j ra
+ nop
+END(atomic_subtract_8)
+
+/*
+ * atomic 64-bit register read/write assembly language support routines.
+ */
+
+ .set noreorder # Noreorder is default style!
+#ifndef _MIPS_ARCH_XLR
+ .set mips3
+#endif
+
+LEAF(atomic_readandclear_64)
+1:
+ lld v0, 0(a0)
+ li t0, 0
+ scd t0, 0(a0)
+ beqz t0, 1b
+ nop
+ j ra
+ nop
+END(atomic_readandclear_64)
+
+LEAF(atomic_store_64)
+ mfc0 t1, COP_0_STATUS_REG
+ and t2, t1, ~SR_INT_ENAB
+ mtc0 t2, COP_0_STATUS_REG
+ nop
+ nop
+ nop
+ nop
+ ld t0, (a1)
+ nop
+ nop
+ sd t0, (a0)
+ nop
+ nop
+ mtc0 t1,COP_0_STATUS_REG
+ nop
+ nop
+ nop
+ nop
+ j ra
+ nop
+END(atomic_store_64)
+
+LEAF(atomic_load_64)
+ mfc0 t1, COP_0_STATUS_REG
+ and t2, t1, ~SR_INT_ENAB
+ mtc0 t2, COP_0_STATUS_REG
+ nop
+ nop
+ nop
+ nop
+ ld t0, (a0)
+ nop
+ nop
+ sd t0, (a1)
+ nop
+ nop
+ mtc0 t1,COP_0_STATUS_REG
+ nop
+ nop
+ nop
+ nop
+ j ra
+ nop
+END(atomic_load_64)
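
Editor's note: atomic_store_64() and atomic_load_64() move a 64-bit value with a single ld/sd pair while interrupts are masked on the local CPU; the masking guards against local interrupts and preemption only, not against other CPUs. A C sketch of the shape of atomic_store_64, reusing the Status accessors defined earlier in this file (prototypes restated so the sketch stands alone):

        #include <sys/types.h>
        #include <machine/cpuregs.h>            /* SR_INT_ENAB */

        extern uint32_t mips_cp0_status_read(void);
        extern void mips_cp0_status_write(uint32_t);

        /* Sketch of atomic_store_64(dst, src): one 64-bit move with interrupts masked. */
        static void
        store_64_sketch(volatile uint64_t *dst, const uint64_t *src)
        {
                uint32_t s = mips_cp0_status_read();    /* save Status */

                mips_cp0_status_write(s & ~SR_INT_ENAB); /* mask interrupts */
                *dst = *src;                            /* the ld/sd pair */
                mips_cp0_status_write(s);               /* restore Status */
        }
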
+
+#if defined(DDB) || defined(DEBUG)
+
+LEAF(kdbpeek)
+ li v1, DDBERR
+ and v0, a0, 3 # unaligned ?
+ GET_CPU_PCPU(t1)
+ lw t1, PC_CURPCB(t1)
+ bne v0, zero, 1f
+ sw v1, U_PCB_ONFAULT(t1)
+
+ lw v0, (a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t1)
+
+1:
+ LWHI v0, 0(a0)
+ LWLO v0, 3(a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t1)
+END(kdbpeek)
+
+ddberr:
+ jr ra
+ nop
+
+#if defined(DDB)
+LEAF(kdbpoke)
+ li v1, DDBERR
+ and v0, a0, 3 # unaligned ?
+ GET_CPU_PCPU(t1)
+ lw t1, PC_CURPCB(t1)
+ bne v0, zero, 1f
+ sw v1, U_PCB_ONFAULT(t1)
+
+ sw a1, (a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t1)
+
+1:
+ SWHI a1, 0(a0)
+ SWLO a1, 3(a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t1)
+END(kdbpoke)
+
+ .data
+ .globl esym
+esym: .word 0
+
+#ifndef _MIPS_ARCH_XLR
+ .set mips2
+#endif
+#endif /* DDB */
+#endif /* DDB || DEBUG */
+
+#ifndef MIPS_ISAIII
+#define STORE sw /* 32 bit mode regsave instruction */
+#define LOAD lw /* 32 bit mode regload instruction */
+#define RSIZE 4 /* 32 bit mode register size */
+#else
+#define STORE sd /* 64 bit mode regsave instruction */
+#define LOAD ld /* 64 bit mode regload instruction */
+#define RSIZE 8 /* 64 bit mode register size */
+#endif
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+ .text
+LEAF(breakpoint)
+ break BREAK_SOVER_VAL
+ jr ra
+ nop
+ END(breakpoint)
+
+LEAF(setjmp)
+ mfc0 v0, COP_0_STATUS_REG # Later the "real" spl value!
+ STORE s0, (RSIZE * PREG_S0)(a0)
+ STORE s1, (RSIZE * PREG_S1)(a0)
+ STORE s2, (RSIZE * PREG_S2)(a0)
+ STORE s3, (RSIZE * PREG_S3)(a0)
+ STORE s4, (RSIZE * PREG_S4)(a0)
+ STORE s5, (RSIZE * PREG_S5)(a0)
+ STORE s6, (RSIZE * PREG_S6)(a0)
+ STORE s7, (RSIZE * PREG_S7)(a0)
+ STORE s8, (RSIZE * PREG_S8)(a0)
+ STORE sp, (RSIZE * PREG_SP)(a0)
+ STORE ra, (RSIZE * PREG_RA)(a0)
+ STORE v0, (RSIZE * PREG_SR)(a0)
+ jr ra
+ li v0, 0 # setjmp return
+END(setjmp)
+
+LEAF(longjmp)
+ LOAD v0, (RSIZE * PREG_SR)(a0)
+ LOAD ra, (RSIZE * PREG_RA)(a0)
+ LOAD s0, (RSIZE * PREG_S0)(a0)
+ LOAD s1, (RSIZE * PREG_S1)(a0)
+ LOAD s2, (RSIZE * PREG_S2)(a0)
+ LOAD s3, (RSIZE * PREG_S3)(a0)
+ LOAD s4, (RSIZE * PREG_S4)(a0)
+ LOAD s5, (RSIZE * PREG_S5)(a0)
+ LOAD s6, (RSIZE * PREG_S6)(a0)
+ LOAD s7, (RSIZE * PREG_S7)(a0)
+ LOAD s8, (RSIZE * PREG_S8)(a0)
+ LOAD sp, (RSIZE * PREG_SP)(a0)
+ mtc0 v0, COP_0_STATUS_REG # Later the "real" spl value!
+ ITLBNOPFIX
+ jr ra
+ li v0, 1 # longjmp return
+END(longjmp)
+
+LEAF(fusufault)
+ GET_CPU_PCPU(t0)
+ lw t0, PC_CURTHREAD(t0)
+ lw t0, TD_PCB(t0)
+ sw zero, U_PCB_ONFAULT(t0)
+ li v0, -1
+ j ra
+END(fusufault)
+
+ /* Define a new MD function 'casuptr'. This atomically compares and sets
+    a pointer that is in user space. It will be used as the basic primitive
+    for a kernel-supported user-space lock implementation. */
+LEAF(casuptr)
+
+ blt a0, zero, fusufault /* make sure address is in user space */
+ nop
+
+ GET_CPU_PCPU(t1)
+ lw t1, PC_CURTHREAD(t1)
+ lw t1, TD_PCB(t1)
+
+ lw t2, fusufault
+ sw t2, U_PCB_ONFAULT(t1)
+1:
+ ll v0, 0(a0) /* try to load the old value */
+ bne v0, a1, 2f /* bail out if the value does not match */
+ move t0, a2 /* setup value to write */
+ sc t0, 0(a0) /* write if address still locked */
+ beq t0, zero, 1b /* if it failed, spin */
+2:
+ sw zero, U_PCB_ONFAULT(t1) /* clean up */
+ j ra
+END(casuptr)
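
Editor's note: casuptr() combines the two mechanisms seen earlier: the onfault protection of the fu*/su* routines (here armed with fusufault) and an ll/sc compare-exchange, returning the value it observed so the caller can tell whether the swap happened. Ignoring the fault-protection plumbing, the intended semantics are:

        #include <stdint.h>

        /* Semantics sketch of casuptr(): swap in new only if *p still equals cmp,
         * and return the observed old value (fault protection elided). */
        intptr_t
        casuptr_sketch(volatile intptr_t *p, intptr_t cmp, intptr_t new)
        {
                intptr_t old = *p;              /* ll */

                if (old == cmp)
                        *p = new;               /* sc, retried if it fails */
                return (old);
        }
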
+
+
+#ifdef TARGET_OCTEON
+/*
+ * void octeon_enable_shadow(void)
+ * turns on access to CC and CCRes
+ */
+LEAF(octeon_enable_shadow)
+ li t1, 0x0000000f
+ mtc0 t1, COP_0_INFO
+ jr ra
+ nop
+END(octeon_enable_shadow)
+
+
+LEAF(octeon_get_shadow)
+ mfc0 v0, COP_0_INFO
+ jr ra
+ nop
+END(octeon_get_shadow)
+
+/*
+ * octeon_set_control(addr, uint32_t val)
+ */
+LEAF(octeon_set_control)
+ .set mips64r2
+ or t1, a1, zero
+/* dmfc0 a1, 9, 7*/
+ .word 0x40254807
+ sd a1, 0(a0)
+ or a1, t1, zero
+/* dmtc0 a1, 9, 7*/
+ .word 0x40a54807
+ jr ra
+ nop
+ .set mips0
+END(octeon_set_control)
+
+/*
+ * octeon_get_control(addr)
+ */
+LEAF(octeon_get_control)
+ .set mips64r2
+/* dmfc0 a1, 9, 7 */
+ .word 0x40254807
+ sd a1, 0(a0)
+ jr ra
+ nop
+ .set mips0
+END(octeon_get_control)
+#endif
diff --git a/sys/mips/mips/swtch.S b/sys/mips/mips/swtch.S
new file mode 100644
index 0000000..84585cb
--- /dev/null
+++ b/sys/mips/mips/swtch.S
@@ -0,0 +1,650 @@
+/* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Digital Equipment Corporation and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
+ * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
+ * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
+ * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
+ *
+ * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
+ * JNPR: swtch.S,v 1.6.2.1 2007/09/10 10:36:50 girish
+ * $FreeBSD$
+ */
+
+/*
+ * Contains code that is executed first at boot time, plus
+ * assembly language support routines.
+ */
+
+#include "opt_cputype.h"
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/cpuregs.h>
+#include <machine/regnum.h>
+#include <machine/pte.h>
+
+#include "assym.s"
+
+#if defined(ISA_MIPS32)
+#undef WITH_64BIT_CP0
+#elif defined(ISA_MIPS64)
+#define WITH_64BIT_CP0
+#elif defined(ISA_MIPS3)
+#define WITH_64BIT_CP0
+#else
+#error "Please write the code for this ISA"
+#endif
+
+#ifdef WITH_64BIT_CP0
+#define _SLL dsll
+#define _SRL dsrl
+#define _MFC0 dmfc0
+#define _MTC0 dmtc0
+#define WIRED_SHIFT 34
+#define PAGE_SHIFT 34
+#else
+#define _SLL sll
+#define _SRL srl
+#define _MFC0 mfc0
+#define _MTC0 mtc0
+#define WIRED_SHIFT 2
+#define PAGE_SHIFT 2
+#endif
+ .set noreorder # Noreorder is default style!
+#if defined(ISA_MIPS32)
+ .set mips32
+#elif defined(ISA_MIPS64)
+ .set mips64
+#elif defined(ISA_MIPS3)
+ .set mips3
+#endif
+
+#if defined(ISA_MIPS32)
+#define STORE sw /* 32 bit mode regsave instruction */
+#define LOAD lw /* 32 bit mode regload instruction */
+#define RSIZE 4 /* 32 bit mode register size */
+#define STORE_FP swc1 /* 32 bit mode fp regsave instruction */
+#define LOAD_FP lwc1 /* 32 bit mode fp regload instruction */
+#define FP_RSIZE 4 /* 32 bit mode fp register size */
+#else
+#define STORE sd /* 64 bit mode regsave instruction */
+#define LOAD ld /* 64 bit mode regload instruction */
+#define RSIZE 8 /* 64 bit mode register size */
+#define STORE_FP sdc1 /* 64 bit mode fp regsave instruction */
+#define LOAD_FP ldc1 /* 64 bit mode fp regload instruction */
+#define FP_RSIZE 8 /* 64 bit mode fp register size */
+#endif
+
+/*
+ * FREEBSD_DEVELOPERS_FIXME
+ * Some MIPS CPUs may need nop delays between executing CP0 instructions.
+ */
+
+#if 1
+#define HAZARD_DELAY nop ; nop ; nop ; nop
+#else
+#define HAZARD_DELAY
+#endif
+
+#define SAVE_U_PCB_REG(reg, offs, base) \
+ STORE reg, U_PCB_REGS + (RSIZE * offs) (base)
+
+#define RESTORE_U_PCB_REG(reg, offs, base) \
+ LOAD reg, U_PCB_REGS + (RSIZE * offs) (base)
+
+#define SAVE_U_PCB_FPREG(reg, offs, base) \
+ STORE_FP reg, U_PCB_FPREGS + (FP_RSIZE * offs) (base)
+
+#define RESTORE_U_PCB_FPREG(reg, offs, base) \
+ LOAD_FP reg, U_PCB_FPREGS + (FP_RSIZE * offs) (base)
+
+#define SAVE_U_PCB_FPSR(reg, offs, base) \
+ STORE reg, U_PCB_FPREGS + (FP_RSIZE * offs) (base)
+
+#define RESTORE_U_PCB_FPSR(reg, offs, base) \
+ LOAD reg, U_PCB_FPREGS + (FP_RSIZE * offs) (base)
+
+#define SAVE_U_PCB_CONTEXT(reg, offs, base) \
+ STORE reg, U_PCB_CONTEXT + (RSIZE * offs) (base)
+
+#define RESTORE_U_PCB_CONTEXT(reg, offs, base) \
+ LOAD reg, U_PCB_CONTEXT + (RSIZE * offs) (base)
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+/*
+ * Setup for and return to user.
+ */
+LEAF(fork_trampoline)
+ move a0,s0
+ move a1,s1
+ jal _C_LABEL(fork_exit)
+ move a2,s2 #BDSlot
+
+ DO_AST
+
+/*
+ * Since interrupts are enabled at this point, we use a1 instead of
+ * k0 or k1 to store the PCB pointer. This is because k0 and k1
+ * are not preserved across interrupts.
+ */
+ GET_CPU_PCPU(a1)
+ lw a1, PC_CURPCB(a1)
+1:
+
+ mfc0 v0, COP_0_STATUS_REG # read the status register
+ or v0, SR_EXL # set the exception level bit
+ and v0, ~(SR_INT_ENAB) # and disable interrupts
+ mtc0 v0, COP_0_STATUS_REG # write it back
+ nop
+ nop
+ nop
+ nop
+ .set noat
+ move k1, a1
+ RESTORE_U_PCB_REG(t0, MULLO, k1)
+ RESTORE_U_PCB_REG(t1, MULHI, k1)
+ mtlo t0
+ mthi t1
+ RESTORE_U_PCB_REG(a0, PC, k1)
+ RESTORE_U_PCB_REG(AT, AST, k1)
+ RESTORE_U_PCB_REG(v0, V0, k1)
+ _MTC0 a0, COP_0_EXC_PC # set return address
+
+/*
+ * The use of k1 for storing the PCB pointer must be done only
+ * after interrupts are disabled. Otherwise it will get overwritten
+ * by the interrupt code.
+ */
+ RESTORE_U_PCB_REG(v1, V1, k1)
+ RESTORE_U_PCB_REG(a0, A0, k1)
+ RESTORE_U_PCB_REG(a1, A1, k1)
+ RESTORE_U_PCB_REG(a2, A2, k1)
+ RESTORE_U_PCB_REG(a3, A3, k1)
+ RESTORE_U_PCB_REG(t0, T0, k1)
+ RESTORE_U_PCB_REG(t1, T1, k1)
+ RESTORE_U_PCB_REG(t2, T2, k1)
+ RESTORE_U_PCB_REG(t3, T3, k1)
+ RESTORE_U_PCB_REG(t4, T4, k1)
+ RESTORE_U_PCB_REG(t5, T5, k1)
+ RESTORE_U_PCB_REG(t6, T6, k1)
+ RESTORE_U_PCB_REG(t7, T7, k1)
+ RESTORE_U_PCB_REG(s0, S0, k1)
+ RESTORE_U_PCB_REG(s1, S1, k1)
+ RESTORE_U_PCB_REG(s2, S2, k1)
+ RESTORE_U_PCB_REG(s3, S3, k1)
+ RESTORE_U_PCB_REG(s4, S4, k1)
+ RESTORE_U_PCB_REG(s5, S5, k1)
+ RESTORE_U_PCB_REG(s6, S6, k1)
+ RESTORE_U_PCB_REG(s7, S7, k1)
+ RESTORE_U_PCB_REG(t8, T8, k1)
+ RESTORE_U_PCB_REG(t9, T9, k1)
+ RESTORE_U_PCB_REG(k0, SR, k1)
+ RESTORE_U_PCB_REG(gp, GP, k1)
+ RESTORE_U_PCB_REG(s8, S8, k1)
+ RESTORE_U_PCB_REG(ra, RA, k1)
+ RESTORE_U_PCB_REG(sp, SP, k1)
+ mtc0 k0, COP_0_STATUS_REG # switch to user mode (when eret...)
+ HAZARD_DELAY
+ sync
+ eret
+ .set at
+END(fork_trampoline)
+
+/*
+ * Update pcb, saving current processor state.
+ * Note: this only works if pcbp != curproc's pcb since
+ * cpu_switch() will copy over pcb_context.
+ *
+ * savectx(struct pcb *pcbp);
+ */
+LEAF(savectx)
+ SAVE_U_PCB_CONTEXT(s0, PREG_S0, a0)
+ SAVE_U_PCB_CONTEXT(s1, PREG_S1, a0)
+ SAVE_U_PCB_CONTEXT(s2, PREG_S2, a0)
+ SAVE_U_PCB_CONTEXT(s3, PREG_S3, a0)
+ mfc0 v0, COP_0_STATUS_REG
+ SAVE_U_PCB_CONTEXT(s4, PREG_S4, a0)
+ SAVE_U_PCB_CONTEXT(s5, PREG_S5, a0)
+ SAVE_U_PCB_CONTEXT(s6, PREG_S6, a0)
+ SAVE_U_PCB_CONTEXT(s7, PREG_S7, a0)
+ SAVE_U_PCB_CONTEXT(sp, PREG_SP, a0)
+ SAVE_U_PCB_CONTEXT(s8, PREG_S8, a0)
+ SAVE_U_PCB_CONTEXT(ra, PREG_RA, a0)
+ SAVE_U_PCB_CONTEXT(v0, PREG_SR, a0)
+ SAVE_U_PCB_CONTEXT(gp, PREG_GP, a0)
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * In case there are CPU-specific registers that need
+ * to be saved with the other registers do so here.
+ */
+ j ra
+ move v0, zero
+END(savectx)
+
+
+KSEG0TEXT_START;
+
+NON_LEAF(mips_cpu_throw, STAND_FRAME_SIZE, ra)
+ mfc0 t0, COP_0_STATUS_REG # t0 = saved status register
+ nop
+ nop
+ and a3, t0, ~(SR_INT_ENAB)
+ mtc0 a3, COP_0_STATUS_REG # Disable all interrupts
+ ITLBNOPFIX
+ j mips_sw1 # We're not interested in old
+ # thread's context, so jump
+ # right to action
+ nop # BDSLOT
+END(mips_cpu_throw)
+
+/*
+ *XXX Fixme: should be written to new interface that requires lock
+ * storage. We fake it for now.
+ * cpu_switch(struct thread *old, struct thread *new);
+ * Find the highest priority process and resume it.
+ */
+NON_LEAF(cpu_switch, STAND_FRAME_SIZE, ra)
+ mfc0 t0, COP_0_STATUS_REG # t0 = saved status register
+ nop
+ nop
+ and a3, t0, ~(SR_INT_ENAB)
+ mtc0 a3, COP_0_STATUS_REG # Disable all interrupts
+ ITLBNOPFIX
+ beqz a0, mips_sw1
+ move a3, a0
+ lw a0, TD_PCB(a0) # load PCB addr of curproc
+ SAVE_U_PCB_CONTEXT(sp, PREG_SP, a0) # save old sp
+ subu sp, sp, STAND_FRAME_SIZE
+ sw ra, STAND_RA_OFFSET(sp)
+ .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
+ SAVE_U_PCB_CONTEXT(s0, PREG_S0, a0) # do a 'savectx()'
+ SAVE_U_PCB_CONTEXT(s1, PREG_S1, a0)
+ SAVE_U_PCB_CONTEXT(s2, PREG_S2, a0)
+ SAVE_U_PCB_CONTEXT(s3, PREG_S3, a0)
+ SAVE_U_PCB_CONTEXT(s4, PREG_S4, a0)
+ SAVE_U_PCB_CONTEXT(s5, PREG_S5, a0)
+ SAVE_U_PCB_CONTEXT(s6, PREG_S6, a0)
+ SAVE_U_PCB_CONTEXT(s7, PREG_S7, a0)
+ SAVE_U_PCB_CONTEXT(s8, PREG_S8, a0)
+ SAVE_U_PCB_CONTEXT(ra, PREG_RA, a0) # save return address
+ SAVE_U_PCB_CONTEXT(t0, PREG_SR, a0) # save status register
+ SAVE_U_PCB_CONTEXT(gp, PREG_GP, a0)
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * In case there are CPU-specific registers that need
+ * to be saved with the other registers do so here.
+ */
+
+ sw a3, TD_LOCK(a0) # Switchout td_lock
+
+mips_sw1:
+#if defined(SMP) && defined(SCHED_ULE)
+ la t0, _C_LABEL(blocked_lock)
+blocked_loop:
+ lw t1, TD_LOCK(a1)
+ beq t0, t1, blocked_loop
+ nop
+#endif
+ move s7, a1 # Store newthread
+/*
+ * Switch to new context.
+ */
+ GET_CPU_PCPU(a3)
+ sw a1, PC_CURTHREAD(a3)
+ lw a2, TD_PCB(a1)
+ sw a2, PC_CURPCB(a3)
+ lw v0, TD_REALKSTACK(a1)
+ li s0, (MIPS_KSEG2_START+VM_KERNEL_ALLOC_OFFSET) # If Uarea addr is below kseg2,
+ bltu v0, s0, sw2 # no need to insert in TLB.
+ lw a1, TD_UPTE+0(s7) # a1 = first u. pte
+ lw a2, TD_UPTE+4(s7) # a2 = 2nd u. pte
+ and s0, v0, PTE_ODDPG
+ beq s0, zero, entry0
+ nop
+
+ PANIC_KSEG0("USPACE sat on odd page boundary", t1)
+
+/*
+ * Wire down the USPACE of the new process in TLB entry #0. Before doing
+ * so, check whether the target USPACE is already mapped elsewhere in the
+ * TLB and, if so, invalidate that entry.
+ * NOTE: This is hard coded to UPAGES == 2.
+ * Also, there should be no TLB faults at this point.
+ */
+entry0:
+ mtc0 v0, COP_0_TLB_HI # VPN = va
+ HAZARD_DELAY
+ tlbp # probe VPN
+ HAZARD_DELAY
+ mfc0 s0, COP_0_TLB_INDEX
+ nop
+pgm:
+ bltz s0, entry0set
+ li t1, MIPS_KSEG0_START + 0x0fff0000 # invalidate tlb entry
+ sll s0, PAGE_SHIFT + 1
+ addu t1, s0
+ mtc0 t1, COP_0_TLB_HI
+ mtc0 zero, COP_0_TLB_LO0
+ mtc0 zero, COP_0_TLB_LO1
+ HAZARD_DELAY
+ tlbwi
+ HAZARD_DELAY
+ mtc0 v0, COP_0_TLB_HI # set VPN again
+entry0set:
+/* SMP!! - Works only for unshared TLB case - i.e. no v-cpus */
+ mtc0 zero, COP_0_TLB_INDEX # TLB entry #0
+# or a1, PG_G
+ mtc0 a1, COP_0_TLB_LO0 # upte[0]
+# or a2, PG_G
+ mtc0 a2, COP_0_TLB_LO1 # upte[1]
+ HAZARD_DELAY
+ tlbwi # set TLB entry #0
+ HAZARD_DELAY
+/*
+ * Now running on new u struct.
+ */
+sw2:
+ la t1, _C_LABEL(pmap_activate) # t1 = address of pmap_activate
+ jalr t1 # s7 = new proc pointer
+ move a0, s7 # BDSLOT
+/*
+ * Restore registers and return.
+ */
+ lw a0, TD_PCB(s7)
+ RESTORE_U_PCB_CONTEXT(gp, PREG_GP, a0)
+ RESTORE_U_PCB_CONTEXT(v0, PREG_SR, a0) # restore kernel context
+ RESTORE_U_PCB_CONTEXT(ra, PREG_RA, a0)
+ RESTORE_U_PCB_CONTEXT(s0, PREG_S0, a0)
+ RESTORE_U_PCB_CONTEXT(s1, PREG_S1, a0)
+ RESTORE_U_PCB_CONTEXT(s2, PREG_S2, a0)
+ RESTORE_U_PCB_CONTEXT(s3, PREG_S3, a0)
+ RESTORE_U_PCB_CONTEXT(s4, PREG_S4, a0)
+ RESTORE_U_PCB_CONTEXT(s5, PREG_S5, a0)
+ RESTORE_U_PCB_CONTEXT(s6, PREG_S6, a0)
+ RESTORE_U_PCB_CONTEXT(s7, PREG_S7, a0)
+ RESTORE_U_PCB_CONTEXT(sp, PREG_SP, a0)
+ RESTORE_U_PCB_CONTEXT(s8, PREG_S8, a0)
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * In case there are CPU-specific registers that need
+ * to be restored with the other registers do so here.
+ */
+ mtc0 v0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ j ra
+ nop
+END(cpu_switch)
+KSEG0TEXT_END;
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsSwitchFPState --
+ *
+ * Save the current state into 'from' and restore it from 'to'.
+ *
+ * MipsSwitchFPState(from, to)
+ * struct thread *from;
+ * struct trapframe *to;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(MipsSwitchFPState)
+ mfc0 t1, COP_0_STATUS_REG # Save old SR
+ li t0, SR_COP_1_BIT # enable the coprocessor
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ beq a0, zero, 1f # skip save if NULL pointer
+ nop
+/*
+ * First read out the status register to make sure that all FP operations
+ * have completed.
+ */
+ lw a0, TD_PCB(a0) # get pointer to pcb for proc
+ cfc1 t0, FPC_CSR # stall til FP done
+ cfc1 t0, FPC_CSR # now get status
+ li t3, ~SR_COP_1_BIT
+ RESTORE_U_PCB_REG(t2, PS, a0) # get CPU status register
+ SAVE_U_PCB_FPSR(t0, FSR_NUM, a0) # save FP status
+ and t2, t2, t3 # clear COP_1 enable bit
+ SAVE_U_PCB_REG(t2, PS, a0) # save new status register
+/*
+ * Save the floating point registers.
+ */
+ SAVE_U_PCB_FPREG($f0, F0_NUM, a0)
+ SAVE_U_PCB_FPREG($f1, F1_NUM, a0)
+ SAVE_U_PCB_FPREG($f2, F2_NUM, a0)
+ SAVE_U_PCB_FPREG($f3, F3_NUM, a0)
+ SAVE_U_PCB_FPREG($f4, F4_NUM, a0)
+ SAVE_U_PCB_FPREG($f5, F5_NUM, a0)
+ SAVE_U_PCB_FPREG($f6, F6_NUM, a0)
+ SAVE_U_PCB_FPREG($f7, F7_NUM, a0)
+ SAVE_U_PCB_FPREG($f8, F8_NUM, a0)
+ SAVE_U_PCB_FPREG($f9, F9_NUM, a0)
+ SAVE_U_PCB_FPREG($f10, F10_NUM, a0)
+ SAVE_U_PCB_FPREG($f11, F11_NUM, a0)
+ SAVE_U_PCB_FPREG($f12, F12_NUM, a0)
+ SAVE_U_PCB_FPREG($f13, F13_NUM, a0)
+ SAVE_U_PCB_FPREG($f14, F14_NUM, a0)
+ SAVE_U_PCB_FPREG($f15, F15_NUM, a0)
+ SAVE_U_PCB_FPREG($f16, F16_NUM, a0)
+ SAVE_U_PCB_FPREG($f17, F17_NUM, a0)
+ SAVE_U_PCB_FPREG($f18, F18_NUM, a0)
+ SAVE_U_PCB_FPREG($f19, F19_NUM, a0)
+ SAVE_U_PCB_FPREG($f20, F20_NUM, a0)
+ SAVE_U_PCB_FPREG($f21, F21_NUM, a0)
+ SAVE_U_PCB_FPREG($f22, F22_NUM, a0)
+ SAVE_U_PCB_FPREG($f23, F23_NUM, a0)
+ SAVE_U_PCB_FPREG($f24, F24_NUM, a0)
+ SAVE_U_PCB_FPREG($f25, F25_NUM, a0)
+ SAVE_U_PCB_FPREG($f26, F26_NUM, a0)
+ SAVE_U_PCB_FPREG($f27, F27_NUM, a0)
+ SAVE_U_PCB_FPREG($f28, F28_NUM, a0)
+ SAVE_U_PCB_FPREG($f29, F29_NUM, a0)
+ SAVE_U_PCB_FPREG($f30, F30_NUM, a0)
+ SAVE_U_PCB_FPREG($f31, F31_NUM, a0)
+
+1:
+/*
+ * Restore the floating point registers.
+ */
+ RESTORE_U_PCB_FPSR(t0, FSR_NUM, a1) # get status register
+ RESTORE_U_PCB_FPREG($f0, F0_NUM, a1)
+ RESTORE_U_PCB_FPREG($f1, F1_NUM, a1)
+ RESTORE_U_PCB_FPREG($f2, F2_NUM, a1)
+ RESTORE_U_PCB_FPREG($f3, F3_NUM, a1)
+ RESTORE_U_PCB_FPREG($f4, F4_NUM, a1)
+ RESTORE_U_PCB_FPREG($f5, F5_NUM, a1)
+ RESTORE_U_PCB_FPREG($f6, F6_NUM, a1)
+ RESTORE_U_PCB_FPREG($f7, F7_NUM, a1)
+ RESTORE_U_PCB_FPREG($f8, F8_NUM, a1)
+ RESTORE_U_PCB_FPREG($f9, F9_NUM, a1)
+ RESTORE_U_PCB_FPREG($f10, F10_NUM, a1)
+ RESTORE_U_PCB_FPREG($f11, F11_NUM, a1)
+ RESTORE_U_PCB_FPREG($f12, F12_NUM, a1)
+ RESTORE_U_PCB_FPREG($f13, F13_NUM, a1)
+ RESTORE_U_PCB_FPREG($f14, F14_NUM, a1)
+ RESTORE_U_PCB_FPREG($f15, F15_NUM, a1)
+ RESTORE_U_PCB_FPREG($f16, F16_NUM, a1)
+ RESTORE_U_PCB_FPREG($f17, F17_NUM, a1)
+ RESTORE_U_PCB_FPREG($f18, F18_NUM, a1)
+ RESTORE_U_PCB_FPREG($f19, F19_NUM, a1)
+ RESTORE_U_PCB_FPREG($f20, F20_NUM, a1)
+ RESTORE_U_PCB_FPREG($f21, F21_NUM, a1)
+ RESTORE_U_PCB_FPREG($f22, F22_NUM, a1)
+ RESTORE_U_PCB_FPREG($f23, F23_NUM, a1)
+ RESTORE_U_PCB_FPREG($f24, F24_NUM, a1)
+ RESTORE_U_PCB_FPREG($f25, F25_NUM, a1)
+ RESTORE_U_PCB_FPREG($f26, F26_NUM, a1)
+ RESTORE_U_PCB_FPREG($f27, F27_NUM, a1)
+ RESTORE_U_PCB_FPREG($f28, F28_NUM, a1)
+ RESTORE_U_PCB_FPREG($f29, F29_NUM, a1)
+ RESTORE_U_PCB_FPREG($f30, F30_NUM, a1)
+ RESTORE_U_PCB_FPREG($f31, F31_NUM, a1)
+
+ and t0, t0, ~FPC_EXCEPTION_BITS
+ ctc1 t0, FPC_CSR
+ nop
+
+ mtc0 t1, COP_0_STATUS_REG # Restore the status register.
+ ITLBNOPFIX
+ j ra
+ nop
+END(MipsSwitchFPState)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsSaveCurFPState --
+ *
+ * Save the current floating point coprocessor state.
+ *
+ * MipsSaveCurFPState(td)
+ * struct thread *td;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The per-CPU fpcurthread pointer is cleared.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(MipsSaveCurFPState)
+ lw a0, TD_PCB(a0) # get pointer to pcb for thread
+ mfc0 t1, COP_0_STATUS_REG # Disable interrupts and
+ li t0, SR_COP_1_BIT # enable the coprocessor
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ GET_CPU_PCPU(a1)
+ sw zero, PC_FPCURTHREAD(a1) # indicate state has been saved
+/*
+ * First read out the status register to make sure that all FP operations
+ * have completed.
+ */
+ RESTORE_U_PCB_REG(t2, PS, a0) # get CPU status register
+ li t3, ~SR_COP_1_BIT
+ and t2, t2, t3 # clear COP_1 enable bit
+ cfc1 t0, FPC_CSR # stall til FP done
+ cfc1 t0, FPC_CSR # now get status
+ SAVE_U_PCB_REG(t2, PS, a0) # save new status register
+ SAVE_U_PCB_FPSR(t0, FSR_NUM, a0) # save FP status
+/*
+ * Save the floating point registers.
+ */
+ SAVE_U_PCB_FPREG($f0, F0_NUM, a0)
+ SAVE_U_PCB_FPREG($f1, F1_NUM, a0)
+ SAVE_U_PCB_FPREG($f2, F2_NUM, a0)
+ SAVE_U_PCB_FPREG($f3, F3_NUM, a0)
+ SAVE_U_PCB_FPREG($f4, F4_NUM, a0)
+ SAVE_U_PCB_FPREG($f5, F5_NUM, a0)
+ SAVE_U_PCB_FPREG($f6, F6_NUM, a0)
+ SAVE_U_PCB_FPREG($f7, F7_NUM, a0)
+ SAVE_U_PCB_FPREG($f8, F8_NUM, a0)
+ SAVE_U_PCB_FPREG($f9, F9_NUM, a0)
+ SAVE_U_PCB_FPREG($f10, F10_NUM, a0)
+ SAVE_U_PCB_FPREG($f11, F11_NUM, a0)
+ SAVE_U_PCB_FPREG($f12, F12_NUM, a0)
+ SAVE_U_PCB_FPREG($f13, F13_NUM, a0)
+ SAVE_U_PCB_FPREG($f14, F14_NUM, a0)
+ SAVE_U_PCB_FPREG($f15, F15_NUM, a0)
+ SAVE_U_PCB_FPREG($f16, F16_NUM, a0)
+ SAVE_U_PCB_FPREG($f17, F17_NUM, a0)
+ SAVE_U_PCB_FPREG($f18, F18_NUM, a0)
+ SAVE_U_PCB_FPREG($f19, F19_NUM, a0)
+ SAVE_U_PCB_FPREG($f20, F20_NUM, a0)
+ SAVE_U_PCB_FPREG($f21, F21_NUM, a0)
+ SAVE_U_PCB_FPREG($f22, F22_NUM, a0)
+ SAVE_U_PCB_FPREG($f23, F23_NUM, a0)
+ SAVE_U_PCB_FPREG($f24, F24_NUM, a0)
+ SAVE_U_PCB_FPREG($f25, F25_NUM, a0)
+ SAVE_U_PCB_FPREG($f26, F26_NUM, a0)
+ SAVE_U_PCB_FPREG($f27, F27_NUM, a0)
+ SAVE_U_PCB_FPREG($f28, F28_NUM, a0)
+ SAVE_U_PCB_FPREG($f29, F29_NUM, a0)
+ SAVE_U_PCB_FPREG($f30, F30_NUM, a0)
+ SAVE_U_PCB_FPREG($f31, F31_NUM, a0)
+
+ mtc0 t1, COP_0_STATUS_REG # Restore the status register.
+ ITLBNOPFIX
+ j ra
+ nop
+END(MipsSaveCurFPState)
+
+/*
+ * When starting init, call this to configure the process for user
+ * mode. This will be inherited by other processes.
+ */
+LEAF_NOPROFILE(prepare_usermode)
+ j ra
+ nop
+END(prepare_usermode)
+
+
+/*
+ * This code is copied to the user's stack for returning from signal handlers
+ * (see sendsig() and sigreturn()). We have to compute the address
+ * of the sigcontext struct for the sigreturn call.
+ */
+ .globl _C_LABEL(sigcode)
+_C_LABEL(sigcode):
+ addu a0, sp, SIGF_UC # address of ucontext
+ li v0, SYS_sigreturn
+# sigreturn (ucp)
+ syscall
+ break 0 # just in case sigreturn fails
+ .globl _C_LABEL(esigcode)
+_C_LABEL(esigcode):
+
+ .data
+ .globl szsigcode
+szsigcode:
+ .long esigcode-sigcode
+ .text
diff --git a/sys/mips/mips/tick.c b/sys/mips/mips/tick.c
new file mode 100644
index 0000000..0f8ba26
--- /dev/null
+++ b/sys/mips/mips/tick.c
@@ -0,0 +1,369 @@
+/*-
+ * Copyright (c) 2006-2007 Bruce M. Simpson.
+ * Copyright (c) 2003-2004 Juli Mallett.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Simple driver for the 32-bit interval counter built into all
+ * MIPS32 CPUs.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/power.h>
+#include <sys/smp.h>
+#include <sys/time.h>
+#include <sys/timetc.h>
+
+#include <machine/clock.h>
+#include <machine/locore.h>
+#include <machine/md_var.h>
+
+uint64_t counter_freq;
+uint64_t cycles_per_tick;
+uint64_t cycles_per_usec;
+uint64_t cycles_per_sec;
+uint64_t cycles_per_hz;
+
+u_int32_t counter_upper = 0;
+u_int32_t counter_lower_last = 0;
+int tick_started = 0;
+
+struct clk_ticks
+{
+ u_long hard_ticks;
+ u_long stat_ticks;
+ u_long prof_ticks;
+ /*
+ * pad for cache line alignment of pcpu info
+ * cache-line-size - number of used bytes
+ */
+ char pad[32-(3*sizeof (u_long))];
+} static pcpu_ticks[MAXCPU];
+
+/*
+ * Device methods
+ */
+static int clock_probe(device_t);
+static void clock_identify(driver_t *, device_t);
+static int clock_attach(device_t);
+static unsigned counter_get_timecount(struct timecounter *tc);
+
+static struct timecounter counter_timecounter = {
+ counter_get_timecount, /* get_timecount */
+ 0, /* no poll_pps */
+ 0xffffffffu, /* counter_mask */
+ 0, /* frequency */
+ "MIPS32", /* name */
+ 800, /* quality (adjusted in code) */
+};
+
+void
+mips_timer_early_init(uint64_t clock_hz)
+{
+ /* Initialize clock early so that we can use DELAY sooner */
+ counter_freq = clock_hz;
+ cycles_per_usec = (clock_hz / (1000 * 1000));
+}
+
+void
+cpu_initclocks(void)
+{
+
+ if (!tick_started) {
+ tc_init(&counter_timecounter);
+ tick_started++;
+ }
+}
+
+static uint64_t
+tick_ticker(void)
+{
+ uint64_t ret;
+ uint32_t ticktock;
+
+ /*
+ * XXX: MIPS64 platforms can read 64-bits of counter directly.
+ * Also: the tc code is supposed to cope with things wrapping
+ * from the time counter, so I'm not sure why all these hoops
+ * are even necessary.
+ */
+ ticktock = mips_rd_count();
+ critical_enter();
+ if (ticktock < counter_lower_last)
+ counter_upper++;
+ counter_lower_last = ticktock;
+ critical_exit();
+
+ ret = ((uint64_t) counter_upper << 32) | counter_lower_last;
+ return (ret);
+}
+
+void
+mips_timer_init_params(uint64_t platform_counter_freq, int double_count)
+{
+
+ /*
+	 * XXX: Do not use printf here: this function is called before cninit,
+	 * since the 8250 uart code may use DELAY.
+ */
+ counter_freq = platform_counter_freq;
+ cycles_per_tick = counter_freq / 1000;
+ if (double_count)
+ cycles_per_tick *= 2;
+ cycles_per_hz = counter_freq / hz;
+ cycles_per_usec = counter_freq / (1 * 1000 * 1000);
+ cycles_per_sec = counter_freq ;
+
+ counter_timecounter.tc_frequency = counter_freq;
+ /*
+ * XXX: Some MIPS32 cores update the Count register only every two
+ * pipeline cycles.
+ * XXX2: We can read this from the hardware register on some
+ * systems. Need to investigate.
+ */
+ if (double_count != 0) {
+ cycles_per_hz /= 2;
+ cycles_per_usec /= 2;
+ cycles_per_sec /= 2;
+ }
+ printf("hz=%d cyl_per_hz:%jd cyl_per_usec:%jd freq:%jd cyl_per_hz:%jd cyl_per_sec:%jd\n",
+ hz,
+ cycles_per_tick,
+ cycles_per_usec,
+ counter_freq,
+ cycles_per_hz,
+ cycles_per_sec
+ );
+ set_cputicker(tick_ticker, counter_freq, 1);
+}
+
+static int
+sysctl_machdep_counter_freq(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ uint64_t freq;
+
+ if (counter_timecounter.tc_frequency == 0)
+ return (EOPNOTSUPP);
+ freq = counter_freq;
+ error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
+ if (error == 0 && req->newptr != NULL) {
+ counter_freq = freq;
+ counter_timecounter.tc_frequency = counter_freq;
+ }
+ return (error);
+}
+
+SYSCTL_PROC(_machdep, OID_AUTO, counter_freq, CTLTYPE_QUAD | CTLFLAG_RW,
+ 0, sizeof(u_int), sysctl_machdep_counter_freq, "IU", "");
+
+static unsigned
+counter_get_timecount(struct timecounter *tc)
+{
+
+ return (mips_rd_count());
+}
+
+
+void
+cpu_startprofclock(void)
+{
+ /* nothing to do */
+}
+
+void
+cpu_stopprofclock(void)
+{
+ /* nothing to do */
+}
+
+/*
+ * Wait for about n microseconds (at least!).
+ */
+void
+DELAY(int n)
+{
+ uint32_t cur, last, delta, usecs;
+
+ /*
+ * This works by polling the timer and counting the number of
+ * microseconds that go by.
+ */
+ last = mips_rd_count();
+ delta = usecs = 0;
+
+ while (n > usecs) {
+ cur = mips_rd_count();
+
+ /* Check to see if the timer has wrapped around. */
+ if (cur < last)
+ delta += (cur + (cycles_per_hz - last));
+ else
+ delta += (cur - last);
+
+ last = cur;
+
+ if (delta >= cycles_per_usec) {
+ usecs += delta / cycles_per_usec;
+ delta %= cycles_per_usec;
+ }
+ }
+}
+
+#ifdef TARGET_OCTEON
+int64_t wheel_run = 0;
+
+void octeon_led_run_wheel(void);
+
+#endif
+/*
+ * Device section of file below
+ */
+static int
+clock_intr(void *arg)
+{
+ struct clk_ticks *cpu_ticks;
+ struct trapframe *tf;
+ uint32_t ltick;
+ /*
+ * Set next clock edge.
+ */
+ ltick = mips_rd_count();
+ mips_wr_compare(ltick + cycles_per_tick);
+ cpu_ticks = &pcpu_ticks[PCPU_GET(cpuid)];
+ critical_enter();
+ if (ltick < counter_lower_last) {
+ counter_upper++;
+ counter_lower_last = ltick;
+ }
+ /*
+	 * Magic: because the handler was set up with a NULL arg, we get passed the trapframe (tf) instead.
+ */
+ tf = (struct trapframe *)arg;
+
+ /* Fire hardclock at hz. */
+ cpu_ticks->hard_ticks += cycles_per_tick;
+ if (cpu_ticks->hard_ticks >= cycles_per_hz) {
+ cpu_ticks->hard_ticks -= cycles_per_hz;
+ if (PCPU_GET(cpuid) == 0)
+ hardclock(USERMODE(tf->sr), tf->pc);
+ else
+ hardclock_cpu(USERMODE(tf->sr));
+ }
+ /* Fire statclock at stathz. */
+ cpu_ticks->stat_ticks += stathz;
+ if (cpu_ticks->stat_ticks >= cycles_per_hz) {
+ cpu_ticks->stat_ticks -= cycles_per_hz;
+ statclock(USERMODE(tf->sr));
+ }
+
+ /* Fire profclock at profhz, but only when needed. */
+ cpu_ticks->prof_ticks += profhz;
+ if (cpu_ticks->prof_ticks >= cycles_per_hz) {
+ cpu_ticks->prof_ticks -= cycles_per_hz;
+ if (profprocs != 0)
+ profclock(USERMODE(tf->sr), tf->pc);
+ }
+ critical_exit();
+#ifdef TARGET_OCTEON
+ /* Run the FreeBSD display once every hz ticks */
+ wheel_run += cycles_per_tick;
+ if (wheel_run >= cycles_per_sec) {
+ wheel_run = 0;
+ octeon_led_run_wheel();
+ }
+#endif
+ return (FILTER_HANDLED);
+}
+
+static int
+clock_probe(device_t dev)
+{
+
+ if (device_get_unit(dev) != 0)
+ panic("can't attach more clocks");
+
+ device_set_desc(dev, "Generic MIPS32 ticker");
+ return (0);
+}
+
+static void
+clock_identify(driver_t * drv, device_t parent)
+{
+
+ BUS_ADD_CHILD(parent, 0, "clock", 0);
+}
+
+static int
+clock_attach(device_t dev)
+{
+ struct resource *irq;
+ int error;
+ int rid;
+
+ rid = 0;
+ irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 5, 5, 1, RF_ACTIVE);
+ if (irq == NULL) {
+ device_printf(dev, "failed to allocate irq\n");
+ return (ENXIO);
+ }
+ error = bus_setup_intr(dev, irq, INTR_TYPE_CLK, clock_intr, NULL,
+ NULL, NULL);
+
+ if (error != 0) {
+ device_printf(dev, "bus_setup_intr returned %d\n", error);
+ return (error);
+ }
+ mips_wr_compare(mips_rd_count() + counter_freq / hz);
+ return (0);
+}
+
+static device_method_t clock_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, clock_probe),
+ DEVMETHOD(device_identify, clock_identify),
+ DEVMETHOD(device_attach, clock_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ {0, 0}
+};
+
+static driver_t clock_driver = {
+ "clock", clock_methods, 32
+};
+
+static devclass_t clock_devclass;
+
+DRIVER_MODULE(clock, nexus, clock_driver, clock_devclass, 0, 0);
diff --git a/sys/mips/mips/tlb.S b/sys/mips/mips/tlb.S
new file mode 100644
index 0000000..28636b1
--- /dev/null
+++ b/sys/mips/mips/tlb.S
@@ -0,0 +1,509 @@
+/* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Digital Equipment Corporation and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
+ * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
+ * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
+ * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
+ * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
+ *
+ * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
+ * JNPR: tlb.S,v 1.1.4.2 2007/09/10 09:02:05 girish
+ * $FreeBSD$
+ */
+
+/*
+ * Contains code that is executed first at boot time, plus
+ * assembly language support routines.
+ */
+
+#include "opt_cputype.h"
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/cpuregs.h>
+#include <machine/regnum.h>
+#include <machine/pte.h>
+
+#include "assym.s"
+
+#if defined(ISA_MIPS32)
+#undef WITH_64BIT_CP0
+#elif defined(ISA_MIPS64)
+#define WITH_64BIT_CP0
+#elif defined(ISA_MIPS3)
+#define WITH_64BIT_CP0
+#else
+#error "Please write the code for this ISA"
+#endif
+
+#ifdef WITH_64BIT_CP0
+#define _SLL dsll
+#define _SRL dsrl
+#define _MFC0 dmfc0
+#define _MTC0 dmtc0
+#define WIRED_SHIFT 34
+#define PAGE_SHIFT 34
+#else
+#define _SLL sll
+#define _SRL srl
+#define _MFC0 mfc0
+#define _MTC0 mtc0
+#define WIRED_SHIFT 2
+#define PAGE_SHIFT 2
+#endif
+ .set noreorder # Noreorder is default style!
+#if defined(ISA_MIPS32)
+ .set mips32
+#elif defined(ISA_MIPS64)
+ .set mips64
+#elif defined(ISA_MIPS3)
+ .set mips3
+#endif
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+/*
+ * FREEBSD_DEVELOPERS_FIXME
+ * Some MIPS CPUs may need nop delays between executing CP0 instructions.
+ */
+#define MIPS_CPU_NOP_DELAY nop;nop;nop;nop;
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_TLBWriteIndexed(unsigned index, tlb *tlb);
+ *
+ * Write the given entry into the TLB at the given index.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * TLB entry set.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_TLBWriteIndexed)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ lw a2, 8(a1)
+ lw a3, 12(a1)
+ _MFC0 t0, COP_0_TLB_HI # Save the current PID.
+
+ _MTC0 a2, COP_0_TLB_LO0 # Set up entry low0.
+ _MTC0 a3, COP_0_TLB_LO1 # Set up entry low1.
+ lw a2, 0(a1)
+ lw a3, 4(a1)
+ mtc0 a0, COP_0_TLB_INDEX # Set the index.
+ _MTC0 a2, COP_0_TLB_PG_MASK # Set up entry mask.
+ _MTC0 a3, COP_0_TLB_HI # Set up entry high.
+ MIPS_CPU_NOP_DELAY
+ tlbwi # Write the TLB
+ MIPS_CPU_NOP_DELAY
+
+ _MTC0 t0, COP_0_TLB_HI # Restore the PID.
+ nop
+ _MTC0 zero, COP_0_TLB_PG_MASK # Default mask value.
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(Mips_TLBWriteIndexed)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_SetPID(int pid);
+ *
+ * Write the given pid into the TLB pid reg.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * PID set in the entry hi register.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_SetPID)
+ _MTC0 a0, COP_0_TLB_HI # Write the hi reg value
+ nop # required for QED5230
+ nop # required for QED5230
+ j ra
+ nop
+END(Mips_SetPID)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_SetWIRED(int wired);
+ *
+ * Write the given value into the TLB wired reg.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * WIRED set in the wired register.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_SetWIRED)
+ mtc0 a0, COP_0_TLB_WIRED
+ j ra
+ nop
+END(Mips_SetWIRED)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_GetWIRED(void);
+ *
+ * Get the value from the TLB wired reg.
+ *
+ * Results:
+ * Value of wired reg.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_GetWIRED)
+ mfc0 v0, COP_0_TLB_WIRED
+ j ra
+ nop
+END(Mips_GetWIRED)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_TLBFlush(tlbsize);
+ *
+ * Flush the "random" entries from the TLB.
+ * Uses "wired" register to determine what register to start with.
+ * Arg "tlbsize" is the number of entries to flush.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The TLB is flushed.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_TLBFlush)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ mfc0 t1, COP_0_TLB_WIRED
+ li v0, MIPS_KSEG3_START + 0x0fff0000 # invalid address
+ _MFC0 t0, COP_0_TLB_HI # Save the PID
+
+ _MTC0 v0, COP_0_TLB_HI # Mark entry high as invalid
+ _MTC0 zero, COP_0_TLB_LO0 # Zero out low entry0.
+ _MTC0 zero, COP_0_TLB_LO1 # Zero out low entry1.
+ mtc0 zero, COP_0_TLB_PG_MASK # Zero out mask entry.
+/*
+ * Flush each entry from the first unwired slot (t1) up to the upper bound (a0).
+ */
+1:
+ mtc0 t1, COP_0_TLB_INDEX # Set the index register.
+ ITLBNOPFIX
+ _MTC0 t0, COP_0_TLB_HI # Restore the PID
+ addu t1, t1, 1 # Increment index.
+ addu t0, t0, 8 * 1024
+ MIPS_CPU_NOP_DELAY
+ tlbwi # Write the TLB entry.
+ MIPS_CPU_NOP_DELAY
+ bne t1, a0, 1b
+ nop
+
+ _MTC0 t0, COP_0_TLB_HI # Restore the PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(Mips_TLBFlush)
+
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_TLBFlushAddr(unsigned TLBhi);
+ *
+ * Flush any TLB entries for the given address and TLB PID.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The process's page is flushed from the TLB.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_TLBFlushAddr)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ li v0, (PTE_HVPN | PTE_ASID)
+ and a0, a0, v0 # Make sure it is a valid hi value.
+ _MFC0 t0, COP_0_TLB_HI # Get current PID
+ mfc0 t3, COP_0_TLB_PG_MASK # Save current pgMask
+ _MTC0 a0, COP_0_TLB_HI # look for addr & PID
+ MIPS_CPU_NOP_DELAY
+ tlbp # Probe for the entry.
+ MIPS_CPU_NOP_DELAY
+ mfc0 v0, COP_0_TLB_INDEX # See what we got
+ li t1, MIPS_KSEG0_START + 0x0fff0000
+ bltz v0, 1f # index < 0 => !found
+ nop
+	# Load an invalid entry; each TLB entry should have its own bogus
+	# address, calculated by the following expression:
+	#	MIPS_KSEG0_START + 0x0fff0000 + 2 * i * PAGE_SIZE;
+	# Using one bogus value for every TLB entry might cause a MCHECK
+	# exception.
+ sll v0, PAGE_SHIFT + 1
+ addu t1, v0
+ _MTC0 t1, COP_0_TLB_HI # Mark entry high as invalid
+
+ _MTC0 zero, COP_0_TLB_LO0 # Zero out low entry.
+ _MTC0 zero, COP_0_TLB_LO1 # Zero out low entry.
+ MIPS_CPU_NOP_DELAY
+ tlbwi
+ MIPS_CPU_NOP_DELAY
+1:
+ _MTC0 t0, COP_0_TLB_HI # restore PID
+ mtc0 t3, COP_0_TLB_PG_MASK # Restore pgMask
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(Mips_TLBFlushAddr)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_TLBUpdate(unsigned virpageadr, lowregx);
+ *
+ *	Update the matching TLB entry if one is found for "highreg";
+ *	otherwise, enter the data as a new entry.
+ *
+ * Results:
+ *	< 0 if a new entry was loaded, >= 0 if an existing entry was updated.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_TLBUpdate)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ and t1, a0, 0x1000 # t1 = Even/Odd flag
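+	# Each TLB entry maps an even/odd pair of 4K virtual pages, so
+	# bit 12 of the virtual address selects whether EntryLo0 or
+	# EntryLo1 holds the PTE being updated.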
+ li v0, (PTE_HVPN | PTE_ASID)
+ and a0, a0, v0
+ _MFC0 t0, COP_0_TLB_HI # Save current PID
+ _MTC0 a0, COP_0_TLB_HI # Init high reg
+ and a2, a1, PTE_G # Copy global bit
+ MIPS_CPU_NOP_DELAY
+ tlbp # Probe for the entry.
+ _SLL a1, a1, WIRED_SHIFT
+ _SRL a1, a1, WIRED_SHIFT
+ nop
+ mfc0 v0, COP_0_TLB_INDEX # See what we got
+ bne t1, zero, 2f # Decide even odd
+# EVEN
+ nop
+ bltz v0, 1f # index < 0 => !found
+ MIPS_CPU_NOP_DELAY
+
+ tlbr # update, read entry first
+ MIPS_CPU_NOP_DELAY
+ _MTC0 a1, COP_0_TLB_LO0 # init low reg0.
+ MIPS_CPU_NOP_DELAY
+ tlbwi # update slot found
+ b 4f
+ nop
+1:
+ mtc0 zero, COP_0_TLB_PG_MASK # init mask.
+ _MTC0 a0, COP_0_TLB_HI # init high reg.
+ _MTC0 a1, COP_0_TLB_LO0 # init low reg0.
+ _MTC0 a2, COP_0_TLB_LO1 # init low reg1.
+ MIPS_CPU_NOP_DELAY
+ tlbwr # enter into a random slot
+ MIPS_CPU_NOP_DELAY
+ b 4f
+ nop
+# ODD
+2:
+ nop
+ bltz v0, 3f # index < 0 => !found
+ MIPS_CPU_NOP_DELAY
+
+ tlbr # read the entry first
+ MIPS_CPU_NOP_DELAY
+ _MTC0 a1, COP_0_TLB_LO1 # init low reg1.
+ MIPS_CPU_NOP_DELAY
+ tlbwi # update slot found
+ MIPS_CPU_NOP_DELAY
+ b 4f
+ nop
+3:
+ mtc0 zero, COP_0_TLB_PG_MASK # init mask.
+ _MTC0 a0, COP_0_TLB_HI # init high reg.
+ _MTC0 a2, COP_0_TLB_LO0 # init low reg0.
+ _MTC0 a1, COP_0_TLB_LO1 # init low reg1.
+ MIPS_CPU_NOP_DELAY
+ tlbwr # enter into a random slot
+
+4:				# Make sure the pipeline is clear
+ MIPS_CPU_NOP_DELAY
+ _MTC0 t0, COP_0_TLB_HI # restore PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(Mips_TLBUpdate)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_TLBRead(unsigned entry, struct tlb *tlb);
+ *
+ * Read the TLB entry.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * tlb will contain the TLB entry found.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_TLBRead)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ _MFC0 t0, COP_0_TLB_HI # Get current PID
+
+ mtc0 a0, COP_0_TLB_INDEX # Set the index register
+ MIPS_CPU_NOP_DELAY
+ tlbr # Read from the TLB
+ MIPS_CPU_NOP_DELAY
+	mfc0	t2, COP_0_TLB_PG_MASK	# fetch the page mask
+ _MFC0 t3, COP_0_TLB_HI # fetch the hi entry
+ _MFC0 t4, COP_0_TLB_LO0 # See what we got
+ _MFC0 t5, COP_0_TLB_LO1 # See what we got
+ _MTC0 t0, COP_0_TLB_HI # restore PID
+ MIPS_CPU_NOP_DELAY
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
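+	# Store the pagemask, entryhi, entrylo0 and entrylo1 values into
+	# the caller's struct tlb at offsets 0, 4, 8 and 12.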
+ sw t2, 0(a1)
+ sw t3, 4(a1)
+ sw t4, 8(a1)
+ j ra
+ sw t5, 12(a1)
+END(Mips_TLBRead)
+
+/*--------------------------------------------------------------------------
+ *
+ * Mips_TLBGetPID(void);
+ *
+ * Results:
+ * Returns the current TLB pid reg.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(Mips_TLBGetPID)
+ _MFC0 v0, COP_0_TLB_HI # get PID
+ j ra
+ and v0, v0, VMTLB_PID # mask off PID
+END(Mips_TLBGetPID)
+
+
+
+/*--------------------------------------------------------------------------
+ *
+ * void mips_TBIAP(int sizeofTLB);
+ *
+ *	Invalidate TLB entries belonging to per-process user address
+ *	spaces while leaving entries for kernel space that are marked
+ *	global intact.
+ *
+ *--------------------------------------------------------------------------
+ */
+LEAF(mips_TBIAP)
+ mfc0 v1, COP_0_STATUS_REG # save status register
+ mtc0 zero, COP_0_STATUS_REG # disable interrupts
+
+ _MFC0 t4, COP_0_TLB_HI # Get current PID
+ move t2, a0
+ mfc0 t1, COP_0_TLB_WIRED
+ li v0, MIPS_KSEG0_START + 0x0fff0000 # invalid address
+ mfc0 t3, COP_0_TLB_PG_MASK # save current pgMask
+
+ # do {} while (t1 < t2)
+1:
+ mtc0 t1, COP_0_TLB_INDEX # set index
+ MIPS_CPU_NOP_DELAY
+ tlbr # obtain an entry
+ MIPS_CPU_NOP_DELAY
+ _MFC0 a0, COP_0_TLB_LO1
+ and a0, a0, PTE_G # check to see it has G bit
+ bnez a0, 2f
+ nop
+
+ _MTC0 v0, COP_0_TLB_HI # make entryHi invalid
+ _MTC0 zero, COP_0_TLB_LO0 # zero out entryLo0
+ _MTC0 zero, COP_0_TLB_LO1 # zero out entryLo1
+ mtc0 zero, COP_0_TLB_PG_MASK # zero out mask entry
+ MIPS_CPU_NOP_DELAY
+ tlbwi # invalidate the TLB entry
+2:
+ addu t1, t1, 1
+ addu v0, 1 << (PAGE_SHIFT + 1)
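+	# Stepping the bogus address by 2 * PAGE_SIZE gives every
+	# invalidated entry a distinct VPN2; duplicate TLB entries could
+	# raise a machine check (see Mips_TLBFlushAddr above).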
+ bne t1, t2, 1b
+ nop
+
+ _MTC0 t4, COP_0_TLB_HI # restore PID
+ mtc0 t3, COP_0_TLB_PG_MASK # restore pgMask
+ MIPS_CPU_NOP_DELAY
+ mtc0 v1, COP_0_STATUS_REG # restore status register
+ j ra # new ASID will be set soon
+ nop
+ .set mips2
+END(mips_TBIAP)
diff --git a/sys/mips/mips/trap.c b/sys/mips/mips/trap.c
new file mode 100644
index 0000000..015a28a
--- /dev/null
+++ b/sys/mips/mips/trap.c
@@ -0,0 +1,1815 @@
+/* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */
+/* tracked to 1.23 */
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: trap.c 1.32 91/04/06
+ *
+ * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
+ * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+#include "opt_global.h"
+
+#define NO_REG_DEFS 1 /* Prevent asm.h from including regdef.h */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysent.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/lock.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_param.h>
+#include <sys/vmmeter.h>
+#include <sys/ptrace.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/pioctl.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+#include <sys/bus.h>
+#ifdef KTRACE
+#include <sys/ktrace.h>
+#endif
+#include <net/netisr.h>
+
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+#include <machine/pte.h>
+#include <machine/pmap.h>
+#include <machine/mips_opcode.h>
+#include <machine/frame.h>
+#include <machine/regnum.h>
+#include <machine/rm7000.h>
+#include <machine/archtype.h>
+#include <machine/asm.h>
+
+#ifdef DDB
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h>
+#include <ddb/ddb.h>
+#include <sys/kdb.h>
+#endif
+
+#ifdef TRAP_DEBUG
+int trap_debug = 1;
+
+#endif
+
+extern unsigned onfault_table[];
+
+extern void MipsKernGenException(void);
+extern void MipsUserGenException(void);
+extern void MipsKernIntr(void);
+extern void MipsUserIntr(void);
+extern void MipsTLBInvalidException(void);
+extern void MipsKernTLBInvalidException(void);
+extern void MipsUserTLBInvalidException(void);
+extern void MipsTLBMissException(void);
+static void log_bad_page_fault(char *, struct trapframe *, int);
+static void log_frame_dump(struct trapframe *frame);
+static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);
+
+#ifdef TRAP_DEBUG
+static void trap_frame_dump(struct trapframe *frame);
+
+#endif
+extern char edata[];
+
+void (*machExceptionTable[]) (void)= {
+/*
+ * The kernel exception handlers.
+ */
+ MipsKernIntr, /* external interrupt */
+ MipsKernGenException, /* TLB modification */
+ MipsKernTLBInvalidException, /* TLB miss (load or instr. fetch) */
+ MipsKernTLBInvalidException, /* TLB miss (store) */
+ MipsKernGenException, /* address error (load or I-fetch) */
+ MipsKernGenException, /* address error (store) */
+ MipsKernGenException, /* bus error (I-fetch) */
+ MipsKernGenException, /* bus error (load or store) */
+ MipsKernGenException, /* system call */
+ MipsKernGenException, /* breakpoint */
+ MipsKernGenException, /* reserved instruction */
+ MipsKernGenException, /* coprocessor unusable */
+ MipsKernGenException, /* arithmetic overflow */
+ MipsKernGenException, /* trap exception */
+	MipsKernGenException,	/* virtual coherence exception inst */
+ MipsKernGenException, /* floating point exception */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* watch exception */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+ MipsKernGenException, /* reserved */
+	MipsKernGenException,	/* virtual coherence exception data */
+/*
+ * The user exception handlers.
+ */
+ MipsUserIntr, /* 0 */
+ MipsUserGenException, /* 1 */
+ MipsUserTLBInvalidException, /* 2 */
+ MipsUserTLBInvalidException, /* 3 */
+ MipsUserGenException, /* 4 */
+ MipsUserGenException, /* 5 */
+ MipsUserGenException, /* 6 */
+ MipsUserGenException, /* 7 */
+ MipsUserGenException, /* 8 */
+ MipsUserGenException, /* 9 */
+ MipsUserGenException, /* 10 */
+ MipsUserGenException, /* 11 */
+ MipsUserGenException, /* 12 */
+ MipsUserGenException, /* 13 */
+ MipsUserGenException, /* 14 */
+ MipsUserGenException, /* 15 */
+ MipsUserGenException, /* 16 */
+ MipsUserGenException, /* 17 */
+ MipsUserGenException, /* 18 */
+ MipsUserGenException, /* 19 */
+ MipsUserGenException, /* 20 */
+ MipsUserGenException, /* 21 */
+ MipsUserGenException, /* 22 */
+ MipsUserGenException, /* 23 */
+ MipsUserGenException, /* 24 */
+ MipsUserGenException, /* 25 */
+ MipsUserGenException, /* 26 */
+ MipsUserGenException, /* 27 */
+ MipsUserGenException, /* 28 */
+ MipsUserGenException, /* 29 */
+	MipsUserGenException,	/* 30 */
+ MipsUserGenException, /* 31 */
+};
+
+char *trap_type[] = {
+ "external interrupt",
+ "TLB modification",
+ "TLB miss (load or instr. fetch)",
+ "TLB miss (store)",
+ "address error (load or I-fetch)",
+ "address error (store)",
+ "bus error (I-fetch)",
+ "bus error (load or store)",
+ "system call",
+ "breakpoint",
+ "reserved instruction",
+ "coprocessor unusable",
+ "arithmetic overflow",
+ "trap",
+	"virtual coherency instruction",
+ "floating point",
+ "reserved 16",
+ "reserved 17",
+ "reserved 18",
+ "reserved 19",
+ "reserved 20",
+ "reserved 21",
+ "reserved 22",
+ "watch",
+ "reserved 24",
+ "reserved 25",
+ "reserved 26",
+ "reserved 27",
+ "reserved 28",
+ "reserved 29",
+ "reserved 30",
+	"virtual coherency data",
+};
+
+#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
+struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
+
+#endif
+
+#if defined(DDB) || defined(DEBUG)
+void stacktrace(struct trapframe *);
+void logstacktrace(struct trapframe *);
+int kdbpeek(int *);
+
+/* extern functions printed by name in stack backtraces */
+extern void MipsTLBMiss(void);
+extern void MipsUserSyscallException(void);
+extern char _locore[];
+extern char _locoreEnd[];
+
+#endif /* DDB || DEBUG */
+
+extern void MipsSwitchFPState(struct thread *, struct trapframe *);
+extern void MipsFPTrap(u_int, u_int, u_int);
+
+u_int trap(struct trapframe *);
+u_int MipsEmulateBranch(struct trapframe *, int, int, u_int);
+
+#define KERNLAND(x) ((int)(x) < 0)
+#define DELAYBRANCH(x) ((int)(x) < 0)
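+/*
+ * Both macros test the sign bit: 32-bit kernel (KSEG) addresses have
+ * bit 31 set, and the BD (branch delay) flag is bit 31 of the cause
+ * register.
+ */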
+
+/*
+ * kdbpeekD(addr) - skip one word starting at 'addr', then read the second word
+ */
+#define kdbpeekD(addr) kdbpeek(((int *)(addr)) + 1)
+int rrs_debug = 0;
+
+/*
+ * MIPS load/store access type
+ */
+enum {
+ MIPS_LHU_ACCESS = 1,
+ MIPS_LH_ACCESS,
+ MIPS_LWU_ACCESS,
+ MIPS_LW_ACCESS,
+ MIPS_LD_ACCESS,
+ MIPS_SH_ACCESS,
+ MIPS_SW_ACCESS,
+ MIPS_SD_ACCESS
+};
+
+char *access_name[] = {
+ "Load Halfword Unsigned",
+ "Load Halfword",
+ "Load Word Unsigned",
+ "Load Word",
+ "Load Doubleword",
+ "Store Halfword",
+ "Store Word",
+ "Store Doubleword"
+};
+
+
+static int allow_unaligned_acc = 1;
+
+SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
+ &allow_unaligned_acc, 0, "Allow unaligned accesses");
+
+static int emulate_unaligned_access(struct trapframe *frame);
+
+extern char *syscallnames[];
+
+/*
+ * Handle an exception.
+ * Called from MipsKernGenException() or MipsUserGenException()
+ * when a processor trap occurs.
+ * In the case of a kernel trap, we return the PC at which to resume if
+ * p->p_addr->u_pcb.pcb_onfault is set; otherwise, we return the old PC.
+ */
+u_int
+trap(trapframe)
+ struct trapframe *trapframe;
+{
+ int type, usermode;
+ int i = 0;
+ unsigned ucode = 0;
+ struct thread *td = curthread;
+ struct proc *p = curproc;
+ vm_prot_t ftype;
+ pt_entry_t *pte;
+ unsigned int entry;
+ pmap_t pmap;
+ int quad_syscall = 0;
+ int access_type;
+ ksiginfo_t ksi;
+ char *msg = NULL;
+ register_t addr = 0;
+
+ trapdebug_enter(trapframe, 0);
+
+ type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
+ if (USERMODE(trapframe->sr)) {
+ type |= T_USER;
+ usermode = 1;
+ } else {
+ usermode = 0;
+ }
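+	/*
+	 * T_USER is folded into the type so that the switch below can
+	 * select the user-mode variant of each case.
+	 */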
+
+ /*
+	 * Enable hardware interrupts if they were on before the trap.  If
+	 * they were off, disable all interrupts (splhigh) so we don't
+	 * accidentally enable them when doing a spllower().
+ */
+/*XXX do in locore? */
+ if (trapframe->sr & SR_INT_ENAB) {
+ set_intr_mask(~(trapframe->sr & ALL_INT_MASK));
+ enableintr();
+ } else {
+ disableintr();
+ }
+
+#ifdef TRAP_DEBUG
+ if (trap_debug) {
+ static vm_offset_t last_badvaddr = 0;
+ static vm_offset_t this_badvaddr = 0;
+ static int count = 0;
+ u_int32_t pid;
+
+ printf("trap type %x (%s - ", type,
+ trap_type[type & (~T_USER)]);
+
+ if (type & T_USER)
+ printf("user mode)\n");
+ else
+ printf("kernel mode)\n");
+
+#ifdef SMP
+ printf("cpuid = %d\n", PCPU_GET(cpuid));
+#endif
+ MachTLBGetPID(pid);
+ printf("badaddr = %p, pc = %p, ra = %p, sp = %p, sr = 0x%x, pid = %d, ASID = 0x%x\n",
+ trapframe->badvaddr, trapframe->pc, trapframe->ra,
+ trapframe->sp, trapframe->sr,
+ (curproc ? curproc->p_pid : -1), pid);
+
+ switch (type & ~T_USER) {
+ case T_TLB_MOD:
+ case T_TLB_LD_MISS:
+ case T_TLB_ST_MISS:
+ case T_ADDR_ERR_LD:
+ case T_ADDR_ERR_ST:
+ this_badvaddr = trapframe->badvaddr;
+ break;
+ case T_SYSCALL:
+ this_badvaddr = trapframe->ra;
+ break;
+ default:
+ this_badvaddr = trapframe->pc;
+ break;
+ }
+ if ((last_badvaddr == this_badvaddr) &&
+ ((type & ~T_USER) != T_SYSCALL)) {
+ if (++count == 3) {
+ trap_frame_dump(trapframe);
+ panic("too many faults at %p\n", last_badvaddr);
+ }
+ } else {
+ last_badvaddr = this_badvaddr;
+ count = 0;
+ }
+ }
+#endif
+ switch (type) {
+ case T_MCHECK:
+#ifdef DDB
+ kdb_trap(type, 0, trapframe);
+#endif
+ panic("MCHECK\n");
+ break;
+ case T_TLB_MOD:
+ /* check for kernel address */
+ if (KERNLAND(trapframe->badvaddr)) {
+ vm_offset_t pa;
+
+ PMAP_LOCK(kernel_pmap);
+ if (!(pte = pmap_segmap(kernel_pmap,
+ trapframe->badvaddr)))
+ panic("trap: ktlbmod: invalid segmap");
+ pte += (trapframe->badvaddr >> PGSHIFT) & (NPTEPG - 1);
+ entry = *pte;
+#ifdef SMP
+ /* It is possible that some other CPU changed m-bit */
+ if (!mips_pg_v(entry) || (entry & mips_pg_m_bit())) {
+ trapframe->badvaddr &= ~PGOFSET;
+ pmap_update_page(kernel_pmap,
+ trapframe->badvaddr, entry);
+ PMAP_UNLOCK(kernel_pmap);
+ return (trapframe->pc);
+ }
+#else
+ if (!mips_pg_v(entry) || (entry & mips_pg_m_bit()))
+ panic("trap: ktlbmod: invalid pte");
+#endif
+ if (entry & mips_pg_ro_bit()) {
+ /* write to read only page in the kernel */
+ ftype = VM_PROT_WRITE;
+ PMAP_UNLOCK(kernel_pmap);
+ goto kernel_fault;
+ }
+ entry |= mips_pg_m_bit();
+ *pte = entry;
+ trapframe->badvaddr &= ~PGOFSET;
+ pmap_update_page(kernel_pmap, trapframe->badvaddr, entry);
+ pa = mips_tlbpfn_to_paddr(entry);
+ if (!page_is_managed(pa))
+ panic("trap: ktlbmod: unmanaged page");
+ pmap_set_modified(pa);
+ PMAP_UNLOCK(kernel_pmap);
+ return (trapframe->pc);
+ }
+ /* FALLTHROUGH */
+
+ case T_TLB_MOD + T_USER:
+ {
+ vm_offset_t pa;
+
+ pmap = &p->p_vmspace->vm_pmap;
+
+ PMAP_LOCK(pmap);
+ if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
+ panic("trap: utlbmod: invalid segmap");
+ pte += (trapframe->badvaddr >> PGSHIFT) & (NPTEPG - 1);
+ entry = *pte;
+#ifdef SMP
+ /* It is possible that some other CPU changed m-bit */
+ if (!mips_pg_v(entry) || (entry & mips_pg_m_bit())) {
+ trapframe->badvaddr = (trapframe->badvaddr & ~PGOFSET);
+ pmap_update_page(pmap, trapframe->badvaddr, entry);
+ PMAP_UNLOCK(pmap);
+ goto out;
+ }
+#else
+ if (!mips_pg_v(entry) || (entry & mips_pg_m_bit())) {
+ panic("trap: utlbmod: invalid pte");
+ }
+#endif
+
+ if (entry & mips_pg_ro_bit()) {
+ /* write to read only page */
+ ftype = VM_PROT_WRITE;
+ PMAP_UNLOCK(pmap);
+ goto dofault;
+ }
+ entry |= mips_pg_m_bit();
+ *pte = entry;
+ trapframe->badvaddr = (trapframe->badvaddr & ~PGOFSET);
+ pmap_update_page(pmap, trapframe->badvaddr, entry);
+ trapframe->badvaddr |= (pmap->pm_asid[PCPU_GET(cpuid)].asid << VMTLB_PID_SHIFT);
+ pa = mips_tlbpfn_to_paddr(entry);
+ if (!page_is_managed(pa))
+ panic("trap: utlbmod: unmanaged page");
+ pmap_set_modified(pa);
+
+ PMAP_UNLOCK(pmap);
+ if (!usermode) {
+ return (trapframe->pc);
+ }
+ goto out;
+ }
+
+ case T_TLB_LD_MISS:
+ case T_TLB_ST_MISS:
+ ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
+ /* check for kernel address */
+ if (KERNLAND(trapframe->badvaddr)) {
+ vm_offset_t va;
+ int rv;
+
+ kernel_fault:
+ va = trunc_page((vm_offset_t)trapframe->badvaddr);
+ rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
+ if (rv == KERN_SUCCESS)
+ return (trapframe->pc);
+ if ((i = td->td_pcb->pcb_onfault) != 0) {
+ td->td_pcb->pcb_onfault = 0;
+ return (onfault_table[i]);
+ }
+ goto err;
+ }
+ /*
+ * It is an error for the kernel to access user space except
+ * through the copyin/copyout routines.
+ */
+ if ((i = td->td_pcb->pcb_onfault) == 0)
+ goto err;
+ /* check for fuswintr() or suswintr() getting a page fault */
+ if (i == 4) {
+ return (onfault_table[i]);
+ }
+ goto dofault;
+
+ case T_TLB_LD_MISS + T_USER:
+ ftype = VM_PROT_READ;
+ goto dofault;
+
+ case T_TLB_ST_MISS + T_USER:
+ ftype = VM_PROT_WRITE;
+dofault:
+ {
+ vm_offset_t va;
+ struct vmspace *vm;
+ vm_map_t map;
+ int rv = 0;
+ int flag;
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ va = trunc_page((vm_offset_t)trapframe->badvaddr);
+ if ((vm_offset_t)trapframe->badvaddr < VM_MIN_KERNEL_ADDRESS) {
+ if (ftype & VM_PROT_WRITE)
+ flag = VM_FAULT_DIRTY;
+ else
+ flag = VM_FAULT_NORMAL;
+ } else {
+ /*
+ * Don't allow user-mode faults in kernel
+ * address space.
+ */
+ goto nogo;
+ }
+
+ /*
+ * Keep swapout from messing with us during this
+ * critical time.
+ */
+ PROC_LOCK(p);
+ ++p->p_lock;
+ PROC_UNLOCK(p);
+
+ rv = vm_fault(map, va, ftype, flag);
+
+ PROC_LOCK(p);
+ --p->p_lock;
+ PROC_UNLOCK(p);
+#ifdef VMFAULT_TRACE
+ printf("vm_fault(%x (pmap %x), %x (%x), %x, %d) -> %x at pc %x\n",
+ map, &vm->vm_pmap, va, trapframe->badvaddr, ftype, flag,
+ rv, trapframe->pc);
+#endif
+
+ if (rv == KERN_SUCCESS) {
+ if (!usermode) {
+ return (trapframe->pc);
+ }
+ goto out;
+ }
+ nogo:
+ if (!usermode) {
+ if ((i = td->td_pcb->pcb_onfault) != 0) {
+ td->td_pcb->pcb_onfault = 0;
+ return (onfault_table[i]);
+ }
+ goto err;
+ }
+ ucode = ftype;
+ i = ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
+ addr = trapframe->pc;
+
+ msg = "BAD_PAGE_FAULT";
+ log_bad_page_fault(msg, trapframe, type);
+
+ break;
+ }
+
+ case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */
+ case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */
+ if (allow_unaligned_acc) {
+ int mode;
+
+ if (type == (T_ADDR_ERR_LD + T_USER))
+ mode = VM_PROT_READ;
+ else
+ mode = VM_PROT_WRITE;
+
+ /*
+ * ADDR_ERR faults have higher priority than TLB
+ * Miss faults. Therefore, it is necessary to
+ * verify that the faulting address is a valid
+ * virtual address within the process' address space
+ * before trying to emulate the unaligned access.
+ */
+ if (useracc((caddr_t)
+ (((vm_offset_t)trapframe->badvaddr) &
+ ~(sizeof(int) - 1)), sizeof(int) * 2, mode)) {
+ access_type = emulate_unaligned_access(
+ trapframe);
+ if (access_type != 0)
+ goto out;
+ }
+ }
+ msg = "ADDRESS_ERR";
+
+ /* FALL THROUGH */
+
+ case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */
+ case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */
+ ucode = 0; /* XXX should be VM_PROT_something */
+ i = SIGBUS;
+ addr = trapframe->pc;
+ if (!msg)
+ msg = "BUS_ERR";
+ log_bad_page_fault(msg, trapframe, type);
+ break;
+
+ case T_SYSCALL + T_USER:
+ {
+ struct trapframe *locr0 = td->td_frame;
+ struct sysent *callp;
+ unsigned int code;
+ unsigned int tpc;
+ int nargs, nsaved;
+ register_t args[8];
+
+ /*
+ * note: PCPU_LAZY_INC() can only be used if we can
+		 * afford occasional inaccuracy in the count.
+ */
+ PCPU_LAZY_INC(cnt.v_syscall);
+ if (td->td_ucred != p->p_ucred)
+ cred_update_thread(td);
+#ifdef KSE
+ if (p->p_flag & P_SA)
+ thread_user_enter(td);
+#endif
+ /* compute next PC after syscall instruction */
+ tpc = trapframe->pc; /* Remember if restart */
+ if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
+ locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
+ 0);
+ } else {
+ locr0->pc += sizeof(int);
+ }
+ code = locr0->v0;
+
+ switch (code) {
+ case SYS_syscall:
+ /*
+ * Code is first argument, followed by
+ * actual args.
+ */
+ code = locr0->a0;
+ args[0] = locr0->a1;
+ args[1] = locr0->a2;
+ args[2] = locr0->a3;
+ nsaved = 3;
+ break;
+
+ case SYS___syscall:
+ /*
+ * Like syscall, but code is a quad, so as
+ * to maintain quad alignment for the rest
+ * of the arguments.
+ */
+ if (_QUAD_LOWWORD == 0) {
+ code = locr0->a0;
+ } else {
+ code = locr0->a1;
+ }
+ args[0] = locr0->a2;
+ args[1] = locr0->a3;
+ nsaved = 2;
+ quad_syscall = 1;
+ break;
+
+ default:
+ args[0] = locr0->a0;
+ args[1] = locr0->a1;
+ args[2] = locr0->a2;
+ args[3] = locr0->a3;
+ nsaved = 4;
+ }
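+		/*
+		 * The first four arguments arrive in a0-a3; any further
+		 * arguments are fetched from the user stack by the
+		 * copyin() below when nargs > nsaved.
+		 */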
+#ifdef TRAP_DEBUG
+ printf("SYSCALL #%d pid:%u\n", code, p->p_pid);
+#endif
+
+ if (p->p_sysent->sv_mask)
+ code &= p->p_sysent->sv_mask;
+
+ if (code >= p->p_sysent->sv_size)
+ callp = &p->p_sysent->sv_table[0];
+ else
+ callp = &p->p_sysent->sv_table[code];
+
+ nargs = callp->sy_narg;
+
+ if (nargs > nsaved) {
+ i = copyin((caddr_t)(locr0->sp +
+ 4 * sizeof(register_t)), (caddr_t)&args[nsaved],
+ (u_int)(nargs - nsaved) * sizeof(register_t));
+ if (i) {
+ locr0->v0 = i;
+ locr0->a3 = 1;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_SYSCALL))
+ ktrsyscall(code, nargs, args);
+#endif
+ goto done;
+ }
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_SYSCALL))
+ ktrsyscall(code, nargs, args);
+#endif
+ td->td_retval[0] = 0;
+ td->td_retval[1] = locr0->v1;
+
+#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
+ if (trp == trapdebug)
+ trapdebug[TRAPSIZE - 1].code = code;
+ else
+ trp[-1].code = code;
+#endif
+ STOPEVENT(p, S_SCE, nargs);
+
+ PTRACESTOP_SC(p, td, S_PT_SCE);
+ i = (*callp->sy_call) (td, args);
+#if 0
+ /*
+ * Reinitialize proc pointer `p' as it may be
+ * different if this is a child returning from fork
+ * syscall.
+ */
+ td = curthread;
+ locr0 = td->td_frame;
+#endif
+ trapdebug_enter(locr0, -code);
+ switch (i) {
+ case 0:
+ if (quad_syscall && code != SYS_lseek) {
+ /*
+ * System call invoked through the
+ * SYS___syscall interface but the
+ * return value is really just 32
+ * bits.
+ */
+ locr0->v0 = td->td_retval[0];
+ if (_QUAD_LOWWORD)
+ locr0->v1 = td->td_retval[0];
+ locr0->a3 = 0;
+ } else {
+ locr0->v0 = td->td_retval[0];
+ locr0->v1 = td->td_retval[1];
+ locr0->a3 = 0;
+ }
+ break;
+
+ case ERESTART:
+ locr0->pc = tpc;
+ break;
+
+ case EJUSTRETURN:
+ break; /* nothing to do */
+
+ default:
+ if (quad_syscall && code != SYS_lseek) {
+ locr0->v0 = i;
+ if (_QUAD_LOWWORD)
+ locr0->v1 = i;
+ locr0->a3 = 1;
+ } else {
+ locr0->v0 = i;
+ locr0->a3 = 1;
+ }
+ }
+
+ /*
+ * The sync'ing of I & D caches for SYS_ptrace() is
+ * done by procfs_domem() through procfs_rwmem()
+ * instead of being done here under a special check
+ * for SYS_ptrace().
+ */
+ done:
+ /*
+ * Check for misbehavior.
+ */
+ WITNESS_WARN(WARN_PANIC, NULL, "System call %s returning",
+ (code >= 0 && code < SYS_MAXSYSCALL) ?
+ syscallnames[code] : "???");
+ KASSERT(td->td_critnest == 0,
+ ("System call %s returning in a critical section",
+ (code >= 0 && code < SYS_MAXSYSCALL) ?
+ syscallnames[code] : "???"));
+ KASSERT(td->td_locks == 0,
+ ("System call %s returning with %d locks held",
+ (code >= 0 && code < SYS_MAXSYSCALL) ?
+ syscallnames[code] : "???",
+ td->td_locks));
+ userret(td, trapframe);
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(code, i, td->td_retval[0]);
+#endif
+ /*
+ * This works because errno is findable through the
+ * register set. If we ever support an emulation
+ * where this is not the case, this code will need
+ * to be revisited.
+ */
+ STOPEVENT(p, S_SCX, code);
+
+ PTRACESTOP_SC(p, td, S_PT_SCX);
+
+ mtx_assert(&Giant, MA_NOTOWNED);
+ return (trapframe->pc);
+ }
+
+#ifdef DDB
+ case T_BREAK:
+ kdb_trap(type, 0, trapframe);
+ return (trapframe->pc);
+#endif
+
+ case T_BREAK + T_USER:
+ {
+ unsigned int va, instr;
+
+ /* compute address of break instruction */
+ va = trapframe->pc;
+ if (DELAYBRANCH(trapframe->cause))
+ va += sizeof(int);
+
+ /* read break instruction */
+ instr = fuword((caddr_t)va);
+#if 0
+ printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
+ p->p_comm, p->p_pid, instr, trapframe->pc,
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
+#endif
+ if (td->td_md.md_ss_addr != va || instr != BREAK_SSTEP) {
+ i = SIGTRAP;
+ addr = trapframe->pc;
+ break;
+ }
+ /*
+ * The restoration of the original instruction and
+			 * the clearing of the breakpoint will be done later
+ * by the call to ptrace_clear_single_step() in
+ * issignal() when SIGTRAP is processed.
+ */
+ addr = trapframe->pc;
+ i = SIGTRAP;
+ break;
+ }
+
+ case T_IWATCH + T_USER:
+ case T_DWATCH + T_USER:
+ {
+ unsigned int va;
+
+ /* compute address of trapped instruction */
+ va = trapframe->pc;
+ if (DELAYBRANCH(trapframe->cause))
+ va += sizeof(int);
+ printf("watch exception @ 0x%x\n", va);
+ i = SIGTRAP;
+ addr = va;
+ break;
+ }
+
+ case T_TRAP + T_USER:
+ {
+ unsigned int va, instr;
+ struct trapframe *locr0 = td->td_frame;
+
+ /* compute address of trap instruction */
+ va = trapframe->pc;
+ if (DELAYBRANCH(trapframe->cause))
+ va += sizeof(int);
+ /* read break instruction */
+ instr = fuword((caddr_t)va);
+
+ if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
+ locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
+ 0);
+ } else {
+ locr0->pc += sizeof(int);
+ }
+ addr = va;
+ i = SIGEMT; /* Stuff it with something for now */
+ break;
+ }
+
+ case T_RES_INST + T_USER:
+ i = SIGILL;
+ addr = trapframe->pc;
+ break;
+ case T_C2E:
+ case T_C2E + T_USER:
+ goto err;
+ break;
+ case T_COP_UNUSABLE:
+ goto err;
+ break;
+ case T_COP_UNUSABLE + T_USER:
+#if defined(SOFTFLOAT)
+ /* FP (COP1) instruction */
+ if ((trapframe->cause & CR_COP_ERR) == 0x10000000) {
+ i = SIGILL;
+ break;
+ }
+#endif
+ if ((trapframe->cause & CR_COP_ERR) != 0x10000000) {
+ i = SIGILL; /* only FPU instructions allowed */
+ break;
+ }
+ addr = trapframe->pc;
+ MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
+ PCPU_SET(fpcurthread, td);
+ td->td_frame->sr |= SR_COP_1_BIT;
+ td->td_md.md_flags |= MDTD_FPUSED;
+ goto out;
+
+ case T_FPE:
+#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
+ trapDump("fpintr");
+#else
+ printf("FPU Trap: PC %x CR %x SR %x\n",
+ trapframe->pc, trapframe->cause, trapframe->sr);
+ goto err;
+#endif
+
+ case T_FPE + T_USER:
+ MachFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
+ goto out;
+
+ case T_OVFLOW + T_USER:
+ i = SIGFPE;
+ addr = trapframe->pc;
+ break;
+
+ case T_ADDR_ERR_LD: /* misaligned access */
+ case T_ADDR_ERR_ST: /* misaligned access */
+#ifdef TRAP_DEBUG
+ printf("+++ ADDR_ERR: type = %d, badvaddr = %x\n", type,
+ trapframe->badvaddr);
+#endif
+ /* Only allow emulation on a user address */
+ if (allow_unaligned_acc &&
+ ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
+ int mode;
+
+ if (type == T_ADDR_ERR_LD)
+ mode = VM_PROT_READ;
+ else
+ mode = VM_PROT_WRITE;
+
+ /*
+ * ADDR_ERR faults have higher priority than TLB
+ * Miss faults. Therefore, it is necessary to
+ * verify that the faulting address is a valid
+ * virtual address within the process' address space
+ * before trying to emulate the unaligned access.
+ */
+ if (useracc((caddr_t)
+ (((vm_offset_t)trapframe->badvaddr) &
+ ~(sizeof(int) - 1)), sizeof(int) * 2, mode)) {
+ access_type = emulate_unaligned_access(
+ trapframe);
+ if (access_type != 0) {
+ return (trapframe->pc);
+ }
+ }
+ }
+ /* FALLTHROUGH */
+
+ case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
+ if ((i = td->td_pcb->pcb_onfault) != 0) {
+ td->td_pcb->pcb_onfault = 0;
+ return (onfault_table[i]);
+ }
+ /* FALLTHROUGH */
+
+ default:
+err:
+
+#if !defined(SMP) && defined(DEBUG)
+ stacktrace(!usermode ? trapframe : td->td_frame);
+ trapDump("trap");
+#endif
+#ifdef SMP
+ printf("cpu:%d-", PCPU_GET(cpuid));
+#endif
+ printf("Trap cause = %d (%s - ", type,
+ trap_type[type & (~T_USER)]);
+
+ if (type & T_USER)
+ printf("user mode)\n");
+ else
+ printf("kernel mode)\n");
+
+#ifdef TRAP_DEBUG
+ printf("badvaddr = %x, pc = %x, ra = %x, sr = 0x%x\n",
+ trapframe->badvaddr, trapframe->pc, trapframe->ra,
+ trapframe->sr);
+#endif
+
+#ifdef KDB
+ if (debugger_on_panic || kdb_active) {
+ kdb_trap(type, 0, trapframe);
+ }
+#endif
+ panic("trap");
+ }
+ td->td_frame->pc = trapframe->pc;
+ td->td_frame->cause = trapframe->cause;
+ td->td_frame->badvaddr = trapframe->badvaddr;
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = i;
+ ksi.ksi_code = ucode;
+ ksi.ksi_addr = (void *)addr;
+ ksi.ksi_trapno = type;
+ trapsignal(td, &ksi);
+out:
+
+ /*
+ * Note: we should only get here if returning to user mode.
+ */
+ userret(td, trapframe);
+ mtx_assert(&Giant, MA_NOTOWNED);
+ return (trapframe->pc);
+}
+
+#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
+void
+trapDump(char *msg)
+{
+ int i, s;
+
+ s = disableintr();
+ printf("trapDump(%s)\n", msg);
+ for (i = 0; i < TRAPSIZE; i++) {
+ if (trp == trapdebug) {
+ trp = &trapdebug[TRAPSIZE - 1];
+ } else {
+ trp--;
+ }
+
+ if (trp->cause == 0)
+ break;
+
+ printf("%s: ADR %x PC %x CR %x SR %x\n",
+ trap_type[(trp->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT],
+ trp->vadr, trp->pc, trp->cause, trp->status);
+
+ printf(" RA %x SP %x code %d\n", trp->ra, trp->sp, trp->code);
+ }
+ restoreintr(s);
+}
+
+#endif
+
+
+/*
+ * Return the resulting PC as if the branch was executed.
+ */
+u_int
+MipsEmulateBranch(struct trapframe *framePtr, int instPC, int fpcCSR,
+ u_int instptr)
+{
+ InstFmt inst;
+ register_t *regsPtr = (register_t *) framePtr;
+ unsigned retAddr = 0;
+ int condition;
+
+#define GetBranchDest(InstPtr, inst) \
+ ((unsigned)InstPtr + 4 + ((short)inst.IType.imm << 2))
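+/*
+ * A branch target is the address of the delay slot (PC + 4) plus the
+ * sign-extended 16-bit immediate shifted left two bits.  For a branch
+ * that is not taken the code below returns PC + 8, the instruction
+ * following the delay slot.
+ */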
+
+
+ if (instptr) {
+ if (instptr < MIPS_KSEG0_START)
+ inst.word = fuword((void *)instptr);
+ else
+ inst = *(InstFmt *) instptr;
+ } else {
+ if ((vm_offset_t)instPC < MIPS_KSEG0_START)
+ inst.word = fuword((void *)instPC);
+ else
+ inst = *(InstFmt *) instPC;
+ }
+
+ switch ((int)inst.JType.op) {
+ case OP_SPECIAL:
+ switch ((int)inst.RType.func) {
+ case OP_JR:
+ case OP_JALR:
+ retAddr = regsPtr[inst.RType.rs];
+ break;
+
+ default:
+ retAddr = instPC + 4;
+ break;
+ }
+ break;
+
+ case OP_BCOND:
+ switch ((int)inst.IType.rt) {
+ case OP_BLTZ:
+ case OP_BLTZL:
+ case OP_BLTZAL:
+ case OP_BLTZALL:
+ if ((int)(regsPtr[inst.RType.rs]) < 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BGEZ:
+ case OP_BGEZL:
+ case OP_BGEZAL:
+ case OP_BGEZALL:
+ if ((int)(regsPtr[inst.RType.rs]) >= 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_TGEI:
+ case OP_TGEIU:
+ case OP_TLTI:
+ case OP_TLTIU:
+ case OP_TEQI:
+ case OP_TNEI:
+ retAddr = instPC + 4; /* Like syscall... */
+ break;
+
+ default:
+ panic("MipsEmulateBranch: Bad branch cond");
+ }
+ break;
+
+ case OP_J:
+ case OP_JAL:
+ retAddr = (inst.JType.target << 2) |
+ ((unsigned)instPC & 0xF0000000);
+ break;
+
+ case OP_BEQ:
+ case OP_BEQL:
+ if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BNE:
+ case OP_BNEL:
+ if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BLEZ:
+ case OP_BLEZL:
+ if ((int)(regsPtr[inst.RType.rs]) <= 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BGTZ:
+ case OP_BGTZL:
+ if ((int)(regsPtr[inst.RType.rs]) > 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_COP1:
+ switch (inst.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
+ condition = fpcCSR & FPC_COND_BIT;
+ else
+ condition = !(fpcCSR & FPC_COND_BIT);
+ if (condition)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ default:
+ retAddr = instPC + 4;
+ }
+ break;
+
+ default:
+ retAddr = instPC + 4;
+ }
+ return (retAddr);
+}
+
+
+#if defined(DDB) || defined(DEBUG)
+#define MIPS_JR_RA 0x03e00008 /* instruction code for jr ra */
+
+/* forward */
+char *fn_name(unsigned addr);
+
+/*
+ * Print a stack backtrace.
+ */
+void
+stacktrace(struct trapframe *regs)
+{
+ stacktrace_subr(regs, printf);
+}
+
+void
+stacktrace_subr(struct trapframe *regs, int (*printfn) (const char *,...))
+{
+ InstFmt i;
+ unsigned a0, a1, a2, a3, pc, sp, fp, ra, va, subr;
+ unsigned instr, mask;
+ unsigned int frames = 0;
+ int more, stksize;
+
+ /* get initial values from the exception frame */
+ sp = regs->sp;
+ pc = regs->pc;
+ fp = regs->s8;
+ ra = regs->ra; /* May be a 'leaf' function */
+ a0 = regs->a0;
+ a1 = regs->a1;
+ a2 = regs->a2;
+ a3 = regs->a3;
+
+/* Jump here when done with a frame, to start a new one */
+loop:
+
+/* Jump here after a nonstandard (interrupt handler) frame */
+ stksize = 0;
+ subr = 0;
+ if (frames++ > 100) {
+ (*printfn) ("\nstackframe count exceeded\n");
+ /* return breaks stackframe-size heuristics with gcc -O2 */
+ goto finish; /* XXX */
+ }
+ /* check for bad SP: could foul up next frame */
+ if (sp & 3 || sp < 0x80000000) {
+ (*printfn) ("SP 0x%x: not in kernel\n", sp);
+ ra = 0;
+ subr = 0;
+ goto done;
+ }
+#define Between(x, y, z) \
+ ( ((x) <= (y)) && ((y) < (z)) )
+#define pcBetween(a,b) \
+ Between((unsigned)a, pc, (unsigned)b)
+
+ /*
+	 * Check for a current PC in exception handler code that doesn't
+	 * have a preceding "j ra" at the tail of the preceding function.
+	 * Depends on the relative ordering of functions in exception.S
+	 * and swtch.S.
+ */
+ if (pcBetween(MipsKernGenException, MipsUserGenException))
+ subr = (unsigned)MipsKernGenException;
+ else if (pcBetween(MipsUserGenException, MipsKernIntr))
+ subr = (unsigned)MipsUserGenException;
+ else if (pcBetween(MipsKernIntr, MipsUserIntr))
+ subr = (unsigned)MipsKernIntr;
+ else if (pcBetween(MipsUserIntr, MipsTLBInvalidException))
+ subr = (unsigned)MipsUserIntr;
+ else if (pcBetween(MipsTLBInvalidException,
+ MipsKernTLBInvalidException))
+ subr = (unsigned)MipsTLBInvalidException;
+ else if (pcBetween(MipsKernTLBInvalidException,
+ MipsUserTLBInvalidException))
+ subr = (unsigned)MipsKernTLBInvalidException;
+ else if (pcBetween(MipsUserTLBInvalidException, MipsTLBMissException))
+ subr = (unsigned)MipsUserTLBInvalidException;
+ else if (pcBetween(cpu_switch, MipsSwitchFPState))
+ subr = (unsigned)cpu_switch;
+ else if (pcBetween(_locore, _locoreEnd)) {
+ subr = (unsigned)_locore;
+ ra = 0;
+ goto done;
+ }
+ /* check for bad PC */
+ if (pc & 3 || pc < (unsigned)0x80000000 || pc >= (unsigned)edata) {
+ (*printfn) ("PC 0x%x: not in kernel\n", pc);
+ ra = 0;
+ goto done;
+ }
+ /*
+ * Find the beginning of the current subroutine by scanning
+ * backwards from the current PC for the end of the previous
+ * subroutine.
+ */
+ if (!subr) {
+ va = pc - sizeof(int);
+ while ((instr = kdbpeek((int *)va)) != MIPS_JR_RA)
+ va -= sizeof(int);
+ va += 2 * sizeof(int); /* skip back over branch & delay slot */
+ /* skip over nulls which might separate .o files */
+ while ((instr = kdbpeek((int *)va)) == 0)
+ va += sizeof(int);
+ subr = va;
+ }
+ /* scan forwards to find stack size and any saved registers */
+ stksize = 0;
+ more = 3;
+ mask = 0;
+ for (va = subr; more; va += sizeof(int),
+ more = (more == 3) ? 3 : more - 1) {
+ /* stop if hit our current position */
+ if (va >= pc)
+ break;
+ instr = kdbpeek((int *)va);
+ i.word = instr;
+ switch (i.JType.op) {
+ case OP_SPECIAL:
+ switch (i.RType.func) {
+ case OP_JR:
+ case OP_JALR:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_SYSCALL:
+ case OP_BREAK:
+ more = 1; /* stop now */
+ };
+ break;
+
+ case OP_BCOND:
+ case OP_J:
+ case OP_JAL:
+ case OP_BEQ:
+ case OP_BNE:
+ case OP_BLEZ:
+ case OP_BGTZ:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_COP0:
+ case OP_COP1:
+ case OP_COP2:
+ case OP_COP3:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ more = 2; /* stop after next instruction */
+ };
+ break;
+
+ case OP_SW:
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ switch (i.IType.rt) {
+ case 4:/* a0 */
+ a0 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 5:/* a1 */
+ a1 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 6:/* a2 */
+ a2 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 7:/* a3 */
+ a3 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 30: /* fp */
+ fp = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 31: /* ra */
+ ra = kdbpeek((int *)(sp + (short)i.IType.imm));
+ }
+ break;
+
+ case OP_SD:
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ switch (i.IType.rt) {
+ case 4:/* a0 */
+ a0 = kdbpeekD((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 5:/* a1 */
+ a1 = kdbpeekD((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 6:/* a2 */
+ a2 = kdbpeekD((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 7:/* a3 */
+ a3 = kdbpeekD((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 30: /* fp */
+ fp = kdbpeekD((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 31: /* ra */
+ ra = kdbpeekD((int *)(sp + (short)i.IType.imm));
+ }
+ break;
+
+ case OP_ADDI:
+ case OP_ADDIU:
+ /* look for stack pointer adjustment */
+ if (i.IType.rs != 29 || i.IType.rt != 29)
+ break;
+ stksize = -((short)i.IType.imm);
+ }
+ }
+
+done:
+ (*printfn) ("%s+%x (%x,%x,%x,%x) ra %x sz %d\n",
+ fn_name(subr), pc - subr, a0, a1, a2, a3, ra, stksize);
+
+ if (ra) {
+ if (pc == ra && stksize == 0)
+ (*printfn) ("stacktrace: loop!\n");
+ else {
+ pc = ra;
+ sp += stksize;
+ ra = 0;
+ goto loop;
+ }
+ } else {
+finish:
+ if (curproc)
+ (*printfn) ("pid %d\n", curproc->p_pid);
+ else
+ (*printfn) ("curproc NULL\n");
+ }
+}
+
+/*
+ * Functions ``special'' enough to print by name
+ */
+#ifdef __STDC__
+#define Name(_fn) { (void*)_fn, # _fn }
+#else
+#define Name(_fn) { _fn, "_fn"}
+#endif
+static struct {
+ void *addr;
+ char *name;
+} names[] = {
+
+ Name(trap),
+ Name(MipsKernGenException),
+ Name(MipsUserGenException),
+ Name(MipsKernIntr),
+ Name(MipsUserIntr),
+ Name(cpu_switch),
+ {
+ 0, 0
+ }
+};
+
+/*
+ * Map a function address to a string name, if known; or a hex string.
+ */
+char *
+fn_name(unsigned addr)
+{
+ static char buf[17];
+ int i = 0;
+
+#ifdef DDB
+ db_expr_t diff;
+ c_db_sym_t sym;
+ char *symname;
+
+ diff = 0;
+ symname = NULL;
+ sym = db_search_symbol((db_addr_t)addr, DB_STGY_ANY, &diff);
+ db_symbol_values(sym, (const char **)&symname, (db_expr_t *)0);
+ if (symname && diff == 0)
+ return (symname);
+#endif
+
+ for (i = 0; names[i].name; i++)
+ if (names[i].addr == (void *)addr)
+ return (names[i].name);
+ sprintf(buf, "%x", addr);
+ return (buf);
+}
+
+#endif /* DDB || DEBUG */
+
+static void
+log_frame_dump(struct trapframe *frame)
+{
+ log(LOG_ERR, "Trapframe Register Dump:\n");
+ log(LOG_ERR, "\tzero: %08x\tat: %08x\tv0: %08x\tv1: %08x\n",
+ 0, frame->ast, frame->v0, frame->v1);
+
+ log(LOG_ERR, "\ta0: %08x\ta1: %08x\ta2: %08x\ta3: %08x\n",
+ frame->a0, frame->a1, frame->a2, frame->a3);
+
+ log(LOG_ERR, "\tt0: %08x\tt1: %08x\tt2: %08x\tt3: %08x\n",
+ frame->t0, frame->t1, frame->t2, frame->t3);
+
+ log(LOG_ERR, "\tt4: %08x\tt5: %08x\tt6: %08x\tt7: %08x\n",
+ frame->t4, frame->t5, frame->t6, frame->t7);
+
+ log(LOG_ERR, "\tt8: %08x\tt9: %08x\ts0: %08x\ts1: %08x\n",
+ frame->t8, frame->t9, frame->s0, frame->s1);
+
+ log(LOG_ERR, "\ts2: %08x\ts3: %08x\ts4: %08x\ts5: %08x\n",
+ frame->s2, frame->s3, frame->s4, frame->s5);
+
+ log(LOG_ERR, "\ts6: %08x\ts7: %08x\tk0: %08x\tk1: %08x\n",
+ frame->s6, frame->s7, frame->k0, frame->k1);
+
+ log(LOG_ERR, "\tgp: %08x\tsp: %08x\ts8: %08x\tra: %08x\n",
+ frame->gp, frame->sp, frame->s8, frame->ra);
+
+ log(LOG_ERR, "\tsr: %08x\tmullo: %08x\tmulhi: %08x\tbadvaddr: %08x\n",
+ frame->sr, frame->mullo, frame->mulhi, frame->badvaddr);
+
+#ifdef IC_REG
+ log(LOG_ERR, "\tcause: %08x\tpc: %08x\tic: %08x\n",
+ frame->cause, frame->pc, frame->ic);
+#else
+ log(LOG_ERR, "\tcause: %08x\tpc: %08x\n",
+ frame->cause, frame->pc);
+#endif
+}
+
+#ifdef TRAP_DEBUG
+static void
+trap_frame_dump(struct trapframe *frame)
+{
+ printf("Trapframe Register Dump:\n");
+ printf("\tzero: %08x\tat: %08x\tv0: %08x\tv1: %08x\n",
+ 0, frame->ast, frame->v0, frame->v1);
+
+ printf("\ta0: %08x\ta1: %08x\ta2: %08x\ta3: %08x\n",
+ frame->a0, frame->a1, frame->a2, frame->a3);
+
+ printf("\tt0: %08x\tt1: %08x\tt2: %08x\tt3: %08x\n",
+ frame->t0, frame->t1, frame->t2, frame->t3);
+
+ printf("\tt4: %08x\tt5: %08x\tt6: %08x\tt7: %08x\n",
+ frame->t4, frame->t5, frame->t6, frame->t7);
+
+ printf("\tt8: %08x\tt9: %08x\ts0: %08x\ts1: %08x\n",
+ frame->t8, frame->t9, frame->s0, frame->s1);
+
+ printf("\ts2: %08x\ts3: %08x\ts4: %08x\ts5: %08x\n",
+ frame->s2, frame->s3, frame->s4, frame->s5);
+
+ printf("\ts6: %08x\ts7: %08x\tk0: %08x\tk1: %08x\n",
+ frame->s6, frame->s7, frame->k0, frame->k1);
+
+ printf("\tgp: %08x\tsp: %08x\ts8: %08x\tra: %08x\n",
+ frame->gp, frame->sp, frame->s8, frame->ra);
+
+ printf("\tsr: %08x\tmullo: %08x\tmulhi: %08x\tbadvaddr: %08x\n",
+ frame->sr, frame->mullo, frame->mulhi, frame->badvaddr);
+
+#ifdef IC_REG
+ printf("\tcause: %08x\tpc: %08x\tic: %08x\n",
+ frame->cause, frame->pc, frame->ic);
+#else
+ printf("\tcause: %08x\tpc: %08x\n",
+ frame->cause, frame->pc);
+#endif
+}
+
+#endif
+
+
+static void
+get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
+{
+ pt_entry_t *ptep;
+ pd_entry_t *pdep;
+ struct proc *p = curproc;
+
+ pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[va >> SEGSHIFT]));
+ if (*pdep)
+ ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
+ else
+ ptep = (pt_entry_t *)0;
+
+ *pdepp = pdep;
+ *ptepp = ptep;
+}
+
+
+static void
+log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
+{
+ pt_entry_t *ptep;
+ pd_entry_t *pdep;
+ unsigned int *addr;
+ struct proc *p = curproc;
+ char *read_or_write;
+ register_t pc;
+
+ trap_type &= ~T_USER;
+
+#ifdef SMP
+ printf("cpuid = %d\n", PCPU_GET(cpuid));
+#endif
+ switch (trap_type) {
+ case T_TLB_ST_MISS:
+ case T_ADDR_ERR_ST:
+ read_or_write = "write";
+ break;
+ case T_TLB_LD_MISS:
+ case T_ADDR_ERR_LD:
+ case T_BUS_ERR_IFETCH:
+ read_or_write = "read";
+ break;
+ default:
+ read_or_write = "";
+ }
+
+ pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
+ log(LOG_ERR, "%s: pid %d (%s), uid %d: pc 0x%x got a %s fault at 0x%x\n",
+ msg, p->p_pid, p->p_comm,
+ p->p_ucred ? p->p_ucred->cr_uid : -1,
+ pc,
+ read_or_write,
+ frame->badvaddr);
+
+ /* log registers in trap frame */
+ log_frame_dump(frame);
+
+ get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
+
+ /*
+	 * Dump a few words around the faulting instruction, if the address
+	 * is valid.
+ */
+ if (!(pc & 3) && (pc != frame->badvaddr) &&
+ (trap_type != T_BUS_ERR_IFETCH) &&
+ useracc((caddr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
+ /* dump page table entry for faulting instruction */
+ log(LOG_ERR, "Page table info for pc address 0x%x: pde = %p, pte = 0x%lx\n",
+ pc, *pdep, ptep ? *ptep : 0);
+
+ addr = (unsigned int *)pc;
+ log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
+ addr);
+ log(LOG_ERR, "%08x %08x %08x %08x\n",
+ addr[0], addr[1], addr[2], addr[3]);
+ } else {
+ log(LOG_ERR, "pc address 0x%x is inaccessible, pde = 0x%p, pte = 0x%lx\n",
+ pc, *pdep, ptep ? *ptep : 0);
+ }
+ /* panic("Bad trap");*/
+}
+
+
+/*
+ * Unaligned load/store emulation
+ */
+static int
+mips_unaligned_load_store(struct trapframe *frame, register_t addr, register_t pc)
+{
+ register_t *reg = (register_t *) frame;
+ u_int32_t inst = *((u_int32_t *) pc);
+ u_int32_t value_msb, value;
+ int access_type = 0;
+
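+	/*
+	 * Halfword accesses are emulated one byte at a time; word
+	 * accesses rely on the lwl/lwr (swl/swr) pairs, which together
+	 * handle an unaligned word in two memory operations.
+	 */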
+ switch (MIPS_INST_OPCODE(inst)) {
+ case OP_LHU:
+ lbu_macro(value_msb, addr);
+ addr += 1;
+ lbu_macro(value, addr);
+ value |= value_msb << 8;
+ reg[MIPS_INST_RT(inst)] = value;
+ access_type = MIPS_LHU_ACCESS;
+ break;
+
+ case OP_LH:
+ lb_macro(value_msb, addr);
+ addr += 1;
+ lbu_macro(value, addr);
+ value |= value_msb << 8;
+ reg[MIPS_INST_RT(inst)] = value;
+ access_type = MIPS_LH_ACCESS;
+ break;
+
+ case OP_LWU:
+ lwl_macro(value, addr);
+ addr += 3;
+ lwr_macro(value, addr);
+ value &= 0xffffffff;
+ reg[MIPS_INST_RT(inst)] = value;
+ access_type = MIPS_LWU_ACCESS;
+ break;
+
+ case OP_LW:
+ lwl_macro(value, addr);
+ addr += 3;
+ lwr_macro(value, addr);
+ reg[MIPS_INST_RT(inst)] = value;
+ access_type = MIPS_LW_ACCESS;
+ break;
+
+ case OP_SH:
+ value = reg[MIPS_INST_RT(inst)];
+ value_msb = value >> 8;
+ sb_macro(value_msb, addr);
+ addr += 1;
+ sb_macro(value, addr);
+ access_type = MIPS_SH_ACCESS;
+ break;
+
+ case OP_SW:
+ value = reg[MIPS_INST_RT(inst)];
+ swl_macro(value, addr);
+ addr += 3;
+ swr_macro(value, addr);
+ access_type = MIPS_SW_ACCESS;
+ break;
+
+ default:
+ break;
+ }
+
+ return access_type;
+}
+
+
+static int
+emulate_unaligned_access(struct trapframe *frame)
+{
+ register_t pc;
+ int access_type = 0;
+
+ pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
+
+ /*
+	 * Fall through if it's an instruction fetch exception.
+ */
+ if (!((pc & 3) || (pc == frame->badvaddr))) {
+
+ /*
+ * Handle unaligned load and store
+ */
+
+ /*
+ * Return access type if the instruction was emulated.
+ * Otherwise restore pc and fall through.
+ */
+ access_type = mips_unaligned_load_store(frame,
+ frame->badvaddr, pc);
+
+ if (access_type) {
+ if (DELAYBRANCH(frame->cause))
+ frame->pc = MipsEmulateBranch(frame, frame->pc,
+ 0, 0);
+ else
+ frame->pc += 4;
+
+ log(LOG_INFO, "Unaligned %s: pc=0x%x, badvaddr=0x%x\n",
+ access_name[access_type - 1], pc, frame->badvaddr);
+ }
+ }
+ return access_type;
+}
diff --git a/sys/mips/mips/uio_machdep.c b/sys/mips/mips/uio_machdep.c
new file mode 100644
index 0000000..0872b4d
--- /dev/null
+++ b/sys/mips/mips/uio_machdep.c
@@ -0,0 +1,128 @@
+/*-
+ * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
+ * from: src/sys/i386/i386/uio_machdep.c,v 1.8 2005/02/13 23:09:36 alc
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+/*
+ * Implement uiomove(9) from physical memory using sf_bufs to reduce
+ * the creation and destruction of ephemeral mappings.
+ */
+int
+uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
+{
+ struct sf_buf *sf;
+ struct thread *td = curthread;
+ struct iovec *iov;
+ void *cp;
+ vm_offset_t page_offset;
+ size_t cnt;
+ int error = 0;
+ int save = 0;
+
+ KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+ ("uiomove_fromphys: mode"));
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+ ("uiomove_fromphys proc"));
+ save = td->td_pflags & TDP_DEADLKTREAT;
+ td->td_pflags |= TDP_DEADLKTREAT;
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > n)
+ cnt = n;
+ page_offset = offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - page_offset);
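+		/*
+		 * Pin the thread to its CPU and map the current page
+		 * through a CPU-private sf_buf; cnt has been clamped so
+		 * the copy never crosses a page boundary.
+		 */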
+ sched_pin();
+ sf = sf_buf_alloc(ma[offset >> PAGE_SHIFT], SFB_CPUPRIVATE);
+ cp = (char *)sf_buf_kva(sf) + page_offset;
+ switch (uio->uio_segflg) {
+ case UIO_USERSPACE:
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
+ uio_yield();
+ if (uio->uio_rw == UIO_READ)
+ error = copyout(cp, iov->iov_base, cnt);
+ else
+ error = copyin(iov->iov_base, cp, cnt);
+ if (error) {
+ sf_buf_free(sf);
+ sched_unpin();
+ goto out;
+ }
+ break;
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ bcopy(cp, iov->iov_base, cnt);
+ else
+ bcopy(iov->iov_base, cp, cnt);
+ break;
+ case UIO_NOCOPY:
+ break;
+ }
+ sf_buf_free(sf);
+ sched_unpin();
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ offset += cnt;
+ n -= cnt;
+ }
+out:
+ if (save == 0)
+ td->td_pflags &= ~TDP_DEADLKTREAT;
+ return (error);
+}
diff --git a/sys/mips/mips/vm_machdep.c b/sys/mips/mips/vm_machdep.c
new file mode 100644
index 0000000..26b6477
--- /dev/null
+++ b/sys/mips/mips/vm_machdep.c
@@ -0,0 +1,541 @@
+/*-
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * Copyright (c) 1994 John Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ * from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota
+ * JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/vmmeter.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <machine/clock.h>
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/pltfm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <sys/user.h>
+#include <sys/mbuf.h>
+#include <sys/sf_buf.h>
+
+#ifndef NSFBUFS
+#define NSFBUFS (512 + maxusers * 16)
+#endif
+
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);
+
+LIST_HEAD(sf_head, sf_buf);
+
+/*
+ * A hash table of active sendfile(2) buffers
+ */
+static struct sf_head *sf_buf_active;
+static u_long sf_buf_hashmask;
+
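+/* Hash a page by its index within vm_page_array. */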
+#define SF_BUF_HASH(m) (((m) - vm_page_array) & sf_buf_hashmask)
+
+static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
+static u_int sf_buf_alloc_want;
+
+/*
+ * A lock used to synchronize access to the hash table and free list
+ */
+static struct mtx sf_buf_lock;
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, and set up the stack so that the child
+ * is ready to run and return to user mode.
+ */
+void
+cpu_fork(register struct thread *td1, register struct proc *p2,
+    struct thread *td2, int flags)
+{
+ register struct proc *p1;
+ struct pcb *pcb2;
+
+ p1 = td1->td_proc;
+ if ((flags & RFPROC) == 0)
+ return;
+	/*
+	 * It is assumed that vm_thread_alloc() called
+	 * cpu_thread_alloc() before cpu_fork() is called.
+	 */
+
+ /* Point the pcb to the top of the stack */
+ pcb2 = td2->td_pcb;
+
+	/*
+	 * Copy p1's pcb.  Note that in this case our pcb also includes
+	 * the td_frame being copied.  The older mips2 code did an
+	 * additional copy of the td_frame; for us that is no longer
+	 * needed (this copy covers them both).
+	 */
+ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
+
+	/*
+	 * Copy over td1's machine-dependent flags; md_proc is
+	 * otherwise empty for MIPS.
+	 */
+ td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;
+
+	/*
+	 * Set up return-value registers as the fork() libc stub expects:
+	 * a nonzero v1 marks the child side of the fork and a3 == 0
+	 * reports success.
+	 */
+ td2->td_frame->v0 = 0;
+ td2->td_frame->v1 = 1;
+ td2->td_frame->a3 = 0;
+
+ if (td1 == PCPU_GET(fpcurthread))
+ MipsSaveCurFPState(td1);
+
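+	/*
+	 * The child resumes in fork_trampoline(), which calls the
+	 * function in s0 (fork_return) with s1 and s2 as its
+	 * arguments; see cpu_set_fork_handler() below.
+	 */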
+ pcb2->pcb_context.val[PCB_REG_RA] = (register_t)fork_trampoline;
+ /* Make sp 64-bit aligned */
+ pcb2->pcb_context.val[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb &
+ ~(sizeof(__int64_t) - 1)) - STAND_FRAME_SIZE);
+ pcb2->pcb_context.val[PCB_REG_S0] = (register_t)fork_return;
+ pcb2->pcb_context.val[PCB_REG_S1] = (register_t)td2;
+ pcb2->pcb_context.val[PCB_REG_S2] = (register_t)td2->td_frame;
+ pcb2->pcb_context.val[PCB_REG_SR] = SR_INT_MASK;
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * Setup any other CPU-Specific registers (Not MIPS Standard)
+ * and/or bits in other standard MIPS registers (if CPU-Specific)
+ * that are needed.
+ */
+
+ td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
+ td2->td_md.md_spinlock_count = 1;
+#ifdef TARGET_OCTEON
+ pcb2->pcb_context.val[PCB_REG_SR] |= MIPS_SR_COP_2_BIT | MIPS32_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
+#endif
+
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ */
+void
+cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
+{
+ /*
+ * Note that the trap frame follows the args, so the function
+ * is really called like this: func(arg, frame);
+ */
+ td->td_pcb->pcb_context.val[PCB_REG_S0] = (register_t) func;
+ td->td_pcb->pcb_context.val[PCB_REG_S1] = (register_t) arg;
+}
+
+void
+cpu_exit(struct thread *td)
+{
+}
+
+void
+cpu_thread_exit(struct thread *td)
+{
+
+ if (PCPU_GET(fpcurthread) == td)
+		PCPU_SET(fpcurthread, (struct thread *)0);
+}
+
+void
+cpu_thread_free(struct thread *td)
+{
+}
+
+void
+cpu_thread_clean(struct thread *td)
+{
+}
+
+void
+cpu_thread_swapin(struct thread *td)
+{
+ pt_entry_t *pte;
+ int i;
+
+ /*
+ * The kstack may be at a different physical address now.
+ * Cache the PTEs for the Kernel stack in the machine dependent
+ * part of the thread struct so cpu_switch() can quickly map in
+ * the pcb struct and kernel stack.
+ */
+ if (!(pte = pmap_segmap(kernel_pmap, td->td_md.md_realstack)))
+ panic("cpu_thread_swapin: invalid segmap");
+ pte += ((vm_offset_t)td->td_md.md_realstack >> PGSHIFT) & (NPTEPG - 1);
+
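+	/*
+	 * Cache the PTEs with the RO and WIRED attribute bits cleared;
+	 * cpu_switch() uses these cached entries to map the stack.
+	 */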
+ for (i = 0; i < KSTACK_PAGES - 1; i++) {
+ td->td_md.md_upte[i] = *pte & ~(PTE_RO|PTE_WIRED);
+ pte++;
+ }
+}
+
+void
+cpu_thread_swapout(struct thread *td)
+{
+}
+
+void
+cpu_thread_alloc(struct thread *td)
+{
+ pt_entry_t *pte;
+ int i;
+
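+	/*
+	 * A MIPS TLB entry maps an even/odd pair of pages, so make
+	 * sure the usable stack starts on an even page boundary.
+	 */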
+	if (td->td_kstack & (1 << PAGE_SHIFT))
+ td->td_md.md_realstack = td->td_kstack + PAGE_SIZE;
+ else
+ td->td_md.md_realstack = td->td_kstack;
+
+ td->td_pcb = (struct pcb *)(td->td_md.md_realstack +
+ (td->td_kstack_pages - 1) * PAGE_SIZE) - 1;
+ td->td_frame = &td->td_pcb->pcb_regs;
+
+ if (!(pte = pmap_segmap(kernel_pmap, td->td_md.md_realstack)))
+ panic("cpu_thread_alloc: invalid segmap");
+ pte += ((vm_offset_t)td->td_md.md_realstack >> PGSHIFT) & (NPTEPG - 1);
+
+ for (i = 0; i < KSTACK_PAGES - 1; i++) {
+ td->td_md.md_upte[i] = *pte & ~(PTE_RO|PTE_WIRED);
+ pte++;
+ }
+}
+
+/*
+ * Initialize machine state (pcb and trap frame) for a new thread about to
+ * upcall.  Put enough state in the new thread's PCB to get it to go back
+ * to userret(), where we can intercept it again to set the return (upcall)
+ * address and stack, along with those from upcalls that come from other
+ * sources such as those generated in thread_userret() itself.
+ */
+void
+cpu_set_upcall(struct thread *td, struct thread *td0)
+{
+ struct pcb *pcb2;
+
+ /* Point the pcb to the top of the stack. */
+ pcb2 = td->td_pcb;
+
+ /*
+ * Copy the upcall pcb. This loads kernel regs.
+ * Those not loaded individually below get their default
+ * values here.
+ *
+ * XXXKSE It might be a good idea to simply skip this as
+ * the values of the other registers may be unimportant.
+ * This would remove any requirement for knowing the KSE
+ * at this time (see the matching comment below for
+ * more analysis) (need a good safe default).
+ * In MIPS, the trapframe is the first element of the PCB
+	 * and gets copied when we copy the PCB.  No separate copy
+ * is needed.
+ */
+ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
+
+ /*
+ * Set registers for trampoline to user mode.
+ */
+
+ pcb2->pcb_context.val[PCB_REG_RA] = (register_t)fork_trampoline;
+ /* Make sp 64-bit aligned */
+ pcb2->pcb_context.val[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb &
+ ~(sizeof(__int64_t) - 1)) - STAND_FRAME_SIZE);
+ pcb2->pcb_context.val[PCB_REG_S0] = (register_t)fork_return;
+ pcb2->pcb_context.val[PCB_REG_S1] = (register_t)td;
+ pcb2->pcb_context.val[PCB_REG_S2] = (register_t)td->td_frame;
+
+	/* Don't set the IE bit in SR; the sched lock release will take care of it. */
+/* idle_mask is jmips pcb2->pcb_context.val[11] = (ALL_INT_MASK & idle_mask); */
+ pcb2->pcb_context.val[PCB_REG_SR] = SR_INT_MASK;
+#ifdef TARGET_OCTEON
+ pcb2->pcb_context.val[PCB_REG_SR] |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT |
+ MIPS32_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
+#endif
+
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * Setup any other CPU-Specific registers (Not MIPS Standard)
+ * that are needed.
+ */
+
+ /* SMP Setup to release sched_lock in fork_exit(). */
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_intr = MIPS_SR_INT_IE;
+#if 0
+ /* Maybe we need to fix this? */
+ td->td_md.md_saved_sr = ( (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) |
+ (MIPS32_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) |
+ (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK));
+#endif
+}
+
+/*
+ * Set the machine state for performing an upcall that has to
+ * be done in thread_userret() so that those upcalls generated
+ * in thread_userret() itself can be done as well.
+ */
+void
+cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
+ stack_t *stack)
+{
+ struct trapframe *tf;
+ u_int32_t sp;
+
+	/*
+	 * At the point where a function is called, sp must be
+	 * 8-byte aligned for compatibility with 64-bit CPUs
+	 * (``See MIPS Run'' by D. Sweetman, p. 269), so align
+	 * the stack accordingly.
+	 */
+ sp = ((uint32_t)(stack->ss_sp + stack->ss_size) & ~0x7) -
+ STAND_FRAME_SIZE;
+
+ /*
+ * Set the trap frame to point at the beginning of the uts
+ * function.
+ */
+ tf = td->td_frame;
+ bzero(tf, sizeof(struct trapframe));
+ tf->sp = (register_t)sp;
+ tf->pc = (register_t)entry;
+ tf->a0 = (register_t)arg;
+
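+	/* Run in user mode once the eret back to userland clears EXL. */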
+ tf->sr = SR_KSU_USER | SR_EXL;
+#ifdef TARGET_OCTEON
+ tf->sr |= MIPS_SR_INT_IE | MIPS_SR_COP_0_BIT | MIPS_SR_UX |
+ MIPS_SR_KX;
+#endif
+/* tf->sr |= (ALL_INT_MASK & idle_mask) | SR_INT_ENAB; */
+	/* XXX: the above may now be wrong -- mips2 implements this as a panic. */
+ /*
+ * FREEBSD_DEVELOPERS_FIXME:
+ * Setup any other CPU-Specific registers (Not MIPS Standard)
+ * that are needed.
+ */
+}
+/*
+ * Convert kernel VA to physical address
+ */
+u_long
+kvtop(void *addr)
+{
+	vm_offset_t pa;
+
+	pa = pmap_kextract((vm_offset_t)addr);
+	if (pa == 0)
+		panic("kvtop: zero page frame");
+	return ((u_long)pa);
+}
+
+/*
+ * Low and high watermarks for the pre-zeroed page mechanism,
+ * which is driven from the idle loop.
+ */
+
+#define ZIDLE_LO(v) ((v) * 2 / 3)
+#define ZIDLE_HI(v) ((v) * 4 / 5)
+
+/*
+ * Tell whether this address is in some physical memory region.
+ * Currently used by the kernel coredump code in order to avoid
+ * dumping non-memory physical address space.
+ */
+int
+is_physical_memory(vm_offset_t addr)
+{
+ if (addr >= SDRAM_ADDR_START && addr <= SDRAM_ADDR_END)
+ return 1;
+ else
+ return 0;
+}
+
+int
+is_cacheable_mem(vm_offset_t pa)
+{
+ if ((pa >= SDRAM_ADDR_START && pa <= SDRAM_ADDR_END) ||
+#ifdef FLASH_ADDR_START
+ (pa >= FLASH_ADDR_START && pa <= FLASH_ADDR_END))
+#else
+ 0)
+#endif
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ nsfbufs = NSFBUFS;
+ TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
+
+ sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
+ TAILQ_INIT(&sf_buf_freelist);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
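+	/*
+	 * Give each buffer a page of KVA now; the backing page itself
+	 * is mapped later by sf_buf_alloc().
+	 */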
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
+}
+
+/*
+ * Allocate an sf_buf for the given vm_page.  If the page is already
+ * mapped by an active sf_buf, reuse that mapping; otherwise take a
+ * buffer from the free list, sleeping if necessary unless SFB_NOWAIT
+ * is given, and map the page at the buffer's KVA.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m, int flags)
+{
+ struct sf_head *hash_list;
+ struct sf_buf *sf;
+ int error;
+
+ hash_list = &sf_buf_active[SF_BUF_HASH(m)];
+ mtx_lock(&sf_buf_lock);
+ LIST_FOREACH(sf, hash_list, list_entry) {
+ if (sf->m == m) {
+ sf->ref_count++;
+ if (sf->ref_count == 1) {
+ TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+ nsfbufsused++;
+ nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
+ }
+ goto done;
+ }
+ }
+ while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
+ if (flags & SFB_NOWAIT)
+ goto done;
+ sf_buf_alloc_want++;
+ mbstat.sf_allocwait++;
+ error = msleep(&sf_buf_freelist, &sf_buf_lock,
+ (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ goto done;
+ }
+ TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+ if (sf->m != NULL)
+ LIST_REMOVE(sf, list_entry);
+ LIST_INSERT_HEAD(hash_list, sf, list_entry);
+ sf->ref_count = 1;
+ sf->m = m;
+ nsfbufsused++;
+ nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
+ pmap_qenter(sf->kva, &sf->m, 1);
+done:
+ mtx_unlock(&sf_buf_lock);
+ return (sf);
+}
+
+/*
+ * Release a reference to the sf_buf.  When the last reference is
+ * dropped, return the buffer to the free list and wake up any
+ * waiters.
+ */
+void
+sf_buf_free(struct sf_buf *sf)
+{
+ mtx_lock(&sf_buf_lock);
+ sf->ref_count--;
+ if (sf->ref_count == 0) {
+ TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
+ nsfbufsused--;
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_buf_freelist);
+ }
+ mtx_unlock(&sf_buf_lock);
+}
+
+/*
+ * Software interrupt handler for queued VM system processing.
+ */
+void
+swi_vm(void *dummy)
+{
+}
+
+int
+cpu_set_user_tls(struct thread *td, void *tls_base)
+{
+
+ /* TBD */
+ return (0);
+}
+
+void
+cpu_throw(struct thread *old, struct thread *new)
+{
+
+ func_2args_asmmacro(&mips_cpu_throw, old, new);
+ panic("mips_cpu_throw() returned");
+}