summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--sys/sparc64/include/asi.h93
-rw-r--r--sys/sparc64/include/asmacros.h64
-rw-r--r--sys/sparc64/include/atomic.h532
-rw-r--r--sys/sparc64/include/bootinfo.h44
-rw-r--r--sys/sparc64/include/cpu.h15
-rw-r--r--sys/sparc64/include/cpufunc.h126
-rw-r--r--sys/sparc64/include/db_machdep.h44
-rw-r--r--sys/sparc64/include/frame.h39
-rw-r--r--sys/sparc64/include/globaldata.h2
-rw-r--r--sys/sparc64/include/mutex.h2
-rw-r--r--sys/sparc64/include/param.h48
-rw-r--r--sys/sparc64/include/pcb.h4
-rw-r--r--sys/sparc64/include/pcpu.h2
-rw-r--r--sys/sparc64/include/pmap.h30
-rw-r--r--sys/sparc64/include/proc.h1
-rw-r--r--sys/sparc64/include/pstate.h79
-rw-r--r--sys/sparc64/include/pv.h175
-rw-r--r--sys/sparc64/include/resource.h3
-rw-r--r--sys/sparc64/include/setjmp.h55
-rw-r--r--sys/sparc64/include/stdarg.h1
-rw-r--r--sys/sparc64/include/tlb.h149
-rw-r--r--sys/sparc64/include/trap.h70
-rw-r--r--sys/sparc64/include/tsb.h220
-rw-r--r--sys/sparc64/include/tte.h146
-rw-r--r--sys/sparc64/include/vmparam.h12
-rw-r--r--sys/sparc64/sparc64/autoconf.c17
-rw-r--r--sys/sparc64/sparc64/clock.c4
-rw-r--r--sys/sparc64/sparc64/db_disasm.c1116
-rw-r--r--sys/sparc64/sparc64/db_interface.c110
-rw-r--r--sys/sparc64/sparc64/db_trace.c275
-rw-r--r--sys/sparc64/sparc64/exception.S603
-rw-r--r--sys/sparc64/sparc64/exception.s603
-rw-r--r--sys/sparc64/sparc64/genassym.c163
-rw-r--r--sys/sparc64/sparc64/locore.S61
-rw-r--r--sys/sparc64/sparc64/locore.s61
-rw-r--r--sys/sparc64/sparc64/machdep.c285
-rw-r--r--sys/sparc64/sparc64/pmap.c736
-rw-r--r--sys/sparc64/sparc64/pv.c90
-rw-r--r--sys/sparc64/sparc64/support.S346
-rw-r--r--sys/sparc64/sparc64/support.s346
-rw-r--r--sys/sparc64/sparc64/swtch.S69
-rw-r--r--sys/sparc64/sparc64/swtch.s69
-rw-r--r--sys/sparc64/sparc64/trap.c65
-rw-r--r--sys/sparc64/sparc64/tsb.c279
-rw-r--r--sys/sparc64/sparc64/upa.c66
-rw-r--r--sys/sparc64/sparc64/vm_machdep.c38
46 files changed, 6905 insertions, 453 deletions
diff --git a/sys/sparc64/include/asi.h b/sys/sparc64/include/asi.h
new file mode 100644
index 0000000..34f146d
--- /dev/null
+++ b/sys/sparc64/include/asi.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASI_H_
+#define _MACHINE_ASI_H_
+
+/*
+ * Standard v9 asis
+ */
+#define ASI_N 0x4
+#define ASI_NL 0xc
+#define ASI_AIUP 0x10
+#define ASI_AIUS 0x11
+#define ASI_AIUSL 0x19
+#define ASI_P 0x80
+#define ASI_S 0x81
+#define ASI_PNF 0x82
+#define ASI_SNF 0x83
+#define ASI_PL 0x88
+#define ASI_PNFL 0x8a
+#define ASI_SNFL 0x8b
+
+/*
+ * UltraSPARC extensions
+ */
+#define ASI_PHYS_USE_EC 0x14
+#define ASI_PHYS_BYPASS_EC_WITH_EBIT 0x15
+#define ASI_PHYS_USE_EC_L 0x1c
+#define ASI_PHYS_BYPASS_EC_WITH_EBIT_L 0x1d
+
+#define ASI_NUCLEUS_QUAD_LDD 0x24
+#define ASI_NUCLEUS_QUAD_LDD_L 0x2c
+
+#define ASI_IMMU 0x50
+#define AA_IMMU_TTR 0x0
+#define AA_IMMU_SFSR 0x18
+#define AA_IMMU_TSB 0x28
+#define AA_IMMU_TAR 0x30
+
+#define ASI_IMMU_TSB_8KB_PTR_REG 0x51
+#define ASI_IMMU_TSB_64KB_PTR_REG 0x52
+#define ASI_ITLB_DATA_IN_REG 0x54
+#define ASI_ITLB_DATA_ACCESS_REG 0x55
+#define ASI_ITLB_TAG_READ_REG 0x56
+#define ASI_IMMU_DEMAP 0x57
+
+#define ASI_DMMU_TAG_TARGET_REG 0x58
+
+#define ASI_DMMU 0x58
+#define AA_DMMU_TTR 0x0
+#define AA_DMMU_PCXR 0x8
+#define AA_DMMU_SCXR 0x10
+#define AA_DMMU_SFSR 0x18
+#define AA_DMMU_SFAR 0x20
+#define AA_DMMU_TSB 0x28
+#define AA_DMMU_TAR 0x30
+#define AA_DMMU_VWPR 0x38
+#define AA_DMMU_PWPR 0x40
+
+#define ASI_DMMU_TSB_8KB_PTR_REG 0x59
+#define ASI_DMMU_TSB_64KB_PTR_REG 0x5a
+#define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b
+#define ASI_DTLB_DATA_IN_REG 0x5c
+#define ASI_DTLB_DATA_ACCESS_REG 0x5d
+#define ASI_DTLB_TAG_READ_REG 0x5e
+#define ASI_DMMU_DEMAP 0x5f
+
+#endif /* !_MACHINE_ASI_H_ */
diff --git a/sys/sparc64/include/asmacros.h b/sys/sparc64/include/asmacros.h
new file mode 100644
index 0000000..efca5380
--- /dev/null
+++ b/sys/sparc64/include/asmacros.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASMACROS_H_
+#define _MACHINE_ASMACROS_H_
+
+#ifdef _KERNEL
+
+#define PCPU(member) %g7 + GD_ ## member
+#define DEBUGGER() ta %xcc, 1
+#define PANIC(msg, reg) \
+ .sect .rodata ; \
+9: .asciz msg ; \
+ .previous ; \
+ setx 9b, reg, %o0 ; \
+ call panic ; \
+ nop
+
+#endif
+
+#define DATA(name) \
+ .data ; \
+ .globl name ; \
+ .type name, @object ; \
+name ## :
+
+#define EMPTY
+
+#define ENTRY(name) \
+ .text ; \
+ .align 4 ; \
+ .globl name ; \
+ .type name, @function ; \
+name ## :
+
+#define END(name) \
+ .size name, . - name
+
+#endif /* !_MACHINE_ASMACROS_H_ */
diff --git a/sys/sparc64/include/atomic.h b/sys/sparc64/include/atomic.h
index b8273e9..3dcb616 100644
--- a/sys/sparc64/include/atomic.h
+++ b/sys/sparc64/include/atomic.h
@@ -26,327 +26,241 @@
* $FreeBSD$
*/
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+#include <machine/cpufunc.h>
+
/*
- * This is not atomic. It is just a stub to make things compile.
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts and multiple processors. See atomic(9) for details.
+ * Note that efficient hardware support exists only for the 32 and 64
+ * bit variants; the 8 and 16 bit versions are not provided and should
+ * not be used in MI code.
+ *
+ * This implementation takes advantage of the fact that the sparc64
+ * cas instruction is both a load and a store. The loop is often coded
+ * as follows:
+ *
+ * do {
+ * expect = *p;
+ * new = expect + 1;
+ * } while (cas(p, expect, new) != expect);
+ *
+ * which performs an unnecessary load on each iteration that the cas
+ * operation fails. Modified as follows:
+ *
+ * expect = *p;
+ * for (;;) {
+ * new = expect + 1;
+ * result = cas(p, expect, new);
+ * if (result == expect)
+ * break;
+ * expect = result;
+ * }
+ *
+ * the return value of cas is used to avoid the extra reload. At the
+ * time of writing, with gcc version 2.95.3, the branch for the if
+ * statement is predicted incorrectly as not taken, rather than taken.
+ * It is expected that the branch prediction hints available in gcc 3.0,
+ * __builtin_expect, will allow better code to be generated.
+ *
+ * The memory barriers provided by the acq and rel variants are intended
+ * to be sufficient for use of relaxed memory ordering. Due to the
+ * suggested assembly syntax of the membar operands containing a #
+ * character, they cannot be used in macros. The cmask and mmask bits
+ * are hard coded in machine/cpufunc.h and used here through macros.
+ * Hopefully sun will choose not to change the bit numbers.
*/
-#ifndef _MACHINE_ATOMIC_H_
-#define _MACHINE_ATOMIC_H_
+#define itype(sz) u_int ## sz ## _t
-#define __atomic_op(p, op, v) ({ \
- __typeof(*p) __v = (__typeof(*p))v; \
- *p op __v; \
-})
+#define atomic_cas_32(p, e, s) casa(p, e, s, ASI_N)
+#define atomic_cas_64(p, e, s) casxa(p, e, s, ASI_N)
+
+#define atomic_cas(p, e, s, sz) \
+ atomic_cas_ ## sz(p, e, s)
-#define __atomic_load(p) ({ \
- __typeof(*p) __v; \
- __v = *p; \
- __v; \
+#define atomic_cas_acq(p, e, s, sz) ({ \
+ itype(sz) v; \
+ v = atomic_cas(p, e, s, sz); \
+ membar(LoadLoad | LoadStore); \
+ v; \
})
-#define __atomic_load_clear(p) ({ \
- __typeof(*p) __v; \
- __v = *p; \
- *p = 0; \
- __v; \
+#define atomic_cas_rel(p, e, s, sz) ({ \
+ itype(sz) v; \
+ membar(LoadStore | StoreStore); \
+ v = atomic_cas(p, e, s, sz); \
+ v; \
})
-#define __atomic_cas(p, e, s) ({ \
- u_int __v; \
- if (*p == (__typeof(*p))e) { \
- *p = (__typeof(*p))s; \
- __v = 1; \
- } else { \
- __v = 0; \
+#define atomic_op(p, op, v, sz) do { \
+ itype(sz) e, r, s; \
+ for (e = *(volatile itype(sz) *)p;; e = r) { \
+ s = e op v; \
+ r = atomic_cas_ ## sz(p, e, s); \
+ if (r == e) \
+ break; \
} \
- __v; \
+} while (0)
+
+#define atomic_op_acq(p, op, v, sz) do { \
+ atomic_op(p, op, v, sz); \
+ membar(LoadLoad | LoadStore); \
+} while (0)
+
+#define atomic_op_rel(p, op, v, sz) do { \
+ membar(LoadStore | StoreStore); \
+ atomic_op(p, op, v, sz); \
+} while (0)
+
+#define atomic_load_acq(p, sz) ({ \
+ itype(sz) v; \
+ v = atomic_cas_ ## sz(p, 0, 0); \
+ membar(LoadLoad | LoadStore); \
+ v; \
})
-#define __atomic_op_8(p, op, v) __atomic_op(p, op, v)
-#define __atomic_op_16(p, op, v) __atomic_op(p, op, v)
-#define __atomic_op_32(p, op, v) __atomic_op(p, op, v)
-#define __atomic_load_32(p) __atomic_load(p)
-#define __atomic_load_clear_32(p) __atomic_load_clear(p)
-#define __atomic_cas_32(p, e, s) __atomic_cas(p, e, s)
-#define __atomic_op_64(p, op, v) __atomic_op(p, op, v)
-#define __atomic_load_64(p) __atomic_load(p)
-#define __atomic_load_clear_64(p) __atomic_load_clear(p)
-#define __atomic_cas_64(p, e, s) __atomic_cas(p, e, s)
-
-#define atomic_add_8(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_8(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_8(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_8(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_8(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_16(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_16(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_16(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_16(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_16(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_32(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_32(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_32(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_32(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_32(p, v) __atomic_op_32(p, =, v)
-#define atomic_load_32(p) __atomic_load_32(p)
-#define atomic_readandclear_32(p) __atomic_load_clear_32(p)
-#define atomic_cmpset_32(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_64(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_64(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_64(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_64(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_64(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_64(p) __atomic_load_64(p)
-#define atomic_readandclear_64(p) __atomic_load_clear_64(p)
-#define atomic_cmpset_64(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_acq_8(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_acq_8(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_acq_8(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_acq_8(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_acq_8(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_acq_16(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_acq_16(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_acq_16(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_acq_16(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_acq_16(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_acq_32(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_acq_32(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_acq_32(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_acq_32(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_acq_32(p, v) __atomic_op_32(p, =, v)
-#define atomic_load_acq_32(p) __atomic_load_32(p)
-#define atomic_cmpset_acq_32(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_acq_64(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_acq_64(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_acq_64(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_acq_64(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_acq_64(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_acq_64(p) __atomic_load_64(p)
-#define atomic_cmpset_acq_64(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_rel_8(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_rel_8(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_rel_8(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_rel_8(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_rel_8(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_rel_16(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_rel_16(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_rel_16(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_rel_16(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_rel_16(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_rel_32(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_rel_32(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_rel_32(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_rel_32(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_rel_32(p, v) __atomic_op_32(p, =, v)
-#define atomic_cmpset_rel_32(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_rel_64(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_rel_64(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_rel_64(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_rel_64(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_rel_64(p, v) __atomic_op_64(p, =, v)
-#define atomic_cmpset_rel_64(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_char(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_char(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_char(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_char(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_char(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_short(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_short(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_short(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_short(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_short(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_int(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_int(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_int(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_int(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_int(p, v) __atomic_op_32(p, =, v)
-#define atomic_load_int(p) __atomic_load_32(p)
-#define atomic_readandclear_int(p) __atomic_load_clear_32(p)
-#define atomic_cmpset_int(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_long(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_long(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_long(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_long(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_long(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_long(p) __atomic_load_64(p)
-#define atomic_readandclear_long(p) __atomic_load_clear_64(p)
-#define atomic_cmpset_long(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_acq_char(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_acq_char(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_acq_char(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_acq_char(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_acq_char(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_acq_short(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_acq_short(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_acq_short(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_acq_short(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_acq_short(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_acq_int(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_acq_int(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_acq_int(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_acq_int(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_acq_int(p, v) __atomic_op_32(p, =, v)
-#define atomic_load_acq_int(p) __atomic_load_32(p)
-#define atomic_cmpset_acq_int(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_acq_long(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_acq_long(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_acq_long(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_acq_long(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_acq_long(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_acq_long(p) __atomic_load_64(p)
-#define atomic_cmpset_acq_long(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_rel_char(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_rel_char(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_rel_char(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_rel_char(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_rel_char(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_rel_short(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_rel_short(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_rel_short(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_rel_short(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_rel_short(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_rel_int(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_rel_int(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_rel_int(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_rel_int(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_rel_int(p, v) __atomic_op_32(p, =, v)
-#define atomic_cmpset_rel_int(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_rel_long(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_rel_long(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_rel_long(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_rel_long(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_rel_long(p, v) __atomic_op_64(p, =, v)
-#define atomic_cmpset_rel_long(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_char(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_char(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_char(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_char(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_char(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_short(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_short(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_short(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_short(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_short(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_int(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_int(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_int(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_int(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_int(p, v) __atomic_op_32(p, =, v)
-#define atomic_load_int(p) __atomic_load_32(p)
-#define atomic_readandclear_int(p) __atomic_load_clear_32(p)
-#define atomic_cmpset_int(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_long(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_long(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_long(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_long(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_long(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_long(p) __atomic_load_64(p)
-#define atomic_readandclear_long(p) __atomic_load_clear_64(p)
-#define atomic_cmpset_long(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_ptr(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_ptr(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_ptr(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_ptr(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_ptr(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_ptr(p) __atomic_load_64(p)
-#define atomic_readandclear_ptr(p) __atomic_load_clear_64(p)
-#define atomic_cmpset_ptr(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_acq_char(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_acq_char(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_acq_char(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_acq_char(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_acq_char(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_acq_short(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_acq_short(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_acq_short(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_acq_short(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_acq_short(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_acq_int(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_acq_int(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_acq_int(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_acq_int(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_acq_int(p, v) __atomic_op_32(p, =, v)
-#define atomic_load_acq_int(p) __atomic_load_32(p)
-#define atomic_cmpset_acq_int(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_acq_long(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_acq_long(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_acq_long(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_acq_long(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_acq_long(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_acq_long(p) __atomic_load_64(p)
-#define atomic_cmpset_acq_long(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_acq_ptr(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_acq_ptr(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_acq_ptr(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_acq_ptr(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_acq_ptr(p, v) __atomic_op_64(p, =, v)
-#define atomic_load_acq_ptr(p) __atomic_load_64(p)
-#define atomic_cmpset_acq_ptr(p, e, s) __atomic_cas_64(p, e, s)
-
-#define atomic_add_rel_char(p, v) __atomic_op_8(p, +=, v)
-#define atomic_subtract_rel_char(p, v) __atomic_op_8(p, -=, v)
-#define atomic_set_rel_char(p, v) __atomic_op_8(p, |=, v)
-#define atomic_clear_rel_char(p, v) __atomic_op_8(p, &=, ~v)
-#define atomic_store_rel_char(p, v) __atomic_op_8(p, =, v)
-
-#define atomic_add_rel_short(p, v) __atomic_op_16(p, +=, v)
-#define atomic_subtract_rel_short(p, v) __atomic_op_16(p, -=, v)
-#define atomic_set_rel_short(p, v) __atomic_op_16(p, |=, v)
-#define atomic_clear_rel_short(p, v) __atomic_op_16(p, &=, ~v)
-#define atomic_store_rel_short(p, v) __atomic_op_16(p, =, v)
-
-#define atomic_add_rel_int(p, v) __atomic_op_32(p, +=, v)
-#define atomic_subtract_rel_int(p, v) __atomic_op_32(p, -=, v)
-#define atomic_set_rel_int(p, v) __atomic_op_32(p, |=, v)
-#define atomic_clear_rel_int(p, v) __atomic_op_32(p, &=, ~v)
-#define atomic_store_rel_int(p, v) __atomic_op_32(p, =, v)
-#define atomic_cmpset_rel_int(p, e, s) __atomic_cas_32(p, e, s)
-
-#define atomic_add_rel_long(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_rel_long(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_rel_long(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_rel_long(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_rel_long(p, v) __atomic_op_64(p, =, v)
-#define atomic_cmpset_rel_long(p, e, s) __atomic_cas_64(p, e, s)
+#define atomic_load_clear(p, sz) ({ \
+ itype(sz) e, r; \
+ for (e = *(volatile itype(sz) *)p;; e = r) { \
+ r = atomic_cas_ ## sz(p, e, 0); \
+ if (r == e) \
+ break; \
+ } \
+ e; \
+})
-#define atomic_add_rel_ptr(p, v) __atomic_op_64(p, +=, v)
-#define atomic_subtract_rel_ptr(p, v) __atomic_op_64(p, -=, v)
-#define atomic_set_rel_ptr(p, v) __atomic_op_64(p, |=, v)
-#define atomic_clear_rel_ptr(p, v) __atomic_op_64(p, &=, ~v)
-#define atomic_store_rel_ptr(p, v) __atomic_op_64(p, =, v)
-#define atomic_cmpset_rel_ptr(p, e, s) __atomic_cas_64(p, e, s)
+#define atomic_store_rel(p, v, sz) do { \
+ itype(sz) e, r; \
+ membar(LoadStore | StoreStore); \
+ for (e = *(volatile itype(sz) *)p;; e = r) { \
+ r = atomic_cas_ ## sz(p, e, v); \
+ if (r == e) \
+ break; \
+ } \
+} while (0)
+
+#define ATOMIC_GEN(name, ptype, vtype, atype, sz) \
+ \
+static __inline void \
+atomic_add_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op(p, +, v, sz); \
+} \
+static __inline void \
+atomic_add_acq_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_acq(p, +, v, sz); \
+} \
+static __inline void \
+atomic_add_rel_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_rel(p, +, v, sz); \
+} \
+ \
+static __inline void \
+atomic_clear_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op(p, &, ~v, sz); \
+} \
+static __inline void \
+atomic_clear_acq_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_acq(p, &, ~v, sz); \
+} \
+static __inline void \
+atomic_clear_rel_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_rel(p, &, ~v, sz); \
+} \
+ \
+static __inline int \
+atomic_cmpset_ ## name(volatile ptype p, vtype e, vtype s) \
+{ \
+ return (((vtype)atomic_cas(p, e, s, sz)) == e); \
+} \
+static __inline int \
+atomic_cmpset_acq_ ## name(volatile ptype p, vtype e, vtype s) \
+{ \
+ return (((vtype)atomic_cas_acq(p, e, s, sz)) == e); \
+} \
+static __inline int \
+atomic_cmpset_rel_ ## name(volatile ptype p, vtype e, vtype s) \
+{ \
+ return (((vtype)atomic_cas_rel(p, e, s, sz)) == e); \
+} \
+ \
+static __inline vtype \
+atomic_load_acq_ ## name(volatile ptype p) \
+{ \
+ return ((vtype)atomic_cas_acq(p, 0, 0, sz)); \
+} \
+ \
+static __inline vtype \
+atomic_readandclear_ ## name(volatile ptype p) \
+{ \
+ return ((vtype)atomic_load_clear(p, sz)); \
+} \
+ \
+static __inline void \
+atomic_set_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op(p, |, v, sz); \
+} \
+static __inline void \
+atomic_set_acq_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_acq(p, |, v, sz); \
+} \
+static __inline void \
+atomic_set_rel_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_rel(p, |, v, sz); \
+} \
+ \
+static __inline void \
+atomic_subtract_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op(p, -, v, sz); \
+} \
+static __inline void \
+atomic_subtract_acq_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_acq(p, -, v, sz); \
+} \
+static __inline void \
+atomic_subtract_rel_ ## name(volatile ptype p, atype v) \
+{ \
+ atomic_op_rel(p, -, v, sz); \
+} \
+ \
+static __inline void \
+atomic_store_rel_ ## name(volatile ptype p, vtype v) \
+{ \
+ atomic_store_rel(p, v, sz); \
+}
+
+ATOMIC_GEN(int, int *, int, int, 32);
+ATOMIC_GEN(32, int *, int, int, 32);
+
+ATOMIC_GEN(long, long *, long, long, 64);
+ATOMIC_GEN(64, long *, long, long, 64);
+
+ATOMIC_GEN(ptr, void *, void *, uintptr_t, 64);
+
+#undef ATOMIC_GEN
+#undef atomic_cas_32
+#undef atomic_cas_64
+#undef atomic_cas
+#undef atomic_cas_acq
+#undef atomic_cas_rel
+#undef atomic_op
+#undef atomic_op_acq
+#undef atomic_op_rel
+#undef atomic_load_acq
+#undef atomic_store_rel
+#undef atomic_load_clear
#endif /* !_MACHINE_ATOMIC_H_ */
diff --git a/sys/sparc64/include/bootinfo.h b/sys/sparc64/include/bootinfo.h
new file mode 100644
index 0000000..7da2226
--- /dev/null
+++ b/sys/sparc64/include/bootinfo.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BOOTINFO_H_
+#define _MACHINE_BOOTINFO_H_
+
+/*
+ * Increment the version number when you break binary compatibility.
+ */
+#define BOOTINFO_VERSION 1
+
+struct bootinfo {
+ u_int bi_version;
+ u_long bi_end;
+ u_long bi_kpa;
+ u_long bi_metadata;
+};
+
+#endif /* !_MACHINE_BOOTINFO_H_ */
diff --git a/sys/sparc64/include/cpu.h b/sys/sparc64/include/cpu.h
index a6c206a..83d34c3 100644
--- a/sys/sparc64/include/cpu.h
+++ b/sys/sparc64/include/cpu.h
@@ -32,10 +32,13 @@
#include <machine/frame.h>
#define CLKF_USERMODE(cfp) (0)
-#define CLKF_PC(cfp) (0)
+#define CLKF_PC(cfp) ((cfp)->cf_tf.tf_tpc)
-#define cpu_getstack(p) (0)
-#define cpu_setstack(p, sp) (0)
+#define TRAPF_PC(tfp) ((tfp)->tf_tpc)
+#define TRAPF_USERMODE(tfp) (0)
+
+#define cpu_getstack(p) ((p)->p_frame->tf_sp)
+#define cpu_setstack(p, sp) ((p)->p_frame->tf_sp = (sp))
/*
* Arrange to handle pending profiling ticks before returning to user mode.
@@ -66,11 +69,13 @@
{ "wall_cmos_clock", CTLTYPE_INT }, \
}
+void fork_trampoline(void);
+
static __inline u_int64_t
get_cyclecount(void)
{
- static u_long now;
- return (++now);
+
+ return (rd(tick));
}
#endif /* !_MACHINE_CPU_H_ */
diff --git a/sys/sparc64/include/cpufunc.h b/sys/sparc64/include/cpufunc.h
index 57cf2ab..a3e5a6b 100644
--- a/sys/sparc64/include/cpufunc.h
+++ b/sys/sparc64/include/cpufunc.h
@@ -29,15 +29,139 @@
#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_
+#include <machine/asi.h>
+#include <machine/pstate.h>
+
+/*
+ * membar operand macros for use in other macros when # is a special
+ * character. Keep these in sync with what the hardware expects.
+ */
+#define C_Lookaside (0)
+#define C_MemIssue (1)
+#define C_Sync (2)
+#define M_LoadLoad (0)
+#define M_StoreLoad (1)
+#define M_LoadStore (2)
+#define M_StoreStore (3)
+
+#define CMASK_SHIFT (4)
+#define MMASK_SHIFT (0)
+
+#define CMASK_GEN(bit) ((1 << (bit)) << CMASK_SHIFT)
+#define MMASK_GEN(bit) ((1 << (bit)) << MMASK_SHIFT)
+
+#define Lookaside CMASK_GEN(C_Lookaside)
+#define MemIssue CMASK_GEN(C_MemIssue)
+#define Sync CMASK_GEN(C_Sync)
+#define LoadLoad MMASK_GEN(M_LoadLoad)
+#define StoreLoad MMASK_GEN(M_StoreLoad)
+#define LoadStore MMASK_GEN(M_LoadStore)
+#define StoreStore MMASK_GEN(M_StoreStore)
+
+#define casa(rs1, rs2, rd, asi) ({ \
+ u_int __rd = (u_int32_t)(rd); \
+ __asm __volatile("casa [%1] %2, %3, %0" \
+ : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \
+ __rd; \
+})
+
+#define casxa(rs1, rs2, rd, asi) ({ \
+ u_long __rd = (u_int64_t)(rd); \
+ __asm __volatile("casxa [%1] %2, %3, %0" \
+ : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \
+ __rd; \
+})
+
+#define flush(va) do { \
+ __asm __volatile("flush %0" : : "r" (va)); \
+} while (0)
+
+#define ldxa(va, asi) ({ \
+ u_long __r; \
+ __asm __volatile("ldxa [%1] %2, %0" \
+ : "=r" (__r) : "r" (va), "n" (asi)); \
+ __r; \
+})
+
+#define stxa(va, asi, val) do { \
+ __asm __volatile("stxa %0, [%1] %2" \
+ : : "r" (val), "r" (va), "n" (asi)); \
+} while (0)
+
+#define membar(mask) do { \
+ __asm __volatile("membar %0" : : "n" (mask)); \
+} while (0)
+
+#define rd(name) ({ \
+ u_int64_t __sr; \
+ __asm __volatile("rd %%" #name ", %0" : "=r" (__sr) :); \
+ __sr; \
+})
+
+#define wr(name, val, xor) do { \
+ __asm __volatile("wr %0, %1, %%" #name \
+ : : "r" (val), "rI" (xor)); \
+} while (0)
+
+#define rdpr(name) ({ \
+ u_int64_t __pr; \
+ __asm __volatile("rdpr %%" #name", %0" : "=r" (__pr) :); \
+ __pr; \
+})
+
+#define wrpr(name, val, xor) do { \
+ __asm __volatile("wrpr %0, %1, %%" #name \
+ : : "r" (val), "rI" (xor)); \
+} while (0)
+
+static __inline void
+breakpoint(void)
+{
+ __asm __volatile("ta 1");
+}
+
+/*
+ * XXX use %pil for these.
+ */
static __inline critical_t
critical_enter(void)
{
- return (0);
+ critical_t ie;
+
+ ie = rdpr(pstate);
+ if (ie & PSTATE_IE)
+ wrpr(pstate, ie, PSTATE_IE);
+ return (ie);
}
static __inline void
critical_exit(critical_t ie)
{
+
+ if (ie & PSTATE_IE)
+ wrpr(pstate, ie, 0);
+}
+
+#if 0
+#define HAVE_INLINE_FFS
+/*
+ * See page 202 of the SPARC v9 Architecture Manual.
+ */
+static __inline int
+ffs(int mask)
+{
+ int result;
+ int neg;
+ int tmp;
+
+ __asm __volatile(
+ " neg %3, %1 ; "
+ " xnor %3, %1, %2 ; "
+ " popc %2, %0 ; "
+ " movrz %3, %%g0, %0 ; "
+ : "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask));
+ return (result);
}
+#endif
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/sparc64/include/db_machdep.h b/sys/sparc64/include/db_machdep.h
index 5d091ee..0e4954e 100644
--- a/sys/sparc64/include/db_machdep.h
+++ b/sys/sparc64/include/db_machdep.h
@@ -29,4 +29,48 @@
#ifndef _MACHINE_DB_MACHDEP_H_
#define _MACHINE_DB_MACHDEP_H_
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+#define BYTE_MSF (1)
+
+typedef vm_offset_t db_addr_t;
+typedef u_long db_expr_t;
+
+struct db_regs {
+ u_long dr_global[8];
+};
+
+typedef struct trapframe db_regs_t;
+extern db_regs_t ddb_regs;
+#define DDB_REGS (&ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_tpc)
+
+#define BKPT_INST (0)
+#define BKPT_SIZE (4)
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK do { \
+ ddb_regs.tf_tpc = ddb_regs.tf_tnpc; \
+ ddb_regs.tf_tnpc += BKPT_SIZE; \
+} while (0);
+
+#define db_clear_single_step(regs)
+#define db_set_single_step(regs)
+
+#define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code) (0)
+
+#define inst_trap_return(ins) (0)
+#define inst_return(ins) (0)
+#define inst_call(ins) (0)
+#define inst_load(ins) (0)
+#define inst_store(ins) (0)
+
+#define DB_SMALL_VALUE_MAX (0x7fffffff)
+#define DB_SMALL_VALUE_MIN (-0x40001)
+
+#define DB_ELFSIZE 64
+
#endif /* !_MACHINE_DB_MACHDEP_H_ */
diff --git a/sys/sparc64/include/frame.h b/sys/sparc64/include/frame.h
index da06e3c..76c58de 100644
--- a/sys/sparc64/include/frame.h
+++ b/sys/sparc64/include/frame.h
@@ -29,10 +29,47 @@
#ifndef _MACHINE_FRAME_H_
#define _MACHINE_FRAME_H_
+#define SPOFF 2047
+
+struct trapframe {
+ u_long tf_global[8];
+ u_long tf_out[8];
+ u_long tf_tnpc;
+ u_long tf_tpc;
+ u_long tf_tstate;
+ u_long tf_type;
+ void *tf_arg;
+};
+#define tf_sp tf_out[6]
+
+struct mmuframe {
+ u_long mf_sfar;
+ u_long mf_sfsr;
+ u_long mf_tar;
+};
+
+struct kdbframe {
+ u_long kf_fp;
+ u_long kf_cfp;
+ u_long kf_canrestore;
+ u_long kf_cansave;
+ u_long kf_cleanwin;
+ u_long kf_cwp;
+ u_long kf_otherwin;
+};
+
struct clockframe {
+ struct trapframe cf_tf;
};
-struct trapframe {
+struct frame {
+ u_long f_local[8];
+ u_long f_in[8];
+ u_long f_pad[8];
};
+#define f_fp f_in[6]
+#define f_pc f_in[7]
+
+int kdb_trap(struct trapframe *tf);
#endif /* !_MACHINE_FRAME_H_ */
diff --git a/sys/sparc64/include/globaldata.h b/sys/sparc64/include/globaldata.h
index 38b351e..7af3375 100644
--- a/sys/sparc64/include/globaldata.h
+++ b/sys/sparc64/include/globaldata.h
@@ -37,7 +37,7 @@ struct globaldata {
SLIST_ENTRY(globaldata) gd_allcpu;
struct pcb *gd_curpcb;
struct proc *gd_curproc;
- struct proc *gd_fpproc;
+ struct proc *gd_fpcurproc;
struct proc *gd_idleproc;
u_int gd_cpuid;
u_int gd_other_cpus;
diff --git a/sys/sparc64/include/mutex.h b/sys/sparc64/include/mutex.h
index 9c6bc1a..dd337be 100644
--- a/sys/sparc64/include/mutex.h
+++ b/sys/sparc64/include/mutex.h
@@ -32,6 +32,8 @@
static __inline void
mtx_intr_enable(struct mtx *mtx)
{
+
+ mtx->mtx_savecrit |= PSTATE_IE;
}
#endif /* !_MACHINE_MUTEX_H_ */
diff --git a/sys/sparc64/include/param.h b/sys/sparc64/include/param.h
index c164134..a54bb1b 100644
--- a/sys/sparc64/include/param.h
+++ b/sys/sparc64/include/param.h
@@ -85,15 +85,25 @@
#define ALIGNBYTES _ALIGNBYTES
#define ALIGN(p) _ALIGN(p)
-#define PAGE_SHIFT 13 /* LOG2(PAGE_SIZE) */
-#define PAGE_SIZE (1<<PAGE_SHIFT) /* bytes/page */
-#define PAGE_MASK (PAGE_SIZE-1)
-#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
+#define PAGE_SHIFT_8K 13
+#define PAGE_SIZE_8K (1<<PAGE_SHIFT_8K)
+#define PAGE_MASK_8K (PAGE_SIZE_8K-1)
-#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
-#define PDRSHIFT 22 /* LOG2(NBPDR) */
-#define NBPDR (1<<PDRSHIFT) /* bytes/page dir */
-#define PDRMASK (NBPDR-1)
+#define PAGE_SHIFT_64K 16
+#define PAGE_SIZE_64K (1<<PAGE_SHIFT_64K)
+#define PAGE_MASK_64K (PAGE_SIZE_64K-1)
+
+#define PAGE_SHIFT_512K 19
+#define PAGE_SIZE_512K (1<<PAGE_SHIFT_512K)
+#define PAGE_MASK_512K (PAGE_SIZE_512K-1)
+
+#define PAGE_SHIFT_4M 22
+#define PAGE_SIZE_4M (1<<PAGE_SHIFT_4M)
+#define PAGE_MASK_4M (PAGE_SIZE_4M-1)
+
+#define PAGE_SHIFT PAGE_SHIFT_8K /* LOG2(PAGE_SIZE) */
+#define PAGE_SIZE PAGE_SIZE_8K /* bytes/page */
+#define PAGE_MASK PAGE_MASK_8K
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
#define DEV_BSIZE (1<<DEV_BSHIFT)
@@ -126,32 +136,24 @@
*/
/* clicks to bytes */
-#define ctob(x) ((x)<<PAGE_SHIFT)
+#define ctob(x) ((unsigned long)(x)<<PAGE_SHIFT)
/* bytes to clicks */
-#define btoc(x) (((unsigned)(x)+PAGE_MASK)>>PAGE_SHIFT)
+#define btoc(x) (((unsigned long)(x)+PAGE_MASK)>>PAGE_SHIFT)
-/*
- * btodb() is messy and perhaps slow because `bytes' may be an off_t. We
- * want to shift an unsigned type to avoid sign extension and we don't
- * want to widen `bytes' unnecessarily. Assume that the result fits in
- * a daddr_t.
- */
+/* bytes to disk blocks */
#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \
- (sizeof (bytes) > sizeof(long) \
- ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \
- : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT))
+ (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)
+/* disk blocks to bytes */
#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \
- ((off_t)(db) << DEV_BSHIFT)
+ (off_t)((unsigned long)(db) << DEV_BSHIFT)
/*
* Mach derived conversion macros
*/
-#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK)
#define round_page(x) (((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)
-#define trunc_4mpage(x) ((unsigned long)(x) & ~PDRMASK)
-#define round_4mpage(x) ((((unsigned long)(x)) + PDRMASK) & ~PDRMASK)
+#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK)
#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
diff --git a/sys/sparc64/include/pcb.h b/sys/sparc64/include/pcb.h
index 3a14b74..d5ad7eb 100644
--- a/sys/sparc64/include/pcb.h
+++ b/sys/sparc64/include/pcb.h
@@ -30,6 +30,8 @@
#define _MACHINE_PCB_H_
struct pcb {
+ u_long pcb_fp;
+ u_long pcb_pc;
caddr_t pcb_onfault;
};
@@ -37,7 +39,7 @@ struct md_coredump {
};
#ifdef _KERNEL
-void savectx(struct pcb *pcb);
+int savectx(struct pcb *pcb);
#endif
#endif /* !_MACHINE_PCB_H_ */
diff --git a/sys/sparc64/include/pcpu.h b/sys/sparc64/include/pcpu.h
index 38b351e..7af3375 100644
--- a/sys/sparc64/include/pcpu.h
+++ b/sys/sparc64/include/pcpu.h
@@ -37,7 +37,7 @@ struct globaldata {
SLIST_ENTRY(globaldata) gd_allcpu;
struct pcb *gd_curpcb;
struct proc *gd_curproc;
- struct proc *gd_fpproc;
+ struct proc *gd_fpcurproc;
struct proc *gd_idleproc;
u_int gd_cpuid;
u_int gd_other_cpus;
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index 8740007..1a57d0e 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -29,30 +29,36 @@
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
-struct md_page {
-};
+#include <sys/kobj.h>
+#include <machine/tte.h>
-struct pmap {
- struct pmap_statistics pm_stats;
-};
+#define PMAP_CONTEXT_MAX 8192
-typedef struct pmap *pmap_t;
+#define pmap_resident_count(pm) (pm->pm_stats.resident_count)
-extern struct pmap __kernel_pmap;
-#define kernel_pmap (&__kernel_pmap)
+typedef struct pmap *pmap_t;
-#define pmap_resident_count(pm) (pm->pm_stats.resident_count)
+struct md_page {
+};
-#ifdef _KERNEL
+struct pmap {
+ struct stte pm_stte;
+ u_int pm_active;
+ u_int pm_context;
+ u_int pm_count;
+ struct pmap_statistics pm_stats;
+};
+void pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva);
vm_offset_t pmap_kextract(vm_offset_t va);
extern vm_offset_t avail_start;
extern vm_offset_t avail_end;
+extern vm_offset_t clean_eva;
+extern vm_offset_t clean_sva;
+extern struct pmap *kernel_pmap;
extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
-#endif
-
#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/sparc64/include/proc.h b/sys/sparc64/include/proc.h
index e3f6e4d..8537f49 100644
--- a/sys/sparc64/include/proc.h
+++ b/sys/sparc64/include/proc.h
@@ -30,6 +30,7 @@
#define _MACHINE_PROC_H_
#include <machine/globals.h>
+#include <machine/tte.h>
struct mdproc {
};
diff --git a/sys/sparc64/include/pstate.h b/sys/sparc64/include/pstate.h
new file mode 100644
index 0000000..8ff55f3
--- /dev/null
+++ b/sys/sparc64/include/pstate.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PSTATE_H_
+#define _MACHINE_PSTATE_H_
+
+#define PSTATE_AG (1<<0)
+#define PSTATE_IE (1<<1)
+#define PSTATE_PRIV (1<<2)
+#define PSTATE_AM (1<<3)
+#define PSTATE_PEF (1<<4)
+#define PSTATE_RED (1<<5)
+
+#define PSTATE_MM_SHIFT (6)
+#define PSTATE_MM_MASK ((1<<PSTATE_MM_SHIFT)|(1<<(PSTATE_MM_SHIFT+1)))
+#define PSTATE_MM_TSO (0<<PSTATE_MM_SHIFT)
+#define PSTATE_MM_PSO (1<<PSTATE_MM_SHIFT)
+#define PSTATE_MM_RMO (2<<PSTATE_MM_SHIFT)
+
+#define PSTATE_TLE (1<<8)
+#define PSTATE_CLE (1<<9)
+#define PSTATE_MG (1<<10)
+#define PSTATE_IG (1<<11)
+
+#define VER_MANUF_SHIFT (48)
+#define VER_IMPL_SHIFT (32)
+#define VER_MASK_SHIFT (24)
+#define VER_MAXTL_SHIFT (8)
+#define VER_MAXWIN_SHIFT (0)
+
+#define VER_MANUF_SIZE (16)
+#define VER_IMPL_SIZE (16)
+#define VER_MASK_SIZE (8)
+#define VER_MAXTL_SIZE (8)
+#define VER_MAXWIN_SIZE (5)
+
+#define VER_MANUF_MASK (((1L<<VER_MANUF_SIZE)-1)<<VER_MANUF_SHIFT)
+#define VER_IMPL_MASK (((1L<<VER_IMPL_SIZE)-1)<<VER_IMPL_SHIFT)
+#define VER_MASK_MASK (((1L<<VER_MASK_SIZE)-1)<<VER_MASK_SHIFT)
+#define VER_MAXTL_MASK (((1L<<VER_MAXTL_SIZE)-1)<<VER_MAXTL_SHIFT)
+#define VER_MAXWIN_MASK (((1L<<VER_MAXWIN_SIZE)-1)<<VER_MAXWIN_SHIFT)
+
+#define VER_MANUF(ver) \
+ (((ver) & VER_MANUF_MASK) >> VER_MANUF_SHIFT)
+#define VER_IMPL(ver) \
+ (((ver) & VER_IMPL_MASK) >> VER_IMPL_SHIFT)
+#define VER_MASK(ver) \
+ (((ver) & VER_MASK_MASK) >> VER_MASK_SHIFT)
+#define VER_MAXTL(ver) \
+ (((ver) & VER_MAXTL_MASK) >> VER_MAXTL_SHIFT)
+#define VER_MAXWIN(ver) \
+ (((ver) & VER_MAXWIN_MASK) >> VER_MAXWIN_SHIFT)
+
+#endif /* !_MACHINE_PSTATE_H_ */
diff --git a/sys/sparc64/include/pv.h b/sys/sparc64/include/pv.h
new file mode 100644
index 0000000..e36d3f8
--- /dev/null
+++ b/sys/sparc64/include/pv.h
@@ -0,0 +1,175 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PV_H_
+#define _MACHINE_PV_H_
+
+#define PV_LOCK()
+#define PV_UNLOCK()
+
+#define ST_TTE offsetof(struct stte, st_tte)
+#define ST_NEXT offsetof(struct stte, st_next)
+#define ST_PREV offsetof(struct stte, st_prev)
+
+#define TTE_DATA offsetof(struct tte, tte_data)
+#define TTE_TAG offsetof(struct tte, tte_tag)
+
+#define PV_OFF(pa) ((vm_offset_t)(pa) - avail_start)
+#define PV_INDEX(pa) (PV_OFF(pa) >> PAGE_SHIFT)
+#define PV_SHIFT (3)
+
+#define casxp(pa, exp, src) \
+ casxa((vm_offset_t *)pa, exp, src, ASI_PHYS_USE_EC)
+#define ldxp(pa) ldxa(pa, ASI_PHYS_USE_EC)
+#define stxp(pa, val) stxa(pa, ASI_PHYS_USE_EC, val)
+
+extern vm_offset_t pv_table;
+extern u_long pv_generation;
+
+static __inline vm_offset_t
+pv_lookup(vm_offset_t pa)
+{
+ return (pv_table + (PV_INDEX(pa) << PV_SHIFT));
+}
+
+static __inline vm_offset_t
+pv_get_first(vm_offset_t pvh)
+{
+ return (ldxp(pvh));
+}
+
+static __inline vm_offset_t
+pv_get_next(vm_offset_t pstp)
+{
+ return (ldxp(pstp + ST_NEXT));
+}
+
+static __inline vm_offset_t
+pv_get_prev(vm_offset_t pstp)
+{
+ return (ldxp(pstp + ST_PREV));
+}
+
+static __inline u_long
+pv_get_tte_data(vm_offset_t pstp)
+{
+ return (ldxp(pstp + ST_TTE + TTE_DATA));
+}
+
+static __inline u_long
+pv_get_tte_tag(vm_offset_t pstp)
+{
+ return (ldxp(pstp + ST_TTE + TTE_TAG));
+}
+
+#define pv_get_tte(pstp) ({ \
+ struct tte __tte; \
+ __tte.tte_tag = pv_get_tte_tag(pstp); \
+ __tte.tte_data = pv_get_tte_data(pstp); \
+ __tte; \
+})
+
+static __inline void
+pv_set_first(vm_offset_t pvh, vm_offset_t first)
+{
+ stxp(pvh, first);
+}
+
+static __inline void
+pv_set_next(vm_offset_t pstp, vm_offset_t next)
+{
+ stxp(pstp + ST_NEXT, next);
+}
+
+static __inline void
+pv_set_prev(vm_offset_t pstp, vm_offset_t prev)
+{
+ stxp(pstp + ST_PREV, prev);
+}
+
+static __inline void
+pv_remove_phys(vm_offset_t pstp)
+{
+ vm_offset_t pv_next;
+ vm_offset_t pv_prev;
+
+ pv_next = pv_get_next(pstp);
+ pv_prev = pv_get_prev(pstp);
+ if (pv_next != 0)
+ pv_set_prev(pv_next, pv_prev);
+ stxp(pv_prev, pv_next);
+}
+
+static __inline void
+pv_bit_clear(vm_offset_t pstp, u_long bits)
+{
+ vm_offset_t dp;
+ vm_offset_t d1;
+ vm_offset_t d2;
+ vm_offset_t d3;
+
+ dp = pstp + ST_TTE + TTE_DATA;
+ for (d1 = ldxp(dp);; d1 = d3) {
+ d2 = d1 & ~bits;
+ d3 = casxp(dp, d1, d2);
+ if (d1 == d3)
+ break;
+ }
+}
+
+static __inline void
+pv_bit_set(vm_offset_t pstp, u_long bits)
+{
+ vm_offset_t dp;
+ vm_offset_t d1;
+ vm_offset_t d2;
+ vm_offset_t d3;
+
+ dp = pstp + ST_TTE + TTE_DATA;
+ for (d1 = ldxp(dp);; d1 = d3) {
+ d2 = d1 | bits;
+ d3 = casxp(dp, d1, d2);
+ if (d1 == d3)
+ break;
+ }
+}
+
+static __inline int
+pv_bit_test(vm_offset_t pstp, u_long bits)
+{
+ vm_offset_t dp;
+
+ dp = pstp + ST_TTE + TTE_DATA;
+ return ((casxp(dp, 0, 0) & bits) != 0);
+}
+
+void pv_dump(vm_offset_t pvh);
+void pv_insert(pmap_t pm, vm_offset_t pa, vm_offset_t va, struct stte *stp);
+void pv_remove_virt(struct stte *stp);
+
+#endif /* !_MACHINE_PV_H_ */
diff --git a/sys/sparc64/include/resource.h b/sys/sparc64/include/resource.h
index 28fcc98..783a1c4 100644
--- a/sys/sparc64/include/resource.h
+++ b/sys/sparc64/include/resource.h
@@ -1,4 +1,3 @@
-/* $FreeBSD$ */
/*
* Copyright 1998 Massachusetts Institute of Technology
*
@@ -26,6 +25,8 @@
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
#ifndef _MACHINE_RESOURCE_H_
diff --git a/sys/sparc64/include/setjmp.h b/sys/sparc64/include/setjmp.h
new file mode 100644
index 0000000..a6a7ffb
--- /dev/null
+++ b/sys/sparc64/include/setjmp.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SETJMP_H_
+#define _MACHINE_SETJMP_H_
+
+#define _JBLEN 3
+
+#define _JB_FP 0
+#define _JB_PC 1
+#define _JB_SP 2
+
+/*
+ * jmp_buf and sigjmp_buf are encapsulated in different structs to force
+ * compile-time diagnostics for mismatches. The structs are the same
+ * internally to avoid some run-time errors for mismatches.
+ */
+#ifndef _ANSI_SOURCE
+struct _sigjmp_buf {
+ long _sjb[_JBLEN + 1];
+};
+typedef struct _sigjmp_buf sigjmp_buf[1];
+#endif
+
+struct _jmp_buf {
+ long _jb[_JBLEN + 1];
+};
+typedef struct _jmp_buf jmp_buf[1];
+
+#endif /* !_MACHINE_SETJMP_H_ */
diff --git a/sys/sparc64/include/stdarg.h b/sys/sparc64/include/stdarg.h
index a4b2550..2f0fd8c 100644
--- a/sys/sparc64/include/stdarg.h
+++ b/sys/sparc64/include/stdarg.h
@@ -33,6 +33,7 @@
* SUCH DAMAGE.
*
* from: @(#)stdarg.h 8.2 (Berkeley) 9/27/93
+ * from: NetBSD: stdarg.h,v 1.11 2000/07/23 21:36:56 mycroft Exp
* $FreeBSD$
*/
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
new file mode 100644
index 0000000..d723032
--- /dev/null
+++ b/sys/sparc64/include/tlb.h
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TLB_H_
+#define _MACHINE_TLB_H_
+
+#define TLB_SLOT_COUNT 64
+
+#define TLB_SLOT_TSB_KERNEL_MIN 60 /* XXX */
+#define TLB_SLOT_TSB_USER_PRIMARY 61
+#define TLB_SLOT_TSB_USER_SECONDARY 62
+#define TLB_SLOT_KERNEL 63
+
+#define TLB_DAR_SLOT_SHIFT (3)
+#define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT)
+
+#define TLB_TAR_VA(va) ((va) & ~PAGE_MASK)
+#define TLB_TAR_CTX(ctx) (ctx)
+
+#define TLB_DEMAP_ID_SHIFT (4)
+#define TLB_DEMAP_ID_PRIMARY (0)
+#define TLB_DEMAP_ID_SECONDARY (1)
+#define TLB_DEMAP_ID_NUCLEUS (2)
+
+#define TLB_DEMAP_TYPE_SHIFT (6)
+#define TLB_DEMAP_TYPE_PAGE (0)
+#define TLB_DEMAP_TYPE_CONTEXT (1)
+
+#define TLB_DEMAP_VA(va) ((va) & ~PAGE_MASK)
+#define TLB_DEMAP_ID(id) ((id) << TLB_DEMAP_ID_SHIFT)
+#define TLB_DEMAP_TYPE(type) ((type) << TLB_DEMAP_TYPE_SHIFT)
+
+#define TLB_DEMAP_PAGE (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE))
+#define TLB_DEMAP_CONTEXT (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT))
+
+#define TLB_DEMAP_PRIMARY (TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY))
+#define TLB_DEMAP_SECONDARY (TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY))
+#define TLB_DEMAP_NUCLEUS (TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS))
+
+#define TLB_CTX_KERNEL (0)
+
+#define TLB_DTLB (1 << 0)
+#define TLB_ITLB (1 << 1)
+
+static __inline void
+tlb_dtlb_page_demap(u_int ctx, vm_offset_t va)
+{
+ if (ctx == TLB_CTX_KERNEL) {
+ stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
+ ASI_DMMU_DEMAP, 0);
+ membar(Sync);
+ } else
+ TODO;
+}
+
+static __inline void
+tlb_dtlb_store(vm_offset_t va, struct tte tte)
+{
+ stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0));
+ stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
+ membar(Sync);
+}
+
+static __inline void
+tlb_dtlb_store_slot(vm_offset_t va, struct tte tte, int slot)
+{
+ stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0));
+ stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
+ membar(Sync);
+}
+
+static __inline void
+tlb_itlb_page_demap(u_int ctx, vm_offset_t va)
+{
+ if (ctx == TLB_CTX_KERNEL) {
+ stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
+ ASI_IMMU_DEMAP, 0);
+ flush(KERNBASE);
+ } else
+ TODO;
+}
+
+static __inline void
+tlb_itlb_store(vm_offset_t va, struct tte tte)
+{
+ TODO;
+}
+
+static __inline void
+tlb_itlb_store_slot(vm_offset_t va, struct tte tte, int slot)
+{
+ stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0));
+ stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
+ flush(va);
+}
+
+static __inline void
+tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
+{
+ if (tlb & TLB_DTLB)
+ tlb_dtlb_page_demap(ctx, va);
+ if (tlb & TLB_ITLB)
+ tlb_itlb_page_demap(ctx, va);
+}
+
+static __inline void
+tlb_store(u_int tlb, vm_offset_t va, struct tte tte)
+{
+ if (tlb & TLB_DTLB)
+ tlb_dtlb_store(va, tte);
+ if (tlb & TLB_ITLB)
+ tlb_itlb_store(va, tte);
+}
+
+static __inline void
+tlb_store_slot(u_int tlb, vm_offset_t va, struct tte tte, int slot)
+{
+ if (tlb & TLB_DTLB)
+ tlb_dtlb_store_slot(va, tte, slot);
+ if (tlb & TLB_ITLB)
+ tlb_itlb_store_slot(va, tte, slot);
+}
+
+#endif /* !_MACHINE_TLB_H_ */
diff --git a/sys/sparc64/include/trap.h b/sys/sparc64/include/trap.h
new file mode 100644
index 0000000..4773592
--- /dev/null
+++ b/sys/sparc64/include/trap.h
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TRAP_H_
+#define _MACHINE_TRAP_H_
+
+#define T_RESERVED 0x0
+#define T_POWER_ON 0x1
+#define T_WATCHDOG 0x2
+#define T_RESET_EXT 0x3
+#define T_RESET_SOFT 0x4
+#define T_RED_STATE 0x5
+#define T_INSN_EXCPTN 0x6
+#define T_INSN_ERROR 0x7
+#define T_INSN_ILLEGAL 0x8
+#define T_PRIV_OPCODE 0x9
+#define T_FP_DISABLED 0xa
+#define T_FP_IEEE 0xb
+#define T_FP_OTHER 0xc
+#define T_TAG_OVFLW 0xd
+#define T_DIVIDE 0xe
+#define T_DATA_EXCPTN 0xf
+#define T_DATA_ERROR 0x10
+#define T_ALIGN 0x11
+#define T_ALIGN_LDDF 0x12
+#define T_ALIGN_STDF 0x13
+#define T_PRIV_ACTION 0x14
+#define T_INTERRUPT 0x15
+#define T_WATCH_PHYS 0x16
+#define T_WATCH_VIRT 0x17
+#define T_ECC 0x18
+#define T_IMMU_MISS 0x19
+#define T_DMMU_MISS 0x1a
+#define T_DMMU_PROT 0x1b
+#define T_SPILL 0x1c
+#define T_FILL 0x1d
+#define T_BREAKPOINT 0x1e
+
+#define T_KERNEL 0x20
+
+#ifndef LOCORE
+extern const char *trap_msg[];
+#endif
+
+#endif /* !_MACHINE_TRAP_H_ */
diff --git a/sys/sparc64/include/tsb.h b/sys/sparc64/include/tsb.h
new file mode 100644
index 0000000..5bc46cf
--- /dev/null
+++ b/sys/sparc64/include/tsb.h
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: BSDI: pmap.v9.h,v 1.10.2.6 1999/08/23 22:18:44 cp Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TSB_H_
+#define _MACHINE_TSB_H_
+
+#define TSB_KERNEL_MIN_ADDRESS (0x6e000000000)
+#define TSB_USER_MIN_ADDRESS (0x6f000000000)
+
+#define TSB_MASK_WIDTH (6)
+
+#define TSB_PRIMARY_BUCKET_SHIFT (2)
+#define TSB_PRIMARY_BUCKET_SIZE (1 << TSB_PRIMARY_BUCKET_SHIFT)
+#define TSB_PRIMARY_BUCKET_MASK (TSB_PRIMARY_BUCKET_SIZE - 1)
+#define TSB_SECONDARY_BUCKET_SHIFT (3)
+#define TSB_SECONDARY_BUCKET_SIZE (1 << TSB_SECONDARY_BUCKET_SHIFT)
+#define TSB_SECONDARY_BUCKET_MASK (TSB_SECONDARY_BUCKET_SIZE - 1)
+
+#define TSB_SECONDARY_STTE_SHIFT \
+ (STTE_SHIFT + TSB_SECONDARY_BUCKET_SHIFT)
+#define TSB_SECONDARY_STTE_MASK (1 << TSB_SECONDARY_STTE_SHIFT)
+
+#define TSB_LEVEL1_BUCKET_MASK \
+ ((TSB_SECONDARY_BUCKET_MASK & ~TSB_PRIMARY_BUCKET_MASK) << \
+ (PAGE_SHIFT - TSB_PRIMARY_BUCKET_SHIFT))
+#define TSB_LEVEL1_BUCKET_SHIFT \
+ (TSB_BUCKET_SPREAD_SHIFT + \
+ (TSB_SECONDARY_BUCKET_SHIFT - TSB_PRIMARY_BUCKET_SHIFT))
+
+#define TSB_BUCKET_SPREAD_SHIFT (2)
+
+#define TSB_DEPTH (7)
+
+#define TSB_KERNEL_PAGES (1)
+#define TSB_KERNEL_SIZE (TSB_KERNEL_PAGES * PAGE_SIZE_4M)
+#define TSB_KERNEL_MB (512)
+#define TSB_KERNEL_VM_RANGE (TSB_KERNEL_MB * (1 << 20))
+#define TSB_KERNEL_RANGE \
+ ((TSB_KERNEL_VM_RANGE / PAGE_SIZE) * sizeof (struct stte))
+#define TSB_KERNEL_MASK \
+ ((TSB_KERNEL_RANGE / sizeof (struct stte)) - 1)
+
+#define TSB_1M_STTE_SHIFT (21)
+#define TSB_1M_STTE_SIZE	(1 << TSB_1M_STTE_SHIFT)
+
+#define TSB_SIZE_REG (7)
+
+extern vm_offset_t tsb_kernel_phys;
+
+static __inline struct stte *
+tsb_base(u_int level)
+{
+ vm_offset_t base;
+ size_t len;
+
+ if (level == 0)
+ base = TSB_USER_MIN_ADDRESS;
+ else {
+ len = 1UL << ((level * TSB_BUCKET_SPREAD_SHIFT) +
+ TSB_MASK_WIDTH + TSB_SECONDARY_BUCKET_SHIFT +
+ STTE_SHIFT);
+ base = TSB_USER_MIN_ADDRESS + len;
+ }
+ return (struct stte *)base;
+}
+
+static __inline u_long
+tsb_bucket_shift(u_int level)
+{
+ return (level == 0 ?
+ TSB_PRIMARY_BUCKET_SHIFT : TSB_SECONDARY_BUCKET_SHIFT);
+}
+
+static __inline u_long
+tsb_bucket_size(u_int level)
+{
+ return (1UL << tsb_bucket_shift(level));
+}
+
+static __inline u_long
+tsb_bucket_mask(u_int level)
+{
+ return (tsb_bucket_size(level) - 1);
+}
+
+static __inline u_long
+tsb_mask_width(u_int level)
+{
+ return ((level * TSB_BUCKET_SPREAD_SHIFT) + TSB_MASK_WIDTH);
+}
+
+static __inline u_long
+tsb_mask(u_int level)
+{
+ return ((1UL << tsb_mask_width(level)) - 1);
+}
+
+static __inline u_int
+tsb_tlb_slot(u_int level)
+{
+ return (level == 0 ?
+ TLB_SLOT_TSB_USER_PRIMARY : TLB_SLOT_TSB_USER_SECONDARY);
+}
+
+static __inline vm_offset_t
+tsb_stte_vtophys(pmap_t pm, struct stte *stp)
+{
+ vm_offset_t va;
+ u_long data;
+
+ va = (vm_offset_t)stp;
+ if (pm == kernel_pmap)
+ return (tsb_kernel_phys +
+ ((va - TSB_KERNEL_MIN_ADDRESS) << STTE_SHIFT));
+
+ if (trunc_page(va) == TSB_USER_MIN_ADDRESS)
+ data = pm->pm_stte.st_tte.tte_data;
+ else
+ data = ldxa(TLB_DAR_SLOT(tsb_tlb_slot(1)),
+ ASI_DTLB_DATA_ACCESS_REG);
+ return ((vm_offset_t)((TD_PA(data)) + (va & PAGE_MASK)));
+}
+
+static __inline struct stte *
+tsb_vpntobucket(vm_offset_t vpn, u_int level)
+{
+ return (tsb_base(level) +
+ ((vpn & tsb_mask(level)) << tsb_bucket_shift(level)));
+}
+
+static __inline struct stte *
+tsb_vtobucket(vm_offset_t va, u_int level)
+{
+ return (tsb_vpntobucket(va >> PAGE_SHIFT, level));
+}
+
+static __inline struct stte *
+tsb_kvpntostte(vm_offset_t vpn)
+{
+ struct stte *stp;
+
+ stp = (struct stte *)(TSB_KERNEL_MIN_ADDRESS +
+ ((vpn & TSB_KERNEL_MASK) << STTE_SHIFT));
+ return (stp);
+}
+
+static __inline struct stte *
+tsb_kvtostte(vm_offset_t va)
+{
+ return (tsb_kvpntostte(va >> PAGE_SHIFT));
+}
+
+static __inline void
+tsb_tte_enter_kernel(vm_offset_t va, struct tte tte)
+{
+ struct stte *stp;
+
+ stp = tsb_kvtostte(va);
+ stp->st_tte = tte;
+#if 1
+ pv_insert(kernel_pmap, TD_PA(tte.tte_data), va, stp);
+#endif
+}
+
+static __inline void
+tsb_remove_kernel(vm_offset_t va)
+{
+ struct stte *stp;
+
+ stp = tsb_kvtostte(va);
+ tte_invalidate(&stp->st_tte);
+#if 1
+ pv_remove_virt(stp);
+#endif
+}
+
+struct stte *tsb_get_bucket(pmap_t pm, u_int level, vm_offset_t va,
+ int allocate);
+int tsb_miss(pmap_t pm, u_int type, struct mmuframe *mf);
+struct tte tsb_page_alloc(pmap_t pm, vm_offset_t va);
+void tsb_page_fault(pmap_t pm, int level, vm_offset_t va, struct stte *stp);
+void tsb_page_init(void *va, int level);
+struct stte *tsb_stte_lookup(pmap_t pm, vm_offset_t va);
+struct stte *tsb_stte_promote(pmap_t pm, vm_offset_t va, struct stte *stp);
+void tsb_stte_remove(struct stte *stp);
+struct stte *tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte);
+void tsb_tte_local_remove(struct tte *tp);
+
+extern vm_offset_t tsb_bootstrap_pages[];
+extern int tsb_bootstrap_index;
+
+#endif /* !_MACHINE_TSB_H_ */
diff --git a/sys/sparc64/include/tte.h b/sys/sparc64/include/tte.h
new file mode 100644
index 0000000..f938560
--- /dev/null
+++ b/sys/sparc64/include/tte.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: BSDI: pmap.v9.h,v 1.10.2.6 1999/08/23 22:18:44 cp Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_TTE_H_
+#define _MACHINE_TTE_H_
+
+#include <machine/atomic.h>
+
+#define TTE_SHIFT 4
+#define STTE_SHIFT 5
+
+#define TT_CTX_SHIFT (48)
+#define TT_VA_SHIFT (22)
+#define TT_VPN_SHIFT (9)
+
+#define TT_CTX_SIZE (13)
+#define TT_VA_SIZE (42)
+
+#define TT_CTX_MASK ((1L << TT_CTX_SIZE) - 1)
+#define TT_VA_MASK ((1L << TT_VA_SIZE) - 1)
+
+#define TT_G (1L << 63)
+#define TT_CTX(ctx) (((u_long)(ctx) << TT_CTX_SHIFT) & TT_CTX_MASK)
+#define TT_VA(va) (((u_long)(va) >> TT_VA_SHIFT) & TT_VA_MASK)
+
+#define TD_SIZE_SHIFT (61)
+#define TD_SOFT2_SHIFT (50)
+#define TD_DIAG_SHIFT (41)
+#define TD_PA_SHIFT (13)
+#define TD_SOFT_SHIFT (7)
+
+#define TD_SIZE_SIZE (2)
+#define TD_SOFT2_SIZE (9)
+#define TD_DIAG_SIZE (9)
+#define TD_PA_SIZE (28)
+#define TD_SOFT_SIZE (6)
+
+#define TD_SIZE_MASK (((1L << TD_SIZE_SIZE) - 1) << TD_SIZE_SHIFT)
+#define TD_SOFT2_MASK (((1L << TD_SOFT2_SIZE) - 1) << TD_SOFT2_SHIFT)
+#define TD_DIAG_MASK (((1L << TD_DIAG_SIZE) - 1) << TD_DIAG_SHIFT)
+#define TD_PA_MASK (((1L << TD_PA_SIZE) - 1) << TD_PA_SHIFT)
+#define TD_SOFT_MASK (((1L << TD_SOFT_SIZE) - 1) << TD_SOFT_SHIFT)
+
+#define TD_VA_LOW_SHIFT TD_SOFT2_SHIFT
+#define TD_VA_LOW_MASK TD_SOFT2_MASK
+
+#define TS_EXEC (1L << 3)
+#define TS_MOD (1L << 2)
+#define TS_REF (1L << 1)
+#define TS_TSB (1L << 0)
+
+#define TD_V (1L << 63)
+#define TD_8K (0L << TD_SIZE_SHIFT)
+#define TD_64K (1L << TD_SIZE_SHIFT)
+#define TD_512K (2L << TD_SIZE_SHIFT)
+#define TD_4M (3L << TD_SIZE_SHIFT)
+#define TD_NFO (1L << 60)
+#define TD_IE (1L << 59)
+#define TD_VPN_LOW(vpn) ((vpn << TD_SOFT2_SHIFT) & TD_SOFT2_MASK)
+#define TD_VA_LOW(va) (TD_VPN_LOW((va) >> PAGE_SHIFT))
+#define TD_PA(pa) ((pa) & TD_PA_MASK)
+#define TD_EXEC (TS_EXEC << TD_SOFT_SHIFT)
+#define TD_MOD (TS_MOD << TD_SOFT_SHIFT)
+#define TD_REF (TS_REF << TD_SOFT_SHIFT)
+#define TD_TSB (TS_TSB << TD_SOFT_SHIFT)
+#define TD_L (1L << 6)
+#define TD_CP (1L << 5)
+#define TD_CV (1L << 4)
+#define TD_E (1L << 3)
+#define TD_P (1L << 2)
+#define TD_W (1L << 1)
+#define TD_G (1L << 0)
+
+struct tte {
+ u_long tte_tag;
+ u_long tte_data;
+};
+
+struct stte {
+ struct tte st_tte;
+ vm_offset_t st_next;
+ vm_offset_t st_prev;
+};
+
+static __inline u_int
+tte_get_ctx(struct tte tte)
+{
+ return ((tte.tte_tag & TT_CTX_MASK) >> TT_CTX_SHIFT);
+}
+
+static __inline vm_offset_t
+tte_get_vpn(struct tte tte)
+{
+ return (((tte.tte_tag & TT_VA_MASK) << TT_VPN_SHIFT) |
+ ((tte.tte_data & TD_VA_LOW_MASK) >> TD_VA_LOW_SHIFT));
+}
+
+static __inline vm_offset_t
+tte_get_va(struct tte tte)
+{
+ return (tte_get_vpn(tte) << PAGE_SHIFT);
+}
+
+static __inline void
+tte_invalidate(struct tte *tp)
+{
+ atomic_clear_long(&tp->tte_data, TD_V);
+}
+
+static __inline int
+tte_match(struct tte tte, vm_offset_t va)
+{
+ return ((tte.tte_data & TD_V) != 0 &&
+ ((tte.tte_tag ^ TT_VA(va)) & TT_VA_MASK) == 0 &&
+ ((tte.tte_data ^ TD_VA_LOW(va)) & TD_VA_LOW_MASK) == 0);
+}
+
+#endif /* !_MACHINE_TTE_H_ */
diff --git a/sys/sparc64/include/vmparam.h b/sys/sparc64/include/vmparam.h
index 81858cc..677b4f2 100644
--- a/sys/sparc64/include/vmparam.h
+++ b/sys/sparc64/include/vmparam.h
@@ -62,23 +62,23 @@
*/
#define MAXSLP 20
-#define VM_MAXUSER_ADDRESS 0
+#define VM_MAXUSER_ADDRESS (0x5ffffffffff)
#define USRSTACK VM_MAXUSER_ADDRESS
-#define VM_MIN_ADDRESS 0
+#define VM_MIN_ADDRESS (0)
/*
* Virtual size (bytes) for various kernel submaps.
*/
#ifndef VM_KMEM_SIZE
-#define VM_KMEM_SIZE (12*1024*1024)
+#define VM_KMEM_SIZE (12*1024*1024)
#endif
-#define VM_MIN_KERNEL_ADDRESS (0)
-#define VM_MAX_KERNEL_ADDRESS (0)
+#define VM_MIN_KERNEL_ADDRESS (0x60000000000)
+#define VM_MAX_KERNEL_ADDRESS (0x6e000000000)
-#define KERNBASE (0)
+#define KERNBASE (0x60000000000)
/*
* Initial pagein size of beginning of executable file.
diff --git a/sys/sparc64/sparc64/autoconf.c b/sys/sparc64/sparc64/autoconf.c
index ab581ab..517183d 100644
--- a/sys/sparc64/sparc64/autoconf.c
+++ b/sys/sparc64/sparc64/autoconf.c
@@ -27,6 +27,23 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cons.h>
+#include <sys/kernel.h>
dev_t dumpdev = NODEV;
dev_t rootdev = NODEV;
+
+static void configure(void *);
+
+SYSINIT(configure, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure, NULL);
+
+static void
+configure(void *v)
+{
+ device_add_child(root_bus, "upa", 0);
+ root_bus_configure();
+ cninit_finish();
+ cold = 0;
+}
diff --git a/sys/sparc64/sparc64/clock.c b/sys/sparc64/sparc64/clock.c
index d56a0b1..7632c00 100644
--- a/sys/sparc64/sparc64/clock.c
+++ b/sys/sparc64/sparc64/clock.c
@@ -32,7 +32,7 @@
void
cpu_initclocks(void)
{
- TODO;
+ /* XXX */
}
void
@@ -44,7 +44,7 @@ DELAY(int n)
void
inittodr(time_t base)
{
- TODO;
+ /* XXX */
}
void
diff --git a/sys/sparc64/sparc64/db_disasm.c b/sys/sparc64/sparc64/db_disasm.c
new file mode 100644
index 0000000..eb157c8
--- /dev/null
+++ b/sys/sparc64/sparc64/db_disasm.c
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 1994 David S. Miller, davem@nadzieja.rutgers.edu
+ * Copyright (c) 1995 Paul Kranenburg
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by David Miller.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * from: NetBSD: db_disasm.c,v 1.9 2000/08/16 11:29:42 pk Exp
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+
+#include <machine/db_machdep.h>
+
+#ifndef V9
+#define V9
+#endif
+
+/* Sign extend values */
+#ifdef V9
+#define SIGNEX(v,width) ((((long long)(v))<<(64-(width)))>>(64-(width)))
+#else
+#define SIGNEX(v,width) ((((int)(v))<<(32-(width)))>>(32-(width)))
+#endif
+#define SIGN(v) (((v)<0)?"-":"")
+
+/*
+ * All Sparc instructions are 32-bits, with the one exception being
+ * the set instruction which is actually a macro which expands into
+ * two instructions...
+ *
+ * There are 5 different fields that can be used to identify which
+ * operation is encoded into a particular 32-bit insn. There are 3
+ * formats for instructions; which one is used is determined by
+ * bits 30-31 of the insn. Here are the bit fields and their names:
+ *
+ * 1100 0000 0000 0000 0000 0000 0000 0000 op field, determines format
+ * 0000 0001 1100 0000 0000 0000 0000 0000 op2 field, format 2 only
+ * 0000 0001 1111 1000 0000 0000 0000 0000 op3 field, format 3 only
+ * 0000 0000 0000 0000 0010 0000 0000 0000 f3i bit, format 3 only
+ * 0000 0000 0000 0000 0001 0000 0000 0000 X bit, format 3 only
+ */
+
+#define OP(x) (((x) & 0x3) << 30)
+#define OP2(x) (((x) & 0x7) << 22)
+#define OP3(x) (((x) & 0x3f) << 19)
+#define OPF(x) (((x) & 0x1ff) << 5)
+#define F3I(x) (((x) & 0x1) << 13)
+
+/* various other fields */
+
+#define A(x) (((x) & 0x1) << 29)
+#define P(x) (((x) & 0x1) << 19)
+#define X(x) (((x) & 0x1) << 12)
+#define FCN(x) (((x) & 0x1f) << 25)
+#define RCOND2(x) (((x) & 0x7) << 25)
+#define RCOND34(x) (((x) & 0x7) << 10)
+#define COND(x) (((x) & 0xf) << 25)
+#define SW_TRAP(x) ((x) & 0x7f)
+#define SHCNT32(x) ((x) & 0x1f)
+#define SHCNT64(x) ((x) & 0x3f)
+#define IMM11(x) ((x) & 0x7ff)
+#define IMM22(x) ((x) & 0x3fffff)
+#define DISP19(x) ((x) & 0x7ffff)
+#define DISP22(x) ((x) & 0x3fffff)
+#define DISP30(x) ((x) & 0x3fffffffL)
+
+/* Register Operand Fields */
+#define RS1(x) (((x) & 0x1f) << 14)
+#define RS2(x) ((x) & 0x1f)
+#define RD(x) (((x) & 0x1f) << 25)
+
+/* FORMAT macros used in sparc_i table to decode each opcode */
+#define FORMAT1(a) (OP(a))
+#define FORMAT2(a,b) (OP(a) | OP2(b))
+#define FORMAT3(a,b,c) (OP(a) | OP3(b) | F3I(c))
+#define FORMAT3F(a,b,c) (OP(a) | OP3(b) | OPF(c))
+
+/* Helper macros to construct OP3 & OPF */
+#define OP3_X(x,y) ((((x) & 3) << 4) | ((y) & 0xf))
+#define OPF_X(x,y) ((((x) & 0x1f) << 4) | ((y) & 0xf))
+
+/* COND condition codes field... */
+#define COND2(y,x)	(((((y)<<4) & 0x10)|((x) & 0xf)) << 14)
+
+struct sparc_insn {
+ unsigned int match;
+ char* name;
+ char* format;
+};
+
+char* regs[] = {
+ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
+ "o0", "o1", "o2", "o3", "o4", "o5", "sp", "o7",
+ "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
+ "i0", "i1", "i2", "i3", "i4", "i5", "fp", "i7"
+};
+
+char* priv_regs[] = {
+ "tpc", "tnpc", "tstate", "tt", "tick", "tba", "pstate", "tl",
+ "pil", "cwp", "cansave", "canrestore", "cleanwin", "otherwin",
+ "wstate", "fq",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "ver"
+};
+
+char* state_regs[] = {
+ "y", "", "ccr", "asi", "tick", "pc", "fprs", "asr",
+ "", "", "", "", "", "", "", "",
+	"pcr", "pic", "dcr", "gsr", "set_softint", "clr_softint", "softint", "tick_cmpr",
+ "", "", "", "", "", "", "", ""
+};
+
+char* ccodes[] = {
+ "fcc0", "fcc1", "fcc2", "fcc3", "icc", "", "xcc", ""
+};
+
+char* prefetch[] = {
+ "n_reads", "one_read", "n_writes", "one_write", "page"
+};
+
+
+/* The sparc instruction table has a format field which tells what
+ the operand structure for this instruction is. Here are the codes:
+
+Modifiers (must be first):
+ a -- opcode has annul bit
+ p -- opcode has branch prediction bit
+
+Codes:
+ 1 -- source register operand stored in rs1
+ 2 -- source register operand stored in rs2
+ d -- destination register operand stored in rd
+ 3 -- floating source register in rs1
+ 4 -- floating source register in rs2
+ e -- floating destination register in rd
+ i -- 13-bit immediate value stored in simm13
+ j -- 11-bit immediate value stored in simm11
+ l -- displacement using d16lo and d16hi
+ m -- 22-bit fcc displacement value
+ n -- 30-bit displacement used in call insns
+ o -- %fcc number specified in cc1 and cc0 fields
+ p -- address computed by the contents of rs1+rs2
+ q -- address computed by the contents of rs1+simm13
+ r -- prefetch
+ s -- %asi is implicit in the insn, rs1 value not used
+ t -- immediate 8-bit asi value
+ u -- 19-bit fcc displacement value
+ 5 -- hard register, %fsr lower-half
+ 6 -- hard register, %fsr all
+ 7 -- [reg_addr rs1+rs2] imm_asi
+ 8 -- [reg_addr rs1+simm13] %asi
+ 9 -- logical or of the cmask and mmask fields (membar insn)
+ 0 -- icc or xcc condition codes register
+ . -- %fcc, %icc, or %xcc in opf_cc field
+ r -- prefetch function stored in fcn field
+ A -- privileged register encoded in rs1
+ B -- state register encoded in rs1
+ C -- %hi(value) where value is stored in imm22 field
+ D -- 32-bit shift count in shcnt32
+ E -- 64-bit shift count in shcnt64
+ F -- software trap number stored in sw_trap
+ G -- privileged register encoded in rd
+ H -- state register encoded in rd
+
+V8 only:
+ Y -- write y register
+ P -- write psr register
+ T -- write tbr register
+ W -- write wim register
+*/
+
+
+struct sparc_insn sparc_i[] = {
+
+ /*
+ * Format 1: Call
+ */
+ {(FORMAT1(1)), "call", "n"},
+
+ /*
+ * Format 0: Sethi & Branches
+ */
+ /* Illegal Instruction Trap */
+ {(FORMAT2(0, 0)), "illtrap", "m"},
+
+ /* Note: if imm22 is zero then this is actually a "nop" grrr... */
+ {(FORMAT2(0, 0x4)), "sethi", "Cd"},
+
+	/* Branch on Integer Condition Codes "Bicc" */
+ {(FORMAT2(0, 2) | COND(8)), "ba", "a,m"},
+ {(FORMAT2(0, 2) | COND(0)), "bn", "a,m"},
+ {(FORMAT2(0, 2) | COND(9)), "bne", "a,m"},
+ {(FORMAT2(0, 2) | COND(1)), "be", "a,m"},
+ {(FORMAT2(0, 2) | COND(10)), "bg", "a,m"},
+ {(FORMAT2(0, 2) | COND(2)), "ble", "a,m"},
+ {(FORMAT2(0, 2) | COND(11)), "bge", "a,m"},
+ {(FORMAT2(0, 2) | COND(3)), "bl", "a,m"},
+ {(FORMAT2(0, 2) | COND(12)), "bgu", "a,m"},
+ {(FORMAT2(0, 2) | COND(4)), "bleu", "a,m"},
+ {(FORMAT2(0, 2) | COND(13)), "bcc", "a,m"},
+ {(FORMAT2(0, 2) | COND(5)), "bcs", "a,m"},
+ {(FORMAT2(0, 2) | COND(14)), "bpos", "a,m"},
+ {(FORMAT2(0, 2) | COND(6)), "bneg", "a,m"},
+ {(FORMAT2(0, 2) | COND(15)), "bvc", "a,m"},
+ {(FORMAT2(0, 2) | COND(7)), "bvs", "a,m"},
+
+ /* Branch on Integer Condition Codes with Prediction "BPcc" */
+ {(FORMAT2(0, 1) | COND(8)), "ba", "ap,u"},
+ {(FORMAT2(0, 1) | COND(0)), "bn", "ap,u"},
+ {(FORMAT2(0, 1) | COND(9)), "bne", "ap,u"},
+ {(FORMAT2(0, 1) | COND(1)), "be", "ap,u"},
+ {(FORMAT2(0, 1) | COND(10)), "bg", "ap,u"},
+ {(FORMAT2(0, 1) | COND(2)), "ble", "ap,u"},
+ {(FORMAT2(0, 1) | COND(11)), "bge", "ap,u"},
+ {(FORMAT2(0, 1) | COND(3)), "bl", "ap,u"},
+ {(FORMAT2(0, 1) | COND(12)), "bgu", "ap,u"},
+ {(FORMAT2(0, 1) | COND(4)), "bleu", "ap,u"},
+ {(FORMAT2(0, 1) | COND(13)), "bcc", "ap,u"},
+ {(FORMAT2(0, 1) | COND(5)), "bcs", "ap,u"},
+ {(FORMAT2(0, 1) | COND(14)), "bpos", "ap,u"},
+ {(FORMAT2(0, 1) | COND(6)), "bneg", "ap,u"},
+ {(FORMAT2(0, 1) | COND(15)), "bvc", "ap,u"},
+ {(FORMAT2(0, 1) | COND(7)), "bvs", "ap,u"},
+
+ /* Branch on Integer Register with Prediction "BPr" */
+ {(FORMAT2(0, 3) | RCOND2(1)), "brz", "ap,1l"},
+	{(FORMAT2(0, 3) | RCOND2(2)), "brlez", "ap,1l"},
+ {(FORMAT2(0, 3) | RCOND2(3)), "brlz", "ap,1l"},
+ {(FORMAT2(0, 3) | RCOND2(5)), "brnz", "ap,1l"},
+ {(FORMAT2(0, 3) | RCOND2(6)), "brgz", "ap,1l"},
+ {(FORMAT2(0, 3) | RCOND2(7)), "brgez", "ap,1l"},
+
+ /* Branch on Floating-Point Condition Codes with Prediction "FBPfcc" */
+ {(FORMAT2(0, 5) | COND(8)), "fba", "ap,m"},
+ {(FORMAT2(0, 5) | COND(0)), "fbn", "ap,m"},
+ {(FORMAT2(0, 5) | COND(7)), "fbu", "ap,m"},
+ {(FORMAT2(0, 5) | COND(6)), "fbg", "ap,m"},
+ {(FORMAT2(0, 5) | COND(5)), "fbug", "ap,m"},
+ {(FORMAT2(0, 5) | COND(4)), "fbl", "ap,m"},
+ {(FORMAT2(0, 5) | COND(3)), "fbul", "ap,m"},
+ {(FORMAT2(0, 5) | COND(2)), "fblg", "ap,m"},
+ {(FORMAT2(0, 5) | COND(1)), "fbne", "ap,m"},
+ {(FORMAT2(0, 5) | COND(9)), "fbe", "ap,m"},
+ {(FORMAT2(0, 5) | COND(10)), "fbue", "ap,m"},
+ {(FORMAT2(0, 5) | COND(11)), "fbge", "ap,m"},
+ {(FORMAT2(0, 5) | COND(12)), "fbuge", "ap,m"},
+ {(FORMAT2(0, 5) | COND(13)), "fble", "ap,m"},
+ {(FORMAT2(0, 5) | COND(14)), "fbule", "ap,m"},
+ {(FORMAT2(0, 5) | COND(15)), "fbo", "ap,m"},
+
+ /* Branch on Floating-Point Condition Codes "FBfcc" */
+ {(FORMAT2(0, 6) | COND(8)), "fba", "a,m"},
+ {(FORMAT2(0, 6) | COND(0)), "fbn", "a,m"},
+ {(FORMAT2(0, 6) | COND(7)), "fbu", "a,m"},
+ {(FORMAT2(0, 6) | COND(6)), "fbg", "a,m"},
+ {(FORMAT2(0, 6) | COND(5)), "fbug", "a,m"},
+ {(FORMAT2(0, 6) | COND(4)), "fbl", "a,m"},
+ {(FORMAT2(0, 6) | COND(3)), "fbul", "a,m"},
+ {(FORMAT2(0, 6) | COND(2)), "fblg", "a,m"},
+ {(FORMAT2(0, 6) | COND(1)), "fbne", "a,m"},
+ {(FORMAT2(0, 6) | COND(9)), "fbe", "a,m"},
+ {(FORMAT2(0, 6) | COND(10)), "fbue", "a,m"},
+ {(FORMAT2(0, 6) | COND(11)), "fbge", "a,m"},
+ {(FORMAT2(0, 6) | COND(12)), "fbuge", "a,m"},
+ {(FORMAT2(0, 6) | COND(13)), "fble", "a,m"},
+ {(FORMAT2(0, 6) | COND(14)), "fbule", "a,m"},
+ {(FORMAT2(0, 6) | COND(15)), "fbo", "a,m"},
+
+
+
+ /*
+ * Format 3/2: Arithmetic & misc (table 32, appendix E)
+ */
+ {FORMAT3(2, OP3_X(0,0), 0), "add", "12d"},
+ {FORMAT3(2, OP3_X(0,0), 1), "add", "1id"},
+ {FORMAT3(2, OP3_X(1,0), 0), "addcc", "12d"},
+ {FORMAT3(2, OP3_X(1,0), 1), "addcc", "1id"},
+ {FORMAT3(2, OP3_X(2,0), 0), "taddcc", "12d"},
+ {FORMAT3(2, OP3_X(2,0), 1), "taddcc", "1id"},
+#ifdef V9
+ {(FORMAT3(2, 0x30, 1) | RD(0xf)), "sir", "i"},
+ {FORMAT3(2, OP3_X(3,0), 0), "wr", "12H"},
+ {FORMAT3(2, OP3_X(3,0), 1), "wr", "1iH"},
+#else
+ {FORMAT3(2, OP3_X(3,0), 0), "wr", "12Y"}, /* wr 1, 2, %y */
+ {FORMAT3(2, OP3_X(3,0), 1), "wr", "1iY"}, /* wr 1, i, %y */
+#endif
+
+ {FORMAT3(2, OP3_X(0,1), 0), "and", "12d"},
+ {FORMAT3(2, OP3_X(0,1), 1), "and", "1id"},
+ {FORMAT3(2, OP3_X(1,1), 0), "andcc", "12d"},
+ {FORMAT3(2, OP3_X(1,1), 1), "andcc", "1id"},
+ {FORMAT3(2, OP3_X(2,1), 0), "tsubcc", "12d"},
+ {FORMAT3(2, OP3_X(2,1), 1), "tsubcc", "1id"},
+#ifdef V9
+ {FORMAT3(2, OP3_X(3,1), 0), "saved", ""},
+ {FORMAT3(2, OP3_X(3,1), 0)|FCN(1), "restored", ""},
+#else
+ {FORMAT3(2, OP3_X(3,1), 0), "wr", "12P"}, /* wr 1, 2, %psr */
+ {FORMAT3(2, OP3_X(3,1), 1), "wr", "1iP"}, /* wr 1, i, %psr */
+#endif
+
+ {FORMAT3(2, OP3_X(0,2), 0), "or", "12d"},
+ {FORMAT3(2, OP3_X(0,2), 1), "or", "1id"},
+ {FORMAT3(2, OP3_X(1,2), 0), "orcc", "12d"},
+ {FORMAT3(2, OP3_X(1,2), 1), "orcc", "1id"},
+ {FORMAT3(2, OP3_X(2,2), 0), "taddcctv", "12d"},
+ {FORMAT3(2, OP3_X(2,2), 1), "taddcctv", "1id"},
+#ifdef V9
+ {FORMAT3(2, OP3_X(3,2), 0), "wrpr", "12G"},
+ {FORMAT3(2, OP3_X(3,2), 1), "wrpr", "1iG"},
+#else
+ {FORMAT3(2, OP3_X(3,2), 0), "wr", "12W"}, /* wr 1, 2, %wim */
+ {FORMAT3(2, OP3_X(3,2), 1), "wr", "1iW"}, /* wr 1, i, %wim */
+#endif
+
+ {FORMAT3(2, OP3_X(0,3), 0), "xor", "12d"},
+ {FORMAT3(2, OP3_X(0,3), 1), "xor", "1id"},
+ {FORMAT3(2, OP3_X(1,3), 0), "xorcc", "12d"},
+ {FORMAT3(2, OP3_X(1,3), 1), "xorcc", "1id"},
+ {FORMAT3(2, OP3_X(2,3), 0), "tsubcctv", "12d"},
+ {FORMAT3(2, OP3_X(2,3), 1), "tsubcctv", "1id"},
+#ifdef V9
+ {FORMAT3(2, OP3_X(3,3), 0), "UNDEFINED", ""},
+#else
+ {FORMAT3(2, OP3_X(3,3), 0), "wr", "12T"}, /* wr 1, 2, %tbr */
+ {FORMAT3(2, OP3_X(3,3), 1), "wr", "1iT"}, /* wr 1, i, %tbr */
+#endif
+
+ {FORMAT3(2, OP3_X(0,4), 0), "sub", "12d"},
+ {FORMAT3(2, OP3_X(0,4), 1), "sub", "1id"},
+ {FORMAT3(2, OP3_X(1,4), 0), "subcc", "12d"},
+ {FORMAT3(2, OP3_X(1,4), 1), "subcc", "1id"},
+ {FORMAT3(2, OP3_X(2,4), 0), "mulscc", "12d"},
+ {FORMAT3(2, OP3_X(2,4), 1), "mulscc", "1id"},
+ {FORMAT3(2, OP3_X(3,4), 1), "FPop1", ""}, /* see below */
+
+ {FORMAT3(2, OP3_X(0,5), 0), "andn", "12d"},
+ {FORMAT3(2, OP3_X(0,5), 1), "andn", "1id"},
+ {FORMAT3(2, OP3_X(1,5), 0), "andncc", "12d"},
+ {FORMAT3(2, OP3_X(1,5), 1), "andncc", "1id"},
+ {FORMAT3(2, OP3_X(2,5), 0), "sll", "12d"},
+ {FORMAT3(2, OP3_X(2,5), 1), "sll", "1Dd"},
+ {FORMAT3(2, OP3_X(2,5), 0)|X(1), "sllx", "12d"},
+ {FORMAT3(2, OP3_X(2,5), 1)|X(1), "sllx", "1Ed"},
+ {FORMAT3(2, OP3_X(3,5), 1), "FPop2", ""}, /* see below */
+
+ {FORMAT3(2, OP3_X(0,6), 0), "orn", "12d"},
+ {FORMAT3(2, OP3_X(0,6), 1), "orn", "1id"},
+ {FORMAT3(2, OP3_X(1,6), 0), "orncc", "12d"},
+ {FORMAT3(2, OP3_X(1,6), 1), "orncc", "1id"},
+ {FORMAT3(2, OP3_X(2,6), 0), "srl", "12d"},
+ {FORMAT3(2, OP3_X(2,6), 1), "srl", "1Dd"},
+ {FORMAT3(2, OP3_X(2,6), 0)|X(1), "srlx", "12d"},
+ {FORMAT3(2, OP3_X(2,6), 1)|X(1), "srlx", "1Ed"},
+ {FORMAT3(2, OP3_X(3,6), 1), "impdep1", ""},
+
+ {FORMAT3(2, OP3_X(0,7), 0), "xorn", "12d"},
+ {FORMAT3(2, OP3_X(0,7), 1), "xorn", "1id"},
+ {FORMAT3(2, OP3_X(1,7), 0), "xorncc", "12d"},
+ {FORMAT3(2, OP3_X(1,7), 1), "xorncc", "1id"},
+ {FORMAT3(2, OP3_X(2,7), 0), "sra", "12d"},
+ {FORMAT3(2, OP3_X(2,7), 1), "sra", "1Dd"},
+ {FORMAT3(2, OP3_X(2,7), 0)|X(1), "srax", "12d"},
+ {FORMAT3(2, OP3_X(2,7), 1)|X(1), "srax", "1Ed"},
+ {FORMAT3(2, OP3_X(3,7), 1), "impdep2", ""},
+
+ {FORMAT3(2, OP3_X(0,8), 0), "addc", "12d"},
+ {FORMAT3(2, OP3_X(0,8), 1), "addc", "1id"},
+ {FORMAT3(2, OP3_X(1,8), 0), "addccc", "12d"},
+ {FORMAT3(2, OP3_X(1,8), 1), "addccc", "1id"},
+#ifdef V9
+ {(FORMAT3(2, 0x28, 1) | RS1(15)), "membar", "9"},
+ {(FORMAT3(2, 0x28, 0) | RS1(15)), "stbar", ""},
+ {FORMAT3(2, OP3_X(2,8), 0), "rd", "Bd"},
+#else
+ {FORMAT3(2, OP3_X(2,8), 0), "rd", "Yd"},
+#endif
+
+ {FORMAT3(2, OP3_X(3,8), 0), "jmpl", "pd"},
+ {FORMAT3(2, OP3_X(3,8), 1), "jmpl", "qd"},
+
+ {FORMAT3(2, OP3_X(0,9), 0), "mulx", "12d"},
+ {FORMAT3(2, OP3_X(0,9), 1), "mulx", "1id"},
+ {FORMAT3(2, OP3_X(1,9), 0), "UNDEFINED", ""},
+#ifdef V9
+ {FORMAT3(2, OP3_X(2,9), 0), "UNDEFINED", ""},
+#else
+ {FORMAT3(2, OP3_X(2,9), 0), "rd", "Pd"},
+#endif
+ {FORMAT3(2, OP3_X(3,9), 0), "return", "p"},
+ {FORMAT3(2, OP3_X(3,9), 1), "return", "q"},
+
+ {FORMAT3(2, OP3_X(0,10), 0), "umul", "12d"},
+ {FORMAT3(2, OP3_X(0,10), 1), "umul", "1id"},
+ {FORMAT3(2, OP3_X(1,10), 0), "umulcc", "12d"},
+ {FORMAT3(2, OP3_X(1,10), 1), "umulcc", "1id"},
+#ifdef V9
+ {FORMAT3(2, OP3_X(2,10), 0), "rdpr", "Ad"},
+#else
+ {FORMAT3(2, OP3_X(2,10), 0), "rd", "Wd"},
+#endif
+ /*
+ * OP3 = (3,10): TCC: Trap on Integer Condition Codes
+ */
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x8)), "ta", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x8)), "ta", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x0)), "tn", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x0)), "tn", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x9)), "tne", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x9)), "tne", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x1)), "te", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x1)), "te", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0xa)), "tg", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0xa)), "tg", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x2)), "tle", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x2)), "tle", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0xb)), "tge", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0xb)), "tge", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x3)), "tl", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x3)), "tl", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0xc)), "tgu", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0xc)), "tgu", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x4)), "tleu", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x4)), "tleu", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0xd)), "tcc", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0xd)), "tcc", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x5)), "tcs", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x5)), "tcs", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0xe)), "tpos", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0xe)), "tpos", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x6)), "tneg", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x6)), "tneg", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0xf)), "tvc", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0xf)), "tvc", "0F"},
+ {(FORMAT3(2, OP3_X(3,10), 0) | COND(0x7)), "tvs", "12F"},
+ {(FORMAT3(2, OP3_X(3,10), 1) | COND(0x7)), "tvs", "0F"},
+
+ {FORMAT3(2, OP3_X(0,11), 0), "smul", "12d"},
+ {FORMAT3(2, OP3_X(0,11), 1), "smul", "1id"},
+ {FORMAT3(2, OP3_X(1,11), 0), "smulcc", "12d"},
+ {FORMAT3(2, OP3_X(1,11), 1), "smulcc", "1id"},
+#ifdef V9
+ {FORMAT3(2, OP3_X(2,11), 0), "flushw", ""},
+#else
+ {FORMAT3(2, OP3_X(2,11), 0), "rd", "Td"},
+#endif
+ {FORMAT3(2, OP3_X(3,11), 0), "flush", "p"},
+ {FORMAT3(2, OP3_X(3,11), 1), "flush", "q"},
+
+ {FORMAT3(2, OP3_X(0,12), 0), "subc", "12d"},
+ {FORMAT3(2, OP3_X(0,12), 1), "subc", "1id"},
+ {FORMAT3(2, OP3_X(1,12), 0), "subccc", "12d"},
+ {FORMAT3(2, OP3_X(1,12), 1), "subccc", "1id"},
+ /*
+ * OP3 = (2,12): MOVcc, Move Integer Register on Condition
+ */
+ /* For Integer Condition Codes */
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,8)), "mova", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,8)), "mova", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,0)), "movn", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,0)), "movn", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,9)), "movne", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,9)), "movne", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,1)), "move", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,1)), "move", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,10)), "movg", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,10)), "movg", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,2)), "movle", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,2)), "movle", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,11)), "movge", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,11)), "movge", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,3)), "movl", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,3)), "movl", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,12)), "movgu", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,12)), "movgu", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,4)), "movleu", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,4)), "movleu", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,13)), "movcc", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,13)), "movcc", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,5)), "movcs", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,5)), "movcs", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,14)), "movpos", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,14)), "movpos", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,6)), "movneg", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,6)), "movneg", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,15)), "movvc", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,15)), "movvc", "02d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(1,7)), "movvs", "0jd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(1,7)), "movvs", "02d"},
+
+ /* For Floating-Point Condition Codes */
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,8)), "mova", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,8)), "mova", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,0)), "movn", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,0)), "movn", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,7)), "movu", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,7)), "movu", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,6)), "movg", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,6)), "movg", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,5)), "movug", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,5)), "movug", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,4)), "movl", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,4)), "movl", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,3)), "movul", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,3)), "movul", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,2)), "movlg", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,2)), "movlg", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,1)), "movne", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,1)), "movne", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,9)), "move", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,9)), "move", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,10)), "movue", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,10)), "movue", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,11)), "movge", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,11)), "movge", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,12)), "movuge", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,12)), "movuge", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,13)), "movle", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,13)), "movle", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,14)), "movule", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,14)), "movule", "o2d"},
+ {(FORMAT3(2, OP3_X(2,12), 1) | COND2(0,15)), "movo", "ojd"},
+ {(FORMAT3(2, OP3_X(2,12), 0) | COND2(0,15)), "movo", "o2d"},
+
+ {FORMAT3(2, OP3_X(3,12), 0), "save", "12d"},
+ {FORMAT3(2, OP3_X(3,12), 1), "save", "1id"},
+
+ {FORMAT3(2, OP3_X(0,13), 0), "udivx", "12d"},
+ {FORMAT3(2, OP3_X(0,13), 1), "udivx", "1id"},
+ {FORMAT3(2, OP3_X(1,13), 0), "UNDEFINED", ""},
+ {FORMAT3(2, OP3_X(2,13), 0), "sdivx", "12d"},
+ {FORMAT3(2, OP3_X(2,13), 1), "sdivx", "1id"},
+ {FORMAT3(2, OP3_X(3,13), 0), "restore", "12d"},
+ {FORMAT3(2, OP3_X(3,13), 1), "restore", "1id"},
+
+ {FORMAT3(2, OP3_X(0,14), 0), "udiv", "12d"},
+ {FORMAT3(2, OP3_X(0,14), 1), "udiv", "1id"},
+ {FORMAT3(2, OP3_X(1,14), 0), "udivcc", "12d"},
+ {FORMAT3(2, OP3_X(1,14), 1), "udivcc", "1id"},
+ {FORMAT3(2, OP3_X(2,14), 0), "popc", "2d"},
+ {FORMAT3(2, OP3_X(2,14), 1), "popc", "id"},
+
+ {FORMAT3(2, OP3_X(3,14), 0), "done", ""},
+ {FORMAT3(2, OP3_X(3,14)|FCN(1), 1), "retry", ""},
+
+ {FORMAT3(2, OP3_X(0,15), 0), "sdiv", "12d"},
+ {FORMAT3(2, OP3_X(0,15), 1), "sdiv", "1id"},
+ {FORMAT3(2, OP3_X(1,15), 0), "sdivcc", "12d"},
+ {FORMAT3(2, OP3_X(1,15), 1), "sdivcc", "1id"},
+ /*
+ * OP3 = (2,15): MOVr:
+ * Move Integer Register on Register Condition
+ */
+ {(FORMAT3(2, OP3_X(2,15), 1) | RCOND34(1)), "movrz", "1jd"},
+ {(FORMAT3(2, OP3_X(2,15), 0) | RCOND34(1)), "movrz", "12d"},
+ {(FORMAT3(2, OP3_X(2,15), 1) | RCOND34(2)), "movrlez", "1jd"},
+ {(FORMAT3(2, OP3_X(2,15), 0) | RCOND34(2)), "movrlez", "12d"},
+ {(FORMAT3(2, OP3_X(2,15), 1) | RCOND34(3)), "movrlz", "1jd"},
+ {(FORMAT3(2, OP3_X(2,15), 0) | RCOND34(3)), "movrlz", "12d"},
+ {(FORMAT3(2, OP3_X(2,15), 1) | RCOND34(5)), "movrnz", "1jd"},
+ {(FORMAT3(2, OP3_X(2,15), 0) | RCOND34(5)), "movrnz", "12d"},
+ {(FORMAT3(2, OP3_X(2,15), 1) | RCOND34(6)), "movrgz", "1jd"},
+ {(FORMAT3(2, OP3_X(2,15), 0) | RCOND34(6)), "movrgz", "12d"},
+ {(FORMAT3(2, OP3_X(2,15), 1) | RCOND34(7)), "movrgez", "1jd"},
+ {(FORMAT3(2, OP3_X(2,15), 0) | RCOND34(7)), "movrgez", "12d"},
+
+ {FORMAT3(2, OP3_X(3,15), 0), "UNDEFINED", ""},
+
+
+ /*
+ * Format 3/3: Load and store (appendix E, table 33)
+ */
+
+ /* Loads */
+ {(FORMAT3(3, OP3_X(0,0), 0)), "ld", "pd"}, /* officially: lduw */
+ {(FORMAT3(3, OP3_X(0,0), 1)), "ld", "qd"},
+ {(FORMAT3(3, OP3_X(1,0), 0)), "lda", "7d"}, /* officially: lduwa */
+ {(FORMAT3(3, OP3_X(1,0), 1)), "lda", "8d"},
+ {(FORMAT3(3, OP3_X(2,0), 0)), "ldf", "pe"},
+ {(FORMAT3(3, OP3_X(2,0), 1)), "ldf", "qe"},
+ {(FORMAT3(3, OP3_X(3,0), 0)), "ldfa", "7e"},
+ {(FORMAT3(3, OP3_X(3,0), 1)), "ldfa", "8e"},
+
+ {(FORMAT3(3, OP3_X(0,1), 0)), "ldub", "pd"},
+ {(FORMAT3(3, OP3_X(0,1), 1)), "ldub", "qd"},
+ {(FORMAT3(3, OP3_X(1,1), 0)), "lduba", "7d"},
+ {(FORMAT3(3, OP3_X(1,1), 1)), "lduba", "8d"},
+ {(FORMAT3(3, OP3_X(2,1), 0) | RD(0)), "ld", "p5"},
+ {(FORMAT3(3, OP3_X(2,1), 1) | RD(0)), "ld", "q5"},
+ {(FORMAT3(3, OP3_X(2,1), 0) | RD(1)), "ldx", "p6"},
+ {(FORMAT3(3, OP3_X(2,1), 1) | RD(1)), "ldx", "q6"},
+
+ {(FORMAT3(3, OP3_X(0,2), 0)), "lduh", "pd"},
+ {(FORMAT3(3, OP3_X(0,2), 1)), "lduh", "qd"},
+ {(FORMAT3(3, OP3_X(1,2), 0)), "lduha", "7d"},
+ {(FORMAT3(3, OP3_X(1,2), 1)), "lduha", "8d"},
+ {(FORMAT3(3, OP3_X(2,2), 0)), "ldq", "pe"},
+ {(FORMAT3(3, OP3_X(2,2), 1)), "ldq", "qe"},
+ {(FORMAT3(3, OP3_X(3,2), 0)), "ldqa", "7e"},
+ {(FORMAT3(3, OP3_X(3,2), 1)), "ldqa", "8e"},
+
+ {(FORMAT3(3, OP3_X(0,3), 0)), "ldd", "pd"},
+ {(FORMAT3(3, OP3_X(0,3), 1)), "ldd", "qd"},
+ {(FORMAT3(3, OP3_X(1,3), 0)), "ldda", "7d"},
+ {(FORMAT3(3, OP3_X(1,3), 1)), "ldda", "8d"},
+ {(FORMAT3(3, OP3_X(2,3), 0)), "ldd", "pe"},
+ {(FORMAT3(3, OP3_X(2,3), 1)), "ldd", "qe"},
+ {(FORMAT3(3, OP3_X(3,3), 0)), "ldda", "7e"},
+ {(FORMAT3(3, OP3_X(3,3), 1)), "ldda", "8e"},
+
+ {(FORMAT3(3, OP3_X(0,4), 0)), "st", "dp"}, /* officially: stw */
+ {(FORMAT3(3, OP3_X(0,4), 1)), "st", "dq"},
+ {(FORMAT3(3, OP3_X(1,4), 0)), "sta", "d7"}, /* officially: stwa */
+ {(FORMAT3(3, OP3_X(1,4), 1)), "sta", "d8"},
+ {(FORMAT3(3, OP3_X(2,4), 0)), "st", "ep"},
+ {(FORMAT3(3, OP3_X(2,4), 1)), "st", "eq"},
+ {(FORMAT3(3, OP3_X(3,4), 0)), "sta", "e7"},
+ {(FORMAT3(3, OP3_X(3,4), 1)), "sta", "e8"},
+
+ {(FORMAT3(3, OP3_X(0,5), 0)), "stb", "dp"},
+ {(FORMAT3(3, OP3_X(0,5), 1)), "stb", "dq"},
+ {(FORMAT3(3, OP3_X(1,5), 0)), "stba", "d7"},
+ {(FORMAT3(3, OP3_X(1,5), 1)), "stba", "d8"},
+ {(FORMAT3(3, OP3_X(2,5), 0)), "st", "5p"},
+ {(FORMAT3(3, OP3_X(2,5), 1)), "st", "5q"},
+ {(FORMAT3(3, OP3_X(2,5), 0)|RD(1)), "stx", "6p"},
+ {(FORMAT3(3, OP3_X(2,5), 1)|RD(1)), "stx", "6q"},
+
+ {(FORMAT3(3, OP3_X(0,6), 0)), "sth", "dp"},
+ {(FORMAT3(3, OP3_X(0,6), 1)), "sth", "dq"},
+ {(FORMAT3(3, OP3_X(1,6), 0)), "stha", "d7"},
+ {(FORMAT3(3, OP3_X(1,6), 1)), "stha", "d8"},
+ {(FORMAT3(3, OP3_X(2,6), 0)), "stq", "ep"},
+ {(FORMAT3(3, OP3_X(2,6), 1)), "stq", "eq"},
+ {(FORMAT3(3, OP3_X(3,6), 0)), "stqa", "e7"},
+ {(FORMAT3(3, OP3_X(3,6), 1)), "stqa", "e8"},
+
+ {(FORMAT3(3, OP3_X(0,7), 0)), "std", "dp"},
+ {(FORMAT3(3, OP3_X(0,7), 1)), "std", "dq"},
+ {(FORMAT3(3, OP3_X(1,7), 0)), "stda", "d7"},
+ {(FORMAT3(3, OP3_X(1,7), 1)), "stda", "d8"},
+ {(FORMAT3(3, OP3_X(2,7), 0)), "std", "ep"},
+ {(FORMAT3(3, OP3_X(2,7), 1)), "std", "eq"},
+ {(FORMAT3(3, OP3_X(3,7), 0)), "stda", "e7"},
+ {(FORMAT3(3, OP3_X(3,7), 1)), "stda", "e8"},
+
+ {(FORMAT3(3, OP3_X(0,8), 0)), "ldsw", "pd"},
+ {(FORMAT3(3, OP3_X(0,8), 1)), "ldsw", "qd"},
+ {(FORMAT3(3, OP3_X(1,8), 0)), "ldswa", "7d"},
+ {(FORMAT3(3, OP3_X(1,8), 1)), "ldswa", "8d"},
+
+ {(FORMAT3(3, OP3_X(0,9), 0)), "ldsb", "pd"},
+ {(FORMAT3(3, OP3_X(0,9), 1)), "ldsb", "qd"},
+ {(FORMAT3(3, OP3_X(1,9), 0)), "ldsba", "7d"},
+ {(FORMAT3(3, OP3_X(1,9), 1)), "ldsba", "8d"},
+
+ {(FORMAT3(3, OP3_X(0,10), 0)), "ldsh", "pd"},
+ {(FORMAT3(3, OP3_X(0,10), 1)), "ldsh", "qd"},
+ {(FORMAT3(3, OP3_X(1,10), 0)), "ldsha", "7d"},
+ {(FORMAT3(3, OP3_X(1,10), 1)), "ldsha", "8d"},
+
+ {(FORMAT3(3, OP3_X(0,11), 0)), "ldx", "pd"},
+ {(FORMAT3(3, OP3_X(0,11), 1)), "ldx", "qd"},
+ {(FORMAT3(3, OP3_X(1,11), 0)), "ldxa", "7d"},
+ {(FORMAT3(3, OP3_X(1,11), 1)), "ldxa", "8d"},
+
+ {(FORMAT3(3, OP3_X(3,12), 1)), "casa", "s2d"},
+ {(FORMAT3(3, OP3_X(3,12), 0)), "casa", "t2d"},
+
+ {(FORMAT3(3, OP3_X(0,13), 0)), "ldstub", "7d"},
+ {(FORMAT3(3, OP3_X(0,13), 1)), "ldstub", "8d"},
+ {(FORMAT3(3, OP3_X(1,13), 0)), "ldstuba", "pd"},
+ {(FORMAT3(3, OP3_X(1,13), 1)), "ldstuba", "qd"},
+ {(FORMAT3(3, OP3_X(2,13), 0)), "prefetch", "pr"},
+ {(FORMAT3(3, OP3_X(2,13), 1)), "prefetch", "qr"},
+ {(FORMAT3(3, OP3_X(3,13), 0)), "prefetcha", "7r"},
+ {(FORMAT3(3, OP3_X(3,13), 1)), "prefetcha", "8r"},
+
+ {(FORMAT3(3, OP3_X(0,14), 0)), "stx", "dp"},
+ {(FORMAT3(3, OP3_X(0,14), 1)), "stx", "dq"},
+ {(FORMAT3(3, OP3_X(1,14), 0)), "stwa", "d7"},
+ {(FORMAT3(3, OP3_X(1,14), 1)), "stwa", "d8"},
+ {(FORMAT3(3, OP3_X(3,14), 0)), "casxa", "t2d"},
+ {(FORMAT3(3, OP3_X(3,14), 1)), "casxa", "s2d"},
+
+ /* Swap Register */
+ {(FORMAT3(3, OP3_X(0,15), 0)), "swap", "pd"},
+ {(FORMAT3(3, OP3_X(0,15), 1)), "swap", "qd"},
+ {(FORMAT3(3, OP3_X(1,15), 0)), "swapa", "7d"},
+ {(FORMAT3(3, OP3_X(1,15), 1)), "swapa", "8d"},
+
+
+ /*
+ * OP3 = (3,4): FPop1 (table 34)
+ */
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,1))), "fmovs", ".4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,2))), "fmovd", ".4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,3))), "fmovq", ".4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,5))), "fnegs", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,6))), "fnegd", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,7))), "fnegq", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,9))), "fabss", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,10))), "fabsd", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(0,11))), "fabsq", "4e"},
+
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(2,9))), "fsqrts", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(2,10))), "fsqrtd", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(2,11))), "fsqrtq", "4e"},
+
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,1))), "fadds", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,2))), "faddd", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,3))), "faddq", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,5))), "fsubs", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,6))), "fsubd", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,7))), "fsubq", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,9))), "fmuls", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,10))), "fmuld", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,11))), "fmulq", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,13))), "fdivs", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,14))), "fdivd", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(4,15))), "fdivq", "34e"},
+
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(6,9))), "fsmuld", "34e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(6,14))), "fdmulq", "34e"},
+
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(8,1))), "fstox", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(8,2))), "fdtox", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(8,3))), "fqtox", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(8,4))), "fxtos", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(8,8))), "fxtod", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(8,12))), "fxtoq", "4e"},
+
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,4))), "fitos", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,6))), "fdtos", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,7))), "fqtos", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,8))), "fitod", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,9))), "fstod", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,11))), "fqtod", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,12))), "fitoq", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,13))), "fstoq", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(12,14))), "fdtoq", "4e"},
+
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(13,1))), "fstoi", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(13,2))), "fdtoi", "4e"},
+ {(FORMAT3F(2, OP3_X(3,4), OPF_X(13,3))), "fqtoi", "4e"},
+
+
+#ifdef xxx
+ /*
+ * OP3 =(3,5): FPop2 (table 35)
+ */
+ {(FORMAT3F(2, OP3_X(3,5), 81)), "fcmps", "o34"},
+ {(FORMAT3F(2, OP3_X(3,5), 82)), "fcmpd", "o34"},
+ {(FORMAT3F(2, OP3_X(3,5), 83)), "fcmpq", "o34"},
+ {(FORMAT3F(2, OP3_X(3,5), 85)), "fcmpes", "o34"},
+ {(FORMAT3F(2, OP3_X(3,5), 86)), "fcmped", "o34"},
+ {(FORMAT3F(2, OP3_X(3,5), 87)), "fcmpeq", "o34"},
+
+ /* Move Floating-Point Register on Condition "FMOVcc" */
+ /* FIXME should check for single, double, and quad movements */
+ /* Integer Condition Codes */
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,8)), "fmova", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,0)), "fmovn", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,9)), "fmovne", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,1)), "fmove", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,10)), "fmovg", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,2)), "fmovle", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,11)), "fmovge", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,3)), "fmovl", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,12)), "fmovgu", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,4)), "fmovleu", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,13)), "fmovcc", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,5)), "fmovcs", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,14)), "fmovpos", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,6)), "fmovneg", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,15)), "fmovvc", "04e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,7)), "fmovvs", "04e"},
+
+ /* Floating-Point Condition Codes */
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,8)), "fmova", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,0)), "fmovn", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,7)), "fmovu", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,6)), "fmovg", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,5)), "fmovug", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,4)), "fmovk", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,3)), "fmovul", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,2)), "fmovlg", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,1)), "fmovne", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,9)), "fmove", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,10)), "fmovue", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,11)), "fmovge", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,12)), "fmovuge", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,13)), "fmovle", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,14)), "fmovule", "o4e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | COND2(0,15)), "fmovo", "o4e"},
+
+ /* Move F-P Register on Integer Register Condition "FMOVr" */
+ /* FIXME: check for short, double, and quad's */
+ {(FORMAT3(2, OP3_X(3,5), 0) | RCOND34(1)), "fmovre", "14e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | RCOND34(2)), "fmovrlez", "14e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | RCOND34(3)), "fmovrlz", "14e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | RCOND34(5)), "fmovrne", "14e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | RCOND34(6)), "fmovrgz", "14e"},
+ {(FORMAT3(2, OP3_X(3,5), 0) | RCOND34(7)), "fmovrgez", "14e"},
+#endif
+ /* FP logical insns -- UltraSPARC extens */
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,0))), "fzero", "e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,1))), "fzeros", "e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,14))), "fone", "e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,15))), "fones", "e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,4))), "fsrc1", "3e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,5))), "fsrc1s", "3e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,8))), "fsrc2", "4e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,9))), "fsrc2s", "4e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,10))), "fnot1", "3e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,11))), "fnot1s", "3e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,6))), "fnot2", "4e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,7))), "fnot2s", "4e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,12))), "for", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,13))), "fors", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,2))), "fnor", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,3))), "fnors", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,0))), "fand", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,1))), "fands", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,14))), "fnand", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,15))), "fnands", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,12))), "fxor", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,13))), "fxors", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,2))), "fxnor", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,3))), "fxnors", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,10))), "fornot1", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,11))), "fornot1s", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,6))), "fornot2", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(7,7))), "fornot2s", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,8))), "fandnot1", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,9))), "fandnot1s", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,4))), "fandnot2", "34e"},
+ {(FORMAT3F(2, OP3_X(3,6), OPF_X(6,5))), "fandnot2s", "34e"},
+
+ /* grrrr.... */
+ {0, 0, 0}
+
+};
+
+db_addr_t
+db_disasm(loc, altfmt)
+ db_addr_t loc;
+ boolean_t altfmt;
+{
+ struct sparc_insn* i_ptr = (struct sparc_insn *)&sparc_i;
+
+ unsigned int insn, you_lose, bitmask;
+ int matchp;
+ char* f_ptr, *cp;
+
+ you_lose = 0;
+ matchp = 0;
+ insn = db_get_value(loc, 4, 0);
+
+ if (insn == 0x01000000) {
+ db_printf("nop\n");
+ return loc + 4;
+ }
+
+ while (i_ptr->name) {
+ /* calculate YOU_LOSE value */
+ bitmask= (i_ptr->match);
+ you_lose = (~bitmask);
+
+ if (((bitmask>>30) & 0x3) == 0x1) {
+ /* Call */
+ you_lose = ((~0x1)<<30);
+ } else if (((bitmask>>30) & 0x3) == 0x0) {
+ if (((bitmask>>22) & 0x7) == 0x4) {
+ /* Sethi */
+ you_lose &= (FORMAT2(0x3,0x7));
+ } else {
+ /* Branches */
+ you_lose &= (FORMAT2(0x3,0x7)|COND(0xf));
+ }
+ } else if (((bitmask>>30) & 0x3) == 0x2 &&
+ ((bitmask>>19) & 0x3f) == 0x34) /* XXX */ {
+ /* FPop1 */
+ you_lose &= (FORMAT3(0x3,0x3f,0x1) | OPF(0x1ff));
+ } else if (((bitmask>>30) & 0x3) == 0x2 &&
+ ((bitmask>>19) & 0x3f) == 0x3a) /* XXX */ {
+ /* Tcc */
+ you_lose &= (FORMAT3(0x3,0x3f,0x1) | COND(0xf));
+ } else if (((bitmask>>30) & 0x3) == 0x2 &&
+ ((bitmask>>21) & 0xf) == 0x9 &&
+ ((bitmask>>19) & 0x3) != 0) /* XXX */ {
+ /* shifts */
+ you_lose &= (FORMAT3(0x3,0x3f,0x1))|X(1);
+ } else if (((bitmask>>30) & 0x3) == 0x2 &&
+ ((bitmask>>19) & 0x3f) == 0x2c) /* XXX */ {
+ /* cmov */
+ you_lose &= (FORMAT3(0x3,0x3f,0x1) | COND2(1,0xf));
+ } else if (((bitmask>>30) & 0x3) == 0x2 &&
+ ((bitmask>>19) & 0x3f) == 0x35) /* XXX */ {
+ /* fmov */
+ you_lose &= (FORMAT3(0x3,0x3f,0x1) | COND2(1,0xf));
+ } else {
+ you_lose &= (FORMAT3(0x3,0x3f,0x1));
+ }
+
+ if (((bitmask & insn) == bitmask) && ((you_lose & insn) == 0)) {
+ matchp = 1;
+ break;
+ }
+ i_ptr++;
+ };
+
+ if (!matchp) {
+ db_printf("undefined\n");
+ return loc + 4;
+ }
+
+ db_printf("%s", i_ptr->name);
+
+ f_ptr = i_ptr->format;
+
+ for (cp = f_ptr; *cp; cp++) {
+ if (*cp == ',') {
+ for (;f_ptr < cp; f_ptr++)
+ switch (*f_ptr) {
+ case 'a':
+ if (insn & A(1))
+ db_printf(",a");
+ break;
+ case 'p':
+ if (insn & P(1))
+ db_printf(",pt");
+ else
+ db_printf(",pn");
+ break;
+ }
+ f_ptr++;
+ break;
+ }
+ }
+ db_printf(" \t");
+
+ while (*f_ptr) {
+ switch (*f_ptr) {
+ int64_t val;
+ case '1':
+ db_printf("%%%s", regs[((insn >> 14) & 0x1f)]);
+ break;
+ case '2':
+ db_printf("%%%s", regs[(insn & 0x1f)]);
+ break;
+ case 'd':
+ db_printf("%%%s", regs[((insn >> 25) & 0x1f)]);
+ break;
+ case '3':
+ db_printf("%%f%d", ((insn >> 14) & 0x1f));
+ break;
+ case '4':
+ db_printf("%%f%d", (insn & 0x1f));
+ break;
+ case 'e':
+ db_printf("%%f%d", ((insn >> 25) & 0x1f));
+ break;
+ case 'i':
+ /* simm13 -- signed */
+ val = SIGNEX(insn, 13);
+ db_printf("%s0x%x", SIGN(val), (int)abs(val));
+ break;
+ case 'j':
+ /* simm11 -- signed */
+ val = SIGNEX(insn, 11);
+ db_printf("%s0x%x", SIGN(val), (int)abs(val));
+ break;
+ case 'l':
+ val = (((insn>>20)&0x3)<<13)|(insn & 0x1fff);
+ val = SIGNEX(val, 16);
+ db_printsym((db_addr_t)(loc + (4 * val)), DB_STGY_ANY);
+ break;
+ case 'm':
+ db_printsym((db_addr_t)(loc + (4 * SIGNEX(insn, 22))),
+ DB_STGY_ANY);
+ break;
+ case 'u':
+ db_printsym((db_addr_t)(loc + (4 * SIGNEX(insn, 19))),
+ DB_STGY_ANY);
+ break;
+ case 'n':
+ db_printsym((db_addr_t)(loc + (4 * SIGNEX(insn, 30))),
+ DB_STGY_PROC);
+ break;
+ case 's':
+ db_printf("%%asi");
+ break;
+ case 't':
+ db_printf("0x%-2.2x", ((insn >> 5) & 0xff));
+ break;
+ case 'o':
+ db_printf("%%fcc%d", ((insn >> 25) & 0x3));
+ break;
+ case 'p':
+ case '7':
+ db_printf("[%%%s + %%%s]",
+ regs[((insn >> 14) & 0x1f)],
+ regs[(insn & 0x1f)]);
+ if (*f_ptr == '7')
+ db_printf(" %d", ((insn >> 5) & 0xff));
+ break;
+ case 'q':
+ case '8':
+ val = SIGNEX(insn, 13);
+ db_printf("[%%%s %c 0x%x]",
+ regs[((insn >> 14) & 0x1f)],
+ (int)((val<0)?'-':'+'),
+ (int)abs(val));
+ if (*f_ptr == '8')
+ db_printf(" %%asi");
+ break;
+ case '5':
+ db_printf("%%fsr");
+ break;
+ case '6':
+ db_printf("%%fsr");
+ break;
+ case '9':
+ db_printf("0x%xl",
+ ((insn & 0xf) | ((insn >> 4) & 0x7)));
+ break;
+ case '0':
+ db_printf("%%%s", ccodes[((insn >> 11) & 0x3) + 4]);
+ break;
+ case '.':
+ db_printf("%%%s", ccodes[((insn >> 11) & 0x7)]);
+ break;
+ case 'r':
+ db_printf("#%s", prefetch[((insn >> 25) & 0x1f)]);
+ break;
+ case 'A':
+ db_printf("%%%s", priv_regs[((insn >> 14) & 0x1f)]);
+ break;
+ case 'B':
+ db_printf("%%%s", state_regs[((insn >> 14) & 0x1f)]);
+ break;
+ case 'C':
+ db_printf("%%hi(0x%x)", ((insn & 0x3fffff) << 10));
+ break;
+ case 'D':
+ db_printf("0x%x", (insn & 0x1f));
+ break;
+ case 'E':
+ db_printf("%d", (insn & 0x3f));
+ break;
+ case 'F':
+ db_printf("%d", (insn & 0x3f));
+ break;
+ case 'G':
+ db_printf("%%%s", priv_regs[((insn >> 25) & 0x1f)]);
+ break;
+ case 'H':
+ db_printf("%%%s", state_regs[((insn >> 25) & 0x1f)]);
+ break;
+#ifndef V9
+ case 'P':
+ db_printf("%%psr");
+ break;
+ case 'T':
+ db_printf("%%tbr");
+ break;
+ case 'W':
+ db_printf("%%wim");
+ break;
+ case 'Y':
+ db_printf("%%y");
+ break;
+#endif
+ default:
+ db_printf("(UNKNOWN)");
+ break;
+ }
+ if (*(++f_ptr))
+ db_printf(", ");
+ };
+
+ db_printf("\n");
+
+ return (loc + 4);
+}
+
diff --git a/sys/sparc64/sparc64/db_interface.c b/sys/sparc64/sparc64/db_interface.c
new file mode 100644
index 0000000..636c7f5
--- /dev/null
+++ b/sys/sparc64/sparc64/db_interface.c
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/reboot.h>
+#include <sys/cons.h>
+#include <sys/ktr.h>
+#include <sys/linker_set.h>
+#include <sys/lock.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+
+#include <setjmp.h>
+
+static jmp_buf *db_nofault = 0;
+extern jmp_buf db_jmpbuf;
+
+int db_active;
+db_regs_t ddb_regs;
+
+static jmp_buf db_global_jmpbuf;
+static int db_global_jmpbuf_valid;
+
+/*
+ * Debugger entry point, called from the trap handler.  Copies the
+ * trapframe into ddb_regs and runs the ddb command loop; always
+ * reports the trap as handled (returns 1).
+ */
+int
+kdb_trap(struct trapframe *tf)
+{
+	struct kdbframe *kf;
+
+	/*
+	 * If we fault while already inside the debugger, unwind back
+	 * to the outer invocation instead of recursing.
+	 */
+	if (db_global_jmpbuf_valid)
+		longjmp(db_global_jmpbuf, 1);
+	ddb_regs = *tf;
+	/* Start the "current frame" pointer at the frame that trapped. */
+	kf = ddb_regs.tf_arg;
+	kf->kf_cfp = kf->kf_fp;
+	/* Re-entered here via longjmp() on a nested fault. */
+	setjmp(db_global_jmpbuf);
+	db_global_jmpbuf_valid = TRUE;
+	db_active++;
+	cndbctl(TRUE);
+	db_trap(tf->tf_type, 0);
+	cndbctl(FALSE);
+	db_active--;
+	db_global_jmpbuf_valid = FALSE;
+	return (1);
+}
+
+/*
+ * Copy `size' bytes from kernel address `addr' into `data' on behalf
+ * of ddb.
+ * NOTE(review): db_nofault is set around the copy, but nothing in
+ * this file consumes it -- confirm the trap handler actually honors
+ * it, otherwise a bad address will re-enter the debugger via a fault.
+ */
+void
+db_read_bytes(vm_offset_t addr, size_t size, char *data)
+{
+	char *src;
+
+	db_nofault = &db_jmpbuf;
+	src = (char *)addr;
+	while (size-- > 0)
+		*data++ = *src++;
+	db_nofault = NULL;
+}
+
+/*
+ * Copy `size' bytes from `data' to kernel address `addr' on behalf
+ * of ddb (used e.g. to plant breakpoints and modify memory).
+ * NOTE(review): as with db_read_bytes, db_nofault has no visible
+ * consumer here -- verify fault protection is wired up.
+ */
+void
+db_write_bytes(vm_offset_t addr, size_t size, char *data)
+{
+	char *dst;
+
+	db_nofault = &db_jmpbuf;
+	dst = (char *)addr;
+	while (size-- > 0)
+		*dst++ = *data++;
+	db_nofault = NULL;
+}
+
+/* ddb "reboot" command: reset the machine immediately. */
+DB_COMMAND(reboot, db_reboot)
+{
+	cpu_reset();
+}
diff --git a/sys/sparc64/sparc64/db_trace.c b/sys/sparc64/sparc64/db_trace.c
new file mode 100644
index 0000000..26e0b15
--- /dev/null
+++ b/sys/sparc64/sparc64/db_trace.c
@@ -0,0 +1,275 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/linker_set.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/cpu.h>
+#include <machine/trap.h>
+#include <machine/vmparam.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_watch.h>
+
+#define INKERNEL(va) \
+ ((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
+
+static db_varfcn_t db_show_in0;
+static db_varfcn_t db_show_in1;
+static db_varfcn_t db_show_in2;
+static db_varfcn_t db_show_in3;
+static db_varfcn_t db_show_in4;
+static db_varfcn_t db_show_in5;
+static db_varfcn_t db_show_in6;
+static db_varfcn_t db_show_in7;
+static db_varfcn_t db_show_local0;
+static db_varfcn_t db_show_local1;
+static db_varfcn_t db_show_local2;
+static db_varfcn_t db_show_local3;
+static db_varfcn_t db_show_local4;
+static db_varfcn_t db_show_local5;
+static db_varfcn_t db_show_local6;
+static db_varfcn_t db_show_local7;
+
+static void db_print_trap(struct trapframe *);
+
+extern char tl1_trap[];
+
+/*
+ * Register set exposed to ddb.  The globals and trap state are read
+ * directly from ddb_regs (a trapframe copy made in kdb_trap); the
+ * window in/local registers go through accessor functions because
+ * they live in the stack frame, not in the saved trapframe.
+ */
+struct db_variable db_regs[] = {
+	{ "g0", &ddb_regs.tf_global[0], FCN_NULL },
+	{ "g1", &ddb_regs.tf_global[1], FCN_NULL },
+	{ "g2", &ddb_regs.tf_global[2], FCN_NULL },
+	{ "g3", &ddb_regs.tf_global[3], FCN_NULL },
+	{ "g4", &ddb_regs.tf_global[4], FCN_NULL },
+	{ "g5", &ddb_regs.tf_global[5], FCN_NULL },
+	{ "g6", &ddb_regs.tf_global[6], FCN_NULL },
+	{ "g7", &ddb_regs.tf_global[7], FCN_NULL },
+	{ "i0", NULL, db_show_in0 },
+	{ "i1", NULL, db_show_in1 },
+	{ "i2", NULL, db_show_in2 },
+	{ "i3", NULL, db_show_in3 },
+	{ "i4", NULL, db_show_in4 },
+	{ "i5", NULL, db_show_in5 },
+	{ "i6", NULL, db_show_in6 },
+	{ "i7", NULL, db_show_in7 },
+	{ "l0", NULL, db_show_local0 },
+	{ "l1", NULL, db_show_local1 },
+	{ "l2", NULL, db_show_local2 },
+	{ "l3", NULL, db_show_local3 },
+	{ "l4", NULL, db_show_local4 },
+	{ "l5", NULL, db_show_local5 },
+	{ "l6", NULL, db_show_local6 },
+	{ "l7", NULL, db_show_local7 },
+	{ "tstate", &ddb_regs.tf_tstate, FCN_NULL },
+	{ "tpc", &ddb_regs.tf_tpc, FCN_NULL },
+	{ "tnpc", &ddb_regs.tf_tnpc, FCN_NULL }
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * ddb "trace" command: walk the kernel stack starting either from the
+ * debugger's current frame or from `addr', printing one line per
+ * frame.  When a frame's return address is tl1_trap, a trapframe sits
+ * just above the register window; print the trap details and resume
+ * symbol lookup at the trapped %tpc.
+ */
+void
+db_stack_trace_cmd(db_expr_t addr, boolean_t have_addr, db_expr_t count,
+    char *modif)
+{
+	struct trapframe *tf;
+	struct kdbframe *kfp;
+	struct frame *fp;
+	const char *name;
+	c_db_sym_t sym;
+	db_expr_t offset;
+	db_expr_t value;
+	db_addr_t nfp;
+	db_addr_t npc;
+	db_addr_t pc;
+	int trap;
+
+	trap = 0;
+	npc = 0;
+	/* -1 means "no count given"; cap the walk at 1024 frames. */
+	if (count == -1)
+		count = 1024;
+	if (!have_addr) {
+		kfp = DDB_REGS->tf_arg;
+		fp = (struct frame *)(kfp->kf_cfp + SPOFF);
+	} else
+		fp = (struct frame *)(addr + SPOFF);
+	while (count-- && INKERNEL((vm_offset_t)fp)) {
+		pc = (db_addr_t)db_get_value((db_addr_t)&fp->f_pc,
+		    sizeof(db_addr_t), FALSE);
+		/* The previous frame was a trap; use the trapped pc. */
+		if (trap) {
+			pc = npc;
+			trap = 0;
+		}
+		sym = db_search_symbol(pc, DB_STGY_ANY, &offset);
+		db_symbol_values(sym, &name, &value);
+		if (name == NULL)
+			name = "(null)";
+		if (value == (u_long)tl1_trap) {
+			/* Trapframe lives just above the register window. */
+			nfp = db_get_value((db_addr_t)&fp->f_fp,
+			    sizeof(u_long), FALSE) + SPOFF;
+			tf = (struct trapframe *)(nfp + sizeof(*fp));
+			npc = db_get_value((db_addr_t)&tf->tf_tpc,
+			    sizeof(u_long), FALSE);
+			db_print_trap(tf);
+			trap = 1;
+		} else {
+			db_printf("%s() at ", name);
+			db_printsym(pc, DB_STGY_PROC);
+			db_printf("\n");
+		}
+		/* Follow the saved frame pointer to the caller's frame. */
+		fp = (struct frame *)(db_get_value((db_addr_t)&fp->f_fp,
+		    sizeof(u_long), FALSE) + SPOFF);
+	}
+}
+
+/*
+ * Print a one-line description of a trapframe encountered during a
+ * stack trace: trap class (kernel/user), trap name, and for alignment
+ * faults the faulting virtual address from the MMU frame.
+ */
+static void
+db_print_trap(struct trapframe *tf)
+{
+	struct mmuframe *mf;
+	u_long type;
+	u_long va;
+
+	type = db_get_value((db_addr_t)&tf->tf_type, sizeof(u_long), FALSE);
+	db_printf("-- %s trap (%s) -- ", type & T_KERNEL ? "kernel" : "user",
+	    trap_msg[type & ~T_KERNEL]);
+	switch (type & ~T_KERNEL) {
+	case T_ALIGN:
+		mf = (struct mmuframe *)db_get_value((db_addr_t)&tf->tf_arg,
+		    sizeof(void *), FALSE);
+		va = (u_long)db_get_value((db_addr_t)&mf->mf_sfar,
+		    sizeof(u_long), FALSE);
+		db_printf("va=%#lx", va);
+		break;
+	default:
+		break;
+	}
+	db_printf("\n");
+}
+
+/*
+ * ddb "down" command: move the current-frame pointer (kf_cfp) one
+ * frame toward the innermost frame.  Since frames only link upward,
+ * we rewalk from the bottom until we find the frame whose saved fp
+ * equals the current frame, and make that the new current frame.
+ */
+DB_COMMAND(down, db_frame_down)
+{
+	struct kdbframe *kfp;
+	struct frame *fp;
+	u_long cfp;
+	u_long ofp;
+
+	kfp = DDB_REGS->tf_arg;
+	fp = (struct frame *)(kfp->kf_fp + SPOFF);
+	cfp = kfp->kf_cfp;
+	for (;;) {
+		if (!INKERNEL((u_long)fp)) {
+			db_printf("already at bottom\n");
+			break;
+		}
+		ofp = db_get_value((db_addr_t)&fp->f_fp, sizeof(u_long),
+		    FALSE);
+		if (ofp == cfp) {
+			kfp->kf_cfp = (u_long)fp - SPOFF;
+			break;
+		}
+		fp = (struct frame *)(ofp + SPOFF);
+	}
+}
+
+/*
+ * ddb "up" command: move the current-frame pointer one frame toward
+ * the outermost frame by following the current frame's saved fp.
+ */
+DB_COMMAND(up, db_frame_up)
+{
+	struct kdbframe *kfp;
+	struct frame *cfp;
+
+	kfp = DDB_REGS->tf_arg;
+	cfp = (struct frame *)(kfp->kf_cfp + SPOFF);
+	if (!INKERNEL((u_long)cfp)) {
+		db_printf("already at top\n");
+		return;
+	}
+	kfp->kf_cfp = db_get_value((db_addr_t)&cfp->f_fp, sizeof(u_long),
+	    FALSE);
+}
+
+/*
+ * Generate a ddb get/set accessor for window register `name'[num]
+ * (the in/local registers saved in the current frame, reached via
+ * the kdbframe's kf_cfp).
+ *
+ * Fix: the original wrote "f_ ## name ## [num]".  The `##' operator
+ * must produce a single valid preprocessing token, and pasting an
+ * identifier with `[' does not (undefined behavior, C99 6.10.3.3;
+ * ISO-conforming preprocessors reject it).  Plain juxtaposition of
+ * the pasted identifier and the subscript is what was intended.
+ */
+#define DB_SHOW_REG(name, num)						\
+static int								\
+db_show_ ## name ## num(struct db_variable *dp, db_expr_t *vp, int op)	\
+{									\
+	struct kdbframe *kfp;						\
+	struct frame *fp;						\
+									\
+	kfp = DDB_REGS->tf_arg;						\
+	fp = (struct frame *)(kfp->kf_cfp + SPOFF);			\
+	if (op == DB_VAR_GET)						\
+		*vp = db_get_value((db_addr_t)&fp->f_ ## name [num],	\
+		    sizeof(u_long), FALSE);				\
+	else								\
+		db_put_value((db_addr_t)&fp->f_ ## name [num],		\
+		    sizeof(u_long), *vp);				\
+	return (0);							\
+}
+
+/* Instantiate accessors for the eight in and eight local registers. */
+DB_SHOW_REG(in, 0)
+DB_SHOW_REG(in, 1)
+DB_SHOW_REG(in, 2)
+DB_SHOW_REG(in, 3)
+DB_SHOW_REG(in, 4)
+DB_SHOW_REG(in, 5)
+DB_SHOW_REG(in, 6)
+DB_SHOW_REG(in, 7)
+DB_SHOW_REG(local, 0)
+DB_SHOW_REG(local, 1)
+DB_SHOW_REG(local, 2)
+DB_SHOW_REG(local, 3)
+DB_SHOW_REG(local, 4)
+DB_SHOW_REG(local, 5)
+DB_SHOW_REG(local, 6)
+DB_SHOW_REG(local, 7)
+
+/* Hardware watchpoints are not implemented; always fail. */
+int
+db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
+{
+	return (-1);
+}
+
+
+/* Hardware watchpoints are not implemented; always fail. */
+int
+db_md_clr_watchpoint(db_expr_t addr, db_expr_t size)
+{
+	return (-1);
+}
+
+
+/* Hardware watchpoints are not implemented; nothing to list. */
+void
+db_md_list_watchpoints(void)
+{
+	return;
+}
diff --git a/sys/sparc64/sparc64/exception.S b/sys/sparc64/sparc64/exception.S
new file mode 100644
index 0000000..9abc581
--- /dev/null
+++ b/sys/sparc64/sparc64/exception.S
@@ -0,0 +1,603 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_ddb.h"
+
+#include <machine/asi.h>
+#include <machine/asmacros.h>
+#include <machine/trap.h>
+
+#include "assym.s"
+
+#define SPILL(storer, base, asi) \
+ storer %l0, [base + F_L0] asi ; \
+ storer %l1, [base + F_L1] asi ; \
+ storer %l2, [base + F_L2] asi ; \
+ storer %l3, [base + F_L3] asi ; \
+ storer %l4, [base + F_L4] asi ; \
+ storer %l5, [base + F_L5] asi ; \
+ storer %l6, [base + F_L6] asi ; \
+ storer %l7, [base + F_L7] asi ; \
+ storer %i0, [base + F_I0] asi ; \
+ storer %i1, [base + F_I1] asi ; \
+ storer %i2, [base + F_I2] asi ; \
+ storer %i3, [base + F_I3] asi ; \
+ storer %i4, [base + F_I4] asi ; \
+ storer %i5, [base + F_I5] asi ; \
+ storer %i6, [base + F_I6] asi ; \
+ storer %i7, [base + F_I7] asi
+
+#define FILL(loader, base, asi) \
+ loader [base + F_L0] asi, %l0 ; \
+ loader [base + F_L1] asi, %l1 ; \
+ loader [base + F_L2] asi, %l2 ; \
+ loader [base + F_L3] asi, %l3 ; \
+ loader [base + F_L4] asi, %l4 ; \
+ loader [base + F_L5] asi, %l5 ; \
+ loader [base + F_L6] asi, %l6 ; \
+ loader [base + F_L7] asi, %l7 ; \
+ loader [base + F_I0] asi, %i0 ; \
+ loader [base + F_I1] asi, %i1 ; \
+ loader [base + F_I2] asi, %i2 ; \
+ loader [base + F_I3] asi, %i3 ; \
+ loader [base + F_I4] asi, %i4 ; \
+ loader [base + F_I5] asi, %i5 ; \
+ loader [base + F_I6] asi, %i6 ; \
+ loader [base + F_I7] asi, %i7
+
+DATA(intrnames)
+ .asciz "foo"
+DATA(eintrnames)
+
+DATA(intrcnt)
+ .long 0
+DATA(eintrcnt)
+
+ .macro clean_window
+ clr %o0
+ clr %o1
+ clr %o2
+ clr %o3
+ clr %o4
+ clr %o5
+ clr %o6
+ clr %o7
+ clr %l0
+ clr %l1
+ clr %l2
+ clr %l3
+ clr %l4
+ clr %l5
+ clr %l6
+ rdpr %cleanwin, %l7
+ inc %l7
+ wrpr %l7, 0, %cleanwin
+ clr %l7
+ retry
+ .align 128
+ .endm
+
+ .macro tl0_gen type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type, %o0
+ .align 32
+ .endm
+
+ .macro tl0_wide type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type, %o0
+ .align 128
+ .endm
+
+ .macro tl0_reserved count
+ .rept \count
+ tl0_gen T_RESERVED
+ .endr
+ .endm
+
+ .macro tl0_intr_level
+ tl0_reserved 15
+ .endm
+
+ .macro tl0_intr_vector
+ tl0_gen 0
+ .endm
+
+ .macro tl0_immu_miss
+ tl0_wide T_IMMU_MISS
+ .endm
+
+ .macro tl0_dmmu_miss
+ tl0_wide T_DMMU_MISS
+ .endm
+
+ .macro tl0_dmmu_prot
+ tl0_wide T_DMMU_PROT
+ .endm
+
+ .macro tl0_spill_0_n
+ wr %g0, ASI_AIUP, %asi
+ SPILL(stxa, %sp + SPOFF, %asi)
+ saved
+ retry
+ .align 128
+ .endm
+
+ .macro tl0_spill_bad count
+ .rept \count
+ tl0_wide T_SPILL
+ .endr
+ .endm
+
+ .macro tl0_fill_0_n
+ wr %g0, ASI_AIUP, %asi
+ FILL(ldxa, %sp + SPOFF, %asi)
+ restored
+ retry
+ .align 128
+ .endm
+
+ .macro tl0_fill_bad count
+ .rept \count
+ tl0_wide T_FILL
+ .endr
+ .endm
+
+ .macro tl0_soft count
+ tl0_reserved \count
+ .endm
+
+ .macro tl1_gen type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type | T_KERNEL, %o0
+ .align 32
+ .endm
+
+ .macro tl1_wide type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type | T_KERNEL, %o0
+ .align 128
+ .endm
+
+ .macro tl1_reserved count
+ .rept \count
+ tl1_gen T_RESERVED
+ .endr
+ .endm
+
+ .macro tl1_insn_excptn
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_INSN_EXCPTN | T_KERNEL, %o0
+ .align 32
+ .endm
+
+ .macro tl1_align
+ b %xcc, tl1_sfsr_trap
+ nop
+ .align 32
+ .endm
+
+ENTRY(tl1_sfsr_trap)
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_SFAR] %asi, %g1
+ ldxa [%g0 + AA_DMMU_SFSR] %asi, %g2
+ stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
+ membar #Sync
+ save %sp, -(CCFSZ + MF_SIZEOF), %sp
+ stx %g1, [%sp + SPOFF + CCFSZ + MF_SFAR]
+ stx %g2, [%sp + SPOFF + CCFSZ + MF_SFSR]
+ mov T_ALIGN | T_KERNEL, %o0
+ b %xcc, tl1_trap
+ add %sp, SPOFF + CCFSZ, %o1
+END(tl1_sfsr_trap)
+
+ .macro tl1_intr_level
+ tl1_reserved 15
+ .endm
+
+ .macro tl1_intr_vector
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_INTERRUPT | T_KERNEL, %o0
+ .align 8
+ .endm
+
+ .macro tl1_immu_miss
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_IMMU_MISS | T_KERNEL, %o0
+ .align 128
+ .endm
+
+ .macro tl1_dmmu_miss
+ /*
+ * Load the target tte tag, and extract the context. If the context
+ * is non-zero handle as user space access. In either case, load the
+ * tsb 8k pointer.
+ */
+ ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g1
+ srlx %g1, TT_CTX_SHIFT, %g2
+ brnz,pn %g2, 2f
+ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g2
+
+ /*
+ * Convert the tte pointer to an stte pointer, and add extra bits to
+ * accommodate a large tsb.
+ */
+ sllx %g2, STTE_SHIFT - TTE_SHIFT, %g2
+#ifdef notyet
+ mov AA_DMMU_TAR, %g3
+ ldxa [%g3] ASI_DMMU, %g3
+ srlx %g3, TSB_1M_STTE_SHIFT, %g3
+ and %g3, TSB_KERNEL_MASK >> TSB_1M_STTE_SHIFT, %g3
+ sllx %g3, TSB_1M_STTE_SHIFT, %g3
+ add %g2, %g3, %g2
+#endif
+
+ /*
+ * Load the tte, check that it's valid and that the tags match.
+ */
+ ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+ brgez,pn %g5, 2f
+ cmp %g4, %g1
+ bne %xcc, 2f
+ EMPTY
+
+ /*
+ * Set the reference bit, if it's currently clear.
+ */
+ andcc %g5, TD_REF, %g0
+ bnz %xcc, 1f
+ or %g5, TD_REF, %g1
+ stx %g1, [%g2 + ST_TTE + TTE_DATA]
+
+ /*
+ * If the mod bit is clear, clear the write bit too.
+ */
+1: andcc %g5, TD_MOD, %g1
+ movz %xcc, TD_W, %g1
+ andn %g5, %g1, %g5
+
+ /*
+ * Load the tte data into the TLB and retry the instruction.
+ */
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
+ retry
+
+ /*
+ * For now just bail. This might cause a red state exception,
+ * but oh well.
+ */
+2: DEBUGGER()
+ .align 128
+ .endm
+
+ .macro tl1_dmmu_prot
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_DMMU_PROT | T_KERNEL, %o0
+ .align 128
+ .endm
+
+ .macro tl1_spill_0_n
+ SPILL(stx, %sp + SPOFF, EMPTY)
+ saved
+ retry
+ .align 128
+ .endm
+
+ .macro tl1_spill_bad count
+ .rept \count
+ tl1_wide T_SPILL
+ .endr
+ .endm
+
+ .macro tl1_fill_0_n
+ FILL(ldx, %sp + SPOFF, EMPTY)
+ restored
+ retry
+ .align 128
+ .endm
+
+ .macro tl1_fill_bad count
+ .rept \count
+ tl1_wide T_FILL
+ .endr
+ .endm
+
+ .macro tl1_breakpoint
+ b %xcc, tl1_breakpoint_trap
+ nop
+ .align 32
+ .endm
+
+ENTRY(tl1_breakpoint_trap)
+ save %sp, -(CCFSZ + KF_SIZEOF), %sp
+ flushw
+ stx %fp, [%sp + SPOFF + CCFSZ + KF_FP]
+ mov T_BREAKPOINT | T_KERNEL, %o0
+ b %xcc, tl1_trap
+ add %sp, SPOFF + CCFSZ, %o1
+END(tl1_breakpoint_trap)
+
+ .macro tl1_soft count
+ tl1_reserved \count
+ .endm
+
+ .sect .trap
+ .align 0x8000
+ .globl tl0_base
+
+tl0_base:
+ tl0_reserved 1 ! 0x0 unused
+tl0_power_on:
+ tl0_gen T_POWER_ON ! 0x1 power on reset
+tl0_watchdog:
+ tl0_gen T_WATCHDOG ! 0x2 watchdog reset
+tl0_reset_ext:
+ tl0_gen T_RESET_EXT ! 0x3 externally initiated reset
+tl0_reset_soft:
+ tl0_gen T_RESET_SOFT ! 0x4 software initiated reset
+tl0_red_state:
+ tl0_gen T_RED_STATE ! 0x5 red state exception
+ tl0_reserved 2 ! 0x6-0x7 reserved
+tl0_insn_excptn:
+ tl0_gen T_INSN_EXCPTN ! 0x8 instruction access exception
+ tl0_reserved 1 ! 0x9 reserved
+tl0_insn_error:
+ tl0_gen T_INSN_ERROR ! 0xa instruction access error
+ tl0_reserved 5 ! 0xb-0xf reserved
+tl0_insn_illegal:
+ tl0_gen T_INSN_ILLEGAL ! 0x10 illegal instruction
+tl0_priv_opcode:
+ tl0_gen T_PRIV_OPCODE ! 0x11 privileged opcode
+ tl0_reserved 14 ! 0x12-0x1f reserved
+tl0_fp_disabled:
+ tl0_gen T_FP_DISABLED ! 0x20 floating point disabled
+tl0_fp_ieee:
+ tl0_gen T_FP_IEEE ! 0x21 floating point exception ieee
+tl0_fp_other:
+ tl0_gen T_FP_OTHER ! 0x22 floating point exception other
+tl0_tag_ovflw:
+ tl0_gen T_TAG_OVFLW ! 0x23 tag overflow
+tl0_clean_window:
+ clean_window ! 0x24 clean window
+tl0_divide:
+ tl0_gen T_DIVIDE ! 0x28 division by zero
+ tl0_reserved 7 ! 0x29-0x2f reserved
+tl0_data_excptn:
+ tl0_gen T_DATA_EXCPTN ! 0x30 data access exception
+ tl0_reserved 1 ! 0x31 reserved
+tl0_data_error:
+ tl0_gen T_DATA_ERROR ! 0x32 data access error
+ tl0_reserved 1 ! 0x33 reserved
+tl0_align:
+ tl0_gen T_ALIGN ! 0x34 memory address not aligned
+tl0_align_lddf:
+ tl0_gen T_ALIGN_LDDF ! 0x35 lddf memory address not aligned
+tl0_align_stdf:
+ tl0_gen T_ALIGN_STDF ! 0x36 stdf memory address not aligned
+tl0_priv_action:
+ tl0_gen T_PRIV_ACTION ! 0x37 privileged action
+ tl0_reserved 9 ! 0x38-0x40 reserved
+tl0_intr_level:
+ tl0_intr_level ! 0x41-0x4f interrupt level 1 to 15
+ tl0_reserved 16 ! 0x50-0x5f reserved
+tl0_intr_vector:
+ tl0_intr_vector ! 0x60 interrupt vector
+tl0_watch_phys:
+ tl0_gen T_WATCH_PHYS ! 0x61 physical address watchpoint
+tl0_watch_virt:
+ tl0_gen T_WATCH_VIRT ! 0x62 virtual address watchpoint
+tl0_ecc:
+ tl0_gen T_ECC ! 0x63 corrected ecc error
+tl0_immu_miss:
+ tl0_immu_miss ! 0x64 fast instruction access mmu miss
+tl0_dmmu_miss:
+ tl0_dmmu_miss ! 0x68 fast data access mmu miss
+tl0_dmmu_prot:
+ tl0_dmmu_prot ! 0x6c fast data access protection
+ tl0_reserved 16 ! 0x70-0x7f reserved
+tl0_spill_0_n:
+ tl0_spill_0_n ! 0x80 spill 0 normal
+tl0_spill_bad:
+ tl0_spill_bad 15 ! 0x84-0xbf spill normal, other
+tl0_fill_0_n:
+ tl0_fill_0_n ! 0xc0 fill 0 normal
+tl0_fill_bad:
+ tl0_fill_bad 15 ! 0xc4-0xff fill normal, other
+tl0_sun_syscall:
+ tl0_reserved 1 ! 0x100 sun system call
+tl0_breakpoint:
+ tl0_gen T_BREAKPOINT ! 0x101 breakpoint
+ tl0_soft 126 ! 0x102-0x17f trap instruction
+ tl0_reserved 128 ! 0x180-0x1ff reserved
+
+tl1_base:
+ tl1_reserved 1 ! 0x200 unused
+tl1_power_on:
+ tl1_gen T_POWER_ON ! 0x201 power on reset
+tl1_watchdog:
+ tl1_gen T_WATCHDOG ! 0x202 watchdog reset
+tl1_reset_ext:
+ tl1_gen T_RESET_EXT ! 0x203 externally initiated reset
+tl1_reset_soft:
+ tl1_gen T_RESET_SOFT ! 0x204 software initiated reset
+tl1_red_state:
+ tl1_gen T_RED_STATE ! 0x205 red state exception
+ tl1_reserved 2 ! 0x206-0x207 reserved
+tl1_insn_excptn:
+ tl1_insn_excptn ! 0x208 instruction access exception
+ tl1_reserved 1 ! 0x209 reserved
+tl1_insn_error:
+ tl1_gen T_INSN_ERROR ! 0x20a instruction access error
+ tl1_reserved 5 ! 0x20b-0x20f reserved
+tl1_insn_illegal:
+ tl1_gen T_INSN_ILLEGAL ! 0x210 illegal instruction
+tl1_priv_opcode:
+ tl1_gen T_PRIV_OPCODE ! 0x211 privileged opcode
+ tl1_reserved 14 ! 0x212-0x21f reserved
+tl1_fp_disabled:
+ tl1_gen T_FP_DISABLED ! 0x220 floating point disabled
+tl1_fp_ieee:
+ tl1_gen T_FP_IEEE ! 0x221 floating point exception ieee
+tl1_fp_other:
+ tl1_gen T_FP_OTHER ! 0x222 floating point exception other
+tl1_tag_ovflw:
+ tl1_gen T_TAG_OVFLW ! 0x223 tag overflow
+tl1_clean_window:
+ clean_window ! 0x224 clean window
+tl1_divide:
+ tl1_gen T_DIVIDE ! 0x228 division by zero
+ tl1_reserved 7 ! 0x229-0x22f reserved
+tl1_data_excptn:
+ tl1_gen T_DATA_EXCPTN ! 0x230 data access exception
+ tl1_reserved 1 ! 0x231 reserved
+tl1_data_error:
+ tl1_gen T_DATA_ERROR ! 0x232 data access error
+ tl1_reserved 1 ! 0x233 reserved
+tl1_align:
+ tl1_align ! 0x234 memory address not aligned
+tl1_align_lddf:
+ tl1_gen T_ALIGN_LDDF ! 0x235 lddf memory address not aligned
+tl1_align_stdf:
+ tl1_gen T_ALIGN_STDF ! 0x236 stdf memory address not aligned
+tl1_priv_action:
+ tl1_gen T_PRIV_ACTION ! 0x237 privileged action
+ tl1_reserved 9 ! 0x238-0x240 reserved
+tl1_intr_level:
+ tl1_intr_level ! 0x241-0x24f interrupt level 1 to 15
+ tl1_reserved 16 ! 0x250-0x25f reserved
+tl1_intr_vector:
+ tl1_intr_vector ! 0x260 interrupt vector
+tl1_watch_phys:
+ tl1_gen T_WATCH_PHYS ! 0x261 physical address watchpoint
+tl1_watch_virt:
+ tl1_gen T_WATCH_VIRT ! 0x262 virtual address watchpoint
+tl1_ecc:
+ tl1_gen T_ECC ! 0x263 corrected ecc error
+tl1_immu_miss:
+ tl1_immu_miss ! 0x264 fast instruction access mmu miss
+tl1_dmmu_miss:
+ tl1_dmmu_miss ! 0x268 fast data access mmu miss
+tl1_dmmu_prot:
+ tl1_dmmu_prot ! 0x26c fast data access protection
+ tl1_reserved 16 ! 0x270-0x27f reserved
+tl1_spill_0_n:
+ tl1_spill_0_n ! 0x280 spill 0 normal
+tl1_spill_bad:
+ tl1_spill_bad 15 ! 0x284-0x2bf spill normal, other
+tl1_fill_0_n:
+ tl1_fill_0_n ! 0x2c0 fill 0 normal
+tl1_fill_bad:
+ tl1_fill_bad 15 ! 0x2c4-0x2ff fill normal, other
+ tl1_reserved 1 ! 0x300 trap instruction
+tl1_breakpoint:
+ tl1_breakpoint ! 0x301 breakpoint
+ tl1_soft 126 ! 0x302-0x37f trap instruction
+ tl1_reserved 128 ! 0x380-0x3ff reserved
+
+ENTRY(tl0_trap)
+ illtrap
+END(tl0_trap)
+
+/*
+ * void tl1_trap(u_long o0, u_long o1, u_long o2, u_long type)
+ */
+ENTRY(tl1_trap)
+ sub %sp, TF_SIZEOF, %sp
+ rdpr %tstate, %l0
+ stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
+ rdpr %tpc, %l1
+ stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
+ rdpr %tnpc, %l2
+ stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
+
+ wrpr %g0, 1, %tl
+ rdpr %pstate, %l7
+ wrpr %l7, PSTATE_AG | PSTATE_IE, %pstate
+
+ stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
+ stx %o1, [%sp + SPOFF + CCFSZ + TF_ARG]
+
+ stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
+ stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
+ stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
+ stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
+ stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
+ stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
+ stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
+
+ call trap
+ add %sp, CCFSZ + SPOFF, %o0
+
+ ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
+ ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
+ ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
+ ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
+ ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
+ ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
+ ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
+
+ ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
+ ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l1
+ ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
+
+ rdpr %pstate, %o0
+ wrpr %o0, PSTATE_AG | PSTATE_IE, %pstate
+
+ wrpr %g0, 2, %tl
+ wrpr %l0, 0, %tstate
+ wrpr %l1, 0, %tpc
+ wrpr %l2, 0, %tnpc
+
+ restore
+ retry
+END(tl1_trap)
+
+ENTRY(fork_trampoline)
+ mov %l0, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call fork_exit
+ nop
+ DEBUGGER()
+END(fork_trampoline)
diff --git a/sys/sparc64/sparc64/exception.s b/sys/sparc64/sparc64/exception.s
new file mode 100644
index 0000000..9abc581
--- /dev/null
+++ b/sys/sparc64/sparc64/exception.s
@@ -0,0 +1,603 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_ddb.h"
+
+#include <machine/asi.h>
+#include <machine/asmacros.h>
+#include <machine/trap.h>
+
+#include "assym.s"
+
+#define SPILL(storer, base, asi) \
+ storer %l0, [base + F_L0] asi ; \
+ storer %l1, [base + F_L1] asi ; \
+ storer %l2, [base + F_L2] asi ; \
+ storer %l3, [base + F_L3] asi ; \
+ storer %l4, [base + F_L4] asi ; \
+ storer %l5, [base + F_L5] asi ; \
+ storer %l6, [base + F_L6] asi ; \
+ storer %l7, [base + F_L7] asi ; \
+ storer %i0, [base + F_I0] asi ; \
+ storer %i1, [base + F_I1] asi ; \
+ storer %i2, [base + F_I2] asi ; \
+ storer %i3, [base + F_I3] asi ; \
+ storer %i4, [base + F_I4] asi ; \
+ storer %i5, [base + F_I5] asi ; \
+ storer %i6, [base + F_I6] asi ; \
+ storer %i7, [base + F_I7] asi
+
+#define FILL(loader, base, asi) \
+ loader [base + F_L0] asi, %l0 ; \
+ loader [base + F_L1] asi, %l1 ; \
+ loader [base + F_L2] asi, %l2 ; \
+ loader [base + F_L3] asi, %l3 ; \
+ loader [base + F_L4] asi, %l4 ; \
+ loader [base + F_L5] asi, %l5 ; \
+ loader [base + F_L6] asi, %l6 ; \
+ loader [base + F_L7] asi, %l7 ; \
+ loader [base + F_I0] asi, %i0 ; \
+ loader [base + F_I1] asi, %i1 ; \
+ loader [base + F_I2] asi, %i2 ; \
+ loader [base + F_I3] asi, %i3 ; \
+ loader [base + F_I4] asi, %i4 ; \
+ loader [base + F_I5] asi, %i5 ; \
+ loader [base + F_I6] asi, %i6 ; \
+ loader [base + F_I7] asi, %i7
+
+DATA(intrnames)
+ .asciz "foo"
+DATA(eintrnames)
+
+DATA(intrcnt)
+ .long 0
+DATA(eintrcnt)
+
+ .macro clean_window
+ clr %o0
+ clr %o1
+ clr %o2
+ clr %o3
+ clr %o4
+ clr %o5
+ clr %o6
+ clr %o7
+ clr %l0
+ clr %l1
+ clr %l2
+ clr %l3
+ clr %l4
+ clr %l5
+ clr %l6
+ rdpr %cleanwin, %l7
+ inc %l7
+ wrpr %l7, 0, %cleanwin
+ clr %l7
+ retry
+ .align 128
+ .endm
+
+ .macro tl0_gen type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type, %o0
+ .align 32
+ .endm
+
+ .macro tl0_wide type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type, %o0
+ .align 128
+ .endm
+
+ .macro tl0_reserved count
+ .rept \count
+ tl0_gen T_RESERVED
+ .endr
+ .endm
+
+ .macro tl0_intr_level
+ tl0_reserved 15
+ .endm
+
+ .macro tl0_intr_vector
+ tl0_gen 0
+ .endm
+
+ .macro tl0_immu_miss
+ tl0_wide T_IMMU_MISS
+ .endm
+
+ .macro tl0_dmmu_miss
+ tl0_wide T_DMMU_MISS
+ .endm
+
+ .macro tl0_dmmu_prot
+ tl0_wide T_DMMU_PROT
+ .endm
+
+ .macro tl0_spill_0_n
+ wr %g0, ASI_AIUP, %asi
+ SPILL(stxa, %sp + SPOFF, %asi)
+ saved
+ retry
+ .align 128
+ .endm
+
+ .macro tl0_spill_bad count
+ .rept \count
+ tl0_wide T_SPILL
+ .endr
+ .endm
+
+ .macro tl0_fill_0_n
+ wr %g0, ASI_AIUP, %asi
+ FILL(ldxa, %sp + SPOFF, %asi)
+ restored
+ retry
+ .align 128
+ .endm
+
+ .macro tl0_fill_bad count
+ .rept \count
+ tl0_wide T_FILL
+ .endr
+ .endm
+
+ .macro tl0_soft count
+ tl0_reserved \count
+ .endm
+
+ .macro tl1_gen type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type | T_KERNEL, %o0
+ .align 32
+ .endm
+
+ .macro tl1_wide type
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov \type | T_KERNEL, %o0
+ .align 128
+ .endm
+
+ .macro tl1_reserved count
+ .rept \count
+ tl1_gen T_RESERVED
+ .endr
+ .endm
+
+ .macro tl1_insn_excptn
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_INSN_EXCPTN | T_KERNEL, %o0
+ .align 32
+ .endm
+
+ .macro tl1_align
+ b %xcc, tl1_sfsr_trap
+ nop
+ .align 32
+ .endm
+
+ENTRY(tl1_sfsr_trap)
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_SFAR] %asi, %g1
+ ldxa [%g0 + AA_DMMU_SFSR] %asi, %g2
+ stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
+ membar #Sync
+ save %sp, -(CCFSZ + MF_SIZEOF), %sp
+ stx %g1, [%sp + SPOFF + CCFSZ + MF_SFAR]
+ stx %g2, [%sp + SPOFF + CCFSZ + MF_SFSR]
+ mov T_ALIGN | T_KERNEL, %o0
+ b %xcc, tl1_trap
+ add %sp, SPOFF + CCFSZ, %o1
+END(tl1_sfsr_trap)
+
+ .macro tl1_intr_level
+ tl1_reserved 15
+ .endm
+
+ .macro tl1_intr_vector
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_INTERRUPT | T_KERNEL, %o0
+ .align 8
+ .endm
+
+ .macro tl1_immu_miss
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_IMMU_MISS | T_KERNEL, %o0
+ .align 128
+ .endm
+
+ .macro tl1_dmmu_miss
+ /*
+ * Load the target tte tag, and extract the context. If the context
+ * is non-zero handle as user space access. In either case, load the
+ * tsb 8k pointer.
+ */
+ ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g1
+ srlx %g1, TT_CTX_SHIFT, %g2
+ brnz,pn %g2, 2f
+ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g2
+
+ /*
+ * Convert the tte pointer to an stte pointer, and add extra bits to
+ * accommodate a large tsb.
+ */
+ sllx %g2, STTE_SHIFT - TTE_SHIFT, %g2
+#ifdef notyet
+ mov AA_DMMU_TAR, %g3
+ ldxa [%g3] ASI_DMMU, %g3
+ srlx %g3, TSB_1M_STTE_SHIFT, %g3
+ and %g3, TSB_KERNEL_MASK >> TSB_1M_STTE_SHIFT, %g3
+ sllx %g3, TSB_1M_STTE_SHIFT, %g3
+ add %g2, %g3, %g2
+#endif
+
+ /*
+ * Load the tte, check that it's valid and that the tags match.
+ */
+ ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+ brgez,pn %g5, 2f
+ cmp %g4, %g1
+ bne %xcc, 2f
+ EMPTY
+
+ /*
+ * Set the reference bit, if it's currently clear.
+ */
+ andcc %g5, TD_REF, %g0
+ bnz %xcc, 1f
+ or %g5, TD_REF, %g1
+ stx %g1, [%g2 + ST_TTE + TTE_DATA]
+
+ /*
+ * If the mod bit is clear, clear the write bit too.
+ */
+1: andcc %g5, TD_MOD, %g1
+ movz %xcc, TD_W, %g1
+ andn %g5, %g1, %g5
+
+ /*
+ * Load the tte data into the TLB and retry the instruction.
+ */
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
+ retry
+
+ /*
+ * For now just bail. This might cause a red state exception,
+ * but oh well.
+ */
+2: DEBUGGER()
+ .align 128
+ .endm
+
+ .macro tl1_dmmu_prot
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
+ save %sp, -CCFSZ, %sp
+ b %xcc, tl1_trap
+ mov T_DMMU_PROT | T_KERNEL, %o0
+ .align 128
+ .endm
+
+ .macro tl1_spill_0_n
+ SPILL(stx, %sp + SPOFF, EMPTY)
+ saved
+ retry
+ .align 128
+ .endm
+
+ .macro tl1_spill_bad count
+ .rept \count
+ tl1_wide T_SPILL
+ .endr
+ .endm
+
+ .macro tl1_fill_0_n
+ FILL(ldx, %sp + SPOFF, EMPTY)
+ restored
+ retry
+ .align 128
+ .endm
+
+ .macro tl1_fill_bad count
+ .rept \count
+ tl1_wide T_FILL
+ .endr
+ .endm
+
+ .macro tl1_breakpoint
+ b %xcc, tl1_breakpoint_trap
+ nop
+ .align 32
+ .endm
+
+ENTRY(tl1_breakpoint_trap)
+ save %sp, -(CCFSZ + KF_SIZEOF), %sp
+ flushw
+ stx %fp, [%sp + SPOFF + CCFSZ + KF_FP]
+ mov T_BREAKPOINT | T_KERNEL, %o0
+ b %xcc, tl1_trap
+ add %sp, SPOFF + CCFSZ, %o1
+END(tl1_breakpoint_trap)
+
+ .macro tl1_soft count
+ tl1_reserved \count
+ .endm
+
+ .sect .trap
+ .align 0x8000
+ .globl tl0_base
+
+tl0_base:
+ tl0_reserved 1 ! 0x0 unused
+tl0_power_on:
+ tl0_gen T_POWER_ON ! 0x1 power on reset
+tl0_watchdog:
+ tl0_gen T_WATCHDOG ! 0x2 watchdog reset
+tl0_reset_ext:
+ tl0_gen T_RESET_EXT ! 0x3 externally initiated reset
+tl0_reset_soft:
+ tl0_gen T_RESET_SOFT ! 0x4 software initiated reset
+tl0_red_state:
+ tl0_gen T_RED_STATE ! 0x5 red state exception
+ tl0_reserved 2 ! 0x6-0x7 reserved
+tl0_insn_excptn:
+ tl0_gen T_INSN_EXCPTN ! 0x8 instruction access exception
+ tl0_reserved 1 ! 0x9 reserved
+tl0_insn_error:
+ tl0_gen T_INSN_ERROR ! 0xa instruction access error
+ tl0_reserved 5 ! 0xb-0xf reserved
+tl0_insn_illegal:
+ tl0_gen T_INSN_ILLEGAL ! 0x10 illegal instruction
+tl0_priv_opcode:
+ tl0_gen T_PRIV_OPCODE ! 0x11 privileged opcode
+ tl0_reserved 14 ! 0x12-0x1f reserved
+tl0_fp_disabled:
+ tl0_gen T_FP_DISABLED ! 0x20 floating point disabled
+tl0_fp_ieee:
+ tl0_gen T_FP_IEEE ! 0x21 floating point exception ieee
+tl0_fp_other:
+ tl0_gen T_FP_OTHER ! 0x22 floating point exception other
+tl0_tag_ovflw:
+ tl0_gen T_TAG_OVFLW ! 0x23 tag overflow
+tl0_clean_window:
+ clean_window ! 0x24 clean window
+tl0_divide:
+ tl0_gen T_DIVIDE ! 0x28 division by zero
+ tl0_reserved 7 ! 0x29-0x2f reserved
+tl0_data_excptn:
+ tl0_gen T_DATA_EXCPTN ! 0x30 data access exception
+ tl0_reserved 1 ! 0x31 reserved
+tl0_data_error:
+ tl0_gen T_DATA_ERROR ! 0x32 data access error
+ tl0_reserved 1 ! 0x33 reserved
+tl0_align:
+ tl0_gen T_ALIGN ! 0x34 memory address not aligned
+tl0_align_lddf:
+ tl0_gen T_ALIGN_LDDF ! 0x35 lddf memory address not aligned
+tl0_align_stdf:
+ tl0_gen T_ALIGN_STDF ! 0x36 stdf memory address not aligned
+tl0_priv_action:
+ tl0_gen T_PRIV_ACTION ! 0x37 privileged action
+ tl0_reserved 9 ! 0x38-0x40 reserved
+tl0_intr_level:
+ tl0_intr_level ! 0x41-0x4f interrupt level 1 to 15
+ tl0_reserved 16 ! 0x50-0x5f reserved
+tl0_intr_vector:
+ tl0_intr_vector ! 0x60 interrupt vector
+tl0_watch_phys:
+ tl0_gen T_WATCH_PHYS ! 0x61 physical address watchpoint
+tl0_watch_virt:
+ tl0_gen T_WATCH_VIRT ! 0x62 virtual address watchpoint
+tl0_ecc:
+ tl0_gen T_ECC ! 0x63 corrected ecc error
+tl0_immu_miss:
+ tl0_immu_miss ! 0x64 fast instruction access mmu miss
+tl0_dmmu_miss:
+ tl0_dmmu_miss ! 0x68 fast data access mmu miss
+tl0_dmmu_prot:
+ tl0_dmmu_prot ! 0x6c fast data access protection
+ tl0_reserved 16 ! 0x70-0x7f reserved
+tl0_spill_0_n:
+ tl0_spill_0_n ! 0x80 spill 0 normal
+tl0_spill_bad:
+ tl0_spill_bad 15 ! 0x84-0xbf spill normal, other
+tl0_fill_0_n:
+ tl0_fill_0_n ! 0xc0 fill 0 normal
+tl0_fill_bad:
+ tl0_fill_bad 15 ! 0xc4-0xff fill normal, other
+tl0_sun_syscall:
+ tl0_reserved 1 ! 0x100 sun system call
+tl0_breakpoint:
+ tl0_gen T_BREAKPOINT ! 0x101 breakpoint
+ tl0_soft 126 ! 0x102-0x17f trap instruction
+ tl0_reserved 128 ! 0x180-0x1ff reserved
+
+tl1_base:
+ tl1_reserved 1 ! 0x200 unused
+tl1_power_on:
+ tl1_gen T_POWER_ON ! 0x201 power on reset
+tl1_watchdog:
+ tl1_gen T_WATCHDOG ! 0x202 watchdog reset
+tl1_reset_ext:
+ tl1_gen T_RESET_EXT ! 0x203 externally initiated reset
+tl1_reset_soft:
+ tl1_gen T_RESET_SOFT ! 0x204 software initiated reset
+tl1_red_state:
+ tl1_gen T_RED_STATE ! 0x205 red state exception
+ tl1_reserved 2 ! 0x206-0x207 reserved
+tl1_insn_excptn:
+ tl1_insn_excptn ! 0x208 instruction access exception
+ tl1_reserved 1 ! 0x209 reserved
+tl1_insn_error:
+ tl1_gen T_INSN_ERROR ! 0x20a instruction access error
+ tl1_reserved 5 ! 0x20b-0x20f reserved
+tl1_insn_illegal:
+ tl1_gen T_INSN_ILLEGAL ! 0x210 illegal instruction
+tl1_priv_opcode:
+ tl1_gen T_PRIV_OPCODE ! 0x211 privileged opcode
+ tl1_reserved 14 ! 0x212-0x21f reserved
+tl1_fp_disabled:
+ tl1_gen T_FP_DISABLED ! 0x220 floating point disabled
+tl1_fp_ieee:
+ tl1_gen T_FP_IEEE ! 0x221 floating point exception ieee
+tl1_fp_other:
+ tl1_gen T_FP_OTHER ! 0x222 floating point exception other
+tl1_tag_ovflw:
+ tl1_gen T_TAG_OVFLW ! 0x223 tag overflow
+tl1_clean_window:
+ clean_window ! 0x224 clean window
+tl1_divide:
+ tl1_gen T_DIVIDE ! 0x228 division by zero
+ tl1_reserved 7 ! 0x229-0x22f reserved
+tl1_data_excptn:
+ tl1_gen T_DATA_EXCPTN ! 0x230 data access exception
+ tl1_reserved 1 ! 0x231 reserved
+tl1_data_error:
+ tl1_gen T_DATA_ERROR ! 0x232 data access error
+ tl1_reserved 1 ! 0x233 reserved
+tl1_align:
+ tl1_align ! 0x234 memory address not aligned
+tl1_align_lddf:
+ tl1_gen T_ALIGN_LDDF ! 0x235 lddf memory address not aligned
+tl1_align_stdf:
+ tl1_gen T_ALIGN_STDF ! 0x236 stdf memory address not aligned
+tl1_priv_action:
+ tl1_gen T_PRIV_ACTION ! 0x237 privileged action
+ tl1_reserved 9 ! 0x238-0x240 reserved
+tl1_intr_level:
+ tl1_intr_level ! 0x241-0x24f interrupt level 1 to 15
+ tl1_reserved 16 ! 0x250-0x25f reserved
+tl1_intr_vector:
+ tl1_intr_vector ! 0x260 interrupt vector
+tl1_watch_phys:
+ tl1_gen T_WATCH_PHYS ! 0x261 physical address watchpoint
+tl1_watch_virt:
+ tl1_gen T_WATCH_VIRT ! 0x262 virtual address watchpoint
+tl1_ecc:
+ tl1_gen T_ECC ! 0x263 corrected ecc error
+tl1_immu_miss:
+ tl1_immu_miss ! 0x264 fast instruction access mmu miss
+tl1_dmmu_miss:
+ tl1_dmmu_miss ! 0x268 fast data access mmu miss
+tl1_dmmu_prot:
+ tl1_dmmu_prot ! 0x26c fast data access protection
+ tl1_reserved 16 ! 0x270-0x27f reserved
+tl1_spill_0_n:
+ tl1_spill_0_n ! 0x280 spill 0 normal
+tl1_spill_bad:
+ tl1_spill_bad 15 ! 0x284-0x2bf spill normal, other
+tl1_fill_0_n:
+ tl1_fill_0_n ! 0x2c0 fill 0 normal
+tl1_fill_bad:
+ tl1_fill_bad 15 ! 0x2c4-0x2ff fill normal, other
+ tl1_reserved 1 ! 0x300 trap instruction
+tl1_breakpoint:
+ tl1_breakpoint ! 0x301 breakpoint
+ tl1_soft 126 ! 0x302-0x37f trap instruction
+ tl1_reserved 128 ! 0x380-0x3ff reserved
+
+ENTRY(tl0_trap)
+ illtrap
+END(tl0_trap)
+
+/*
+ * void tl1_trap(u_long o0, u_long o1, u_long o2, u_long type)
+ */
+ENTRY(tl1_trap)
+ sub %sp, TF_SIZEOF, %sp
+ rdpr %tstate, %l0
+ stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
+ rdpr %tpc, %l1
+ stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
+ rdpr %tnpc, %l2
+ stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
+
+ wrpr %g0, 1, %tl
+ rdpr %pstate, %l7
+ wrpr %l7, PSTATE_AG | PSTATE_IE, %pstate
+
+ stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
+ stx %o1, [%sp + SPOFF + CCFSZ + TF_ARG]
+
+ stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
+ stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
+ stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
+ stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
+ stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
+ stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
+ stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
+
+ call trap
+ add %sp, CCFSZ + SPOFF, %o0
+
+ ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
+ ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
+ ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
+ ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
+ ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
+ ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
+ ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
+
+ ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
+ ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l1
+ ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
+
+ rdpr %pstate, %o0
+ wrpr %o0, PSTATE_AG | PSTATE_IE, %pstate
+
+ wrpr %g0, 2, %tl
+ wrpr %l0, 0, %tstate
+ wrpr %l1, 0, %tpc
+ wrpr %l2, 0, %tnpc
+
+ restore
+ retry
+END(tl1_trap)
+
+ENTRY(fork_trampoline)
+ mov %l0, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call fork_exit
+ nop
+ DEBUGGER()
+END(fork_trampoline)
diff --git a/sys/sparc64/sparc64/genassym.c b/sys/sparc64/sparc64/genassym.c
new file mode 100644
index 0000000..ba956f8
--- /dev/null
+++ b/sys/sparc64/sparc64/genassym.c
@@ -0,0 +1,163 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/assym.h>
+#include <sys/errno.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/user.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/asi.h>
+#include <machine/vmparam.h>
+#include <machine/cpufunc.h>
+#include <machine/frame.h>
+#include <machine/globals.h>
+#include <machine/pcb.h>
+#include <machine/pstate.h>
+#include <machine/setjmp.h>
+#include <machine/pv.h>
+#include <machine/tte.h>
+#include <machine/tlb.h>
+#include <machine/tsb.h>
+
+/* Error numbers returned by the assembly copy/fetch fault paths. */
+ASSYM(EFAULT, EFAULT);
+ASSYM(ENAMETOOLONG, ENAMETOOLONG);
+
+/* U-area / page sizing used by locore and the trap code. */
+ASSYM(UPAGES, UPAGES);
+ASSYM(PAGE_SIZE, PAGE_SIZE);
+
+/* %pstate register bits (see machine/pstate.h). */
+ASSYM(PSTATE_AG, PSTATE_AG);
+ASSYM(PSTATE_IE, PSTATE_IE);
+ASSYM(PSTATE_PRIV, PSTATE_PRIV);
+ASSYM(PSTATE_PEF, PSTATE_PEF);
+ASSYM(PSTATE_MG, PSTATE_MG);
+ASSYM(PSTATE_IG, PSTATE_IG);
+
+/* TSB geometry constants used by the tlb-miss handlers. */
+ASSYM(TTE_SHIFT, TTE_SHIFT);
+ASSYM(STTE_SHIFT, STTE_SHIFT);
+ASSYM(TSB_PRIMARY_BUCKET_SHIFT, TSB_PRIMARY_BUCKET_SHIFT);
+ASSYM(TSB_KERNEL_MIN_ADDRESS, TSB_KERNEL_MIN_ADDRESS);
+ASSYM(TSB_MASK_WIDTH, TSB_MASK_WIDTH);
+ASSYM(TSB_SECONDARY_BUCKET_SHIFT, TSB_SECONDARY_BUCKET_SHIFT);
+ASSYM(TSB_BUCKET_SPREAD_SHIFT, TSB_BUCKET_SPREAD_SHIFT);
+ASSYM(TSB_SECONDARY_STTE_MASK, TSB_SECONDARY_STTE_MASK);
+ASSYM(TSB_SECONDARY_STTE_SHIFT, TSB_SECONDARY_STTE_SHIFT);
+ASSYM(TSB_LEVEL1_BUCKET_MASK, TSB_LEVEL1_BUCKET_MASK);
+ASSYM(TSB_LEVEL1_BUCKET_SHIFT, TSB_LEVEL1_BUCKET_SHIFT);
+ASSYM(TSB_1M_STTE_SHIFT, TSB_1M_STTE_SHIFT);
+ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
+
+ASSYM(PAGE_SHIFT, PAGE_SHIFT);
+ASSYM(PAGE_MASK, PAGE_MASK);
+
+/* Field offsets and sizes of the tte/stte structures. */
+ASSYM(TTE_DATA, offsetof(struct tte, tte_data));
+ASSYM(TTE_TAG, offsetof(struct tte, tte_tag));
+ASSYM(ST_TTE, offsetof(struct stte, st_tte));
+ASSYM(STTE_SIZEOF, sizeof(struct stte));
+
+/* TTE data-word bit fields. */
+ASSYM(TD_VA_LOW_MASK, TD_VA_LOW_MASK);
+ASSYM(TD_VA_LOW_SHIFT, TD_VA_LOW_SHIFT);
+ASSYM(TD_MOD, TD_MOD);
+ASSYM(TD_REF, TD_REF);
+ASSYM(TD_W, TD_W);
+
+/* TTE tag-word bit fields. */
+ASSYM(TT_VA_MASK, TT_VA_MASK);
+ASSYM(TT_VA_SHIFT, TT_VA_SHIFT);
+ASSYM(TT_CTX_SHIFT, TT_CTX_SHIFT);
+
+/* Per-cpu globaldata offsets used by PCPU accessors in assembly. */
+ASSYM(GD_CURPROC, offsetof(struct globaldata, gd_curproc));
+ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb));
+ASSYM(GD_FPCURPROC, offsetof(struct globaldata, gd_fpcurproc));
+
+/* setjmp/longjmp buffer slots. */
+ASSYM(JB_FP, offsetof(struct _jmp_buf, _jb[_JB_FP]));
+ASSYM(JB_PC, offsetof(struct _jmp_buf, _jb[_JB_PC]));
+ASSYM(JB_SP, offsetof(struct _jmp_buf, _jb[_JB_SP]));
+
+ASSYM(P_ADDR, offsetof(struct proc, p_addr));
+ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
+
+/* pcb fields touched by cpu_switch and the onfault copy routines. */
+ASSYM(PCB_FP, offsetof(struct pcb, pcb_fp));
+ASSYM(PCB_PC, offsetof(struct pcb, pcb_pc));
+ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+
+/* Register-window save-area layout of a stack frame. */
+ASSYM(F_L0, offsetof(struct frame, f_local[0]));
+ASSYM(F_L1, offsetof(struct frame, f_local[1]));
+ASSYM(F_L2, offsetof(struct frame, f_local[2]));
+ASSYM(F_L3, offsetof(struct frame, f_local[3]));
+ASSYM(F_L4, offsetof(struct frame, f_local[4]));
+ASSYM(F_L5, offsetof(struct frame, f_local[5]));
+ASSYM(F_L6, offsetof(struct frame, f_local[6]));
+ASSYM(F_L7, offsetof(struct frame, f_local[7]));
+ASSYM(F_I0, offsetof(struct frame, f_in[0]));
+ASSYM(F_I1, offsetof(struct frame, f_in[1]));
+ASSYM(F_I2, offsetof(struct frame, f_in[2]));
+ASSYM(F_I3, offsetof(struct frame, f_in[3]));
+ASSYM(F_I4, offsetof(struct frame, f_in[4]));
+ASSYM(F_I5, offsetof(struct frame, f_in[5]));
+ASSYM(F_I6, offsetof(struct frame, f_in[6]));
+ASSYM(F_I7, offsetof(struct frame, f_in[7]));
+ASSYM(CCFSZ, sizeof(struct frame));
+ASSYM(SPOFF, SPOFF);
+
+/* Debugger (kdb) frame. */
+ASSYM(KF_FP, offsetof(struct kdbframe, kf_fp));
+ASSYM(KF_SIZEOF, sizeof(struct kdbframe));
+
+/* MMU fault status frame filled in by the mmu trap handlers. */
+ASSYM(MF_SFAR, offsetof(struct mmuframe, mf_sfar));
+ASSYM(MF_SFSR, offsetof(struct mmuframe, mf_sfsr));
+ASSYM(MF_TAR, offsetof(struct mmuframe, mf_tar));
+ASSYM(MF_SIZEOF, sizeof(struct mmuframe));
+
+/* Trapframe layout saved/restored by tl0_trap/tl1_trap. */
+ASSYM(TF_G0, offsetof(struct trapframe, tf_global[0]));
+ASSYM(TF_G1, offsetof(struct trapframe, tf_global[1]));
+ASSYM(TF_G2, offsetof(struct trapframe, tf_global[2]));
+ASSYM(TF_G3, offsetof(struct trapframe, tf_global[3]));
+ASSYM(TF_G4, offsetof(struct trapframe, tf_global[4]));
+ASSYM(TF_G5, offsetof(struct trapframe, tf_global[5]));
+ASSYM(TF_G6, offsetof(struct trapframe, tf_global[6]));
+ASSYM(TF_G7, offsetof(struct trapframe, tf_global[7]));
+ASSYM(TF_O0, offsetof(struct trapframe, tf_out[0]));
+ASSYM(TF_O1, offsetof(struct trapframe, tf_out[1]));
+ASSYM(TF_O2, offsetof(struct trapframe, tf_out[2]));
+ASSYM(TF_O3, offsetof(struct trapframe, tf_out[3]));
+ASSYM(TF_O4, offsetof(struct trapframe, tf_out[4]));
+ASSYM(TF_O5, offsetof(struct trapframe, tf_out[5]));
+ASSYM(TF_O6, offsetof(struct trapframe, tf_out[6]));
+ASSYM(TF_O7, offsetof(struct trapframe, tf_out[7]));
+ASSYM(TF_TSTATE, offsetof(struct trapframe, tf_tstate));
+ASSYM(TF_TPC, offsetof(struct trapframe, tf_tpc));
+ASSYM(TF_TNPC, offsetof(struct trapframe, tf_tnpc));
+ASSYM(TF_TYPE, offsetof(struct trapframe, tf_type));
+ASSYM(TF_ARG, offsetof(struct trapframe, tf_arg));
+ASSYM(TF_SIZEOF, sizeof(struct trapframe));
+
+ASSYM(U_PCB, offsetof(struct user, u_pcb));
diff --git a/sys/sparc64/sparc64/locore.S b/sys/sparc64/sparc64/locore.S
new file mode 100644
index 0000000..cdde7b9
--- /dev/null
+++ b/sys/sparc64/sparc64/locore.S
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asmacros.h>
+
+#include "assym.s"
+
+/*
+ * void _start(struct bootinfo *bi, u_long ofw_vec)
+ *
+ * Kernel entry point from the loader.  Stashes the arguments in globals
+ * across the register-window reset, points the stack at the top of
+ * proc0's statically allocated u-area, then runs sparc64_init() followed
+ * by mi_startup() (which never returns).
+ */
+ENTRY(_start)
+	/* Establish a known %pstate (wrpr with %g0 writes the immediate). */
+	wrpr %g0, PSTATE_IE|PSTATE_PRIV, %pstate
+	/* %o0/%o1 would be clobbered by save below; keep them in globals. */
+	mov %o0, %g1
+	mov %o1, %g2
+	/* Flush stale register windows and reset the window state. */
+	flushw
+	wrpr %g0, 1, %cwp
+	wrpr %g0, 0, %cleanwin
+
+	/* Stack = top of proc0's u-area, biased by SPOFF per the v9 ABI. */
+	setx user0 + UPAGES * PAGE_SIZE - SPOFF, %l0, %o5
+	save %o5, -CCFSZ, %sp
+
+	mov %g1, %o0
+	call sparc64_init
+	mov %g2, %o1
+	call mi_startup
+	nop
+	! NOTREACHED
+END(_start)
+
+ENTRY(sigcode)
+	/* Signal trampoline stub: traps if ever entered (unimplemented). */
+	illtrap
+esigcode:
+END(sigcode)
+
+/* Size of the signal trampoline, copied out for MI signal delivery. */
+DATA(szsigcode)
+	.long esigcode - sigcode
diff --git a/sys/sparc64/sparc64/locore.s b/sys/sparc64/sparc64/locore.s
new file mode 100644
index 0000000..cdde7b9
--- /dev/null
+++ b/sys/sparc64/sparc64/locore.s
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asmacros.h>
+
+#include "assym.s"
+
+/*
+ * void _start(struct bootinfo *bi, u_long ofw_vec)
+ *
+ * Kernel entry point from the loader.  Stashes the arguments in globals
+ * across the register-window reset, points the stack at the top of
+ * proc0's statically allocated u-area, then runs sparc64_init() followed
+ * by mi_startup() (which never returns).
+ */
+ENTRY(_start)
+	/* Establish a known %pstate (wrpr with %g0 writes the immediate). */
+	wrpr %g0, PSTATE_IE|PSTATE_PRIV, %pstate
+	/* %o0/%o1 would be clobbered by save below; keep them in globals. */
+	mov %o0, %g1
+	mov %o1, %g2
+	/* Flush stale register windows and reset the window state. */
+	flushw
+	wrpr %g0, 1, %cwp
+	wrpr %g0, 0, %cleanwin
+
+	/* Stack = top of proc0's u-area, biased by SPOFF per the v9 ABI. */
+	setx user0 + UPAGES * PAGE_SIZE - SPOFF, %l0, %o5
+	save %o5, -CCFSZ, %sp
+
+	mov %g1, %o0
+	call sparc64_init
+	mov %g2, %o1
+	call mi_startup
+	nop
+	! NOTREACHED
+END(_start)
+
+ENTRY(sigcode)
+	/* Signal trampoline stub: traps if ever entered (unimplemented). */
+	illtrap
+esigcode:
+END(sigcode)
+
+/* Size of the signal trampoline, copied out for MI signal delivery. */
+DATA(szsigcode)
+	.long esigcode - sigcode
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index bbc1f96..2ae5fe3 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -26,21 +26,52 @@
* $FreeBSD$
*/
+#include "opt_ddb.h"
+
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/cons.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
+#include <sys/timetc.h>
+#include <sys/user.h>
#include <dev/ofw/openfirm.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_extern.h>
+
+#include <ddb/ddb.h>
+
+#include <machine/bootinfo.h>
+#include <machine/frame.h>
#include <machine/md_var.h>
+#include <machine/pmap.h>
+#include <machine/pstate.h>
#include <machine/reg.h>
-void sparc64_init(ofw_vec_t *ofw_vec);
+typedef int ofw_vec_t(void *);
+
+extern char tl0_base[];
+extern char _end[];
+
+int physmem = 0;
int cold = 1;
long dumplo;
int Maxmem = 0;
@@ -48,14 +79,254 @@ int Maxmem = 0;
struct mtx Giant;
struct mtx sched_lock;
-struct user *proc0paddr;
+struct globaldata __globaldata;
+char user0[UPAGES * PAGE_SIZE];
+
+vm_offset_t clean_sva;
+vm_offset_t clean_eva;
+
+u_long ofw_vec;
+u_long ofw_tba;
+
+static vm_offset_t buffer_sva;
+static vm_offset_t buffer_eva;
+static vm_offset_t pager_sva;
+static vm_offset_t pager_eva;
+
+static struct timecounter tick_tc;
+
+static timecounter_get_t tick_get_timecount;
+void sparc64_init(struct bootinfo *bi, ofw_vec_t *vec);
+
+static void cpu_startup(void *);
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+/*
+ * Late (SI_SUB_CPU) startup: identify the boot cpu via the prom, register
+ * the %tick timecounter, and size/allocate the MI callout and buffer-cache
+ * tables.  The table-sizing code (marked XXX below) duplicates MI logic.
+ */
+static void
+cpu_startup(void *arg)
+{
+	vm_offset_t physmem_est;
+	vm_offset_t minaddr;
+	vm_offset_t maxaddr;
+	phandle_t child;
+	phandle_t root;
+	vm_offset_t va;
+	vm_size_t size;
+	char name[32];
+	char type[8];
+	u_int clock;
+	int factor;
+	caddr_t p;
+	int i;
+
+	/* Find the first node of device_type "cpu" in the prom tree. */
+	root = OF_peer(0);
+	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
+		OF_getprop(child, "device_type", type, sizeof(type));
+		if (strcmp(type, "cpu") == 0)
+			break;
+	}
+	if (child == 0)
+		panic("cpu_startup: no cpu\n");
+	OF_getprop(child, "name", name, sizeof(name));
+	OF_getprop(child, "clock-frequency", &clock, sizeof(clock));
+
+	/* The %tick register runs at the cpu clock frequency. */
+	tick_tc.tc_get_timecount = tick_get_timecount;
+	tick_tc.tc_poll_pps = NULL;
+	tick_tc.tc_counter_mask = ~0u;
+	tick_tc.tc_frequency = clock;
+	tick_tc.tc_name = "tick";
+	tc_init(&tick_tc);
+
+	/* Strip the "SUNW," vendor prefix from the cpu name, if present. */
+	p = name;
+	if (bcmp(p, "SUNW,", 5) == 0)
+		p += 5;
+	printf("CPU: %s Processor (%d.%02d MHz CPU)\n", p,
+	    (clock + 4999) / 1000000, ((clock + 4999) / 10000) % 100);
+#if 0
+	ver = rdpr(ver);
+	printf("manuf: %#lx impl: %#lx mask: %#lx maxtl: %#lx maxwin: %#lx\n",
+	    VER_MANUF(ver), VER_IMPL(ver), VER_MASK(ver), VER_MAXTL(ver),
+	    VER_MAXWIN(ver));
+#endif
+
+	/*
+	 * XXX make most of this MI and move to sys/kern.
+	 */
+
+	/*
+	 * Calculate callout wheel size.
+	 */
+	for (callwheelsize = 1, callwheelbits = 0; callwheelsize < ncallout;
+	    callwheelsize <<= 1, ++callwheelbits)
+		;
+	callwheelmask = callwheelsize - 1;
+
+	/*
+	 * Two passes: with va == 0 the valloc()s only measure the total
+	 * table size; after kmem_alloc() the second pass lays the tables
+	 * out in the allocated kva.
+	 */
+	size = 0;
+	va = 0;
+again:
+	p = (caddr_t)va;
+
+#define valloc(name, type, num) \
+	(name) = (type *)p; p = (caddr_t)((name) + (num))
+
+	valloc(callout, struct callout, ncallout);
+	valloc(callwheel, struct callout_tailq, callwheelsize);
+
+	if (kernel_map->first_free == NULL) {
+		printf("Warning: no free entries in kernel_map.\n");
+		physmem_est = physmem;
+	} else
+		physmem_est = min(physmem,
+		    kernel_map->max_offset - kernel_map->min_offset);
+
+	/* Heuristic nbuf autosizing, scaled by the estimated memory. */
+	if (nbuf == 0) {
+		factor = 4 * BKVASIZE / PAGE_SIZE;
+		nbuf = 50;
+		if (physmem_est > 1024)
+			nbuf += min((physmem_est - 1024) / factor,
+			    16384 / factor);
+		if (physmem_est > 16384)
+			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
+	}
+
+	/* Don't let the buffer kva consume more than half of kernel_map. */
+	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
+	    (BKVASIZE * 2)) {
+		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
+		    (BKVASIZE * 2);
+		printf("Warning: nbufs capped at %d\n", nbuf);
+	}
+
+	nswbuf = max(min(nbuf/4, 256), 16);
+
+	valloc(swbuf, struct buf, nswbuf);
+	valloc(buf, struct buf, nbuf);
+	p = bufhashinit(p);
+
+	if (va == 0) {
+		size = (vm_size_t)(p - va);
+		if ((va = kmem_alloc(kernel_map, round_page(size))) == 0)
+			panic("startup: no room for tables");
+		goto again;
+	}
+
+	/* Both passes must have walked the same layout. */
+	if ((vm_size_t)(p - va) != size)
+		panic("startup: table size inconsistency");
+
+	/* Carve the buffer, pager and exec submaps out of kernel_map. */
+	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+	    (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+	    (nbuf*BKVASIZE));
+	buffer_map->system_map = 1;
+	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+	    (nswbuf*MAXPHYS) + pager_map_size);
+	pager_map->system_map = 1;
+	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+	    (16*(ARG_MAX+(PAGE_SIZE*3))));
+
+	/* Thread all callouts onto the free list and empty the wheel. */
+	SLIST_INIT(&callfree);
+	for (i = 0; i < ncallout; i++) {
+		callout_init(&callout[i], 0);
+		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
+		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
+	}
+
+	for (i = 0; i < callwheelsize; i++)
+		TAILQ_INIT(&callwheel[i]);
+
+	mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+
+	bufinit();
+	vm_pager_bufferinit();
+
+	globaldata_register(globaldata);
+
+}
+
+/*
+ * Timecounter fetch routine: return the low 32 bits of the %tick
+ * register.  The forward declaration above makes this function static;
+ * spell that out on the definition too so the linkage is obvious.
+ */
+static unsigned
+tick_get_timecount(struct timecounter *tc)
+{
+	return ((unsigned)rd(tick));
+}
void
-sparc64_init(ofw_vec_t *ofw_vec)
+sparc64_init(struct bootinfo *bi, ofw_vec_t *vec)
{
- OF_init(ofw_vec);
+ struct trapframe *tf;
+
+ /*
+ * Initialize openfirmware (needed for console).
+ */
+ OF_init(vec);
+
+ /*
+ * Initialize the console before printing anything.
+ */
cninit();
- printf("hello world!!\n");
+
+ /*
+ * Check that the bootinfo struct is sane.
+ */
+ if (bi->bi_version != BOOTINFO_VERSION)
+ panic("sparc64_init: bootinfo version mismatch");
+ if (bi->bi_metadata == 0)
+ panic("sparc64_init: no loader metadata");
+ preload_metadata = (caddr_t)bi->bi_metadata;
+
+#ifdef DDB
+ kdb_init();
+#endif
+
+ /*
+ * Initialize virtual memory.
+ */
+ pmap_bootstrap(bi->bi_kpa, bi->bi_end);
+
+ /*
+ * XXX Clear tick and disable the comparator.
+ */
+ wrpr(tick, 0, 0);
+ wr(asr23, 1L << 63, 0);
+
+ /*
+ * Force trap level 1 and take over the trap table.
+ */
+ wrpr(tl, 0, 1);
+ wrpr(tba, tl0_base, 0);
+
+ /*
+ * Initialize proc0 stuff (p_contested needs to be done early).
+ */
+ LIST_INIT(&proc0.p_contested);
+ proc0.p_addr = (struct user *)user0;
+ tf = (struct trapframe *)(user0 + UPAGES * PAGE_SIZE - sizeof(*tf));
+ proc0.p_frame = tf;
+
+ /*
+ * Initialize the per-cpu pointer so we can set curproc.
+ */
+ globaldata = &__globaldata;
+
+ /*
+ * Initialize curproc so that mutexes work.
+ */
+ PCPU_SET(curproc, &proc0);
+ PCPU_SET(curpcb, &((struct user *)user0)->u_pcb);
+ PCPU_SET(spinlocks, NULL);
+
+ /*
+ * Initialize mutexes.
+ */
+ mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
+ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
+ mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
+
+ mtx_lock(&Giant);
+}
+
+void
+set_openfirm_callback(ofw_vec_t *vec)
+{
+ ofw_tba = rdpr(tba);
+ ofw_vec = (u_long)vec;
}
void
@@ -120,7 +391,9 @@ setregs(struct proc *p, u_long entry, u_long stack, u_long ps_strings)
void
Debugger(const char *msg)
{
- TODO;
+
+ printf("Debugger(\"%s\")\n", msg);
+ breakpoint();
}
int
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index bd22a07..556e4b0 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -26,10 +26,35 @@
* $FreeBSD$
*/
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this module is called upon to
+ * provide software-use-only maps which may or may not be stored in the
+ * same form as hardware maps. These pseudo-maps are used to store
+ * intermediate results from copy operations to and from address spaces.
+ *
+ * Since the information managed by this module is also stored by the
+ * logical address mapping module, this module may throw away valid virtual
+ * to physical mappings at almost any time. However, invalidations of
+ * mappings must be done as requested.
+ *
+ * In order to cope with hardware architectures which make virtual to
+ * physical map invalidates expensive, this module may delay invalidate
+ * reduced protection operations until such time as they are actually
+ * necessary. This module is given full information as to which processors
+ * are currently using which maps, and to when physical maps must be made
+ * correct.
+ */
+
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/proc.h>
#include <sys/systm.h>
+#include <sys/vmmeter.h>
+
+#include <dev/ofw/openfirm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -42,246 +67,799 @@
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
+#include <machine/frame.h>
+#include <machine/pv.h>
+#include <machine/tlb.h>
+#include <machine/tte.h>
+#include <machine/tsb.h>
+
+#define PMAP_DEBUG
+
+#define PMAP_LOCK(pm)
+#define PMAP_UNLOCK(pm)
+
+#define dcache_global_flush(pa)
+#define icache_global_flush(pa)
+
+struct mem_region {
+ vm_offset_t mr_start;
+ vm_offset_t mr_size;
+};
+
+struct ofw_map {
+ vm_offset_t om_start;
+ vm_offset_t om_size;
+ u_long om_tte;
+};
+
+/*
+ * Virtual address of message buffer.
+ */
struct msgbuf *msgbufp;
+/*
+ * Physical addresses of first and last available physical page.
+ */
vm_offset_t avail_start;
vm_offset_t avail_end;
-vm_offset_t kernel_vm_end;
+
+/*
+ * Map of physical memory regions.
+ */
vm_offset_t phys_avail[10];
+
+/*
+ * First and last available kernel virtual addresses.
+ */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
+vm_offset_t kernel_vm_end;
-struct pmap __kernel_pmap;
+/*
+ * Kernel pmap handle and associated storage.
+ */
+pmap_t kernel_pmap;
+static struct pmap kernel_pmap_store;
+
+/*
+ * Map of free and in use hardware contexts and index of first potentially
+ * free context.
+ */
+static char pmap_context_map[PMAP_CONTEXT_MAX];
+static u_int pmap_context_base;
-static boolean_t pmap_initialized = FALSE;
+/*
+ * Virtual addresses of free space for temporary mappings. Used for copying
+ * and zeroing physical pages.
+ */
+static vm_offset_t CADDR1;
+static vm_offset_t CADDR2;
+static __inline int
+pmap_track_modified(vm_offset_t va)
+{
+ return ((va < clean_sva) || (va >= clean_eva));
+}
+
+/*
+ * Manipulate tte bits of all virtual to physical mappings for the given page.
+ */
+static void pmap_bit_clear(vm_page_t m, u_long bits);
+static void pmap_bit_set(vm_page_t m, u_long bits);
+static int pmap_bit_test(vm_page_t m, u_long bits);
+
+static void pmap_local_remove_all(vm_page_t m);
+static void pmap_global_remove_all(vm_page_t m);
+
+/*
+ * Allocate and free hardware context numbers.
+ */
+static u_int pmap_context_alloc(void);
+static void pmap_context_destroy(u_int i);
+
+/*
+ * Allocate physical memory for use in pmap_bootstrap.
+ */
+static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
+
+/*
+ * Quick sort callout for comparing memory regions.
+ */
+static int mr_cmp(const void *a, const void *b);
+/*
+ * qsort comparator ordering mem_regions by start address.  Compare
+ * explicitly rather than returning the difference: mr_start is a
+ * vm_offset_t, and truncating the 64-bit difference to int can change
+ * sign for widely separated regions and mis-sort the map.
+ */
+static int
+mr_cmp(const void *a, const void *b)
+{
+	const struct mem_region *ma;
+	const struct mem_region *mb;
+
+	ma = a;
+	mb = b;
+	if (ma->mr_start < mb->mr_start)
+		return (-1);
+	else if (ma->mr_start > mb->mr_start)
+		return (1);
+	else
+		return (0);
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ */
void
-pmap_activate(struct proc *p)
+pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva)
+{
+ struct mem_region mra[8];
+ ihandle_t pmem;
+ struct pmap *pm;
+ vm_offset_t pa;
+ vm_offset_t va;
+ struct tte tte;
+ int sz;
+ int i;
+ int j;
+
+ /*
+ * Find out what physical memory is available from the prom and
+ * initialize the phys_avail array.
+ */
+ if ((pmem = OF_finddevice("/memory")) == -1)
+ panic("pmap_bootstrap: finddevice /memory");
+ if ((sz = OF_getproplen(pmem, "available")) == -1)
+ panic("pmap_bootstrap: getproplen /memory/available");
+ if (sizeof(phys_avail) < sz)
+ panic("pmap_bootstrap: phys_avail too small");
+ bzero(mra, sz);
+ if (OF_getprop(pmem, "available", mra, sz) == -1)
+ panic("pmap_bootstrap: getprop /memory/available");
+ sz /= sizeof(*mra);
+ qsort(mra, sz, sizeof *mra, mr_cmp);
+ for (i = 0, j = 0; i < sz; i++, j += 2) {
+ phys_avail[j] = mra[i].mr_start;
+ phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
+ }
+
+ /*
+ * Initialize the kernel pmap (which is statically allocated).
+ */
+ pm = &kernel_pmap_store;
+ pm->pm_context = TLB_CTX_KERNEL;
+ pm->pm_active = ~0;
+ pm->pm_count = 1;
+ kernel_pmap = pm;
+
+ /*
+ * Allocate the kernel tsb and lock it in the tlb.
+ */
+ pa = pmap_bootstrap_alloc(TSB_KERNEL_SIZE);
+ if (pa & PAGE_MASK_4M)
+ panic("pmap_bootstrap: tsb unaligned\n");
+ tsb_kernel_phys = pa;
+ for (i = 0; i < TSB_KERNEL_PAGES; i++) {
+ va = TSB_KERNEL_MIN_ADDRESS + i * PAGE_SIZE_4M;
+ tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
+ tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) |
+ TD_MOD | TD_REF | TD_TSB | TD_L | TD_CP | TD_P | TD_W;
+ tlb_store_slot(TLB_DTLB, va, tte, TLB_SLOT_TSB_KERNEL_MIN + i);
+ }
+ bzero((void *)va, TSB_KERNEL_SIZE);
+ stxa(AA_IMMU_TSB, ASI_IMMU,
+ (va >> (STTE_SHIFT - TTE_SHIFT)) | TSB_SIZE_REG);
+ stxa(AA_DMMU_TSB, ASI_DMMU,
+ (va >> (STTE_SHIFT - TTE_SHIFT)) | TSB_SIZE_REG);
+ membar(Sync);
+
+ /*
+ * Calculate the first and last available physical addresses.
+ */
+ avail_start = phys_avail[0];
+ for (i = 0; phys_avail[i + 2] != 0; i += 2)
+ ;
+ avail_end = phys_avail[i + 1];
+
+ /*
+ * Allocate physical memory for the heads of the stte alias chains.
+ */
+ sz = round_page(((avail_end - avail_start) >> PAGE_SHIFT) *
+ sizeof (vm_offset_t));
+ pv_table = pmap_bootstrap_alloc(sz);
+ /* XXX */
+ avail_start += sz;
+ for (i = 0; i < sz; i += sizeof(vm_offset_t))
+ stxp(pv_table + i, 0);
+
+ /*
+ * Set the start and end of kva. The kernel is loaded at the first
+ * available 4 meg super page, so round up to the end of the page.
+ */
+ virtual_avail = roundup(ekva, PAGE_SIZE_4M);
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+ /*
+ * Allocate virtual address space for copying and zeroing pages of
+ * physical memory.
+ */
+ CADDR1 = virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ CADDR2 = virtual_avail;
+ virtual_avail += PAGE_SIZE;
+}
+
+/*
+ * Allocate a physical page of memory directly from the phys_avail map.
+ * Can only be called from pmap_bootstrap before avail start and end are
+ * calculated.
+ */
+static vm_offset_t
+pmap_bootstrap_alloc(vm_size_t size)
{
- TODO;
+ vm_offset_t pa;
+ int i;
+
+ size = round_page(size);
+ for (i = 0; phys_avail[i] != 0; i += 2) {
+ if (phys_avail[i + 1] - phys_avail[i] < size)
+ continue;
+ pa = phys_avail[i];
+ phys_avail[i] += size;
+ return (pa);
+ }
+ panic("pmap_bootstrap_alloc");
+}
+
+/*
+ * Allocate a hardware context number from the context map.
+ */
+static u_int
+pmap_context_alloc(void)
+{
+	u_int i;
+
+	/*
+	 * Round-robin scan of the context map, starting just past the
+	 * most recently allocated slot; panic only once every slot has
+	 * been inspected and found busy.
+	 *
+	 * NOTE(review): the scan starts at context 0; presumably
+	 * TLB_CTX_KERNEL is kept out of this map elsewhere -- confirm.
+	 */
+	i = pmap_context_base;
+	do {
+		if (pmap_context_map[i] == 0) {
+			pmap_context_map[i] = 1;
+			pmap_context_base = (i + 1) & (PMAP_CONTEXT_MAX - 1);
+			return (i);
+		}
+	} while ((i = (i + 1) & (PMAP_CONTEXT_MAX - 1)) != pmap_context_base);
+	panic("pmap_context_alloc");
}
+/*
+ * Free a hardware context number back to the context map.
+ */
+static void
+pmap_context_destroy(u_int i)
+{
+
+	/* Mark the slot free; pmap_context_base is deliberately untouched. */
+	pmap_context_map[i] = 0;
+}
+
+/*
+ * Map a range of physical addresses into kernel virtual address space.
+ *
+ * The value passed in *virt is a suggested virtual address for the mapping.
+ * Architectures which can support a direct-mapped physical to virtual region
+ * can return the appropriate address within that region, leaving '*virt'
+ * unchanged. We cannot and therefore do not; *virt is updated with the
+ * first usable address after the mapped region.
+ */
vm_offset_t
-pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
+pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
- TODO;
- return (0);
+ vm_offset_t sva;
+ vm_offset_t va;
+
+ sva = *virt;
+ va = sva;
+ for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
+ pmap_kenter(va, pa_start);
+ *virt = va;
+ return (sva);
}
+/*
+ * Map a wired page into kernel virtual address space.
+ */
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
- TODO;
+ struct tte tte;
+
+ tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
+ tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
+ TD_MOD | TD_REF | TD_CP | TD_P | TD_W;
+ tsb_tte_enter_kernel(va, tte);
}
+/*
+ * Remove a wired page from kernel virtual address space.
+ */
void
-pmap_clear_modify(vm_page_t m)
+pmap_kremove(vm_offset_t va)
{
- TODO;
+ tsb_remove_kernel(va);
}
+/*
+ * Map a list of wired pages into kernel virtual address space. This is
+ * intended for temporary mappings which do not need page modification or
+ * references recorded. Existing mappings in the region are overwritten.
+ */
void
-pmap_collect(void)
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
- TODO;
+ int i;
+
+ for (i = 0; i < count; i++, va += PAGE_SIZE)
+ pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
}
+/*
+ * Remove page mappings from kernel virtual address space. Intended for
+ * temporary mappings entered by pmap_qenter.
+ */
void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
- vm_size_t len, vm_offset_t src_addr)
+pmap_qremove(vm_offset_t va, int count)
{
- TODO;
+ int i;
+
+ for (i = 0; i < count; i++, va += PAGE_SIZE)
+ pmap_kremove(va);
}
+/*
+ * Map the given physical page at the specified virtual address in the
+ * target pmap with the protection requested. If specified the page
+ * will be wired down.
+ */
void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ boolean_t wired)
+{
+ struct stte *stp;
+ struct tte tte;
+ vm_offset_t pa;
+
+ pa = VM_PAGE_TO_PHYS(m);
+ tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
+ tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
+ TD_CP | TD_CV;
+ if (pm->pm_context == TLB_CTX_KERNEL)
+ tte.tte_data |= TD_P;
+ if (wired == TRUE) {
+ tte.tte_data |= TD_REF;
+ if (prot & VM_PROT_WRITE)
+ tte.tte_data |= TD_MOD;
+ }
+ if (prot & VM_PROT_WRITE)
+ tte.tte_data |= TD_W;
+ if (prot & VM_PROT_EXECUTE) {
+ tte.tte_data |= TD_EXEC;
+ icache_global_flush(&pa);
+ }
+
+ if (pm == kernel_pmap) {
+ tsb_tte_enter_kernel(va, tte);
+ return;
+ }
+
+ PMAP_LOCK(pm);
+ if ((stp = tsb_stte_lookup(pm, va)) != NULL) {
+ pv_remove_virt(stp);
+ tsb_stte_remove(stp);
+ pv_insert(pm, pa, va, stp);
+ stp->st_tte = tte;
+ } else {
+ tsb_tte_enter(pm, va, tte);
+ }
+ PMAP_UNLOCK(pm);
+}
+
+/*
+ * Initialize the pmap module.
+ */
+void
+pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
- TODO;
}
void
-pmap_zero_page(vm_offset_t pa)
+pmap_init2(void)
{
- TODO;
}
+/*
+ * Initialize the pmap associated with process 0.
+ */
void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_pinit0(pmap_t pm)
{
- TODO;
+
+ pm = &kernel_pmap_store;
+ pm->pm_context = pmap_context_alloc();
+ pm->pm_active = 0;
+ pm->pm_count = 1;
+ bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
+/*
+ * Initialize a preallocated and zeroed pmap structure.
+ */
void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+pmap_pinit(pmap_t pm)
{
- TODO;
+ struct stte *stp;
+
+ pm->pm_context = pmap_context_alloc();
+ pm->pm_active = 0;
+ pm->pm_count = 1;
+ stp = &pm->pm_stte;
+ stp->st_tte = tsb_page_alloc(pm, (vm_offset_t)tsb_base(0));
+ bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
-vm_offset_t
-pmap_extract(pmap_t pmap, vm_offset_t va)
+void
+pmap_pinit2(pmap_t pmap)
{
- TODO;
- return (0);
}
+/*
+ * Grow the number of kernel page table entries. Unneeded.
+ */
void
pmap_growkernel(vm_offset_t addr)
{
- TODO;
}
+/*
+ * Zero a page of physical memory by temporarily mapping it into the tlb.
+ */
void
-pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
+pmap_zero_page(vm_offset_t pa)
{
- TODO;
+ struct tte tte;
+ vm_offset_t va;
+
+ va = CADDR2;
+ tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
+ tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
+ tlb_store(TLB_DTLB, va, tte);
+ bzero((void *)va, PAGE_SIZE);
+ tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
}
+/*
+ * Make the specified page pageable (or not). Unneeded.
+ */
void
-pmap_init2(void)
+pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+ boolean_t pageable)
{
- TODO;
}
-boolean_t
-pmap_is_modified(vm_page_t m)
+/*
+ * Create the kernel stack and user structure for a new process. This
+ * routine directly affects the performance of fork().
+ */
+void
+pmap_new_proc(struct proc *p)
{
- TODO;
- return (0);
+ struct user *u;
+ vm_object_t o;
+ vm_page_t m;
+ u_int i;
+
+ if ((o = p->p_upages_obj) == NULL) {
+ o = vm_object_allocate(OBJT_DEFAULT, UPAGES);
+ p->p_upages_obj = o;
+ }
+ if ((u = p->p_addr) == NULL) {
+ u = (struct user *)kmem_alloc_nofault(kernel_map,
+ UPAGES * PAGE_SIZE);
+ KASSERT(u != NULL, ("pmap_new_proc: u area\n"));
+ p->p_addr = u;
+ }
+ for (i = 0; i < UPAGES; i++) {
+ m = vm_page_grab(o, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+ m->wire_count++;
+ cnt.v_wire_count++;
+
+ pmap_kenter((vm_offset_t)u + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m));
+
+ vm_page_wakeup(m);
+ vm_page_flag_clear(m, PG_ZERO);
+ vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
+ m->valid = VM_PAGE_BITS_ALL;
+ }
}
void
-pmap_clear_reference(vm_page_t m)
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
- TODO;
+
+ if (m->flags & PG_FICTITIOUS || prot & VM_PROT_WRITE)
+ return;
+ if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
+ pmap_bit_clear(m, TD_W);
+ else
+ pmap_global_remove_all(m);
}
-int
-pmap_ts_referenced(vm_page_t m)
+void
+pmap_clear_modify(vm_page_t m)
{
- TODO;
+
+ if (m->flags & PG_FICTITIOUS)
+ return;
+ pmap_bit_clear(m, TD_MOD);
+}
+
+static void
+pmap_bit_clear(vm_page_t m, u_long bits)
+{
+ vm_offset_t pstp;
+ vm_offset_t pvh;
+ vm_offset_t pa;
+ vm_offset_t va;
+ struct tte tte;
+
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pv_lookup(pa);
+ PV_LOCK();
+#ifdef notyet
+restart:
+#endif
+ for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
+ tte = pv_get_tte(pstp);
+ KASSERT(TD_PA(tte.tte_data) == pa,
+ ("pmap_bit_clear: corrupt alias chain"));
+ if ((tte.tte_data & bits) == 0)
+ continue;
+ va = tte_get_va(tte);
+ if (bits == TD_W && !pmap_track_modified(va))
+ continue;
+ if (bits == TD_W && tte.tte_data & TD_MOD) {
+ vm_page_dirty(m);
+ bits |= TD_MOD;
+ }
+ pv_bit_clear(pstp, bits);
+#ifdef notyet
+ generation = pv_generation;
+ PV_UNLOCK();
+ /* XXX pass function and parameter to ipi call */
+ ipi_all(IPI_TLB_PAGE_DEMAP);
+ PV_LOCK();
+ if (generation != pv_generation)
+ goto restart;
+#else
+ tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), va);
+#endif
+ }
+ PV_UNLOCK();
+}
+
+static void
+pmap_bit_set(vm_page_t m, u_long bits)
+{
+ vm_offset_t pstp;
+ vm_offset_t pvh;
+ vm_offset_t pa;
+ struct tte tte;
+
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pv_lookup(pa);
+ PV_LOCK();
+#ifdef notyet
+restart:
+#endif
+ for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
+ tte = pv_get_tte(pstp);
+ KASSERT(TD_PA(tte.tte_data) == pa,
+ ("pmap_bit_set: corrupt alias chain"));
+ if (tte.tte_data & bits)
+ continue;
+ pv_bit_set(pstp, bits);
+#ifdef notyet
+ generation = pv_generation;
+ PV_UNLOCK();
+ /* XXX pass function and parameter to ipi call */
+ ipi_all(IPI_TLB_PAGE_DEMAP);
+ PV_LOCK();
+ if (generation != pv_generation)
+ goto restart;
+#else
+ tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), tte_get_va(tte));
+#endif
+ }
+ PV_UNLOCK();
+}
+
+static int
+pmap_bit_test(vm_page_t m, u_long bits)
+{
+ vm_offset_t pstp;
+ vm_offset_t pvh;
+ vm_offset_t pa;
+
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pv_lookup(pa);
+ PV_LOCK();
+ for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
+ if (pv_bit_test(pstp, bits)) {
+ PV_UNLOCK();
+ return (1);
+ }
+ }
+ PV_UNLOCK();
return (0);
}
+static void
+pmap_global_remove_all(vm_page_t m)
+{
+ vm_offset_t pstp;
+ vm_offset_t pvh;
+ vm_offset_t pa;
+
+ printf("pmap_global_remove_all\n");
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pv_lookup(pa);
+ pv_dump(pvh);
+ PV_LOCK();
+ printf("pmap_global_remove_all: for\n");
+ for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp))
+ pv_bit_clear(pstp, TD_V);
+ printf("pmap_global_remove_all: done for\n");
+ PV_UNLOCK();
+ pmap_local_remove_all(m);
+ pv_dump(pvh);
+ PV_LOCK();
+ printf("pmap_global_remove_all: while\n");
+ while ((pstp = pv_get_first(pvh)) != 0) {
+ pv_dump(pvh);
+ pv_remove_phys(pstp);
+ }
+ printf("pmap_global_remove_all: done while\n");
+ PV_UNLOCK();
+ printf("pmap_global_remove_all: done\n");
+}
+
+static void
+pmap_local_remove_all(vm_page_t m)
+{
+ vm_offset_t pstp;
+ vm_offset_t pvh;
+ vm_offset_t pa;
+ struct tte tte;
+
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pv_lookup(pa);
+ PV_LOCK();
+ printf("pmap_local_remove_all: for\n");
+ for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
+ tte = pv_get_tte(pstp);
+ tsb_tte_local_remove(&tte);
+ }
+ printf("pmap_local_remove_all: done for\n");
+ PV_UNLOCK();
+}
+
void
-pmap_kenter(vm_offset_t va, vm_offset_t pa)
+pmap_activate(struct proc *p)
{
TODO;
}
vm_offset_t
-pmap_kextract(vm_offset_t va)
+pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{
TODO;
return (0);
}
void
-pmap_kremove(vm_offset_t va)
-{
- TODO;
-}
-
-vm_offset_t
-pmap_map(vm_offset_t *va, vm_offset_t start, vm_offset_t end, int prot)
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
TODO;
- return (0);
}
-int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+void
+pmap_collect(void)
{
TODO;
- return (0);
}
void
-pmap_new_proc(struct proc *p)
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
{
TODO;
}
void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size, int limit)
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
TODO;
}
void
-pmap_page_protect(vm_page_t m, vm_prot_t prot)
+pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
TODO;
}
-void
-pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
- boolean_t pageable)
+vm_offset_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
{
TODO;
+ return (0);
}
boolean_t
-pmap_page_exists(pmap_t pmap, vm_page_t m)
+pmap_is_modified(vm_page_t m)
{
TODO;
return (0);
}
void
-pmap_pinit(pmap_t pmap)
+pmap_clear_reference(vm_page_t m)
{
TODO;
}
-void
-pmap_pinit0(pmap_t pmap)
+int
+pmap_ts_referenced(vm_page_t m)
{
TODO;
+ return (0);
}
-void
-pmap_pinit2(pmap_t pmap)
+vm_offset_t
+pmap_kextract(vm_offset_t va)
{
TODO;
+ return (0);
}
-void
-pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry)
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
TODO;
+ return (0);
}
void
-pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size, int limit)
{
TODO;
}
-vm_offset_t
-pmap_phys_address(int ppn)
+boolean_t
+pmap_page_exists(pmap_t pmap, vm_page_t m)
{
TODO;
return (0);
}
void
-pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry)
{
TODO;
}
void
-pmap_qremove(vm_offset_t va, int count)
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
TODO;
}
-void
-pmap_reference(pmap_t pmap)
+vm_offset_t
+pmap_phys_address(int ppn)
{
TODO;
+ return (0);
+}
+
+void
+pmap_reference(pmap_t pm)
+{
+ if (pm != NULL)
+ pm->pm_count++;
}
void
diff --git a/sys/sparc64/sparc64/pv.c b/sys/sparc64/sparc64/pv.c
new file mode 100644
index 0000000..4affb53
--- /dev/null
+++ b/sys/sparc64/sparc64/pv.c
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include <machine/asi.h>
+#include <machine/frame.h>
+#include <machine/pmap.h>
+#include <machine/pv.h>
+#include <machine/tte.h>
+#include <machine/tlb.h>
+#include <machine/tsb.h>
+
+/*
+ * Physical address of array of physical addresses of stte alias chain heads,
+ * and generation count of alias chains.
+ */
+vm_offset_t pv_table;
+u_long pv_generation;
+
+void
+pv_insert(pmap_t pm, vm_offset_t pa, vm_offset_t va, struct stte *stp)
+{
+ vm_offset_t pstp;
+ vm_offset_t pvh;
+
+ pstp = tsb_stte_vtophys(pm, stp);
+ pvh = pv_lookup(pa);
+ PV_LOCK();
+ if ((stp->st_next = pv_get_first(pvh)) != 0)
+ pv_set_prev(stp->st_next, pstp + ST_NEXT);
+ pv_set_first(pvh, pstp);
+ stp->st_prev = pvh;
+ pv_generation++;
+ PV_UNLOCK();
+}
+
+void
+pv_remove_virt(struct stte *stp)
+{
+ PV_LOCK();
+ if (stp->st_next != 0)
+ pv_set_prev(stp->st_next, stp->st_prev);
+ stxp(stp->st_prev, stp->st_next);
+ pv_generation++;
+ PV_UNLOCK();
+}
+
+void
+pv_dump(vm_offset_t pvh)
+{
+ vm_offset_t pstp;
+
+ printf("pv_dump: pvh=%#lx first=%#lx\n", pvh, pv_get_first(pvh));
+ for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp))
+ printf("\tpstp=%#lx next=%#lx prev=%#lx\n", pstp,
+ pv_get_next(pstp), pv_get_prev(pstp));
+ printf("pv_dump: done\n");
+}
diff --git a/sys/sparc64/sparc64/support.S b/sys/sparc64/sparc64/support.S
new file mode 100644
index 0000000..35a4a44
--- /dev/null
+++ b/sys/sparc64/sparc64/support.S
@@ -0,0 +1,346 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asi.h>
+#include <machine/asmacros.h>
+
+#include "assym.s"
+
+#define E
+
+#define _LD(w, a) ld ## w ## a
+#define _ST(w, a) st ## w ## a
+
+#define LD(w, a) _LD(w, a)
+#define ST(w, a) _ST(w, a)
+
+#define _BCOPY(src, dst, len, sa, sasi, da, dasi) \
+ brz,pn len, 2f ; \
+ mov len, %o3 ; \
+1: LD(ub, sa) [src] sasi, %o4 ; \
+ ST(b, da) %o4, [dst] dasi ; \
+ dec %o3 ; \
+ inc src ; \
+ brnz,pt %o3, 1b ; \
+ inc dst ; \
+2:
+
+#define BCOPY(src, dst, len) \
+ _BCOPY(src, dst, len, E, E, E, E)
+
+#define COPYIN(uaddr, kaddr, len) \
+ wr %g0, ASI_AIUP, %asi ; \
+ _BCOPY(uaddr, kaddr, len, a, %asi, E, E)
+
+#define COPYOUT(kaddr, uaddr, len) \
+ wr %g0, ASI_AIUP, %asi ; \
+ _BCOPY(kaddr, uaddr, len, E, E, a, %asi)
+
+#define _COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
+ clr %o4 ; \
+ clr %o5 ; \
+1: LD(ub, sa) [src] sasi, %g1 ; \
+ ST(b, da) %g1, [dst] dasi ; \
+ brz,pn %g1, 2f ; \
+ inc %o4 ; \
+ dec len ; \
+ inc src ; \
+ brgz,pt len, 1b ; \
+ inc dst ; \
+ mov ENAMETOOLONG, %o5 ; \
+2: brnz,a done, 3f ; \
+ stx %o4, [done] ; \
+3:
+
+#define COPYSTR(dst, src, len, done) \
+ _COPYSTR(dst, src, len, done, E, E, E, E)
+
+#define COPYINSTR(uaddr, kaddr, len, done) \
+ wr %g0, ASI_AIUP, %asi ; \
+ _COPYSTR(uaddr, kaddr, len, done, a, %asi, E, E)
+
+#define CATCH_SETUP(label) \
+ setx label, %g2, %g1 ; \
+ ldx [PCPU(CURPCB)], %g6 ; \
+ stx %g1, [%g6 + PCB_ONFAULT] ;
+
+#define CATCH_END() \
+ stx %g0, [%g6 + PCB_ONFAULT] ;
+
+#define FU_ALIGNED(loader, label) \
+ CATCH_SETUP(label) ; \
+ loader [%o0] ASI_AIUP, %o0 ; \
+ retl ; \
+ CATCH_END()
+
+#define FU_BYTES(loader, size, label) \
+ btst (size) - 1, %o0 ; \
+ bnz,pn %xcc, .Lfsalign ; \
+ EMPTY ; \
+ FU_ALIGNED(loader, label)
+
+#define SU_ALIGNED(storer, label) \
+ CATCH_SETUP(label) ; \
+ storer %o1, [%o0] ASI_AIUP ; \
+ retl ; \
+ CATCH_END()
+
+#define SU_BYTES(storer, size, label) \
+ btst (size) - 1, %o0 ; \
+ bnz,pn %xcc, .Lfsalign ; \
+ EMPTY ; \
+ SU_ALIGNED(storer, label)
+
+/*
+ * void bcmp(void *b, size_t len)
+ */
+ENTRY(bcmp)
+ brz,pn %o2, 2f
+ clr %o3
+1: ldub [%o0 + %o3], %o4
+ ldub [%o1 + %o3], %o5
+ cmp %o4, %o5
+ bne,pn %xcc, 1f
+ inc %o3
+ deccc %o2
+ bne,pt %xcc, 1b
+ nop
+2: retl
+ mov %o2, %o0
+END(bcmp)
+
+/*
+ * void bcopy(const void *src, void *dst, size_t len)
+ */
+ENTRY(bcopy)
+ BCOPY(%o0, %o1, %o2)
+ retl
+ nop
+END(bcopy)
+
+/*
+ * void ovbcopy(const void *src, void *dst, size_t len)
+ * XXX handle overlap...
+ */
+ENTRY(ovbcopy)
+ BCOPY(%o0, %o1, %o2)
+ retl
+ nop
+END(ovbcopy)
+
+/*
+ * void bzero(void *b, size_t len)
+ */
+ENTRY(bzero)
+ brz,pn %o1, 1f
+ nop
+1: deccc %o1
+ stb %g0, [%o0]
+ bne,pt %xcc, 1b
+ inc %o0
+2: retl
+ nop
+END(bzero)
+
+/*
+ * void *memcpy(void *dst, const void *src, size_t len)
+ */
+ENTRY(memcpy)
+ BCOPY(%o1, %o0, %o2)
+ retl
+ nop
+END(memcpy)
+
+/*
+ * int copyin(const void *uaddr, void *kaddr, size_t len)
+ */
+ENTRY(copyin)
+ CATCH_SETUP(.Lefault)
+ COPYIN(%o0, %o1, %o2)
+ CATCH_END()
+ retl
+ clr %o0
+END(copyin)
+
+/*
+ * int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
+ */
+ENTRY(copyinstr)
+ CATCH_SETUP(.Lefault)
+ COPYINSTR(%o0, %o1, %o2, %o3)
+ CATCH_END()
+ retl
+ mov %o5, %o0
+END(copyinstr)
+
+/*
+ * int copyout(const void *kaddr, void *uaddr, size_t len)
+ */
+ENTRY(copyout)
+ CATCH_SETUP(.Lefault)
+ COPYOUT(%o0, %o1, %o2)
+ CATCH_END()
+ retl
+ clr %o0
+END(copyout)
+
+.Lefault:
+ CATCH_END()
+ retl
+ mov EFAULT, %o0
+
+/*
+ * int copystr(const void *src, void *dst, size_t len, size_t *done)
+ */
+ENTRY(copystr)
+ COPYSTR(%o0, %o1, %o2, %o3)
+ retl
+ mov %o5, %o0
+END(copystr)
+
+/*
+ * int fubyte(const void *base)
+ */
+ENTRY(fubyte)
+ FU_ALIGNED(lduba, .Lfsfault)
+END(fubyte)
+
+/*
+ * int fusword(const void *base)
+ */
+ENTRY(fusword)
+ FU_BYTES(lduwa, 2, .Lfsfault)
+END(fusword)
+
+/*
+ * int fuswintr(const void *base)
+ */
+ENTRY(fuswintr)
+ FU_BYTES(lduwa, 2, fsbail)
+END(fuswintr)
+
+/*
+ * int fuword(const void *base)
+ */
+ENTRY(fuword)
+ FU_BYTES(ldxa, 8, .Lfsfault)
+END(fuword)
+
+/*
+ * int subyte(const void *base)
+ */
+ENTRY(subyte)
+ SU_ALIGNED(stba, .Lfsfault)
+END(subyte)
+
+/*
+ * int suibyte(const void *base)
+ */
+ENTRY(suibyte)
+ SU_ALIGNED(stba, fsbail)
+END(suibyte)
+
+/*
+ * int susword(const void *base)
+ */
+ENTRY(susword)
+ SU_BYTES(stwa, 2, .Lfsfault)
+END(susword)
+
+/*
+ * int suswintr(const void *base)
+ */
+ENTRY(suswintr)
+ SU_BYTES(stwa, 2, fsbail)
+END(suswintr)
+
+/*
+ * int suword(const void *base)
+ */
+ENTRY(suword)
+ SU_BYTES(stwa, 8, .Lfsfault)
+END(suword)
+
+ENTRY(fsbail)
+ nop
+.Lfsfault:
+ CATCH_END()
+.Lfsalign:
+ retl
+ mov -1, %o0
+END(fsbail)
+
+ENTRY(longjmp)
+ set 1, %g3
+ movrz %o1, %o1, %g3
+ mov %o0, %g1
+ ldx [%g1 + JB_FP], %g2
+1: cmp %fp, %g2
+ bl,a,pt %xcc, 1b
+ restore
+ bne,pn %xcc, 2f
+ ldx [%g1 + JB_SP], %o2
+ ldx [%g1 + JB_PC], %o3
+ cmp %o2, %sp
+ blt,pn %xcc, 2f
+ movge %xcc, %o2, %sp
+ jmp %o3 + 8
+ mov %g3, %o0
+2: PANIC("longjmp botch", %l1)
+END(longjmp)
+
+ENTRY(setjmp)
+ stx %sp, [%o0 + JB_SP]
+ stx %o7, [%o0 + JB_PC]
+ stx %fp, [%o0 + JB_FP]
+ retl
+ clr %o0
+END(setjmp)
+
+/*
+ * void openfirmware(cell_t args[])
+ */
+ENTRY(openfirmware)
+ save %sp, -CCFSZ, %sp
+ rdpr %pstate, %l0
+ rdpr %tl, %l1
+ rdpr %tba, %l2
+ wrpr %g0, 0, %tl
+ setx ofw_tba, %l4, %l3
+ ldx [%l3], %l3
+ setx ofw_vec, %l5, %l4
+ ldx [%l4], %l4
+ wrpr %l3, 0, %tba
+ call %l4
+ mov %i0, %o0
+ wrpr %l0, 0, %pstate
+ wrpr %l1, 0, %tl
+ wrpr %l2, 0, %tba
+ ret
+ restore
+END(openfirmware)
diff --git a/sys/sparc64/sparc64/support.s b/sys/sparc64/sparc64/support.s
new file mode 100644
index 0000000..35a4a44
--- /dev/null
+++ b/sys/sparc64/sparc64/support.s
@@ -0,0 +1,346 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asi.h>
+#include <machine/asmacros.h>
+
+#include "assym.s"
+
+#define E
+
+#define _LD(w, a) ld ## w ## a
+#define _ST(w, a) st ## w ## a
+
+#define LD(w, a) _LD(w, a)
+#define ST(w, a) _ST(w, a)
+
+#define _BCOPY(src, dst, len, sa, sasi, da, dasi) \
+ brz,pn len, 2f ; \
+ mov len, %o3 ; \
+1: LD(ub, sa) [src] sasi, %o4 ; \
+ ST(b, da) %o4, [dst] dasi ; \
+ dec %o3 ; \
+ inc src ; \
+ brnz,pt %o3, 1b ; \
+ inc dst ; \
+2:
+
+#define BCOPY(src, dst, len) \
+ _BCOPY(src, dst, len, E, E, E, E)
+
+#define COPYIN(uaddr, kaddr, len) \
+ wr %g0, ASI_AIUP, %asi ; \
+ _BCOPY(uaddr, kaddr, len, a, %asi, E, E)
+
+#define COPYOUT(kaddr, uaddr, len) \
+ wr %g0, ASI_AIUP, %asi ; \
+ _BCOPY(kaddr, uaddr, len, E, E, a, %asi)
+
+#define _COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
+ clr %o4 ; \
+ clr %o5 ; \
+1: LD(ub, sa) [src] sasi, %g1 ; \
+ ST(b, da) %g1, [dst] dasi ; \
+ brz,pn %g1, 2f ; \
+ inc %o4 ; \
+ dec len ; \
+ inc src ; \
+ brgz,pt len, 1b ; \
+ inc dst ; \
+ mov ENAMETOOLONG, %o5 ; \
+2: brnz,a done, 3f ; \
+ stx %o4, [done] ; \
+3:
+
+#define COPYSTR(dst, src, len, done) \
+ _COPYSTR(dst, src, len, done, E, E, E, E)
+
+#define COPYINSTR(uaddr, kaddr, len, done) \
+ wr %g0, ASI_AIUP, %asi ; \
+ _COPYSTR(uaddr, kaddr, len, done, a, %asi, E, E)
+
+#define CATCH_SETUP(label) \
+ setx label, %g2, %g1 ; \
+ ldx [PCPU(CURPCB)], %g6 ; \
+ stx %g1, [%g6 + PCB_ONFAULT] ;
+
+#define CATCH_END() \
+ stx %g0, [%g6 + PCB_ONFAULT] ;
+
+#define FU_ALIGNED(loader, label) \
+ CATCH_SETUP(label) ; \
+ loader [%o0] ASI_AIUP, %o0 ; \
+ retl ; \
+ CATCH_END()
+
+#define FU_BYTES(loader, size, label) \
+ btst (size) - 1, %o0 ; \
+ bnz,pn %xcc, .Lfsalign ; \
+ EMPTY ; \
+ FU_ALIGNED(loader, label)
+
+#define SU_ALIGNED(storer, label) \
+ CATCH_SETUP(label) ; \
+ storer %o1, [%o0] ASI_AIUP ; \
+ retl ; \
+ CATCH_END()
+
+#define SU_BYTES(storer, size, label) \
+ btst (size) - 1, %o0 ; \
+ bnz,pn %xcc, .Lfsalign ; \
+ EMPTY ; \
+ SU_ALIGNED(storer, label)
+
+/*
+ * void bcmp(void *b, size_t len)
+ */
+ENTRY(bcmp)
+ brz,pn %o2, 2f
+ clr %o3
+1: ldub [%o0 + %o3], %o4
+ ldub [%o1 + %o3], %o5
+ cmp %o4, %o5
+ bne,pn %xcc, 1f
+ inc %o3
+ deccc %o2
+ bne,pt %xcc, 1b
+ nop
+2: retl
+ mov %o2, %o0
+END(bcmp)
+
+/*
+ * void bcopy(const void *src, void *dst, size_t len)
+ */
+ENTRY(bcopy)
+ BCOPY(%o0, %o1, %o2)
+ retl
+ nop
+END(bcopy)
+
+/*
+ * void ovbcopy(const void *src, void *dst, size_t len)
+ * XXX handle overlap...
+ */
+ENTRY(ovbcopy)
+ BCOPY(%o0, %o1, %o2)
+ retl
+ nop
+END(ovbcopy)
+
+/*
+ * void bzero(void *b, size_t len)
+ */
+ENTRY(bzero)
+ brz,pn %o1, 1f
+ nop
+1: deccc %o1
+ stb %g0, [%o0]
+ bne,pt %xcc, 1b
+ inc %o0
+2: retl
+ nop
+END(bzero)
+
+/*
+ * void *memcpy(void *dst, const void *src, size_t len)
+ */
+ENTRY(memcpy)
+ BCOPY(%o1, %o0, %o2)
+ retl
+ nop
+END(memcpy)
+
+/*
+ * int copyin(const void *uaddr, void *kaddr, size_t len)
+ */
+ENTRY(copyin)
+ CATCH_SETUP(.Lefault)
+ COPYIN(%o0, %o1, %o2)
+ CATCH_END()
+ retl
+ clr %o0
+END(copyin)
+
+/*
+ * int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
+ */
+ENTRY(copyinstr)
+ CATCH_SETUP(.Lefault)
+ COPYINSTR(%o0, %o1, %o2, %o3)
+ CATCH_END()
+ retl
+ mov %o5, %o0
+END(copyinstr)
+
+/*
+ * int copyout(const void *kaddr, void *uaddr, size_t len)
+ */
+ENTRY(copyout)
+ CATCH_SETUP(.Lefault)
+ COPYOUT(%o0, %o1, %o2)
+ CATCH_END()
+ retl
+ clr %o0
+END(copyout)
+
+.Lefault:
+ CATCH_END()
+ retl
+ mov EFAULT, %o0
+
+/*
+ * int copystr(const void *src, void *dst, size_t len, size_t *done)
+ */
+ENTRY(copystr)
+ COPYSTR(%o0, %o1, %o2, %o3)
+ retl
+ mov %o5, %o0
+END(copystr)
+
+/*
+ * int fubyte(const void *base)
+ */
+ENTRY(fubyte)
+ FU_ALIGNED(lduba, .Lfsfault)
+END(fubyte)
+
+/*
+ * int fusword(const void *base)
+ */
+ENTRY(fusword)
+ FU_BYTES(lduwa, 2, .Lfsfault)
+END(fusword)
+
+/*
+ * int fuswintr(const void *base)
+ */
+ENTRY(fuswintr)
+ FU_BYTES(lduwa, 2, fsbail)
+END(fuswintr)
+
+/*
+ * int fuword(const void *base)
+ */
+ENTRY(fuword)
+ FU_BYTES(ldxa, 8, .Lfsfault)
+END(fuword)
+
+/*
+ * int subyte(const void *base)
+ */
+ENTRY(subyte)
+ SU_ALIGNED(stba, .Lfsfault)
+END(subyte)
+
+/*
+ * int suibyte(const void *base)
+ */
+ENTRY(suibyte)
+ SU_ALIGNED(stba, fsbail)
+END(suibyte)
+
+/*
+ * int susword(const void *base)
+ */
+ENTRY(susword)
+ SU_BYTES(stwa, 2, .Lfsfault)
+END(susword)
+
+/*
+ * int suswintr(const void *base)
+ */
+ENTRY(suswintr)
+ SU_BYTES(stwa, 2, fsbail)
+END(suswintr)
+
+/*
+ * int suword(const void *base)
+ */
+ENTRY(suword)
+ SU_BYTES(stwa, 8, .Lfsfault)
+END(suword)
+
+ENTRY(fsbail)
+ nop
+.Lfsfault:
+ CATCH_END()
+.Lfsalign:
+ retl
+ mov -1, %o0
+END(fsbail)
+
+ENTRY(longjmp)
+ set 1, %g3
+ movrz %o1, %o1, %g3
+ mov %o0, %g1
+ ldx [%g1 + JB_FP], %g2
+1: cmp %fp, %g2
+ bl,a,pt %xcc, 1b
+ restore
+ bne,pn %xcc, 2f
+ ldx [%g1 + JB_SP], %o2
+ ldx [%g1 + JB_PC], %o3
+ cmp %o2, %sp
+ blt,pn %xcc, 2f
+ movge %xcc, %o2, %sp
+ jmp %o3 + 8
+ mov %g3, %o0
+2: PANIC("longjmp botch", %l1)
+END(longjmp)
+
+ENTRY(setjmp)
+ stx %sp, [%o0 + JB_SP]
+ stx %o7, [%o0 + JB_PC]
+ stx %fp, [%o0 + JB_FP]
+ retl
+ clr %o0
+END(setjmp)
+
+/*
+ * void openfirmware(cell_t args[])
+ */
+ENTRY(openfirmware)
+ save %sp, -CCFSZ, %sp
+ rdpr %pstate, %l0
+ rdpr %tl, %l1
+ rdpr %tba, %l2
+ wrpr %g0, 0, %tl
+ setx ofw_tba, %l4, %l3
+ ldx [%l3], %l3
+ setx ofw_vec, %l5, %l4
+ ldx [%l4], %l4
+ wrpr %l3, 0, %tba
+ call %l4
+ mov %i0, %o0
+ wrpr %l0, 0, %pstate
+ wrpr %l1, 0, %tl
+ wrpr %l2, 0, %tba
+ ret
+ restore
+END(openfirmware)
diff --git a/sys/sparc64/sparc64/swtch.S b/sys/sparc64/sparc64/swtch.S
new file mode 100644
index 0000000..e437522
--- /dev/null
+++ b/sys/sparc64/sparc64/swtch.S
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asmacros.h>
+
+#include "assym.s"
+
+ENTRY(cpu_switch)
+ save %sp, -CCFSZ, %sp
+ call chooseproc
+ ldx [PCPU(CURPROC)], %l0
+ cmp %l0, %o0
+ be,pn %xcc, 2f
+ ldx [PCPU(FPCURPROC)], %l2
+ cmp %l0, %l2
+ bne,pt %xcc, 1f
+ ldx [PCPU(CURPCB)], %l1
+ PANIC("cpu_switch: fpcurproc", %i0)
+1: flushw
+ wrpr %g0, 0, %cleanwin
+ stx %fp, [%l1 + PCB_FP]
+ stx %i7, [%l1 + PCB_PC]
+ ldx [%o0 + P_ADDR], %o1
+ ldx [%o1 + U_PCB + PCB_FP], %fp
+ ldx [%o1 + U_PCB + PCB_PC], %i7
+ stx %o0, [PCPU(CURPROC)]
+ stx %o1, [PCPU(CURPCB)]
+ sub %fp, CCFSZ, %sp
+2: ret
+ restore
+END(cpu_switch)
+
+ENTRY(savectx)
+ save %sp, -CCFSZ, %sp
+ flushw
+ ldx [PCPU(FPCURPROC)], %l0
+ brz,pt %l0, 1f
+ nop
+ illtrap
+1: stx %fp, [%i0 + PCB_FP]
+ stx %i7, [%i0 + PCB_PC]
+ ret
+ restore %g0, 0, %o0
+END(savectx)
diff --git a/sys/sparc64/sparc64/swtch.s b/sys/sparc64/sparc64/swtch.s
new file mode 100644
index 0000000..e437522
--- /dev/null
+++ b/sys/sparc64/sparc64/swtch.s
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asmacros.h>
+
+#include "assym.s"
+
+ENTRY(cpu_switch)
+ save %sp, -CCFSZ, %sp
+ call chooseproc
+ ldx [PCPU(CURPROC)], %l0
+ cmp %l0, %o0
+ be,pn %xcc, 2f
+ ldx [PCPU(FPCURPROC)], %l2
+ cmp %l0, %l2
+ bne,pt %xcc, 1f
+ ldx [PCPU(CURPCB)], %l1
+ PANIC("cpu_switch: fpcurproc", %i0)
+1: flushw
+ wrpr %g0, 0, %cleanwin
+ stx %fp, [%l1 + PCB_FP]
+ stx %i7, [%l1 + PCB_PC]
+ ldx [%o0 + P_ADDR], %o1
+ ldx [%o1 + U_PCB + PCB_FP], %fp
+ ldx [%o1 + U_PCB + PCB_PC], %i7
+ stx %o0, [PCPU(CURPROC)]
+ stx %o1, [PCPU(CURPCB)]
+ sub %fp, CCFSZ, %sp
+2: ret
+ restore
+END(cpu_switch)
+
+ENTRY(savectx)
+ save %sp, -CCFSZ, %sp
+ flushw
+ ldx [PCPU(FPCURPROC)], %l0
+ brz,pt %l0, 1f
+ nop
+ illtrap
+1: stx %fp, [%i0 + PCB_FP]
+ stx %i7, [%i0 + PCB_PC]
+ ret
+ restore %g0, 0, %o0
+END(savectx)
diff --git a/sys/sparc64/sparc64/trap.c b/sys/sparc64/sparc64/trap.c
index 78434a1..7aec69b 100644
--- a/sys/sparc64/sparc64/trap.c
+++ b/sys/sparc64/sparc64/trap.c
@@ -26,15 +26,74 @@
* $FreeBSD$
*/
+#include "opt_ddb.h"
+
#include <sys/param.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
#include <sys/proc.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
#include <machine/frame.h>
+#include <machine/pv.h>
+#include <machine/trap.h>
+#include <machine/tte.h>
+#include <machine/tlb.h>
+#include <machine/tsb.h>
-void trap(u_int type, struct trapframe *tf);
+void trap(struct trapframe *tf);
+
+const char *trap_msg[] = {
+ "reserved",
+ "power on reset",
+ "watchdog reset",
+ "externally initiated reset",
+ "software initiated reset",
+ "red state exception",
+ "instruction access exception",
+ "instruction access error",
+ "illegal instruction",
+ "privileged opcode",
+ "floating point disabled",
+ "floating point exception ieee 754",
+ "floating point exception other",
+ "tag overflow",
+ "division by zero",
+ "data access exception",
+ "data access error",
+ "memory address not aligned",
+ "lddf memory address not aligned",
+ "stdf memory address not aligned",
+ "privileged action",
+ "interrupt vector",
+ "physical address watchpoint",
+ "virtual address watchpoint",
+ "corrected ecc error",
+ "fast instruction access mmu miss",
+ "fast data access mmu miss",
+ "fast data access protection",
+ "bad spill",
+ "bad fill",
+ "breakpoint",
+};
void
-trap(u_int type, struct trapframe *tf)
+trap(struct trapframe *tf)
{
- TODO;
+
+ switch (tf->tf_type) {
+#ifdef DDB
+ case T_BREAKPOINT | T_KERNEL:
+ if (kdb_trap(tf) != 0)
+ return;
+ break;
+#endif
+ default:
+ break;
+ }
+ panic("trap: %s", trap_msg[tf->tf_type & ~T_KERNEL]);
}
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
new file mode 100644
index 0000000..1a7af7b
--- /dev/null
+++ b/sys/sparc64/sparc64/tsb.c
@@ -0,0 +1,279 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI: pmap.c,v 1.28.2.15 2000/04/27 03:10:31 cp Exp
+ * $FreeBSD$
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/linker_set.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+#include <machine/cpufunc.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+#include <machine/pmap.h>
+#include <machine/pv.h>
+#include <machine/tlb.h>
+#include <machine/tsb.h>
+#include <machine/tte.h>
+
+vm_offset_t tsb_kernel_phys;
+
+/*
+ * Return the TSB bucket for virtual address va at the given level.
+ * Level-0 buckets live in the always-resident primary TSB, so they are
+ * returned directly.  For deeper levels the page holding the bucket may
+ * itself be absent: its mapping is looked up via the last stte of the
+ * corresponding parent-level bucket.  If that mapping is missing and
+ * 'allocate' is clear, NULL is returned; if 'allocate' is set, a page
+ * is faulted in via tsb_page_fault().  On a hit the mapping is loaded
+ * into a reserved DTLB slot so the bucket can be touched safely.
+ */
+struct stte *
+tsb_get_bucket(pmap_t pm, u_int level, vm_offset_t va, int allocate)
+{
+ struct stte *bucket;
+ struct stte *stp;
+ vm_offset_t bva;
+ u_long bits;
+
+ bucket = tsb_vtobucket(va, level);
+ if (level == 0)
+  return (bucket);
+ /*
+  * Compute the page-offset bits selecting this bucket's page within
+  * the parent level.  NOTE(review): depends on the tsb_mask()/
+  * tsb_mask_width() layout in machine/tsb.h — confirm against it.
+  */
+ bits = (va & ((tsb_mask(level) & ~tsb_mask(level - 1)) << PAGE_SHIFT))
+     >> tsb_mask_width(level);
+ if (level == 1) {
+  bits |= ((long)bucket & TSB_LEVEL1_BUCKET_MASK) >>
+      TSB_LEVEL1_BUCKET_SHIFT;
+ }
+ /* stp is the last stte of the parent bucket; it maps this bucket's page. */
+ bva = trunc_page((u_long)tsb_vtobucket(va, level - 1)) | bits;
+ stp = (struct stte *)(long)bva + tsb_bucket_size(level - 1) - 1;
+ if (tte_match(stp->st_tte, (u_long)bucket) == 0) {
+  if (!allocate)
+   return (NULL);
+  tsb_page_fault(pm, level, trunc_page((u_long)bucket), stp);
+ } else {
+  tlb_store_slot(TLB_DTLB, trunc_page((u_long)bucket),
+      stp->st_tte, tsb_tlb_slot(1));
+ }
+ return (bucket);
+}
+
+/*
+ * Handle a (soft) TSB miss of the given trap type for pmap pm.  Looks
+ * the faulting address up in the TSB; on a hit for a data MMU miss the
+ * translation is loaded into the DTLB.  Returns 0 on success, EFAULT
+ * when there is no mapping or the trap type is not handled here (the
+ * caller is then expected to fall back to vm_fault or panic).
+ */
+int
+tsb_miss(pmap_t pm, u_int type, struct mmuframe *mf)
+{
+ struct stte *stp;
+ vm_offset_t va;
+
+ va = mf->mf_tar;
+ if ((stp = tsb_stte_lookup(pm, va)) == NULL)
+  return (EFAULT);
+ switch (type) {
+ case T_DMMU_MISS:
+  tlb_store(TLB_DTLB, va, stp->st_tte);
+  break;
+ default:
+  return (EFAULT);
+ }
+ return (0);
+}
+
+/*
+ * Allocate a physical page to back a TSB bucket page at va and return
+ * a tte mapping it.  XXX: currently an unimplemented stub that returns
+ * a zeroed (invalid) tte — callers will install a useless mapping until
+ * this is filled in.
+ */
+struct tte
+tsb_page_alloc(pmap_t pm, vm_offset_t va)
+{
+ struct tte tte;
+
+ /* XXX */
+ tte.tte_tag = 0;
+ tte.tte_data = 0;
+ return (tte);
+}
+
+/*
+ * Fault in a missing TSB bucket page at va for the given level: allocate
+ * a backing page, record its tte in the parent stte (*stp), load the
+ * mapping into the level's reserved DTLB slot, and initialize the new
+ * page's bucket structure.
+ */
+void
+tsb_page_fault(pmap_t pm, int level, vm_offset_t va, struct stte *stp)
+{
+ struct tte tte;
+
+ tte = tsb_page_alloc(pm, va);
+ stp->st_tte = tte;
+ tlb_store_slot(TLB_DTLB, va, stp->st_tte, tsb_tlb_slot(level));
+ tsb_page_init((void *)va, level);
+}
+
+/*
+ * Initialize a freshly allocated TSB bucket page: zero it, then mark
+ * the last stte of every bucket on the page with TD_TSB.  That sentinel
+ * entry is reserved to map the next-level bucket page (see
+ * tsb_get_bucket()) and must never be used for a normal translation.
+ */
+void
+tsb_page_init(void *va, int level)
+{
+ struct stte *stp;
+ caddr_t p;
+ u_int bsize;
+ u_int inc;
+ u_int i;
+
+ /* Stride between consecutive buckets within the page. */
+ inc = PAGE_SIZE >> TSB_BUCKET_SPREAD_SHIFT;
+ if (level == 0)
+  inc >>= TSB_SECONDARY_BUCKET_SHIFT - TSB_PRIMARY_BUCKET_SHIFT;
+ bsize = tsb_bucket_size(level);
+ bzero(va, PAGE_SIZE);
+ for (i = 0; i < PAGE_SIZE; i += inc) {
+  p = (caddr_t)va + i;
+  stp = (struct stte *)p + bsize - 1;
+  stp->st_tte.tte_data = TD_TSB;
+ }
+}
+
+/*
+ * Look up the stte mapping virtual address va in pmap pm.  Walks the
+ * TSB levels from 0 to TSB_DEPTH, scanning each level's bucket for a
+ * matching tte.  Stops early when a level's bucket page is not present
+ * (nothing deeper can exist).  Returns NULL when no mapping is found.
+ */
+struct stte *
+tsb_stte_lookup(pmap_t pm, vm_offset_t va)
+{
+ struct stte *bucket;
+ u_int level;
+ u_int i;
+
+ va = trunc_page(va);
+ for (level = 0; level < TSB_DEPTH; level++) {
+  bucket = tsb_get_bucket(pm, level, va, 0);
+  if (bucket == NULL)
+   break;
+  for (i = 0; i < tsb_bucket_size(level); i++) {
+   if (tte_match(bucket[i].st_tte, va))
+    return (&bucket[i]);
+  }
+ }
+ return (NULL);
+}
+
+/*
+ * Try to promote the mapping in *stp (found at a deeper level) into the
+ * level-0 bucket for va, so future lookups hit sooner.  Scans the
+ * level-0 bucket starting at a pseudo-random slot (low bits of the tick
+ * register) for an entry that is invalid, or valid but neither locked
+ * nor recently referenced; if one exists the mapping is removed from
+ * its current slot and re-entered via tsb_tte_enter().  When the bucket
+ * is full of hot entries the original stp is returned unchanged.
+ */
+struct stte *
+tsb_stte_promote(pmap_t pm, vm_offset_t va, struct stte *stp)
+{
+ struct stte *bucket;
+ struct tte tte;
+ int bmask;
+ int b0;
+ int i;
+
+ bmask = tsb_bucket_mask(0);
+ bucket = tsb_vtobucket(va, 0);
+ b0 = rd(tick) & bmask;
+ i = b0;
+ do {
+  if ((bucket[i].st_tte.tte_data & TD_V) == 0 ||
+      (bucket[i].st_tte.tte_data & (TD_L | TD_REF)) == 0) {
+   tte = stp->st_tte;
+   stp->st_tte.tte_data = 0;
+   /* Drop the old slot's pv entry before re-entering. */
+   pv_remove_virt(stp);
+   return (tsb_tte_enter(pm, va, tte));
+  }
+ } while ((i = (i + 1) & bmask) != b0);
+ return (stp);
+}
+
+/*
+ * Remove the mapping in *stp: invalidate the tte in place, then demap
+ * the translation from the local TLBs (a copy is taken first because
+ * the demap needs the va/ctx from the still-valid tte bits).
+ */
+void
+tsb_stte_remove(struct stte *stp)
+{
+ struct tte tte;
+
+ tte = stp->st_tte;
+ tte_invalidate(&stp->st_tte);
+ tsb_tte_local_remove(&tte);
+}
+
+/*
+ * Demap the translation described by *tp from this CPU's instruction
+ * and data TLBs, using the va and context recovered from the tte.
+ * Local only — no cross-CPU shootdown is performed here.
+ */
+void
+tsb_tte_local_remove(struct tte *tp)
+{
+ vm_offset_t va;
+ u_int ctx;
+
+ va = tte_get_va(*tp);
+ ctx = tte_get_ctx(*tp);
+ tlb_page_demap(TLB_DTLB | TLB_ITLB, ctx, va);
+}
+
+/*
+ * Enter the translation 'tte' for va into pm's TSB and return the stte
+ * it was stored in.  At each level a victim slot is chosen from the
+ * bucket, scanning from a pseudo-random start (tick register):
+ * sentinel (TD_TSB) and locked (TD_L) entries are never touched;
+ * an invalid slot is taken immediately; otherwise the first
+ * unreferenced entry is preferred, falling back to any replaceable
+ * entry.  A valid mapping that gets displaced is pushed down and
+ * re-entered at the next level, so one call may ripple through several
+ * levels.  Panics if no slot exists at any level (TSB full).
+ */
+struct stte *
+tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte)
+{
+ struct stte *bucket;
+ struct stte *nstp;
+ struct stte *rstp;
+ struct stte *stp;
+ struct tte otte;
+ u_int bmask;
+ int level;
+ int b0;
+ int i;
+
+ nstp = NULL;
+ for (level = 0; level < TSB_DEPTH; level++) {
+  bucket = tsb_get_bucket(pm, level, va, 1);
+
+  stp = NULL;
+  rstp = NULL;
+  bmask = tsb_bucket_mask(level);
+  b0 = rd(tick) & bmask;
+  i = b0;
+  do {
+   /* Never evict bucket-page sentinels or locked entries. */
+   if ((bucket[i].st_tte.tte_data & (TD_TSB | TD_L)) != 0)
+    continue;
+   if ((bucket[i].st_tte.tte_data & TD_V) == 0) {
+    stp = &bucket[i];
+    break;
+   }
+   if (stp == NULL) {
+    if ((bucket[i].st_tte.tte_data & TD_REF) == 0)
+     stp = &bucket[i];
+    else if (rstp == NULL)
+     rstp = &bucket[i];
+   }
+  } while ((i = (i + 1) & bmask) != b0);
+
+  if (stp == NULL)
+   stp = rstp;
+  if (stp == NULL)
+   panic("tsb_enter_tte");
+  /* Remember the slot the original tte landed in for the caller. */
+  if (nstp == NULL)
+   nstp = stp;
+
+  otte = stp->st_tte;
+  if (otte.tte_data & TD_V)
+   pv_remove_virt(stp);
+  stp->st_tte = tte;
+  pv_insert(pm, TD_PA(tte.tte_data), va, stp);
+  if ((otte.tte_data & TD_V) == 0)
+   break;
+  /* Displaced a valid mapping: push it down to the next level. */
+  tte = otte;
+  va = tte_get_va(tte);
+ }
+ if (level >= TSB_DEPTH)
+  panic("tsb_enter_tte: TSB full");
+ return (nstp);
+}
diff --git a/sys/sparc64/sparc64/upa.c b/sys/sparc64/sparc64/upa.c
new file mode 100644
index 0000000..f29defe
--- /dev/null
+++ b/sys/sparc64/sparc64/upa.c
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+
+static int upa_probe(device_t dev);
+static int upa_attach(device_t dev);
+
+/*
+ * Newbus glue for the UPA bus (UltraSPARC interconnect): a minimal
+ * pass-through bus driver attached directly under root.  Only probe
+ * and attach are implemented; both simply defer to the generic bus
+ * methods so children can probe/attach themselves.
+ */
+static device_method_t upa_methods[] = {
+ /* Device interface. */
+ DEVMETHOD(device_probe, upa_probe),
+ DEVMETHOD(device_attach, upa_attach),
+
+ { NULL, NULL }
+};
+
+static driver_t upa_driver = {
+ "upa",
+ upa_methods,
+ 1,   /* no softc state needed; size must be non-zero */
+};
+static devclass_t upa_devclass;
+
+DRIVER_MODULE(upa, root, upa_driver, upa_devclass, 0, 0);
+
+/* Probe: delegate to the generic bus probe, which probes all children. */
+static int
+upa_probe(device_t dev)
+{
+ return (bus_generic_probe(dev));
+}
+
+/* Attach: delegate to the generic bus attach, which attaches all children. */
+static int
+upa_attach(device_t dev)
+{
+ return (bus_generic_attach(dev));
+}
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index e96db4c..b14c9a4 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -31,10 +31,16 @@
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/unistd.h>
+#include <sys/user.h>
+
+#include <dev/ofw/openfirm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
#include <machine/md_var.h>
void
@@ -46,18 +52,46 @@ cpu_exit(struct proc *p)
void
+/*
+ * Machine-dependent part of fork(): copy p1's pcb and trapframe into
+ * p2's upages, then build a fake stack frame so that the first switch
+ * to p2 resumes in fork_trampoline, which will call fork_return(p2, tf).
+ * No-op unless RFPROC was requested.  NOTE(review): saving p1's FPU
+ * state is not implemented yet — panics if p1 owns the FPU.
+ */
 cpu_fork(struct proc *p1, struct proc *p2, int flags)
 {
- TODO;
+ struct trapframe *tf;
+ struct frame *fp;
+ struct pcb *pcb;
+
+ if ((flags & RFPROC) == 0)
+  return;
+ if (PCPU_GET(fpcurproc) == p1)
+  panic("cpu_fork: save fp state\n");
+
+ /* Duplicate the parent's pcb in the child's upages. */
+ pcb = &p2->p_addr->u_pcb;
+ bcopy(&p1->p_addr->u_pcb, pcb, sizeof(*pcb));
+
+ /* Place the child's trapframe at the top of its kernel stack. */
+ tf = (struct trapframe *)((caddr_t)pcb + UPAGES * PAGE_SIZE) - 1;
+ bcopy(p1->p_frame, tf, sizeof(*tf));
+ p2->p_frame = tf;
+
+ /*
+  * Build the frame fork_trampoline pops: locals carry the function
+  * to call, its argument, and the trapframe.
+  */
+ fp = (struct frame *)tf - 1;
+ fp->f_local[0] = (u_long)fork_return;
+ fp->f_local[1] = (u_long)p2;
+ fp->f_local[2] = (u_long)tf;
+ pcb->pcb_fp = (u_long)fp - SPOFF;
+ /* -8 so the ret/retl sequence resumes at fork_trampoline itself. */
+ pcb->pcb_pc = (u_long)fork_trampoline - 8;
 }
void
cpu_reset(void)
{
+ OF_exit();
}
 void
+/*
+ * Redirect the first function a newly forked kernel process runs:
+ * patch the fork_trampoline frame built by cpu_fork() (locals 0/1 hold
+ * the function and its argument) before the process first runs.
+ */
 cpu_set_fork_handler(struct proc *p, void (*func)(void *), void *arg)
 {
- TODO;
+ struct frame *fp;
+ struct pcb *pcb;
+
+ pcb = &p->p_addr->u_pcb;
+ fp = (struct frame *)(pcb->pcb_fp + SPOFF);
+ fp->f_local[0] = (u_long)func;
+ fp->f_local[1] = (u_long)arg;
 }
void
OpenPOWER on IntegriCloud