author     cognet <cognet@FreeBSD.org>	2004-05-14 11:46:45 +0000
committer  cognet <cognet@FreeBSD.org>	2004-05-14 11:46:45 +0000
commit     295dcdd68788bc2248ebf4ab1df93d68da49b711 (patch)
tree       3eb2afc6dea26c92444202ed2d06bac484ff40ae
parent     dd167f263352cec86bd3c4298031380259b2fb17 (diff)
Import FreeBSD/arm kernel bits.

It only supports sa1110 (on simics) right now, but xscale support
should come soon.

Some of the initial work has been provided by:
Stephane Potvin <sepotvin at videotron.ca>

Most of this comes from NetBSD.
-rw-r--r--  sys/arm/arm/autoconf.c | 98
-rw-r--r--  sys/arm/arm/bcopy_page.S | 273
-rw-r--r--  sys/arm/arm/bcopyinout.S | 810
-rw-r--r--  sys/arm/arm/bcopyinout_xscale.S | 1357
-rw-r--r--  sys/arm/arm/blockio.S | 587
-rw-r--r--  sys/arm/arm/bootconfig.c | 126
-rw-r--r--  sys/arm/arm/bus_space_asm_generic.S | 353
-rw-r--r--  sys/arm/arm/busdma_machdep.c | 677
-rw-r--r--  sys/arm/arm/copystr.S | 227
-rw-r--r--  sys/arm/arm/cpufunc.c | 2177
-rw-r--r--  sys/arm/arm/cpufunc_asm.S | 157
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm10.S | 269
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm3.S | 61
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm67.S | 111
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm7tdmi.S | 100
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm8.S | 284
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm9.S | 137
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv4.S | 67
-rw-r--r--  sys/arm/arm/cpufunc_asm_ixp12x0.S | 90
-rw-r--r--  sys/arm/arm/cpufunc_asm_sa1.S | 316
-rw-r--r--  sys/arm/arm/cpufunc_asm_sa11x0.S | 125
-rw-r--r--  sys/arm/arm/cpufunc_asm_xscale.S | 497
-rw-r--r--  sys/arm/arm/critical.c | 50
-rw-r--r--  sys/arm/arm/db_disasm.c | 79
-rw-r--r--  sys/arm/arm/db_interface.c | 334
-rw-r--r--  sys/arm/arm/db_trace.c | 250
-rw-r--r--  sys/arm/arm/disassem.c | 681
-rw-r--r--  sys/arm/arm/dump_machdep.c | 45
-rw-r--r--  sys/arm/arm/elf_machdep.c | 213
-rw-r--r--  sys/arm/arm/exception.S | 392
-rw-r--r--  sys/arm/arm/fiq.c | 169
-rw-r--r--  sys/arm/arm/fiq_subr.S | 101
-rw-r--r--  sys/arm/arm/fusu.S | 403
-rw-r--r--  sys/arm/arm/genassym.c | 112
-rw-r--r--  sys/arm/arm/identcpu.c | 366
-rw-r--r--  sys/arm/arm/in_cksum.c | 250
-rw-r--r--  sys/arm/arm/in_cksum_arm.S | 474
-rw-r--r--  sys/arm/arm/intr.c | 150
-rw-r--r--  sys/arm/arm/irq_dispatch.S | 155
-rw-r--r--  sys/arm/arm/locore.S | 312
-rw-r--r--  sys/arm/arm/machdep.c | 409
-rw-r--r--  sys/arm/arm/nexus.c | 243
-rw-r--r--  sys/arm/arm/nexus_io.c | 208
-rw-r--r--  sys/arm/arm/nexus_io_asm.S | 114
-rw-r--r--  sys/arm/arm/pmap.c | 4650
-rw-r--r--  sys/arm/arm/setcpsr.S | 80
-rw-r--r--  sys/arm/arm/setstack.s | 94
-rw-r--r--  sys/arm/arm/support.S | 72
-rw-r--r--  sys/arm/arm/swtch.S | 543
-rw-r--r--  sys/arm/arm/sys_machdep.c | 69
-rw-r--r--  sys/arm/arm/trap.c | 917
-rw-r--r--  sys/arm/arm/uio_machdep.c | 131
-rw-r--r--  sys/arm/arm/undefined.c | 291
-rw-r--r--  sys/arm/arm/vectors.S | 104
-rw-r--r--  sys/arm/arm/vm_machdep.c | 348
-rw-r--r--  sys/arm/conf/SIMICS | 83
-rw-r--r--  sys/arm/include/_inttypes.h | 220
-rw-r--r--  sys/arm/include/armreg.h | 299
-rw-r--r--  sys/arm/include/asm.h | 147
-rw-r--r--  sys/arm/include/asmacros.h | 204
-rw-r--r--  sys/arm/include/atomic.h | 197
-rw-r--r--  sys/arm/include/blockio.h | 56
-rw-r--r--  sys/arm/include/bootconfig.h | 58
-rw-r--r--  sys/arm/include/bus.h | 769
-rw-r--r--  sys/arm/include/clock.h | 32
-rw-r--r--  sys/arm/include/cpu.h | 49
-rw-r--r--  sys/arm/include/cpuconf.h | 171
-rw-r--r--  sys/arm/include/cpufunc.h | 530
-rw-r--r--  sys/arm/include/critical.h | 54
-rw-r--r--  sys/arm/include/db_machdep.h | 75
-rw-r--r--  sys/arm/include/disassem.h | 54
-rw-r--r--  sys/arm/include/fiq.h | 71
-rw-r--r--  sys/arm/include/float.h | 75
-rw-r--r--  sys/arm/include/floatingpoint.h | 42
-rw-r--r--  sys/arm/include/fp.h | 88
-rw-r--r--  sys/arm/include/frame.h | 190
-rw-r--r--  sys/arm/include/ieee.h | 154
-rw-r--r--  sys/arm/include/ieeefp.h | 51
-rw-r--r--  sys/arm/include/in_cksum.h | 51
-rw-r--r--  sys/arm/include/intr.h | 83
-rw-r--r--  sys/arm/include/katelib.h | 103
-rw-r--r--  sys/arm/include/machdep.h | 13
-rw-r--r--  sys/arm/include/md_var.h | 36
-rw-r--r--  sys/arm/include/metadata.h | 34
-rw-r--r--  sys/arm/include/mutex.h | 32
-rw-r--r--  sys/arm/include/param.h | 42
-rw-r--r--  sys/arm/include/pcb.h | 99
-rw-r--r--  sys/arm/include/pcpu.h | 60
-rw-r--r--  sys/arm/include/pmap.h | 586
-rw-r--r--  sys/arm/include/proc.h | 57
-rw-r--r--  sys/arm/include/profile.h | 139
-rw-r--r--  sys/arm/include/psl.h | 83
-rw-r--r--  sys/arm/include/pte.h | 335
-rw-r--r--  sys/arm/include/ptrace.h | 8
-rw-r--r--  sys/arm/include/reg.h | 32
-rw-r--r--  sys/arm/include/reloc.h | 53
-rw-r--r--  sys/arm/include/resource.h | 45
-rw-r--r--  sys/arm/include/runq.h | 46
-rw-r--r--  sys/arm/include/setjmp.h | 93
-rw-r--r--  sys/arm/include/sf_buf.h | 58
-rw-r--r--  sys/arm/include/sigframe.h | 2
-rw-r--r--  sys/arm/include/smp.h | 6
-rw-r--r--  sys/arm/include/stdarg.h | 85
-rw-r--r--  sys/arm/include/swi.h | 23
-rw-r--r--  sys/arm/include/trap.h | 4
-rw-r--r--  sys/arm/include/ucontext.h | 151
-rw-r--r--  sys/arm/include/undefined.h | 90
-rw-r--r--  sys/arm/include/utrap.h | 110
-rw-r--r--  sys/arm/include/vmparam.h | 129
-rw-r--r--  sys/arm/sa11x0/assabet_machdep.c | 497
-rw-r--r--  sys/arm/sa11x0/files.sa11x0 | 11
-rw-r--r--  sys/arm/sa11x0/sa11x0.c | 261
-rw-r--r--  sys/arm/sa11x0/sa11x0_dmacreg.h | 97
-rw-r--r--  sys/arm/sa11x0/sa11x0_gpioreg.h | 100
-rw-r--r--  sys/arm/sa11x0/sa11x0_io.c | 252
-rw-r--r--  sys/arm/sa11x0/sa11x0_io_asm.S | 290
-rw-r--r--  sys/arm/sa11x0/sa11x0_irq.S | 223
-rw-r--r--  sys/arm/sa11x0/sa11x0_irqhandler.c | 233
-rw-r--r--  sys/arm/sa11x0/sa11x0_ost.c | 355
-rw-r--r--  sys/arm/sa11x0/sa11x0_ostreg.h | 83
-rw-r--r--  sys/arm/sa11x0/sa11x0_ppcreg.h | 67
-rw-r--r--  sys/arm/sa11x0/sa11x0_reg.h | 81
-rw-r--r--  sys/arm/sa11x0/sa11x0_var.h | 79
124 files changed, 30630 insertions, 69 deletions
diff --git a/sys/arm/arm/autoconf.c b/sys/arm/arm/autoconf.c
new file mode 100644
index 0000000..68b1829
--- /dev/null
+++ b/sys/arm/arm/autoconf.c
@@ -0,0 +1,98 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)autoconf.c 7.1 (Berkeley) 5/9/91
+ * from: FreeBSD: src/sys/i386/i386/autoconf.c,v 1.156
+ */
+
+/*
+ * Setup the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring. Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/disklabel.h>
+#include <sys/reboot.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/cons.h>
+
+static void configure_first (void *);
+static void configure (void *);
+static void configure_final (void *);
+
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+/* SI_ORDER_SECOND is hookable */
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+/* SI_ORDER_MIDDLE is hookable */
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
+
+device_t nexus_dev;
+
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+static void
+configure_first(void *dummy)
+{
+}
+
+static void
+configure(void *dummy)
+{
+ device_add_child(root_bus, "nexus", 0);
+
+ root_bus_configure();
+
+ cold = 0;
+ cninit_finish();
+}
+
+static void
+configure_final(void *dummy)
+{
+}
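
[Editor's note] On the SYSINIT ordering above: autoconf.c occupies
SI_ORDER_FIRST, SI_ORDER_THIRD and SI_ORDER_ANY within SI_SUB_CONFIGURE,
deliberately leaving SI_ORDER_SECOND and SI_ORDER_MIDDLE free for other
code to hook, as the "is hookable" comments say. As a hedged sketch (the
handler name below is hypothetical and not part of this commit), platform
code could run between configure_first() and configure() like this:

    #include <sys/param.h>
    #include <sys/kernel.h>

    static void
    example_preconfig_hook(void *dummy)
    {
            /* Runs after configure_first(), before the nexus is probed. */
    }
    SYSINIT(examplehook, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
        example_preconfig_hook, NULL);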
diff --git a/sys/arm/arm/bcopy_page.S b/sys/arm/arm/bcopy_page.S
new file mode 100644
index 0000000..c310143
--- /dev/null
+++ b/sys/arm/arm/bcopy_page.S
@@ -0,0 +1,273 @@
+/* $NetBSD: bcopy_page.S,v 1.7 2003/10/13 21:03:13 scw Exp $ */
+
+/*
+ * Copyright (c) 1995 Scott Stevens
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Scott Stevens.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * bcopy_page.S
+ *
+ * page optimised bcopy and bzero routines
+ *
+ * Created : 08/04/95
+ */
+
+#include <machine/asm.h>
+#include <machine/param.h>
+__FBSDID("$FreeBSD$");
+#ifndef __XSCALE__
+
+/* #define BIG_LOOPS */
+
+/*
+ * bcopy_page(src, dest)
+ *
+ * Optimised copy page routine.
+ *
+ * On entry:
+ * r0 - src address
+ * r1 - dest address
+ *
+ * Requires:
+ * number of bytes per page (PAGE_SIZE) is a multiple of 512 (BIG_LOOPS), 128
+ * otherwise.
+ */
+
+#define CHUNK_SIZE 32
+
+#define PREFETCH_FIRST_CHUNK /* nothing */
+#define PREFETCH_NEXT_CHUNK /* nothing */
+
+#ifndef COPY_CHUNK
+#define COPY_CHUNK \
+ PREFETCH_NEXT_CHUNK ; \
+ ldmia r0!, {r3-r8,ip,lr} ; \
+ stmia r1!, {r3-r8,ip,lr}
+#endif /* ! COPY_CHUNK */
+
+#ifndef SAVE_REGS
+#define SAVE_REGS stmfd sp!, {r4-r8, lr}
+#define RESTORE_REGS ldmfd sp!, {r4-r8, pc}
+#endif
+
+ENTRY(bcopy_page)
+ PREFETCH_FIRST_CHUNK
+ SAVE_REGS
+#ifdef BIG_LOOPS
+ mov r2, #(PAGE_SIZE >> 9)
+#else
+ mov r2, #(PAGE_SIZE >> 7)
+#endif
+
+1:
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+
+#ifdef BIG_LOOPS
+ /* There is little point making the loop any larger; unless we are
+ running with the cache off, the load/store overheads will
+ completely dominate this loop. */
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+#endif
+ subs r2, r2, #1
+ bne 1b
+
+ RESTORE_REGS /* ...and return. */
+
+/*
+ * bzero_page(dest)
+ *
+ * Optimised zero page routine.
+ *
+ * On entry:
+ * r0 - dest address
+ *
+ * Requires:
+ * number of bytes per page (PAGE_SIZE) is a multiple of 512 (BIG_LOOPS), 128
+ * otherwise
+ */
+
+ENTRY(bzero_page)
+ stmfd sp!, {r4-r8, lr}
+#ifdef BIG_LOOPS
+ mov r2, #(PAGE_SIZE >> 9)
+#else
+ mov r2, #(PAGE_SIZE >> 7)
+#endif
+ mov r3, #0
+ mov r4, #0
+ mov r5, #0
+ mov r6, #0
+ mov r7, #0
+ mov r8, #0
+ mov ip, #0
+ mov lr, #0
+
+1:
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+#ifdef BIG_LOOPS
+ /* There is little point making the loop any larger; unless we are
+ running with the cache off, the load/store overheads will
+ completely dominate this loop. */
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+#endif
+
+ subs r2, r2, #1
+ bne 1b
+
+ ldmfd sp!, {r4-r8, pc}
+
+#else /* __XSCALE__ */
+
+/*
+ * XSCALE version of bcopy_page
+ */
+ENTRY(bcopy_page)
+ pld [r0]
+ stmfd sp!, {r4, r5}
+ mov ip, #32
+ ldr r2, [r0], #0x04 /* 0x00 */
+ ldr r3, [r0], #0x04 /* 0x04 */
+1: pld [r0, #0x18] /* Prefetch 0x20 */
+ ldr r4, [r0], #0x04 /* 0x08 */
+ ldr r5, [r0], #0x04 /* 0x0c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x10 */
+ ldr r3, [r0], #0x04 /* 0x14 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x18 */
+ ldr r5, [r0], #0x04 /* 0x1c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x20 */
+ ldr r3, [r0], #0x04 /* 0x24 */
+ pld [r0, #0x18] /* Prefetch 0x40 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x28 */
+ ldr r5, [r0], #0x04 /* 0x2c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x30 */
+ ldr r3, [r0], #0x04 /* 0x34 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x38 */
+ ldr r5, [r0], #0x04 /* 0x3c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x40 */
+ ldr r3, [r0], #0x04 /* 0x44 */
+ pld [r0, #0x18] /* Prefetch 0x60 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x48 */
+ ldr r5, [r0], #0x04 /* 0x4c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x50 */
+ ldr r3, [r0], #0x04 /* 0x54 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x58 */
+ ldr r5, [r0], #0x04 /* 0x5c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x60 */
+ ldr r3, [r0], #0x04 /* 0x64 */
+ pld [r0, #0x18] /* Prefetch 0x80 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x68 */
+ ldr r5, [r0], #0x04 /* 0x6c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x70 */
+ ldr r3, [r0], #0x04 /* 0x74 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x78 */
+ ldr r5, [r0], #0x04 /* 0x7c */
+ strd r2, [r1], #0x08
+ subs ip, ip, #0x01
+ ldrgt r2, [r0], #0x04 /* 0x80 */
+ ldrgt r3, [r0], #0x04 /* 0x84 */
+ strd r4, [r1], #0x08
+ bgt 1b
+ ldmfd sp!, {r4, r5}
+ mov pc, lr
+
+/*
+ * XSCALE version of bzero_page
+ */
+ENTRY(bzero_page)
+ mov r1, #PAGE_SIZE
+ mov r2, #0
+ mov r3, #0
+1: strd r2, [r0], #8 /* 32 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8 /* 64 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8 /* 96 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8 /* 128 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ subs r1, r1, #128
+ bne 1b
+ mov pc, lr
+#endif /* __XSCALE__ */
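
[Editor's note] For reference, the non-XScale bcopy_page() above reduces
to a simple chunked copy: each COPY_CHUNK is one ldmia/stmia pair moving
eight 32-bit registers (32 bytes), the loop body runs four chunks
(128 bytes), and r2 counts PAGE_SIZE >> 7 iterations. A rough C sketch of
the same logic (illustrative only; bcopy_page_c is not a function in this
commit):

    #include <sys/types.h>
    #include <machine/param.h>

    static void
    bcopy_page_c(const uint32_t *src, uint32_t *dst)
    {
            int n, i;

            /* PAGE_SIZE >> 7 iterations x 128 bytes = PAGE_SIZE bytes. */
            for (n = PAGE_SIZE >> 7; n > 0; n--)
                    for (i = 0; i < 32; i++)    /* 4 chunks x 8 words */
                            *dst++ = *src++;
    }

bzero_page() follows the same shape, storing eight zeroed registers per
stmia instead of performing a load/store pair.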
diff --git a/sys/arm/arm/bcopyinout.S b/sys/arm/arm/bcopyinout.S
new file mode 100644
index 0000000..d57533a
--- /dev/null
+++ b/sys/arm/arm/bcopyinout.S
@@ -0,0 +1,810 @@
+/* $NetBSD: bcopyinout.S,v 1.11 2003/10/13 21:22:40 scw Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Allen Briggs for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "assym.s"
+
+#include <machine/asm.h>
+
+__FBSDID("$FreeBSD$");
+#ifdef __XSCALE__
+#include "bcopyinout_xscale.S"
+#else
+
+ .text
+ .align 0
+
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lcurpcb:
+ .word _C_LABEL(__pcpu) + PC_CURPCB
+#endif
+
+#define SAVE_REGS stmfd sp!, {r4-r11}
+#define RESTORE_REGS ldmfd sp!, {r4-r11}
+
+#if defined(__XSCALE__)
+#define HELLOCPP #
+#define PREFETCH(rx,o) pld [ rx , HELLOCPP (o) ]
+#else
+#define PREFETCH(rx,o)
+#endif
+
+/*
+ * r0 = user space address
+ * r1 = kernel space address
+ * r2 = length
+ *
+ * Copies bytes from user space to kernel space
+ *
+ * We save/restore r4-r11:
+ * r4-r11 are scratch
+ */
+ENTRY(copyin)
+ /* Quick exit if length is zero */
+ teq r2, #0
+ moveq r0, #0
+ moveq pc, lr
+
+ SAVE_REGS
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r2, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+ ldr r5, [r4, #PCB_ONFAULT]
+ adr r3, .Lcopyfault
+ str r3, [r4, #PCB_ONFAULT]
+
+ PREFETCH(r0, 0)
+ PREFETCH(r1, 0)
+
+ /*
+ * If not too many bytes, take the slow path.
+ */
+ cmp r2, #0x08
+ blt .Licleanup
+
+ /*
+ * Align destination to word boundary.
+ */
+ and r6, r1, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lialend
+ .word .Lialend
+ .word .Lial3
+ .word .Lial2
+ .word .Lial1
+.Lial3: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lial2: ldrbt r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lial1: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lialend:
+
+ /*
+ * If few bytes left, finish slow.
+ */
+ cmp r2, #0x08
+ blt .Licleanup
+
+ /*
+ * If source is not aligned, finish slow.
+ */
+ ands r3, r0, #0x03
+ bne .Licleanup
+
+ cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
+ blt .Licleanup8
+
+ /*
+ * Align destination to cacheline boundary.
+ * If source and destination are nicely aligned, this can be a big
+ * win. If not, it's still cheaper to copy in groups of 32 even if
+ * we don't get the nice cacheline alignment.
+ */
+ and r6, r1, #0x1f
+ ldr pc, [pc, r6]
+ b .Licaligned
+ .word .Licaligned
+ .word .Lical28
+ .word .Lical24
+ .word .Lical20
+ .word .Lical16
+ .word .Lical12
+ .word .Lical8
+ .word .Lical4
+.Lical28:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lical24:ldrt r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lical20:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lical16:ldrt r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lical12:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lical8:ldrt r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lical4:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+
+ /*
+ * We start with > 0x40 bytes to copy (>= 0x60 got us into this
+ * part of the code, and we may have knocked that down by as much
+ * as 0x1c getting aligned).
+ *
+ * This loop basically works out to:
+ * do {
+ * prefetch-next-cacheline(s)
+ * bytes -= 0x20;
+ * copy cacheline
+ * } while (bytes >= 0x40);
+ * bytes -= 0x20;
+ * copy cacheline
+ */
+.Licaligned:
+ PREFETCH(r0, 32)
+ PREFETCH(r1, 32)
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ ldrt r6, [r0], #4
+ ldrt r7, [r0], #4
+ ldrt r8, [r0], #4
+ ldrt r9, [r0], #4
+ stmia r1!, {r10-r11}
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ stmia r1!, {r6-r11}
+
+ cmp r2, #0x40
+ bge .Licaligned
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ ldrt r6, [r0], #4
+ ldrt r7, [r0], #4
+ ldrt r8, [r0], #4
+ ldrt r9, [r0], #4
+ stmia r1!, {r10-r11}
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ stmia r1!, {r6-r11}
+
+ cmp r2, #0x08
+ blt .Liprecleanup
+
+.Licleanup8:
+ ldrt r8, [r0], #4
+ ldrt r9, [r0], #4
+ sub r2, r2, #8
+ stmia r1!, {r8, r9}
+ cmp r2, #8
+ bge .Licleanup8
+
+.Liprecleanup:
+ /*
+ * If we're done, bail.
+ */
+ cmp r2, #0
+ beq .Lout
+
+.Licleanup:
+ and r6, r2, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Licend
+ .word .Lic4
+ .word .Lic1
+ .word .Lic2
+ .word .Lic3
+.Lic4: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lic3: ldrbt r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lic2: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lic1: ldrbt r7, [r0], #1
+ subs r2, r2, #1
+ strb r7, [r1], #1
+.Licend:
+ bne .Licleanup
+
+.Liout:
+ mov r0, #0
+
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+
+.Lcopyfault:
+ mov r0, #14 /* EFAULT */
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+
+/*
+ * r0 = kernel space address
+ * r1 = user space address
+ * r2 = length
+ *
+ * Copies bytes from kernel space to user space
+ *
+ * We save/restore r4-r11:
+ * r4-r11 are scratch
+ */
+
+ENTRY(copyout)
+ /* Quick exit if length is zero */
+ teq r2, #0
+ moveq r0, #0
+ moveq pc, lr
+
+ SAVE_REGS
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r2, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+ ldr r5, [r4, #PCB_ONFAULT]
+ adr r3, .Lcopyfault
+ str r3, [r4, #PCB_ONFAULT]
+
+ PREFETCH(r0, 0)
+ PREFETCH(r1, 0)
+
+ /*
+ * If not too many bytes, take the slow path.
+ */
+ cmp r2, #0x08
+ blt .Lcleanup
+
+ /*
+ * Align destination to word boundary.
+ */
+ and r6, r1, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lalend
+ .word .Lalend
+ .word .Lal3
+ .word .Lal2
+ .word .Lal1
+.Lal3: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lal2: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strbt r7, [r1], #1
+.Lal1: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lalend:
+
+ /*
+ * If few bytes left, finish slow.
+ */
+ cmp r2, #0x08
+ blt .Lcleanup
+
+ /*
+ * If source is not aligned, finish slow.
+ */
+ ands r3, r0, #0x03
+ bne .Lcleanup
+
+ cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
+ blt .Lcleanup8
+
+ /*
+ * Align source & destination to cacheline boundary.
+ */
+ and r6, r1, #0x1f
+ ldr pc, [pc, r6]
+ b .Lcaligned
+ .word .Lcaligned
+ .word .Lcal28
+ .word .Lcal24
+ .word .Lcal20
+ .word .Lcal16
+ .word .Lcal12
+ .word .Lcal8
+ .word .Lcal4
+.Lcal28:ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+.Lcal24:ldr r7, [r0], #4
+ sub r2, r2, #4
+ strt r7, [r1], #4
+.Lcal20:ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+.Lcal16:ldr r7, [r0], #4
+ sub r2, r2, #4
+ strt r7, [r1], #4
+.Lcal12:ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+.Lcal8: ldr r7, [r0], #4
+ sub r2, r2, #4
+ strt r7, [r1], #4
+.Lcal4: ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+
+ /*
+ * We start with > 0x40 bytes to copy (>= 0x60 got us into this
+ * part of the code, and we may have knocked that down by as much
+ * as 0x1c getting aligned).
+ *
+ * This loop basically works out to:
+ * do {
+ * prefetch-next-cacheline(s)
+ * bytes -= 0x20;
+ * copy cacheline
+ * } while (bytes >= 0x40);
+ * bytes -= 0x20;
+ * copy cacheline
+ */
+.Lcaligned:
+ PREFETCH(r0, 32)
+ PREFETCH(r1, 32)
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+ ldmia r0!, {r6-r7}
+ strt r8, [r1], #4
+ strt r9, [r1], #4
+ strt r10, [r1], #4
+ strt r11, [r1], #4
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+
+ cmp r2, #0x40
+ bge .Lcaligned
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+ ldmia r0!, {r6-r7}
+ strt r8, [r1], #4
+ strt r9, [r1], #4
+ strt r10, [r1], #4
+ strt r11, [r1], #4
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+
+ cmp r2, #0x08
+ blt .Lprecleanup
+
+.Lcleanup8:
+ ldmia r0!, {r8-r9}
+ sub r2, r2, #8
+ strt r8, [r1], #4
+ strt r9, [r1], #4
+ cmp r2, #8
+ bge .Lcleanup8
+
+.Lprecleanup:
+ /*
+ * If we're done, bail.
+ */
+ cmp r2, #0
+ beq .Lout
+
+.Lcleanup:
+ and r6, r2, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lcend
+ .word .Lc4
+ .word .Lc1
+ .word .Lc2
+ .word .Lc3
+.Lc4: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lc3: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strbt r7, [r1], #1
+.Lc2: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lc1: ldrb r7, [r0], #1
+ subs r2, r2, #1
+ strbt r7, [r1], #1
+.Lcend:
+ bne .Lcleanup
+
+.Lout:
+ mov r0, #0
+
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+
+/*
+ * r0 = kernel space source address
+ * r1 = kernel space destination address
+ * r2 = length
+ *
+ * Copies bytes from kernel space to kernel space, aborting on page fault
+ *
+ * Copy of copyout, but without the ldrt/strt instructions.
+ */
+
+ENTRY(kcopy)
+ /* Quick exit if length is zero */
+ teq r2, #0
+ moveq r0, #0
+ moveq pc, lr
+
+ SAVE_REGS
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r2, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+ ldr r5, [r4, #PCB_ONFAULT]
+ adr r3, .Lcopyfault
+ str r3, [r4, #PCB_ONFAULT]
+
+ PREFETCH(r0, 0)
+ PREFETCH(r1, 0)
+
+ /*
+ * If not too many bytes, take the slow path.
+ */
+ cmp r2, #0x08
+ blt .Lkcleanup
+
+ /*
+ * Align destination to word boundary.
+ */
+ and r6, r1, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lkalend
+ .word .Lkalend
+ .word .Lkal3
+ .word .Lkal2
+ .word .Lkal1
+.Lkal3: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkal2: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lkal1: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkalend:
+
+ /*
+ * If few bytes left, finish slow.
+ */
+ cmp r2, #0x08
+ blt .Lkcleanup
+
+ /*
+ * If source is not aligned, finish slow.
+ */
+ ands r3, r0, #0x03
+ bne .Lkcleanup
+
+ cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
+ blt .Lkcleanup8
+
+ /*
+ * Align source & destination to cacheline boundary.
+ */
+ and r6, r1, #0x1f
+ ldr pc, [pc, r6]
+ b .Lkcaligned
+ .word .Lkcaligned
+ .word .Lkcal28
+ .word .Lkcal24
+ .word .Lkcal20
+ .word .Lkcal16
+ .word .Lkcal12
+ .word .Lkcal8
+ .word .Lkcal4
+.Lkcal28:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lkcal24:ldr r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lkcal20:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lkcal16:ldr r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lkcal12:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lkcal8:ldr r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lkcal4:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+
+ /*
+ * We start with > 0x40 bytes to copy (>= 0x60 got us into this
+ * part of the code, and we may have knocked that down by as much
+ * as 0x1c getting aligned).
+ *
+ * This loop basically works out to:
+ * do {
+ * prefetch-next-cacheline(s)
+ * bytes -= 0x20;
+ * copy cacheline
+ * } while (bytes >= 0x40);
+ * bytes -= 0x20;
+ * copy cacheline
+ */
+.Lkcaligned:
+ PREFETCH(r0, 32)
+ PREFETCH(r1, 32)
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ stmia r1!, {r6, r7}
+ ldmia r0!, {r6, r7}
+ stmia r1!, {r8-r11}
+ stmia r1!, {r6, r7}
+
+ cmp r2, #0x40
+ bge .Lkcaligned
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ stmia r1!, {r6-r7}
+ ldmia r0!, {r6-r7}
+ stmia r1!, {r8-r11}
+ stmia r1!, {r6-r7}
+
+ cmp r2, #0x08
+ blt .Lkprecleanup
+
+.Lkcleanup8:
+ ldmia r0!, {r8-r9}
+ sub r2, r2, #8
+ stmia r1!, {r8-r9}
+ cmp r2, #8
+ bge .Lkcleanup8
+
+.Lkprecleanup:
+ /*
+ * If we're done, bail.
+ */
+ cmp r2, #0
+ beq .Lkout
+
+.Lkcleanup:
+ and r6, r2, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lkcend
+ .word .Lkc4
+ .word .Lkc1
+ .word .Lkc2
+ .word .Lkc3
+.Lkc4: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkc3: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lkc2: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkc1: ldrb r7, [r0], #1
+ subs r2, r2, #1
+ strb r7, [r1], #1
+.Lkcend:
+ bne .Lkcleanup
+
+.Lkout:
+ mov r0, #0
+
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+#endif /* !__XSCALE__ */
+
+/*
+ * int badaddr_read_1(const uint8_t *src, uint8_t *dest)
+ *
+ * Copies a single 8-bit value from src to dest, returning 0 on success,
+ * else EFAULT if a page fault occurred.
+ */
+ENTRY(badaddr_read_1)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0-r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+ ldr ip, [r2, #PCB_ONFAULT]
+ adr r3, 1f
+ str r3, [r2, #PCB_ONFAULT]
+ nop
+ nop
+ nop
+ ldrb r3, [r0]
+ nop
+ nop
+ nop
+ strb r3, [r1]
+ mov r0, #0 /* No fault */
+1: str ip, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * int badaddr_read_2(const uint16_t *src, uint16_t *dest)
+ *
+ * Copies a single 16-bit value from src to dest, returning 0 on success,
+ * else EFAULT if a page fault occurred.
+ */
+ENTRY(badaddr_read_2)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0-r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+ ldr ip, [r2, #PCB_ONFAULT]
+ adr r3, 1f
+ str r3, [r2, #PCB_ONFAULT]
+ nop
+ nop
+ nop
+ ldrh r3, [r0]
+ nop
+ nop
+ nop
+ strh r3, [r1]
+ mov r0, #0 /* No fault */
+1: str ip, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * int badaddr_read_4(const uint32_t *src, uint32_t *dest)
+ *
+ * Copies a single 32-bit value from src to dest, returning 0 on success,
+ * else EFAULT if a page fault occurred.
+ */
+ENTRY(badaddr_read_4)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0-r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+ ldr ip, [r2, #PCB_ONFAULT]
+ adr r3, 1f
+ str r3, [r2, #PCB_ONFAULT]
+ nop
+ nop
+ nop
+ ldr r3, [r0]
+ nop
+ nop
+ nop
+ str r3, [r1]
+ mov r0, #0 /* No fault */
+1: str ip, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
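
[Editor's note] All of the routines in bcopyinout.S share one
fault-recovery convention: the current pcb_onfault slot is saved, pointed
at a local recovery label (.Lcopyfault, or the "1:" labels in
badaddr_read_*), and restored on the way out. If a user access faults,
the data-abort handler resumes at that label, so the routine returns
EFAULT (14) instead of panicking the kernel. A hedged C rendering of the
pattern (the struct layout, curpcb variable, and helper names below are
illustrative stand-ins, not this commit's API):

    #include <sys/types.h>

    struct pcb { void *pcb_onfault; };       /* stand-in for the real pcb */
    extern struct pcb *curpcb;               /* hypothetical current pcb */
    extern void copy_fault_stub(void);       /* hypothetical resume stub */
    extern int do_user_copy(void *, const void *, size_t); /* hypothetical */

    static int
    copyin_sketch(const void *uaddr, void *kaddr, size_t len)
    {
            void *saved = curpcb->pcb_onfault;
            int error;

            /*
             * Arm the recovery point: on a data abort inside
             * do_user_copy(), the trap handler sees a non-NULL
             * pcb_onfault and resumes at the stub, which yields
             * EFAULT, exactly as .Lcopyfault does in the assembly above.
             */
            curpcb->pcb_onfault = (void *)copy_fault_stub;
            error = do_user_copy(kaddr, uaddr, len);
            curpcb->pcb_onfault = saved;
            return (error);
    }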
diff --git a/sys/arm/arm/bcopyinout_xscale.S b/sys/arm/arm/bcopyinout_xscale.S
new file mode 100644
index 0000000..d2b95b6
--- /dev/null
+++ b/sys/arm/arm/bcopyinout_xscale.S
@@ -0,0 +1,1357 @@
+/* $NetBSD: bcopyinout_xscale.S,v 1.3 2003/12/15 09:27:18 scw Exp $ */
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+ .text
+ .align 0
+
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lcurpcb:
+ .word _C_LABEL(__pcpu) + PC_CURPCB
+#endif
+
+/*
+ * r0 = user space address
+ * r1 = kernel space address
+ * r2 = length
+ *
+ * Copies bytes from user space to kernel space
+ */
+ENTRY(copyin)
+ cmp r2, #0x00
+ movle r0, #0x00
+ movle pc, lr /* Bail early if length is <= 0 */
+
+ stmfd sp!, {r10-r11, lr}
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2}
+ bl _C_LABEL(cpu_number)
+ ldr r10, .Lcpu_info
+ ldmfd sp!, {r0-r2}
+ ldr r10, [r10, r0, lsl #2]
+ ldr r10, [r10, #CI_CURPCB]
+#else
+ ldr r10, .Lcurpcb
+ ldr r10, [r10]
+#endif
+
+ mov r3, #0x00
+ adr ip, .Lcopyin_fault
+ ldr r11, [r10, #PCB_ONFAULT]
+ str ip, [r10, #PCB_ONFAULT]
+ bl .Lcopyin_guts
+ str r11, [r10, #PCB_ONFAULT]
+ mov r0, #0x00
+ ldmfd sp!, {r10-r11, pc}
+
+.Lcopyin_fault:
+ str r11, [r10, #PCB_ONFAULT]
+ cmp r3, #0x00
+ ldmgtfd sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */
+ ldmltfd sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */
+ ldmfd sp!, {r10-r11, pc}
+
+.Lcopyin_guts:
+ pld [r0]
+ /* Word-align the destination buffer */
+ ands ip, r1, #0x03 /* Already word aligned? */
+ beq .Lcopyin_wordaligned /* Yup */
+ rsb ip, ip, #0x04
+ cmp r2, ip /* Enough bytes left to align it? */
+ blt .Lcopyin_l4_2 /* Nope. Just copy bytewise */
+ sub r2, r2, ip
+ rsbs ip, ip, #0x03
+ addne pc, pc, ip, lsl #3
+ nop
+ ldrbt ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrbt ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrbt ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ cmp r2, #0x00 /* All done? */
+ moveq pc, lr
+
+ /* Destination buffer is now word aligned */
+.Lcopyin_wordaligned:
+ ands ip, r0, #0x03 /* Is src also word-aligned? */
+ bne .Lcopyin_bad_align /* Nope. Things just got bad */
+ cmp r2, #0x08 /* Less than 8 bytes remaining? */
+ blt .Lcopyin_w_less_than8
+
+ /* Quad-align the destination buffer */
+ tst r1, #0x07 /* Already quad aligned? */
+ ldrnet ip, [r0], #0x04
+ stmfd sp!, {r4-r9} /* Free up some registers */
+ mov r3, #-1 /* Signal restore r4-r9 */
+ tst r1, #0x07 /* XXX: bug work-around */
+ subne r2, r2, #0x04
+ strne ip, [r1], #0x04
+
+ /* Destination buffer quad aligned, source is word aligned */
+ subs r2, r2, #0x80
+ blt .Lcopyin_w_lessthan128
+
+ /* Copy 128 bytes at a time */
+.Lcopyin_w_loop128:
+ ldrt r4, [r0], #0x04 /* LD:00-03 */
+ ldrt r5, [r0], #0x04 /* LD:04-07 */
+ pld [r0, #0x18] /* Prefetch 0x20 */
+ ldrt r6, [r0], #0x04 /* LD:08-0b */
+ ldrt r7, [r0], #0x04 /* LD:0c-0f */
+ ldrt r8, [r0], #0x04 /* LD:10-13 */
+ ldrt r9, [r0], #0x04 /* LD:14-17 */
+ strd r4, [r1], #0x08 /* ST:00-07 */
+ ldrt r4, [r0], #0x04 /* LD:18-1b */
+ ldrt r5, [r0], #0x04 /* LD:1c-1f */
+ strd r6, [r1], #0x08 /* ST:08-0f */
+ ldrt r6, [r0], #0x04 /* LD:20-23 */
+ ldrt r7, [r0], #0x04 /* LD:24-27 */
+ pld [r0, #0x18] /* Prefetch 0x40 */
+ strd r8, [r1], #0x08 /* ST:10-17 */
+ ldrt r8, [r0], #0x04 /* LD:28-2b */
+ ldrt r9, [r0], #0x04 /* LD:2c-2f */
+ strd r4, [r1], #0x08 /* ST:18-1f */
+ ldrt r4, [r0], #0x04 /* LD:30-33 */
+ ldrt r5, [r0], #0x04 /* LD:34-37 */
+ strd r6, [r1], #0x08 /* ST:20-27 */
+ ldrt r6, [r0], #0x04 /* LD:38-3b */
+ ldrt r7, [r0], #0x04 /* LD:3c-3f */
+ strd r8, [r1], #0x08 /* ST:28-2f */
+ ldrt r8, [r0], #0x04 /* LD:40-43 */
+ ldrt r9, [r0], #0x04 /* LD:44-47 */
+ pld [r0, #0x18] /* Prefetch 0x60 */
+ strd r4, [r1], #0x08 /* ST:30-37 */
+ ldrt r4, [r0], #0x04 /* LD:48-4b */
+ ldrt r5, [r0], #0x04 /* LD:4c-4f */
+ strd r6, [r1], #0x08 /* ST:38-3f */
+ ldrt r6, [r0], #0x04 /* LD:50-53 */
+ ldrt r7, [r0], #0x04 /* LD:54-57 */
+ strd r8, [r1], #0x08 /* ST:40-47 */
+ ldrt r8, [r0], #0x04 /* LD:58-5b */
+ ldrt r9, [r0], #0x04 /* LD:5c-5f */
+ strd r4, [r1], #0x08 /* ST:48-4f */
+ ldrt r4, [r0], #0x04 /* LD:60-63 */
+ ldrt r5, [r0], #0x04 /* LD:64-67 */
+ pld [r0, #0x18] /* Prefetch 0x80 */
+ strd r6, [r1], #0x08 /* ST:50-57 */
+ ldrt r6, [r0], #0x04 /* LD:68-6b */
+ ldrt r7, [r0], #0x04 /* LD:6c-6f */
+ strd r8, [r1], #0x08 /* ST:58-5f */
+ ldrt r8, [r0], #0x04 /* LD:70-73 */
+ ldrt r9, [r0], #0x04 /* LD:74-77 */
+ strd r4, [r1], #0x08 /* ST:60-67 */
+ ldrt r4, [r0], #0x04 /* LD:78-7b */
+ ldrt r5, [r0], #0x04 /* LD:7c-7f */
+ strd r6, [r1], #0x08 /* ST:68-6f */
+ strd r8, [r1], #0x08 /* ST:70-77 */
+ subs r2, r2, #0x80
+ strd r4, [r1], #0x08 /* ST:78-7f */
+ bge .Lcopyin_w_loop128
+
+.Lcopyin_w_lessthan128:
+ adds r2, r2, #0x80 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x20
+ blt .Lcopyin_w_lessthan32
+
+ /* Copy 32 bytes at a time */
+.Lcopyin_w_loop32:
+ ldrt r4, [r0], #0x04
+ ldrt r5, [r0], #0x04
+ pld [r0, #0x18]
+ ldrt r6, [r0], #0x04
+ ldrt r7, [r0], #0x04
+ ldrt r8, [r0], #0x04
+ ldrt r9, [r0], #0x04
+ strd r4, [r1], #0x08
+ ldrt r4, [r0], #0x04
+ ldrt r5, [r0], #0x04
+ strd r6, [r1], #0x08
+ strd r8, [r1], #0x08
+ subs r2, r2, #0x20
+ strd r4, [r1], #0x08
+ bge .Lcopyin_w_loop32
+
+.Lcopyin_w_lessthan32:
+ adds r2, r2, #0x20 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+
+ and r4, r2, #0x18
+ rsb r5, r4, #0x18
+ subs r2, r2, r4
+ add pc, pc, r5, lsl #1
+ nop
+
+ /* At least 24 bytes remaining */
+ ldrt r4, [r0], #0x04
+ ldrt r5, [r0], #0x04
+ nop
+ strd r4, [r1], #0x08
+
+ /* At least 16 bytes remaining */
+ ldrt r4, [r0], #0x04
+ ldrt r5, [r0], #0x04
+ nop
+ strd r4, [r1], #0x08
+
+ /* At least 8 bytes remaining */
+ ldrt r4, [r0], #0x04
+ ldrt r5, [r0], #0x04
+ nop
+ strd r4, [r1], #0x08
+
+ /* Less than 8 bytes remaining */
+ ldmfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ mov r3, #0x00
+
+.Lcopyin_w_less_than8:
+ subs r2, r2, #0x04
+ ldrget ip, [r0], #0x04
+ strge ip, [r1], #0x04
+ moveq pc, lr /* Return now if done */
+ addlt r2, r2, #0x04
+ ldrbt ip, [r0], #0x01
+ cmp r2, #0x02
+ ldrgebt r2, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrgtbt ip, [r0]
+ strgeb r2, [r1], #0x01
+ strgtb ip, [r1]
+ mov pc, lr
+
+/*
+ * At this point, it has not been possible to word align both buffers.
+ * The destination buffer (r1) is word aligned, but the source buffer
+ * (r0) is not.
+ */
+.Lcopyin_bad_align:
+ stmfd sp!, {r4-r7}
+ mov r3, #0x01
+ bic r0, r0, #0x03
+ cmp ip, #2
+ ldrt ip, [r0], #0x04
+ bgt .Lcopyin_bad3
+ beq .Lcopyin_bad2
+ b .Lcopyin_bad1
+
+.Lcopyin_bad1_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldrt r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldrt r6, [r0], #0x04
+ ldrt r7, [r0], #0x04
+ ldrt ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r7, lsr #24
+ mov r7, r7, lsl #8
+ orr r7, r7, ip, lsr #24
+#else
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, ip, lsl #24
+#endif
+ str r4, [r1], #0x04
+ str r5, [r1], #0x04
+ str r6, [r1], #0x04
+ str r7, [r1], #0x04
+.Lcopyin_bad1:
+ subs r2, r2, #0x10
+ bge .Lcopyin_bad1_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x03
+ blt .Lcopyin_l4
+
+.Lcopyin_bad1_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldrt ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #24
+#else
+ orr r4, r4, ip, lsl #24
+#endif
+ str r4, [r1], #0x04
+ bge .Lcopyin_bad1_loop4
+ sub r0, r0, #0x03
+ b .Lcopyin_l4
+
+.Lcopyin_bad2_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldrt r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldrt r6, [r0], #0x04
+ ldrt r7, [r0], #0x04
+ ldrt ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r7, lsr #16
+ mov r7, r7, lsl #16
+ orr r7, r7, ip, lsr #16
+#else
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, ip, lsl #16
+#endif
+ str r4, [r1], #0x04
+ str r5, [r1], #0x04
+ str r6, [r1], #0x04
+ str r7, [r1], #0x04
+.Lcopyin_bad2:
+ subs r2, r2, #0x10
+ bge .Lcopyin_bad2_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x02
+ blt .Lcopyin_l4
+
+.Lcopyin_bad2_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldrt ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #16
+#else
+ orr r4, r4, ip, lsl #16
+#endif
+ str r4, [r1], #0x04
+ bge .Lcopyin_bad2_loop4
+ sub r0, r0, #0x02
+ b .Lcopyin_l4
+
+.Lcopyin_bad3_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldrt r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldrt r6, [r0], #0x04
+ ldrt r7, [r0], #0x04
+ ldrt ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r7, lsr #8
+ mov r7, r7, lsl #24
+ orr r7, r7, ip, lsr #8
+#else
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, ip, lsl #8
+#endif
+ str r4, [r1], #0x04
+ str r5, [r1], #0x04
+ str r6, [r1], #0x04
+ str r7, [r1], #0x04
+.Lcopyin_bad3:
+ subs r2, r2, #0x10
+ bge .Lcopyin_bad3_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x01
+ blt .Lcopyin_l4
+
+.Lcopyin_bad3_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldrt ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #8
+#else
+ orr r4, r4, ip, lsl #8
+#endif
+ str r4, [r1], #0x04
+ bge .Lcopyin_bad3_loop4
+ sub r0, r0, #0x01
+
+.Lcopyin_l4:
+ ldmfd sp!, {r4-r7}
+ mov r3, #0x00
+ adds r2, r2, #0x04
+ moveq pc, lr
+.Lcopyin_l4_2:
+ rsbs r2, r2, #0x03
+ addne pc, pc, r2, lsl #3
+ nop
+ ldrbt ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrbt ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrbt ip, [r0]
+ strb ip, [r1]
+ mov pc, lr
+
+
+/*
+ * r0 = kernel space address
+ * r1 = user space address
+ * r2 = length
+ *
+ * Copies bytes from kernel space to user space
+ */
+ENTRY(copyout)
+ cmp r2, #0x00
+ movle r0, #0x00
+ movle pc, lr /* Bail early if length is <= 0 */
+
+ stmfd sp!, {r10-r11, lr}
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2}
+ bl _C_LABEL(cpu_number)
+ ldr r10, .Lcpu_info
+ ldmfd sp!, {r0-r2}
+ ldr r10, [r10, r0, lsl #2]
+ ldr r10, [r10, #CI_CURPCB]
+#else
+ ldr r10, .Lcurpcb
+ ldr r10, [r10]
+#endif
+
+ mov r3, #0x00
+ adr ip, .Lcopyout_fault
+ ldr r11, [r10, #PCB_ONFAULT]
+ str ip, [r10, #PCB_ONFAULT]
+ bl .Lcopyout_guts
+ str r11, [r10, #PCB_ONFAULT]
+ mov r0, #0x00
+ ldmfd sp!, {r10-r11, pc}
+
+.Lcopyout_fault:
+ str r11, [r10, #PCB_ONFAULT]
+ cmp r3, #0x00
+ ldmgtfd sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */
+ ldmltfd sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */
+ ldmfd sp!, {r10-r11, pc}
+
+.Lcopyout_guts:
+ pld [r0]
+ /* Word-align the destination buffer */
+ ands ip, r1, #0x03 /* Already word aligned? */
+ beq .Lcopyout_wordaligned /* Yup */
+ rsb ip, ip, #0x04
+ cmp r2, ip /* Enough bytes left to align it? */
+ blt .Lcopyout_l4_2 /* Nope. Just copy bytewise */
+ sub r2, r2, ip
+ rsbs ip, ip, #0x03
+ addne pc, pc, ip, lsl #3
+ nop
+ ldrb ip, [r0], #0x01
+ strbt ip, [r1], #0x01
+ ldrb ip, [r0], #0x01
+ strbt ip, [r1], #0x01
+ ldrb ip, [r0], #0x01
+ strbt ip, [r1], #0x01
+ cmp r2, #0x00 /* All done? */
+ moveq pc, lr
+
+ /* Destination buffer is now word aligned */
+.Lcopyout_wordaligned:
+ ands ip, r0, #0x03 /* Is src also word-aligned? */
+ bne .Lcopyout_bad_align /* Nope. Things just got bad */
+ cmp r2, #0x08 /* Less than 8 bytes remaining? */
+ blt .Lcopyout_w_less_than8
+
+ /* Quad-align the destination buffer */
+ tst r1, #0x07 /* Already quad aligned? */
+ ldrne ip, [r0], #0x04
+ stmfd sp!, {r4-r9} /* Free up some registers */
+ mov r3, #-1 /* Signal restore r4-r9 */
+ tst r1, #0x07 /* XXX: bug work-around */
+ subne r2, r2, #0x04
+ strnet ip, [r1], #0x04
+
+ /* Destination buffer quad aligned, source is word aligned */
+ subs r2, r2, #0x80
+ blt .Lcopyout_w_lessthan128
+
+ /* Copy 128 bytes at a time */
+.Lcopyout_w_loop128:
+ ldr r4, [r0], #0x04 /* LD:00-03 */
+ ldr r5, [r0], #0x04 /* LD:04-07 */
+ pld [r0, #0x18] /* Prefetch 0x20 */
+ ldr r6, [r0], #0x04 /* LD:08-0b */
+ ldr r7, [r0], #0x04 /* LD:0c-0f */
+ ldr r8, [r0], #0x04 /* LD:10-13 */
+ ldr r9, [r0], #0x04 /* LD:14-17 */
+ strt r4, [r1], #0x04 /* ST:00-03 */
+ strt r5, [r1], #0x04 /* ST:04-07 */
+ ldr r4, [r0], #0x04 /* LD:18-1b */
+ ldr r5, [r0], #0x04 /* LD:1c-1f */
+ strt r6, [r1], #0x04 /* ST:08-0b */
+ strt r7, [r1], #0x04 /* ST:0c-0f */
+ ldr r6, [r0], #0x04 /* LD:20-23 */
+ ldr r7, [r0], #0x04 /* LD:24-27 */
+ pld [r0, #0x18] /* Prefetch 0x40 */
+ strt r8, [r1], #0x04 /* ST:10-13 */
+ strt r9, [r1], #0x04 /* ST:14-17 */
+ ldr r8, [r0], #0x04 /* LD:28-2b */
+ ldr r9, [r0], #0x04 /* LD:2c-2f */
+ strt r4, [r1], #0x04 /* ST:18-1b */
+ strt r5, [r1], #0x04 /* ST:1c-1f */
+ ldr r4, [r0], #0x04 /* LD:30-33 */
+ ldr r5, [r0], #0x04 /* LD:34-37 */
+ strt r6, [r1], #0x04 /* ST:20-23 */
+ strt r7, [r1], #0x04 /* ST:24-27 */
+ ldr r6, [r0], #0x04 /* LD:38-3b */
+ ldr r7, [r0], #0x04 /* LD:3c-3f */
+ strt r8, [r1], #0x04 /* ST:28-2b */
+ strt r9, [r1], #0x04 /* ST:2c-2f */
+ ldr r8, [r0], #0x04 /* LD:40-43 */
+ ldr r9, [r0], #0x04 /* LD:44-47 */
+ pld [r0, #0x18] /* Prefetch 0x60 */
+ strt r4, [r1], #0x04 /* ST:30-33 */
+ strt r5, [r1], #0x04 /* ST:34-37 */
+ ldr r4, [r0], #0x04 /* LD:48-4b */
+ ldr r5, [r0], #0x04 /* LD:4c-4f */
+ strt r6, [r1], #0x04 /* ST:38-3b */
+ strt r7, [r1], #0x04 /* ST:3c-3f */
+ ldr r6, [r0], #0x04 /* LD:50-53 */
+ ldr r7, [r0], #0x04 /* LD:54-57 */
+ strt r8, [r1], #0x04 /* ST:40-43 */
+ strt r9, [r1], #0x04 /* ST:44-47 */
+ ldr r8, [r0], #0x04 /* LD:58-5b */
+ ldr r9, [r0], #0x04 /* LD:5c-5f */
+ strt r4, [r1], #0x04 /* ST:48-4b */
+ strt r5, [r1], #0x04 /* ST:4c-4f */
+ ldr r4, [r0], #0x04 /* LD:60-63 */
+ ldr r5, [r0], #0x04 /* LD:64-67 */
+ pld [r0, #0x18] /* Prefetch 0x80 */
+ strt r6, [r1], #0x04 /* ST:50-53 */
+ strt r7, [r1], #0x04 /* ST:54-57 */
+ ldr r6, [r0], #0x04 /* LD:68-6b */
+ ldr r7, [r0], #0x04 /* LD:6c-6f */
+ strt r8, [r1], #0x04 /* ST:58-5b */
+ strt r9, [r1], #0x04 /* ST:5c-5f */
+ ldr r8, [r0], #0x04 /* LD:70-73 */
+ ldr r9, [r0], #0x04 /* LD:74-77 */
+ strt r4, [r1], #0x04 /* ST:60-63 */
+ strt r5, [r1], #0x04 /* ST:64-67 */
+ ldr r4, [r0], #0x04 /* LD:78-7b */
+ ldr r5, [r0], #0x04 /* LD:7c-7f */
+ strt r6, [r1], #0x04 /* ST:68-6b */
+ strt r7, [r1], #0x04 /* ST:6c-6f */
+ strt r8, [r1], #0x04 /* ST:70-73 */
+ strt r9, [r1], #0x04 /* ST:74-77 */
+ subs r2, r2, #0x80
+ strt r4, [r1], #0x04 /* ST:78-7b */
+ strt r5, [r1], #0x04 /* ST:7c-7f */
+ bge .Lcopyout_w_loop128
+
+.Lcopyout_w_lessthan128:
+ adds r2, r2, #0x80 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x20
+ blt .Lcopyout_w_lessthan32
+
+ /* Copy 32 bytes at a time */
+.Lcopyout_w_loop32:
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ pld [r0, #0x18]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr r8, [r0], #0x04
+ ldr r9, [r0], #0x04
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ strt r6, [r1], #0x04
+ strt r7, [r1], #0x04
+ strt r8, [r1], #0x04
+ strt r9, [r1], #0x04
+ subs r2, r2, #0x20
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+ bge .Lcopyout_w_loop32
+
+.Lcopyout_w_lessthan32:
+ adds r2, r2, #0x20 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+
+ and r4, r2, #0x18
+ rsb r5, r4, #0x18
+ subs r2, r2, r4
+ add pc, pc, r5, lsl #1
+ nop
+
+ /* At least 24 bytes remaining */
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+
+ /* At least 16 bytes remaining */
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+
+ /* At least 8 bytes remaining */
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+
+ /* Less than 8 bytes remaining */
+ ldmfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ mov r3, #0x00
+
+.Lcopyout_w_less_than8:
+ subs r2, r2, #0x04
+ ldrge ip, [r0], #0x04
+ strget ip, [r1], #0x04
+ moveq pc, lr /* Return now if done */
+ addlt r2, r2, #0x04
+ ldrb ip, [r0], #0x01
+ cmp r2, #0x02
+ ldrgeb r2, [r0], #0x01
+ strbt ip, [r1], #0x01
+ ldrgtb ip, [r0]
+ strgebt r2, [r1], #0x01
+ strgtbt ip, [r1]
+ mov pc, lr
+
+/*
+ * At this point, it has not been possible to word align both buffers.
+ * The destination buffer (r1) is word aligned, but the source buffer
+ * (r0) is not.
+ */
+.Lcopyout_bad_align:
+ stmfd sp!, {r4-r7}
+ mov r3, #0x01
+ bic r0, r0, #0x03
+ cmp ip, #2
+ ldr ip, [r0], #0x04
+ bgt .Lcopyout_bad3
+ beq .Lcopyout_bad2
+ b .Lcopyout_bad1
+
+.Lcopyout_bad1_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldr r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r7, lsr #24
+ mov r7, r7, lsl #8
+ orr r7, r7, ip, lsr #24
+#else
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, ip, lsl #24
+#endif
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+ strt r6, [r1], #0x04
+ strt r7, [r1], #0x04
+.Lcopyout_bad1:
+ subs r2, r2, #0x10
+ bge .Lcopyout_bad1_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x03
+ blt .Lcopyout_l4
+
+.Lcopyout_bad1_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldr ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #24
+#else
+ orr r4, r4, ip, lsl #24
+#endif
+ strt r4, [r1], #0x04
+ bge .Lcopyout_bad1_loop4
+ sub r0, r0, #0x03
+ b .Lcopyout_l4
+
+.Lcopyout_bad2_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldr r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r7, lsr #16
+ mov r7, r7, lsl #16
+ orr r7, r7, ip, lsr #16
+#else
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, ip, lsl #16
+#endif
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+ strt r6, [r1], #0x04
+ strt r7, [r1], #0x04
+.Lcopyout_bad2:
+ subs r2, r2, #0x10
+ bge .Lcopyout_bad2_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x02
+ blt .Lcopyout_l4
+
+.Lcopyout_bad2_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldr ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #16
+#else
+ orr r4, r4, ip, lsl #16
+#endif
+ strt r4, [r1], #0x04
+ bge .Lcopyout_bad2_loop4
+ sub r0, r0, #0x02
+ b .Lcopyout_l4
+
+.Lcopyout_bad3_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldr r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r7, lsr #8
+ mov r7, r7, lsl #24
+ orr r7, r7, ip, lsr #8
+#else
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, ip, lsl #8
+#endif
+ strt r4, [r1], #0x04
+ strt r5, [r1], #0x04
+ strt r6, [r1], #0x04
+ strt r7, [r1], #0x04
+.Lcopyout_bad3:
+ subs r2, r2, #0x10
+ bge .Lcopyout_bad3_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x01
+ blt .Lcopyout_l4
+
+.Lcopyout_bad3_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldr ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #8
+#else
+ orr r4, r4, ip, lsl #8
+#endif
+ strt r4, [r1], #0x04
+ bge .Lcopyout_bad3_loop4
+ sub r0, r0, #0x01
+
+.Lcopyout_l4:
+ ldmfd sp!, {r4-r7}
+ mov r3, #0x00
+ adds r2, r2, #0x04
+ moveq pc, lr
+.Lcopyout_l4_2:
+ rsbs r2, r2, #0x03
+ addne pc, pc, r2, lsl #3
+ nop
+ ldrb ip, [r0], #0x01
+ strbt ip, [r1], #0x01
+ ldrb ip, [r0], #0x01
+ strbt ip, [r1], #0x01
+ ldrb ip, [r0]
+ strbt ip, [r1]
+ mov pc, lr
+
+
+/*
+ * r0 = kernel space source address
+ * r1 = kernel space destination address
+ * r2 = length
+ *
+ * Copies bytes from kernel space to kernel space, aborting on page fault
+ */
+ENTRY(kcopy)
+ cmp r2, #0x00
+ movle r0, #0x00
+ movle pc, lr /* Bail early if length is <= 0 */
+
+ stmfd sp!, {r10-r11, lr}
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2}
+ bl _C_LABEL(cpu_number)
+ ldr r10, .Lcpu_info
+ ldmfd sp!, {r0-r2}
+ ldr r10, [r10, r0, lsl #2]
+ ldr r10, [r10, #CI_CURPCB]
+#else
+ ldr r10, .Lcurpcb
+ ldr r10, [r10]
+#endif
+
+ mov r3, #0x00
+ adr ip, .Lkcopy_fault
+ ldr r11, [r10, #PCB_ONFAULT]
+ str ip, [r10, #PCB_ONFAULT]
+ bl .Lkcopy_guts
+ str r11, [r10, #PCB_ONFAULT]
+ mov r0, #0x00
+ ldmfd sp!, {r10-r11, pc}
+
+.Lkcopy_fault:
+ str r11, [r10, #PCB_ONFAULT]
+ cmp r3, #0x00
+ ldmgtfd sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */
+ ldmltfd sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */
+ ldmfd sp!, {r10-r11, pc}
+
+.Lkcopy_guts:
+ pld [r0]
+ /* Word-align the destination buffer */
+ ands ip, r1, #0x03 /* Already word aligned? */
+ beq .Lkcopy_wordaligned /* Yup */
+ rsb ip, ip, #0x04
+ cmp r2, ip /* Enough bytes left to align it? */
+ blt .Lkcopy_bad_endgame2 /* Nope. Just copy bytewise */
+ sub r2, r2, ip
+ rsbs ip, ip, #0x03
+ addne pc, pc, ip, lsl #3
+ ldrb ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrb ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrb ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ cmp r2, #0x00 /* All done? */
+ moveq pc, lr
+
+ /* Destination buffer is now word aligned */
+.Lkcopy_wordaligned:
+ ands ip, r0, #0x03 /* Is src also word-aligned? */
+ bne .Lkcopy_bad_align /* Nope. Things just got bad */
+ cmp r2, #0x08 /* Less than 8 bytes remaining? */
+ blt .Lkcopy_w_less_than8
+
+ /* Quad-align the destination buffer */
+ tst r1, #0x07 /* Already quad aligned? */
+ ldrne ip, [r0], #0x04
+ stmfd sp!, {r4-r9} /* Free up some registers */
+ mov r3, #-1 /* Signal restore r4-r9 */
+ subne r2, r2, #0x04
+ strne ip, [r1], #0x04
+
+ /* Destination buffer quad aligned, source is word aligned */
+ subs r2, r2, #0x80
+ blt .Lkcopy_w_lessthan128
+
+ /* Copy 128 bytes at a time */
+.Lkcopy_w_loop128:
+ ldr r4, [r0], #0x04 /* LD:00-03 */
+ ldr r5, [r0], #0x04 /* LD:04-07 */
+ pld [r0, #0x18] /* Prefetch 0x20 */
+ ldr r6, [r0], #0x04 /* LD:08-0b */
+ ldr r7, [r0], #0x04 /* LD:0c-0f */
+ ldr r8, [r0], #0x04 /* LD:10-13 */
+ ldr r9, [r0], #0x04 /* LD:14-17 */
+ strd r4, [r1], #0x08 /* ST:00-07 */
+ ldr r4, [r0], #0x04 /* LD:18-1b */
+ ldr r5, [r0], #0x04 /* LD:1c-1f */
+ strd r6, [r1], #0x08 /* ST:08-0f */
+ ldr r6, [r0], #0x04 /* LD:20-23 */
+ ldr r7, [r0], #0x04 /* LD:24-27 */
+ pld [r0, #0x18] /* Prefetch 0x40 */
+ strd r8, [r1], #0x08 /* ST:10-17 */
+ ldr r8, [r0], #0x04 /* LD:28-2b */
+ ldr r9, [r0], #0x04 /* LD:2c-2f */
+ strd r4, [r1], #0x08 /* ST:18-1f */
+ ldr r4, [r0], #0x04 /* LD:30-33 */
+ ldr r5, [r0], #0x04 /* LD:34-37 */
+ strd r6, [r1], #0x08 /* ST:20-27 */
+ ldr r6, [r0], #0x04 /* LD:38-3b */
+ ldr r7, [r0], #0x04 /* LD:3c-3f */
+ strd r8, [r1], #0x08 /* ST:28-2f */
+ ldr r8, [r0], #0x04 /* LD:40-43 */
+ ldr r9, [r0], #0x04 /* LD:44-47 */
+ pld [r0, #0x18] /* Prefetch 0x60 */
+ strd r4, [r1], #0x08 /* ST:30-37 */
+ ldr r4, [r0], #0x04 /* LD:48-4b */
+ ldr r5, [r0], #0x04 /* LD:4c-4f */
+ strd r6, [r1], #0x08 /* ST:38-3f */
+ ldr r6, [r0], #0x04 /* LD:50-53 */
+ ldr r7, [r0], #0x04 /* LD:54-57 */
+ strd r8, [r1], #0x08 /* ST:40-47 */
+ ldr r8, [r0], #0x04 /* LD:58-5b */
+ ldr r9, [r0], #0x04 /* LD:5c-5f */
+ strd r4, [r1], #0x08 /* ST:48-4f */
+ ldr r4, [r0], #0x04 /* LD:60-63 */
+ ldr r5, [r0], #0x04 /* LD:64-67 */
+ pld [r0, #0x18] /* Prefetch 0x80 */
+ strd r6, [r1], #0x08 /* ST:50-57 */
+ ldr r6, [r0], #0x04 /* LD:68-6b */
+ ldr r7, [r0], #0x04 /* LD:6c-6f */
+ strd r8, [r1], #0x08 /* ST:58-5f */
+ ldr r8, [r0], #0x04 /* LD:70-73 */
+ ldr r9, [r0], #0x04 /* LD:74-77 */
+ strd r4, [r1], #0x08 /* ST:60-67 */
+ ldr r4, [r0], #0x04 /* LD:78-7b */
+ ldr r5, [r0], #0x04 /* LD:7c-7f */
+ strd r6, [r1], #0x08 /* ST:68-6f */
+ strd r8, [r1], #0x08 /* ST:70-77 */
+ subs r2, r2, #0x80
+ strd r4, [r1], #0x08 /* ST:78-7f */
+ bge .Lkcopy_w_loop128
+
+.Lkcopy_w_lessthan128:
+ adds r2, r2, #0x80 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x20
+ blt .Lkcopy_w_lessthan32
+
+ /* Copy 32 bytes at a time */
+.Lkcopy_w_loop32:
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ pld [r0, #0x18]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr r8, [r0], #0x04
+ ldr r9, [r0], #0x04
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ strd r6, [r1], #0x08
+ strd r8, [r1], #0x08
+ subs r2, r2, #0x20
+ strd r4, [r1], #0x08
+ bge .Lkcopy_w_loop32
+
+.Lkcopy_w_lessthan32:
+ adds r2, r2, #0x20 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+
+ and r4, r2, #0x18
+ rsb r5, r4, #0x18
+ subs r2, r2, r4
+ add pc, pc, r5, lsl #1
+ nop
+
+ /* At least 24 bytes remaining */
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ nop
+ strd r4, [r1], #0x08
+
+ /* At least 16 bytes remaining */
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ nop
+ strd r4, [r1], #0x08
+
+ /* At least 8 bytes remaining */
+ ldr r4, [r0], #0x04
+ ldr r5, [r0], #0x04
+ nop
+ strd r4, [r1], #0x08
+
+ /* Less than 8 bytes remaining */
+ ldmfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ mov r3, #0x00
+
+.Lkcopy_w_less_than8:
+ subs r2, r2, #0x04
+ ldrge ip, [r0], #0x04
+ strge ip, [r1], #0x04
+ moveq pc, lr /* Return now if done */
+ addlt r2, r2, #0x04
+ ldrb ip, [r0], #0x01
+ cmp r2, #0x02
+ ldrgeb r2, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrgtb ip, [r0]
+ strgeb r2, [r1], #0x01
+ strgtb ip, [r1]
+ mov pc, lr
+
+/*
+ * At this point, it has not been possible to word align both buffers.
+ * The destination buffer (r1) is word aligned, but the source buffer
+ * (r0) is not.
+ */
+.Lkcopy_bad_align:
+ stmfd sp!, {r4-r7}
+ mov r3, #0x01
+ bic r0, r0, #0x03
+ cmp ip, #2
+ ldr ip, [r0], #0x04
+ bgt .Lkcopy_bad3
+ beq .Lkcopy_bad2
+ b .Lkcopy_bad1
+
+.Lkcopy_bad1_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldr r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r7, lsr #24
+ mov r7, r7, lsl #8
+ orr r7, r7, ip, lsr #24
+#else
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, ip, lsl #24
+#endif
+ str r4, [r1], #0x04
+ str r5, [r1], #0x04
+ str r6, [r1], #0x04
+ str r7, [r1], #0x04
+.Lkcopy_bad1:
+ subs r2, r2, #0x10
+ bge .Lkcopy_bad1_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x03
+ blt .Lkcopy_bad_endgame
+
+.Lkcopy_bad1_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldr ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #24
+#else
+ orr r4, r4, ip, lsl #24
+#endif
+ str r4, [r1], #0x04
+ bge .Lkcopy_bad1_loop4
+ sub r0, r0, #0x03
+ b .Lkcopy_bad_endgame
+
+.Lkcopy_bad2_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldr r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r7, lsr #16
+ mov r7, r7, lsl #16
+ orr r7, r7, ip, lsr #16
+#else
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, ip, lsl #16
+#endif
+ str r4, [r1], #0x04
+ str r5, [r1], #0x04
+ str r6, [r1], #0x04
+ str r7, [r1], #0x04
+.Lkcopy_bad2:
+ subs r2, r2, #0x10
+ bge .Lkcopy_bad2_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x02
+ blt .Lkcopy_bad_endgame
+
+.Lkcopy_bad2_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldr ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #16
+#else
+ orr r4, r4, ip, lsl #16
+#endif
+ str r4, [r1], #0x04
+ bge .Lkcopy_bad2_loop4
+ sub r0, r0, #0x02
+ b .Lkcopy_bad_endgame
+
+.Lkcopy_bad3_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldr r5, [r0], #0x04
+ pld [r0, #0x018]
+ ldr r6, [r0], #0x04
+ ldr r7, [r0], #0x04
+ ldr ip, [r0], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r7, lsr #8
+ mov r7, r7, lsl #24
+ orr r7, r7, ip, lsr #8
+#else
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, ip, lsl #8
+#endif
+ str r4, [r1], #0x04
+ str r5, [r1], #0x04
+ str r6, [r1], #0x04
+ str r7, [r1], #0x04
+.Lkcopy_bad3:
+ subs r2, r2, #0x10
+ bge .Lkcopy_bad3_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r0, r0, #0x01
+ blt .Lkcopy_bad_endgame
+
+.Lkcopy_bad3_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldr ip, [r0], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #8
+#else
+ orr r4, r4, ip, lsl #8
+#endif
+ str r4, [r1], #0x04
+ bge .Lkcopy_bad3_loop4
+ sub r0, r0, #0x01
+
+.Lkcopy_bad_endgame:
+ ldmfd sp!, {r4-r7}
+ mov r3, #0x00
+ adds r2, r2, #0x04
+ moveq pc, lr
+.Lkcopy_bad_endgame2:
+ rsbs r2, r2, #0x03
+ addne pc, pc, r2, lsl #3
+ nop
+ ldrb ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrb ip, [r0], #0x01
+ strb ip, [r1], #0x01
+ ldrb ip, [r0]
+ strb ip, [r1]
+ mov pc, lr
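+
+/*
+ * Rough C model of kcopy (reference only, not assembled): a kernel
+ * to kernel memcpy that survives faults on either buffer by hooking
+ * pcb_onfault around the copy; the abort handler resumes at the
+ * hook with the error (typically EFAULT) in r0.
+ *
+ *	int
+ *	kcopy(const void *src, void *dst, size_t len)
+ *	{
+ *		save pcb_onfault; point it at a resume stub;
+ *		memcpy(dst, src, len);
+ *		restore pcb_onfault;
+ *		return (0);
+ *	}
+ */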
diff --git a/sys/arm/arm/blockio.S b/sys/arm/arm/blockio.S
new file mode 100644
index 0000000..95a8efa
--- /dev/null
+++ b/sys/arm/arm/blockio.S
@@ -0,0 +1,587 @@
+/* $NetBSD: blockio.S,v 1.5 2002/08/15 01:38:16 briggs Exp $ */
+
+/*
+ * Copyright (c) 2001 Ben Harris.
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * blockio.S
+ *
+ * optimised block read/write from/to IO routines.
+ *
+ * Created : 08/10/94
+ * Modified : 22/01/99 -- R.Earnshaw
+ * Faster, and small tweaks for StrongARM
+ */
+
+#include <machine/asm.h>
+
+__FBSDID("$FreeBSD$");
+
+/*
+ * Read bytes from an I/O address into a block of memory
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+/* This code will look very familiar if you've read _memcpy(). */
+ENTRY(read_multi_1)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ subs r2, r2, #4 /* r2 = length - 4 */
+ blt .Lrm1_l4 /* less than 4 bytes */
+ ands r12, r1, #3
+ beq .Lrm1_main /* aligned destination */
+ rsb r12, r12, #4
+ cmp r12, #2
+ ldrb r3, [r0]
+ strb r3, [r1], #1
+ ldrgeb r3, [r0]
+ strgeb r3, [r1], #1
+ ldrgtb r3, [r0]
+ strgtb r3, [r1], #1
+ subs r2, r2, r12
+ blt .Lrm1_l4
+.Lrm1_main:
+.Lrm1loop:
+ ldrb r3, [r0]
+ ldrb r12, [r0]
+ orr r3, r3, r12, lsl #8
+ ldrb r12, [r0]
+ orr r3, r3, r12, lsl #16
+ ldrb r12, [r0]
+ orr r3, r3, r12, lsl #24
+ str r3, [r1], #4
+ subs r2, r2, #4
+ bge .Lrm1loop
+.Lrm1_l4:
+ adds r2, r2, #4 /* r2 = length again */
+ ldmeqdb fp, {fp, sp, pc}
+ cmp r2, #2
+ ldrb r3, [r0]
+ strb r3, [r1], #1
+ ldrgeb r3, [r0]
+ strgeb r3, [r1], #1
+ ldrgtb r3, [r0]
+ strgtb r3, [r1], #1
+ ldmdb fp, {fp, sp, pc}
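+
+/*
+ * Rough C model of read_multi_1 (reference only, not assembled):
+ * the device register never advances; only the memory pointer does.
+ * The assembly above merely batches four bytes into each word store
+ * once the destination is word aligned.
+ *
+ *	void
+ *	read_multi_1(volatile u_char *io, u_char *mem, size_t len)
+ *	{
+ *		while (len-- > 0)
+ *			*mem++ = *io;
+ *	}
+ */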
+
+/*
+ * Write bytes to an I/O address from a block of memory
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+/* This code will look very familiar if you've read _memcpy(). */
+ENTRY(write_multi_1)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ subs r2, r2, #4 /* r2 = length - 4 */
+ blt .Lwm1_l4 /* less than 4 bytes */
+ ands r12, r1, #3
+ beq .Lwm1_main /* aligned source */
+ rsb r12, r12, #4
+ cmp r12, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0]
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0]
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0]
+ subs r2, r2, r12
+ blt .Lwm1_l4
+.Lwm1_main:
+.Lwm1loop:
+ ldr r3, [r1], #4
+ strb r3, [r0]
+ mov r3, r3, lsr #8
+ strb r3, [r0]
+ mov r3, r3, lsr #8
+ strb r3, [r0]
+ mov r3, r3, lsr #8
+ strb r3, [r0]
+ subs r2, r2, #4
+ bge .Lwm1loop
+.Lwm1_l4:
+ adds r2, r2, #4 /* r2 = length again */
+ ldmeqdb fp, {fp, sp, pc}
+ cmp r2, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0]
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0]
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0]
+ ldmdb fp, {fp, sp, pc}
+
+/*
+ * Reads short ints (16 bits) from an I/O address into a block of memory
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+ENTRY(insw)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned and the length is even, do it fast */
+
+ tst r2, #0x00000001
+ tsteq r1, #0x00000003
+ beq .Lfastinsw
+
+/* Non aligned insw */
+
+.Linswloop:
+ ldr r3, [r0]
+ subs r2, r2, #0x00000001 /* Loop test in load delay slot */
+ strb r3, [r1], #0x0001
+ mov r3, r3, lsr #8
+ strb r3, [r1], #0x0001
+ bgt .Linswloop
+
+ mov pc, lr
+
+/* Word aligned insw */
+
+.Lfastinsw:
+
+.Lfastinswloop:
+ ldr r3, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr ip, [r0]
+ mov r3, r3, lsr #16 /* Put the two shorts together */
+ orr r3, r3, ip, lsl #16
+ str r3, [r1], #0x0004 /* Store */
+ subs r2, r2, #0x00000002 /* Next */
+ bgt .Lfastinswloop
+
+ mov pc, lr
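+
+/*
+ * Rough C model of the non-aligned loop above (reference only):
+ * every word read of the port carries one 16-bit datum in its low
+ * half, stored a byte at a time so the destination need not be
+ * aligned; r2 counts 16-bit units.
+ *
+ *	while (len-- > 0) {
+ *		u_int v = *io;
+ *		*mem++ = v & 0xff;
+ *		*mem++ = (v >> 8) & 0xff;
+ *	}
+ */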
+
+
+/*
+ * Writes short ints (16 bits) from a block of memory to an I/O address
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+ENTRY(outsw)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the source address is word aligned and the length is even, do it fast */
+
+ tst r2, #0x00000001
+ tsteq r1, #0x00000003
+ beq .Lfastoutsw
+
+/* Non aligned outsw */
+
+.Loutswloop:
+ ldrb r3, [r1], #0x0001
+ ldrb ip, [r1], #0x0001
+ subs r2, r2, #0x00000001 /* Loop test in load delay slot */
+ orr r3, r3, ip, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+ bgt .Loutswloop
+
+ mov pc, lr
+
+/* Word aligned outsw */
+
+.Lfastoutsw:
+
+.Lfastoutswloop:
+ ldr r3, [r1], #0x0004 /* r3 = (H)(L) */
+ subs r2, r2, #0x00000002 /* Loop test in load delay slot */
+
+ eor ip, r3, r3, lsr #16 /* ip = (H)(H^L) */
+ eor r3, r3, ip, lsl #16 /* r3 = (H^H^L)(L) = (L)(L) */
+ eor ip, ip, r3, lsr #16 /* ip = (H)(H^L^L) = (H)(H) */
+
+ str r3, [r0]
+ str ip, [r0]
+
+/* mov ip, r3, lsl #16
+ * orr ip, ip, ip, lsr #16
+ * str ip, [r0]
+ *
+ * mov ip, r3, lsr #16
+ * orr ip, ip, ip, lsl #16
+ * str ip, [r0]
+ */
+
+ bgt .Lfastoutswloop
+
+ mov pc, lr
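+
+/*
+ * The three eor instructions above swap and replicate halfwords
+ * without a scratch register.  Rough C model (reference only),
+ * starting from w = (H << 16) | L:
+ *
+ *	t = w ^ (w >> 16);	t is (H)(H^L)
+ *	w = w ^ (t << 16);	w is (L)(L)
+ *	t = t ^ (w >> 16);	t is (H)(H)
+ *
+ * Each datum ends up in both halves of a word, so a single word
+ * store presents it on both halves of the bus.
+ */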
+
+/*
+ * reads short ints (16 bits) from an I/O address into a block of memory
+ * with a length guaranteed to be a multiple of 16 bytes
+ * with a word aligned destination address
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+ENTRY(insw16)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned and the size is
+   suitably aligned, do it fast */
+
+ tst r2, #0x00000007
+ tsteq r1, #0x00000003
+
+ bne _C_LABEL(insw)
+
+/* Word aligned insw */
+
+ stmfd sp!, {r4,r5,lr}
+
+.Linsw16loop:
+ ldr r3, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov r3, r3, lsr #16 /* Put the two shorts together */
+ orr r3, r3, lr, lsl #16
+
+ ldr r4, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov r4, r4, lsr #16 /* Put the two shorts together */
+ orr r4, r4, lr, lsl #16
+
+ ldr r5, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov r5, r5, lsr #16 /* Put the two shorts together */
+ orr r5, r5, lr, lsl #16
+
+ ldr ip, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov ip, ip, lsr #16 /* Put the two shorts together */
+ orr ip, ip, lr, lsl #16
+
+ stmia r1!, {r3-r5,ip}
+ subs r2, r2, #0x00000008 /* Next */
+ bgt .Linsw16loop
+
+ ldmfd sp!, {r4,r5,pc} /* Restore regs and go home */
+
+
+/*
+ * Writes short ints (16 bits) from a block of memory to an I/O address
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+ENTRY(outsw16)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the source address is word aligned and the size is suitably
+   aligned, do it fast */
+
+ tst r2, #0x00000007
+ tsteq r1, #0x00000003
+
+ bne _C_LABEL(outsw)
+
+/* Word aligned outsw */
+
+ stmfd sp!, {r4,r5,lr}
+
+.Loutsw16loop:
+ ldmia r1!, {r4,r5,ip,lr}
+
+ eor r3, r4, r4, lsl #16 /* r3 = (A^B)(B) */
+ eor r4, r4, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, r4, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str r4, [r0]
+
+/* mov r3, r4, lsl #16
+ * orr r3, r3, r3, lsr #16
+ * str r3, [r0]
+ *
+ * mov r3, r4, lsr #16
+ * orr r3, r3, r3, lsl #16
+ * str r3, [r0]
+ */
+
+ eor r3, r5, r5, lsl #16 /* r3 = (A^B)(B) */
+ eor r5, r5, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, r5, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str r5, [r0]
+
+ eor r3, ip, ip, lsl #16 /* r3 = (A^B)(B) */
+ eor ip, ip, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, ip, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str ip, [r0]
+
+ eor r3, lr, lr, lsl #16 /* r3 = (A^B)(B) */
+ eor lr, lr, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, lr, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str lr, [r0]
+
+ subs r2, r2, #0x00000008
+ bgt .Loutsw16loop
+
+ ldmfd sp!, {r4,r5,pc} /* and go home */
+
+/*
+ * reads short ints (16 bits) from an I/O address into a block of memory
+ * The I/O address is assumed to be mapped multiple times in a block of
+ * 8 words.
+ * The destination address should be word aligned.
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+ENTRY(inswm8)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned, do it fast */
+
+ tst r1, #0x00000003
+
+ bne _C_LABEL(insw)
+
+/* Word aligned insw */
+
+ stmfd sp!, {r4-r9,lr}
+
+ mov lr, #0xff000000
+ orr lr, lr, #0x00ff0000
+
+.Linswm8_loop8:
+ cmp r2, #8
+ bcc .Linswm8_l8
+
+ ldmia r0, {r3-r9,ip}
+
+ bic r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ bic r5, r5, lr
+ orr r4, r5, r6, lsl #16
+ bic r7, r7, lr
+ orr r5, r7, r8, lsl #16
+ bic r9, r9, lr
+ orr r6, r9, ip, lsl #16
+
+ stmia r1!, {r3-r6}
+
+ subs r2, r2, #0x00000008 /* Next */
+ bne .Linswm8_loop8
+ beq .Linswm8_l1
+
+.Linswm8_l8:
+ cmp r2, #4
+ bcc .Linswm8_l4
+
+ ldmia r0, {r3-r6}
+
+ bic r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ bic r5, r5, lr
+ orr r4, r5, r6, lsl #16
+
+ stmia r1!, {r3-r4}
+
+ subs r2, r2, #0x00000004
+ beq .Linswm8_l1
+
+.Linswm8_l4:
+ cmp r2, #2
+ bcc .Linswm8_l2
+
+ ldmia r0, {r3-r4}
+
+ bic r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ str r3, [r1], #0x0004
+
+ subs r2, r2, #0x00000002
+ beq .Linswm8_l1
+
+.Linswm8_l2:
+ cmp r2, #1
+ bcc .Linswm8_l1
+
+ ldr r3, [r0]
+ subs r2, r2, #0x00000001 /* Test in load delay slot */
+ /* XXX, why don't we use result? */
+
+ strb r3, [r1], #0x0001
+ mov r3, r3, lsr #8
+ strb r3, [r1], #0x0001
+
+
+.Linswm8_l1:
+ ldmfd sp!, {r4-r9,pc} /* And go home */
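+
+/*
+ * Rough C model of the 8-word path above (reference only): the
+ * register is mapped at 8 consecutive word addresses, so one ldmia
+ * performs 8 port reads; each word carries a 16-bit datum in its
+ * low half and pairs are packed before the burst store:
+ *
+ *	a = io[0]; b = io[1];
+ *	*mem++ = (a & 0xffff) | (b << 16);
+ */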
+
+/*
+ * write short ints (16 bits) to an I/O address from a block of memory
+ * The I/O address is assumed to be mapped multiple times in a block of
+ * 8 words.
+ * The source address should be word aligned.
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+ENTRY(outswm8)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the source address is word aligned, do it fast */
+
+ tst r1, #0x00000003
+
+ bne _C_LABEL(outsw)
+
+/* Word aligned outsw */
+
+ stmfd sp!, {r4-r8,lr}
+
+.Loutswm8_loop8:
+ cmp r2, #8
+ bcc .Loutswm8_l8
+
+ ldmia r1!, {r3,r5,r7,ip}
+
+ eor r4, r3, r3, lsr #16 /* r4 = (A)(A^B) */
+ eor r3, r3, r4, lsl #16 /* r3 = (A^A^B)(B) = (B)(B) */
+ eor r4, r4, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+
+ eor r6, r5, r5, lsr #16 /* r6 = (A)(A^B) */
+ eor r5, r5, r6, lsl #16 /* r5 = (A^A^B)(B) = (B)(B) */
+ eor r6, r6, r5, lsr #16 /* r6 = (A)(B^A^B) = (A)(A) */
+
+ eor r8, r7, r7, lsr #16 /* r8 = (A)(A^B) */
+ eor r7, r7, r8, lsl #16 /* r7 = (A^A^B)(B) = (B)(B) */
+ eor r8, r8, r7, lsr #16 /* r8 = (A)(B^A^B) = (A)(A) */
+
+ eor lr, ip, ip, lsr #16 /* lr = (A)(A^B) */
+ eor ip, ip, lr, lsl #16 /* ip = (A^A^B)(B) = (B)(B) */
+ eor lr, lr, ip, lsr #16 /* lr = (A)(B^A^B) = (A)(A) */
+
+ stmia r0, {r3-r8,ip,lr}
+
+ subs r2, r2, #0x00000008 /* Next */
+ bne .Loutswm8_loop8
+ beq .Loutswm8_l1
+
+.Loutswm8_l8:
+ cmp r2, #4
+ bcc .Loutswm8_l4
+
+ ldmia r1!, {r3-r4}
+
+ eor r6, r3, r3, lsr #16 /* r6 = (A)(A^B) */
+ eor r5, r3, r6, lsl #16 /* r5 = (A^A^B)(B) = (B)(B) */
+ eor r6, r6, r5, lsr #16 /* r6 = (A)(B^A^B) = (A)(A) */
+
+ eor r8, r4, r4, lsr #16 /* r8 = (A)(A^B) */
+ eor r7, r4, r8, lsl #16 /* r7 = (A^A^B)(B) = (B)(B) */
+ eor r8, r8, r7, lsr #16 /* r8 = (A)(B^A^B) = (A)(A) */
+
+ stmia r0, {r5-r8}
+
+ subs r2, r2, #0x00000004
+ beq .Loutswm8_l1
+
+.Loutswm8_l4:
+ cmp r2, #2
+ bcc .Loutswm8_l2
+
+ ldr r3, [r1], #0x0004 /* r3 = (A)(B) */
+ subs r2, r2, #0x00000002 /* Done test in Load delay slot */
+
+ eor r5, r3, r3, lsr #16 /* r5 = (A)(A^B)*/
+ eor r4, r3, r5, lsl #16 /* r4 = (A^A^B)(B) = (B)(B) */
+ eor r5, r5, r4, lsr #16 /* r5 = (A)(B^A^B) = (A)(A) */
+
+ stmia r0, {r4, r5}
+
+ beq .Loutswm8_l1
+
+.Loutswm8_l2:
+ cmp r2, #1
+ bcc .Loutswm8_l1
+
+ ldrb r3, [r1], #0x0001
+ ldrb r4, [r1], #0x0001
+ subs r2, r2, #0x00000001 /* Done test in load delay slot */
+ /* XXX This test isn't used? */
+ orr r3, r3, r4, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+
+.Loutswm8_l1:
+ ldmfd sp!, {r4-r8,pc} /* And go home */
diff --git a/sys/arm/arm/bootconfig.c b/sys/arm/arm/bootconfig.c
new file mode 100644
index 0000000..02892c4
--- /dev/null
+++ b/sys/arm/arm/bootconfig.c
@@ -0,0 +1,126 @@
+/* $NetBSD: bootconfig.c,v 1.2 2002/03/10 19:56:39 lukem Exp $ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+
+#include <sys/systm.h>
+
+#include <machine/bootconfig.h>
+
+
+/*
+ * Function to identify and process different types of boot argument
+ */
+
+int
+get_bootconf_option(opts, opt, type, result)
+ char *opts;
+ char *opt;
+ int type;
+ void *result;
+{
+ char *ptr;
+ char *optstart;
+ int not;
+
+ ptr = opts;
+
+ while (*ptr) {
+ /* Find start of option */
+ while (*ptr == ' ' || *ptr == '\t')
+ ++ptr;
+
+ if (*ptr == 0)
+ break;
+
+ not = 0;
+
+ /* Is it a negate option */
+ if ((type & BOOTOPT_TYPE_MASK) == BOOTOPT_TYPE_BOOLEAN && *ptr == '!') {
+ not = 1;
+ ++ptr;
+ }
+
+ /* Find the end of option */
+ optstart = ptr;
+ while (*ptr != 0 && *ptr != ' ' && *ptr != '\t' && *ptr != '=')
+ ++ptr;
+
+ if ((*ptr == '=')
+ || (*ptr != '=' && ((type & BOOTOPT_TYPE_MASK) == BOOTOPT_TYPE_BOOLEAN))) {
+ /* compare the option */
+ if (strncmp(optstart, opt, (ptr - optstart)) == 0) {
+ /* found */
+
+ if (*ptr == '=')
+ ++ptr;
+
+ switch(type & BOOTOPT_TYPE_MASK) {
+ case BOOTOPT_TYPE_BOOLEAN :
+ if (*(ptr - 1) == '=')
+ *((int *)result) = ((u_int)strtoul(ptr, NULL, 10) != 0);
+ else
+ *((int *)result) = !not;
+ break;
+ case BOOTOPT_TYPE_STRING :
+ *((char **)result) = ptr;
+ break;
+ case BOOTOPT_TYPE_INT :
+ *((int *)result) = (u_int)strtoul(ptr, NULL, 10);
+ break;
+ case BOOTOPT_TYPE_BININT :
+ *((int *)result) = (u_int)strtoul(ptr, NULL, 2);
+ break;
+ case BOOTOPT_TYPE_HEXINT :
+ *((int *)result) = (u_int)strtoul(ptr, NULL, 16);
+ break;
+ default:
+ return(0);
+ }
+ return(1);
+ }
+ }
+ /* skip to next option */
+ while (*ptr != ' ' && *ptr != '\t' && *ptr != 0)
+ ++ptr;
+ }
+ return(0);
+}
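+
+/*
+ * Example caller (hypothetical; "boot_args" and the option names
+ * are only illustrative):
+ *
+ *	int single;
+ *	char *rootdev;
+ *
+ *	if (get_bootconf_option(boot_args, "single",
+ *	    BOOTOPT_TYPE_BOOLEAN, &single) && single)
+ *		boothowto |= RB_SINGLE;
+ *	if (get_bootconf_option(boot_args, "root",
+ *	    BOOTOPT_TYPE_STRING, &rootdev))
+ *		printf("root=%s\n", rootdev);
+ *
+ * Note that a BOOTOPT_TYPE_STRING result points into the option
+ * string itself, not a copy.
+ */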
diff --git a/sys/arm/arm/bus_space_asm_generic.S b/sys/arm/arm/bus_space_asm_generic.S
new file mode 100644
index 0000000..f8635a6
--- /dev/null
+++ b/sys/arm/arm/bus_space_asm_generic.S
@@ -0,0 +1,353 @@
+/* $NetBSD: bus_space_asm_generic.S,v 1.3 2003/03/27 19:46:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1997 Causality Limited.
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/cpuconf.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Generic bus_space functions.
+ */
+
+/*
+ * read single
+ */
+
+ENTRY(generic_bs_r_1)
+ ldrb r0, [r1, r2]
+ mov pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_r_2)
+ ldrh r0, [r1, r2]
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_r_4)
+ ldr r0, [r1, r2]
+ mov pc, lr
+
+/*
+ * write single
+ */
+
+ENTRY(generic_bs_w_1)
+ strb r3, [r1, r2]
+ mov pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_w_2)
+ strh r3, [r1, r2]
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_w_4)
+ str r3, [r1, r2]
+ mov pc, lr
+
+/*
+ * read multiple
+ */
+
+ENTRY(generic_bs_rm_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrb r3, [r0]
+ strb r3, [r1], #1
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
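+
+/*
+ * Note: the "multiple" ops access a single bus location repeatedly
+ * (the device-side address never advances), unlike the "region" ops
+ * further down, which step through consecutive bus addresses.
+ * Rough C model of the loop above (reference only):
+ *
+ *	while (count-- > 0)
+ *		*mem++ = *(volatile u_char *)(bsh + offset);
+ */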
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_rm_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrh r3, [r0]
+ strh r3, [r1], #2
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_rm_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldr r3, [r0]
+ str r3, [r1], #4
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+/*
+ * write multiple
+ */
+
+ENTRY(generic_bs_wm_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrb r3, [r1], #1
+ strb r3, [r0]
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_wm_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrh r3, [r1], #2
+ strh r3, [r0]
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_wm_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldr r3, [r1], #4
+ str r3, [r0]
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+/*
+ * read region
+ */
+
+ENTRY(generic_bs_rr_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrb r3, [r0], #1
+ strb r3, [r1], #1
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_rr_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrh r3, [r0], #2
+ strh r3, [r1], #2
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_rr_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldr r3, [r0], #4
+ str r3, [r1], #4
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+/*
+ * write region.
+ */
+
+ENTRY(generic_bs_wr_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_wr_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldrh r3, [r1], #2
+ strh r3, [r0], #2
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_wr_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: ldr r3, [r1], #4
+ str r3, [r0], #4
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+/*
+ * set region
+ */
+
+ENTRY(generic_bs_sr_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: strb r1, [r0], #1
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_sr_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: strh r1, [r0], #2
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+#endif
+
+ENTRY(generic_bs_sr_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+1: str r1, [r0], #4
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+/*
+ * copy region
+ */
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+ENTRY(generic_armv4_bs_c_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ add r1, r2, r3
+ ldr r2, [sp, #4]
+ teq r2, #0
+ moveq pc, lr
+
+ cmp r0, r1
+ blt 2f
+
+1: ldrh r3, [r0], #2
+ strh r3, [r1], #2
+ subs r2, r2, #1
+ bne 1b
+
+ mov pc, lr
+
+2: add r0, r0, r2, lsl #1
+ add r1, r1, r2, lsl #1
+ sub r0, r0, #2
+ sub r1, r1, #2
+
+3: ldrh r3, [r0], #-2
+ strh r3, [r1], #-2
+ subs r2, r2, #1
+ bne 3b
+
+ mov pc, lr
+#endif
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
new file mode 100644
index 0000000..49d5777
--- /dev/null
+++ b/sys/arm/arm/busdma_machdep.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 2002 Peter Grehan
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * ARM bus dma support routines
+ */
+
+#define _ARM32_BUS_DMA_PRIVATE
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/cpufunc.h>
+
+struct bus_dma_tag {
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+ /*
+ * DMA range for this tag. If the page doesn't fall within
+ * one of these ranges, an error is returned. The caller
+ * may then decide what to do with the transfer. If the
+ * range pointer is NULL, it is ignored.
+ */
+ struct arm32_dma_range *ranges;
+ int _nranges;
+
+};
+
+struct arm_seglist {
+ bus_dma_segment_t seg;
+ SLIST_ENTRY(arm_seglist) next;
+};
+
+#define MAX_SEGS 512
+struct bus_dmamap {
+ bus_dma_tag_t dmat;
+ int flags;
+ SLIST_HEAD(, arm_seglist) seglist;
+};
+
+static int
+bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
+    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
+    int flags, vm_offset_t *lastaddrp, int *segp,
+    int first);
+
+/*
+ * Check to see if the specified page is in an allowed DMA range.
+ */
+static __inline struct arm32_dma_range *
+_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
+ bus_addr_t curaddr)
+{
+ struct arm32_dma_range *dr;
+ int i;
+
+ for (i = 0, dr = ranges; i < nranges; i++, dr++) {
+ if (curaddr >= dr->dr_sysbase &&
+ round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
+ return (dr);
+ }
+
+ return (NULL);
+}
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example). Drivers that don't provide their own locks
+ * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+ struct mtx *dmtx;
+
+ dmtx = (struct mtx *)arg;
+ switch (op) {
+ case BUS_DMA_LOCK:
+ mtx_lock(dmtx);
+ break;
+ case BUS_DMA_UNLOCK:
+ mtx_unlock(dmtx);
+ break;
+ default:
+ panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+ }
+}
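+
+/*
+ * Example tag creation (hypothetical driver; "sc" and the limits
+ * are only illustrative).  A driver with no lock of its own passes
+ * busdma_lock_mutex with &Giant so deferred callbacks run under
+ * Giant:
+ *
+ *	error = bus_dma_tag_create(NULL, 1, 0,
+ *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
+ *	    NULL, NULL, MAXBSIZE, 1, BUS_SPACE_MAXSIZE_32BIT,
+ *	    0, busdma_lock_mutex, &Giant, &sc->sc_dmat);
+ */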
+
+/*
+ * dflt_lock should never get called. It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are never meant to be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+static void
+dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+#ifdef INVARIANTS
+ panic("driver error: busdma dflt_lock called");
+#else
+ printf("DRIVER_ERROR: busdma dflt_lock called\n");
+#endif
+}
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter,
+ void *filterarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+
+ newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
+ if (newtag == NULL)
+ return (ENOMEM);
+
+ newtag->parent = parent;
+ newtag->alignment = alignment;
+ newtag->boundary = boundary;
+ newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
+ newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
+ newtag->filter = filter;
+ newtag->filterarg = filterarg;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
+ newtag->maxsegsz = maxsegsz;
+ newtag->flags = flags;
+ newtag->ref_count = 1; /* Count ourself */
+ newtag->map_count = 0;
+ newtag->ranges = bus_dma_get_range();
+ if (lockfunc != NULL) {
+ newtag->lockfunc = lockfunc;
+ newtag->lockfuncarg = lockfuncarg;
+ } else {
+ newtag->lockfunc = dflt_lock;
+ newtag->lockfuncarg = NULL;
+ }
+
+ /*
+ * Take into account any restrictions imposed by our parent tag
+ */
+ if (parent != NULL) {
+ newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = max(parent->highaddr, newtag->highaddr);
+
+ /*
+ * XXX Not really correct??? Probably need to honor boundary
+		 * all the way up the inheritance chain.
+ */
+ newtag->boundary = max(parent->boundary, newtag->boundary);
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
+ }
+ if (newtag->parent != NULL)
+ atomic_add_int(&parent->ref_count, 1);
+ }
+
+ *dmat = newtag;
+ return (error);
+}
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ if (dmat != NULL) {
+
+ if (dmat->map_count != 0)
+ return (EBUSY);
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ atomic_subtract_int(&dmat->ref_count, 1);
+ if (dmat->ref_count == 0) {
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+ return (0);
+}
+
+static void
+arm_dmamap_freesegs(bus_dmamap_t map)
+{
+ struct arm_seglist *seg = SLIST_FIRST(&map->seglist);
+
+ while (seg) {
+ struct arm_seglist *next;
+
+ next = SLIST_NEXT(seg, next);
+ SLIST_REMOVE_HEAD(&map->seglist, next);
+ free(seg, M_DEVBUF);
+ seg = next;
+ }
+}
+
+static int
+arm_dmamap_addseg(bus_dmamap_t map, vm_offset_t addr, vm_size_t size)
+{
+ struct arm_seglist *seg = malloc(sizeof(*seg), M_DEVBUF, M_NOWAIT);
+
+ if (!seg)
+ return (ENOMEM);
+ seg->seg.ds_addr = addr;
+ seg->seg.ds_len = size;
+ SLIST_INSERT_HEAD(&map->seglist, seg, next);
+ return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ bus_dmamap_t newmap;
+
+ newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (newmap == NULL)
+ return (ENOMEM);
+ SLIST_INIT(&newmap->seglist);
+ *mapp = newmap;
+ dmat->map_count++;
+
+ return (0);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ arm_dmamap_freesegs(map);
+ free(map, M_DEVBUF);
+ dmat->map_count--;
+ return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with bus_dmamap_load is also allocated.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ bus_dmamap_t newmap;
+
+ int mflags;
+
+ if (flags & BUS_DMA_NOWAIT)
+ mflags = M_NOWAIT;
+ else
+ mflags = M_WAITOK;
+ if (flags & BUS_DMA_ZERO)
+ mflags |= M_ZERO;
+
+ newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (newmap == NULL)
+ return (ENOMEM);
+ SLIST_INIT(&newmap->seglist);
+ *mapp = newmap;
+ if (dmat->maxsize <= PAGE_SIZE) {
+ *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
+ } else {
+ /*
+ * XXX Use Contigmalloc until it is merged into this facility
+ * and handles multi-seg allocations. Nobody is doing
+ * multi-seg allocations yet though.
+ */
+ *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
+ 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
+ dmat->boundary);
+ }
+
+ if (*vaddr == NULL) {
+ free(newmap, M_DEVBUF);
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * Free a piece of memory and its allocated dmamap, both of which were
+ * allocated via bus_dmamem_alloc.  Make the same choice for free/contigfree.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+	if (map == NULL)
+		panic("bus_dmamem_free: Invalid map freed\n");
+ if (dmat->maxsize <= PAGE_SIZE)
+ free(vaddr, M_DEVBUF);
+ else {
+ contigfree(vaddr, dmat->maxsize, M_DEVBUF);
+ }
+ arm_dmamap_freesegs(map);
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
+{
+ vm_offset_t lastaddr = 0;
+ int error, nsegs = 0;
+#ifdef __GNUC__
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+
+ error = bus_dmamap_load_buffer(dmat,
+ dm_segments, map, buf, buflen, NULL,
+ flags, &lastaddr, &nsegs, 1);
+ (*callback)(callback_arg, dm_segments, nsegs, error);
+
+ return (0);
+}
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
+ bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
+ int flags, vm_offset_t *lastaddrp, int *segp,
+ int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vm_offset_t vaddr = (vm_offset_t)buf;
+ int seg;
+ int error = 0;
+ pmap_t pmap;
+ pd_entry_t *pde;
+ pt_entry_t pte;
+ pt_entry_t *ptep;
+
+
+ if (td != NULL)
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ else
+ pmap = NULL;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(dmat->boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+ * Get the physical address for this segment.
+ *
+ * XXX Don't support checking for coherent mappings
+ * XXX in user address space.
+ */
+ if (__predict_true(pmap == pmap_kernel())) {
+ (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
+ if (__predict_false(pmap_pde_section(pde))) {
+ curaddr = (*pde & L1_S_FRAME) |
+ (vaddr & L1_S_OFFSET);
+ if (*pde & L1_S_CACHE_MASK) {
+ map->flags &=
+ ~ARM32_DMAMAP_COHERENT;
+ }
+ } else {
+ pte = *ptep;
+				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
+				    ("INV type"));
+ if (__predict_false((pte & L2_TYPE_MASK)
+ == L2_TYPE_L)) {
+ curaddr = (pte & L2_L_FRAME) |
+ (vaddr & L2_L_OFFSET);
+ if (pte & L2_L_CACHE_MASK) {
+ map->flags &=
+ ~ARM32_DMAMAP_COHERENT;
+ }
+ } else {
+ curaddr = (pte & L2_S_FRAME) |
+ (vaddr & L2_S_OFFSET);
+ if (pte & L2_S_CACHE_MASK) {
+ map->flags &=
+ ~ARM32_DMAMAP_COHERENT;
+ }
+ }
+ }
+ } else {
+ curaddr = pmap_extract(pmap, vaddr);
+ map->flags &= ~ARM32_DMAMAP_COHERENT;
+ }
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * the previous segment if possible.
+ */
+ error = arm_dmamap_addseg(map,
+ (vm_offset_t)curaddr, sgsize);
+ if (error)
+ break;
+
+ if (first) {
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->nsegments)
+ break;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0)
+ error = EFBIG; /* XXX better return value here? */
+ return (error);
+}
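+
+/*
+ * Worked example (illustrative numbers): a page-aligned 6KB buffer
+ * whose two pages map to physical 0x1000 and 0x2000 loads as one
+ * segment {0x1000, 6144}, because the second page starts exactly
+ * where the first ended (curaddr == lastaddr).  If the second page
+ * mapped to 0x8000 instead, two segments would result:
+ * {0x1000, 4096} and {0x8000, 2048}.
+ */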
+
+/*
+ * Like bus_dmamap_load(), but for mbufs.
+ */
+int
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+#ifdef __GNUC__
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ int nsegs = 0, error = 0;
+
+ M_ASSERTPKTHDR(m0);
+
+ if (m0->m_pkthdr.len <= dmat->maxsize) {
+ int first = 1;
+ vm_offset_t lastaddr = 0;
+ struct mbuf *m;
+
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ if (m->m_len > 0) {
+ error = bus_dmamap_load_buffer(dmat,
+ dm_segments, map, m->m_data, m->m_len, NULL,
+ flags, &lastaddr, &nsegs, first);
+ first = 0;
+ }
+ }
+ } else {
+ error = EINVAL;
+ }
+
+ if (error) {
+ /*
+ * force "no valid mappings" on error in callback.
+ */
+ (*callback)(callback_arg, dm_segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dm_segments, nsegs+1,
+ m0->m_pkthdr.len, error);
+ }
+ return (error);
+}
+
+/*
+ * Like bus_dmamap_load(), but for uios.
+ */
+int
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+ vm_offset_t lastaddr;
+#ifdef __GNUC__
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ int nsegs, i, error, first;
+ bus_size_t resid;
+ struct iovec *iov;
+ struct thread *td = NULL;
+
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ td = uio->uio_td;
+ KASSERT(td != NULL,
+ ("bus_dmamap_load_uio: USERSPACE but no proc"));
+ }
+
+ first = 1;
+ nsegs = error = 0;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ bus_size_t minlen =
+ resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ caddr_t addr = (caddr_t) iov[i].iov_base;
+
+ if (minlen > 0) {
+ error = bus_dmamap_load_buffer(dmat, dm_segments, map,
+ addr, minlen, td, flags, &lastaddr, &nsegs, first);
+
+ first = 0;
+
+ resid -= minlen;
+ }
+ }
+
+ if (error) {
+ /*
+ * force "no valid mappings" on error in callback.
+ */
+ (*callback)(callback_arg, dm_segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dm_segments, nsegs+1,
+ uio->uio_resid, error);
+ }
+
+ return (error);
+}
+
+/*
+ * Release the mapping held by map and free its segment list.
+ */
+void
+bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ arm_dmamap_freesegs(map);
+ return;
+}
+
+void
+bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct arm_seglist *seg = SLIST_FIRST(&map->seglist);
+
+ if (op != BUS_DMASYNC_PREREAD && op != BUS_DMASYNC_PREWRITE)
+ return;
+ /* Skip cache frobbing if mapping was COHERENT. */
+ if (map->flags & ARM32_DMAMAP_COHERENT) {
+ /* Drain the write buffer. */
+ cpu_drain_writebuf();
+ return;
+ }
+ while (seg) {
+ cpu_dcache_wbinv_range(seg->seg.ds_addr, seg->seg.ds_len);
+ seg = SLIST_NEXT(seg, next);
+ }
+}
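+
+/*
+ * Example sync usage (hypothetical transmit path; names are only
+ * illustrative).  The buffer is written, then synced before the
+ * device is started, so the data is pushed out of the data cache:
+ *
+ *	bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
+ *	    sc_cb, sc, 0);
+ *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
+ *
+ * This implementation writes back and invalidates each segment
+ * unless the mapping was found to be coherent.
+ */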
diff --git a/sys/arm/arm/copystr.S b/sys/arm/arm/copystr.S
new file mode 100644
index 0000000..9058b69
--- /dev/null
+++ b/sys/arm/arm/copystr.S
@@ -0,0 +1,227 @@
+/* $NetBSD: copystr.S,v 1.8 2002/10/13 14:54:48 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * copystr.S
+ *
+ * optimised and fault protected copystr functions
+ *
+ * Created : 16/05/95
+ */
+
+
+#include "assym.s"
+#include <machine/asm.h>
+#include <machine/armreg.h>
+#include <machine/asmacros.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/errno.h>
+
+ .text
+ .align 0
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lpcb:
+ .word _C_LABEL(__pcpu) + PC_CURPCB
+#endif
+
+/*
+ * r0 - from
+ * r1 - to
+ * r2 - maxlen
+ * r3 - lencopied
+ *
+ * Copy string from r0 to r1
+ */
+ENTRY(copystr)
+ stmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ teq r2, #0x00000000
+ mov r5, #0x00000000
+ moveq r0, #ENAMETOOLONG
+ beq 2f
+
+1: ldrb r4, [r0], #0x0001
+ add r5, r5, #0x00000001
+ teq r4, #0x00000000
+ strb r4, [r1], #0x0001
+ teqne r5, r2
+ bne 1b
+
+ teq r4, #0x00000000
+ moveq r0, #0x00000000
+ movne r0, #ENAMETOOLONG
+
+2: teq r3, #0x00000000
+ strne r5, [r3]
+
+ ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ mov pc, lr
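+
+/*
+ * Rough C model of copystr (reference only, not assembled):
+ *
+ *	int
+ *	copystr(const char *from, char *to, size_t maxlen, size_t *done)
+ *	{
+ *		size_t n = 0;
+ *		int error = ENAMETOOLONG;
+ *
+ *		while (n < maxlen) {
+ *			n++;
+ *			if ((*to++ = *from++) == '\0') {
+ *				error = 0;
+ *				break;
+ *			}
+ *		}
+ *		if (done != NULL)
+ *			*done = n;
+ *		return (error);
+ *	}
+ */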
+
+#define SAVE_REGS stmfd sp!, {r4-r6}
+#define RESTORE_REGS ldmfd sp!, {r4-r6}
+
+/*
+ * r0 - user space address
+ * r1 - kernel space address
+ * r2 - maxlen
+ * r3 - lencopied
+ *
+ * Copy string from user space to kernel space
+ */
+ENTRY(copyinstr)
+ SAVE_REGS
+
+ teq r2, #0x00000000
+ mov r6, #0x00000000
+ moveq r0, #ENAMETOOLONG
+ beq 2f
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r3, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r3, r14}
+#else
+ ldr r4, .Lpcb
+ ldr r4, [r4]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r4, #0x00000000
+ beq .Lcopystrpcbfault
+#endif
+
+ adr r5, .Lcopystrfault
+ str r5, [r4, #PCB_ONFAULT]
+
+1: ldrbt r5, [r0], #0x0001
+ add r6, r6, #0x00000001
+ teq r5, #0x00000000
+ strb r5, [r1], #0x0001
+ teqne r6, r2
+ bne 1b
+
+ mov r0, #0x00000000
+ str r0, [r4, #PCB_ONFAULT]
+
+ teq r5, #0x00000000
+ moveq r0, #0x00000000
+ movne r0, #ENAMETOOLONG
+
+2: teq r3, #0x00000000
+ strne r6, [r3]
+
+ RESTORE_REGS
+ mov pc, lr
+
+/*
+ * r0 - kernel space address
+ * r1 - user space address
+ * r2 - maxlen
+ * r3 - lencopied
+ *
+ * Copy string from kernel space to user space
+ */
+ENTRY(copyoutstr)
+ SAVE_REGS
+
+ teq r2, #0x00000000
+ mov r6, #0x00000000
+ moveq r0, #ENAMETOOLONG
+ beq 2f
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r3, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r3, r14}
+#else
+ ldr r4, .Lpcb
+ ldr r4, [r4]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r4, #0x00000000
+ beq .Lcopystrpcbfault
+#endif
+
+ adr r5, .Lcopystrfault
+ str r5, [r4, #PCB_ONFAULT]
+
+1: ldrb r5, [r0], #0x0001
+ add r6, r6, #0x00000001
+ teq r5, #0x00000000
+ strbt r5, [r1], #0x0001
+ teqne r6, r2
+ bne 1b
+
+ mov r0, #0x00000000
+ str r0, [r4, #PCB_ONFAULT]
+
+ teq r5, #0x00000000
+ moveq r0, #0x00000000
+ movne r0, #ENAMETOOLONG
+
+2: teq r3, #0x00000000
+ strne r6, [r3]
+
+ RESTORE_REGS
+ mov pc, lr
+
+/* A fault occurred during the copy */
+.Lcopystrfault:
+ mov r1, #0x00000000
+ str r1, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+ mov pc, lr
+
+#ifdef DIAGNOSTIC
+.Lcopystrpcbfault:
+ mov r2, r1
+ mov r1, r0
+ adr r0, Lcopystrpcbfaulttext
+ bic sp, sp, #7 /* align stack to 8 bytes */
+ b _C_LABEL(panic)
+
+Lcopystrpcbfaulttext:
+ .asciz "No valid PCB during copyinoutstr() addr1=%08x addr2=%08x\n"
+ .align 0
+#endif
diff --git a/sys/arm/arm/cpufunc.c b/sys/arm/arm/cpufunc.c
new file mode 100644
index 0000000..65e3acd
--- /dev/null
+++ b/sys/arm/arm/cpufunc.c
@@ -0,0 +1,2177 @@
+/* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
+
+/*
+ * arm7tdmi support code Copyright (c) 2001 John Fremlin
+ * arm8 support code Copyright (c) 1997 ARM Limited
+ * arm8 support code Copyright (c) 1997 Causality Limited
+ * arm9 support code Copyright (C) 2001 ARM Ltd
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufuncs.c
+ *
+ * C functions for supporting CPU / MMU / TLB specific operations.
+ *
+ * Created : 30/01/97
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <machine/cpu.h>
+#include <machine/disassem.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/cpuconf.h>
+#include <machine/cpufunc.h>
+#include <machine/bootconfig.h>
+
+#ifdef CPU_XSCALE_80200
+#include <arm/xscale/i80200reg.h>
+#include <arm/xscale/i80200var.h>
+#endif
+
+#ifdef CPU_XSCALE_80321
+#include <arm/xscale/i80321reg.h>
+#include <arm/xscale/i80321var.h>
+#endif
+
+#ifdef CPU_XSCALE_IXP425
+#include <arm/xscale/ixp425reg.h>
+#include <arm/xscale/ixp425var.h>
+#endif
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
+#include <arm/xscale/xscalereg.h>
+#endif
+
+#if defined(PERFCTRS)
+struct arm_pmc_funcs *arm_pmc;
+#endif
+
+/* PRIMARY CACHE VARIABLES */
+int arm_picache_size;
+int arm_picache_line_size;
+int arm_picache_ways;
+
+int arm_pdcache_size; /* and unified */
+int arm_pdcache_line_size;
+int arm_pdcache_ways;
+
+int arm_pcache_type;
+int arm_pcache_unified;
+
+int arm_dcache_align;
+int arm_dcache_align_mask;
+
+/* 1 == use cpu_sleep(), 0 == don't */
+int cpu_do_powersave;
+int ctrl;
+#ifdef CPU_ARM3
+struct cpu_functions arm3_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ arm3_control, /* control */
+ NULL, /* domain */
+ NULL, /* setttb */
+ NULL, /* faultstatus */
+ NULL, /* faultaddress */
+
+ /* TLB functions */
+
+ cpufunc_nullop, /* tlb_flushID */
+ (void *)cpufunc_nullop, /* tlb_flushID_SE */
+ cpufunc_nullop, /* tlb_flushI */
+ (void *)cpufunc_nullop, /* tlb_flushI_SE */
+ cpufunc_nullop, /* tlb_flushD */
+ (void *)cpufunc_nullop, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *) cpufunc_nullop, /* icache_sync_range */
+
+ arm3_cache_flush, /* dcache_wbinv_all */
+ (void *)arm3_cache_flush, /* dcache_wbinv_range */
+ (void *)arm3_cache_flush, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm3_cache_flush, /* idcache_wbinv_all */
+ (void *)arm3_cache_flush, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ early_abort_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ NULL, /* context_switch */
+
+ (void *)cpufunc_nullop /* cpu setup */
+
+};
+#endif /* CPU_ARM3 */
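+
+/*
+ * A minimal sketch of how a table like the one above is consumed: the
+ * matching table is copied into the global "cpufuncs" in set_cpufuncs()
+ * below, and callers reach the slots through wrapper macros in
+ * <machine/cpufunc.h>, along the lines of:
+ *
+ *	#define cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
+ *	#define cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
+ *
+ * so an operation a core doesn't need simply points at cpufunc_nullop.
+ */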
+
+#ifdef CPU_ARM6
+struct cpu_functions arm6_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm67_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm67_tlb_flush, /* tlb_flushID */
+ arm67_tlb_purge, /* tlb_flushID_SE */
+ arm67_tlb_flush, /* tlb_flushI */
+ arm67_tlb_purge, /* tlb_flushI_SE */
+ arm67_tlb_flush, /* tlb_flushD */
+ arm67_tlb_purge, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *) cpufunc_nullop, /* icache_sync_range */
+
+ arm67_cache_flush, /* dcache_wbinv_all */
+ (void *)arm67_cache_flush, /* dcache_wbinv_range */
+ (void *)arm67_cache_flush, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm67_cache_flush, /* idcache_wbinv_all */
+ (void *)arm67_cache_flush, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+#ifdef ARM6_LATE_ABORT
+ late_abort_fixup, /* dataabt_fixup */
+#else
+ early_abort_fixup, /* dataabt_fixup */
+#endif
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm67_context_switch, /* context_switch */
+
+ arm6_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+struct cpu_functions arm7_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm67_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm67_tlb_flush, /* tlb_flushID */
+ arm67_tlb_purge, /* tlb_flushID_SE */
+ arm67_tlb_flush, /* tlb_flushI */
+ arm67_tlb_purge, /* tlb_flushI_SE */
+ arm67_tlb_flush, /* tlb_flushD */
+ arm67_tlb_purge, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *)cpufunc_nullop, /* icache_sync_range */
+
+ arm67_cache_flush, /* dcache_wbinv_all */
+ (void *)arm67_cache_flush, /* dcache_wbinv_range */
+ (void *)arm67_cache_flush, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm67_cache_flush, /* idcache_wbinv_all */
+ (void *)arm67_cache_flush, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ late_abort_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm67_context_switch, /* context_switch */
+
+ arm7_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+struct cpu_functions arm7tdmi_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm7tdmi_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm7tdmi_tlb_flushID, /* tlb_flushID */
+ arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
+ arm7tdmi_tlb_flushID, /* tlb_flushI */
+ arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
+ arm7tdmi_tlb_flushID, /* tlb_flushD */
+ arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *)cpufunc_nullop, /* icache_sync_range */
+
+ arm7tdmi_cache_flushID, /* dcache_wbinv_all */
+ (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
+ (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm7tdmi_cache_flushID, /* idcache_wbinv_all */
+ (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ late_abort_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm7tdmi_context_switch, /* context_switch */
+
+ arm7tdmi_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+struct cpu_functions arm8_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm8_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm8_tlb_flushID, /* tlb_flushID */
+ arm8_tlb_flushID_SE, /* tlb_flushID_SE */
+ arm8_tlb_flushID, /* tlb_flushI */
+ arm8_tlb_flushID_SE, /* tlb_flushI_SE */
+ arm8_tlb_flushID, /* tlb_flushD */
+ arm8_tlb_flushID_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *)cpufunc_nullop, /* icache_sync_range */
+
+ arm8_cache_purgeID, /* dcache_wbinv_all */
+ (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
+/*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
+ (void *)arm8_cache_cleanID, /* dcache_wb_range */
+
+ arm8_cache_purgeID, /* idcache_wbinv_all */
+ (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm8_context_switch, /* context_switch */
+
+ arm8_setup /* cpu setup */
+};
+#endif /* CPU_ARM8 */
+
+#ifdef CPU_ARM9
+struct cpu_functions arm9_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+	cpufunc_domains,		/* domain */
+	arm9_setttb,			/* setttb */
+	cpufunc_faultstatus,		/* faultstatus */
+	cpufunc_faultaddress,		/* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ arm9_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ arm9_cache_syncI, /* icache_sync_all */
+ arm9_cache_syncI_rng, /* icache_sync_range */
+
+	/* ...cache in write-through mode... */
+ arm9_cache_flushD, /* dcache_wbinv_all */
+ arm9_cache_flushD_rng, /* dcache_wbinv_range */
+ arm9_cache_flushD_rng, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm9_cache_flushID, /* idcache_wbinv_all */
+ arm9_cache_flushID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm9_context_switch, /* context_switch */
+
+ arm9_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM9 */
+
+#ifdef CPU_ARM10
+struct cpu_functions arm10_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+	cpufunc_domains,		/* domain */
+	arm10_setttb,			/* setttb */
+	cpufunc_faultstatus,		/* faultstatus */
+	cpufunc_faultaddress,		/* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ arm10_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ arm10_tlb_flushI_SE, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ arm10_icache_sync_all, /* icache_sync_all */
+ arm10_icache_sync_range, /* icache_sync_range */
+
+ arm10_dcache_wbinv_all, /* dcache_wbinv_all */
+ arm10_dcache_wbinv_range, /* dcache_wbinv_range */
+ arm10_dcache_inv_range, /* dcache_inv_range */
+ arm10_dcache_wb_range, /* dcache_wb_range */
+
+ arm10_idcache_wbinv_all, /* idcache_wbinv_all */
+ arm10_idcache_wbinv_range, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm10_context_switch, /* context_switch */
+
+ arm10_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM10 */
+
+#ifdef CPU_SA110
+struct cpu_functions sa110_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ sa1_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ sa1_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ sa1_cache_syncI, /* icache_sync_all */
+ sa1_cache_syncI_rng, /* icache_sync_range */
+
+ sa1_cache_purgeD, /* dcache_wbinv_all */
+ sa1_cache_purgeD_rng, /* dcache_wbinv_range */
+/*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
+ sa1_cache_cleanD_rng, /* dcache_wb_range */
+
+ sa1_cache_purgeID, /* idcache_wbinv_all */
+ sa1_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ sa110_context_switch, /* context_switch */
+
+ sa110_setup /* cpu setup */
+};
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+struct cpu_functions sa11x0_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ sa1_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ sa1_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ sa1_cache_syncI, /* icache_sync_all */
+ sa1_cache_syncI_rng, /* icache_sync_range */
+
+ sa1_cache_purgeD, /* dcache_wbinv_all */
+ sa1_cache_purgeD_rng, /* dcache_wbinv_range */
+/*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
+ sa1_cache_cleanD_rng, /* dcache_wb_range */
+
+ sa1_cache_purgeID, /* idcache_wbinv_all */
+ sa1_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ sa11x0_drain_readbuf, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ sa11x0_cpu_sleep, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ sa11x0_context_switch, /* context_switch */
+
+ sa11x0_setup /* cpu setup */
+};
+#endif /* CPU_SA1100 || CPU_SA1110 */
+
+#ifdef CPU_IXP12X0
+struct cpu_functions ixp12x0_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ sa1_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ sa1_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ sa1_cache_syncI, /* icache_sync_all */
+ sa1_cache_syncI_rng, /* icache_sync_range */
+
+ sa1_cache_purgeD, /* dcache_wbinv_all */
+ sa1_cache_purgeD_rng, /* dcache_wbinv_range */
+/*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
+ sa1_cache_cleanD_rng, /* dcache_wb_range */
+
+ sa1_cache_purgeID, /* idcache_wbinv_all */
+ sa1_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ ixp12x0_drain_readbuf, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ ixp12x0_context_switch, /* context_switch */
+
+ ixp12x0_setup /* cpu setup */
+};
+#endif /* CPU_IXP12X0 */
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+struct cpu_functions xscale_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ xscale_cpwait, /* cpwait */
+
+ /* MMU functions */
+
+ xscale_control, /* control */
+ cpufunc_domains, /* domain */
+ xscale_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ xscale_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ xscale_cache_syncI, /* icache_sync_all */
+ xscale_cache_syncI_rng, /* icache_sync_range */
+
+ xscale_cache_purgeD, /* dcache_wbinv_all */
+ xscale_cache_purgeD_rng, /* dcache_wbinv_range */
+ xscale_cache_flushD_rng, /* dcache_inv_range */
+ xscale_cache_cleanD_rng, /* dcache_wb_range */
+
+ xscale_cache_purgeID, /* idcache_wbinv_all */
+ xscale_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ xscale_cpu_sleep, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ xscale_context_switch, /* context_switch */
+
+ xscale_setup /* cpu setup */
+};
+#endif
+/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
+
+/*
+ * Global constants also used by locore.s
+ */
+
+struct cpu_functions cpufuncs;
+u_int cputype;
+u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
+
+#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
+ defined (CPU_ARM10) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+static void get_cachetype_cp15 __P((void));
+
+/* Additional cache information local to this file. Log2 of some of the
+ above numbers. */
+static int arm_dcache_l2_nsets;
+static int arm_dcache_l2_assoc;
+static int arm_dcache_l2_linesize;
+
+static void
+get_cachetype_cp15()
+{
+ u_int ctype, isize, dsize;
+ u_int multiplier;
+
+ __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
+ : "=r" (ctype));
+
+ /*
+ * ...and thus spake the ARM ARM:
+ *
+ * If an <opcode2> value corresponding to an unimplemented or
+ * reserved ID register is encountered, the System Control
+ * processor returns the value of the main ID register.
+ */
+ if (ctype == cpufunc_id())
+ goto out;
+
+ if ((ctype & CPU_CT_S) == 0)
+ arm_pcache_unified = 1;
+
+ /*
+ * If you want to know how this code works, go read the ARM ARM.
+ */
+
+ arm_pcache_type = CPU_CT_CTYPE(ctype);
+
+ if (arm_pcache_unified == 0) {
+ isize = CPU_CT_ISIZE(ctype);
+ multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
+ arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
+ if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
+ if (isize & CPU_CT_xSIZE_M)
+ arm_picache_line_size = 0; /* not present */
+ else
+ arm_picache_ways = 1;
+ } else {
+ arm_picache_ways = multiplier <<
+ (CPU_CT_xSIZE_ASSOC(isize) - 1);
+ }
+ arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
+ }
+
+ dsize = CPU_CT_DSIZE(ctype);
+ multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
+ arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
+ if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
+ if (dsize & CPU_CT_xSIZE_M)
+ arm_pdcache_line_size = 0; /* not present */
+ else
+ arm_pdcache_ways = 1;
+ } else {
+ arm_pdcache_ways = multiplier <<
+ (CPU_CT_xSIZE_ASSOC(dsize) - 1);
+ }
+ arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
+
+ arm_dcache_align = arm_pdcache_line_size;
+
+ arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
+ arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
+ arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
+ CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
+
+ out:
+ arm_dcache_align_mask = arm_dcache_align - 1;
+}
+#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
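+
+/*
+ * Worked example of the decode above, with illustrative field values
+ * (not any particular CPU): a dsize field with M=0, LEN=2, ASSOC=2 and
+ * SIZE=3 gives multiplier 2, so
+ *
+ *	line size = 1 << (2 + 3) = 32 bytes
+ *	ways      = 2 << (2 - 1) = 4
+ *	size      = 2 << (3 + 8) = 4096 bytes
+ *
+ * i.e. a 4 KB, 4-way cache with 32 sets (log2: 6 + 3 - 2 - 2 = 5).
+ */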
+
+#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
+ defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
+ defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
+/* Cache information for CPUs without cache type registers. */
+struct cachetab {
+ u_int32_t ct_cpuid;
+ int ct_pcache_type;
+ int ct_pcache_unified;
+ int ct_pdcache_size;
+ int ct_pdcache_line_size;
+ int ct_pdcache_ways;
+ int ct_picache_size;
+ int ct_picache_line_size;
+ int ct_picache_ways;
+};
+
+struct cachetab cachetab[] = {
+ /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
+ { CPU_ID_ARM2, 0, 1, 0, 0, 0, 0, 0, 0 },
+ { CPU_ID_ARM250, 0, 1, 0, 0, 0, 0, 0, 0 },
+ { CPU_ID_ARM3, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 },
+ { CPU_ID_ARM610, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 },
+ { CPU_ID_ARM710, CPU_CT_CTYPE_WT, 1, 8192, 32, 4, 0, 0, 0 },
+ { CPU_ID_ARM7500, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 },
+ { CPU_ID_ARM710A, CPU_CT_CTYPE_WT, 1, 8192, 16, 4, 0, 0, 0 },
+ { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 },
+ /* XXX is this type right for SA-1? */
+ { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
+ { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
+ { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
+ { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
+ { 0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+static void get_cachetype_table __P((void));
+
+static void
+get_cachetype_table()
+{
+ int i;
+ u_int32_t cpuid = cpufunc_id();
+
+ for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
+ if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
+ arm_pcache_type = cachetab[i].ct_pcache_type;
+ arm_pcache_unified = cachetab[i].ct_pcache_unified;
+ arm_pdcache_size = cachetab[i].ct_pdcache_size;
+ arm_pdcache_line_size =
+ cachetab[i].ct_pdcache_line_size;
+ arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
+ arm_picache_size = cachetab[i].ct_picache_size;
+ arm_picache_line_size =
+ cachetab[i].ct_picache_line_size;
+ arm_picache_ways = cachetab[i].ct_picache_ways;
+ }
+ }
+ arm_dcache_align = arm_pdcache_line_size;
+
+ arm_dcache_align_mask = arm_dcache_align - 1;
+}
+
+#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
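+
+/*
+ * Example: on an SA-1110 the lookup above matches the CPU_ID_SA1110
+ * row, yielding a write-back CPU with split caches: an 8 KB data
+ * cache and a 16 KB instruction cache, both with 32-byte lines.
+ */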
+
+/*
+ * Cannot panic here as we may not have a console yet ...
+ */
+
+int
+set_cpufuncs()
+{
+ cputype = cpufunc_id();
+ cputype &= CPU_ID_CPU_MASK;
+
+ /*
+ * NOTE: cpu_do_powersave defaults to off. If we encounter a
+ * CPU type where we want to use it by default, then we set it.
+ */
+
+#ifdef CPU_ARM3
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ (cputype & 0x00000f00) == 0x00000300) {
+ cpufuncs = arm3_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_table();
+ return 0;
+ }
+#endif /* CPU_ARM3 */
+#ifdef CPU_ARM6
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ (cputype & 0x00000f00) == 0x00000600) {
+ cpufuncs = arm6_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_table();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM6 */
+#ifdef CPU_ARM7
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ CPU_ID_IS7(cputype) &&
+ (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
+ cpufuncs = arm7_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_table();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM7 */
+#ifdef CPU_ARM7TDMI
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ CPU_ID_IS7(cputype) &&
+ (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
+ cpufuncs = arm7tdmi_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_cp15();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif
+#ifdef CPU_ARM8
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ (cputype & 0x0000f000) == 0x00008000) {
+ cpufuncs = arm8_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
+ get_cachetype_cp15();
+ pmap_pte_init_arm8();
+ return 0;
+ }
+#endif /* CPU_ARM8 */
+#ifdef CPU_ARM9
+ if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
+ (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
+ (cputype & 0x0000f000) == 0x00009000) {
+ cpufuncs = arm9_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
+ get_cachetype_cp15();
+ pmap_pte_init_arm9();
+ return 0;
+ }
+#endif /* CPU_ARM9 */
+#ifdef CPU_ARM10
+ if (/* cputype == CPU_ID_ARM1020T || */
+ cputype == CPU_ID_ARM1020E) {
+ /*
+		 * Select write-through caching (this isn't really an
+ * option on ARM1020T).
+ */
+ cpufuncs = arm10_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
+ get_cachetype_cp15();
+ arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
+ arm10_dcache_sets_max =
+ (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
+ arm10_dcache_sets_inc;
+ arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
+ arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM10 */
+#ifdef CPU_SA110
+ if (cputype == CPU_ID_SA110) {
+ cpufuncs = sa110_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+ return 0;
+ }
+#endif /* CPU_SA110 */
+#ifdef CPU_SA1100
+ if (cputype == CPU_ID_SA1100) {
+ cpufuncs = sa11x0_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+ /* Use powersave on this CPU. */
+ cpu_do_powersave = 1;
+
+ return 0;
+ }
+#endif /* CPU_SA1100 */
+#ifdef CPU_SA1110
+ if (cputype == CPU_ID_SA1110) {
+ cpufuncs = sa11x0_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+ /* Use powersave on this CPU. */
+ cpu_do_powersave = 1;
+
+ return 0;
+ }
+#endif /* CPU_SA1110 */
+#ifdef CPU_IXP12X0
+ if (cputype == CPU_ID_IXP1200) {
+ cpufuncs = ixp12x0_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1;
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+ return 0;
+ }
+#endif /* CPU_IXP12X0 */
+#ifdef CPU_XSCALE_80200
+ if (cputype == CPU_ID_80200) {
+ int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
+
+ i80200_icu_init();
+
+ /*
+ * Reset the Performance Monitoring Unit to a
+ * pristine state:
+ * - CCNT, PMN0, PMN1 reset to 0
+ * - overflow indications cleared
+ * - all counters disabled
+ */
+ __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
+ :
+ : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
+ PMNC_CC_IF));
+
+#if defined(XSCALE_CCLKCFG)
+ /*
+ * Crank CCLKCFG to maximum legal value.
+ */
+ __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
+ :
+ : "r" (XSCALE_CCLKCFG));
+#endif
+
+ /*
+ * XXX Disable ECC in the Bus Controller Unit; we
+ * don't really support it, yet. Clear any pending
+ * error indications.
+ */
+ __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
+ :
+ : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ /*
+ * i80200 errata: Step-A0 and A1 have a bug where
+ * D$ dirty bits are not cleared on "invalidate by
+ * address".
+ *
+ * Workaround: Clean cache line before invalidating.
+ */
+ if (rev == 0 || rev == 1)
+ cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+ return 0;
+ }
+#endif /* CPU_XSCALE_80200 */
+#ifdef CPU_XSCALE_80321
+ if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
+ cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
+ i80321_icu_init();
+
+ /*
+ * Reset the Performance Monitoring Unit to a
+ * pristine state:
+ * - CCNT, PMN0, PMN1 reset to 0
+ * - overflow indications cleared
+ * - all counters disabled
+ */
+ __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
+ :
+ : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
+ PMNC_CC_IF));
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+ return 0;
+ }
+#endif /* CPU_XSCALE_80321 */
+#ifdef CPU_XSCALE_PXA2X0
+	/* ignore the core revision when matching PXA2xx CPUs */
+ if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
+ (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+
+ /* Use powersave on this CPU. */
+ cpu_do_powersave = 1;
+
+ return 0;
+ }
+#endif /* CPU_XSCALE_PXA2X0 */
+#ifdef CPU_XSCALE_IXP425
+ if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
+ cputype == CPU_ID_IXP425_266) {
+ ixp425_icu_init();
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+
+ return 0;
+ }
+#endif /* CPU_XSCALE_IXP425 */
+ /*
+ * Bzzzz. And the answer was ...
+ */
+ panic("No support for this CPU type (%08x) in kernel", cputype);
+ return(ARCHITECTURE_NOT_PRESENT);
+}
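+
+/*
+ * Sketch of the expected call order (an assumption drawn from how the
+ * globals above are consumed, not a contract stated here): the
+ * machine-dependent startup code runs
+ *
+ *	set_cpufuncs();		/. select cpufuncs, probe cache geometry ./
+ *	cpu_setup(args);	/. cf_setup: program the control register ./
+ *
+ * once, early, before any other cpu_*() wrapper is used.
+ */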
+
+/*
+ * Fixup routines for data and prefetch aborts.
+ *
+ * Several compile time symbols are used
+ *
+ * DEBUG_FAULT_CORRECTION - Print debugging information during the
+ * correction of registers after a fault.
+ * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
+ * when defined should use late aborts
+ */
+
+
+/*
+ * Null abort fixup routine.
+ * For use when no fixup is required.
+ */
+int
+cpufunc_null_fixup(arg)
+ void *arg;
+{
+ return(ABORT_FIXUP_OK);
+}
+
+
+#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
+ defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
+
+#ifdef DEBUG_FAULT_CORRECTION
+#define DFC_PRINTF(x) printf x
+#define DFC_DISASSEMBLE(x) disassemble(x)
+#else
+#define DFC_PRINTF(x) /* nothing */
+#define DFC_DISASSEMBLE(x) /* nothing */
+#endif
+
+/*
+ * "Early" data abort fixup.
+ *
+ * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
+ * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
+ *
+ * In early aborts, we may have to fix up LDM, STM, LDC and STC.
+ */
+int
+early_abort_fixup(arg)
+ void *arg;
+{
+ trapframe_t *frame = arg;
+ u_int fault_pc;
+ u_int fault_instruction;
+ int saved_lr = 0;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+		 * NOTE: r15 (the PC) is in its usual slot, but writeback
+		 * to r15 is not allowed.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the following fixup code.
+ */
+
+ saved_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = frame->tf_svc_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ /* Get fault address and status from the CPU */
+
+ fault_pc = frame->tf_pc;
+ fault_instruction = *((volatile unsigned int *)fault_pc);
+
+ /* Decode the fault instruction and fix the registers as needed */
+
+ if ((fault_instruction & 0x0e000000) == 0x08000000) {
+ int base;
+ int loop;
+ int count;
+ int *registers = &frame->tf_r0;
+
+ DFC_PRINTF(("LDM/STM\n"));
+ DFC_DISASSEMBLE(fault_pc);
+ if (fault_instruction & (1 << 21)) {
+ DFC_PRINTF(("This instruction must be corrected\n"));
+ base = (fault_instruction >> 16) & 0x0f;
+ if (base == 15)
+ return ABORT_FIXUP_FAILED;
+ /* Count registers transferred */
+ count = 0;
+ for (loop = 0; loop < 16; ++loop) {
+ if (fault_instruction & (1<<loop))
+ ++count;
+ }
+ DFC_PRINTF(("%d registers used\n", count));
+ DFC_PRINTF(("Corrected r%d by %d bytes ",
+ base, count * 4));
+ if (fault_instruction & (1 << 23)) {
+ DFC_PRINTF(("down\n"));
+ registers[base] -= count * 4;
+ } else {
+ DFC_PRINTF(("up\n"));
+ registers[base] += count * 4;
+ }
+ }
+ } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
+ int base;
+ int offset;
+ int *registers = &frame->tf_r0;
+
+ /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
+
+ DFC_DISASSEMBLE(fault_pc);
+
+ /* Only need to fix registers if write back is turned on */
+
+ if ((fault_instruction & (1 << 21)) != 0) {
+ base = (fault_instruction >> 16) & 0x0f;
+ if (base == 13 &&
+ (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
+ return ABORT_FIXUP_FAILED;
+ if (base == 15)
+ return ABORT_FIXUP_FAILED;
+
+ offset = (fault_instruction & 0xff) << 2;
+ DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
+ if ((fault_instruction & (1 << 23)) != 0)
+ offset = -offset;
+ registers[base] += offset;
+ DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
+ }
+ } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
+ return ABORT_FIXUP_FAILED;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+		 * NOTE: r15 (the PC) is in its usual slot, but writeback
+		 * to r15 is not allowed.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the prior fixup code.
+ */
+
+ frame->tf_svc_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = saved_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ return(ABORT_FIXUP_OK);
+}
+#endif /* CPU_ARM2/250/3/6/7 */
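+
+/*
+ * Worked example for the LDM/STM leg above: if "ldmia r6!, {r0-r3}"
+ * aborts, bit 21 (writeback) is set, four register bits are counted,
+ * and bit 23 (increment) is set, so the fixup subtracts 4 * 4 = 16
+ * from r6, undoing the base update the CPU already performed.
+ */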
+
+
+#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
+ defined(CPU_ARM7TDMI)
+/*
+ * "Late" (base updated) data abort fixup
+ *
+ * For ARM6 (in late-abort mode) and ARM7.
+ *
+ * In this model, all data-transfer instructions need fixing up. We defer
+ * LDM, STM, LDC and STC fixup to the early-abort handler.
+ */
+int
+late_abort_fixup(arg)
+ void *arg;
+{
+ trapframe_t *frame = arg;
+ u_int fault_pc;
+ u_int fault_instruction;
+ int saved_lr = 0;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+		 * NOTE: r15 (the PC) is in its usual slot, but writeback
+		 * to r15 is not allowed.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the following fixup code.
+ */
+
+ saved_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = frame->tf_svc_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ /* Get fault address and status from the CPU */
+
+ fault_pc = frame->tf_pc;
+ fault_instruction = *((volatile unsigned int *)fault_pc);
+
+ /* Decode the fault instruction and fix the registers as needed */
+
+	/* Was it a swap instruction? */
+
+ if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
+ DFC_DISASSEMBLE(fault_pc);
+ } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
+
+		/* Was it a ldr/str instruction? */
+ /* This is for late abort only */
+
+ int base;
+ int offset;
+ int *registers = &frame->tf_r0;
+
+ DFC_DISASSEMBLE(fault_pc);
+
+ if ((fault_instruction & (1 << 24)) == 0
+ || (fault_instruction & (1 << 21)) != 0) {
+ /* postindexed ldr/str with no writeback */
+
+ base = (fault_instruction >> 16) & 0x0f;
+ if (base == 13 &&
+ (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
+ return ABORT_FIXUP_FAILED;
+ if (base == 15)
+ return ABORT_FIXUP_FAILED;
+ DFC_PRINTF(("late abt fix: r%d=%08x : ",
+ base, registers[base]));
+ if ((fault_instruction & (1 << 25)) == 0) {
+ /* Immediate offset - easy */
+
+ offset = fault_instruction & 0xfff;
+ if ((fault_instruction & (1 << 23)))
+ offset = -offset;
+ registers[base] += offset;
+ DFC_PRINTF(("imm=%08x ", offset));
+ } else {
+ /* offset is a shifted register */
+ int shift;
+
+ offset = fault_instruction & 0x0f;
+ if (offset == base)
+ return ABORT_FIXUP_FAILED;
+
+ /*
+				 * Register offset - harder: we have to
+				 * cope with shifts!
+ */
+ offset = registers[offset];
+
+ if ((fault_instruction & (1 << 4)) == 0)
+ /* shift with amount */
+ shift = (fault_instruction >> 7) & 0x1f;
+ else {
+ /* shift with register */
+ if ((fault_instruction & (1 << 7)) != 0)
+ /* undefined for now so bail out */
+ return ABORT_FIXUP_FAILED;
+ shift = ((fault_instruction >> 8) & 0xf);
+ if (base == shift)
+ return ABORT_FIXUP_FAILED;
+ DFC_PRINTF(("shift reg=%d ", shift));
+ shift = registers[shift];
+ }
+ DFC_PRINTF(("shift=%08x ", shift));
+ switch (((fault_instruction >> 5) & 0x3)) {
+ case 0 : /* Logical left */
+ offset = (int)(((u_int)offset) << shift);
+ break;
+ case 1 : /* Logical Right */
+ if (shift == 0) shift = 32;
+ offset = (int)(((u_int)offset) >> shift);
+ break;
+ case 2 : /* Arithmetic Right */
+ if (shift == 0) shift = 32;
+ offset = (int)(((int)offset) >> shift);
+ break;
+ case 3 : /* Rotate right (rol or rxx) */
+ return ABORT_FIXUP_FAILED;
+ break;
+ }
+
+ DFC_PRINTF(("abt: fixed LDR/STR with "
+ "register offset\n"));
+ if ((fault_instruction & (1 << 23)))
+ offset = -offset;
+ DFC_PRINTF(("offset=%08x ", offset));
+ registers[base] += offset;
+ }
+ DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
+ }
+ }
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+		 * NOTE: r15 (the PC) is in its usual slot, but writeback
+		 * to r15 is not allowed.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the prior fixup code.
+ */
+
+ frame->tf_svc_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = saved_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ /*
+ * Now let the early-abort fixup routine have a go, in case it
+ * was an LDM, STM, LDC or STC that faulted.
+ */
+
+ return early_abort_fixup(arg);
+}
+#endif /* CPU_ARM6(LATE)/7/7TDMI */
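+
+/*
+ * Worked example for the immediate-offset leg above: if post-indexed
+ * "ldr r0, [r1], #4" aborts after the base update, the decoded offset
+ * is 4 and bit 23 (add) is set, so the offset is negated and r1 has
+ * -4 applied, restoring it before the instruction is restarted.
+ */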
+
+/*
+ * CPU Setup code
+ */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+ defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
+ defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+
+#define IGN	0		/* ignore this option */
+#define OR	1		/* OR the mask into cpuctrl */
+#define BIC	2		/* clear the mask from cpuctrl */
+
+struct cpu_option {
+ char *co_name;
+ int co_falseop;
+ int co_trueop;
+ int co_value;
+};
+
+static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
+
+static u_int
+parse_cpu_options(args, optlist, cpuctrl)
+ char *args;
+ struct cpu_option *optlist;
+ u_int cpuctrl;
+{
+ int integer;
+
+ if (args == NULL)
+ return(cpuctrl);
+
+ while (optlist->co_name) {
+ if (get_bootconf_option(args, optlist->co_name,
+ BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ if (integer) {
+ if (optlist->co_trueop == OR)
+ cpuctrl |= optlist->co_value;
+ else if (optlist->co_trueop == BIC)
+ cpuctrl &= ~optlist->co_value;
+ } else {
+ if (optlist->co_falseop == OR)
+ cpuctrl |= optlist->co_value;
+ else if (optlist->co_falseop == BIC)
+ cpuctrl &= ~optlist->co_value;
+ }
+ }
+ ++optlist;
+ }
+ return(cpuctrl);
+}
+#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
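+
+/*
+ * Example of how the option tables below drive parse_cpu_options(): if
+ * the boot arguments set "cpu.nocache" true, the entry
+ *
+ *	{ "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }
+ *
+ * applies its "true" operation (BIC), clearing CPU_CONTROL_IDC_ENABLE
+ * from cpuctrl; setting the option false applies the "false" operation
+ * (OR), turning the bit back on.
+ */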
+
+#if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
+ || defined(CPU_ARM8)
+struct cpu_option arm678_options[] = {
+#ifdef COMPAT_12
+ { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
+
+#ifdef CPU_ARM6
+struct cpu_option arm6_options[] = {
+ { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+arm6_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ /* Set up default control registers bits */
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
+ | CPU_CONTROL_AFLT_ENABLE;
+
+#ifdef ARM6_LATE_ABORT
+ cpuctrl |= CPU_CONTROL_LABT_ENABLE;
+#endif /* ARM6_LATE_ABORT */
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+struct cpu_option arm7_options[] = {
+ { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+#ifdef COMPAT_12
+ { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
+#endif /* COMPAT_12 */
+ { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+arm7_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
+ | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
+ | CPU_CONTROL_AFLT_ENABLE;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+struct cpu_option arm7tdmi_options[] = {
+ { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+#ifdef COMPAT_12
+ { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
+#endif /* COMPAT_12 */
+ { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+arm7tdmi_setup(args)
+ char *args;
+{
+ int cpuctrl;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+struct cpu_option arm8_options[] = {
+ { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+#ifdef COMPAT_12
+ { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+arm8_setup(args)
+ char *args;
+{
+ int integer;
+ int cpuctrl, cpuctrlmask;
+ int clocktest;
+ int setclock = 0;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Get clock configuration */
+ clocktest = arm8_clock_config(0, 0) & 0x0f;
+
+ /* Special ARM8 clock and test configuration */
+ if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ clocktest = 0;
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ if (integer)
+ clocktest |= 0x01;
+ else
+ clocktest &= ~(0x01);
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ if (integer)
+ clocktest |= 0x02;
+ else
+ clocktest &= ~(0x02);
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
+ clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
+ clocktest |= (integer & 7) << 5;
+ setclock = 1;
+ }
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+
+ /* Set the clock/test register */
+ if (setclock)
+ arm8_clock_config(0x7f, clocktest);
+}
+#endif /* CPU_ARM8 */
+
+#ifdef CPU_ARM9
+struct cpu_option arm9_options[] = {
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+arm9_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ cpu_control(0xffffffff, cpuctrl);
+ ctrl = cpuctrl;
+
+}
+#endif /* CPU_ARM9 */
+
+#ifdef CPU_ARM10
+struct cpu_option arm10_options[] = {
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+arm10_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Now really make sure they are clean. */
+	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+
+ /* And again. */
+ cpu_idcache_wbinv_all();
+}
+#endif /* CPU_ARM10 */
+
+#ifdef CPU_SA110
+struct cpu_option sa110_options[] = {
+#ifdef COMPAT_12
+ { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+sa110_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+/* cpu_control(cpuctrlmask, cpuctrl);*/
+ cpu_control(0xffffffff, cpuctrl);
+
+	/*
+	 * Enable clock switching.  Note that this doesn't read or write
+	 * r0; r0 is just there to make the asm valid.
+	 */
+ __asm ("mcr 15, 0, r0, c15, c1, 2");
+}
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+struct cpu_option sa11x0_options[] = {
+#ifdef COMPAT_12
+ { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+sa11x0_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ if (vector_page == ARM_VECTORS_HIGH)
+ cpuctrl |= CPU_CONTROL_VECRELOC;
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+ /* Set the control register */
+ ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_SA1100 || CPU_SA1110 */
+
+#if defined(CPU_IXP12X0)
+struct cpu_option ixp12x0_options[] = {
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+ixp12x0_setup(args)
+ char *args;
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE;
+
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
+ | CPU_CONTROL_VECRELOC;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+#if 0
+ if (vector_page == ARM_VECTORS_HIGH)
+ cpuctrl |= CPU_CONTROL_VECRELOC;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ ctrl = cpuctrl;
+ /* cpu_control(0xffffffff, cpuctrl); */
+ cpu_control(cpuctrlmask, cpuctrl);
+}
+#endif /* CPU_IXP12X0 */
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+struct cpu_option xscale_options[] = {
+#ifdef COMPAT_12
+ { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+#endif /* COMPAT_12 */
+ { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+void
+xscale_setup(char *args)
+{
+ uint32_t auxctl;
+ int cpuctrl, cpuctrlmask;
+
+ /*
+ * The XScale Write Buffer is always enabled.  The only choice we
+ * have is whether write coalescing is enabled.  Note that bits
+ * 6:3 of the control register must always be set.
+ */
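+ /*
+ * (Coalescing itself is controlled through the XSCALE_AUXCTL_K
+ * bit of the auxiliary control register, read and written via
+ * "mrc/mcr p15, 0, rX, c1, c0, 1" at the end of this function;
+ * setting the bit disables coalescing.)
+ */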
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
+ | CPU_CONTROL_BPRD_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+#if 0
+ if (vector_page == ARM_VECTORS_HIGH)
+ cpuctrl |= CPU_CONTROL_VECRELOC;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /*
+ * Set the control register. Note that bits 6:3 must always
+ * be set to 1.
+ */
+ ctrl = cpuctrl;
+/* cpu_control(cpuctrlmask, cpuctrl);*/
+ cpu_control(0xffffffff, cpuctrl);
+
+ /* Enable or disable write coalescing as configured */
+ __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
+ : "=r" (auxctl));
+#ifdef XSCALE_NO_COALESCE_WRITES
+ auxctl |= XSCALE_AUXCTL_K;
+#else
+ auxctl &= ~XSCALE_AUXCTL_K;
+#endif
+ __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
+ : : "r" (auxctl));
+}
+#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
diff --git a/sys/arm/arm/cpufunc_asm.S b/sys/arm/arm/cpufunc_asm.S
new file mode 100644
index 0000000..6a1456d
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm.S
@@ -0,0 +1,157 @@
+/* $NetBSD: cpufunc_asm.S,v 1.12 2003/09/06 09:14:52 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc.S
+ *
+ * Assembly functions for CPU / MMU / TLB specific operations
+ *
+ * Created : 30/01/97
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+ .text
+ .align 0
+
+ENTRY(cpufunc_nullop)
+ mov pc, lr
+
+/*
+ * Generic functions to read the internal coprocessor registers
+ *
+ * Currently these registers are:
+ * c0 - CPU ID
+ * c5 - Fault status
+ * c6 - Fault address
+ *
+ */
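+
+/*
+ * As a reminder of the notation used throughout these files:
+ * "mrc p15, 0, rd, cN, cM, op2" reads system coprocessor (p15)
+ * register cN, qualified by cM and op2, into rd, and "mcr" writes
+ * it back the other way.  The first function below therefore just
+ * returns the CPU ID register (c0) in r0.
+ */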
+
+ENTRY(cpufunc_id)
+ mrc p15, 0, r0, c0, c0, 0
+ mov pc, lr
+
+ENTRY(cpu_get_control)
+ mrc p15, 0, r0, c1, c0, 0
+ mov pc, lr
+
+ENTRY(cpu_read_cache_config)
+ mrc p15, 0, r0, c0, c0, 1
+ mov pc, lr
+
+ENTRY(cpufunc_faultstatus)
+ mrc p15, 0, r0, c5, c0, 0
+ mov pc, lr
+
+ENTRY(cpufunc_faultaddress)
+ mrc p15, 0, r0, c6, c0, 0
+ mov pc, lr
+
+
+/*
+ * Generic functions to write the internal coprocessor registers
+ *
+ *
+ * Currently these registers are
+ * c1 - CPU Control
+ * c3 - Domain Access Control
+ *
+ * All other registers are CPU architecture specific
+ */
+
+#if 0 /* See below. */
+ENTRY(cpufunc_control)
+ mcr p15, 0, r0, c1, c0, 0
+ mov pc, lr
+#endif
+
+ENTRY(cpufunc_domains)
+ mcr p15, 0, r0, c3, c0, 0
+ mov pc, lr
+
+/*
+ * Generic functions to read/modify/write the internal coprocessor registers
+ *
+ *
+ * Currently these registers are
+ * c1 - CPU Control
+ *
+ * All other registers are CPU architecture specific
+ */
+
+ENTRY(cpufunc_control)
+ mrc p15, 0, r3, c1, c0, 0 /* Read the control register */
+ bic r2, r3, r0 /* Clear bits */
+ eor r2, r2, r1 /* XOR bits */
+
+ teq r2, r3 /* Only write if there is a change */
+ mcrne p15, 0, r2, c1, c0, 0 /* Write new control register */
+#if 0
+ mov r0, r3 /* Return old value */
+#endif
+
+ mov pc, lr
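+
+/*
+ * In other words the new value is (old & ~r0) ^ r1: r0 is a mask of
+ * the bits to change and r1 holds their new values.  For example,
+ * the cpu_control(0xffffffff, cpuctrl) calls in cpufunc.c install a
+ * whole new value, while cpu_control(CPU_CONTROL_DC_ENABLE, 0) would
+ * clear just the D-cache enable bit.
+ */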
+/*
+ * Other potentially useful software functions are:
+ * clean D cache entry and flush I cache entry;
+ * for the moment, use cache_purgeID_E for these.
+ */
+
+/* Random odd functions */
+
+/*
+ * Function to get the offset of a stored program counter from the
+ * instruction doing the store. This offset is defined to be the same
+ * for all STRs and STMs on a given implementation. Code based on
+ * section 2.4.3 of the ARM ARM (2nd Ed.), with modifications to work
+ * in 26-bit modes as well.
+ */
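+/*
+ * Concretely, r1 below ends up holding the address of the STR itself
+ * (reading the PC yields the address of the current instruction plus
+ * 8, and the STR sits two instructions later), so the value returned
+ * is the store offset: 8 on most implementations, 12 on some older
+ * ones.
+ */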
+ENTRY(get_pc_str_offset)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ sub sp, sp, #4
+ mov r1, pc /* R1 = addr of following STR */
+ mov r0, r0 /* nop */
+ str pc, [sp] /* [SP] = . + offset */
+ ldr r0, [sp]
+ sub r0, r0, r1
+ ldmdb fp, {fp, sp, pc}
diff --git a/sys/arm/arm/cpufunc_asm_arm10.S b/sys/arm/arm/cpufunc_asm_arm10.S
new file mode 100644
index 0000000..2c00812
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_arm10.S
@@ -0,0 +1,269 @@
+/* $NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 2002 ARM Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM10 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(arm10_setttb)
+ stmfd sp!, {r0, lr}
+ bl _C_LABEL(arm10_idcache_wbinv_all)
+ ldmfd sp!, {r0, lr}
+
+ mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
+
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
+ bx lr
+
+/*
+ * TLB functions
+ */
+ENTRY(arm10_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
+ bx lr
+
+ENTRY(arm10_tlb_flushI_SE)
+ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
+ bx lr
+
+/*
+ * Cache operations. For the entire cache we use the set/index
+ * operations.
+ */
+ s_max .req r0
+ i_max .req r1
+ s_inc .req r2
+ i_inc .req r3
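+
+/*
+ * These aliases are loaded from the arm10_dcache_* variables declared
+ * at the end of this file.  The values are pre-formatted so that
+ * "orr ip, s_max, i_max" produces a ready-made set/index operand for
+ * the c7 cache operations; the loops below then step the index down
+ * by i_inc within each set and the set down by s_inc.
+ */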
+
+ENTRY_NP(arm10_icache_sync_range)
+ ldr ip, .Larm10_line_size
+ cmp r1, #0x4000
+ bcs .Larm10_icache_sync_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm10_sync_next:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm10_sync_next
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
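+
+/*
+ * The four-instruction prologue above is the standard range set-up
+ * used throughout these files: r3 becomes the line-size mask, r2 the
+ * offset of the start address within its cache line; the length in r1
+ * is extended by that offset and the start address in r0 is rounded
+ * down, so the loop walks whole cache lines covering the original
+ * range.
+ */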
+
+ENTRY_NP(arm10_icache_sync_all)
+.Larm10_icache_sync_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache cleaning code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
+ /* Fall through to clean Dcache. */
+
+.Larm10_dcache_wb:
+ ldr ip, .Larm10_cache_data
+ ldmia ip, {s_max, i_max, s_inc, i_inc}
+.Lnext_set:
+ orr ip, s_max, i_max
+.Lnext_index:
+ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
+ sub ip, ip, i_inc
+ tst ip, i_max /* Index 0 is last one */
+ bne .Lnext_index /* Next index */
+ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
+ subs s_max, s_max, s_inc
+ bpl .Lnext_set /* Next set */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
+
+.Larm10_line_size:
+ .word _C_LABEL(arm_pdcache_line_size)
+
+ENTRY(arm10_dcache_wb_range)
+ ldr ip, .Larm10_line_size
+ cmp r1, #0x4000
+ bcs .Larm10_dcache_wb
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm10_wb_next:
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm10_wb_next
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
+
+ENTRY(arm10_dcache_wbinv_range)
+ ldr ip, .Larm10_line_size
+ cmp r1, #0x4000
+ bcs .Larm10_dcache_wbinv_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm10_wbinv_next:
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm10_wbinv_next
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
+
+/*
+ * Note, we must not invalidate everything: lines outside the given
+ * range may be dirty, and simply discarding them would lose data.
+ * So if the range is too big to walk, we use wb-inv of the entire
+ * cache instead.
+ */
+ENTRY(arm10_dcache_inv_range)
+ ldr ip, .Larm10_line_size
+ cmp r1, #0x4000
+ bcs .Larm10_dcache_wbinv_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm10_inv_next:
+ mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm10_inv_next
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
+
+ENTRY(arm10_idcache_wbinv_range)
+ ldr ip, .Larm10_line_size
+ cmp r1, #0x4000
+ bcs .Larm10_idcache_wbinv_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm10_id_wbinv_next:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm10_id_wbinv_next
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
+
+ENTRY_NP(arm10_idcache_wbinv_all)
+.Larm10_idcache_wbinv_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache purging code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
+ /* Fall through to purge Dcache. */
+
+ENTRY(arm10_dcache_wbinv_all)
+.Larm10_dcache_wbinv_all:
+ ldr ip, .Larm10_cache_data
+ ldmia ip, {s_max, i_max, s_inc, i_inc}
+.Lnext_set_inv:
+ orr ip, s_max, i_max
+.Lnext_index_inv:
+ mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
+ sub ip, ip, i_inc
+ tst ip, i_max /* Index 0 is last one */
+ bne .Lnext_index_inv /* Next index */
+ mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
+ subs s_max, s_max, s_inc
+ bpl .Lnext_set_inv /* Next set */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ bx lr
+
+.Larm10_cache_data:
+ .word _C_LABEL(arm10_dcache_sets_max)
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch();
+ * they actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+ENTRY(arm10_context_switch)
+ /*
+ * We can assume that the caches will only contain kernel addresses
+ * at this point. So no need to flush them again.
+ */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
+ mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
+
+ /* Paranoia -- make sure the pipeline is empty. */
+ nop
+ nop
+ nop
+ bx lr
+
+ .bss
+
+/* XXX The following macros should probably be moved to asm.h */
+#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
+#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
+
+/*
+ * Parameters for the cache cleaning code. Note that the order of these
+ * four variables is assumed in the code above. Hence the reason for
+ * declaring them in the assembler file.
+ */
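+/*
+ * (The "ldmia" in the cache loops assigns ascending memory words to
+ * ascending register numbers, so these four words land in s_max,
+ * i_max, s_inc and i_inc, in that order.)
+ */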
+ .align 0
+C_OBJECT(arm10_dcache_sets_max)
+ .space 4
+C_OBJECT(arm10_dcache_index_max)
+ .space 4
+C_OBJECT(arm10_dcache_sets_inc)
+ .space 4
+C_OBJECT(arm10_dcache_index_inc)
+ .space 4
diff --git a/sys/arm/arm/cpufunc_asm_arm3.S b/sys/arm/arm/cpufunc_asm_arm3.S
new file mode 100644
index 0000000..83fc054
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_arm3.S
@@ -0,0 +1,61 @@
+/* $NetBSD: cpufunc_asm_arm3.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM3 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The ARM3 has its own control register in a different place.
+ */
+ENTRY(arm3_control)
+ mrc p15, 0, r3, c2, c0, 0 /* Read the control register */
+ bic r2, r3, r0 /* Clear bits */
+ eor r2, r2, r1 /* XOR bits */
+
+ teq r2, r3 /* Only write if there is a change */
+ mcrne p15, 0, r2, c2, c0, 0 /* Write new control register */
+ mov r0, r3 /* Return old value */
+ mov pc, lr
+
+/*
+ * Cache functions.
+ */
+
+ENTRY(arm3_cache_flush)
+ mcr p15, 0, r0, c1, c0, 0
+ mov pc, lr
diff --git a/sys/arm/arm/cpufunc_asm_arm67.S b/sys/arm/arm/cpufunc_asm_arm67.S
new file mode 100644
index 0000000..2d7dbb8
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_arm67.S
@@ -0,0 +1,111 @@
+/* $NetBSD: cpufunc_asm_arm67.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM6/ARM7 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(arm67_setttb)
+ mcr p15, 0, r0, c7, c0, 0 /* flush the IDC first */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c5, c0, 0
+
+ /* For good measure we will flush the IDC as well */
+ mcr p15, 0, r0, c7, c0, 0
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+
+ mov pc, lr
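+
+/*
+ * (The pair of "mov r0, r0" no-ops above is the idiom used after MMU
+ * and TLB writes throughout these files: instructions already
+ * prefetched under the old translations must drain from the pipeline
+ * before anything depending on the new state executes.)
+ */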
+
+/*
+ * TLB functions
+ */
+ENTRY(arm67_tlb_flush)
+ mcr p15, 0, r0, c5, c0, 0
+ mov pc, lr
+
+ENTRY(arm67_tlb_purge)
+ mcr p15, 0, r0, c6, c0, 0
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(arm67_cache_flush)
+ mcr p15, 0, r0, c7, c0, 0
+ mov pc, lr
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch();
+ * they actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+ENTRY(arm67_context_switch)
+ /* For good measure we will flush the IDC as well */
+ mcr p15, 0, r0, c7, c0, 0 /* flush cache */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c5, c0, 0
+
+#if 0
+ /* For good measure we will flush the IDC as well */
+ mcr p15, 0, r0, c7, c0, 0 /* flush cache */
+#endif
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
diff --git a/sys/arm/arm/cpufunc_asm_arm7tdmi.S b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
new file mode 100644
index 0000000..0eee07a
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
@@ -0,0 +1,100 @@
+/* $NetBSD: cpufunc_asm_arm7tdmi.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 John Fremlin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM7TDMI assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(arm7tdmi_setttb)
+ mov r1, r0 /* store the TTB in a safe place */
+ mov r2, lr /* ditto with lr */
+
+ bl _C_LABEL(arm7tdmi_cache_flushID)
+
+ /* Write the TTB */
+ mcr p15, 0, r1, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ bl _C_LABEL(arm7tdmi_tlb_flushID)
+
+ /* For good measure we will flush the IDC as well */
+ bl _C_LABEL(arm7tdmi_cache_flushID)
+
+ mov pc, r2
+
+/*
+ * TLB functions
+ */
+ENTRY(arm7tdmi_tlb_flushID)
+ mov r0, #0
+ mcr p15, 0, r0, c8, c7, 0
+ mov pc, lr
+
+ENTRY(arm7tdmi_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c7, 1
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(arm7tdmi_cache_flushID)
+ mov r0, #0
+
+ mcr p15, 0, r0, c7, c7, 0
+
+ /* Make sure that the pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+
+ mov pc, lr
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch();
+ * they actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+ENTRY(arm7tdmi_context_switch)
+ b _C_LABEL(arm7tdmi_setttb)
diff --git a/sys/arm/arm/cpufunc_asm_arm8.S b/sys/arm/arm/cpufunc_asm_arm8.S
new file mode 100644
index 0000000..1afcb16
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_arm8.S
@@ -0,0 +1,284 @@
+/* $NetBSD: cpufunc_asm_arm8.S,v 1.2 2001/11/11 00:47:49 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997 ARM Limited
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM8 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+ENTRY(arm8_clock_config)
+ mrc p15, 0, r3, c15, c0, 0 /* Read the clock register */
+ bic r2, r3, #0x11 /* turn off dynamic clocking
+ and clear L bit */
+ mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
+
+ bic r2, r3, r0 /* Clear bits */
+ eor r2, r2, r1 /* XOR bits */
+ bic r2, r2, #0x10 /* clear the L bit */
+
+ bic r1, r2, #0x01 /* still keep dynamic clocking off */
+ mcr p15, 0, r1, c15, c0, 0 /* Write clock register */
+ mov r0, r0 /* NOP */
+ mov r0, r0 /* NOP */
+ mov r0, r0 /* NOP */
+ mov r0, r0 /* NOP */
+ mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
+ mov r0, r3 /* Return old value */
+ mov pc, lr
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(arm8_setttb)
+ mrs r3, cpsr_all
+ orr r1, r3, #(I32_bit | F32_bit)
+ msr cpsr_all, r1
+
+ stmfd sp!, {r0-r3, lr}
+ bl _C_LABEL(arm8_cache_cleanID)
+ ldmfd sp!, {r0-r3, lr}
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0
+
+ /* For good measure we will flush the IDC as well */
+ mcr p15, 0, r0, c7, c7, 0
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ msr cpsr_all, r3
+
+ mov pc, lr
+
+/*
+ * TLB functions
+ */
+ENTRY(arm8_tlb_flushID)
+ mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
+ mov pc, lr
+
+ENTRY(arm8_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(arm8_cache_flushID)
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+ mov pc, lr
+
+ENTRY(arm8_cache_flushID_E)
+ mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
+ mov pc, lr
+
+ENTRY(arm8_cache_cleanID)
+ mov r0, #0x00000000
+
+1: mov r2, r0
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+
+ adds r0, r0, #0x04000000
+ bne 1b
+
+ mov pc, lr
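+
+/*
+ * The sequence above is fully unrolled over the 16 index values
+ * 0x00-0xf0; the outer "adds r0, r0, #0x04000000" then steps the set
+ * field until r0 wraps back to zero, i.e. 64 outer iterations, for
+ * 1024 set/index clean operations in total.
+ */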
+
+ENTRY(arm8_cache_cleanID_E)
+ mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
+ mov pc, lr
+
+ENTRY(arm8_cache_purgeID)
+ /*
+ * ARM810 bug 3
+ *
+ * The clean-and-invalidate entry operation
+ * (mcr p15, 0, rd, c7, c15, 1) will not invalidate the entry
+ * if the line was already clean.
+ *
+ * Instead of using the combined clean-and-invalidate entry
+ * operation, use separate clean and invalidate entry operations,
+ * i.e.
+ * mcr p15, 0, rd, c7, c11, 1
+ * mcr p15, 0, rd, c7, c7, 1
+ */
+
+ mov r0, #0x00000000
+
+ mrs r3, cpsr_all
+ orr r2, r3, #(I32_bit | F32_bit)
+ msr cpsr_all, r2
+
+1: mov r2, r0
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+ add r2, r2, #0x10
+ mcr p15, 0, r2, c7, c11, 1
+ mcr p15, 0, r2, c7, c7, 1
+
+ adds r0, r0, #0x04000000
+ bne 1b
+
+ msr cpsr_all, r3
+ mov pc, lr
+
+ENTRY(arm8_cache_purgeID_E)
+ /*
+ * ARM810 bug 3
+ *
+ * The clean-and-invalidate entry operation
+ * (mcr p15, 0, rd, c7, c15, 1) will not invalidate the entry
+ * if the line was already clean.
+ *
+ * Instead of using the combined clean-and-invalidate entry
+ * operation, use separate clean and invalidate entry operations,
+ * i.e.
+ * mcr p15, 0, rd, c7, c11, 1
+ * mcr p15, 0, rd, c7, c7, 1
+ */
+ mrs r3, cpsr_all
+ orr r2, r3, #(I32_bit | F32_bit)
+ msr cpsr_all, r2
+ mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
+ mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
+ msr cpsr_all, r3
+ mov pc, lr
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch();
+ * they actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+ENTRY(arm8_context_switch)
+ /* For good measure we will flush the IDC as well */
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
+
+#if 0
+ /* For good measure we will flush the IDC as well */
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+#endif
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
diff --git a/sys/arm/arm/cpufunc_asm_arm9.S b/sys/arm/arm/cpufunc_asm_arm9.S
new file mode 100644
index 0000000..3237bf0
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_arm9.S
@@ -0,0 +1,137 @@
+/* $NetBSD: cpufunc_asm_arm9.S,v 1.2 2002/01/29 15:27:29 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 2001 ARM Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM9 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(arm9_setttb)
+ /*
+ * Since we use the caches in write-through mode, we only have to
+ * drain the write buffers and flush the caches.
+ */
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
+
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
+ mov pc, lr
+
+/*
+ * TLB functions
+ */
+ENTRY(arm9_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(arm9_cache_flushID)
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+ mov pc, lr
+
+ENTRY(arm9_cache_flushID_SE)
+ mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
+ mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
+ mov pc, lr
+
+ENTRY(arm9_cache_flushI)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mov pc, lr
+
+ENTRY(arm9_cache_flushI_SE)
+ mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
+ mov pc, lr
+
+ENTRY(arm9_cache_flushD)
+ mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
+ mov pc, lr
+
+ENTRY(arm9_cache_flushD_SE)
+ mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
+ mov pc, lr
+
+ENTRY(arm9_cache_cleanID)
+ mcr p15, 0, r0, c7, c10, 4 /* write-through caches: just drain the write buffer */
+ mov pc, lr
+
+/*
+ * Soft functions
+ */
+ENTRY(arm9_cache_syncI)
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
+ mov pc, lr
+
+ENTRY_NP(arm9_cache_flushID_rng)
+ b _C_LABEL(arm9_cache_flushID)
+
+ENTRY_NP(arm9_cache_flushD_rng)
+ /* Same as above, but D cache only */
+ b _C_LABEL(arm9_cache_flushD)
+
+ENTRY_NP(arm9_cache_syncI_rng)
+ /* Similarly, for I cache sync */
+ b _C_LABEL(arm9_cache_syncI)
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch();
+ * they actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+ENTRY(arm9_context_switch)
+ /*
+ * We can assume that the caches will only contain kernel addresses
+ * at this point. So no need to flush them again.
+ */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
+ mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
+
+ /* Paranoia -- make sure the pipeline is empty. */
+ nop
+ nop
+ nop
+ mov pc, lr
diff --git a/sys/arm/arm/cpufunc_asm_armv4.S b/sys/arm/arm/cpufunc_asm_armv4.S
new file mode 100644
index 0000000..f1e7d3b
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_armv4.S
@@ -0,0 +1,67 @@
+/* $NetBSD: cpufunc_asm_armv4.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 ARM Limited
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARMv4 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * TLB functions
+ */
+ENTRY(armv4_tlb_flushID)
+ mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
+ mov pc, lr
+
+ENTRY(armv4_tlb_flushI)
+ mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
+ mov pc, lr
+
+ENTRY(armv4_tlb_flushD)
+ mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
+ mov pc, lr
+
+ENTRY(armv4_tlb_flushD_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mov pc, lr
+
+/*
+ * Other functions
+ */
+ENTRY(armv4_drain_writebuf)
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mov pc, lr
diff --git a/sys/arm/arm/cpufunc_asm_ixp12x0.S b/sys/arm/arm/cpufunc_asm_ixp12x0.S
new file mode 100644
index 0000000..142682c
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_ixp12x0.S
@@ -0,0 +1,90 @@
+/* $NetBSD: cpufunc_asm_ixp12x0.S,v 1.2 2002/08/17 16:36:31 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * This function is the same as sa110_context_switch for now; the plan
+ * is to make use of the process ID register to avoid cache flushes.
+ */
+ENTRY(ixp12x0_context_switch)
+ /*
+ * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
+ * Thus the data cache will contain only kernel data and the
+ * instruction cache will contain only kernel code, and all
+ * kernel mappings are shared by all processes.
+ */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
+
+ENTRY(ixp12x0_drain_readbuf)
+ mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
+ mov pc, lr
+
+/*
+ * Information for the IXP12X0 cache clean/purge functions:
+ *
+ * * Virtual address of the memory region to use
+ * * Size of memory region
+ */
+ .data
+
+ .global _C_LABEL(ixp12x0_cache_clean_addr)
+_C_LABEL(ixp12x0_cache_clean_addr):
+ .word 0xf0000000
+
+ .global _C_LABEL(ixp12x0_cache_clean_size)
+_C_LABEL(ixp12x0_cache_clean_size):
+ .word 0x00008000
+
+ .text
+
+.Lixp12x0_cache_clean_addr:
+ .word _C_LABEL(ixp12x0_cache_clean_addr)
+.Lixp12x0_cache_clean_size:
+ .word _C_LABEL(ixp12x0_cache_clean_size)
diff --git a/sys/arm/arm/cpufunc_asm_sa1.S b/sys/arm/arm/cpufunc_asm_sa1.S
new file mode 100644
index 0000000..77768ff
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_sa1.S
@@ -0,0 +1,316 @@
+/* $NetBSD: cpufunc_asm_sa1.S,v 1.8 2002/08/17 16:36:32 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * SA-1 assembly functions for CPU / MMU / TLB specific operations
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+.Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(getttb)
+ mrc p15, 0, r0, c2, c0, 0 /* read the current TTB */
+ mov pc, lr
+
+ENTRY(sa1_setttb)
+#ifdef CACHE_CLEAN_BLOCK_INTR
+ mrs r3, cpsr_all
+ orr r1, r3, #(I32_bit | F32_bit)
+ msr cpsr_all, r1
+#else
+ ldr r3, .Lblock_userspace_access
+ ldr r2, [r3]
+ orr r1, r2, #1
+ str r1, [r3]
+#endif
+ stmfd sp!, {r0-r3, lr}
+ bl _C_LABEL(sa1_cache_cleanID)
+ ldmfd sp!, {r0-r3, lr}
+ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
+
+ /* The cleanID above means we only need to flush the I cache here */
+ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+#ifdef CACHE_CLEAN_BLOCK_INTR
+ msr cpsr_all, r3
+#else
+ str r2, [r3]
+#endif
+ mov pc, lr
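+
+/*
+ * (Both guards above protect the cache clean: with
+ * CACHE_CLEAN_BLOCK_INTR we mask IRQ/FIQ in the CPSR for the
+ * duration; otherwise we set the block_userspace_access flag,
+ * presumably so the user-access paths do not dirty the cache while
+ * it is being cleaned.)
+ */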
+
+/*
+ * TLB functions
+ */
+ENTRY(sa1_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(sa1_cache_flushID)
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_flushI)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_flushD)
+ mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_flushD_SE)
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mov pc, lr
+
+ENTRY(sa1_cache_cleanD_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mov pc, lr
+
+/*
+ * Information for the SA-1 cache clean/purge functions:
+ *
+ * * Virtual address of the memory region to use
+ * * Size of memory region
+ */
+ .data
+
+ .global _C_LABEL(sa1_cache_clean_addr)
+_C_LABEL(sa1_cache_clean_addr):
+ .word 0xf0000000
+
+ .global _C_LABEL(sa1_cache_clean_size)
+_C_LABEL(sa1_cache_clean_size):
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+ .word 0x00004000
+#else
+ .word 0x00008000
+#endif
+
+ .text
+
+.Lsa1_cache_clean_addr:
+ .word _C_LABEL(sa1_cache_clean_addr)
+.Lsa1_cache_clean_size:
+ .word _C_LABEL(sa1_cache_clean_size)
+
+#ifdef CACHE_CLEAN_BLOCK_INTR
+#define SA1_CACHE_CLEAN_BLOCK \
+ mrs r3, cpsr_all ; \
+ orr r0, r3, #(I32_bit | F32_bit) ; \
+ msr cpsr_all, r0
+
+#define SA1_CACHE_CLEAN_UNBLOCK \
+ msr cpsr_all, r3
+#else
+#define SA1_CACHE_CLEAN_BLOCK \
+ ldr r3, .Lblock_userspace_access ; \
+ ldr ip, [r3] ; \
+ orr r0, ip, #1 ; \
+ str r0, [r3]
+
+#define SA1_CACHE_CLEAN_UNBLOCK \
+ str ip, [r3]
+#endif /* CACHE_CLEAN_BLOCK_INTR */
+
+#ifdef DOUBLE_CACHE_CLEAN_BANK
+#define SA1_DOUBLE_CACHE_CLEAN_BANK \
+ eor r0, r0, r1 ; \
+ str r0, [r2]
+#else
+#define SA1_DOUBLE_CACHE_CLEAN_BANK /* nothing */
+#endif /* DOUBLE_CACHE_CLEAN_BANK */
+
+#define SA1_CACHE_CLEAN_PROLOGUE \
+ SA1_CACHE_CLEAN_BLOCK ; \
+ ldr r2, .Lsa1_cache_clean_addr ; \
+ ldmia r2, {r0, r1} ; \
+ SA1_DOUBLE_CACHE_CLEAN_BANK
+
+#define SA1_CACHE_CLEAN_EPILOGUE \
+ SA1_CACHE_CLEAN_UNBLOCK
+
+ENTRY_NP(sa1_cache_syncI)
+ENTRY_NP(sa1_cache_purgeID)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
+ENTRY_NP(sa1_cache_cleanID)
+ENTRY_NP(sa1_cache_purgeD)
+ENTRY(sa1_cache_cleanD)
+ SA1_CACHE_CLEAN_PROLOGUE
+
+1: ldr r2, [r0], #32
+ subs r1, r1, #32
+ bne 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ SA1_CACHE_CLEAN_EPILOGUE
+ mov pc, lr
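+
+/*
+ * The load loop above is how the whole D-cache is cleaned on SA-1
+ * parts, which lack a clean-entire-cache operation: reading one word
+ * per 32-byte line from the dedicated sa1_cache_clean_addr region
+ * evicts every existing line, writing back any that are dirty.  With
+ * DOUBLE_CACHE_CLEAN_BANK the prologue also flips between two such
+ * regions (the "eor" of address and size) so that back-to-back cleans
+ * do not merely re-hit lines that are already resident.
+ */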
+
+ENTRY(sa1_cache_purgeID_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeD_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mov pc, lr
+
+/*
+ * Soft functions
+ */
+/* sa1_cache_syncI is identical to sa1_cache_purgeID */
+
+ENTRY(sa1_cache_cleanID_rng)
+ENTRY(sa1_cache_cleanD_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_cleanID)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeID_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_purgeID)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeD_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_purgeD)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mov pc, lr
+
+ENTRY(sa1_cache_syncI_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_syncI)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+
+ mov pc, lr
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch();
+ * they actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+#if defined(CPU_SA110)
+ENTRY(sa110_context_switch)
+ /*
+ * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
+ * Thus the data cache will contain only kernel data and the
+ * instruction cache will contain only kernel code, and all
+ * kernel mappings are shared by all processes.
+ */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
+#endif
diff --git a/sys/arm/arm/cpufunc_asm_sa11x0.S b/sys/arm/arm/cpufunc_asm_sa11x0.S
new file mode 100644
index 0000000..c4c04bc
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_sa11x0.S
@@ -0,0 +1,125 @@
+/* $NetBSD: cpufunc_asm_sa11x0.S,v 1.3 2002/08/17 16:36:32 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+ .data
+ .global _C_LABEL(sa11x0_idle_mem)
+_C_LABEL(sa11x0_idle_mem):
+ .word 0
+
+ .text
+
+ .align 5
+
+ /* We're now 32-byte aligned */
+
+.Lsa11x0_idle_mem:
+ .word _C_LABEL(sa11x0_idle_mem) /* 1 */
+
+/*
+ * sa11x0_cpusleep
+ *
+ * This is called when there is nothing on any of the run queues.
+ * We go into IDLE mode so that any IRQ or FIQ will awaken us.
+ */
+ENTRY(sa11x0_cpu_sleep)
+ ldr r1, .Lsa11x0_idle_mem /* get address of... */ /* 2 */
+ nop /* 3 */
+ ldr r1, [r1] /* ...non-cacheable page */ /* 4 */
+ nop /* 5 */
+
+ /*
+ * SA-1110 manual, 9.5.2.1 (Entering Idle Mode) says that
+ * to enter idle mode:
+ *
+ * * Disable clock switching
+ * * Issue load from non-cacheable address
+ * * Issue "wait for interrupt"
+ *
+ * The 3-insn sequence must reside in the first 3 words
+ * of a cache line.
+ *
+ * We must disable interrupts in the CPSR so that we can
+ * re-enable clock switching before servicing interrupts.
+ */
+
+ mrs r3, cpsr_all /* 6 */
+ orr r2, r3, #(I32_bit|F32_bit) /* 7 */
+ msr cpsr_all, r2 /* 8 */
+
+ /* We're now 32-byte aligned */
+
+ mcr p15, 0, r0, c15, c2, 2 /* disable clock switching */
+ ldr r0, [r1] /* load from non-cacheable address */
+ mcr p15, 0, r0, c15, c8, 2 /* wait for interrupt */
+
+ mcr p15, 0, r0, c15, c1, 2 /* re-enable clock switching */
+
+ /* Restore interrupts (which will cause them to be serviced). */
+ msr cpsr_all, r3
+ mov pc, lr
+
+
+/*
+ * This function is the same as sa110_context_switch for now; the plan
+ * is to make use of the process ID register to avoid cache flushes.
+ */
+ENTRY(sa11x0_context_switch)
+ /*
+ * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
+ * Thus the data cache will contain only kernel data and the
+ * instruction cache will contain only kernel code, and all
+ * kernel mappings are shared by all processes.
+ */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
+
+ENTRY(sa11x0_drain_readbuf)
+ mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
+ mov pc, lr
diff --git a/sys/arm/arm/cpufunc_asm_xscale.S b/sys/arm/arm/cpufunc_asm_xscale.S
new file mode 100644
index 0000000..5b690cc
--- /dev/null
+++ b/sys/arm/arm/cpufunc_asm_xscale.S
@@ -0,0 +1,497 @@
+/* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Copyright (c) 2001 Matt Thomas.
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * XScale assembly functions for CPU / MMU / TLB specific operations
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Size of the XScale core D-cache.
+ */
+#define DCACHE_SIZE 0x00008000
+
+.Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+/*
+ * CPWAIT -- Canonical method to wait for CP15 update.
+ * From: Intel 80200 manual, section 2.3.3.
+ *
+ * NOTE: Clobbers the specified temp reg.
+ */
+#define CPWAIT_BRANCH \
+ sub pc, pc, #4
+
+#define CPWAIT(tmp) \
+ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
+ mov tmp, tmp /* wait for it to complete */ ;\
+ CPWAIT_BRANCH /* branch to next insn */
+
+#define CPWAIT_AND_RETURN_SHIFTER lsr #32
+
+#define CPWAIT_AND_RETURN(tmp) \
+ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
+ /* Wait for it to complete and branch to the return address */ \
+ sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
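+/*
+ * In CPWAIT_AND_RETURN, "lsr #32" shifts tmp out entirely, so the sub
+ * computes pc = lr - 0; using tmp as the shifter operand merely adds
+ * the register dependency that stalls the return until the mrc has
+ * completed.
+ */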
+
+ENTRY(xscale_cpwait)
+ CPWAIT_AND_RETURN(r0)
+
+/*
+ * We need a separate cpu_control() entry point, since we have to
+ * invalidate the Branch Target Buffer in the event the BPRD bit
+ * changes in the control register.
+ */
+ENTRY(xscale_control)
+ mrc p15, 0, r3, c1, c0, 0 /* Read the control register */
+ bic r2, r3, r0 /* Clear bits */
+ eor r2, r2, r1 /* XOR bits */
+
+ teq r2, r3 /* Only write if there was a change */
+ mcrne p15, 0, r0, c7, c5, 6 /* Invalidate the BTB */
+ mcrne p15, 0, r2, c1, c0, 0 /* Write new control register */
+ mov r0, r3 /* Return old value */
+
+ CPWAIT_AND_RETURN(r1)
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(xscale_setttb)
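+	/*
+	 * Keep the caches stable while they are cleaned: either mask
+	 * IRQ/FIQ in the CPSR, or (the non-interrupt-blocking variant)
+	 * set the global block_userspace_access flag for the duration.
+	 */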
+#ifdef CACHE_CLEAN_BLOCK_INTR
+ mrs r3, cpsr_all
+ orr r1, r3, #(I32_bit | F32_bit)
+ msr cpsr_all, r1
+#else
+ ldr r3, .Lblock_userspace_access
+ ldr r2, [r3]
+ orr r1, r2, #1
+ str r1, [r3]
+#endif
+ stmfd sp!, {r0-r3, lr}
+ bl _C_LABEL(xscale_cache_cleanID)
+ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
+
+ CPWAIT(r0)
+
+ ldmfd sp!, {r0-r3, lr}
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
+
+ /* The cleanID above means we only need to flush the I cache here */
+ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
+
+ CPWAIT(r0)
+
+#ifdef CACHE_CLEAN_BLOCK_INTR
+ msr cpsr_all, r3
+#else
+ str r2, [r3]
+#endif
+ mov pc, lr
+
+/*
+ * TLB functions
+ *
+ * Note: We don't need to worry about issuing a CPWAIT after
+ * TLB operations, because we expect a pmap_update() to follow.
+ */
+ENTRY(xscale_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(xscale_cache_flushID)
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_flushI)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_flushD)
+ mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_flushI_SE)
+ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_flushD_SE)
+ /*
+ * Errata (rev < 2): Must clean-dcache-line to an address
+ * before invalidate-dcache-line to an address, or dirty
+ * bits will not be cleared in the dcache array.
+ */
+ mcr p15, 0, r0, c7, c10, 1
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_cleanD_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ CPWAIT_AND_RETURN(r0)
+
+/*
+ * Information for the XScale cache clean/purge functions:
+ *
+ * * Virtual address of the memory region to use
+ * * Size of memory region
+ *
+ * Note the virtual address for the Data cache clean operation
+ * does not need to be backed by physical memory, since no loads
+ * will actually be performed by the allocate-line operation.
+ *
+ * Note that the Mini-Data cache MUST be cleaned by executing
+ * loads from memory mapped into a region reserved exclusively
+ * for cleaning of the Mini-Data cache.
+ */
+ .data
+
+ .global _C_LABEL(xscale_cache_clean_addr)
+_C_LABEL(xscale_cache_clean_addr):
+ .word 0x00000000
+
+ .global _C_LABEL(xscale_cache_clean_size)
+_C_LABEL(xscale_cache_clean_size):
+ .word DCACHE_SIZE
+
+ .global _C_LABEL(xscale_minidata_clean_addr)
+_C_LABEL(xscale_minidata_clean_addr):
+ .word 0x00000000
+
+ .global _C_LABEL(xscale_minidata_clean_size)
+_C_LABEL(xscale_minidata_clean_size):
+ .word 0x00000800
+
+ .text
+
+.Lxscale_cache_clean_addr:
+ .word _C_LABEL(xscale_cache_clean_addr)
+.Lxscale_cache_clean_size:
+ .word _C_LABEL(xscale_cache_clean_size)
+
+.Lxscale_minidata_clean_addr:
+ .word _C_LABEL(xscale_minidata_clean_addr)
+.Lxscale_minidata_clean_size:
+ .word _C_LABEL(xscale_minidata_clean_size)
+
+#ifdef CACHE_CLEAN_BLOCK_INTR
+#define XSCALE_CACHE_CLEAN_BLOCK \
+ mrs r3, cpsr_all ; \
+ orr r0, r3, #(I32_bit | F32_bit) ; \
+ msr cpsr_all, r0
+
+#define XSCALE_CACHE_CLEAN_UNBLOCK \
+ msr cpsr_all, r3
+#else
+#define XSCALE_CACHE_CLEAN_BLOCK \
+ ldr r3, .Lblock_userspace_access ; \
+ ldr ip, [r3] ; \
+ orr r0, ip, #1 ; \
+ str r0, [r3]
+
+#define XSCALE_CACHE_CLEAN_UNBLOCK \
+ str ip, [r3]
+#endif /* CACHE_CLEAN_BLOCK_INTR */
+
+#define XSCALE_CACHE_CLEAN_PROLOGUE \
+ XSCALE_CACHE_CLEAN_BLOCK ; \
+ ldr r2, .Lxscale_cache_clean_addr ; \
+ ldmia r2, {r0, r1} ; \
+ /* \
+ * BUG ALERT! \
+ * \
+ * The XScale core has a strange cache eviction bug, which \
+ * requires us to use 2x the cache size for the cache clean \
+ * and for that area to be aligned to 2 * cache size. \
+ * \
+ * The work-around is to use 2 areas for cache clean, and to \
+ * alternate between them whenever this is done. No one knows \
+ * why the work-around works (mmm!). \
+ */ \
+ eor r0, r0, #(DCACHE_SIZE) ; \
+ str r0, [r2] ; \
+ add r0, r0, r1
+
+#define XSCALE_CACHE_CLEAN_EPILOGUE \
+ XSCALE_CACHE_CLEAN_UNBLOCK
+
+ENTRY_NP(xscale_cache_syncI)
+ENTRY_NP(xscale_cache_purgeID)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
+ENTRY_NP(xscale_cache_cleanID)
+ENTRY_NP(xscale_cache_purgeD)
+ENTRY(xscale_cache_cleanD)
+ XSCALE_CACHE_CLEAN_PROLOGUE
+
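+	/*
+	 * After the prologue r0 is the end of the clean area and r1 its
+	 * size.  Walk backwards one 32-byte line at a time, using the
+	 * line-allocate operation to evict dirty lines without issuing
+	 * real loads.
+	 */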
+1: subs r0, r0, #32
+ mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */
+ subs r1, r1, #32
+ bne 1b
+
+ CPWAIT(r0)
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT(r0)
+
+ XSCALE_CACHE_CLEAN_EPILOGUE
+ mov pc, lr
+
+/*
+ * Clean the mini-data cache.
+ *
+ * It's expected that we only use the mini-data cache for
+ * kernel addresses, so there is no need to purge it on
+ * context switch, and no need to prevent userspace access
+ * while we clean it.
+ */
+ENTRY(xscale_cache_clean_minidata)
+ ldr r2, .Lxscale_minidata_clean_addr
+ ldmia r2, {r0, r1}
+1: ldr r3, [r0], #32
+ subs r1, r1, #32
+ bne 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT_AND_RETURN(r1)
+
+ENTRY(xscale_cache_purgeID_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ CPWAIT(r1)
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ CPWAIT_AND_RETURN(r1)
+
+ENTRY(xscale_cache_purgeD_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ CPWAIT(r1)
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ CPWAIT_AND_RETURN(r1)
+
+/*
+ * Soft functions
+ */
+/* xscale_cache_syncI is identical to xscale_cache_purgeID */
+
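+/*
+ * The _rng variants take a start address in r0 and a length in r1.
+ * For ranges of 16KB and up (half the 32KB D-cache) it is cheaper to
+ * operate on the whole cache, so they branch to the full versions.
+ */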
+ENTRY(xscale_cache_cleanID_rng)
+ENTRY(xscale_cache_cleanD_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(xscale_cache_cleanID)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ CPWAIT(r0)
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_purgeID_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(xscale_cache_purgeID)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ CPWAIT(r0)
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_purgeD_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(xscale_cache_purgeD)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ CPWAIT(r0)
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_syncI_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(xscale_cache_syncI)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ CPWAIT(r0)
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT_AND_RETURN(r0)
+
+ENTRY(xscale_cache_flushD_rng)
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ CPWAIT_AND_RETURN(r0)
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher, cpu_switch().
+ * They actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+ENTRY(xscale_context_switch)
+ /*
+ * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
+ * Thus the data cache will contain only kernel data and the
+ * instruction cache will contain only kernel code, and all
+ * kernel mappings are shared by all processes.
+ */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
+
+ CPWAIT_AND_RETURN(r0)
+
+/*
+ * xscale_cpu_sleep
+ *
+ * This is called when there is nothing on any of the run queues.
+ * We go into IDLE mode so that any IRQ or FIQ will awaken us.
+ *
+ * If this is called with anything other than ARM_SLEEP_MODE_IDLE,
+ * ignore it.
+ */
+ENTRY(xscale_cpu_sleep)
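+	/*
+	 * Note: "tst r0, #0" always sets the Z flag, so the bne below is
+	 * never taken and we enter idle mode whatever r0 holds; the mode
+	 * check is effectively a no-op as written.
+	 */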
+ tst r0, #0x00000000
+ bne 1f
+ mov r0, #0x1
+ mcr p14, 0, r0, c7, c0, 0
+
+1:
+ mov pc, lr
diff --git a/sys/arm/arm/critical.c b/sys/arm/arm/critical.c
new file mode 100644
index 0000000..02a2135
--- /dev/null
+++ b/sys/arm/arm/critical.c
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/ucontext.h>
+#include <machine/clock.h>
+#include <machine/critical.h>
+
+/*
+ * cpu_critical_fork_exit() - cleanup after fork
+ */
+void
+cpu_critical_fork_exit(void)
+{
+}
+
diff --git a/sys/arm/arm/db_disasm.c b/sys/arm/arm/db_disasm.c
new file mode 100644
index 0000000..d5dfa92
--- /dev/null
+++ b/sys/arm/arm/db_disasm.c
@@ -0,0 +1,79 @@
+/* $NetBSD: db_disasm.c,v 1.4 2003/07/15 00:24:38 lukem Exp $ */
+
+/*
+ * Copyright (c) 1996 Mark Brinicombe.
+ * Copyright (c) 1996 Brini.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <machine/db_machdep.h>
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+
+#include <machine/disassem.h>
+
+/* Glue code to interface db_disasm to the generic ARM disassembler */
+
+static u_int db_disasm_read_word(u_int);
+static void db_disasm_printaddr(u_int);
+
+static const disasm_interface_t db_disasm_interface = {
+ db_disasm_read_word,
+ db_disasm_printaddr,
+ db_printf
+};
+
+static u_int
+db_disasm_read_word(u_int address)
+{
+
+ return db_get_value(address, 4, 0);
+}
+
+static void
+db_disasm_printaddr(u_int address)
+{
+
+ db_printsym((db_addr_t)address, DB_STGY_ANY);
+}
+
+vm_offset_t
+db_disasm(vm_offset_t loc, boolean_t altfmt)
+{
+
+ return disasm(&db_disasm_interface, loc, altfmt);
+}
+
+/* End of db_disasm.c */
diff --git a/sys/arm/arm/db_interface.c b/sys/arm/arm/db_interface.c
new file mode 100644
index 0000000..dbfe868
--- /dev/null
+++ b/sys/arm/arm/db_interface.c
@@ -0,0 +1,334 @@
+/* $NetBSD: db_interface.c,v 1.33 2003/08/25 04:51:10 mrg Exp $ */
+
+/*
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
+ */
+
+/*
+ * Interface to new debugger.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/systm.h> /* just for boothowto */
+#include <sys/exec.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <machine/db_machdep.h>
+#include <machine/katelib.h>
+#include <machine/vmparam.h>
+#include <machine/cpu.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_sym.h>
+#include <sys/cons.h>
+
+static int nil;
+
+db_regs_t ddb_regs;
+int db_access_und_sp (struct db_variable *, db_expr_t *, int);
+int db_access_abt_sp (struct db_variable *, db_expr_t *, int);
+int db_access_irq_sp (struct db_variable *, db_expr_t *, int);
+u_int db_fetch_reg (int, db_regs_t *);
+
+int db_trapper(u_int, u_int, trapframe_t *, int);
+
+struct db_variable db_regs[] = {
+ { "spsr", (int *)&DDB_REGS->tf_spsr, FCN_NULL, },
+ { "r0", (int *)&DDB_REGS->tf_r0, FCN_NULL, },
+ { "r1", (int *)&DDB_REGS->tf_r1, FCN_NULL, },
+ { "r2", (int *)&DDB_REGS->tf_r2, FCN_NULL, },
+ { "r3", (int *)&DDB_REGS->tf_r3, FCN_NULL, },
+ { "r4", (int *)&DDB_REGS->tf_r4, FCN_NULL, },
+ { "r5", (int *)&DDB_REGS->tf_r5, FCN_NULL, },
+ { "r6", (int *)&DDB_REGS->tf_r6, FCN_NULL, },
+ { "r7", (int *)&DDB_REGS->tf_r7, FCN_NULL, },
+ { "r8", (int *)&DDB_REGS->tf_r8, FCN_NULL, },
+ { "r9", (int *)&DDB_REGS->tf_r9, FCN_NULL, },
+ { "r10", (int *)&DDB_REGS->tf_r10, FCN_NULL, },
+ { "r11", (int *)&DDB_REGS->tf_r11, FCN_NULL, },
+ { "r12", (int *)&DDB_REGS->tf_r12, FCN_NULL, },
+ { "usr_sp", (int *)&DDB_REGS->tf_usr_sp, FCN_NULL, },
+ { "usr_lr", (int *)&DDB_REGS->tf_usr_lr, FCN_NULL, },
+ { "svc_sp", (int *)&DDB_REGS->tf_svc_sp, FCN_NULL, },
+ { "svc_lr", (int *)&DDB_REGS->tf_svc_lr, FCN_NULL, },
+ { "pc", (int *)&DDB_REGS->tf_pc, FCN_NULL, },
+ { "und_sp", &nil, db_access_und_sp, },
+ { "abt_sp", &nil, db_access_abt_sp, },
+ { "irq_sp", &nil, db_access_irq_sp, },
+};
+
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+int db_active = 0;
+
+int
+db_access_und_sp(struct db_variable *vp, db_expr_t *valp, int rw)
+{
+
+ if (rw == DB_VAR_GET)
+ *valp = get_stackptr(PSR_UND32_MODE);
+ return(0);
+}
+
+int
+db_access_abt_sp(struct db_variable *vp, db_expr_t *valp, int rw)
+{
+
+ if (rw == DB_VAR_GET)
+ *valp = get_stackptr(PSR_ABT32_MODE);
+ return(0);
+}
+
+int
+db_access_irq_sp(struct db_variable *vp, db_expr_t *valp, int rw)
+{
+
+ if (rw == DB_VAR_GET)
+ *valp = get_stackptr(PSR_IRQ32_MODE);
+ return(0);
+}
+
+#ifdef DDB
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+int
+kdb_trap(int type, db_regs_t *regs)
+{
+ int s;
+
+ switch (type) {
+ case T_BREAKPOINT: /* breakpoint */
+ case -1: /* keyboard interrupt */
+ break;
+ default:
+ break;
+ }
+
+	/* Should switch to kdb's own stack here. */
+
+ ddb_regs = *regs;
+
+ s = splhigh();
+ db_active++;
+ db_trap(type, 0/*code*/);
+ db_active--;
+ splx(s);
+
+ *regs = ddb_regs;
+
+ return (1);
+}
+#endif
+
+void
+db_show_mdpcpu(struct pcpu *pc)
+{
+}
+
+int
+db_validate_address(vm_offset_t addr)
+{
+ struct proc *p = curproc;
+ struct pmap *pmap;
+
+ if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap ||
+#ifndef ARM32_NEW_VM_LAYOUT
+ addr >= VM_MAXUSER_ADDRESS
+#else
+ addr >= VM_MIN_KERNEL_ADDRESS
+#endif
+ )
+ pmap = pmap_kernel();
+ else
+ pmap = p->p_vmspace->vm_map.pmap;
+
+ return (pmap_extract(pmap, addr) == FALSE);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+void
+db_read_bytes(vm_offset_t addr, size_t size, char *data)
+{
+ char *src = (char *)addr;
+
+ if (db_validate_address((u_int)src)) {
+ db_printf("address %p is invalid\n", src);
+ return;
+ }
+
+ if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) {
+ *((int*)data) = *((int*)src);
+ return;
+ }
+
+ if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) {
+ *((short*)data) = *((short*)src);
+ return;
+ }
+
+ while (size-- > 0) {
+ if (db_validate_address((u_int)src)) {
+ db_printf("address %p is invalid\n", src);
+ return;
+ }
+ *data++ = *src++;
+ }
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+void
+db_write_bytes(vm_offset_t addr, size_t size, char *data)
+{
+ char *dst;
+ size_t loop;
+
+	/* Writing to kernel text is not supported; refuse it. */
+ if (addr >= (vm_offset_t) btext && addr < (vm_offset_t) etext) {
+ return;
+ }
+
+ dst = (char *)addr;
+ if (db_validate_address((u_int)dst)) {
+ db_printf("address %p is invalid\n", dst);
+ return;
+ }
+
+ if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0)
+ *((int*)dst) = *((int*)data);
+	else if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0)
+ *((short*)dst) = *((short*)data);
+ else {
+ loop = size;
+ while (loop-- > 0) {
+ if (db_validate_address((u_int)dst)) {
+ db_printf("address %p is invalid\n", dst);
+ return;
+ }
+ *dst++ = *data++;
+ }
+ }
+
+ /* make sure the caches and memory are in sync */
+ cpu_icache_sync_range(addr, size);
+
+ /* In case the current page tables have been modified ... */
+ cpu_tlb_flushID();
+ cpu_cpwait();
+}
+
+#ifdef DDB
+void
+Debugger(const char *msg)
+{
+ db_printf("Debugger(\"%s\")\n", msg);
+ __asm(".word 0xe7ffffff");
+}
+
+int
+db_trapper(u_int addr, u_int inst, trapframe_t *frame, int fault_code)
+{
+
+ if (fault_code == 0) {
+ if ((inst & ~INSN_COND_MASK) == (BKPT_INST & ~INSN_COND_MASK))
+ kdb_trap(T_BREAKPOINT, frame);
+ else
+ kdb_trap(-1, frame);
+ } else
+ return (1);
+ return (0);
+}
+
+extern u_int end;
+
+#endif
+
+u_int
+db_fetch_reg(int reg, db_regs_t *db_regs)
+{
+
+ switch (reg) {
+ case 0:
+ return (db_regs->tf_r0);
+ case 1:
+ return (db_regs->tf_r1);
+ case 2:
+ return (db_regs->tf_r2);
+ case 3:
+ return (db_regs->tf_r3);
+ case 4:
+ return (db_regs->tf_r4);
+ case 5:
+ return (db_regs->tf_r5);
+ case 6:
+ return (db_regs->tf_r6);
+ case 7:
+ return (db_regs->tf_r7);
+ case 8:
+ return (db_regs->tf_r8);
+ case 9:
+ return (db_regs->tf_r9);
+ case 10:
+ return (db_regs->tf_r10);
+ case 11:
+ return (db_regs->tf_r11);
+ case 12:
+ return (db_regs->tf_r12);
+ case 13:
+ return (db_regs->tf_svc_sp);
+ case 14:
+ return (db_regs->tf_svc_lr);
+ case 15:
+ return (db_regs->tf_pc);
+ default:
+ panic("db_fetch_reg: botch");
+ }
+}
+
diff --git a/sys/arm/arm/db_trace.c b/sys/arm/arm/db_trace.c
new file mode 100644
index 0000000..82694b8
--- /dev/null
+++ b/sys/arm/arm/db_trace.c
@@ -0,0 +1,250 @@
+/* $NetBSD: db_trace.c,v 1.8 2003/01/17 22:28:48 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <machine/armreg.h>
+#include <machine/asm.h>
+#include <machine/cpufunc.h>
+#include <machine/db_machdep.h>
+#include <machine/vmparam.h>
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+
+#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+int db_md_set_watchpoint(db_expr_t addr, db_expr_t size);
+int db_md_clr_watchpoint(db_expr_t addr, db_expr_t size);
+void db_md_list_watchpoints(void);
+/*
+ * APCS stack frames are awkward beasts, so I don't think even trying to use
+ * a structure to represent them is a good idea.
+ *
+ * Here's the diagram from the APCS. Increasing address is _up_ the page.
+ *
+ * save code pointer [fp] <- fp points to here
+ * return link value [fp, #-4]
+ * return sp value [fp, #-8]
+ * return fp value [fp, #-12]
+ * [saved v7 value]
+ * [saved v6 value]
+ * [saved v5 value]
+ * [saved v4 value]
+ * [saved v3 value]
+ * [saved v2 value]
+ * [saved v1 value]
+ * [saved a4 value]
+ * [saved a3 value]
+ * [saved a2 value]
+ * [saved a1 value]
+ *
+ * The save code pointer points twelve bytes beyond the start of the
+ * code sequence (usually a single STM) that created the stack frame.
+ * We have to disassemble it if we want to know which of the optional
+ * fields are actually present.
+ */
+
+#define FR_SCP (0)
+#define FR_RLV (-1)
+#define FR_RSP (-2)
+#define FR_RFP (-3)
+
+void
+db_stack_trace_cmd(db_expr_t addr, int have_addr, db_expr_t count,
+    char *modif)
+{
+ u_int32_t *frame, *lastframe;
+ c_db_sym_t sym;
+ db_expr_t pc;
+ char c, *cp = modif;
+ const char *name;
+ db_expr_t value;
+ db_expr_t offset;
+ boolean_t kernel_only = TRUE;
+ boolean_t trace_thread = FALSE;
+ int scp_offset;
+
+	/* modif may be NULL (e.g. when called from db_print_backtrace()). */
+	while (cp != NULL && (c = *cp++) != 0) {
+ if (c == 'u')
+ kernel_only = FALSE;
+ if (c == 't')
+ trace_thread = TRUE;
+ }
+
+ if (!have_addr)
+ frame = (u_int32_t *)(DDB_REGS->tf_r11);
+ else {
+ if (trace_thread) {
+ struct proc *p;
+ struct thread *td;
+ pid_t pid = (pid_t)addr;
+ LIST_FOREACH(p, &allproc, p_list) {
+ if (p->p_pid == pid)
+ break;
+ }
+
+ if (p == NULL) {
+ db_printf("not found\n");
+ return;
+ }
+ if (!(p->p_sflag & PS_INMEM)) {
+ db_printf("swapped out\n");
+ return;
+ }
+ td = FIRST_THREAD_IN_PROC(p);
+ frame = (u_int32_t *)(td->td_pcb->un_32.pcb32_r11);
+ db_printf("at %p\n", frame);
+ } else
+ frame = (u_int32_t *)(addr);
+ }
+ lastframe = NULL;
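+	/*
+	 * get_pc_str_offset() is the number of bytes by which a stored
+	 * PC value leads the instruction that stored it; turn it into a
+	 * negative word index so that ((u_int32_t *)scp)[scp_offset]
+	 * below fetches the STM that built the frame.
+	 */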
+ scp_offset = -(get_pc_str_offset() >> 2);
+
+ while (count-- && frame != NULL) {
+ db_addr_t scp;
+ u_int32_t savecode;
+ int r;
+ u_int32_t *rp;
+ const char *sep;
+
+ /*
+ * In theory, the SCP isn't guaranteed to be in the function
+ * that generated the stack frame. We hope for the best.
+ */
+#ifdef __PROG26
+ scp = frame[FR_SCP] & R15_PC;
+#else
+ scp = frame[FR_SCP];
+#endif
+
+ db_printsym(scp, DB_STGY_PROC);
+ db_printf("\n\t");
+ pc = ddb_regs.tf_pc;
+ sym = db_search_symbol(pc, DB_STGY_ANY, &offset);
+ if (sym == C_DB_SYM_NULL) {
+ value = 0;
+ name = "(null)";
+ } else
+ db_symbol_values(sym, &name, &value);
+ db_printf("%s() at ", name);
+ db_printsym(pc, DB_STGY_PROC);
+ db_printf("\n");
+#ifdef __PROG26
+ db_printf("scp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV] & R15_PC);
+ db_printsym(frame[FR_RLV] & R15_PC, DB_STGY_PROC);
+ db_printf(")\n");
+#else
+ db_printf("scp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV]);
+ db_printsym(frame[FR_RLV], DB_STGY_PROC);
+ db_printf(")\n");
+#endif
+ db_printf("\trsp=0x%08x rfp=0x%08x", frame[FR_RSP], frame[FR_RFP]);
+
+ savecode = ((u_int32_t *)scp)[scp_offset];
+ if ((savecode & 0x0e100000) == 0x08000000) {
+ /* Looks like an STM */
+ rp = frame - 4;
+ sep = "\n\t";
+ for (r = 10; r >= 0; r--) {
+ if (savecode & (1 << r)) {
+ db_printf("%sr%d=0x%08x",
+ sep, r, *rp--);
+ sep = (frame - rp) % 4 == 2 ?
+ "\n\t" : " ";
+ }
+ }
+ }
+
+ db_printf("\n");
+
+ /*
+ * Switch to next frame up
+ */
+ if (frame[FR_RFP] == 0)
+ break; /* Top of stack */
+
+ lastframe = frame;
+ frame = (u_int32_t *)(frame[FR_RFP]);
+
+ if (INKERNEL((int)frame)) {
+ /* staying in kernel */
+ if (frame <= lastframe) {
+ db_printf("Bad frame pointer: %p\n", frame);
+ break;
+ }
+ } else if (INKERNEL((int)lastframe)) {
+ /* switch from user to kernel */
+ if (kernel_only)
+ break; /* kernel stack only */
+ } else {
+ /* in user */
+ if (frame <= lastframe) {
+ db_printf("Bad user frame pointer: %p\n",
+ frame);
+ break;
+ }
+ }
+ }
+}
+
+/* XXX stubs */
+void
+db_md_list_watchpoints()
+{
+}
+
+int
+db_md_clr_watchpoint(db_expr_t addr, db_expr_t size)
+{
+ return (0);
+}
+
+int
+db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
+{
+ return (0);
+}
+
+void
+db_print_backtrace(void)
+{
+
+ db_stack_trace_cmd((db_expr_t)__builtin_frame_address(0), 1, -1, NULL);
+}
diff --git a/sys/arm/arm/disassem.c b/sys/arm/arm/disassem.c
new file mode 100644
index 0000000..02f766e
--- /dev/null
+++ b/sys/arm/arm/disassem.c
@@ -0,0 +1,681 @@
+/* $NetBSD: disassem.c,v 1.14 2003/03/27 16:58:36 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1996 Mark Brinicombe.
+ * Copyright (c) 1996 Brini.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * db_disasm.c
+ *
+ * Kernel disassembler
+ *
+ * Created : 10/02/96
+ *
+ * Structured after the sparc/sparc/db_disasm.c by David S. Miller &
+ * Paul Kranenburg
+ *
+ * This code is not complete. Not all instructions are disassembled.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+
+
+#include <sys/systm.h>
+#include <machine/disassem.h>
+#include <machine/armreg.h>
+#include <ddb/ddb.h>
+
+/*
+ * General instruction format
+ *
+ * insn[cc][mod] [operands]
+ *
+ * Fields with an uppercase format code follow directly after the
+ * instruction, before the separator, i.e. they modify the instruction
+ * rather than just being an operand to it.  The only exception is the
+ * writeback flag, which follows an operand.
+ *
+ *
+ * 2 - print Operand 2 of a data processing instruction
+ * d - destination register (bits 12-15)
+ * n - n register (bits 16-19)
+ * s - s register (bits 8-11)
+ * o - indirect register rn (bits 16-19) (used by swap)
+ * m - m register (bits 0-3)
+ * a - address operand of ldr/str instruction
+ * l - register list for ldm/stm instruction
+ * f - 1st fp operand (register) (bits 12-14)
+ * g - 2nd fp operand (register) (bits 16-18)
+ * h - 3rd fp operand (register/immediate) (bits 0-4)
+ * b - branch address
+ * t - thumb branch address (bits 24, 0-23)
+ * k - breakpoint comment (bits 0-3, 8-19)
+ * X - block transfer type
+ * Y - block transfer type (r13 base)
+ * c - comment field bits(0-23)
+ * p - saved or current status register
+ * F - PSR transfer fields
+ * D - destination-is-r15 (P) flag on TST, TEQ, CMP, CMN
+ * L - co-processor transfer size
+ * S - set status flag
+ * P - fp precision
+ * Q - fp precision (for ldf/stf)
+ * R - fp rounding
+ * v - co-processor data transfer registers + addressing mode
+ * W - writeback flag
+ * x - instruction in hex
+ * # - co-processor number
+ * y - co-processor data processing registers
+ * z - co-processor register transfer registers
+ */
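+/*
+ * Worked example (not from the table below, just illustrative): an
+ * ADDS instruction matches "add" with format "Sdn2" and prints as
+ *
+ *	adds	r0, r1, r2, lsl #2
+ *
+ * i.e. the S flag is appended to the mnemonic, then the destination,
+ * n register and operand 2 follow, comma-separated.
+ */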
+
+struct arm32_insn {
+ u_int mask;
+ u_int pattern;
+	char *name;
+	char *format;
+};
+
+static const struct arm32_insn arm32_i[] = {
+ { 0x0fffffff, 0x0ff00000, "imb", "c" }, /* Before swi */
+ { 0x0fffffff, 0x0ff00001, "imbrange", "c" }, /* Before swi */
+ { 0x0f000000, 0x0f000000, "swi", "c" },
+ { 0xfe000000, 0xfa000000, "blx", "t" }, /* Before b and bl */
+ { 0x0f000000, 0x0a000000, "b", "b" },
+ { 0x0f000000, 0x0b000000, "bl", "b" },
+ { 0x0fe000f0, 0x00000090, "mul", "Snms" },
+ { 0x0fe000f0, 0x00200090, "mla", "Snmsd" },
+ { 0x0fe000f0, 0x00800090, "umull", "Sdnms" },
+ { 0x0fe000f0, 0x00c00090, "smull", "Sdnms" },
+ { 0x0fe000f0, 0x00a00090, "umlal", "Sdnms" },
+ { 0x0fe000f0, 0x00e00090, "smlal", "Sdnms" },
+ { 0x0d700000, 0x04200000, "strt", "daW" },
+ { 0x0d700000, 0x04300000, "ldrt", "daW" },
+ { 0x0d700000, 0x04600000, "strbt", "daW" },
+ { 0x0d700000, 0x04700000, "ldrbt", "daW" },
+ { 0x0c500000, 0x04000000, "str", "daW" },
+ { 0x0c500000, 0x04100000, "ldr", "daW" },
+ { 0x0c500000, 0x04400000, "strb", "daW" },
+ { 0x0c500000, 0x04500000, "ldrb", "daW" },
+ { 0x0e1f0000, 0x080d0000, "stm", "YnWl" },/* separate out r13 base */
+ { 0x0e1f0000, 0x081d0000, "ldm", "YnWl" },/* separate out r13 base */
+ { 0x0e100000, 0x08000000, "stm", "XnWl" },
+ { 0x0e100000, 0x08100000, "ldm", "XnWl" },
+ { 0x0e1000f0, 0x00100090, "ldrb", "de" },
+ { 0x0e1000f0, 0x00000090, "strb", "de" },
+ { 0x0e1000f0, 0x001000d0, "ldrsb", "de" },
+ { 0x0e1000f0, 0x001000b0, "ldrh", "de" },
+ { 0x0e1000f0, 0x000000b0, "strh", "de" },
+ { 0x0e1000f0, 0x001000f0, "ldrsh", "de" },
+ { 0x0f200090, 0x00200090, "und", "x" }, /* Before data processing */
+ { 0x0e1000d0, 0x000000d0, "und", "x" }, /* Before data processing */
+ { 0x0ff00ff0, 0x01000090, "swp", "dmo" },
+ { 0x0ff00ff0, 0x01400090, "swpb", "dmo" },
+ { 0x0fbf0fff, 0x010f0000, "mrs", "dp" }, /* Before data processing */
+ { 0x0fb0fff0, 0x0120f000, "msr", "pFm" },/* Before data processing */
+ { 0x0fb0f000, 0x0320f000, "msr", "pF2" },/* Before data processing */
+ { 0x0ffffff0, 0x012fff10, "bx", "m" },
+ { 0x0fff0ff0, 0x016f0f10, "clz", "dm" },
+ { 0x0ffffff0, 0x012fff30, "blx", "m" },
+ { 0xfff000f0, 0xe1200070, "bkpt", "k" },
+ { 0x0de00000, 0x00000000, "and", "Sdn2" },
+ { 0x0de00000, 0x00200000, "eor", "Sdn2" },
+ { 0x0de00000, 0x00400000, "sub", "Sdn2" },
+ { 0x0de00000, 0x00600000, "rsb", "Sdn2" },
+ { 0x0de00000, 0x00800000, "add", "Sdn2" },
+ { 0x0de00000, 0x00a00000, "adc", "Sdn2" },
+ { 0x0de00000, 0x00c00000, "sbc", "Sdn2" },
+ { 0x0de00000, 0x00e00000, "rsc", "Sdn2" },
+ { 0x0df00000, 0x01100000, "tst", "Dn2" },
+ { 0x0df00000, 0x01300000, "teq", "Dn2" },
+ { 0x0de00000, 0x01400000, "cmp", "Dn2" },
+ { 0x0de00000, 0x01600000, "cmn", "Dn2" },
+ { 0x0de00000, 0x01800000, "orr", "Sdn2" },
+ { 0x0de00000, 0x01a00000, "mov", "Sd2" },
+ { 0x0de00000, 0x01c00000, "bic", "Sdn2" },
+ { 0x0de00000, 0x01e00000, "mvn", "Sd2" },
+ { 0x0ff08f10, 0x0e000100, "adf", "PRfgh" },
+ { 0x0ff08f10, 0x0e100100, "muf", "PRfgh" },
+ { 0x0ff08f10, 0x0e200100, "suf", "PRfgh" },
+ { 0x0ff08f10, 0x0e300100, "rsf", "PRfgh" },
+ { 0x0ff08f10, 0x0e400100, "dvf", "PRfgh" },
+ { 0x0ff08f10, 0x0e500100, "rdf", "PRfgh" },
+ { 0x0ff08f10, 0x0e600100, "pow", "PRfgh" },
+ { 0x0ff08f10, 0x0e700100, "rpw", "PRfgh" },
+ { 0x0ff08f10, 0x0e800100, "rmf", "PRfgh" },
+ { 0x0ff08f10, 0x0e900100, "fml", "PRfgh" },
+ { 0x0ff08f10, 0x0ea00100, "fdv", "PRfgh" },
+ { 0x0ff08f10, 0x0eb00100, "frd", "PRfgh" },
+ { 0x0ff08f10, 0x0ec00100, "pol", "PRfgh" },
+ { 0x0f008f10, 0x0e000100, "fpbop", "PRfgh" },
+ { 0x0ff08f10, 0x0e008100, "mvf", "PRfh" },
+ { 0x0ff08f10, 0x0e108100, "mnf", "PRfh" },
+ { 0x0ff08f10, 0x0e208100, "abs", "PRfh" },
+ { 0x0ff08f10, 0x0e308100, "rnd", "PRfh" },
+ { 0x0ff08f10, 0x0e408100, "sqt", "PRfh" },
+ { 0x0ff08f10, 0x0e508100, "log", "PRfh" },
+ { 0x0ff08f10, 0x0e608100, "lgn", "PRfh" },
+ { 0x0ff08f10, 0x0e708100, "exp", "PRfh" },
+ { 0x0ff08f10, 0x0e808100, "sin", "PRfh" },
+ { 0x0ff08f10, 0x0e908100, "cos", "PRfh" },
+ { 0x0ff08f10, 0x0ea08100, "tan", "PRfh" },
+ { 0x0ff08f10, 0x0eb08100, "asn", "PRfh" },
+ { 0x0ff08f10, 0x0ec08100, "acs", "PRfh" },
+ { 0x0ff08f10, 0x0ed08100, "atn", "PRfh" },
+ { 0x0f008f10, 0x0e008100, "fpuop", "PRfh" },
+ { 0x0e100f00, 0x0c000100, "stf", "QLv" },
+ { 0x0e100f00, 0x0c100100, "ldf", "QLv" },
+ { 0x0ff00f10, 0x0e000110, "flt", "PRgd" },
+ { 0x0ff00f10, 0x0e100110, "fix", "PRdh" },
+ { 0x0ff00f10, 0x0e200110, "wfs", "d" },
+ { 0x0ff00f10, 0x0e300110, "rfs", "d" },
+ { 0x0ff00f10, 0x0e400110, "wfc", "d" },
+ { 0x0ff00f10, 0x0e500110, "rfc", "d" },
+ { 0x0ff0ff10, 0x0e90f110, "cmf", "PRgh" },
+ { 0x0ff0ff10, 0x0eb0f110, "cnf", "PRgh" },
+ { 0x0ff0ff10, 0x0ed0f110, "cmfe", "PRgh" },
+ { 0x0ff0ff10, 0x0ef0f110, "cnfe", "PRgh" },
+ { 0xff100010, 0xfe000010, "mcr2", "#z" },
+ { 0x0f100010, 0x0e000010, "mcr", "#z" },
+ { 0xff100010, 0xfe100010, "mrc2", "#z" },
+ { 0x0f100010, 0x0e100010, "mrc", "#z" },
+ { 0xff000010, 0xfe000000, "cdp2", "#y" },
+ { 0x0f000010, 0x0e000000, "cdp", "#y" },
+ { 0xfe100090, 0xfc100000, "ldc2", "L#v" },
+ { 0x0e100090, 0x0c100000, "ldc", "L#v" },
+ { 0xfe100090, 0xfc000000, "stc2", "L#v" },
+ { 0x0e100090, 0x0c000000, "stc", "L#v" },
+ { 0x00000000, 0x00000000, NULL, NULL }
+};
+
+static char const arm32_insn_conditions[][4] = {
+ "eq", "ne", "cs", "cc",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "", "nv"
+};
+
+static char const insn_block_transfers[][4] = {
+ "da", "ia", "db", "ib"
+};
+
+static char const insn_stack_block_transfers[][4] = {
+ "ed", "ea", "fd", "fa"
+};
+
+static char const op_shifts[][4] = {
+ "lsl", "lsr", "asr", "ror"
+};
+
+static char const insn_fpa_rounding[][2] = {
+ "", "p", "m", "z"
+};
+
+static char const insn_fpa_precision[][2] = {
+ "s", "d", "e", "p"
+};
+
+static char const insn_fpaconstants[][8] = {
+ "0.0", "1.0", "2.0", "3.0",
+ "4.0", "5.0", "0.5", "10.0"
+};
+
+#define insn_condition(x)	arm32_insn_conditions[((x) >> 28) & 0x0f]
+#define insn_blktrans(x)	insn_block_transfers[((x) >> 23) & 3]
+#define insn_stkblktrans(x)	insn_stack_block_transfers[((x) >> 23) & 3]
+#define op2_shift(x)		op_shifts[((x) >> 5) & 3]
+#define insn_fparnd(x)		insn_fpa_rounding[((x) >> 5) & 0x03]
+#define insn_fpaprec(x)		insn_fpa_precision[(((x) >> 18) & 2) | (((x) >> 7) & 1)]
+#define insn_fpaprect(x)	insn_fpa_precision[(((x) >> 21) & 2) | (((x) >> 15) & 1)]
+#define insn_fpaimm(x)		insn_fpaconstants[(x) & 0x07]
+
+/* Local prototypes */
+static void disasm_register_shift(const disasm_interface_t *di, u_int insn);
+static void disasm_print_reglist(const disasm_interface_t *di, u_int insn);
+static void disasm_insn_ldrstr(const disasm_interface_t *di, u_int insn,
+ u_int loc);
+static void disasm_insn_ldrhstrh(const disasm_interface_t *di, u_int insn,
+ u_int loc);
+static void disasm_insn_ldcstc(const disasm_interface_t *di, u_int insn,
+ u_int loc);
+static u_int disassemble_readword(u_int address);
+static void disassemble_printaddr(u_int address);
+
+vm_offset_t
+disasm(const disasm_interface_t *di, vm_offset_t loc, int altfmt)
+{
+	const struct arm32_insn *i_ptr = arm32_i;
+
+	u_int insn;
+	int matchp;
+	int branch;
+	char *f_ptr;
+ int fmt;
+
+ fmt = 0;
+ matchp = 0;
+ insn = di->di_readword(loc);
+
+/* di->di_printf("loc=%08x insn=%08x : ", loc, insn);*/
+
+ while (i_ptr->name) {
+ if ((insn & i_ptr->mask) == i_ptr->pattern) {
+ matchp = 1;
+ break;
+ }
+ i_ptr++;
+ }
+
+ if (!matchp) {
+ di->di_printf("und%s\t%08x\n", insn_condition(insn), insn);
+ return(loc + INSN_SIZE);
+ }
+
+ /* If instruction forces condition code, don't print it. */
+ if ((i_ptr->mask & 0xf0000000) == 0xf0000000)
+ di->di_printf("%s", i_ptr->name);
+ else
+ di->di_printf("%s%s", i_ptr->name, insn_condition(insn));
+
+ f_ptr = i_ptr->format;
+
+ /* Insert tab if there are no instruction modifiers */
+
+ if (*(f_ptr) < 'A' || *(f_ptr) > 'Z') {
+ ++fmt;
+ di->di_printf("\t");
+ }
+
+ while (*f_ptr) {
+ switch (*f_ptr) {
+ /* 2 - print Operand 2 of a data processing instruction */
+ case '2':
+			if (insn & 0x02000000) {
+				int rotate = (insn >> 7) & 0x1e;
+
+				/*
+				 * Rotate the 8-bit immediate right by
+				 * "rotate" bits; special-case 0 to avoid
+				 * an undefined shift by 32.
+				 */
+				di->di_printf("#0x%08x", rotate == 0 ?
+				    (insn & 0xff) :
+				    ((insn & 0xff) << (32 - rotate) |
+				    (insn & 0xff) >> rotate));
+			} else {
+ disasm_register_shift(di, insn);
+ }
+ break;
+ /* d - destination register (bits 12-15) */
+ case 'd':
+ di->di_printf("r%d", ((insn >> 12) & 0x0f));
+ break;
+ /* D - insert 'p' if Rd is R15 */
+ case 'D':
+ if (((insn >> 12) & 0x0f) == 15)
+ di->di_printf("p");
+ break;
+ /* n - n register (bits 16-19) */
+ case 'n':
+ di->di_printf("r%d", ((insn >> 16) & 0x0f));
+ break;
+ /* s - s register (bits 8-11) */
+ case 's':
+ di->di_printf("r%d", ((insn >> 8) & 0x0f));
+ break;
+ /* o - indirect register rn (bits 16-19) (used by swap) */
+ case 'o':
+ di->di_printf("[r%d]", ((insn >> 16) & 0x0f));
+ break;
+ /* m - m register (bits 0-4) */
+ case 'm':
+ di->di_printf("r%d", ((insn >> 0) & 0x0f));
+ break;
+ /* a - address operand of ldr/str instruction */
+ case 'a':
+ disasm_insn_ldrstr(di, insn, loc);
+ break;
+ /* e - address operand of ldrh/strh instruction */
+ case 'e':
+ disasm_insn_ldrhstrh(di, insn, loc);
+ break;
+ /* l - register list for ldm/stm instruction */
+ case 'l':
+ disasm_print_reglist(di, insn);
+ break;
+ /* f - 1st fp operand (register) (bits 12-14) */
+ case 'f':
+ di->di_printf("f%d", (insn >> 12) & 7);
+ break;
+ /* g - 2nd fp operand (register) (bits 16-18) */
+ case 'g':
+ di->di_printf("f%d", (insn >> 16) & 7);
+ break;
+ /* h - 3rd fp operand (register/immediate) (bits 0-4) */
+ case 'h':
+ if (insn & (1 << 3))
+ di->di_printf("#%s", insn_fpaimm(insn));
+ else
+ di->di_printf("f%d", insn & 7);
+ break;
+ /* b - branch address */
+ case 'b':
+ branch = ((insn << 2) & 0x03ffffff);
+ if (branch & 0x02000000)
+ branch |= 0xfc000000;
+ di->di_printaddr(loc + 8 + branch);
+ break;
+ /* t - blx address */
+ case 't':
+ branch = ((insn << 2) & 0x03ffffff) |
+ (insn >> 23 & 0x00000002);
+ if (branch & 0x02000000)
+ branch |= 0xfc000000;
+ di->di_printaddr(loc + 8 + branch);
+ break;
+ /* X - block transfer type */
+ case 'X':
+ di->di_printf("%s", insn_blktrans(insn));
+ break;
+ /* Y - block transfer type (r13 base) */
+ case 'Y':
+ di->di_printf("%s", insn_stkblktrans(insn));
+ break;
+ /* c - comment field bits(0-23) */
+ case 'c':
+ di->di_printf("0x%08x", (insn & 0x00ffffff));
+ break;
+ /* k - breakpoint comment (bits 0-3, 8-19) */
+ case 'k':
+ di->di_printf("0x%04x",
+ (insn & 0x000fff00) >> 4 | (insn & 0x0000000f));
+ break;
+ /* p - saved or current status register */
+ case 'p':
+ if (insn & 0x00400000)
+ di->di_printf("spsr");
+ else
+ di->di_printf("cpsr");
+ break;
+ /* F - PSR transfer fields */
+ case 'F':
+ di->di_printf("_");
+ if (insn & (1 << 16))
+ di->di_printf("c");
+ if (insn & (1 << 17))
+ di->di_printf("x");
+ if (insn & (1 << 18))
+ di->di_printf("s");
+ if (insn & (1 << 19))
+ di->di_printf("f");
+ break;
+ /* B - byte transfer flag */
+ case 'B':
+ if (insn & 0x00400000)
+ di->di_printf("b");
+ break;
+ /* L - co-processor transfer size */
+ case 'L':
+ if (insn & (1 << 22))
+ di->di_printf("l");
+ break;
+ /* S - set status flag */
+ case 'S':
+ if (insn & 0x00100000)
+ di->di_printf("s");
+ break;
+ /* P - fp precision */
+ case 'P':
+ di->di_printf("%s", insn_fpaprec(insn));
+ break;
+ /* Q - fp precision (for ldf/stf) */
+ case 'Q':
+ break;
+ /* R - fp rounding */
+ case 'R':
+ di->di_printf("%s", insn_fparnd(insn));
+ break;
+ /* W - writeback flag */
+ case 'W':
+ if (insn & (1 << 21))
+ di->di_printf("!");
+ break;
+ /* # - co-processor number */
+ case '#':
+ di->di_printf("p%d", (insn >> 8) & 0x0f);
+ break;
+ /* v - co-processor data transfer registers+addressing mode */
+ case 'v':
+ disasm_insn_ldcstc(di, insn, loc);
+ break;
+ /* x - instruction in hex */
+ case 'x':
+ di->di_printf("0x%08x", insn);
+ break;
+ /* y - co-processor data processing registers */
+ case 'y':
+ di->di_printf("%d, ", (insn >> 20) & 0x0f);
+
+ di->di_printf("c%d, c%d, c%d", (insn >> 12) & 0x0f,
+ (insn >> 16) & 0x0f, insn & 0x0f);
+
+ di->di_printf(", %d", (insn >> 5) & 0x07);
+ break;
+ /* z - co-processor register transfer registers */
+ case 'z':
+ di->di_printf("%d, ", (insn >> 21) & 0x07);
+ di->di_printf("r%d, c%d, c%d, %d",
+ (insn >> 12) & 0x0f, (insn >> 16) & 0x0f,
+ insn & 0x0f, (insn >> 5) & 0x07);
+
+/* if (((insn >> 5) & 0x07) != 0)
+ di->di_printf(", %d", (insn >> 5) & 0x07);*/
+ break;
+ default:
+ di->di_printf("[%c - unknown]", *f_ptr);
+ break;
+ }
+ if (*(f_ptr+1) >= 'A' && *(f_ptr+1) <= 'Z')
+ ++f_ptr;
+ else if (*(++f_ptr)) {
+ ++fmt;
+ if (fmt == 1)
+ di->di_printf("\t");
+ else
+ di->di_printf(", ");
+ }
+	}
+
+ di->di_printf("\n");
+
+ return(loc + INSN_SIZE);
+}
+
+
+static void
+disasm_register_shift(const disasm_interface_t *di, u_int insn)
+{
+ di->di_printf("r%d", (insn & 0x0f));
+ if ((insn & 0x00000ff0) == 0)
+ ;
+ else if ((insn & 0x00000ff0) == 0x00000060)
+ di->di_printf(", rrx");
+ else {
+ if (insn & 0x10)
+ di->di_printf(", %s r%d", op2_shift(insn),
+ (insn >> 8) & 0x0f);
+ else
+ di->di_printf(", %s #%d", op2_shift(insn),
+ (insn >> 7) & 0x1f);
+ }
+}
+
+
+static void
+disasm_print_reglist(const disasm_interface_t *di, u_int insn)
+{
+ int loop;
+ int start;
+ int comma;
+
+ di->di_printf("{");
+ start = -1;
+ comma = 0;
+
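+	/*
+	 * Collect consecutive registers into "rN-rM" ranges; the loop
+	 * runs one step past r15 so that a range ending at r15 is
+	 * flushed.
+	 */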
+ for (loop = 0; loop < 17; ++loop) {
+ if (start != -1) {
+ if (loop == 16 || !(insn & (1 << loop))) {
+ if (comma)
+ di->di_printf(", ");
+ else
+ comma = 1;
+ if (start == loop - 1)
+ di->di_printf("r%d", start);
+ else
+ di->di_printf("r%d-r%d", start, loop - 1);
+ start = -1;
+ }
+ } else {
+ if (insn & (1 << loop))
+ start = loop;
+ }
+ }
+ di->di_printf("}");
+
+ if (insn & (1 << 22))
+ di->di_printf("^");
+}
+
+static void
+disasm_insn_ldrstr(const disasm_interface_t *di, u_int insn, u_int loc)
+{
+ int offset;
+
+ offset = insn & 0xfff;
+ if ((insn & 0x032f0000) == 0x010f0000) {
+ /* rA = pc, immediate index */
+ if (insn & 0x00800000)
+ loc += offset;
+ else
+ loc -= offset;
+ di->di_printaddr(loc + 8);
+ } else {
+ di->di_printf("[r%d", (insn >> 16) & 0x0f);
+ if ((insn & 0x03000fff) != 0x01000000) {
+ di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]");
+ if (!(insn & 0x00800000))
+ di->di_printf("-");
+ if (insn & (1 << 25))
+ disasm_register_shift(di, insn);
+ else
+ di->di_printf("#0x%03x", offset);
+ }
+ if (insn & (1 << 24))
+ di->di_printf("]");
+ }
+}
+
+static void
+disasm_insn_ldrhstrh(const disasm_interface_t *di, u_int insn, u_int loc)
+{
+ int offset;
+
+ offset = ((insn & 0xf00) >> 4) | (insn & 0xf);
+ if ((insn & 0x004f0000) == 0x004f0000) {
+ /* rA = pc, immediate index */
+ if (insn & 0x00800000)
+ loc += offset;
+ else
+ loc -= offset;
+ di->di_printaddr(loc + 8);
+ } else {
+ di->di_printf("[r%d", (insn >> 16) & 0x0f);
+ if ((insn & 0x01400f0f) != 0x01400000) {
+ di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]");
+ if (!(insn & 0x00800000))
+ di->di_printf("-");
+ if (insn & (1 << 22))
+ di->di_printf("#0x%02x", offset);
+ else
+ di->di_printf("r%d", (insn & 0x0f));
+ }
+ if (insn & (1 << 24))
+ di->di_printf("]");
+ }
+}
+
+static void
+disasm_insn_ldcstc(const disasm_interface_t *di, u_int insn, u_int loc)
+{
+ if (((insn >> 8) & 0xf) == 1)
+ di->di_printf("f%d, ", (insn >> 12) & 0x07);
+ else
+ di->di_printf("c%d, ", (insn >> 12) & 0x0f);
+
+ di->di_printf("[r%d", (insn >> 16) & 0x0f);
+
+ di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]");
+
+ if (!(insn & (1 << 23)))
+ di->di_printf("-");
+
+ di->di_printf("#0x%03x", (insn & 0xff) << 2);
+
+ if (insn & (1 << 24))
+ di->di_printf("]");
+
+ if (insn & (1 << 21))
+ di->di_printf("!");
+}
+
+static u_int
+disassemble_readword(u_int address)
+{
+ return(*((u_int *)address));
+}
+
+static void
+disassemble_printaddr(u_int address)
+{
+ printf("0x%08x", address);
+}
+
+static const disasm_interface_t disassemble_di = {
+ disassemble_readword, disassemble_printaddr, db_printf
+};
+
+void
+disassemble(u_int address)
+{
+
+ (void)disasm(&disassemble_di, address, 0);
+}
+
+/* End of disassem.c */
diff --git a/sys/arm/arm/dump_machdep.c b/sys/arm/arm/dump_machdep.c
new file mode 100644
index 0000000..38841e4
--- /dev/null
+++ b/sys/arm/arm/dump_machdep.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/cons.h>
+#include <sys/kernel.h>
+#include <sys/kerneldump.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/md_var.h>
+
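+/*
+ * Stub for the initial FreeBSD/arm import: machine-dependent crash
+ * dumping is not implemented yet.
+ */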
+void
+dumpsys(struct dumperinfo *di)
+{
+}
diff --git a/sys/arm/arm/elf_machdep.c b/sys/arm/arm/elf_machdep.c
new file mode 100644
index 0000000..373e937
--- /dev/null
+++ b/sys/arm/arm/elf_machdep.c
@@ -0,0 +1,213 @@
+/*-
+ * Copyright 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/linker.h>
+#include <sys/sysent.h>
+#include <sys/imgact_elf.h>
+#include <sys/syscall.h>
+#include <sys/signalvar.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_param.h>
+
+#include <machine/elf.h>
+#include <machine/md_var.h>
+
+struct sysentvec elf32_freebsd_sysvec = {
+ SYS_MAXSYSCALL,
+ sysent,
+ 0,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ __elfN(freebsd_fixup),
+ sendsig,
+ NULL,
+ NULL,
+ NULL,
+ "FreeBSD ELF32",
+ __elfN(coredump),
+ NULL,
+ MINSIGSTKSZ,
+ PAGE_SIZE,
+ VM_MIN_ADDRESS,
+ VM_MAXUSER_ADDRESS,
+ USRSTACK,
+ PS_STRINGS,
+ VM_PROT_ALL,
+ exec_copyout_strings,
+ exec_setregs,
+ NULL
+};
+
+static Elf32_Brandinfo freebsd_brand_info = {
+ ELFOSABI_FREEBSD,
+ EM_ARM,
+ "FreeBSD",
+ NULL,
+ "/libexec/ld-elf.so.1",
+ &elf32_freebsd_sysvec,
+ NULL,
+ };
+
+SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t) elf32_insert_brand_entry,
+ &freebsd_brand_info);
+
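+/*
+ * A second brand entry for older binaries whose interpreter path
+ * predates the move of ld-elf.so.1 to /libexec.
+ */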
+static Elf32_Brandinfo freebsd_brand_oinfo = {
+ ELFOSABI_FREEBSD,
+ EM_ARM,
+ "FreeBSD",
+ NULL,
+ "/usr/libexec/ld-elf.so.1",
+ &elf32_freebsd_sysvec,
+ NULL,
+ };
+
+SYSINIT(oelf32, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t) elf32_insert_brand_entry,
+ &freebsd_brand_oinfo);
+
+/* Process one elf relocation with addend. */
+static int
+elf_reloc_internal(linker_file_t lf, const void *data, int type, int local)
+{
+ Elf_Addr relocbase = (Elf_Addr) lf->address;
+ Elf_Addr *where;
+ Elf_Addr addr;
+ Elf_Addr addend;
+ Elf_Word rtype, symidx;
+ const Elf_Rel *rel;
+ const Elf_Rela *rela;
+
+ switch (type) {
+ case ELF_RELOC_REL:
+ rel = (const Elf_Rel *)data;
+ where = (Elf_Addr *) (relocbase + rel->r_offset);
+ addend = *where;
+ rtype = ELF_R_TYPE(rel->r_info);
+ symidx = ELF_R_SYM(rel->r_info);
+ break;
+ case ELF_RELOC_RELA:
+ rela = (const Elf_Rela *)data;
+ where = (Elf_Addr *) (relocbase + rela->r_offset);
+ addend = rela->r_addend;
+ rtype = ELF_R_TYPE(rela->r_info);
+ symidx = ELF_R_SYM(rela->r_info);
+ break;
+ default:
+ panic("unknown reloc type %d\n", type);
+ }
+
+ if (local) {
+ if (rtype == R_ARM_RELATIVE) { /* A + B */
+ addr = relocbase + addend;
+ if (*where != addr)
+ *where = addr;
+ }
+ return (0);
+ }
+
+ switch (rtype) {
+
+ case R_ARM_NONE: /* none */
+ break;
+
+ case R_ARM_PC24: /* S + A - P */
+ addr = elf_lookup(lf, symidx, 1);
+ if (addr == 0)
+ return -1;
+ addr += addend - (Elf_Addr)where;
+ if (*where != addr)
+ *where = addr;
+ break;
+
+ case R_ARM_COPY: /* none */
+ /*
+ * There shouldn't be copy relocations in kernel
+ * objects.
+ */
+ printf("kldload: unexpected R_COPY relocation\n");
+ return -1;
+ break;
+
+ case R_ARM_GLOB_DAT: /* S */
+ addr = elf_lookup(lf, symidx, 1);
+ if (addr == 0)
+ return -1;
+ if (*where != addr)
+ *where = addr;
+ break;
+
+ case R_ARM_RELATIVE:
+ break;
+
+ default:
+ printf("kldload: unexpected relocation type %d\n",
+ rtype);
+ return -1;
+ }
+	return (0);
+}
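+
+/*
+ * Illustrative sketch (not compiled in): how the routine above resolves
+ * a REL-style R_ARM_RELATIVE entry.  All values here are hypothetical.
+ */
+#if 0
+	Elf_Addr relocbase = 0xc1000000;	/* B: module load address */
+	Elf_Addr *where = (Elf_Addr *)(relocbase + 0x100);	/* r_offset */
+	Elf_Addr addend = *where;	/* A: implicit addend, e.g. 0x2000 */
+
+	*where = relocbase + addend;	/* A + B = 0xc1002000 */
+#endif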
+
+int
+elf_reloc(linker_file_t lf, const void *data, int type)
+{
+
+ return (elf_reloc_internal(lf, data, type, 0));
+}
+
+int
+elf_reloc_local(linker_file_t lf, const void *data, int type)
+{
+
+ return (elf_reloc_internal(lf, data, type, 1));
+}
+
+int
+elf_cpu_load_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
+
+int
+elf_cpu_unload_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
diff --git a/sys/arm/arm/exception.S b/sys/arm/arm/exception.S
new file mode 100644
index 0000000..fb6bded
--- /dev/null
+++ b/sys/arm/arm/exception.S
@@ -0,0 +1,392 @@
+/* $NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * exception.S
+ *
+ * Low level handlers for exception vectors
+ *
+ * Created : 24/09/94
+ *
+ * Based on kate/display/abort.s
+ *
+ */
+
+#include "assym.s"
+
+#include <machine/asm.h>
+#include <machine/armreg.h>
+#include <machine/asmacros.h>
+__FBSDID("$FreeBSD$");
+
+ .text
+ .align 0
+
+AST_ALIGNMENT_FAULT_LOCALS
+
+/*
+ * reset_entry:
+ *
+ * Handler for Reset exception.
+ */
+ASENTRY_NP(reset_entry)
+ adr r0, Lreset_panicmsg
+ adr r1, Lfile
+ mov r2, #__LINE__
+ bl _C_LABEL(__panic)
+ /* NOTREACHED */
+Lreset_panicmsg:
+ .asciz "Reset vector called, LR = 0x%08x"
+Lfile:
+ .asciz __FILE__
+ .balign 4
+
+/*
+ * swi_entry
+ *
+ * Handler for the Software Interrupt exception.
+ */
+ASENTRY_NP(swi_entry)
+ PUSHFRAME
+ ENABLE_ALIGNMENT_FAULTS
+
+ mov r0, sp /* Pass the frame to any function */
+ bl _C_LABEL(swi_handler) /* It's a SWI ! */
+
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAME
+ movs pc, lr /* Exit */
+
+/*
+ * prefetch_abort_entry:
+ *
+ * Handler for the Prefetch Abort exception.
+ */
+ASENTRY_NP(prefetch_abort_entry)
+#ifdef __XSCALE__
+ nop /* Make absolutely sure any pending */
+ nop /* imprecise aborts have occurred. */
+#endif
+ sub lr, lr, #0x00000004 /* Adjust the lr */
+
+ PUSHFRAMEINSVC
+ ENABLE_ALIGNMENT_FAULTS
+ ldr r1, Lprefetch_abort_handler_address
+ adr lr, exception_exit
+ mov r0, sp /* pass the stack pointer as r0 */
+ ldr pc, [r1]
+
+Lprefetch_abort_handler_address:
+ .word _C_LABEL(prefetch_abort_handler_address)
+
+ .data
+ .global _C_LABEL(prefetch_abort_handler_address)
+
+_C_LABEL(prefetch_abort_handler_address):
+ .word abortprefetch
+
+ .text
+abortprefetch:
+ adr r0, abortprefetchmsg
+ adr r1, filee
+ mov r2, #__LINE__
+ b _C_LABEL(__panic)
+
+filee:
+ .asciz __FILE__
+
+abortprefetchmsg:
+ .asciz "abortprefetch"
+ .align 0
+
+/*
+ * data_abort_entry:
+ *
+ * Handler for the Data Abort exception.
+ */
+ASENTRY_NP(data_abort_entry)
+#ifdef __XSCALE__
+ nop /* Make absolutely sure any pending */
+ nop /* imprecise aborts have occurred. */
+#endif
+
+ sub lr, lr, #0x00000008 /* Adjust the lr */
+ PUSHFRAMEINSVC /* Push trap frame and switch */
+ /* to SVC32 mode */
+ ENABLE_ALIGNMENT_FAULTS
+
+ ldr r1, Ldata_abort_handler_address
+ adr lr, exception_exit
+ mov r0, sp /* pass the stack pointer as r0 */
+ ldr pc, [r1]
+Ldata_abort_handler_address:
+ .word _C_LABEL(data_abort_handler_address)
+
+ .data
+ .global _C_LABEL(data_abort_handler_address)
+_C_LABEL(data_abort_handler_address):
+ .word abortdata
+
+ .text
+abortdata:
+ adr r0, abortdatamsg
+ adr r1, file
+ mov r2, #__LINE__
+ b _C_LABEL(__panic)
+
+abortdatamsg:
+ .asciz "abortdata"
+file:
+ .asciz __FILE__
+ .align 0
+
+/*
+ * address_exception_entry:
+ *
+ * Handler for the Address Exception exception.
+ *
+ * NOTE: This exception isn't really used on arm32. We
+ * print a warning message to the console and then treat
+ * it like a Data Abort.
+ */
+ASENTRY_NP(address_exception_entry)
+ mrs r1, cpsr_all
+ mrs r2, spsr_all
+ mov r3, lr
+ adr r0, Laddress_exception_msg
+ bl _C_LABEL(printf) /* XXX CLOBBERS LR!! */
+ b data_abort_entry
+Laddress_exception_msg:
+ .asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n"
+ .balign 4
+
+/*
+ * General exception exit handler
+ * (Placed here to be within range of all the references to it)
+ *
+ * It exits straight away if not returning to USR mode.
+ * This loops around delivering any pending ASTs.
+ * Interrupts are disabled at suitable points to avoid ASTs
+ * being posted between testing and exit to user mode.
+ *
+ * This function uses PULLFRAMEFROMSVCANDEXIT and
+ * DO_AST_AND_RESTORE_ALIGNMENT_FAULTS thus should
+ * only be called if the exception handler used PUSHFRAMEINSVC
+ * followed by ENABLE_ALIGNMENT_FAULTS.
+ */
+
+exception_exit:
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAMEFROMSVCANDEXIT
+
+/*
+ * undefined_entry:
+ *
+ * Handler for the Undefined Instruction exception.
+ *
+ * We indirect the undefined vector via the handler address
+ * in the data area. Entry to the undefined handler must
+ * look like direct entry from the vector.
+ */
+ASENTRY_NP(undefined_entry)
+#ifdef IPKDB
+/*
+ * IPKDB must be hooked in at the earliest possible entry point.
+ */
+/*
+ * Make room for all registers, saving the real r0-r7 and r15.
+ * The remaining registers are updated later.
+ */
+ stmfd sp!, {r0,r1} /* psr & spsr */
+ stmfd sp!, {lr} /* pc */
+ stmfd sp!, {r0-r14} /* r0-r7, r8-r14 */
+/*
+ * Get previous psr.
+ */
+ mrs r7, cpsr_all
+ mrs r0, spsr_all
+ str r0, [sp, #(16*4)]
+/*
+ * Test for user mode.
+ */
+ tst r0, #0xf
+ bne .Lprenotuser_push
+ add r1, sp, #(8*4)
+ stmia r1,{r8-r14}^ /* store user mode r8-r14*/
+ b .Lgoipkdb
+/*
+ * Switch to previous mode to get r8-r13.
+ */
+.Lprenotuser_push:
+ orr r0, r0, #(I32_bit) /* disable interrupts */
+ msr cpsr_all, r0
+ mov r1, r8
+ mov r2, r9
+ mov r3, r10
+ mov r4, r11
+ mov r5, r12
+ mov r6, r13
+ msr cpsr_all, r7 /* back to undefined mode */
+ add r8, sp, #(8*4)
+ stmia r8, {r1-r6} /* r8-r13 */
+/*
+ * Now back to previous mode to get r14 and spsr.
+ */
+ msr cpsr_all, r0
+ mov r1, r14
+ mrs r2, spsr
+ msr cpsr_all, r7 /* back to undefined mode */
+ str r1, [sp, #(14*4)] /* r14 */
+ str r2, [sp, #(17*4)] /* spsr */
+/*
+ * Now to IPKDB.
+ */
+.Lgoipkdb:
+ mov r0, sp
+ bl _C_LABEL(ipkdb_trap_glue)
+ ldr r1, .Lipkdb_trap_return
+ str r0,[r1]
+
+/*
+ * Have to load all registers from the stack.
+ *
+ * Start with spsr and pc.
+ */
+ ldr r0, [sp, #(16*4)] /* spsr */
+ ldr r1, [sp, #(15*4)] /* r15 */
+ msr spsr_all, r0
+ mov r14, r1
+/*
+ * Test for user mode.
+ */
+ tst r0, #0xf
+ bne .Lprenotuser_pull
+ add r1, sp, #(8*4)
+ ldmia r1, {r8-r14}^ /* load user mode r8-r14 */
+ b .Lpull_r0r7
+.Lprenotuser_pull:
+/*
+ * Now previous mode spsr and r14.
+ */
+ ldr r1, [sp, #(17*4)] /* spsr */
+ ldr r2, [sp, #(14*4)] /* r14 */
+ orr r0, r0, #(I32_bit)
+ msr cpsr_all, r0 /* switch to previous mode */
+ msr spsr_all, r1
+ mov r14, r2
+ msr cpsr_all, r7 /* back to undefined mode */
+/*
+ * Now r8-r13.
+ */
+ add r8, sp, #(8*4)
+ ldmia r8, {r1-r6} /* r8-r13 */
+ msr cpsr_all, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+ mov r11, r4
+ mov r12, r5
+ mov r13, r6
+ msr cpsr_all, r7
+.Lpull_r0r7:
+/*
+ * Now the rest of the registers.
+ */
+ ldr r1,Lipkdb_trap_return
+ ldr r0,[r1]
+ tst r0,r0
+ ldmfd sp!, {r0-r7} /* r0-r7 */
+ add sp, sp, #(10*4) /* adjust sp */
+
+/*
+ * Did IPKDB handle it?
+ */
+ movnes pc, lr /* return */
+
+#endif
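+/*
+ * Move the r0/r1 saved on the stack into the indirection area, then
+ * reload r0, r1 and the handler address (into pc) with a single ldmia,
+ * so the handler is entered with sp balanced, as if straight from the
+ * vector.
+ */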
+ stmfd sp!, {r0, r1}
+ ldr r0, Lundefined_handler_indirection
+ ldr r1, [sp], #0x0004
+ str r1, [r0, #0x0000]
+ ldr r1, [sp], #0x0004
+ str r1, [r0, #0x0004]
+ ldmia r0, {r0, r1, pc}
+
+#ifdef IPKDB
+Lipkdb_trap_return:
+ .word Lipkdb_trap_return_data
+#endif
+
+Lundefined_handler_indirection:
+ .word Lundefined_handler_indirection_data
+
+/*
+ * assembly bounce code for calling the kernel
+ * undefined instruction handler. This uses
+ * a standard trap frame and is called in SVC mode.
+ */
+
+ENTRY_NP(undefinedinstruction_bounce)
+ PUSHFRAMEINSVC
+ ENABLE_ALIGNMENT_FAULTS
+
+ mov r0, sp
+ adr lr, exception_exit
+ b _C_LABEL(undefinedinstruction)
+
+ .data
+ .align 0
+
+#ifdef IPKDB
+Lipkdb_trap_return_data:
+ .word 0
+#endif
+
+/*
+ * Indirection data
+ * 2 words used for preserving r0 and r1
+ * 3rd word contains the undefined handler address.
+ */
+
+Lundefined_handler_indirection_data:
+ .word 0
+ .word 0
+
+ .global _C_LABEL(undefined_handler_address)
+_C_LABEL(undefined_handler_address):
+ .word _C_LABEL(undefinedinstruction_bounce)
diff --git a/sys/arm/arm/fiq.c b/sys/arm/arm/fiq.c
new file mode 100644
index 0000000..4cd8d30
--- /dev/null
+++ b/sys/arm/arm/fiq.c
@@ -0,0 +1,169 @@
+/* $NetBSD: fiq.c,v 1.5 2002/04/03 23:33:27 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/cpufunc.h>
+#include <machine/fiq.h>
+#include <vm/vm.h>
+#include <machine/pcb.h>
+#include <vm/pmap.h>
+
+TAILQ_HEAD(, fiqhandler) fiqhandler_stack =
+ TAILQ_HEAD_INITIALIZER(fiqhandler_stack);
+
+extern char fiqvector[];
+extern char fiq_nullhandler[], fiq_nullhandler_end[];
+
+#define IRQ_BIT I32_bit
+#define FIQ_BIT F32_bit
+
+/*
+ * fiq_installhandler:
+ *
+ * Actually install the FIQ handler down at the FIQ vector.
+ *
+ * Note: If the FIQ is invoked via an extra layer of
+ * indirection, the actual FIQ code store lives in the
+ * data segment, so there is no need to manipulate
+ * the vector page's protection.
+ */
+static void
+fiq_installhandler(void *func, size_t size)
+{
+#if !defined(__ARM_FIQ_INDIRECT)
+ vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
+#endif
+
+ memcpy(fiqvector, func, size);
+
+#if !defined(__ARM_FIQ_INDIRECT)
+ vector_page_setprot(VM_PROT_READ);
+ cpu_icache_sync_range((vm_offset_t) fiqvector, size);
+#endif
+}
+
+/*
+ * fiq_claim:
+ *
+ * Claim the FIQ vector.
+ */
+int
+fiq_claim(struct fiqhandler *fh)
+{
+ struct fiqhandler *ofh;
+ u_int oldirqstate;
+ int error = 0;
+
+ if (fh->fh_size > 0x100)
+ return (EFBIG);
+
+ oldirqstate = disable_interrupts(FIQ_BIT);
+
+ if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) {
+ if ((ofh->fh_flags & FH_CANPUSH) == 0) {
+ error = EBUSY;
+ goto out;
+ }
+
+ /* Save the previous FIQ handler's registers. */
+ if (ofh->fh_regs != NULL)
+ fiq_getregs(ofh->fh_regs);
+ }
+
+ /* Set FIQ mode registers to ours. */
+ if (fh->fh_regs != NULL)
+ fiq_setregs(fh->fh_regs);
+
+ TAILQ_INSERT_HEAD(&fiqhandler_stack, fh, fh_list);
+
+ /* Now copy the actual handler into place. */
+ fiq_installhandler(fh->fh_func, fh->fh_size);
+
+ /* Make sure FIQs are enabled when we return. */
+ oldirqstate &= ~FIQ_BIT;
+
+ out:
+ restore_interrupts(oldirqstate);
+ return (error);
+}
+
+/*
+ * fiq_release:
+ *
+ * Release the FIQ vector.
+ */
+void
+fiq_release(struct fiqhandler *fh)
+{
+ u_int oldirqstate;
+ struct fiqhandler *ofh;
+
+ oldirqstate = disable_interrupts(FIQ_BIT);
+
+ /*
+ * If we are the currently active FIQ handler, then we
+ * need to save our registers and pop the next one back
+ * into the vector.
+ */
+ if (fh == TAILQ_FIRST(&fiqhandler_stack)) {
+ if (fh->fh_regs != NULL)
+ fiq_getregs(fh->fh_regs);
+ TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list);
+ if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) {
+ if (ofh->fh_regs != NULL)
+ fiq_setregs(ofh->fh_regs);
+ fiq_installhandler(ofh->fh_func, ofh->fh_size);
+ }
+ } else
+ TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list);
+
+ if (TAILQ_FIRST(&fiqhandler_stack) == NULL) {
+ /* Copy the NULL handler back down into the vector. */
+ fiq_installhandler(fiq_nullhandler,
+ (size_t)(fiq_nullhandler_end - fiq_nullhandler));
+
+ /* Make sure FIQs are disabled when we return. */
+ oldirqstate |= FIQ_BIT;
+ }
+
+ restore_interrupts(oldirqstate);
+}
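+
+/*
+ * Illustrative use of the interface above (not compiled in); the
+ * handler symbols are hypothetical and the field names follow
+ * <machine/fiq.h>.
+ */
+#if 0
+	extern char my_fiq[], my_fiq_end[];	/* hypothetical handler code */
+	static struct fiqhandler fh;
+
+	fh.fh_func = my_fiq;
+	fh.fh_size = (size_t)(my_fiq_end - my_fiq);	/* must be <= 0x100 */
+	fh.fh_flags = 0;
+	fh.fh_regs = NULL;	/* or banked-register values to load */
+	if (fiq_claim(&fh) == 0) {	/* else EBUSY or EFBIG */
+		/* ... FIQs are now live ... */
+		fiq_release(&fh);
+	}
+#endif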
diff --git a/sys/arm/arm/fiq_subr.S b/sys/arm/arm/fiq_subr.S
new file mode 100644
index 0000000..054037f
--- /dev/null
+++ b/sys/arm/arm/fiq_subr.S
@@ -0,0 +1,101 @@
+/* $NetBSD: fiq_subr.S,v 1.3 2002/04/12 18:50:31 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#include <machine/armreg.h>
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * MODE_CHANGE_NOP should be inserted between a mode change and a
+ * banked register (R8--R15) access.
+ */
+#if defined(CPU_ARM2) || defined(CPU_ARM250)
+#define MODE_CHANGE_NOP mov r0, r0
+#else
+#define MODE_CHANGE_NOP /* Data sheet says ARM3 doesn't need it */
+#endif
+
+#define SWITCH_TO_FIQ_MODE \
+ mrs r2, cpsr_all ; \
+ mov r3, r2 ; \
+ bic r2, r2, #(PSR_MODE) ; \
+ orr r2, r2, #(PSR_FIQ32_MODE) ; \
+ msr cpsr_all, r2
+
+#define BACK_TO_SVC_MODE \
+ msr cpsr_all, r3
+
+/*
+ * fiq_getregs:
+ *
+ * Fetch the FIQ mode banked registers into the fiqhandler
+ * structure.
+ */
+ENTRY(fiq_getregs)
+ SWITCH_TO_FIQ_MODE
+
+ stmia r0, {r8-r13}
+
+ BACK_TO_SVC_MODE
+ mov pc, lr
+
+/*
+ * fiq_setregs:
+ *
+ * Load the FIQ mode banked registers from the fiqhandler
+ * structure.
+ */
+ENTRY(fiq_setregs)
+ SWITCH_TO_FIQ_MODE
+
+ ldmia r0, {r8-r13}
+
+ BACK_TO_SVC_MODE
+ mov pc, lr
+
+/*
+ * fiq_nullhandler:
+ *
+ * Null handler copied down to the FIQ vector when the last
+ * FIQ handler is removed.
+ */
+ .global _C_LABEL(fiq_nullhandler), _C_LABEL(fiq_nullhandler_end)
+_C_LABEL(fiq_nullhandler):
+ subs pc, lr, #4
+_C_LABEL(fiq_nullhandler_end):
diff --git a/sys/arm/arm/fusu.S b/sys/arm/arm/fusu.S
new file mode 100644
index 0000000..e610b55
--- /dev/null
+++ b/sys/arm/arm/fusu.S
@@ -0,0 +1,403 @@
+/* $NetBSD: fusu.S,v 1.10 2003/12/01 13:34:44 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 1996-1998 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/asmacros.h>
+#include <machine/armreg.h>
+#include "assym.s"
+__FBSDID("$FreeBSD$");
+
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lcurpcb:
+ .word _C_LABEL(__pcpu) + PC_CURPCB
+#endif
+
+/*
+ * fuword(caddr_t uaddr);
+ * Fetch an int from the user's address space.
+ */
+
+ENTRY(fuword)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, .Lfusufault
+ str r1, [r2, #PCB_ONFAULT]
+
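+	/* "ldrt" performs the load with user-mode access permissions */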
+ ldrt r3, [r0]
+
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov r0, r3
+ mov pc, lr
+
+ENTRY(fuword32)
+	b	_C_LABEL(fuword)	/* tail call; "bl" would drop into fusword */
+/*
+ * fusword(caddr_t uaddr);
+ * Fetch a short from the user's address space.
+ */
+
+ENTRY(fusword)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, .Lfusufault
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrbt r3, [r0], #1
+ ldrbt ip, [r0]
+#ifdef __ARMEB__
+ orr r0, ip, r3, asl #8
+#else
+ orr r0, r3, ip, asl #8
+#endif
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * fuswintr(caddr_t uaddr);
+ * Fetch a short from the user's address space. Can be called during an
+ * interrupt.
+ */
+
+ENTRY(fuswintr)
+ ldr r2, Lblock_userspace_access
+ ldr r2, [r2]
+ teq r2, #0
+ mvnne r0, #0x00000000
+ movne pc, lr
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, _C_LABEL(fusubailout)
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrbt r3, [r0], #1
+ ldrbt ip, [r0]
+#ifdef __ARMEB__
+ orr r0, ip, r3, asl #8
+#else
+ orr r0, r3, ip, asl #8
+#endif
+
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+ .data
+ .align 0
+ .global _C_LABEL(block_userspace_access)
+_C_LABEL(block_userspace_access):
+ .word 0
+ .text
+
+/*
+ * fubyte(caddr_t uaddr);
+ * Fetch a byte from the user's address space.
+ */
+
+ENTRY(fubyte)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, .Lfusufault
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrbt r3, [r0]
+
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov r0, r3
+ mov pc, lr
+
+/*
+ * Handle faults from [fs]u*(). Clean up and return -1.
+ */
+
+.Lfusufault:
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mvn r0, #0x00000000
+ mov pc, lr
+
+/*
+ * Handle faults from [fs]u*(). Clean up and return -1. This differs from
+ * fusufault() in that trap() will recognise it and return immediately rather
+ * than trying to page fault.
+ */
+
+/* label must be global as fault.c references it */
+ .global _C_LABEL(fusubailout)
+_C_LABEL(fusubailout):
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mvn r0, #0x00000000
+ mov pc, lr
+
+#ifdef DIAGNOSTIC
+/*
+ * Handle earlier faults from [fs]u*(), due to no pcb
+ */
+
+.Lfusupcbfault:
+ mov r1, r0
+ adr r0, fusupcbfaulttext
+ b _C_LABEL(panic)
+
+fusupcbfaulttext:
+ .asciz "Yikes - no valid PCB during fusuxxx() addr=%08x\n"
+ .align 0
+#endif
+
+/*
+ * suword(caddr_t uaddr, int x);
+ * Store an int in the user's address space.
+ */
+
+ENTRY(suword)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, .Lfusufault
+ str r3, [r2, #PCB_ONFAULT]
+
+ strt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+ENTRY(suword32)
+	b	_C_LABEL(suword)	/* tail call */
+/*
+ * suswintr(caddr_t uaddr, short x);
+ * Store a short in the user's address space. Can be called during an
+ * interrupt.
+ */
+
+ENTRY(suswintr)
+ ldr r2, Lblock_userspace_access
+ ldr r2, [r2]
+ teq r2, #0
+ mvnne r0, #0x00000000
+ movne pc, lr
+
+#ifdef MULTIPROCESSOR
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, _C_LABEL(fusubailout)
+ str r3, [r2, #PCB_ONFAULT]
+
+#ifdef __ARMEB__
+ mov ip, r1, lsr #8
+ strbt ip, [r0], #1
+#else
+ strbt r1, [r0], #1
+ mov r1, r1, lsr #8
+#endif
+ strbt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * susword(caddr_t uaddr, short x);
+ * Store a short in the user's address space.
+ */
+
+ENTRY(susword)
+#ifdef MULTIPROCESSOR
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, .Lfusufault
+ str r3, [r2, #PCB_ONFAULT]
+
+#ifdef __ARMEB__
+ mov ip, r1, lsr #8
+ strbt ip, [r0], #1
+#else
+ strbt r1, [r0], #1
+ mov r1, r1, lsr #8
+#endif
+ strbt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * subyte(caddr_t uaddr, char x);
+ * Store a byte in the user's address space.
+ */
+
+ENTRY(subyte)
+#ifdef MULTIPROCESSOR
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, .Lfusufault
+ str r3, [r2, #PCB_ONFAULT]
+
+ strbt r1, [r0]
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c
new file mode 100644
index 0000000..4717d89
--- /dev/null
+++ b/sys/arm/arm/genassym.c
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/assym.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/mbuf.h>
+#include <sys/vmmeter.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/vmparam.h>
+#include <machine/armreg.h>
+#include <machine/pcb.h>
+#include <machine/cpu.h>
+#include <machine/proc.h>
+#include <machine/cpufunc.h>
+#include <machine/pte.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/ip_var.h>
+
+ASSYM(KERNBASE, KERNBASE);
+ASSYM(PCB_NOALIGNFLT, PCB_NOALIGNFLT);
+ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr));
+ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
+ASSYM(PCB_CSTATE, offsetof(struct pcb, pcb_cstate));
+ASSYM(PCB_UND_SP, offsetof(struct pcb, un_32.pcb32_und_sp));
+ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir));
+ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec));
+ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec));
+ASSYM(PCB_R8, offsetof(struct pcb, un_32.pcb32_r8));
+ASSYM(PCB_R9, offsetof(struct pcb, un_32.pcb32_r9));
+ASSYM(PCB_R10, offsetof(struct pcb, un_32.pcb32_r10));
+ASSYM(PCB_R11, offsetof(struct pcb, un_32.pcb32_r11));
+ASSYM(PCB_R12, offsetof(struct pcb, un_32.pcb32_r12));
+ASSYM(PCB_PC, offsetof(struct pcb, un_32.pcb32_pc));
+ASSYM(PCB_SP, offsetof(struct pcb, un_32.pcb32_sp));
+
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(M_LEN, offsetof(struct mbuf, m_len));
+ASSYM(M_DATA, offsetof(struct mbuf, m_data));
+ASSYM(M_NEXT, offsetof(struct mbuf, m_next));
+ASSYM(IP_SRC, offsetof(struct ip, ip_src));
+ASSYM(IP_DST, offsetof(struct ip, ip_dst));
+ASSYM(CF_SETTTB, offsetof(struct cpu_functions, cf_setttb));
+ASSYM(CF_CONTROL, offsetof(struct cpu_functions, cf_control));
+ASSYM(CF_CONTEXT_SWITCH, offsetof(struct cpu_functions, cf_context_switch));
+ASSYM(CF_DCACHE_WB_RANGE, offsetof(struct cpu_functions, cf_dcache_wb_range));
+ASSYM(CF_IDCACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_idcache_wbinv_all));
+ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE));
+
+ASSYM(CS_ALL, offsetof(union pmap_cache_state, cs_all));
+ASSYM(CS_CACHE_ID, offsetof(union pmap_cache_state, cs_cache_id));
+ASSYM(CS_TLB_ID, offsetof(union pmap_cache_state, cs_tlb_id));
+ASSYM(CS_CACHE_D, offsetof(union pmap_cache_state, cs_cache_d));
+ASSYM(PMAP_CSTATE, offsetof(struct pmap, pm_cstate));
+ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
+ASSYM(V_SOFT, offsetof(struct vmmeter, v_soft));
+ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
+
+ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_PROC, offsetof(struct thread, td_proc));
+ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
+
+ASSYM(TF_R0, offsetof(struct trapframe, tf_r0));
+ASSYM(TF_R1, offsetof(struct trapframe, tf_r1));
+ASSYM(TF_PC, offsetof(struct trapframe, tf_pc));
+ASSYM(P_UAREA, offsetof(struct proc, p_uarea));
+ASSYM(P_PID, offsetof(struct proc, p_pid));
+ASSYM(P_FLAG, offsetof(struct proc, p_flag));
+
+ASSYM(PDESIZE, PDESIZE);
+ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL);
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(USER_SIZE, sizeof(struct user));
+ASSYM(P_TRACED, P_TRACED);
+ASSYM(P_SIGEVENT, P_SIGEVENT);
+ASSYM(P_PROFIL, P_PROFIL);
+ASSYM(TRAPFRAMESIZE, sizeof(struct trapframe));
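+
+/*
+ * The constants above are extracted into assym.s at build time so the
+ * assembly sources can use them as immediates, e.g. (illustrative):
+ *
+ *	str	r1, [r2, #PCB_ONFAULT]
+ */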
diff --git a/sys/arm/arm/identcpu.c b/sys/arm/arm/identcpu.c
new file mode 100644
index 0000000..22a5d7f
--- /dev/null
+++ b/sys/arm/arm/identcpu.c
@@ -0,0 +1,366 @@
+/* $NetBSD: cpu.c,v 1.55 2004/02/13 11:36:10 wiz Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpu.c
+ *
+ * Probing and configuration for the master CPU
+ *
+ * Created : 10/10/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/time.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <machine/cpu.h>
+
+#include <machine/cpuconf.h>
+
+char machine[] = "arm";
+
+enum cpu_class {
+ CPU_CLASS_NONE,
+ CPU_CLASS_ARM2,
+ CPU_CLASS_ARM2AS,
+ CPU_CLASS_ARM3,
+ CPU_CLASS_ARM6,
+ CPU_CLASS_ARM7,
+ CPU_CLASS_ARM7TDMI,
+ CPU_CLASS_ARM8,
+ CPU_CLASS_ARM9TDMI,
+ CPU_CLASS_ARM9ES,
+ CPU_CLASS_ARM10E,
+ CPU_CLASS_SA1,
+ CPU_CLASS_XSCALE
+};
+
+static const char * const generic_steppings[16] = {
+ "rev 0", "rev 1", "rev 2", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const sa110_steppings[16] = {
+ "rev 0", "step J", "step K", "step S",
+ "step T", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const sa1100_steppings[16] = {
+ "rev 0", "step B", "step C", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "step D", "step E", "rev 10" "step G",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const sa1110_steppings[16] = {
+ "step A-0", "rev 1", "rev 2", "rev 3",
+ "step B-0", "step B-1", "step B-2", "step B-3",
+ "step B-4", "step B-5", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const ixp12x0_steppings[16] = {
+ "(IXP1200 step A)", "(IXP1200 step B)",
+ "rev 2", "(IXP1200 step C)",
+ "(IXP1200 step D)", "(IXP1240/1250 step A)",
+ "(IXP1240 step B)", "(IXP1250 step B)",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const xscale_steppings[16] = {
+ "step A-0", "step A-1", "step B-0", "step C-0",
+ "step D-0", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const i80321_steppings[16] = {
+ "step A-0", "step B-0", "rev 2", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const pxa2x0_steppings[16] = {
+ "step A-0", "step A-1", "step B-0", "step B-1",
+ "step B-2", "step C-0", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const ixp425_steppings[16] = {
+ "step 0", "rev 1", "rev 2", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+struct cpuidtab {
+ u_int32_t cpuid;
+ enum cpu_class cpu_class;
+ const char *cpu_name;
+ const char * const *cpu_steppings;
+};
+
+const struct cpuidtab cpuids[] = {
+ { CPU_ID_ARM2, CPU_CLASS_ARM2, "ARM2",
+ generic_steppings },
+ { CPU_ID_ARM250, CPU_CLASS_ARM2AS, "ARM250",
+ generic_steppings },
+
+ { CPU_ID_ARM3, CPU_CLASS_ARM3, "ARM3",
+ generic_steppings },
+
+ { CPU_ID_ARM600, CPU_CLASS_ARM6, "ARM600",
+ generic_steppings },
+ { CPU_ID_ARM610, CPU_CLASS_ARM6, "ARM610",
+ generic_steppings },
+ { CPU_ID_ARM620, CPU_CLASS_ARM6, "ARM620",
+ generic_steppings },
+
+ { CPU_ID_ARM700, CPU_CLASS_ARM7, "ARM700",
+ generic_steppings },
+ { CPU_ID_ARM710, CPU_CLASS_ARM7, "ARM710",
+ generic_steppings },
+ { CPU_ID_ARM7500, CPU_CLASS_ARM7, "ARM7500",
+ generic_steppings },
+ { CPU_ID_ARM710A, CPU_CLASS_ARM7, "ARM710a",
+ generic_steppings },
+ { CPU_ID_ARM7500FE, CPU_CLASS_ARM7, "ARM7500FE",
+ generic_steppings },
+ { CPU_ID_ARM710T, CPU_CLASS_ARM7TDMI, "ARM710T",
+ generic_steppings },
+ { CPU_ID_ARM720T, CPU_CLASS_ARM7TDMI, "ARM720T",
+ generic_steppings },
+ { CPU_ID_ARM740T8K, CPU_CLASS_ARM7TDMI, "ARM740T (8 KB cache)",
+ generic_steppings },
+ { CPU_ID_ARM740T4K, CPU_CLASS_ARM7TDMI, "ARM740T (4 KB cache)",
+ generic_steppings },
+
+ { CPU_ID_ARM810, CPU_CLASS_ARM8, "ARM810",
+ generic_steppings },
+
+ { CPU_ID_ARM920T, CPU_CLASS_ARM9TDMI, "ARM920T",
+ generic_steppings },
+ { CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T",
+ generic_steppings },
+ { CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T",
+ generic_steppings },
+ { CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S",
+ generic_steppings },
+ { CPU_ID_ARM966ES, CPU_CLASS_ARM9ES, "ARM966E-S",
+ generic_steppings },
+ { CPU_ID_ARM966ESR1, CPU_CLASS_ARM9ES, "ARM966E-S",
+ generic_steppings },
+ { CPU_ID_TI925T, CPU_CLASS_ARM9TDMI, "TI ARM925T",
+ generic_steppings },
+
+ { CPU_ID_ARM1020E, CPU_CLASS_ARM10E, "ARM1020E",
+ generic_steppings },
+ { CPU_ID_ARM1022ES, CPU_CLASS_ARM10E, "ARM1022E-S",
+ generic_steppings },
+
+ { CPU_ID_SA110, CPU_CLASS_SA1, "SA-110",
+ sa110_steppings },
+ { CPU_ID_SA1100, CPU_CLASS_SA1, "SA-1100",
+ sa1100_steppings },
+ { CPU_ID_SA1110, CPU_CLASS_SA1, "SA-1110",
+ sa1110_steppings },
+
+ { CPU_ID_IXP1200, CPU_CLASS_SA1, "IXP1200",
+ ixp12x0_steppings },
+
+ { CPU_ID_80200, CPU_CLASS_XSCALE, "i80200",
+ xscale_steppings },
+
+ { CPU_ID_80321_400, CPU_CLASS_XSCALE, "i80321 400MHz",
+ i80321_steppings },
+ { CPU_ID_80321_600, CPU_CLASS_XSCALE, "i80321 600MHz",
+ i80321_steppings },
+ { CPU_ID_80321_400_B0, CPU_CLASS_XSCALE, "i80321 400MHz",
+ i80321_steppings },
+ { CPU_ID_80321_600_B0, CPU_CLASS_XSCALE, "i80321 600MHz",
+ i80321_steppings },
+
+ { CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250",
+ pxa2x0_steppings },
+ { CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210",
+ pxa2x0_steppings },
+ { CPU_ID_PXA250B, CPU_CLASS_XSCALE, "PXA250",
+ pxa2x0_steppings },
+ { CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210",
+ pxa2x0_steppings },
+ { CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA250",
+ pxa2x0_steppings },
+ { CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210",
+ pxa2x0_steppings },
+
+ { CPU_ID_IXP425_533, CPU_CLASS_XSCALE, "IXP425 533MHz",
+ ixp425_steppings },
+ { CPU_ID_IXP425_400, CPU_CLASS_XSCALE, "IXP425 400MHz",
+ ixp425_steppings },
+ { CPU_ID_IXP425_266, CPU_CLASS_XSCALE, "IXP425 266MHz",
+ ixp425_steppings },
+
+ { 0, CPU_CLASS_NONE, NULL, NULL }
+};
+
+struct cpu_classtab {
+ const char *class_name;
+ const char *class_option;
+};
+
+const struct cpu_classtab cpu_classes[] = {
+ { "unknown", NULL }, /* CPU_CLASS_NONE */
+ { "ARM2", "CPU_ARM2" }, /* CPU_CLASS_ARM2 */
+ { "ARM2as", "CPU_ARM250" }, /* CPU_CLASS_ARM2AS */
+ { "ARM3", "CPU_ARM3" }, /* CPU_CLASS_ARM3 */
+ { "ARM6", "CPU_ARM6" }, /* CPU_CLASS_ARM6 */
+ { "ARM7", "CPU_ARM7" }, /* CPU_CLASS_ARM7 */
+ { "ARM7TDMI", "CPU_ARM7TDMI" }, /* CPU_CLASS_ARM7TDMI */
+ { "ARM8", "CPU_ARM8" }, /* CPU_CLASS_ARM8 */
+ { "ARM9TDMI", NULL }, /* CPU_CLASS_ARM9TDMI */
+ { "ARM9E-S", NULL }, /* CPU_CLASS_ARM9ES */
+ { "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */
+ { "SA-1", "CPU_SA110" }, /* CPU_CLASS_SA1 */
+ { "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */
+};
+
+/*
+ * Report the type of the specified ARM processor.  This uses the generic and
+ * ARM-specific information in the cpu structure to identify the processor.
+ * The remaining fields in the cpu structure are filled in appropriately.
+ */
+
+#if 0
+static const char * const wtnames[] = {
+ "write-through",
+ "write-back",
+ "write-back",
+ "**unknown 3**",
+ "**unknown 4**",
+ "write-back-locking", /* XXX XScale-specific? */
+ "write-back-locking-A",
+ "write-back-locking-B",
+ "**unknown 8**",
+ "**unknown 9**",
+ "**unknown 10**",
+ "**unknown 11**",
+ "**unknown 12**",
+ "**unknown 13**",
+ "**unknown 14**",
+ "**unknown 15**",
+};
+#endif
+
+extern int ctrl;
+void
+identify_arm_cpu(void)
+{
+ u_int cpuid;
+ enum cpu_class cpu_class = CPU_CLASS_NONE;
+ int i;
+
+ cpuid = cpu_id();
+
+ if (cpuid == 0) {
+ printf("Processor failed probe - no CPU ID\n");
+ return;
+ }
+
+ for (i = 0; cpuids[i].cpuid != 0; i++)
+ if (cpuids[i].cpuid == (cpuid & CPU_ID_CPU_MASK)) {
+ cpu_class = cpuids[i].cpu_class;
+ printf("%s %s (%s core)\n",
+ cpuids[i].cpu_name,
+ cpuids[i].cpu_steppings[cpuid &
+ CPU_ID_REVISION_MASK],
+ cpu_classes[cpu_class].class_name);
+ break;
+ }
+ if (cpuids[i].cpuid == 0)
+ printf("unknown CPU (ID = 0x%x)\n", cpuid);
+
+ switch (cpu_class) {
+ case CPU_CLASS_ARM6:
+ case CPU_CLASS_ARM7:
+ case CPU_CLASS_ARM7TDMI:
+ case CPU_CLASS_ARM8:
+ if ((ctrl & CPU_CONTROL_IDC_ENABLE) == 0)
+ printf(" IDC disabled");
+ else
+ printf(" IDC enabled");
+ break;
+ case CPU_CLASS_ARM9TDMI:
+ case CPU_CLASS_ARM10E:
+ case CPU_CLASS_SA1:
+ case CPU_CLASS_XSCALE:
+ if ((ctrl & CPU_CONTROL_DC_ENABLE) == 0)
+ printf(" DC disabled");
+ else
+ printf(" DC enabled");
+ if ((ctrl & CPU_CONTROL_IC_ENABLE) == 0)
+ printf(" IC disabled");
+ else
+ printf(" IC enabled");
+ break;
+ default:
+ break;
+ }
+ if ((ctrl & CPU_CONTROL_WBUF_ENABLE) == 0)
+ printf(" WB disabled");
+ else
+ printf(" WB enabled");
+
+ if (ctrl & CPU_CONTROL_LABT_ENABLE)
+ printf(" LABT");
+ else
+ printf(" EABT");
+
+ if (ctrl & CPU_CONTROL_BPRD_ENABLE)
+ printf(" branch prediction enabled");
+
+ printf("\n");
+}
+
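+/*
+ * Illustrative boot output from the above (values are an example):
+ *
+ *	SA-1110 step B-4 (SA-1 core)
+ *	 DC enabled IC enabled WB enabled EABT
+ */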
diff --git a/sys/arm/arm/in_cksum.c b/sys/arm/arm/in_cksum.c
new file mode 100644
index 0000000..13f252c
--- /dev/null
+++ b/sys/arm/arm/in_cksum.c
@@ -0,0 +1,250 @@
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
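+
+/*
+ * Illustrative folding: REDUCE32 splits the 64-bit accumulator into
+ * four 16-bit words and sums them, so e.g. 0x0001000200030004 folds
+ * to 0x1 + 0x2 + 0x3 + 0x4 = 0xa.  REDUCE16 folds the two remaining
+ * 16-bit halves as well, and ADDCARRY wraps any carry back in, giving
+ * the final 16-bit ones-complement sum.
+ */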
+
+static const u_int32_t in_masks[] = {
+#if 0
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+#else
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0xFF000000, 0xFFFF0000, 0xFFFFFF00, /* offset 0 */
+ 0x00000000, 0x00FF0000, 0x00FFFF00, 0x00FFFFFF, /* offset 1 */
+ 0x00000000, 0x0000FF00, 0x0000FFFF, 0x0000FFFF, /* offset 2 */
+ 0x00000000, 0x000000FF, 0x000000FF, 0x000000FF, /* offset 3 */
+#endif
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+ const u_int32_t *lw = (const u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+	 * Access the prefill word to start the load of the next cache
+	 * line, then add in the current cache line and save the prefill
+	 * result for the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+
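+		/*
+		 * If this chunk starts at the opposite byte parity from
+		 * the data summed so far, its partial sum is shifted left
+		 * by 8 (a byte swap once folded) before being added in.
+		 */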
+ if ((clen ^ (int) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int
+in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
diff --git a/sys/arm/arm/in_cksum_arm.S b/sys/arm/arm/in_cksum_arm.S
new file mode 100644
index 0000000..d0b6452
--- /dev/null
+++ b/sys/arm/arm/in_cksum_arm.S
@@ -0,0 +1,474 @@
+/* $NetBSD: in_cksum_arm.S,v 1.2 2003/09/23 10:01:36 scw Exp $ */
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Hand-optimised in_cksum() and in4_cksum() implementations for ARM/Xscale
+ */
+
+#include "opt_inet.h"
+
+#include <machine/asm.h>
+#include "assym.s"
+__FBSDID("$FreeBSD$");
+
+/*
+ * int in_cksum(struct mbuf *m, int len)
+ *
+ * Entry:
+ * r0 m
+ * r1 len
+ *
+ * NOTE: Assumes 'm' is *never* NULL.
+ */
+/* LINTSTUB: Func: int in_cksum(struct mbuf *, int) */
+ENTRY(in_cksum)
+ stmfd sp!, {r4-r11,lr}
+ mov r8, #0x00
+ mov r9, r1
+ mov r10, #0x00
+ mov ip, r0
+
+.Lin_cksum_loop:
+ ldr r1, [ip, #(M_LEN)]
+ ldr r0, [ip, #(M_DATA)]
+ ldr ip, [ip, #(M_NEXT)]
+.Lin_cksum_entry4:
+ cmp r9, r1
+ movlt r1, r9
+ sub r9, r9, r1
+ eor r11, r10, r0
+ add r10, r10, r1
+ adds r2, r1, #0x00
+ blne _ASM_LABEL(L_cksumdata)
+ tst r11, #0x01
+ movne r2, r2, ror #8
+ adds r8, r8, r2
+ adc r8, r8, #0x00
+ cmp ip, #0x00
+ bne .Lin_cksum_loop
+
+ mov r1, #0xff
+ orr r1, r1, #0xff00
+ and r0, r8, r1
+ add r0, r0, r8, lsr #16
+ add r0, r0, r0, lsr #16
+ and r0, r0, r1
+ eor r0, r0, r1
+ ldmfd sp!, {r4-r11,pc}
+
+
+#ifdef INET
+/*
+ * int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len)
+ *
+ * Entry:
+ * r0 m
+ * r1 nxt
+ * r2 off
+ * r3 len
+ */
+/* LINTSTUB: Func: int in4_cksum(struct mbuf *, u_int8_t, int, int) */
+ENTRY(in4_cksum)
+ stmfd sp!, {r4-r11,lr}
+ mov r8, #0x00 /* Accumulate sum in r8 */
+
+ /*
+ * First, deal with a pseudo header, if present
+ */
+ ldr r6, [r0, #(M_DATA)]
+ cmp r1, #0x00
+ beq .Lin4_cksum_skip_entry
+
+#ifdef __XSCALE__
+ pld [r6, #(IP_SRC)]
+#endif
+ add r4, r6, #(IP_SRC)
+ ands r4, r4, #0x03
+ add r8, r1, r3 /* sum = nxt + len */
+ addne pc, pc, r4, lsl #5 /* Handle alignment of pseudo header */
+ nop
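+	/*
+	 * Each alignment case below is padded to exactly eight
+	 * instructions (32 bytes), so the computed branch above
+	 * (pc reads two instructions ahead) lands on the handler
+	 * matching the alignment in r4.
+	 */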
+
+ /* 0x00: Data 32-bit aligned */
+ ldr r5, [r6, #(IP_SRC)]
+ ldr r4, [r6, #(IP_DST)]
+ b .Lin4_cksum_add_ips
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ /* 0x01: Data 8-bit aligned */
+ ldr r4, [r6, #(IP_SRC - 1)] /* BE:r4 = x012 LE:r4 = 210x */
+ ldr r5, [r6, #(IP_SRC + 3)] /* BE:r5 = 3456 LE:r5 = 6543 */
+ ldrb r7, [r6, #(IP_SRC + 7)] /* r7 = ...7 */
+#ifdef __ARMEB__
+ mov r4, r4, lsl #8 /* r4 = 012. */
+ orr r4, r4, r5, lsr #24 /* r4 = 0123 */
+ orr r5, r7, r5, lsl #8 /* r5 = 4567 */
+ b .Lin4_cksum_add_ips
+ nop
+#else
+ mov r4, r4, lsr #8 /* r4 = .210 */
+ orr r4, r4, r5, lsl #24 /* r4 = 3210 */
+ mov r5, r5, lsr #8 /* r5 = .654 */
+ orr r5, r5, r7, lsl #24 /* r5 = 7654 */
+ b .Lin4_cksum_add_ips
+#endif
+
+ /* 0x02: Data 16-bit aligned */
+#ifdef __XSCALE__
+ ldrh r5, [r6, #(IP_SRC)] /* BE:r5 = ..01 LE:r5 = ..10 */
+ ldrh r7, [r6, #(IP_DST + 2)] /* BE:r7 = ..67 LE:r7 = ..76 */
+ ldr r4, [r6, #(IP_SRC + 2)] /* BE:r4 = 2345 LE:r4 = 5432 */
+ orr r5, r7, r5, lsl #16 /* BE:r5 = 0167 LE:r5 = 1076 */
+ b .Lin4_cksum_add_ips
+ nop
+ nop
+ nop
+#else
+ ldr r4, [r6, #(IP_SRC - 2)] /* r4 = 10xx */
+ ldr r7, [r6, #(IP_DST - 2)] /* r7 = xx76 */
+ ldr r5, [r6, #(IP_SRC + 2)] /* r5 = 5432 */
+ mov r4, r4, lsr #16 /* r4 = ..10 */
+ orr r4, r4, r7, lsl #16 /* r4 = 7610 */
+ b .Lin4_cksum_add_ips
+ nop
+ nop
+#endif
+
+ /* 0x03: Data 8-bit aligned */
+ ldrb r4, [r6, #(IP_SRC)] /* r4 = ...0 */
+ ldr r5, [r6, #(IP_SRC + 1)] /* BE:r5 = 1234 LE:r5 = 4321 */
+ ldr r7, [r6, #(IP_SRC + 5)] /* BE:r7 = 567x LE:r7 = x765 */
+#ifdef __ARMEB__
+ mov r4, r4, lsl #24 /* r4 = 0... */
+ orr r4, r4, r5, lsr #8 /* r4 = 0123 */
+ mov r5, r5, lsl #24 /* r5 = 4... */
+ orr r5, r5, r7, lsr #8 /* r5 = 4567 */
+#else
+ orr r4, r4, r5, lsl #8 /* r4 = 3210 */
+	mov	r5, r5, lsr #24		/* r5 = ...4 */
+ orr r5, r5, r7, lsl #8 /* r5 = 7654 */
+#endif
+ /* FALLTHROUGH */
+
+.Lin4_cksum_add_ips:
+ adds r5, r5, r4
+#ifndef __ARMEB__
+ adcs r8, r5, r8, lsl #8
+#else
+ adcs r8, r5, r8
+#endif
+ adc r8, r8, #0x00
+ mov r1, #0x00
+ b .Lin4_cksum_skip_entry
+
+.Lin4_cksum_skip_loop:
+ ldr r1, [r0, #(M_LEN)]
+ ldr r6, [r0, #(M_DATA)]
+ ldr r0, [r0, #(M_NEXT)]
+.Lin4_cksum_skip_entry:
+ subs r2, r2, r1
+ blt .Lin4_cksum_skip_done
+ cmp r0, #0x00
+ bne .Lin4_cksum_skip_loop
+ b .Lin4_cksum_whoops
+
+.Lin4_cksum_skip_done:
+ mov ip, r0
+ add r0, r2, r6
+ add r0, r0, r1
+ rsb r1, r2, #0x00
+ mov r9, r3
+ mov r10, #0x00
+ b .Lin_cksum_entry4
+
+.Lin4_cksum_whoops:
+ adr r0, .Lin4_cksum_whoops_str
+ adr r1, .LFile
+ mov r2, #__LINE__
+ bl _C_LABEL(__panic)
+.LFile:
+ .asciz __FILE__
+.Lin4_cksum_whoops_str:
+ .asciz "in4_cksum: out of mbufs\n"
+ .align 5
+#endif /* INET */
+
+
+/*
+ * The main in*_cksum() workhorse...
+ *
+ * Entry parameters:
+ * r0 Pointer to buffer
+ * r1 Buffer length
+ * lr Return address
+ *
+ * Returns:
+ * r2 Accumulated 32-bit sum
+ *
+ * Clobbers:
+ * r0-r7
+ */
+/* LINTSTUB: Ignore */
+ASENTRY_NP(L_cksumdata)
+#ifdef __XSCALE__
+ pld [r0] /* Pre-fetch the start of the buffer */
+#endif
+ mov r2, #0
+
+ /* We first have to word-align the buffer. */
+ ands r7, r0, #0x03
+ beq .Lcksumdata_wordaligned
+ rsb r7, r7, #0x04
+ cmp r1, r7 /* Enough bytes left to make it? */
+ blt .Lcksumdata_endgame
+ cmp r7, #0x02
+ ldrb r4, [r0], #0x01 /* Fetch 1st byte */
+ ldrgeb r5, [r0], #0x01 /* Fetch 2nd byte */
+ movlt r5, #0x00
+ ldrgtb r6, [r0], #0x01 /* Fetch 3rd byte */
+ movle r6, #0x00
+ /* Combine the three bytes depending on endianness and alignment */
+#ifdef __ARMEB__
+ orreq r2, r5, r4, lsl #8
+ orreq r2, r2, r6, lsl #24
+ orrne r2, r4, r5, lsl #8
+ orrne r2, r2, r6, lsl #16
+#else
+ orreq r2, r4, r5, lsl #8
+ orreq r2, r2, r6, lsl #16
+ orrne r2, r5, r4, lsl #8
+ orrne r2, r2, r6, lsl #24
+#endif
+ subs r1, r1, r7 /* Update length */
+ moveq pc, lr /* All done? */
+
+ /* Buffer is now word aligned */
+.Lcksumdata_wordaligned:
+#ifdef __XSCALE__
+ cmp r1, #0x04 /* Less than 4 bytes left? */
+ blt .Lcksumdata_endgame /* Yup */
+
+ /* Now quad-align, if necessary */
+ ands r7, r0, #0x04
+ ldrne r7, [r0], #0x04
+ subne r1, r1, #0x04
+ subs r1, r1, #0x40
+ blt .Lcksumdata_bigloop_end /* Note: C flag clear if branch taken */
+
+ /*
+ * Buffer is now quad aligned. Sum 64 bytes at a time.
+ * Note: First ldrd is hoisted above the loop, together with
+ * setting r6 to zero to avoid stalling for results in the
+ * loop. (r7 is live, from above).
+ */
+ ldrd r4, [r0], #0x08
+ mov r6, #0x00
+.Lcksumdata_bigloop:
+ pld [r0, #0x18]
+ adds r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ pld [r0, #0x18]
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adc r2, r2, #0x00
+ subs r1, r1, #0x40
+ ldrged r4, [r0], #0x08
+ bge .Lcksumdata_bigloop
+
+ adds r2, r2, r6 /* r6/r7 still need summing */
+.Lcksumdata_bigloop_end:
+ adcs r2, r2, r7
+ adc r2, r2, #0x00
+
+#else /* !__XSCALE__ */
+
+ subs r1, r1, #0x40
+ blt .Lcksumdata_bigloop_end
+
+.Lcksumdata_bigloop:
+ ldmia r0!, {r3, r4, r5, r6}
+ adds r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r7}
+ adcs r2, r2, r6
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r6}
+ adcs r2, r2, r7
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r7}
+ adcs r2, r2, r6
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r7
+ adc r2, r2, #0x00
+ subs r1, r1, #0x40
+ bge .Lcksumdata_bigloop
+.Lcksumdata_bigloop_end:
+#endif
+
+ adds r1, r1, #0x40
+ moveq pc, lr
+ cmp r1, #0x20
+
+#ifdef __XSCALE__
+ ldrged r4, [r0], #0x08 /* Avoid stalling pld and result */
+ blt .Lcksumdata_less_than_32
+ pld [r0, #0x18]
+ ldrd r6, [r0], #0x08
+ adds r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r6 /* XXX: Unavoidable result stall */
+ adcs r2, r2, r7
+#else
+ blt .Lcksumdata_less_than_32
+ ldmia r0!, {r3, r4, r5, r6}
+ adds r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r7}
+ adcs r2, r2, r6
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r7
+#endif
+ adc r2, r2, #0x00
+ subs r1, r1, #0x20
+ moveq pc, lr
+
+.Lcksumdata_less_than_32:
+ /* There are less than 32 bytes left */
+ and r3, r1, #0x18
+ rsb r4, r3, #0x18
+ sub r1, r1, r3
+ adds r4, r4, r4, lsr #1 /* Side effect: Clear carry flag */
+ addne pc, pc, r4
+ nop
+
+/*
+ * Note: We use ldm here, even on Xscale, since the combined issue/result
+ * latencies for ldm and ldrd are the same. Using ldm avoids needless #ifdefs.
+ */
+ /* At least 24 bytes remaining... */
+ ldmia r0!, {r4, r5}
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+
+ /* At least 16 bytes remaining... */
+ ldmia r0!, {r4, r5}
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+
+ /* At least 8 bytes remaining... */
+ ldmia r0!, {r4, r5}
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+
+ /* Less than 8 bytes remaining... */
+ adc r2, r2, #0x00
+ subs r1, r1, #0x04
+ blt .Lcksumdata_lessthan4
+
+ ldr r4, [r0], #0x04
+ sub r1, r1, #0x04
+ adds r2, r2, r4
+ adc r2, r2, #0x00
+
+ /* Deal with < 4 bytes remaining */
+.Lcksumdata_lessthan4:
+ adds r1, r1, #0x04
+ moveq pc, lr
+
+ /* Deal with 1 to 3 remaining bytes, possibly misaligned */
+.Lcksumdata_endgame:
+ ldrb r3, [r0] /* Fetch first byte */
+ cmp r1, #0x02
+ ldrgeb r4, [r0, #0x01] /* Fetch 2nd and 3rd as necessary */
+ movlt r4, #0x00
+ ldrgtb r5, [r0, #0x02]
+ movle r5, #0x00
+ /* Combine the three bytes depending on endianness and alignment */
+ tst r0, #0x01
+#ifdef __ARMEB__
+ orreq r3, r4, r3, lsl #8
+ orreq r3, r3, r5, lsl #24
+ orrne r3, r3, r4, lsl #8
+ orrne r3, r3, r5, lsl #16
+#else
+ orreq r3, r3, r4, lsl #8
+ orreq r3, r3, r5, lsl #16
+ orrne r3, r4, r3, lsl #8
+ orrne r3, r3, r5, lsl #24
+#endif
+ adds r2, r2, r3
+ adc r2, r2, #0x00
+ mov pc, lr
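
The epilogue of in_cksum() above reduces the 32-bit accumulator held in r8
to a 16-bit ones' complement checksum. A minimal C sketch of that fold
(illustrative only, not part of the patch):

	#include <stdint.h>

	/*
	 * Fold a 32-bit ones' complement accumulator to 16 bits, as the
	 * in_cksum() epilogue does with r8: add the high half into the
	 * low half twice, so any carry out of the first addition is
	 * re-absorbed, then invert the result.
	 */
	static uint16_t
	cksum_fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry */
		return ((uint16_t)~sum);		/* final complement */
	}

The final "eor r0, r0, r1" is this complement, done against the 0xffff mask
already sitting in r1, and the "ror #8" in the loop compensates for mbufs
whose data starts at an odd byte offset.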
diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c
new file mode 100644
index 0000000..282f43f
--- /dev/null
+++ b/sys/arm/arm/intr.c
@@ -0,0 +1,150 @@
+/* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */
+
+/*
+ * Copyright (c) 2004 Olivier Houchard.
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Soft interrupt and other generic interrupt functions.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/syslog.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/conf.h>
+#include <machine/atomic.h>
+#include <machine/intr.h>
+#include <machine/cpu.h>
+
+int current_spl_level = _SPL_SERIAL;
+
+u_int spl_masks[_SPL_LEVELS + 1];
+u_int spl_smasks[_SPL_LEVELS];
+extern u_int irqmasks[];
+
+#define NIRQ 0x20 /* XXX */
+struct ithd *ithreads[NIRQ];
+void
+set_splmasks(void)
+{
+ int loop;
+
+ for (loop = 0; loop < _SPL_LEVELS; ++loop) {
+ spl_masks[loop] = 0xffffffff;
+ spl_smasks[loop] = 1;
+ }
+
+ spl_masks[_SPL_NET] = irqmasks[IPL_NET];
+ spl_masks[_SPL_SOFTSERIAL] = irqmasks[IPL_TTY];
+ spl_masks[_SPL_TTY] = irqmasks[IPL_TTY];
+ spl_masks[_SPL_VM] = irqmasks[IPL_VM];
+ spl_masks[_SPL_AUDIO] = irqmasks[IPL_AUDIO];
+ spl_masks[_SPL_CLOCK] = irqmasks[IPL_CLOCK];
+#ifdef IPL_STATCLOCK
+ spl_masks[_SPL_STATCLOCK] = irqmasks[IPL_STATCLOCK];
+#else
+ spl_masks[_SPL_STATCLOCK] = irqmasks[IPL_CLOCK];
+#endif
+ spl_masks[_SPL_HIGH] = irqmasks[IPL_HIGH];
+ spl_masks[_SPL_SERIAL] = irqmasks[IPL_SERIAL];
+ spl_masks[_SPL_LEVELS] = 0;
+
+ spl_smasks[_SPL_0] = 0xffffffff;
+ for (loop = 0; loop < _SPL_SOFTSERIAL; ++loop)
+ spl_smasks[loop] |= SOFTIRQ_BIT(SOFTIRQ_SERIAL);
+ for (loop = 0; loop < _SPL_SOFTNET; ++loop)
+ spl_smasks[loop] |= SOFTIRQ_BIT(SOFTIRQ_NET);
+ for (loop = 0; loop < _SPL_SOFTCLOCK; ++loop)
+ spl_smasks[loop] |= SOFTIRQ_BIT(SOFTIRQ_CLOCK);
+}
+
+void arm_setup_irqhandler(const char *name, void (*hand)(void*), void *arg,
+ int irq, int flags, void **cookiep)
+{
+ struct ithd *cur_ith;
+ int error;
+
+ if (irq < 0 || irq >= NIRQ)
+ return;
+ cur_ith = ithreads[irq];
+ if (cur_ith == NULL) {
+ error = ithread_create(&cur_ith, irq, 0, NULL, NULL, "intr%d:",
+ irq);
+ if (error)
+ return;
+ ithreads[irq] = cur_ith;
+ }
+ ithread_add_handler(cur_ith, name, hand, arg, ithread_priority(flags),
+ flags, cookiep);
+}
+
+void dosoftints(void);
+void
+dosoftints(void)
+{
+}
+
+void
+arm_handler_execute(void *);
+void
+arm_handler_execute(void *irq)
+{
+ struct ithd *ithd;
+ int i;
+ int irqnb = (int)irq;
+ struct intrhand *ih;
+
+ for (i = 0; i < NIRQ; i++) {
+		if ((1 << i) & irqnb) {
+			ithd = ithreads[i];
+			if (!ithd)	/* no handler thread for this IRQ */
+				return;
+ ih = TAILQ_FIRST(&ithd->it_handlers);
+ if (ih && ih->ih_flags & IH_FAST) {
+ TAILQ_FOREACH(ih, &ithd->it_handlers,
+ ih_next) {
+ ih->ih_handler(ih->ih_argument);
+ /*
+ * XXX: what about the irq frame if
+ * the arg is NULL ?
+ */
+ }
+ } else if (ih) {
+ ithread_schedule(ithd, !cold);
+ }
+ }
+ }
+}
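
For reference, a hypothetical caller of arm_setup_irqhandler() above; the
driver name, softc type and IRQ number are invented for illustration:

	struct mydev_softc;		/* hypothetical driver state */

	static void
	mydev_intr(void *arg)
	{
		struct mydev_softc *sc = arg;

		(void)sc;	/* acknowledge and service the device here */
	}

	static void
	mydev_hook_irq(struct mydev_softc *sc)
	{
		void *cookie;

		arm_setup_irqhandler("mydev0", mydev_intr, sc, 17,
		    INTR_TYPE_MISC, &cookie);
	}

ithread_priority() translates the INTR_TYPE_* flag into a scheduling
priority, and the handler then runs from the per-IRQ ithread that is
created on first registration.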
diff --git a/sys/arm/arm/irq_dispatch.S b/sys/arm/arm/irq_dispatch.S
new file mode 100644
index 0000000..8b4b325
--- /dev/null
+++ b/sys/arm/arm/irq_dispatch.S
@@ -0,0 +1,155 @@
+/* $NetBSD: irq_dispatch.S,v 1.5 2003/10/30 08:57:24 scw Exp $ */
+
+/*
+ * Copyright (c) 2002 Fujitsu Component Limited
+ * Copyright (c) 2002 Genetec Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of The Fujitsu Component Limited nor the name of
+ * Genetec corporation may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY FUJITSU COMPONENT LIMITED AND GENETEC
+ * CORPORATION ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL FUJITSU COMPONENT LIMITED OR GENETEC
+ * CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.s"
+#include <machine/asm.h>
+#include <machine/asmacros.h>
+#include <machine/armreg.h>
+__FBSDID("$FreeBSD$");
+#if 0
+#ifdef ARM_INTR_IMPL
+#include ARM_INTR_IMPL
+#else
+#error ARM_INTR_IMPL not defined
+#endif
+
+#ifndef ARM_IRQ_HANDLER
+#error ARM_IRQ_HANDLER not defined
+#endif
+#endif
+/*
+ * irq_entry:
+ * Main entry point for the IRQ vector. This is a generic version
+ * which can be used by different platforms.
+ */
+ .text
+ .align 0
+.Lcurrent_intr_depth:
+ .word _C_LABEL(current_intr_depth)
+
+AST_ALIGNMENT_FAULT_LOCALS
+
+ASENTRY_NP(irq_entry)
+ sub lr, lr, #0x00000004 /* Adjust the lr */
+
+
+ PUSHFRAMEINSVC /* Push an interrupt frame */
+ ENABLE_ALIGNMENT_FAULTS
+ ldr r1, .Laflt_curpcb
+
+ /*
+ * Increment the interrupt nesting depth and call the interrupt
+ * dispatch routine. We've pushed a frame, so we can safely use
+ * callee-saved regs here. We use the following registers, which
+	 * we expect to persist:
+ *
+ * r5 address of `current_intr_depth' variable
+ * r6 old value of `current_intr_depth'
+ */
+ ldr r5, .Lcurrent_intr_depth
+ mov r0, sp /* arg for dispatcher */
+ ldr r6, [r5]
+ add r1, r6, #1
+ str r1, [r5]
+
+ #if 0
+ bl ARM_IRQ_HANDLER
+ #endif
+
+ /*
+ * Restore the old interrupt depth value (which should be the
+ * same as decrementing it at this point).
+ */
+ str r6, [r5]
+
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAMEFROMSVCANDEXIT
+ movs pc, lr /* Exit */
+
+ .bss
+ .align 0
+
+ .global _C_LABEL(current_intr_depth)
+_C_LABEL(current_intr_depth):
+ .word 0
+
+ /*
+ * XXX Provide intrnames/intrcnt for legacy code, but
+ * don't actually use them.
+ */
+
+ .global _C_LABEL(intrnames), _C_LABEL(eintrnames)
+ .global _C_LABEL(intrcnt), _C_LABEL(eintrcnt)
+_C_LABEL(intrnames):
+_C_LABEL(eintrnames):
+
+ .global _C_LABEL(intrcnt), _C_LABEL(sintrcnt), _C_LABEL(eintrcnt)
+_C_LABEL(intrcnt):
+_C_LABEL(eintrcnt):
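
The save/restore of current_intr_depth in irq_entry amounts to the
following C (a sketch; the actual dispatch call is still stubbed out
behind #if 0 above):

	extern unsigned int current_intr_depth;

	void
	irq_entry_sketch(void *frame)
	{
		unsigned int odepth = current_intr_depth;	/* r6 */

		current_intr_depth = odepth + 1;	/* one level deeper */
		/* ARM_IRQ_HANDLER would be called here with the frame */
		current_intr_depth = odepth;	/* same as decrementing */
	}

Restoring the saved value instead of re-reading and decrementing saves a
load on the exit path.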
diff --git a/sys/arm/arm/locore.S b/sys/arm/arm/locore.S
new file mode 100644
index 0000000..ff569e8
--- /dev/null
+++ b/sys/arm/arm/locore.S
@@ -0,0 +1,312 @@
+/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "assym.s"
+#include <machine/asm.h>
+#include <machine/armreg.h>
+#include <machine/pte.h>
+__FBSDID("$FreeBSD$");
+
+/* What size should this really be? It is only used by initarm(). */
+#define INIT_ARM_STACK_SIZE 2048
+
+/*
+ * This is for kvm_mkdb, and should be the address of the beginning
+ * of the kernel text segment (not necessarily the same as kernbase).
+ */
+
+
+#define CPWAIT_BRANCH \
+ sub pc, pc, #4
+
+#define CPWAIT(tmp) \
+ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
+ mov tmp, tmp /* wait for it to complete */ ;\
+ CPWAIT_BRANCH /* branch to next insn */
+
+ .text
+ .align 0
+.globl kernbase
+.set kernbase,KERNBASE
+
+ENTRY_NP(btext)
+
+ASENTRY_NP(_start)
+	/* Check if we are running from RAM; if not, move ourselves to RAM */
+ cmp pc, #KERNPHYSADDR
+ bhi start_inram /* XXX: This is wrong */
+
+	/* Move ourselves to RAM.
+	 * XXX: we could use memcpy if it were PIC.
+	 */
+ ldr r1, Lcopy_size
+ adr r0, _C_LABEL(_start)
+ add r1, r1, #3
+ mov r1, r1, LSR #2
+ mov r2, #KERNPHYSADDR
+ add r2, r2, #0x00200000
+ mov r4, r2
+
+5: ldr r3,[r0],#4
+ str r3,[r2],#4
+ subs r1,r1,#1
+ bhi 5b
+
+ /* Jump to RAM */
+ ldr r0, Lstart_off
+ add pc, r4, r0
+
+Lcopy_size: .word _edata-_C_LABEL(_start)
+Lstart_off: .word start_inram-_C_LABEL(_start)
+start_inram:
+#ifdef STARTUP_PAGETABLE_ADDR
+ adr r4, mmu_init_table2
+
+ mrc p15, 0, r2, c1, c0, 0
+ tst r2, #CPU_CONTROL_MMU_ENABLE /* we already have a page table? */
+ bne 3f
+
+ /* build page table from scratch */
+ ldr r0, Lstartup_pagetable
+ adr r4, mmu_init_table
+ b 3f
+
+2:
+ str r3, [r0, r2]
+ add r2, r2, #4
+ add r3, r3, #(L1_S_SIZE)
+ adds r1, r1, #-1
+ bhi 2b
+3:
+ ldmia r4!, {r1,r2,r3} /* # of sections, PA|attr, VA */
+ cmp r1, #0
+ bne 2b
+
+ mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
+ mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
+
+ /* Set the Domain Access register. Very important! */
+ mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
+ mcr p15, 0, r0, c3, c0, 0
+
+ /* Enable MMU */
+ mrc p15, 0, r0, c1, c0, 0
+ orr r0, r0, #CPU_CONTROL_MMU_ENABLE
+ mcr p15, 0, r0, c1, c0, 0
+ CPWAIT(r0)
+
+ bl mmu_done
+
+mmu_done:
+#endif
+ adr r1, .Lstart
+ ldmia r1, {r1, r2, sp} /* Set initial stack and */
+ sub r2, r2, r1 /* get zero init data */
+ mov r3, #0
+
+.L1:
+ str r3, [r1], #0x0004 /* Zero the bss */
+ subs r2, r2, #4
+ bgt .L1
+
+ mov fp, #0xc0000000 /* trace back starts here */
+ bl _C_LABEL(initarm) /* Off we go */
+
+	/* initarm() returns the new stack pointer. */
+ mov sp, r0
+ mov fp, #0x00000000 /* trace back starts here */
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+
+ bl _C_LABEL(mi_startup) /* call mi_startup()! */
+
+ adr r0, .Lmainreturned
+ adr r1, .LFile
+ mov r2, #__LINE__
+ b _C_LABEL(__panic)
+	/* NOTREACHED */
+#ifdef STARTUP_PAGETABLE_ADDR
+#define MMU_INIT(va,pa,n_sec,attr) \
+ .word n_sec ; \
+ .word 4*((va)>>L1_S_SHIFT) ; \
+ .word (pa)|(attr) ;
+
+Lstartup_pagetable:
+ .word STARTUP_PAGETABLE_ADDR
+mmu_init_table:
+ /* fill all table VA==PA */
+ MMU_INIT(0x00000000, 0x00000000, 1<<(32-L1_S_SHIFT), L1_TYPE_S|L1_S_AP(AP_KRW))
+ /* map SDRAM VA==PA, WT cacheable */
+ MMU_INIT(KERNPHYSADDR, KERNPHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
+mmu_init_table2:
+ /* map VA 0xc0000000..0xc3ffffff to PA 0xa0000000..0xa3ffffff */
+ MMU_INIT(0xc0000000, KERNPHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
+
+ .word 0 /* end of table */
+#endif
+.Lstart:
+ .word _edata
+ .word _end
+ .word svcstk + INIT_ARM_STACK_SIZE
+
+.LFile:
+ .asciz __FILE__
+.Lmainreturned:
+ .asciz "main() returned"
+ .align 0
+
+ .bss
+svcstk:
+ .space INIT_ARM_STACK_SIZE
+
+ .text
+ .align 0
+
+#ifndef OFW
+	/* OFW-based systems will use OF_boot() */
+
+.Lcpufuncs:
+ .word _C_LABEL(cpufuncs)
+
+ENTRY_NP(cpu_reset)
+ mrs r2, cpsr
+ bic r2, r2, #(PSR_MODE)
+ orr r2, r2, #(PSR_SVC32_MODE)
+ orr r2, r2, #(I32_bit | F32_bit)
+ msr cpsr_all, r2
+
+ ldr r4, .Lcpu_reset_address
+ ldr r4, [r4]
+
+ ldr r0, .Lcpufuncs
+ mov lr, pc
+ ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
+
+ /*
+ * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
+ * necessary.
+ */
+
+ ldr r1, .Lcpu_reset_needs_v4_MMU_disable
+ ldr r1, [r1]
+ cmp r1, #0
+ mov r2, #0
+
+ /*
+ * MMU & IDC off, 32 bit program & data space
+ * Hurl ourselves into the ROM
+ */
+ mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
+ mcr 15, 0, r0, c1, c0, 0
+ mcrne 15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */
+ mov pc, r4
+
+ /*
+ * _cpu_reset_address contains the address to branch to, to complete
+ * the cpu reset after turning the MMU off
+ * This variable is provided by the hardware specific code
+ */
+.Lcpu_reset_address:
+ .word _C_LABEL(cpu_reset_address)
+
+ /*
+ * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
+ * v4 MMU disable instruction needs executing... it is an illegal instruction
+ * on e.g. ARM6/7 that locks up the computer in an endless illegal
+ * instruction / data-abort / reset loop.
+ */
+.Lcpu_reset_needs_v4_MMU_disable:
+ .word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
+
+#endif /* OFW */
+
+#ifdef IPKDB
+/*
+ * Execute(inst, psr, args, sp)
+ *
+ * Execute INSTruction with PSR and ARGS[0] - ARGS[3] making
+ * available stack at SP for next undefined instruction trap.
+ *
+ * Move the instruction onto the stack and jump to it.
+ */
+ENTRY_NP(Execute)
+ mov ip, sp
+ stmfd sp!, {r2, r4-r7, fp, ip, lr, pc}
+ sub fp, ip, #4
+ mov ip, r3
+ ldr r7, .Lreturn
+ stmfd sp!, {r0, r7}
+ adr r7, #.LExec
+ mov r5, r1
+ mrs r4, cpsr
+ ldmia r2, {r0-r3}
+ mov r6, sp
+ mov sp, ip
+ msr cpsr_all, r5
+ mov pc, r6
+.LExec:
+ mrs r5, cpsr
+/* XXX Cannot switch back from user mode this easily */
+ msr cpsr_all, r4
+ add sp, r6, #8
+ ldmfd sp!, {r6}
+ stmia r6, {r0-r3}
+ mov r0, r5
+ ldmdb fp, {r4-r7, fp, sp, pc}
+.Lreturn:
+ mov pc, r7
+#endif
+
+/*
+ * setjmp + longjmp
+ */
+ENTRY(setjmp)
+ stmia r0, {r4-r14}
+ mov r0, #0x00000000
+ mov pc, lr
+
+ENTRY(longjmp)
+ ldmia r0, {r4-r14}
+ mov r0, #0x00000001
+ mov pc, lr
+
+ .data
+ .global _C_LABEL(esym)
+_C_LABEL(esym): .word _C_LABEL(end)
+
+ENTRY_NP(abort)
+ b _C_LABEL(abort)
+
+/* End of locore.S */
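
The mmu_init_table records above encode (section count, L1 byte offset,
PA | attributes), and the walker at labels 2:/3: is equivalent to this C
sketch (struct and function names invented for illustration):

	#include <stdint.h>

	#define L1_S_SHIFT	20			/* 1MB sections */
	#define L1_S_SIZE	(1U << L1_S_SHIFT)

	struct mmu_init_rec {
		uint32_t n_sec;		/* number of 1MB sections */
		uint32_t l1_off;	/* 4 * (va >> L1_S_SHIFT) */
		uint32_t pa_attr;	/* pa | section attribute bits */
	};

	static void
	build_l1_table(uint32_t *l1, const struct mmu_init_rec *rec)
	{
		for (; rec->n_sec != 0; rec++) {
			uint32_t off = rec->l1_off;
			uint32_t desc = rec->pa_attr;
			uint32_t n;

			for (n = rec->n_sec; n > 0; n--) {
				l1[off / 4] = desc;	/* one descriptor */
				off += 4;
				desc += L1_S_SIZE;	/* next 1MB of PA */
			}
		}
	}

Each MMU_INIT() line expands to exactly one such record, and the table is
terminated by the ".word 0" sentinel.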
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
new file mode 100644
index 0000000..73000f7
--- /dev/null
+++ b/sys/arm/arm/machdep.c
@@ -0,0 +1,409 @@
+/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
+
+/*
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Machine dependent functions for kernel setup
+ *
+ * Created : 17/09/94
+ * Updated : 18/04/01 updated for new wscons
+ */
+
+#include "opt_compat.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/signalvar.h>
+#include <sys/imgact.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/cons.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/exec.h>
+#include <machine/reg.h>
+#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_map.h>
+#include <vm/vnode_pager.h>
+#include <machine/pmap.h>
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <machine/undefined.h>
+#include <machine/machdep.h>
+#include <machine/metadata.h>
+#include <machine/armreg.h>
+
+#define MDROOT_ADDR 0xd0400000
+
+uint32_t cpu_reset_address = 0;
+int cold = 1;
+int astpending = 0;
+vm_offset_t vector_page;
+
+static void *
+getframe(struct thread *td, int sig, int *onstack)
+{
+ struct trapframe *tf = td->td_frame;
+
+ *onstack = sigonstack(tf->tf_usr_sp);
+ if (*onstack)
+ return (void*)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size);
+ return (void*)(tf->tf_usr_sp);
+}
+
+void
+sendsig(catcher, sig, mask, code)
+ sig_t catcher;
+ int sig;
+ sigset_t *mask;
+ u_long code;
+{
+ struct thread *td = curthread;
+ struct trapframe *tf = td->td_frame;
+ struct sigframe *fp, frame;
+ struct sigacts *psp = td->td_proc->p_sigacts;
+ int onstack;
+
+ fp = getframe(td, sig, &onstack);
+ /* make room on the stack */
+ fp--;
+
+ /* make the stack aligned */
+	fp = (struct sigframe *)_ALIGN(fp);
+ /* Populate the siginfo frame. */
+ frame.sf_si.si_signo = sig;
+ frame.sf_si.si_code = code;
+ frame.sf_uc.uc_sigmask = *mask;
+ frame.sf_uc.uc_link = NULL;
+	frame.sf_uc.uc_flags = td->td_sigstk.ss_flags & SS_ONSTACK ?
+ _UC_SETSTACK : _UC_CLRSTACK;
+ memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
+ get_mcontext(td, &frame.sf_uc.uc_mcontext,
+ (uint32_t)&frame.sf_uc.uc_flags);
+ PROC_UNLOCK(td->td_proc);
+ mtx_unlock(&psp->ps_mtx);
+ if (copyout(&frame, (void*)fp, sizeof(frame)) != 0)
+ sigexit(td, SIGILL);
+ /*
+ * Build context to run handler in. We invoke the handler
+ * directly, only returning via the trampoline. Note the
+ * trampoline version numbers are coordinated with machine-
+ * dependent code in libc.
+ */
+
+ tf->tf_r0 = sig;
+ tf->tf_r1 = (int)&fp->sf_si;
+ tf->tf_r2 = (int)&fp->sf_uc;
+
+ /* the trampoline uses r5 as the uc address */
+ tf->tf_r5 = (int)&fp->sf_uc;
+ tf->tf_pc = (int)catcher;
+ tf->tf_usr_sp = (int)fp;
+ if (onstack)
+ td->td_sigstk.ss_flags |= SS_ONSTACK;
+ PROC_LOCK(td->td_proc);
+ mtx_lock(&psp->ps_mtx);
+}
+
+struct kva_md_info kmi;
+
+/*
+ * arm_vector_init:
+ *
+ * Initialize the vector page, and select whether or not to
+ * relocate the vectors.
+ *
+ * NOTE: We expect the vector page to be mapped at its expected
+ * destination.
+ */
+
+extern unsigned int page0[], page0_data[];
+void
+arm_vector_init(vm_offset_t va, int which)
+{
+	unsigned int *vectors = (unsigned int *)va;
+ unsigned int *vectors_data = vectors + (page0_data - page0);
+ int vec;
+
+ /*
+ * Loop through the vectors we're taking over, and copy the
+ * vector's insn and data word.
+ */
+ for (vec = 0; vec < ARM_NVEC; vec++) {
+ if ((which & (1 << vec)) == 0) {
+ /* Don't want to take over this vector. */
+ continue;
+ }
+ vectors[vec] = page0[vec];
+ vectors_data[vec] = page0_data[vec];
+ }
+
+ /* Now sync the vectors. */
+ cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
+
+ vector_page = va;
+
+ if (va == ARM_VECTORS_HIGH) {
+ /*
+ * Assume the MD caller knows what it's doing here, and
+ * really does want the vector page relocated.
+ *
+ * Note: This has to be done here (and not just in
+ * cpu_setup()) because the vector page needs to be
+ * accessible *before* cpu_startup() is called.
+ * Think ddb(9) ...
+ *
+ * NOTE: If the CPU control register is not readable,
+ * this will totally fail! We'll just assume that
+ * any system that has high vector support has a
+ * readable CPU control register, for now. If we
+ * ever encounter one that does not, we'll have to
+ * rethink this.
+ */
+ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
+ }
+}
+
+static void
+cpu_startup(void *dummy)
+{
+ struct pcb *pcb = thread0.td_pcb;
+ vm_ksubmap_init(&kmi);
+ bufinit();
+ vm_pager_bufferinit();
+ pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
+ USPACE_UNDEF_STACK_TOP;
+ pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
+ USPACE_SVC_STACK_TOP;
+ vector_page_setprot(VM_PROT_READ);
+ pmap_update(pmap_kernel());
+ pmap_set_pcb_pagedir(pmap_kernel(), pcb);
+ cpu_setup("");
+ identify_arm_cpu();
+ thread0.td_frame = (struct trapframe *)pcb->un_32.pcb32_sp - 1;
+}
+
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
+
+void
+cpu_idle(void)
+{
+}
+
+int
+fill_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *tf = td->td_frame;
+ bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
+ regs->r_sp = tf->tf_usr_sp;
+ regs->r_lr = tf->tf_usr_lr;
+ regs->r_pc = tf->tf_pc;
+ regs->r_cpsr = tf->tf_spsr;
+ return (0);
+}
+int
+fill_fpregs(struct thread *td, struct fpreg *regs)
+{
+ bzero(regs, sizeof(*regs));
+ return (0);
+}
+
+int
+set_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *tf = td->td_frame;
+
+ bcopy(regs->r, &tf->tf_r0, sizeof(*regs->r));
+ tf->tf_usr_sp = regs->r_sp;
+ tf->tf_usr_lr = regs->r_lr;
+ tf->tf_pc = regs->r_pc;
+ tf->tf_spsr &= ~PSR_FLAGS;
+ tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
+	while (1);	/* XXX: never returns */
+ return (0);
+}
+
+int
+set_fpregs(struct thread *td, struct fpreg *regs)
+{
+ return (0);
+}
+
+int
+fill_dbregs(struct thread *td, struct dbreg *regs)
+{
+ return (0);
+}
+int
+set_dbregs(struct thread *td, struct dbreg *regs)
+{
+ return (0);
+}
+
+void
+cpu_halt(void)
+{
+ cpu_reset();
+}
+
+int
+ptrace_single_step(struct thread *td)
+{
+ return (0);
+}
+
+int
+ptrace_set_pc(struct thread *td, unsigned long addr)
+{
+ return (0);
+}
+
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+}
+
+/*
+ * Clear registers on exec
+ */
+void
+exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
+{
+ struct trapframe *tf = td->td_frame;
+
+ memset(tf, 0, sizeof(*tf));
+ tf->tf_usr_sp = stack;
+ tf->tf_usr_lr = entry;
+ tf->tf_svc_lr = 0x77777777;
+ tf->tf_pc = entry;
+ tf->tf_spsr = PSR_USR32_MODE;
+}
+
+/*
+ * Build siginfo_t for SA thread
+ */
+void
+cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
+{
+ printf("cpu_thread_siginfo\n");
+}
+
+/*
+ * Get machine context.
+ */
+int
+get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
+{
+ struct trapframe *tf = td->td_frame;
+ __greg_t *gr = mcp->__gregs;
+
+ /* Save General Register context. */
+ gr[_REG_R0] = tf->tf_r0;
+ gr[_REG_R1] = tf->tf_r1;
+ gr[_REG_R2] = tf->tf_r2;
+ gr[_REG_R3] = tf->tf_r3;
+ gr[_REG_R4] = tf->tf_r4;
+ gr[_REG_R5] = tf->tf_r5;
+ gr[_REG_R6] = tf->tf_r6;
+ gr[_REG_R7] = tf->tf_r7;
+ gr[_REG_R8] = tf->tf_r8;
+ gr[_REG_R9] = tf->tf_r9;
+ gr[_REG_R10] = tf->tf_r10;
+ gr[_REG_R11] = tf->tf_r11;
+ gr[_REG_R12] = tf->tf_r12;
+ gr[_REG_SP] = tf->tf_usr_sp;
+ gr[_REG_LR] = tf->tf_usr_lr;
+ gr[_REG_PC] = tf->tf_pc;
+ gr[_REG_CPSR] = tf->tf_spsr;
+
+ return (0);
+}
+
+/*
+ * Set machine context.
+ *
+ * However, we don't set any but the user modifiable flags, and we won't
+ * touch the cs selector.
+ */
+int
+set_mcontext(struct thread *td, const mcontext_t *mcp)
+{
+	panic("set_mcontext: not yet implemented");
+ return (0);
+}
+
+#ifdef COMPAT_FREEBSD4
+int
+freebsd4_sigreturn(td, uap)
+ struct thread *td;
+ struct freebsd4_sigreturn_args /* {
+ const ucontext4 *sigcntxp;
+ } */ *uap;
+{
+ return (0);
+}
+#endif
+
+/*
+ * MPSAFE
+ */
+int
+sigreturn(td, uap)
+ struct thread *td;
+ struct sigreturn_args /* {
+ const __ucontext *sigcntxp;
+ } */ *uap;
+{
+ return (0);
+}
+
+
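Once sendsig() above has copied the frame out and redirected the trap
frame, the thread resumes in userland at the catcher with the usual
SA_SIGINFO argument layout; in C terms the handler sees (a sketch of the
calling convention set up through tf_r0..tf_r2):

	#include <signal.h>

	void
	handler(int sig, siginfo_t *si, void *uc)
	{
		/* si == &fp->sf_si and uc == &fp->sf_uc, both living in
		   the sigframe carved out of the user stack above */
	}

r0-r2 carry sig, &sf_si and &sf_uc respectively, and r5 carries &sf_uc a
second time for the return trampoline, as the comment in sendsig() notes.
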
diff --git a/sys/arm/arm/nexus.c b/sys/arm/arm/nexus.c
new file mode 100644
index 0000000..2efbaf4
--- /dev/null
+++ b/sys/arm/arm/nexus.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code implements a `root nexus' for Arm Architecture
+ * machines. The function of the root nexus is to serve as an
+ * attachment point for both processors and buses, and to manage
+ * resources which are common to all of them. In particular,
+ * this code implements the core resource managers for interrupt
+ * requests, DMA requests (which rightfully should be a part of the
+ * ISA code but it's easier to do it here for now), I/O port addresses,
+ * and I/O memory address space.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/interrupt.h>
+
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+
+#include <machine/resource.h>
+#include <machine/intr.h>
+
+static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
+
+struct nexus_device {
+ struct resource_list nx_resources;
+};
+
+#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev))
+
+static struct rman mem_rman;
+
+static int nexus_probe(device_t);
+static int nexus_attach(device_t);
+static int nexus_print_child(device_t, device_t);
+static device_t nexus_add_child(device_t, int, const char *, int);
+static struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
+ u_long, u_long, u_long, u_int);
+static int nexus_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int
+nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
+ driver_intr_t *intr, void *arg, void **cookiep);
+static device_method_t nexus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_probe),
+ DEVMETHOD(device_attach, nexus_attach),
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, nexus_print_child),
+ DEVMETHOD(bus_add_child, nexus_add_child),
+ DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
+ DEVMETHOD(bus_activate_resource, nexus_activate_resource),
+ DEVMETHOD(bus_setup_intr, nexus_setup_intr),
+ { 0, 0 }
+};
+
+static driver_t nexus_driver = {
+ "nexus",
+ nexus_methods,
+ 1 /* no softc */
+};
+static devclass_t nexus_devclass;
+
+static int
+nexus_probe(device_t dev)
+{
+ device_quiet(dev); /* suppress attach message for neatness */
+
+ mem_rman.rm_start = 0;
+ mem_rman.rm_end = ~0u;
+ mem_rman.rm_type = RMAN_ARRAY;
+ mem_rman.rm_descr = "I/O memory addresses";
+ if (rman_init(&mem_rman)
+ || rman_manage_region(&mem_rman, 0, ~0u))
+ panic("nexus_probe mem_rman");
+
+	return (0);
+}
+
+static int
+nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
+ driver_intr_t *intr, void *arg, void **cookiep)
+{
+ arm_setup_irqhandler(device_get_nameunit(child),
+ intr, arg, res->r_start, flags, cookiep);
+ return (0);
+}
+
+
+static int
+nexus_attach(device_t dev)
+{
+ /*
+ * First, deal with the children we know about already
+ */
+	printf("before\n");
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+ printf("nexus_attach\n");
+
+ return 0;
+}
+
+
+static int
+nexus_print_child(device_t bus, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(bus, child);
+ retval += printf(" on motherboard\n"); /* XXX "motherboard", ick */
+
+ return (retval);
+}
+
+
+static device_t
+nexus_add_child(device_t bus, int order, const char *name, int unit)
+{
+ device_t child;
+ struct nexus_device *ndev;
+
+ ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO);
+ if (!ndev)
+ return(0);
+ resource_list_init(&ndev->nx_resources);
+
+ child = device_add_child_ordered(bus, order, name, unit);
+
+ /* should we free this in nexus_child_detached? */
+ device_set_ivars(child, ndev);
+
+ return(child);
+}
+
+
+/*
+ * Allocate a resource on behalf of child. NB: child is usually going to be a
+ * child of one of our descendants, not a direct child of nexus0.
+ * (Exceptions include footbridge.)
+ */
+#define ARM_BUS_SPACE_MEM 1
+static struct resource *
+nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource *rv;
+ struct rman *rm;
+ int needactivate = flags & RF_ACTIVE;
+
+ switch (type) {
+ case SYS_RES_MEMORY:
+ rm = &mem_rman;
+ break;
+
+ default:
+ return 0;
+ }
+
+ rv = rman_reserve_resource(rm, start, end, count, flags, child);
+ if (rv == 0)
+ return 0;
+
+ rman_set_bustag(rv, (void*)ARM_BUS_SPACE_MEM);
+ rman_set_bushandle(rv, rv->r_start);
+
+ if (needactivate) {
+ if (bus_activate_resource(child, type, *rid, rv)) {
+ rman_release_resource(rv);
+ return 0;
+ }
+ }
+
+ return rv;
+}
+
+
+static int
+nexus_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ /*
+ * If this is a memory resource, map it into the kernel.
+ */
+ if (rman_get_bustag(r) == (void*)ARM_BUS_SPACE_MEM) {
+ caddr_t vaddr = 0;
+ u_int32_t paddr;
+ u_int32_t psize;
+ u_int32_t poffs;
+
+ paddr = rman_get_start(r);
+ psize = rman_get_size(r);
+ poffs = paddr - trunc_page(paddr);
+ vaddr = (caddr_t) pmap_mapdev(paddr-poffs, psize+poffs) + poffs;
+ rman_set_virtual(r, vaddr);
+ rman_set_bushandle(r, (bus_space_handle_t) vaddr);
+ }
+ return (rman_activate_resource(r));
+}
+
+DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
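
A hypothetical consumer of the memory rman above, as a child driver's
attach routine might allocate and map a register window (the device name
and addresses are invented for illustration; the usual newbus headers are
assumed):

	static int
	mydev_attach(device_t dev)
	{
		struct resource *res;
		int rid = 0;

		/* 4KB of device registers at physical 0x80000000 */
		res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		    0x80000000, 0x80000fff, 0x1000, RF_ACTIVE);
		if (res == NULL)
			return (ENXIO);
		/* RF_ACTIVE already ran nexus_activate_resource(), so
		   rman_get_virtual(res) is a usable mapping now */
		return (0);
	}

With RF_ACTIVE set, nexus_alloc_resource() calls bus_activate_resource()
itself, which lands in nexus_activate_resource() and establishes the
kernel mapping through pmap_mapdev().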
diff --git a/sys/arm/arm/nexus_io.c b/sys/arm/arm/nexus_io.c
new file mode 100644
index 0000000..0a93a75
--- /dev/null
+++ b/sys/arm/arm/nexus_io.c
@@ -0,0 +1,208 @@
+/* $NetBSD: mainbus_io.c,v 1.13 2003/07/15 00:24:47 lukem Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * bus_space I/O functions for mainbus
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+
+#include <machine/bus.h>
+#include <machine/pmap.h>
+
+/* Prototypes for all the bus_space structure functions */
+vm_offset_t lala;	/* XXX: unused placeholder */
+bs_protos(nexus);
+/* Declare the mainbus bus space tag */
+
+struct bus_space mainbus_bs_tag = {
+ /* cookie */
+ NULL,
+
+ /* mapping/unmapping */
+ nexus_bs_map,
+ nexus_bs_unmap,
+ nexus_bs_subregion,
+
+ /* allocation/deallocation */
+ nexus_bs_alloc,
+ nexus_bs_free,
+
+ /* get kernel virtual address */
+ 0, /* there is no linear mapping */
+
+ NULL,
+
+ /* barrier */
+ nexus_bs_barrier,
+
+ /* read (single) */
+ nexus_bs_r_1,
+ nexus_bs_r_2,
+ nexus_bs_r_4,
+ NULL,
+
+ /* read multiple */
+ NULL,
+ nexus_bs_rm_2,
+ NULL,
+ NULL,
+
+ /* read region */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+
+ /* write (single) */
+ nexus_bs_w_1,
+ nexus_bs_w_2,
+ nexus_bs_w_4,
+ NULL,
+
+ /* write multiple */
+ nexus_bs_wm_1,
+ nexus_bs_wm_2,
+ NULL,
+ NULL,
+
+ /* write region */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+
+ /* set region */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+
+ /* copy */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+};
+
+/* bus space functions */
+
+int
+nexus_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int cacheable,
+ bus_space_handle_t *bshp)
+{
+ return(0);
+}
+
+int
+nexus_bs_alloc(t, rstart, rend, size, alignment, boundary, cacheable,
+ bpap, bshp)
+ void *t;
+ bus_addr_t rstart, rend;
+ bus_size_t size, alignment, boundary;
+ int cacheable;
+ bus_addr_t *bpap;
+ bus_space_handle_t *bshp;
+{
+	panic("nexus_bs_alloc(): Help!");
+}
+
+
+void
+nexus_bs_unmap(void *t, bus_size_t size)
+{
+ /*
+ * Temporary implementation
+ */
+}
+
+void
+nexus_bs_free(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+
+	panic("nexus_bs_free(): Help!");
+	/* nexus_bs_unmap() does all that we need to do. */
+/*	nexus_bs_unmap(t, bsh, size);*/
+}
+
+int
+nexus_bs_subregion(t, bsh, offset, size, nbshp)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, size;
+ bus_space_handle_t *nbshp;
+{
+
+ *nbshp = bsh + offset;
+ return (0);
+}
+
+int
+nexus_bs_mmap(dev_t dev, vm_offset_t off, vm_paddr_t *addr, int prot)
+{
+ *addr = off;
+ return (0);
+}
+
+void
+nexus_bs_barrier(t, bsh, offset, len, flags)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, len;
+ int flags;
+{
+}
+
+/* End of nexus_io.c */
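
A hypothetical access through the tag above via the machine-independent
bus_space API (the handle "ioh" and the offset are invented); the macros
dispatch through the function pointers installed in struct bus_space:

	uint16_t v;

	v = bus_space_read_2(&mainbus_bs_tag, ioh, 0x04);     /* nexus_bs_r_2 */
	bus_space_write_2(&mainbus_bs_tag, ioh, 0x04, v | 1); /* nexus_bs_w_2 */

Only the slots actually filled in are usable; a driver touching one of the
NULL entries (the 8-byte ops, most of the region ops) would jump through a
null pointer, so this tag presumably supports just the subset its current
consumers need.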
diff --git a/sys/arm/arm/nexus_io_asm.S b/sys/arm/arm/nexus_io_asm.S
new file mode 100644
index 0000000..0129f7f
--- /dev/null
+++ b/sys/arm/arm/nexus_io_asm.S
@@ -0,0 +1,114 @@
+/* $NetBSD: mainbus_io_asm.S,v 1.1 2001/02/24 19:38:02 reinoud Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * bus_space I/O functions for nexus
+ */
+
+
+/*
+ * read single
+ */
+
+ENTRY(nexus_bs_r_1)
+ ldrb r0, [r1, r2, lsl #2]
+ mov pc, lr
+
+ENTRY(nexus_bs_r_2)
+ ldr r0, [r1, r2, lsl #2]
+ bic r0, r0, #0xff000000
+ bic r0, r0, #0x00ff0000
+ mov pc, lr
+
+ENTRY(nexus_bs_r_4)
+ ldr r0, [r1, r2, lsl #2]
+ mov pc, lr
+
+/*
+ * write single
+ */
+
+ENTRY(nexus_bs_w_1)
+ strb r3, [r1, r2, lsl #2]
+ mov pc, lr
+
+ENTRY(nexus_bs_w_2)
+ mov r3, r3, lsl #16
+ orr r3, r3, r3, lsr #16
+ str r3, [r1, r2, lsl #2]
+ mov pc, lr
+
+ENTRY(nexus_bs_w_4)
+ str r3, [r1, r2, lsl #2]
+ mov pc, lr
+
+/*
+ * read multiple
+ */
+
+ENTRY(nexus_bs_rm_2)
+ add r0, r1, r2, lsl #2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ b _C_LABEL(insw16)
+
+/*
+ * write multiple
+ */
+
+ENTRY(nexus_bs_wm_1)
+ add r0, r1, r2, lsl #2
+ ldr r2, [sp, #0]
+
+ /* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+nexus_wm_1_loop:
+ ldrb r1, [r3], #0x0001
+ str r1, [r0]
+ subs r2, r2, #0x00000001
+ bgt nexus_wm_1_loop
+
+ mov pc, lr
+
+ENTRY(nexus_bs_wm_2)
+ add r0, r1, r2, lsl #2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ b _C_LABEL(outsw16)
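
Every access above scales the offset with "lsl #2", i.e. the handle treats
device registers as one per 32-bit word. The byte read, for instance, is
equivalent to this C sketch (names invented):

	#include <stdint.h>

	static inline uint8_t
	nexus_read_1_sketch(uintptr_t ioh, uint32_t off)
	{
		/* registers sit on word boundaries: scale offset by 4 */
		return (*(volatile uint8_t *)(ioh + (off << 2)));
	}

The 16-bit read likewise loads a whole word and masks off the top sixteen
bits, matching a peripheral whose 16-bit registers occupy the low half of
each word.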
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
new file mode 100644
index 0000000..0bba36f
--- /dev/null
+++ b/sys/arm/arm/pmap.c
@@ -0,0 +1,4650 @@
+/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
+/*
+ * Copyright 2004 Olivier Houchard.
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2002-2003 Wasabi Systems, Inc.
+ * Copyright (c) 2001 Richard Earnshaw
+ * Copyright (c) 2001-2002 Christopher Gilbert
+ * All rights reserved.
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * pmap.c
+ *
+ * Machine dependent vm code
+ *
+ * Created : 20/09/94
+ */
+
+/*
+ * Special compilation symbols
+ * PMAP_DEBUG - Build in pmap_debug_level code
+ */
+/* Include header files */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+#include <sys/smp.h>
+#include <sys/sx.h>
+#include <sys/sched.h>
+
+#include <vm/vm.h>
+#include <vm/uma.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_extern.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <machine/md_var.h>
+#include <machine/vmparam.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/pcb.h>
+
+#ifdef PMAP_DEBUG
+#define PDEBUG(_lev_,_stat_) \
+ do { \
+ if (pmap_debug_level >= (_lev_)) \
+ ((_stat_)); \
+ } while (/*CONSTCOND*/0)
+#define dprintf printf
+
+int pmap_debug_level = 0;
+#define PMAP_INLINE
+#else /* PMAP_DEBUG */
+#define PDEBUG(_lev_,_stat_) /* Nothing */
+#define dprintf(x, arg...)
+#define PMAP_INLINE
+#endif /* PMAP_DEBUG */
+
+/*
+ * Get PDEs and PTEs for user/kernel address space
+ */
+#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDR_SHIFT])
+
+#define pmap_pte_prot(m, p) (protection_codes[p])
+static int protection_codes[8];
+
+extern struct pv_addr systempage;
+/*
+ * Internal function prototypes
+ */
+static PMAP_INLINE void pmap_invalidate_page (pmap_t, vm_offset_t);
+#if 0
+static PMAP_INLINE void pmap_invalidate_tlb (pmap_t, vm_offset_t);
+#endif
+static PMAP_INLINE void pmap_invalidate_tlb_all (pmap_t);
+static PMAP_INLINE void pmap_changebit (vm_page_t, int, boolean_t);
+static PMAP_INLINE int pmap_track_modified(vm_offset_t);
+static pt_entry_t * pmap_pte (pmap_t, vm_offset_t);
+static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t);
+static PMAP_INLINE void pmap_free_pv_entry (pv_entry_t);
+static PMAP_INLINE int pmap_unwire_pte_hold(pmap_t, vm_page_t);
+static void arm_protection_init(void);
+static pv_entry_t pmap_get_pv_entry(void);
+
+static void pmap_vac_me_harder(struct vm_page *, pmap_t,
+ vm_offset_t);
+static void pmap_vac_me_kpmap(struct vm_page *, pmap_t,
+ vm_offset_t);
+static void pmap_vac_me_user(struct vm_page *, pmap_t, vm_offset_t);
+static void pmap_alloc_l1(pmap_t);
+static void pmap_free_l1(pmap_t);
+static void pmap_use_l1(pmap_t);
+static PMAP_INLINE boolean_t pmap_is_current(pmap_t);
+static PMAP_INLINE boolean_t pmap_is_cached(pmap_t);
+
+static void pmap_clearbit(struct vm_page *, u_int);
+
+static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
+static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
+static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
+static vm_offset_t kernel_pt_lookup(vm_paddr_t);
+
+static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");
+
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+
+extern void *end;
+vm_offset_t kernel_vm_end = 0;
+
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+static pt_entry_t *csrc_pte, *cdst_pte;
+static vm_offset_t csrcp, cdstp;
+static void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
+
+/*
+ * These routines are called when the CPU type is identified to set up
+ * the PTE prototypes, cache modes, etc.
+ *
+ * The variables are always here, just in case LKMs need to reference
+ * them (though, they shouldn't).
+ */
+
+pt_entry_t pte_l1_s_cache_mode;
+pt_entry_t pte_l1_s_cache_mode_pt;
+pt_entry_t pte_l1_s_cache_mask;
+
+pt_entry_t pte_l2_l_cache_mode;
+pt_entry_t pte_l2_l_cache_mode_pt;
+pt_entry_t pte_l2_l_cache_mask;
+
+pt_entry_t pte_l2_s_cache_mode;
+pt_entry_t pte_l2_s_cache_mode_pt;
+pt_entry_t pte_l2_s_cache_mask;
+
+pt_entry_t pte_l2_s_prot_u;
+pt_entry_t pte_l2_s_prot_w;
+pt_entry_t pte_l2_s_prot_mask;
+
+pt_entry_t pte_l1_s_proto;
+pt_entry_t pte_l1_c_proto;
+pt_entry_t pte_l2_s_proto;
+
+void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+void (*pmap_zero_page_func)(vm_paddr_t, int, int);
+
+/*
+ * Which pmap is currently 'live' in the cache
+ *
+ * XXXSCW: Fix for SMP ...
+ */
+union pmap_cache_state *pmap_cache_state;
+
+LIST_HEAD(pmaplist, pmap);
+struct pmaplist allpmaps;
+
+static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+
+/* static pt_entry_t *msgbufmap;*/
+struct msgbuf *msgbufp = 0;
+
+extern void bcopy_page(vm_offset_t, vm_offset_t);
+extern void bzero_page(vm_offset_t);
+
+/*
+ * Metadata for L1 translation tables.
+ */
+struct l1_ttable {
+ /* Entry on the L1 Table list */
+ SLIST_ENTRY(l1_ttable) l1_link;
+
+ /* Entry on the L1 Least Recently Used list */
+ TAILQ_ENTRY(l1_ttable) l1_lru;
+
+ /* Track how many domains are allocated from this L1 */
+ volatile u_int l1_domain_use_count;
+
+ /*
+ * A free-list of domain numbers for this L1.
+ * We avoid using ffs() and a bitmap to track domains since ffs()
+ * is slow on ARM.
+ */
+ u_int8_t l1_domain_first;
+ u_int8_t l1_domain_free[PMAP_DOMAINS];
+
+ /* Physical address of this L1 page table */
+ vm_paddr_t l1_physaddr;
+
+ /* KVA of this L1 page table */
+ pd_entry_t *l1_kva;
+};
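+
+/*
+ * The domain numbers are kept on an implicit free list threaded
+ * through l1_domain_free[] itself. For example, right after
+ * pmap_init_l1() below has run, l1_domain_first == 0 and
+ * l1_domain_free[i] == i + 1; pmap_alloc_l1() then pops the head
+ * (domain = first; first = free[domain]) and pmap_free_l1() pushes
+ * a released domain back (free[domain] = first; first = domain),
+ * both in constant time.
+ */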
+
+/*
+ * Convert a virtual address into its L1 table index. That is, the
+ * index used to locate the L2 descriptor table pointer in an L1 table.
+ * This is basically used to index l1->l1_kva[].
+ *
+ * Each L2 descriptor table represents 1MB of VA space.
+ */
+#define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT)
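+
+/*
+ * Worked example, assuming the usual 1MB ARM section size
+ * (L1_S_SHIFT == 20): L1_IDX(0xC0123456) == 0xC0123456 >> 20 == 0xC01,
+ * so l1_kva[0xC01] holds the L1 descriptor covering that VA.
+ */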
+
+/*
+ * L1 Page Tables are tracked using a Least Recently Used list.
+ * - New L1s are allocated from the HEAD.
+ * - Freed L1s are added to the TAIL.
+ * - Recently accessed L1s (where an 'access' is some change to one of
+ * the userland pmaps which owns this L1) are moved to the TAIL.
+ */
+static TAILQ_HEAD(, l1_ttable) l1_lru_list;
+static struct mtx l1_lru_lock;
+
+/*
+ * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
+ *
+ * This is normally 16MB worth of L2 page descriptors for any given pmap.
+ * Reference counts are maintained for L2 descriptors so they can be
+ * freed when empty.
+ */
+struct l2_dtable {
+ /* The number of L2 page descriptors allocated to this l2_dtable */
+ u_int l2_occupancy;
+
+ /* List of L2 page descriptors */
+ struct l2_bucket {
+ pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */
+ vm_paddr_t l2b_phys; /* Physical address of same */
+ u_short l2b_l1idx; /* This L2 table's L1 index */
+ u_short l2b_occupancy; /* How many active descriptors */
+ } l2_bucket[L2_BUCKET_SIZE];
+};
+
+/*
+ * Given an L1 table index, calculate the corresponding l2_dtable index
+ * and bucket index within the l2_dtable.
+ */
+#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \
+ (L2_SIZE - 1))
+#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1))
+
+/*
+ * Given a virtual address, this macro returns the
+ * virtual address required to drop into the next L2 bucket.
+ */
+#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE)
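+
+/*
+ * Continuing the example above, and assuming L2_BUCKET_SIZE == 16
+ * (L2_BUCKET_LOG2 == 4): for l1idx == 0xC01, L2_BUCKET(0xC01) == 0x1
+ * and L2_IDX(0xC01) == 0xC0, so the bucket lives at
+ * pm->pm_l2[0xC0]->l2_bucket[0x1]. L2_NEXT_BUCKET() simply rounds
+ * the VA up to the next 1MB section boundary.
+ */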
+
+/*
+ * L2 allocation.
+ */
+#define pmap_alloc_l2_dtable() \
+ (void*)uma_zalloc(l2table_zone, M_NOWAIT)
+#define pmap_free_l2_dtable(l2) \
+ uma_zfree(l2table_zone, l2)
+
+/*
+ * We try to map the page tables write-through, if possible. However, not
+ * all CPUs have a write-through cache mode, so on those we have to sync
+ * the cache when we frob page tables.
+ *
+ * We try to evaluate this at compile time, if possible. However, it's
+ * not always possible to do that, hence this run-time var.
+ */
+int pmap_needs_pte_sync;
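+
+/*
+ * The PTE_SYNC()/PTE_SYNC_RANGE() macros used throughout this file
+ * are expected to consult PMAP_NEEDS_PTE_SYNC (and hence, when the
+ * choice cannot be made at compile time, this variable) before
+ * deciding whether to write descriptors back to RAM.
+ */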
+
+/*
+ * Macro to determine if a mapping might be resident in the
+ * instruction cache and/or TLB
+ */
+#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
+
+/*
+ * Macro to determine if a mapping might be resident in the
+ * data cache and/or TLB
+ */
+#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
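+
+/*
+ * For example, a mapping whose flags are (PVF_REF | PVF_EXEC)
+ * satisfies PV_BEEN_EXECD() (and PV_BEEN_REFD()), one carrying only
+ * PVF_REF satisfies just PV_BEEN_REFD(), and an unreferenced mapping
+ * satisfies neither.
+ */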
+
+/*
+ * Cache enable bits in PTE to use on pages that are cacheable.
+ * On most machines this is cacheable/bufferable, but on some, e.g. arm10,
+ * we can choose between write-through and write-back caching.
+ */
+pt_entry_t pte_cache_mode = (PT_C | PT_B);
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+#define MINPV 1024
+
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
+
+static uma_zone_t pvzone;
+static uma_zone_t l2zone;
+static uma_zone_t l2table_zone;
+static struct vm_object pvzone_obj;
+static struct vm_object l2zone_obj;
+static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+int pmap_pagedaemon_waken = 0;
+
+void pmap_deactivate(struct thread *);
+
+void
+pmap_deactivate(struct thread *td)
+{
+}
+
+/*
+ * This list exists for the benefit of pmap_map_chunk(). It keeps track
+ * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
+ * find them as necessary.
+ *
+ * Note that the data on this list MUST remain valid after initarm() returns,
+ * as pmap_bootstrap() uses it to construct L2 table metadata.
+ */
+SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
+
+static void
+pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
+{
+ int i;
+
+ l1->l1_kva = l1pt;
+ l1->l1_domain_use_count = 0;
+ l1->l1_domain_first = 0;
+
+ for (i = 0; i < PMAP_DOMAINS; i++)
+ l1->l1_domain_free[i] = i + 1;
+
+ /*
+ * Copy the kernel's L1 entries to each new L1.
+ */
+ if (pmap_initialized)
+ memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
+
+ if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
+ panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
+ if (l1->l1_physaddr & (L1_TABLE_SIZE - 1))
+ panic("fuck\n");
+ TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+}
+
+static vm_offset_t
+kernel_pt_lookup(vm_paddr_t pa)
+{
+ struct pv_addr *pv;
+
+ SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
+#ifndef ARM32_NEW_VM_LAYOUT
+ if (pv->pv_pa == (pa & ~PAGE_MASK)) {
+ return (pv->pv_va | (pa & PAGE_MASK));
+ }
+#else
+ if (pv->pv_pa == pa)
+ return (pv->pv_va);
+#endif
+ }
+ return (0);
+}
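+
+/*
+ * kernel_pt_lookup() is only used on the bootstrap page tables (see
+ * xscale_setup_minidata(), pmap_set_pt_cache_mode() and
+ * pmap_bootstrap() below) to turn the physical address found in an
+ * L1 entry back into a KVA, so the linear search is acceptable.
+ */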
+
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void
+pmap_pte_init_generic(void)
+{
+
+ pte_l1_s_cache_mode = L1_S_B|L1_S_C;
+ pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
+
+ pte_l2_l_cache_mode = L2_B|L2_C;
+ pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
+
+ pte_l2_s_cache_mode = L2_B|L2_C;
+ pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
+
+ /*
+ * If we have a write-through cache, set B and C. If
+ * we have a write-back cache, then we assume setting
+ * only C will make those pages write-through.
+ */
+ if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
+ pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
+ pte_l2_l_cache_mode_pt = L2_B|L2_C;
+ pte_l2_s_cache_mode_pt = L2_B|L2_C;
+ } else {
+ pte_l1_s_cache_mode_pt = L1_S_C;
+ pte_l2_l_cache_mode_pt = L2_C;
+ pte_l2_s_cache_mode_pt = L2_C;
+ }
+
+ pte_l2_s_prot_u = L2_S_PROT_U_generic;
+ pte_l2_s_prot_w = L2_S_PROT_W_generic;
+ pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
+
+ pte_l1_s_proto = L1_S_PROTO_generic;
+ pte_l1_c_proto = L1_C_PROTO_generic;
+ pte_l2_s_proto = L2_S_PROTO_generic;
+
+ pmap_copy_page_func = pmap_copy_page_generic;
+ pmap_zero_page_func = pmap_zero_page_generic;
+}
+
+#if defined(CPU_ARM8)
+void
+pmap_pte_init_arm8(void)
+{
+
+ /*
+ * ARM8 is compatible with generic, but we need to use
+ * the page tables uncached.
+ */
+ pmap_pte_init_generic();
+
+ pte_l1_s_cache_mode_pt = 0;
+ pte_l2_l_cache_mode_pt = 0;
+ pte_l2_s_cache_mode_pt = 0;
+}
+#endif /* CPU_ARM8 */
+
+#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
+void
+pmap_pte_init_arm9(void)
+{
+
+ /*
+ * ARM9 is compatible with generic, but we want to use
+ * write-through caching for now.
+ */
+ pmap_pte_init_generic();
+
+ pte_l1_s_cache_mode = L1_S_C;
+ pte_l2_l_cache_mode = L2_C;
+ pte_l2_s_cache_mode = L2_C;
+
+ pte_l1_s_cache_mode_pt = L1_S_C;
+ pte_l2_l_cache_mode_pt = L2_C;
+ pte_l2_s_cache_mode_pt = L2_C;
+}
+#endif /* CPU_ARM9 */
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if defined(CPU_ARM10)
+void
+pmap_pte_init_arm10(void)
+{
+
+ /*
+ * ARM10 is compatible with generic, but we want to use
+ * write-through caching for now.
+ */
+ pmap_pte_init_generic();
+
+ pte_l1_s_cache_mode = L1_S_B | L1_S_C;
+ pte_l2_l_cache_mode = L2_B | L2_C;
+ pte_l2_s_cache_mode = L2_B | L2_C;
+
+ pte_l1_s_cache_mode_pt = L1_S_C;
+ pte_l2_l_cache_mode_pt = L2_C;
+ pte_l2_s_cache_mode_pt = L2_C;
+
+}
+#endif /* CPU_ARM10 */
+
+#if ARM_MMU_SA1 == 1
+void
+pmap_pte_init_sa1(void)
+{
+
+ /*
+ * The StrongARM SA-1 cache does not have a write-through
+ * mode. So, do the generic initialization, then reset
+ * the page table cache mode to B=1,C=1, and note that
+ * the PTEs need to be sync'd.
+ */
+ pmap_pte_init_generic();
+
+ pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
+ pte_l2_l_cache_mode_pt = L2_B|L2_C;
+ pte_l2_s_cache_mode_pt = L2_B|L2_C;
+
+ pmap_needs_pte_sync = 1;
+}
+#endif /* ARM_MMU_SA1 == 1*/
+
+#if ARM_MMU_XSCALE == 1
+#if (ARM_NMMUS > 1)
+static u_int xscale_use_minidata;
+#endif
+
+void
+pmap_pte_init_xscale(void)
+{
+ uint32_t auxctl;
+ int write_through = 0;
+
+ pte_l1_s_cache_mode = L1_S_B|L1_S_C;
+ pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
+
+ pte_l2_l_cache_mode = L2_B|L2_C;
+ pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
+
+ pte_l2_s_cache_mode = L2_B|L2_C;
+ pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
+
+ pte_l1_s_cache_mode_pt = L1_S_C;
+ pte_l2_l_cache_mode_pt = L2_C;
+ pte_l2_s_cache_mode_pt = L2_C;
+
+#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
+ /*
+ * The XScale core has an enhanced mode where writes that
+ * miss the cache cause a cache line to be allocated. This
+ * is significantly faster than the traditional, write-through
+ * behavior of this case.
+ */
+ pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
+ pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
+ pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
+#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
+
+#ifdef XSCALE_CACHE_WRITE_THROUGH
+ /*
+ * Some versions of the XScale core have various bugs in
+ * their cache units, the work-around for which is to run
+ * the cache in write-through mode. Unfortunately, this
+ * has a major (negative) impact on performance. So, we
+ * go ahead and run fast-and-loose, in the hopes that we
+ * don't line up the planets in a way that will trip the
+ * bugs.
+ *
+ * However, we give you the option to be slow-but-correct.
+ */
+ write_through = 1;
+#elif defined(XSCALE_CACHE_WRITE_BACK)
+ /* force write back cache mode */
+ write_through = 0;
+#elif defined(CPU_XSCALE_PXA2X0)
+ /*
+ * Intel PXA2[15]0 processors are known to have a bug in
+ * write-back cache on revision 4 and earlier (stepping
+ * A[01] and B[012]). Fixed for C0 and later.
+ */
+ {
+ uint32_t id, type;
+
+ id = cpufunc_id();
+ type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
+
+ if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
+ if ((id & CPU_ID_REVISION_MASK) < 5) {
+ /* write through for stepping A0-1 and B0-2 */
+ write_through = 1;
+ }
+ }
+ }
+#endif /* XSCALE_CACHE_WRITE_THROUGH */
+
+ if (write_through) {
+ pte_l1_s_cache_mode = L1_S_C;
+ pte_l2_l_cache_mode = L2_C;
+ pte_l2_s_cache_mode = L2_C;
+ }
+
+#if (ARM_NMMUS > 1)
+ xscale_use_minidata = 1;
+#endif
+
+ pte_l2_s_prot_u = L2_S_PROT_U_xscale;
+ pte_l2_s_prot_w = L2_S_PROT_W_xscale;
+ pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
+
+ pte_l1_s_proto = L1_S_PROTO_xscale;
+ pte_l1_c_proto = L1_C_PROTO_xscale;
+ pte_l2_s_proto = L2_S_PROTO_xscale;
+
+ pmap_copy_page_func = pmap_copy_page_xscale;
+ pmap_zero_page_func = pmap_zero_page_xscale;
+
+ /*
+ * Disable ECC protection of page table access, for now.
+ */
+ __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
+ auxctl &= ~XSCALE_AUXCTL_P;
+ __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
+}
+
+/*
+ * xscale_setup_minidata:
+ *
+ * Set up the mini-data cache clean area. We require the
+ * caller to allocate the right amount of physically and
+ * virtually contiguous space.
+ */
+extern vm_offset_t xscale_minidata_clean_addr;
+extern vm_size_t xscale_minidata_clean_size; /* already initialized */
+void
+xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pt_entry_t *pte;
+ vm_size_t size;
+ uint32_t auxctl;
+
+ xscale_minidata_clean_addr = va;
+
+ /* Round it to page size. */
+ size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
+
+ for (; size != 0;
+ va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte = (pt_entry_t *)
+ kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+#else
+ pte = (pt_entry_t *) kernel_pt_lookup(
+ pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+#endif
+ if (pte == NULL)
+ panic("xscale_setup_minidata: can't find L2 table for "
+ "VA 0x%08x", (u_int32_t) va);
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte[(va >> PAGE_SHIFT) & 0x3ff] =
+#else
+ pte[l2pte_index(va)] =
+#endif
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+ }
+
+ /*
+ * Configure the mini-data cache for write-back with
+ * read/write-allocate.
+ *
+ * NOTE: In order to reconfigure the mini-data cache, we must
+ * make sure it contains no valid data! In order to do that,
+ * we must issue a global data cache invalidate command!
+ *
+ * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
+ * THIS IS VERY IMPORTANT!
+ */
+
+ /* Invalidate data and mini-data. */
+ __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
+ __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
+ auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
+ __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
+}
+#endif
+
+/*
+ * Allocate an L1 translation table for the specified pmap.
+ * This is called at pmap creation time.
+ */
+static void
+pmap_alloc_l1(pmap_t pm)
+{
+ struct l1_ttable *l1;
+ u_int8_t domain;
+
+ /*
+ * Remove the L1 at the head of the LRU list
+ */
+ mtx_lock(&l1_lru_lock);
+ l1 = TAILQ_FIRST(&l1_lru_list);
+ TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
+
+ /*
+ * Pick the first available domain number, and update
+ * the link to the next number.
+ */
+ domain = l1->l1_domain_first;
+ l1->l1_domain_first = l1->l1_domain_free[domain];
+
+ /*
+ * If there are still free domain numbers in this L1,
+ * put it back on the TAIL of the LRU list.
+ */
+ if (++l1->l1_domain_use_count < PMAP_DOMAINS)
+ TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+
+ mtx_unlock(&l1_lru_lock);
+
+ /*
+ * Fix up the relevant bits in the pmap structure
+ */
+ pm->pm_l1 = l1;
+ pm->pm_domain = domain;
+}
+
+/*
+ * Free an L1 translation table.
+ * This is called at pmap destruction time.
+ */
+static void
+pmap_free_l1(pmap_t pm)
+{
+ struct l1_ttable *l1 = pm->pm_l1;
+
+ mtx_lock(&l1_lru_lock);
+
+ /*
+ * If this L1 is currently on the LRU list, remove it.
+ */
+ if (l1->l1_domain_use_count < PMAP_DOMAINS)
+ TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
+
+ /*
+ * Free up the domain number which was allocated to the pmap
+ */
+ l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
+ l1->l1_domain_first = pm->pm_domain;
+ l1->l1_domain_use_count--;
+
+ /*
+ * The L1 now must have at least 1 free domain, so add
+ * it back to the LRU list. If the use count is zero,
+ * put it at the head of the list, otherwise it goes
+ * to the tail.
+ */
+ if (l1->l1_domain_use_count == 0) {
+ TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
+ } else
+ TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+
+ mtx_unlock(&l1_lru_lock);
+}
+
+static PMAP_INLINE void
+pmap_use_l1(pmap_t pm)
+{
+ struct l1_ttable *l1;
+
+ /*
+ * Access to an L1 by the kernel pmap must not affect
+ * the LRU list, so do nothing for the kernel pmap.
+ */
+ if (pm == pmap_kernel())
+ return;
+
+ l1 = pm->pm_l1;
+
+ /*
+ * If the L1 is not currently on the LRU list, just return
+ */
+ if (l1->l1_domain_use_count == PMAP_DOMAINS)
+ return;
+
+ mtx_lock(&l1_lru_lock);
+
+ /*
+ * Check the use count again, now that we've acquired the lock
+ */
+ if (l1->l1_domain_use_count == PMAP_DOMAINS) {
+ mtx_unlock(&l1_lru_lock);
+ return;
+ }
+
+ /*
+ * Move the L1 to the back of the LRU list
+ */
+ TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
+ TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+
+ mtx_unlock(&l1_lru_lock);
+}
+
+
+/*
+ * Returns a pointer to the L2 bucket associated with the specified pmap
+ * and VA, or NULL if no L2 bucket exists for the address.
+ */
+static PMAP_INLINE struct l2_bucket *
+pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
+{
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ u_short l1idx;
+
+ l1idx = L1_IDX(va);
+
+ if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
+ (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
+ return (NULL);
+
+ return (l2b);
+}
+
+/*
+ * Returns a pointer to the L2 bucket associated with the specified pmap
+ * and VA.
+ *
+ * If no L2 bucket exists, perform the necessary allocations to put an L2
+ * bucket/page table in place.
+ *
+ * Note that if a new L2 bucket/page was allocated, the caller *must*
+ * increment the bucket occupancy counter appropriately *before*
+ * releasing the pmap's lock to ensure no other thread or cpu deallocates
+ * the bucket/page in the meantime.
+ */
+static struct l2_bucket *
+pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
+{
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ u_short l1idx;
+
+ l1idx = L1_IDX(va);
+
+ if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ /*
+ * No mapping at this address, as there is
+ * no entry in the L1 table.
+ * Need to allocate a new l2_dtable.
+ */
+ if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
+ return (NULL);
+ }
+ bzero(l2, sizeof(*l2));
+ /*
+ * Link it into the parent pmap
+ */
+ pm->pm_l2[L2_IDX(l1idx)] = l2;
+ }
+
+ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+
+ /*
+ * Fetch pointer to the L2 page table associated with the address.
+ */
+ if (l2b->l2b_kva == NULL) {
+ pt_entry_t *ptep;
+
+ /*
+ * No L2 page table has been allocated. Chances are, this
+ * is because we just allocated the l2_dtable, above.
+ */
+ ptep = (void*)uma_zalloc(l2zone, M_NOWAIT);
+ if (ptep == NULL) {
+ /*
+ * Oops, no more L2 page tables available at this
+ * time. We may need to deallocate the l2_dtable
+ * if we allocated a new one above.
+ */
+ if (l2->l2_occupancy == 0) {
+ pm->pm_l2[L2_IDX(l1idx)] = NULL;
+ pmap_free_l2_dtable(l2);
+ }
+ return (NULL);
+ }
+
+ l2->l2_occupancy++;
+ l2b->l2b_kva = ptep;
+ l2b->l2b_phys = vtophys(ptep);
+ l2b->l2b_l1idx = l1idx;
+ }
+
+ return (l2b);
+}
+
+static PMAP_INLINE void
+#ifndef PMAP_INCLUDE_PTE_SYNC
+pmap_free_l2_ptp(pt_entry_t *l2)
+#else
+pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
+#endif
+{
+#ifdef PMAP_INCLUDE_PTE_SYNC
+ /*
+ * Note: With a write-back cache, we may need to sync this
+ * L2 table before re-using it.
+ * This is because it may have belonged to a non-current
+ * pmap, in which case the cache syncs would have been
+ * skipped when the pages were being unmapped. If the
+ * L2 table were then to be immediately re-allocated to
+ * the *current* pmap, it may well contain stale mappings
+ * which have not yet been cleared by a cache write-back
+ * and so would still be visible to the mmu.
+ */
+ if (need_sync)
+ PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+#endif
+ uma_zfree(l2zone, l2);
+}
+
+/*
+ * One or more mappings in the specified L2 descriptor table have just been
+ * invalidated.
+ *
+ * Garbage collect the metadata and descriptor table itself if necessary.
+ *
+ * The pmap lock must be acquired when this is called (not necessary
+ * for the kernel pmap).
+ */
+static void
+pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
+{
+ struct l2_dtable *l2;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep;
+ u_short l1idx;
+
+
+ /*
+ * Update the bucket's reference count according to how many
+ * PTEs the caller has just invalidated.
+ */
+ l2b->l2b_occupancy -= count;
+
+ /*
+ * Note:
+ *
+ * Level 2 page tables allocated to the kernel pmap are never freed
+ * as that would require checking all Level 1 page tables and
+ * removing any references to the Level 2 page table. See also the
+ * comment elsewhere about never freeing bootstrap L2 descriptors.
+ *
+ * We make do with just invalidating the mapping in the L2 table.
+ *
+ * This isn't really a big deal in practice and, in fact, leads
+ * to a performance win over time as we don't need to continually
+ * alloc/free.
+ */
+ if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
+ return;
+
+ /*
+ * There are no more valid mappings in this level 2 page table.
+ * Go ahead and NULL-out the pointer in the bucket, then
+ * free the page table.
+ */
+ l1idx = l2b->l2b_l1idx;
+ ptep = l2b->l2b_kva;
+ l2b->l2b_kva = NULL;
+
+ pl1pd = &pm->pm_l1->l1_kva[l1idx];
+
+ /*
+ * If the L1 slot matches the pmap's domain
+ * number, then invalidate it.
+ */
+ l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
+ if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
+ *pl1pd = 0;
+ PTE_SYNC(pl1pd);
+ }
+
+ /*
+ * Release the L2 descriptor table back to the pool cache.
+ */
+#ifndef PMAP_INCLUDE_PTE_SYNC
+ pmap_free_l2_ptp(ptep);
+#else
+ pmap_free_l2_ptp(!pmap_is_cached(pm), ptep);
+#endif
+
+ /*
+ * Update the reference count in the associated l2_dtable
+ */
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+ if (--l2->l2_occupancy > 0)
+ return;
+
+ /*
+ * There are no more valid mappings in any of the Level 1
+ * slots managed by this l2_dtable. Go ahead and NULL-out
+ * the pointer in the parent pmap and free the l2_dtable.
+ */
+ pm->pm_l2[L2_IDX(l1idx)] = NULL;
+ pmap_free_l2_dtable(l2);
+}
+
+/*
+ * Pool cache constructors for L2 descriptor tables, metadata and pmap
+ * structures.
+ */
+static void
+pmap_l2ptp_ctor(void *mem, int size, void *arg)
+{
+#ifndef PMAP_INCLUDE_PTE_SYNC
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
+ vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;
+
+ /*
+ * The mappings for these page tables were initially made using
+ * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
+ * mode will not be right for page table mappings. To avoid
+ * polluting the pmap_kenter_pa() code with a special case for
+ * page tables, we simply fix up the cache-mode here if it's not
+ * correct.
+ */
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+
+ if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
+ /*
+ * Page tables must have the cache-mode set to Write-Thru.
+ */
+ *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(va);
+ cpu_cpwait();
+ }
+#endif
+
+ memset(mem, 0, L2_TABLE_SIZE_REAL);
+ PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+}
+
+/*
+ * A bunch of routines to conditionally flush the caches/TLB depending
+ * on whether the specified pmap actually needs to be flushed at any
+ * given time.
+ */
+static PMAP_INLINE void
+pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
+{
+
+ if (pm->pm_cstate.cs_tlb_id)
+ cpu_tlb_flushID_SE(va);
+}
+
+static PMAP_INLINE void
+pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
+{
+
+ if (pm->pm_cstate.cs_tlb_d)
+ cpu_tlb_flushD_SE(va);
+}
+
+static PMAP_INLINE void
+pmap_tlb_flushID(pmap_t pm)
+{
+
+ if (pm->pm_cstate.cs_tlb_id) {
+ cpu_tlb_flushID();
+ pm->pm_cstate.cs_tlb = 0;
+ }
+}
+
+static PMAP_INLINE void
+pmap_tlb_flushD(pmap_t pm)
+{
+
+ if (pm->pm_cstate.cs_tlb_d) {
+ cpu_tlb_flushD();
+ pm->pm_cstate.cs_tlb_d = 0;
+ }
+}
+
+static PMAP_INLINE void
+pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
+{
+
+ if (pm->pm_cstate.cs_cache_id)
+ cpu_idcache_wbinv_range(va, len);
+}
+
+static PMAP_INLINE void
+pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len,
+ boolean_t do_inv, boolean_t rd_only)
+{
+
+ if (pm->pm_cstate.cs_cache_d) {
+ if (do_inv) {
+ if (rd_only)
+ cpu_dcache_inv_range(va, len);
+ else
+ cpu_dcache_wbinv_range(va, len);
+ } else
+ if (!rd_only)
+ cpu_dcache_wb_range(va, len);
+ }
+}
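+
+/*
+ * The (do_inv, rd_only) combinations above select a cache op thus:
+ * do_inv=TRUE,  rd_only=FALSE -> cpu_dcache_wbinv_range()
+ * do_inv=TRUE,  rd_only=TRUE  -> cpu_dcache_inv_range() (nothing
+ *                               dirty to write back)
+ * do_inv=FALSE, rd_only=FALSE -> cpu_dcache_wb_range()
+ * do_inv=FALSE, rd_only=TRUE  -> no work required
+ */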
+
+static PMAP_INLINE void
+pmap_idcache_wbinv_all(pmap_t pm)
+{
+
+ if (pm->pm_cstate.cs_cache_id) {
+ cpu_idcache_wbinv_all();
+ pm->pm_cstate.cs_cache = 0;
+ }
+}
+
+static PMAP_INLINE void
+pmap_dcache_wbinv_all(pmap_t pm)
+{
+
+ if (pm->pm_cstate.cs_cache_d) {
+ cpu_dcache_wbinv_all();
+ pm->pm_cstate.cs_cache_d = 0;
+ }
+}
+
+static PMAP_INLINE boolean_t
+pmap_is_current(pmap_t pm)
+{
+
+ if (pm == pmap_kernel() ||
+ (curproc && curproc->p_vmspace->vm_map.pmap == pm))
+ return (TRUE);
+
+ return (FALSE);
+}
+
+static PMAP_INLINE boolean_t
+pmap_is_cached(pmap_t pm)
+{
+
+ if (pm == pmap_kernel() || pmap_cache_state == NULL ||
+ pmap_cache_state == &pm->pm_cstate)
+ return (TRUE);
+
+ return (FALSE);
+}
+
+/*
+ * PTE_SYNC_CURRENT:
+ *
+ * Make sure the pte is written out to RAM.
+ * We need to do this in one of the following cases:
+ * - We're dealing with the kernel pmap
+ * - There is no pmap active in the cache/tlb.
+ * - The specified pmap is 'active' in the cache/tlb.
+ */
+#ifdef PMAP_INCLUDE_PTE_SYNC
+#define PTE_SYNC_CURRENT(pm, ptep) \
+do { \
+ if (PMAP_NEEDS_PTE_SYNC && \
+ pmap_is_cached(pm)) \
+ PTE_SYNC(ptep); \
+} while (/*CONSTCOND*/0)
+#else
+#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */
+#endif
+
+/*
+ * Since we have a virtually indexed cache, we may need to inhibit caching if
+ * there is more than one mapping and at least one of them is writable.
+ * Since we purge the cache on every context switch, we only need to check for
+ * other mappings within the same pmap, or kernel_pmap.
+ * This function is also called when a page is unmapped, to possibly reenable
+ * caching on any remaining mappings.
+ *
+ * The code implements the following logic, where:
+ *
+ * KW = # of kernel read/write pages
+ * KR = # of kernel read only pages
+ * UW = # of user read/write pages
+ * UR = # of user read only pages
+ *
+ * KC = kernel mapping is cacheable
+ * UC = user mapping is cacheable
+ *
+ * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
+ * +---------------------------------------------
+ * UW=0,UR=0 | --- KC=1 KC=1 KC=0
+ * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
+ * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
+ * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
+ */
+
+static const int pmap_vac_flags[4][4] = {
+ {-1, 0, 0, PVF_KNC},
+ {0, 0, PVF_NC, PVF_NC},
+ {0, PVF_NC, PVF_NC, PVF_NC},
+ {PVF_UNC, PVF_NC, PVF_NC, PVF_NC}
+};
+
+static PMAP_INLINE int
+pmap_get_vac_flags(const struct vm_page *pg)
+{
+ int kidx, uidx;
+
+ kidx = 0;
+ if (pg->md.kro_mappings || pg->md.krw_mappings > 1)
+ kidx |= 1;
+ if (pg->md.krw_mappings)
+ kidx |= 2;
+
+ uidx = 0;
+ if (pg->md.uro_mappings || pg->md.urw_mappings > 1)
+ uidx |= 1;
+ if (pg->md.urw_mappings)
+ uidx |= 2;
+
+ return (pmap_vac_flags[uidx][kidx]);
+}
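+
+/*
+ * Worked example: a page with one kernel read/write mapping
+ * (krw_mappings == 1, kro_mappings == 0) and one user read-only
+ * mapping (uro_mappings == 1, urw_mappings == 0) yields kidx == 2
+ * and uidx == 1, so pmap_vac_flags[1][2] == PVF_NC: both mappings
+ * must be made non-cacheable, matching the KW=1,KR=0 / UW=0,UR>0
+ * cell (KC=0,UC=0) of the table above.
+ */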
+
+static __inline void
+pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+{
+ int nattr;
+
+ nattr = pmap_get_vac_flags(pg);
+
+ if (nattr < 0) {
+ pg->md.pvh_attrs &= ~PVF_NC;
+ return;
+ }
+
+ if (nattr == 0 && (pg->md.pvh_attrs & PVF_NC) == 0) {
+ return;
+ }
+
+ if (pm == pmap_kernel())
+ pmap_vac_me_kpmap(pg, pm, va);
+ else
+ pmap_vac_me_user(pg, pm, va);
+
+ pg->md.pvh_attrs = (pg->md.pvh_attrs & ~PVF_NC) | nattr;
+}
+
+static void
+pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+{
+ u_int u_cacheable, u_entries;
+ struct pv_entry *pv;
+ pmap_t last_pmap = pm;
+
+ /*
+ * Pass one, see if there are both kernel and user pmaps for
+ * this page. Calculate whether there are user-writable or
+ * kernel-writable pages.
+ */
+ u_cacheable = 0;
+ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+ if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
+ u_cacheable++;
+ }
+
+ u_entries = pg->md.urw_mappings + pg->md.uro_mappings;
+
+ /*
+ * We know we have just been updating a kernel entry, so if
+ * all user pages are already cacheable, then there is nothing
+ * further to do.
+ */
+ if (pg->md.k_mappings == 0 && u_cacheable == u_entries)
+ return;
+
+ if (u_entries) {
+ /*
+ * Scan over the list again, for each entry, if it
+ * might not be set correctly, call pmap_vac_me_user
+ * to recalculate the settings.
+ */
+ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+ /*
+ * We know kernel mappings will get set
+ * correctly in other calls. We also know
+ * that if the pmap is the same as last_pmap
+ * then we've just handled this entry.
+ */
+ if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
+ continue;
+
+ /*
+ * If there are kernel entries and this page
+ * is writable but non-cacheable, then we can
+ * skip this entry also.
+ */
+ if (pg->md.k_mappings &&
+ (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
+ (PVF_NC | PVF_WRITE))
+ continue;
+
+ /*
+ * Similarly if there are no kernel-writable
+ * entries and the page is already
+ * read-only/cacheable.
+ */
+ if (pg->md.krw_mappings == 0 &&
+ (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
+ continue;
+
+ /*
+ * For some of the remaining cases, we know
+ * that we must recalculate, but for others we
+ * can't tell if they are correct or not, so
+ * we recalculate anyway.
+ */
+ pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0);
+ }
+
+ if (pg->md.k_mappings == 0)
+ return;
+ }
+
+ pmap_vac_me_user(pg, pm, va);
+}
+
+static void
+pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+{
+ pmap_t kpmap = pmap_kernel();
+ struct pv_entry *pv, *npv;
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
+ u_int entries = 0;
+ u_int writable = 0;
+ u_int cacheable_entries = 0;
+ u_int kern_cacheable = 0;
+ u_int other_writable = 0;
+
+ /*
+ * Count mappings and writable mappings in this pmap.
+ * Include kernel mappings as part of our own.
+ * Keep a pointer to the first one.
+ */
+ npv = TAILQ_FIRST(&pg->md.pv_list);
+ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+ /* Count mappings in the same pmap */
+ if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
+ if (entries++ == 0)
+ npv = pv;
+
+ /* Cacheable mappings */
+ if ((pv->pv_flags & PVF_NC) == 0) {
+ cacheable_entries++;
+ if (kpmap == pv->pv_pmap)
+ kern_cacheable++;
+ }
+
+ /* Writable mappings */
+ if (pv->pv_flags & PVF_WRITE)
+ ++writable;
+ } else
+ if (pv->pv_flags & PVF_WRITE)
+ other_writable = 1;
+ }
+
+ /*
+ * Enable or disable caching as necessary.
+ * Note: the first entry might be part of the kernel pmap,
+ * so we can't assume this is indicative of the state of the
+ * other (maybe non-kpmap) entries.
+ */
+ if ((entries > 1 && writable) ||
+ (entries > 0 && pm == kpmap && other_writable)) {
+ if (cacheable_entries == 0)
+ return;
+
+ for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) {
+ if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
+ (pv->pv_flags & PVF_NC))
+ continue;
+
+ pv->pv_flags |= PVF_NC;
+
+ l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+ ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+ pte = *ptep & ~L2_S_CACHE_MASK;
+
+ if ((va != pv->pv_va || pm != pv->pv_pmap) &&
+ l2pte_valid(pte)) {
+ if (PV_BEEN_EXECD(pv->pv_flags)) {
+ pmap_idcache_wbinv_range(pv->pv_pmap,
+ pv->pv_va, PAGE_SIZE);
+ pmap_tlb_flushID_SE(pv->pv_pmap,
+ pv->pv_va);
+ } else
+ if (PV_BEEN_REFD(pv->pv_flags)) {
+ pmap_dcache_wb_range(pv->pv_pmap,
+ pv->pv_va, PAGE_SIZE, TRUE,
+ (pv->pv_flags & PVF_WRITE) == 0);
+ pmap_tlb_flushD_SE(pv->pv_pmap,
+ pv->pv_va);
+ }
+ }
+
+ *ptep = pte;
+ PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
+ }
+ cpu_cpwait();
+ } else
+ if (entries > cacheable_entries) {
+ /*
+ * Turn caching back on for some pages. If it is a kernel
+ * page, only do so if there are no other writable pages.
+ */
+ for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) {
+ if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
+ (kpmap != pv->pv_pmap || other_writable)))
+ continue;
+
+ pv->pv_flags &= ~PVF_NC;
+
+ l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+ ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+ pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
+
+ if (l2pte_valid(pte)) {
+ if (PV_BEEN_EXECD(pv->pv_flags)) {
+ pmap_tlb_flushID_SE(pv->pv_pmap,
+ pv->pv_va);
+ } else
+ if (PV_BEEN_REFD(pv->pv_flags)) {
+ pmap_tlb_flushD_SE(pv->pv_pmap,
+ pv->pv_va);
+ }
+ }
+
+ *ptep = pte;
+ PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
+ }
+ }
+}
+
+/*
+ * Modify pte bits for all ptes corresponding to the given physical address.
+ * We use `maskbits' rather than `clearbits' because we're always passing
+ * constants and the latter would require an extra inversion at run-time.
+ */
+static void
+pmap_clearbit(struct vm_page *pg, u_int maskbits)
+{
+ struct l2_bucket *l2b;
+ struct pv_entry *pv;
+ pt_entry_t *ptep, npte, opte;
+ pmap_t pm;
+ vm_offset_t va;
+ u_int oflags;
+
+#if 0
+ PMAP_HEAD_TO_MAP_LOCK();
+ simple_lock(&pg->mdpage.pvh_slock);
+#endif
+
+ /*
+ * Clear saved attributes (modify, reference)
+ */
+ pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
+
+ if (TAILQ_EMPTY(&pg->md.pv_list)) {
+#if 0
+ simple_unlock(&pg->mdpage.pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
+#endif
+ return;
+ }
+
+ /*
+ * Loop over all current mappings setting/clearing as appropriate
+ */
+ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+ va = pv->pv_va;
+ pm = pv->pv_pmap;
+ oflags = pv->pv_flags;
+ pv->pv_flags &= ~maskbits;
+
+#if 0
+ pmap_acquire_pmap_lock(pm);
+#endif
+
+ l2b = pmap_get_l2_bucket(pm, va);
+
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ npte = opte = *ptep;
+
+ if (maskbits & (PVF_WRITE|PVF_MOD)) {
+ if ((pv->pv_flags & PVF_NC)) {
+ /*
+ * Entry is not cacheable:
+ *
+ * Don't turn caching on again if this is a
+ * modified emulation. This would be
+ * inconsistent with the settings created by
+ * pmap_vac_me_harder(). Otherwise, it's safe
+ * to re-enable caching.
+ *
+ * There's no need to call pmap_vac_me_harder()
+ * here: all pages are losing their write
+ * permission.
+ */
+ if (maskbits & PVF_WRITE) {
+ npte |= pte_l2_s_cache_mode;
+ pv->pv_flags &= ~PVF_NC;
+ }
+ } else
+ if (opte & L2_S_PROT_W) {
+ /*
+ * Entry is writable/cacheable: if the pmap is
+ * current, flush the entry from the cache;
+ * otherwise it won't be in the cache.
+ */
+ if (PV_BEEN_EXECD(oflags))
+ pmap_idcache_wbinv_range(pm, pv->pv_va,
+ PAGE_SIZE);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_dcache_wb_range(pm, pv->pv_va,
+ PAGE_SIZE,
+ (maskbits & PVF_REF) ? TRUE : FALSE,
+ FALSE);
+ }
+
+ /* make the pte read only */
+ npte &= ~L2_S_PROT_W;
+
+ if (maskbits & PVF_WRITE) {
+ /*
+ * Keep alias accounting up to date
+ */
+ if (pv->pv_pmap == pmap_kernel()) {
+ if (oflags & PVF_WRITE) {
+ pg->md.krw_mappings--;
+ pg->md.kro_mappings++;
+ }
+ } else
+ if (oflags & PVF_WRITE) {
+ pg->md.urw_mappings--;
+ pg->md.uro_mappings++;
+ }
+ }
+ }
+
+ if (maskbits & PVF_REF) {
+ if ((pv->pv_flags & PVF_NC) == 0 &&
+ (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
+ /*
+ * Check npte here; we may have already
+ * done the wbinv above, and the validity
+ * of the PTE is the same for opte and
+ * npte.
+ */
+ if (npte & L2_S_PROT_W) {
+ if (PV_BEEN_EXECD(oflags))
+ pmap_idcache_wbinv_range(pm,
+ pv->pv_va, PAGE_SIZE);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_dcache_wb_range(pm,
+ pv->pv_va, PAGE_SIZE,
+ TRUE, FALSE);
+ } else
+ if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
+ /* XXXJRT need idcache_inv_range */
+ if (PV_BEEN_EXECD(oflags))
+ pmap_idcache_wbinv_range(pm,
+ pv->pv_va, PAGE_SIZE);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_dcache_wb_range(pm,
+ pv->pv_va, PAGE_SIZE,
+ TRUE, TRUE);
+ }
+ }
+
+ /*
+ * Make the PTE invalid so that we will take a
+ * page fault the next time the mapping is
+ * referenced.
+ */
+ npte &= ~L2_TYPE_MASK;
+ npte |= L2_TYPE_INV;
+ }
+
+ if (npte != opte) {
+ *ptep = npte;
+ PTE_SYNC(ptep);
+ /* Flush the TLB entry if a current pmap. */
+ if (PV_BEEN_EXECD(oflags))
+ pmap_tlb_flushID_SE(pm, pv->pv_va);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_tlb_flushD_SE(pm, pv->pv_va);
+ }
+
+#if 0
+ pmap_release_pmap_lock(pm);
+#endif
+
+ }
+
+#if 0
+ simple_unlock(&pg->mdpage.pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
+#endif
+}
+
+/*
+ * main pv_entry manipulation functions:
+ * pmap_enter_pv: enter a mapping onto a vm_page list
+ * pmap_remove_pv: remove a mapping from a vm_page list
+ *
+ * NOTE: pmap_enter_pv expects to lock the pvh itself
+ * pmap_remove_pv expects the caller to lock the pvh before calling
+ */
+
+/*
+ * pmap_enter_pv: enter a mapping onto a vm_page list
+ *
+ * => caller should hold the proper lock on pmap_main_lock
+ * => caller should have pmap locked
+ * => we will gain the lock on the vm_page and allocate the new pv_entry
+ * => caller should adjust ptp's wire_count before calling
+ * => caller should not adjust pmap's wire_count
+ */
+static void
+pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
+ vm_offset_t va, u_int flags)
+{
+
+
+ pve->pv_pmap = pm;
+ pve->pv_va = va;
+ pve->pv_flags = flags;
+
+#if 0
+ mtx_lock(&pg->md.pvh_mtx);
+ TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
+#endif
+
+ TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
+ pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
+ if (pm == pmap_kernel()) {
+ if (flags & PVF_WRITE)
+ pg->md.krw_mappings++;
+ else
+ pg->md.kro_mappings++;
+ }
+ if (flags & PVF_WRITE)
+ pg->md.urw_mappings++;
+ else
+ pg->md.uro_mappings++;
+#if 0
+ mtx_unlock(&pg->md.pvh_mtx);
+#endif
+ if (pve->pv_flags & PVF_WIRED)
+ ++pm->pm_stats.wired_count;
+}
+
+/*
+ *
+ * pmap_find_pv: Find a pv entry
+ *
+ * => caller should hold lock on vm_page
+ */
+static PMAP_INLINE struct pv_entry *
+pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+{
+ struct pv_entry *pv;
+
+ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
+ if (pm == pv->pv_pmap && va == pv->pv_va)
+ break;
+ return (pv);
+}
+
+/*
+ * vector_page_setprot:
+ *
+ * Manipulate the protection of the vector page.
+ */
+void
+vector_page_setprot(int prot)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep;
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
+
+ ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
+
+ *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(vector_page);
+ cpu_cpwait();
+}
+
+/*
+ * pmap_remove_pv: try to remove a mapping from a pv_list
+ *
+ * => caller should hold proper lock on pmap_main_lock
+ * => pmap should be locked
+ * => caller should hold lock on vm_page [so that attrs can be adjusted]
+ * => caller should adjust ptp's wire_count and free PTP if needed
+ * => caller should NOT adjust pmap's wire_count
+ * => we return the removed pve
+ */
+static struct pv_entry *
+pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+{
+ struct pv_entry *pve, **prevptr;
+
+
+ prevptr = &TAILQ_FIRST(&pg->md.pv_list);/* previous pv_entry pointer */
+ pve = *prevptr;
+
+ while (pve) {
+ if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */
+ *prevptr = TAILQ_NEXT(pve, pv_list); /* remove it! */
+ if (pve->pv_flags & PVF_WIRED)
+ --pm->pm_stats.wired_count;
+ if (pm == pmap_kernel()) {
+ if (pve->pv_flags & PVF_WRITE)
+ pg->md.krw_mappings--;
+ else
+ pg->md.kro_mappings--;
+ } else
+ if (pve->pv_flags & PVF_WRITE)
+ pg->md.urw_mappings--;
+ else
+ pg->md.uro_mappings--;
+ break;
+ }
+ prevptr = &TAILQ_NEXT(pve, pv_list);
+ pve = TAILQ_NEXT(pve, pv_list);
+ }
+
+ return (pve); /* return removed pve */
+}
+
+/*
+ *
+ * pmap_modify_pv: Update pv flags
+ *
+ * => caller should hold lock on vm_page [so that attrs can be adjusted]
+ * => caller should NOT adjust pmap's wire_count
+ * => caller must call pmap_vac_me_harder() if writable status of a page
+ * may have changed.
+ * => we return the old flags
+ *
+ * Modify a physical-virtual mapping in the pv table
+ */
+static u_int
+pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
+ u_int clr_mask, u_int set_mask)
+{
+ struct pv_entry *npv;
+ u_int flags, oflags;
+
+ if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
+ return (0);
+
+ /*
+ * There is at least one VA mapping this page.
+ */
+
+ if (clr_mask & (PVF_REF | PVF_MOD))
+ pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
+
+ oflags = npv->pv_flags;
+ npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
+
+ if ((flags ^ oflags) & PVF_WIRED) {
+ if (flags & PVF_WIRED)
+ ++pm->pm_stats.wired_count;
+ else
+ --pm->pm_stats.wired_count;
+ }
+
+ if ((flags ^ oflags) & PVF_WRITE) {
+ if (pm == pmap_kernel()) {
+ if (flags & PVF_WRITE) {
+ pg->md.krw_mappings++;
+ pg->md.kro_mappings--;
+ } else {
+ pg->md.kro_mappings++;
+ pg->md.krw_mappings--;
+ }
+ } else
+ if (flags & PVF_WRITE) {
+ pg->md.urw_mappings++;
+ pg->md.uro_mappings--;
+ } else {
+ pg->md.uro_mappings++;
+ pg->md.urw_mappings--;
+ }
+ }
+
+ return (oflags);
+}
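+
+/*
+ * For example, pmap_modify_pv(pg, pm, va, PVF_WRITE, 0) revokes the
+ * write flag on one mapping (adjusting the krw/kro or urw/uro counts
+ * as a side effect) and returns the old flags so the caller can tell
+ * whether anything actually changed.
+ */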
+
+/* Function to set the debug level of the pmap code */
+#ifdef PMAP_DEBUG
+void
+pmap_debug(int level)
+{
+ pmap_debug_level = level;
+ dprintf("pmap_debug: level=%d\n", pmap_debug_level);
+}
+#endif /* PMAP_DEBUG */
+
+
+void
+pmap_pinit0(struct pmap *pmap)
+{
+ PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
+
+ dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
+ (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
+ pmap_pinit(pmap);
+}
+
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ * pmap_init has been enhanced to support, in a fairly consistent
+ * way, discontiguous physical memory.
+ */
+void
+pmap_init(void)
+{
+ int i;
+
+ PDEBUG(1, printf("pmap_init: phys_start = %08x\n"));
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * pv_head_table.
+ */
+ for (i = 0; i < vm_page_array_size; i++) {
+ vm_page_t m;
+
+ m = &vm_page_array[i];
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
+
+ /*
+ * init the pv free list
+ */
+ pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
+ NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+ uma_prealloc(pvzone, MINPV);
+ l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
+ UMA_ZONE_VM | UMA_ZONE_NOFREE);
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ pmap_initialized = TRUE;
+ PDEBUG(1, printf("pmap_init: done!\n"));
+}
+
+int
+pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
+{
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep, pte;
+ vm_paddr_t pa;
+ u_int l1idx;
+ int rv = 0;
+
+#if 0
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+#endif
+ l1idx = L1_IDX(va);
+
+ /*
+ * If there is no l2_dtable for this address, then the process
+ * has no business accessing it.
+ *
+ * Note: This will catch userland processes trying to access
+ * kernel addresses.
+ */
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+ if (l2 == NULL)
+ goto out;
+
+ /*
+ * Likewise if there is no L2 descriptor table
+ */
+ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+ if (l2b->l2b_kva == NULL)
+ goto out;
+
+ /*
+ * Check the PTE itself.
+ */
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+ if (pte == 0)
+ goto out;
+
+ /*
+ * Catch a userland access to the vector page mapped at 0x0
+ */
+ if (user && (pte & L2_S_PROT_U) == 0)
+ goto out;
+
+ pa = l2pte_pa(pte);
+
+ if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
+ /*
+ * This looks like a good candidate for "page modified"
+ * emulation...
+ */
+ struct pv_entry *pv;
+ struct vm_page *pg;
+
+ /* Extract the physical address of the page */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
+ goto out;
+ }
+ /* Get the current flags for this page. */
+
+ pv = pmap_find_pv(pg, pm, va);
+ if (pv == NULL) {
+ goto out;
+ }
+
+ /*
+ * Do the flags say this page is writable? If not then it
+ * is a genuine write fault. If yes then the write fault is
+ * our fault as we did not reflect the write access in the
+ * PTE. Now we know a write has occurred we can correct this
+ * and also set the modified bit
+ */
+ if ((pv->pv_flags & PVF_WRITE) == 0) {
+ goto out;
+ }
+
+ pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
+ pv->pv_flags |= PVF_REF | PVF_MOD;
+
+ /*
+ * Re-enable write permissions for the page. No need to call
+ * pmap_vac_me_harder(), since this is just a
+ * modified-emulation fault, and the PVF_WRITE bit isn't
+ * changing. We've already set the cacheable bits based on
+ * the assumption that we can write to this page.
+ */
+ *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
+ PTE_SYNC(ptep);
+ rv = 1;
+ } else
+ if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
+ /*
+ * This looks like a good candidate for "page referenced"
+ * emulation.
+ */
+ struct pv_entry *pv;
+ struct vm_page *pg;
+
+ /* Extract the physical address of the page */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+ goto out;
+
+ /* Get the current flags for this page. */
+
+ pv = pmap_find_pv(pg, pm, va);
+ if (pv == NULL) {
+ goto out;
+ }
+
+ pg->md.pvh_attrs |= PVF_REF;
+ pv->pv_flags |= PVF_REF;
+
+
+ *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
+ PTE_SYNC(ptep);
+ rv = 1;
+ }
+
+ /*
+ * We know there is a valid mapping here, so simply
+ * fix up the L1 if necessary.
+ */
+ pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
+ if (*pl1pd != l1pd) {
+ *pl1pd = l1pd;
+ PTE_SYNC(pl1pd);
+ rv = 1;
+ }
+
+#ifdef CPU_SA110
+ /*
+ * There are bugs in the rev K SA110. This is a check for one
+ * of them.
+ */
+ if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
+ curcpu()->ci_arm_cpurev < 3) {
+ /* Always current pmap */
+ if (l2pte_valid(pte)) {
+ extern int kernel_debug;
+ if (kernel_debug & 1) {
+ struct proc *p = curlwp->l_proc;
+ printf("prefetch_abort: page is already "
+ "mapped - pte=%p *pte=%08x\n", ptep, pte);
+ printf("prefetch_abort: pc=%08lx proc=%p "
+ "process=%s\n", va, p, p->p_comm);
+ printf("prefetch_abort: far=%08x fs=%x\n",
+ cpu_faultaddress(), cpu_faultstatus());
+ }
+#ifdef DDB
+ if (kernel_debug & 2)
+ Debugger();
+#endif
+ rv = 1;
+ }
+ }
+#endif /* CPU_SA110 */
+
+#ifdef DEBUG
+ /*
+ * If 'rv == 0' at this point, it generally indicates that there is a
+ * stale TLB entry for the faulting address. This happens when two or
+ * more processes are sharing an L1. Since we don't flush the TLB on
+ * a context switch between such processes, we can take domain faults
+ * for mappings which exist at the same VA in both processes. EVEN IF
+ * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
+ * example.
+ *
+ * This is extremely likely to happen if pmap_enter() updated the L1
+ * entry for a recently entered mapping. In this case, the TLB is
+ * flushed for the new mapping, but there may still be TLB entries for
+ * other mappings belonging to other processes in the 1MB range
+ * covered by the L1 entry.
+ *
+ * Since 'rv == 0', we know that the L1 already contains the correct
+ * value, so the fault must be due to a stale TLB entry.
+ *
+ * Since we always need to flush the TLB anyway in the case where we
+ * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
+ * stale TLB entries dynamically.
+ *
+ * However, the above condition can ONLY happen if the current L1 is
+ * being shared. If it happens when the L1 is unshared, it indicates
+ * that other parts of the pmap are not doing their job WRT managing
+ * the TLB.
+ */
+ if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
+ extern int last_fault_code;
+ printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
+ pm, va, ftype);
+ printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
+ l2, l2b, ptep, pl1pd);
+ printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
+ pte, l1pd, last_fault_code);
+#ifdef DDB
+ Debugger();
+#endif
+ }
+#endif
+
+ cpu_tlb_flushID_SE(va);
+ cpu_cpwait();
+
+ rv = 1;
+
+out:
+#if 0
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+#endif
+ return (rv);
+}
+
+/*
+ * Initialize the address space (zone) for the pv_entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+void
+pmap_init2(void)
+{
+ int shpgperproc = PMAP_SHPGPERPROC;
+ struct l2_bucket *l2b;
+ struct l1_ttable *l1;
+ pd_entry_t *pl1pt;
+ pt_entry_t *ptep, pte;
+ vm_offset_t va, eva;
+ u_int loop, needed;
+ int i;
+
+
+ TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
+
+ pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
+ NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+ uma_prealloc(l2zone, 512);
+
+ uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+ uma_zone_set_obj(l2zone, &l2zone_obj, pv_entry_max);
+
+ needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
+ needed -= 1;
+ l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
+
+ for (loop = 0; loop < needed; loop++, l1++) {
+ /* Allocate a L1 page table */
+ va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, M_WAITOK,
+ 0x0, 0xffffffff, L1_TABLE_SIZE, 0);
+
+ if (va == 0)
+ panic("Cannot allocate L1 KVM");
+
+
+ eva = va + L1_TABLE_SIZE;
+ pl1pt = (pd_entry_t *)va;
+
+ for (i = 0; i < (L1_TABLE_SIZE / PAGE_SIZE) && va < eva; i++) {
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+ pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
+ *ptep = pte;
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(va);
+
+ va += PAGE_SIZE;
+ }
+ pmap_init_l1(l1, pl1pt);
+ }
+
+
+#ifdef DEBUG
+ printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
+ needed);
+#endif
+}
+
+/*
+ * This is used to stuff certain critical values into the PCB where they
+ * can be accessed quickly from cpu_switch() et al.
+ */
+void
+pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
+{
+ struct l2_bucket *l2b;
+
+ pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
+ pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
+ (DOMAIN_CLIENT << (pm->pm_domain * 2));
+ pcb->pcb_cstate = (void *)&pm->pm_cstate;
+
+ if (vector_page < KERNBASE) {
+ pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
+ l2b = pmap_get_l2_bucket(pm, vector_page);
+ pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
+ L1_C_DOM(pm->pm_domain);
+ } else
+ pcb->pcb_pl1vec = NULL;
+}
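+
+/*
+ * Example DACR value, assuming DOMAIN_CLIENT == 1 and
+ * PMAP_DOMAIN_KERNEL == 0: a pmap holding domain 2 gets
+ * (1 << 0) | (1 << 4) == 0x11, i.e. client access for the kernel's
+ * domain and its own, with every other domain faulting.
+ */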
+
+void
+pmap_activate(struct thread *td)
+{
+ pmap_t pm;
+ struct pcb *pcb;
+
+ pm = td->td_proc->p_vmspace->vm_map.pmap;
+ pcb = td->td_pcb;
+
+ critical_enter();
+ pmap_set_pcb_pagedir(pm, pcb);
+
+ if (td == curthread) {
+ u_int cur_dacr, cur_ttb;
+
+ __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
+ __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
+
+ cur_ttb &= ~(L1_TABLE_SIZE - 1);
+
+ if (cur_ttb == (u_int)pcb->pcb_pagedir &&
+ cur_dacr == pcb->pcb_dacr) {
+ /*
+ * No need to switch address spaces.
+ */
+ critical_exit();
+ return;
+ }
+
+ disable_interrupts(I32_bit | F32_bit);
+
+ /*
+ * We MUST, I repeat, MUST fix up the L1 entry corresponding
+ * to 'vector_page' in the incoming L1 table before switching
+ * to it otherwise subsequent interrupts/exceptions (including
+ * domain faults!) will jump into hyperspace.
+ */
+ if (pcb->pcb_pl1vec) {
+
+ *pcb->pcb_pl1vec = pcb->pcb_l1vec;
+ /*
+ * Don't need to PTE_SYNC() at this point since
+ * cpu_setttb() is about to flush both the cache
+ * and the TLB.
+ */
+ }
+
+ cpu_domains(pcb->pcb_dacr);
+ cpu_setttb(pcb->pcb_pagedir);
+
+ enable_interrupts(I32_bit | F32_bit);
+
+ /*
+ * Flag any previous userland pmap as being NOT
+ * resident in the cache/tlb.
+ */
+ if (pmap_cache_state && pmap_cache_state != &pm->pm_cstate)
+ pmap_cache_state->cs_all = 0;
+
+ /*
+ * The new pmap, however, IS resident.
+ */
+ pmap_cache_state = &pm->pm_cstate;
+ pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
+ }
+ critical_exit();
+}
+
+static int
+pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
+{
+ pd_entry_t *pdep, pde;
+ pt_entry_t *ptep, pte;
+ vm_offset_t pa;
+ int rv = 0;
+
+ /*
+ * Make sure the descriptor itself has the correct cache mode
+ */
+ pdep = &kl1[L1_IDX(va)];
+ pde = *pdep;
+
+ if (l1pte_section_p(pde)) {
+ if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
+ *pdep = (pde & ~L1_S_CACHE_MASK) |
+ pte_l1_s_cache_mode_pt;
+ PTE_SYNC(pdep);
+ cpu_dcache_wbinv_range((vm_offset_t)pdep,
+ sizeof(*pdep));
+ rv = 1;
+ }
+ } else {
+ pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
+ ptep = (pt_entry_t *)kernel_pt_lookup(pa);
+ if (ptep == NULL)
+ panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);
+
+ ptep = &ptep[l2pte_index(va)];
+ pte = *ptep;
+ if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
+ *ptep = (pte & ~L2_S_CACHE_MASK) |
+ pte_l2_s_cache_mode_pt;
+ PTE_SYNC(ptep);
+ cpu_dcache_wbinv_range((vm_offset_t)ptep,
+ sizeof(*ptep));
+ rv = 1;
+ }
+ }
+
+ return (rv);
+}
+
+static void
+pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
+ pt_entry_t **ptep)
+{
+ vm_offset_t va = *availp;
+ struct l2_bucket *l2b;
+
+ if (ptep) {
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ if (l2b == NULL)
+ panic("pmap_alloc_specials: no l2b for 0x%x", va);
+
+ *ptep = &l2b->l2b_kva[l2pte_index(va)];
+ }
+
+ *vap = va;
+ *availp = va + (PAGE_SIZE * pages);
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ *
+ * On the arm this is called after mapping has already been enabled
+ * and just syncs the pmap module with what has already been done.
+ * [We can't call it easily with mapping off since the kernel is not
+ * mapped with PA == VA, hence we would have to relocate every address
+ * from the linked base (virtual) address "KERNBASE" to the actual
+ * (physical) address starting relative to 0]
+ */
+#define PMAP_STATIC_L2_SIZE 16
+void
+pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
+{
+ static struct l1_ttable static_l1;
+ static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
+ struct l1_ttable *l1 = &static_l1;
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ pd_entry_t pde;
+ pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
+ pt_entry_t *ptep;
+ vm_paddr_t pa;
+ vm_offset_t va;
+ int l1idx, l2idx, l2next = 0;
+
+ PDEBUG(1, printf("firstaddr = %08x, loadaddr = %08x\n",
+ firstaddr, loadaddr));
+
+ virtual_avail = firstaddr;
+ kernel_pmap = &kernel_pmap_store;
+ kernel_pmap->pm_l1 = l1;
+/*
+ * Scan the L1 translation table created by initarm() and create
+ * the required metadata for all valid mappings found in it.
+ */
+ for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
+ pde = kernel_l1pt[l1idx];
+
+ /*
+ * We're only interested in Coarse mappings.
+ * pmap_extract() can deal with section mappings without
+ * recourse to checking L2 metadata.
+ */
+ if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
+ continue;
+
+ /*
+ * Lookup the KVA of this L2 descriptor table
+ */
+ pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
+ ptep = (pt_entry_t *)kernel_pt_lookup(pa);
+
+ if (ptep == NULL) {
+ panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
+ (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
+ }
+
+ /*
+ * Fetch the associated L2 metadata structure.
+ * Allocate a new one if necessary.
+ */
+ if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ if (l2next == PMAP_STATIC_L2_SIZE)
+ panic("pmap_bootstrap: out of static L2s");
+ kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
+ &static_l2[l2next++];
+ }
+
+ /*
+ * One more L1 slot tracked...
+ */
+ l2->l2_occupancy++;
+
+ /*
+ * Fill in the details of the L2 descriptor in the
+ * appropriate bucket.
+ */
+ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+ l2b->l2b_kva = ptep;
+ l2b->l2b_phys = pa;
+ l2b->l2b_l1idx = l1idx;
+
+ /*
+ * Establish an initial occupancy count for this descriptor
+ */
+ for (l2idx = 0;
+ l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+ l2idx++) {
+ if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
+ l2b->l2b_occupancy++;
+ }
+ }
+
+ /*
+ * Make sure the descriptor itself has the correct cache mode.
+ * If not, fix it, but whine about the problem. Port-meisters
+ * should consider this a clue to fix up their initarm()
+ * function. :)
+ */
+ if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
+ printf("pmap_bootstrap: WARNING! wrong cache mode for "
+ "L2 pte @ %p\n", ptep);
+ }
+ }
+
+
+ /*
+ * Initialize protection array.
+ */
+ arm_protection_init();
+
+ /*
+ * Ensure the primary (kernel) L1 has the correct cache mode for
+ * a page table. Bitch if it is not correctly set.
+ */
+ for (va = (vm_offset_t)kernel_l1pt;
+ va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
+ if (pmap_set_pt_cache_mode(kernel_l1pt, va))
+ printf("pmap_bootstrap: WARNING! wrong cache mode for "
+ "primary L1 @ 0x%x\n", va);
+ }
+
+ cpu_dcache_wbinv_all();
+ cpu_tlb_flushID();
+ cpu_cpwait();
+
+ kernel_pmap->pm_active = -1;
+ kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
+ TAILQ_INIT(&kernel_pmap->pm_pvlist);
+ LIST_INIT(&allpmaps);
+ LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
+
+ /*
+ * Reserve some special page table entries/VA space for temporary
+ * mapping of pages.
+ */
+
+ pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
+ pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
+ pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
+ pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
+ TAILQ_INIT(&l1_lru_list);
+ mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
+ pmap_init_l1(l1, kernel_l1pt);
+ cpu_dcache_wbinv_all();
+
+ virtual_avail = round_page(virtual_avail);
+ virtual_end = lastaddr;
+ kernel_vm_end = virtual_end;
+}
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+ pmap_free_l1(pmap);
+ PDEBUG(1, printf("pmap_release()\n"));
+}
+
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+
+}
+
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
+{
+
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
+ pmap_changebit(m, AP_KRWURW, FALSE);
+ } else {
+ pmap_remove_all(m);
+ }
+ }
+}
+
+
+#define PMAP_REMOVE_PAGES_CURPROC_ONLY
+/*
+ * Remove all pages from specified address space
+ * this aids process exit speeds. Also, this code
+ * is special cased for current process only, but
+ * can have the more generic (and slightly slower)
+ * mode enabled. This is much faster than pmap_remove
+ * in the case of running down an entire address space.
+ */
+void
+pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *pte, tpte;
+ pv_entry_t pv, npv;
+ int s;
+ vm_page_t m;
+
+#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
+ if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
+ printf("warning: pmap_remove_pages called with non-current pmap\n");
+ return;
+ }
+#endif
+
+ s = splvm();
+ for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
+ if (pv->pv_va >= eva || pv->pv_va < sva) {
+ npv = TAILQ_NEXT(pv, pv_plist);
+ continue;
+ }
+
+ /*
+ * We cannot remove wired pages from a process' mapping
+ * at this time
+ */
+ if (pv->pv_flags & PT_W) {
+ npv = TAILQ_NEXT(pv, pv_plist);
+ continue;
+ }
+ l2b = pmap_get_l2_bucket(pmap_kernel(), pv->pv_va);
+ pte = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+ tpte = *pte;
+ *pte = 0;
+
+ m = PHYS_TO_VM_PAGE(tpte);
+
+ KASSERT(m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad tpte %x", tpte));
+
+ pv->pv_pmap->pm_stats.resident_count--;
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ vm_page_dirty(m);
+
+ npv = TAILQ_NEXT(pv, pv_plist);
+ TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+
+ m->md.pv_list_count--;
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+ vm_page_flag_clear(m, PG_REFERENCED);
+ }
+
+ pmap_free_l2_bucket(pv->pv_pmap, l2b, 1);
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+ pmap_free_pv_entry(pv);
+ }
+ splx(s);
+ pmap_invalidate_tlb_all(pmap);
+}
+
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * add a wired page to the kva
+ * note that in order for the mapping to take effect -- you
+ * should do a TLB flush after doing the pmap_kenter...
+ */
+PMAP_INLINE void
+pmap_kenter(vm_offset_t va, vm_offset_t pa)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *pte;
+ pt_entry_t opte;
+
+ PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
+ (uint32_t) va, (uint32_t) pa));
+
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ KASSERT(l2b != NULL, ("No L2 Bucket"));
+ pte = &l2b->l2b_kva[l2pte_index(va)];
+ opte = *pte;
+ PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
+ (uint32_t) pte, opte, *pte));
+ if (l2pte_valid(opte)) {
+ cpu_dcache_wbinv_range(va, PAGE_SIZE);
+ cpu_tlb_flushD_SE(va);
+ cpu_cpwait();
+ } else
+ if (opte == 0)
+ l2b->l2b_occupancy++;
+ *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL,
+ VM_PROT_READ | VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(pte);
+
+}
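+
+/*
+ * Example (illustrative sketch): mapping a physical page at a
+ * reserved kernel VA and tearing the mapping down again; "some_kva"
+ * and "pa" are hypothetical:
+ *
+ *	pmap_kenter(some_kva, pa);
+ *	... access the page through some_kva ...
+ *	pmap_kremove(some_kva);
+ */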
+
+
+/*
+ * remove a page from the kernel pagetables
+ */
+PMAP_INLINE void
+pmap_kremove(vm_offset_t va)
+{
+ pt_entry_t *pte;
+
+ pte = (pt_entry_t *)vtopte(va);
+ *pte = 0;
+ pmap_invalidate_page(kernel_pmap, va);
+}
+
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+{
+ vm_offset_t sva = *virt;
+ vm_offset_t va = sva;
+
+ PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
+ "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
+ prot));
+
+ while (start < end) {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ *virt = va;
+ return (sva);
+}
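+
+/*
+ * Example (illustrative): wiring a physically contiguous region into
+ * KVA during startup; "pa" is a hypothetical physical base:
+ *
+ *	vm_offset_t va = virtual_avail;
+ *
+ *	(void)pmap_map(&va, pa, pa + 4 * PAGE_SIZE,
+ *	    VM_PROT_READ | VM_PROT_WRITE);
+ *	virtual_avail = va;
+ *
+ * On return, va has been advanced past the four mapped pages.
+ */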
+
+
+/*
+ * Add a list of wired pages to the kva
+ * this routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ */
+void
+pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
+ va += PAGE_SIZE;
+ }
+}
+
+
+/*
+ * this routine jerks page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t va, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ pmap_kremove(va);
+ va += PAGE_SIZE;
+ }
+}
+
+
+/*
+ * pmap_object_init_pt preloads the ptes for a given object
+ * into the specified pmap. This eliminates the blast of soft
+ * faults on process startup and immediately after an mmap.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
+{
+ printf("pmap_object_init_pt()\n");
+}
+
+
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ return (FALSE);
+}
+
+static PMAP_INLINE void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+ /* TODO: Invalidate I+D_SE */
+ __asm("mcr p15, 0, r0, c7, c7, 0");
+}
+
+#if 0
+static PMAP_INLINE void
+pmap_invalidate_tlb(pmap_t pmap, vm_offset_t va)
+{
+ __asm("mcr p15, 0, r0, c8, c7, 0");
+ /* TODO: Invalidate TLB */
+}
+#endif
+
+static PMAP_INLINE void
+pmap_invalidate_tlb_all(pmap_t pmap)
+{
+ __asm("mcr p15, 0, r0, c8, c7, 0");
+ /* TODO: Invalidate all TLB */
+}
+
+static PMAP_INLINE void
+pmap_changebit(vm_page_t m, int bit, boolean_t setem)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return;
+
+ s = splvm();
+
+ /*
+ * Loop over all current mappings setting/clearing as appropriate
+ */
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+
+ /*
+ * don't write protect pager mappings
+ */
+ if (!setem && bit == AP_KRWURW) {
+ if (!pmap_track_modified(pv->pv_va))
+ continue;
+ }
+
+#if defined(PMAP_DEBUG)
+ if (!pv->pv_pmap) {
+ printf("Null pmap (cb) at va: 0x%x\n", (uint32_t) pv->pv_va);
+ continue;
+ }
+#endif
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+
+ if (setem) {
+ *pte |= bit;
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ } else {
+ pt_entry_t pbits = *pte;
+ if (pbits & bit) {
+ *pte = pbits & ~bit;
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ }
+ }
+ }
+ splx(s);
+}
+
+/*
+ * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
+ * Returns TRUE if the mapping exists, else FALSE.
+ *
+ * NOTE: This function is only used by a couple of arm-specific modules.
+ * It is not safe to take any pmap locks here, since we could be right
+ * in the middle of debugging the pmap anyway...
+ *
+ * It is possible for this routine to return FALSE even though a valid
+ * mapping does exist. This is because we don't lock, so the metadata
+ * state may be inconsistent.
+ *
+ * NOTE: We can return a NULL *ptp in the case where the L1 pde is
+ * a "section" mapping.
+ */
+boolean_t
+pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
+{
+ struct l2_dtable *l2;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep;
+ u_short l1idx;
+
+ if (pm->pm_l1 == NULL)
+ return (FALSE);
+
+ l1idx = L1_IDX(va);
+ *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ l1pd = *pl1pd;
+
+ if (l1pte_section_p(l1pd)) {
+ *ptp = NULL;
+ return (TRUE);
+ }
+
+ if (pm->pm_l2 == NULL)
+ return (FALSE);
+
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+
+ if (l2 == NULL ||
+ (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
+ return (FALSE);
+ }
+
+ *ptp = &ptep[l2pte_index(va)];
+ return (TRUE);
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ */
+void
+pmap_remove_all(vm_page_t m)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte, tpte;
+ int s;
+
+#if defined(PMAP_DEBUG)
+ /*
+ * XXX this makes pmap_page_protect(NONE) illegal for non-managed
+ * pages!
+ */
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
+ }
+#endif
+
+ s = splvm();
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ pv->pv_pmap->pm_stats.resident_count--;
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+
+ tpte = atomic_readandclear_int(pte);
+ if (pv->pv_flags & PT_W)
+ pv->pv_pmap->pm_stats.wired_count--;
+
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ m->md.pv_list_count--;
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+ pmap_idcache_wbinv_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE);
+ pmap_free_pv_entry(pv);
+ }
+
+ splx(s);
+}
+
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
+ vm_offset_t next_bucket;
+ u_int flags;
+ int flush;
+
+ if ((prot & VM_PROT_READ) == 0) {
+ pmap_remove(pm, sva, eva);
+ return;
+ }
+
+ if (prot & VM_PROT_WRITE) {
+ /*
+ * If this is a read->write transition, just ignore it and let
+ * vm_fault() take care of it later.
+ */
+ return;
+ }
+
+
+ /*
+ * OK, at this point, we know we're doing a write-protect operation.
+ * If the pmap is active, write-back the range.
+ */
+ pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);
+
+ flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
+ flags = 0;
+
+ while (sva < eva) {
+ next_bucket = L2_NEXT_BUCKET(sva);
+ if (next_bucket > eva)
+ next_bucket = eva;
+
+ l2b = pmap_get_l2_bucket(pm, sva);
+ if (l2b == NULL) {
+ sva = next_bucket;
+ continue;
+ }
+
+ ptep = &l2b->l2b_kva[l2pte_index(sva)];
+
+ while (sva < next_bucket) {
+ if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
+ struct vm_page *pg;
+ u_int f;
+
+ pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
+ pte &= ~L2_S_PROT_W;
+ *ptep = pte;
+ PTE_SYNC(ptep);
+
+ if (pg != NULL) {
+ f = pmap_modify_pv(pg, pm, sva,
+ PVF_WRITE, 0);
+ pmap_vac_me_harder(pg, pm, sva);
+ } else
+ f = PVF_REF | PVF_EXEC;
+
+ if (flush >= 0) {
+ flush++;
+ flags |= f;
+ } else
+ if (PV_BEEN_EXECD(f))
+ pmap_tlb_flushID_SE(pm, sva);
+ else
+ if (PV_BEEN_REFD(f))
+ pmap_tlb_flushD_SE(pm, sva);
+ }
+
+ sva += PAGE_SIZE;
+ ptep++;
+ }
+ }
+
+
+ if (flush) {
+ if (PV_BEEN_EXECD(flags))
+ pmap_tlb_flushID(pm);
+ else
+ if (PV_BEEN_REFD(flags))
+ pmap_tlb_flushD(pm);
+ }
+
+}
+
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+vm_offset_t getttb(void);
+void
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ boolean_t wired)
+{
+ struct l2_bucket *l2b;
+ struct vm_page *opg;
+ struct pv_entry *pve;
+ pt_entry_t *ptep, npte, opte;
+ u_int nflags;
+ u_int oflags;
+ vm_paddr_t pa;
+
+
+ if (va == vector_page) {
+ pa = systempage.pv_pa;
+ m = NULL;
+ } else
+ pa = VM_PAGE_TO_PHYS(m);
+ nflags = 0;
+ if (prot & VM_PROT_WRITE)
+ nflags |= PVF_WRITE;
+ if (prot & VM_PROT_EXECUTE)
+ nflags |= PVF_EXEC;
+ if (wired)
+ nflags |= PVF_WIRED;
+ PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
+ "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
+
+ if (pmap == pmap_kernel())
+ l2b = pmap_get_l2_bucket(pmap, va);
+ else {
+ l2b = pmap_alloc_l2_bucket(pmap, va);
+ }
+ if (l2b == NULL)
+ panic("pmap_enter: failed to allocate l2 bucket");
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ /*
+ * Page table entry not valid, we need a new PT page
+ */
+ if (ptep == NULL)
+ panic("pmap_enter: invalid page table pte=%p, va=0x%x 0x%x\n",
+ ptep, va, UPT_MIN_ADDRESS);
+
+ opte = *ptep;
+ npte = pa;
+ oflags = 0;
+
+ if (opte) {
+ /*
+ * There is already a mapping at this address.
+ * If the physical address is different, lookup the
+ * vm_page.
+ */
+ if (l2pte_pa(opte) != pa)
+ opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
+ else
+ opg = m;
+ } else
+ opg = NULL;
+
+ if (m && !(m->flags & PG_UNMANAGED)) {
+ /*
+ * This is to be a managed mapping.
+ */
+ if ((prot & (VM_PROT_ALL)) ||
+ (m->md.pvh_attrs & PVF_REF)) {
+ /*
+ * - The access type indicates that we don't need
+ * to do referenced emulation.
+ * OR
+ * - The physical page has already been referenced
+ * so no need to re-do referenced emulation here.
+ */
+ npte |= L2_S_PROTO;
+
+ nflags |= PVF_REF;
+
+ if (((prot & VM_PROT_WRITE) != 0 &&
+ ((m->flags & PG_WRITEABLE) ||
+ (m->md.pvh_attrs & PVF_MOD) != 0))) {
+ /*
+ * This is a writable mapping, and the
+ * page's mod state indicates it has
+ * already been modified. Make it
+ * writable from the outset.
+ */
+ npte |= L2_S_PROT_W;
+ nflags |= PVF_MOD;
+ }
+ } else {
+ /*
+ * Need to do page referenced emulation.
+ */
+ npte |= L2_TYPE_INV;
+ }
+
+ npte |= pte_l2_s_cache_mode;
+
+ if (m == opg) {
+ /*
+ * We're changing the attrs of an existing mapping.
+ */
+#if 0
+ simple_lock(&pg->mdpage.pvh_slock);
+#endif
+ oflags = pmap_modify_pv(m, pmap, va,
+ PVF_WRITE | PVF_EXEC | PVF_WIRED |
+ PVF_MOD | PVF_REF, nflags);
+#if 0
+ simple_unlock(&pg->mdpage.pvh_slock);
+#endif
+
+ /*
+ * We may need to flush the cache if we're
+ * doing rw-ro...
+ */
+ if (pmap->pm_cstate.cs_cache_d &&
+ (oflags & PVF_NC) == 0 &&
+ (opte & L2_S_PROT_W) != 0 &&
+ (prot & VM_PROT_WRITE) == 0)
+ cpu_dcache_wb_range(va, PAGE_SIZE);
+ } else {
+ /*
+ * New mapping, or changing the backing page
+ * of an existing mapping.
+ */
+ if (opg) {
+ /*
+ * Replacing an existing mapping with a new one.
+ * It is part of our managed memory so we
+ * must remove it from the PV list
+ */
+#if 0
+ simple_lock(&opg->mdpage.pvh_slock);
+#endif
+ pve = pmap_remove_pv(opg, pmap, va);
+ pmap_vac_me_harder(opg, pmap, 0);
+#if 0
+ simple_unlock(&opg->mdpage.pvh_slock);
+#endif
+ oflags = pve->pv_flags;
+
+ /*
+ * If the old mapping was valid (ref/mod
+ * emulation creates 'invalid' mappings
+ * initially) then make sure to frob
+ * the cache.
+ */
+ if ((oflags & PVF_NC) == 0 &&
+ l2pte_valid(opte)) {
+ if (PV_BEEN_EXECD(oflags)) {
+ pmap_idcache_wbinv_range(pmap, va,
+ PAGE_SIZE);
+ } else
+ if (PV_BEEN_REFD(oflags)) {
+ pmap_dcache_wb_range(pmap, va,
+ PAGE_SIZE, TRUE,
+ (oflags & PVF_WRITE) == 0);
+ }
+ }
+ } else
+ if ((pve = pmap_get_pv_entry()) == NULL) {
+ panic("pmap_enter: no pv entries");
+
+ }
+
+ pmap_enter_pv(m, pve, pmap, va, nflags);
+ }
+ } else {
+ /*
+ * We're mapping an unmanaged page.
+ * These are always readable, and possibly writable, from
+ * the get go as we don't need to track ref/mod status.
+ */
+ npte |= L2_S_PROTO;
+ if (prot & VM_PROT_WRITE) {
+ npte |= L2_S_PROT_W;
+ }
+
+ /*
+ * Make sure the vector table is mapped cacheable
+ */
+ if (pmap != pmap_kernel() && va == vector_page)
+ npte |= pte_l2_s_cache_mode;
+ if (opg) {
+ /*
+ * Looks like there's an existing 'managed' mapping
+ * at this address.
+ */
+#if 0
+ simple_lock(&opg->mdpage.pvh_slock);
+#endif
+ pve = pmap_remove_pv(opg, pmap, va);
+ pmap_vac_me_harder(opg, pmap, 0);
+#if 0
+ simple_unlock(&opg->mdpage.pvh_slock);
+#endif
+ oflags = pve->pv_flags;
+
+ if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
+ if (PV_BEEN_EXECD(oflags))
+ pmap_idcache_wbinv_range(pmap, va,
+ PAGE_SIZE);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_dcache_wb_range(pmap, va, PAGE_SIZE,
+ TRUE, (oflags & PVF_WRITE) == 0);
+ }
+ }
+ }
+
+ /*
+ * Make sure userland mappings get the right permissions
+ */
+ if (pmap != pmap_kernel() && va != vector_page) {
+ npte |= L2_S_PROT_U;
+ }
+
+ /*
+ * Keep the stats up to date
+ */
+ if (opte == 0) {
+ l2b->l2b_occupancy++;
+ pmap->pm_stats.resident_count++;
+ }
+
+
+ /*
+ * If this is just a wiring change, the two PTEs will be
+ * identical, so there's no need to update the page table.
+ */
+ if (npte != opte) {
+ boolean_t is_cached = pmap_is_cached(pmap);
+
+ *ptep = npte;
+ if (is_cached) {
+ /*
+ * We only need to frob the cache/tlb if this pmap
+ * is current
+ */
+ PTE_SYNC(ptep);
+ if (L1_IDX(va) != L1_IDX(vector_page) &&
+ l2pte_valid(npte)) {
+ /*
+ * This mapping is likely to be accessed as
+ * soon as we return to userland. Fix up the
+ * L1 entry to avoid taking another
+ * page/domain fault.
+ */
+ pd_entry_t *pl1pd, l1pd;
+
+
+ pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
+ l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) |
+ L1_C_PROTO;
+ if (*pl1pd != l1pd) {
+ *pl1pd = l1pd;
+ PTE_SYNC(pl1pd);
+ }
+ }
+ }
+
+ if (PV_BEEN_EXECD(oflags))
+ pmap_tlb_flushID_SE(pmap, va);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_tlb_flushD_SE(pmap, va);
+
+
+ if (m && !(m->flags & PG_UNMANAGED)) {
+#if 0
+ simple_lock(&pg->mdpage.pvh_slock);
+#endif
+ pmap_vac_me_harder(m, pmap, va);
+#if 0
+ simple_unlock(&pg->mdpage.pvh_slock);
+#endif
+ }
+ }
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * 5. Tlbflush is deferred to calling procedure.
+ * 6. Page IS managed.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+vm_page_t
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+{
+ pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+ return (NULL);
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
+ vm_page_t pg;
+
+ l2b = pmap_get_l2_bucket(pmap, va);
+ KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+ pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
+ if (pg)
+ pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired);
+}
+
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
+{
+}
+
+
+/*
+ * this routine defines the region(s) of memory that should
+ * not be tested for the modified bit.
+ */
+static PMAP_INLINE int
+pmap_track_modified(vm_offset_t va)
+{
+ if ((va < clean_sva) || (va >= clean_eva))
+ return 1;
+ else
+ return 0;
+}
+
+
+/*
+ * Routine: pmap_pte
+ * Function:
+ * Extract the page table entry associated
+ * with the given map/virtual_address pair.
+ */
+static pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t va)
+{
+ struct l2_bucket *l2b;
+
+ l2b = pmap_get_l2_bucket(pmap, va);
+ if (l2b == NULL)
+ return (NULL);
+ return (&l2b->l2b_kva[l2pte_index(va)]);
+}
+
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+ return (pmap_extract(pmap_kernel(), va));
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_offset_t
+pmap_extract(pmap_t pm, vm_offset_t va)
+{
+ struct l2_dtable *l2;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep, pte;
+ vm_paddr_t pa;
+ u_int l1idx;
+
+ l1idx = L1_IDX(va);
+ pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ l1pd = *pl1pd;
+
+ if (l1pte_section_p(l1pd)) {
+ /*
+ * Section mappings should only happen for pmap_kernel()
+ */
+ KASSERT(pm == pmap_kernel(),
+     ("pmap_extract: section mapping in user pmap"));
+ pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
+ } else {
+ /*
+ * Note that we can't rely on the validity of the L1
+ * descriptor as an indication that a mapping exists.
+ * We have to look it up in the L2 dtable.
+ */
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+
+ if (l2 == NULL ||
+ (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
+ return (0);
+ }
+
+ ptep = &ptep[l2pte_index(va)];
+ pte = *ptep;
+
+ if (pte == 0)
+ return (0);
+
+ switch (pte & L2_TYPE_MASK) {
+ case L2_TYPE_L:
+ pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
+ break;
+
+ default:
+ pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
+ break;
+ }
+ }
+
+ return (pa);
+}
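+
+/*
+ * Worked example (illustrative): for a section mapping only the L1
+ * descriptor is consulted.  Assuming L1_S_FRAME == 0xfff00000 and
+ * L1_S_OFFSET == 0x000fffff (1MB sections), a hypothetical l1pd of
+ * 0x10000c1e and va 0xc01234ab give:
+ *
+ *	pa = (0x10000c1e & 0xfff00000) | (0xc01234ab & 0x000fffff)
+ *	   = 0x10000000 | 0x001234ab = 0x101234ab
+ */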
+
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ vm_paddr_t pa;
+ vm_page_t m;
+
+ m = NULL;
+ mtx_lock(&Giant);
+ if ((pa = pmap_extract(pmap, va)) != 0) {
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_lock_queues();
+ vm_page_hold(m);
+ vm_page_unlock_queues();
+ }
+ mtx_unlock(&Giant);
+ return (m);
+}
+/*
+ * After removing a page table entry, this routine is used to
+ * conditionally free the page, and manage the hold/wire counts.
+ */
+static int
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
+{
+ if (va >= UPT_MIN_ADDRESS)
+ return 0;
+
+ return pmap_unwire_pte_hold(pmap, mpte);
+}
+
+void
+pmap_update(pmap_t pm)
+{
+
+ if (pmap_is_current(pm)) {
+ /*
+ * If we're dealing with a current userland pmap, move its L1
+ * to the end of the LRU.
+ */
+ if (pm != pmap_kernel())
+ pmap_use_l1(pm);
+
+ /*
+ * We can assume we're done with frobbing the cache/tlb for
+ * now. Make sure any future pmap ops don't skip cache/tlb
+ * flushes.
+ */
+ pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
+ }
+
+ /*
+ * make sure TLB/cache operations have completed.
+ */
+ cpu_cpwait();
+}
+
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+
+void
+pmap_pinit(pmap_t pmap)
+{
+ PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
+
+ pmap_alloc_l1(pmap);
+ bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
+
+ LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
+ pmap->pm_count = 1;
+ pmap->pm_active = 0;
+ pmap->pm_ptphint = NULL;
+
+ TAILQ_INIT(&pmap->pm_pvlist);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+ pmap->pm_stats.resident_count = 1;
+ if (vector_page < KERNBASE) {
+ pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
+ VM_PROT_READ, 1);
+ pmap_update(pmap);
+ }
+}
+
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+
+static PMAP_INLINE void
+pmap_free_pv_entry(pv_entry_t pv)
+{
+ pv_entry_count--;
+ uma_zfree(pvzone, pv);
+}
+
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ * the memory allocation is performed bypassing the malloc code
+ * because of the possibility of allocations at interrupt time.
+ */
+static pv_entry_t
+pmap_get_pv_entry(void)
+{
+ pv_entry_t ret_value;
+
+ pv_entry_count++;
+ if (pv_entry_high_water &&
+ (pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
+ pmap_pagedaemon_waken = 1;
+ wakeup(&vm_pages_needed);
+ }
+ ret_value = uma_zalloc(pvzone, M_NOWAIT);
+ return ret_value;
+}
+
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+#define PMAP_REMOVE_CLEAN_LIST_SIZE 3
+void
+pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+{
+ struct l2_bucket *l2b;
+ vm_offset_t next_bucket;
+ pt_entry_t *ptep;
+ u_int cleanlist_idx, total, cnt;
+ struct {
+ vm_offset_t va;
+ pt_entry_t *pte;
+ } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
+ u_int mappings, is_exec, is_refd;
+
+
+ /*
+ * we lock in the pmap => pv_head direction
+ */
+#if 0
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+#endif
+
+ if (!pmap_is_cached(pm)) {
+ cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
+ } else
+ cleanlist_idx = 0;
+
+ total = 0;
+
+ while (sva < eva) {
+ /*
+ * Do one L2 bucket's worth at a time.
+ */
+ next_bucket = L2_NEXT_BUCKET(sva);
+ if (next_bucket > eva)
+ next_bucket = eva;
+
+ l2b = pmap_get_l2_bucket(pm, sva);
+ if (l2b == NULL) {
+ sva = next_bucket;
+ continue;
+ }
+
+ ptep = &l2b->l2b_kva[l2pte_index(sva)];
+ mappings = 0;
+
+ while (sva < next_bucket) {
+ struct vm_page *pg;
+ pt_entry_t pte;
+ vm_paddr_t pa;
+
+ pte = *ptep;
+
+ if (pte == 0) {
+ /*
+ * Nothing here, move along
+ */
+ sva += PAGE_SIZE;
+ ptep++;
+ continue;
+ }
+
+ pm->pm_stats.resident_count--;
+ pa = l2pte_pa(pte);
+ is_exec = 0;
+ is_refd = 1;
+
+ /*
+ * Update flags. In a number of circumstances,
+ * we could cluster a lot of these and do a
+ * number of sequential pages in one go.
+ */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+ struct pv_entry *pve;
+#if 0
+ simple_lock(&pg->mdpage.pvh_slock);
+#endif
+ pve = pmap_remove_pv(pg, pm, sva);
+ pmap_vac_me_harder(pg, pm, 0);
+#if 0
+ simple_unlock(&pg->mdpage.pvh_slock);
+#endif
+ if (pve != NULL) {
+ is_exec =
+ PV_BEEN_EXECD(pve->pv_flags);
+ is_refd =
+ PV_BEEN_REFD(pve->pv_flags);
+ pmap_free_pv_entry(pve);
+ }
+ }
+
+ if (!l2pte_valid(pte)) {
+ *ptep = 0;
+ PTE_SYNC_CURRENT(pm, ptep);
+ sva += PAGE_SIZE;
+ ptep++;
+ mappings++;
+ continue;
+ }
+
+ if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ /* Add to the clean list. */
+ cleanlist[cleanlist_idx].pte = ptep;
+ cleanlist[cleanlist_idx].va =
+ sva | (is_exec & 1);
+ cleanlist_idx++;
+ } else
+ if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ /* Nuke everything if needed. */
+ pmap_idcache_wbinv_all(pm);
+ pmap_tlb_flushID(pm);
+
+ /*
+ * Roll back the previous PTE list,
+ * and zero out the current PTE.
+ */
+ for (cnt = 0;
+ cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
+ *cleanlist[cnt].pte = 0;
+ }
+ *ptep = 0;
+ PTE_SYNC(ptep);
+ cleanlist_idx++;
+ } else {
+ *ptep = 0;
+ PTE_SYNC(ptep);
+ if (is_exec)
+ pmap_tlb_flushID_SE(pm, sva);
+ else
+ if (is_refd)
+ pmap_tlb_flushD_SE(pm, sva);
+ }
+
+ sva += PAGE_SIZE;
+ ptep++;
+ mappings++;
+ }
+
+ /*
+ * Deal with any leftovers
+ */
+ if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ total += cleanlist_idx;
+ for (cnt = 0; cnt < cleanlist_idx; cnt++) {
+ if (pm->pm_cstate.cs_all != 0) {
+ vm_offset_t clva =
+ cleanlist[cnt].va & ~1;
+ if (cleanlist[cnt].va & 1) {
+ pmap_idcache_wbinv_range(pm,
+ clva, PAGE_SIZE);
+ pmap_tlb_flushID_SE(pm, clva);
+ } else {
+ pmap_dcache_wb_range(pm,
+ clva, PAGE_SIZE, TRUE,
+ FALSE);
+ pmap_tlb_flushD_SE(pm, clva);
+ }
+ }
+ *cleanlist[cnt].pte = 0;
+ PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte);
+ }
+
+ /*
+ * If it looks like we're removing a whole bunch
+ * of mappings, it's faster to just write-back
+ * the whole cache now and defer TLB flushes until
+ * pmap_update() is called.
+ */
+ if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
+ cleanlist_idx = 0;
+ else {
+ cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
+ pmap_idcache_wbinv_all(pm);
+ }
+ }
+
+ pmap_free_l2_bucket(pm, l2b, mappings);
+ }
+
+#if 0
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+#endif
+}
+
+
+
+static PMAP_INLINE int
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+{
+ return 0;
+}
+
+
+/*
+ * pmap_zero_page()
+ *
+ * Zero a given physical page by mapping it at a page hook point.
+ * In doing the zero page op, the page we zero is mapped cacheable,
+ * since on StrongARM accesses to non-cached pages are non-burst,
+ * making writing _any_ bulk data very slow.
+ */
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void
+pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
+{
+#ifdef DEBUG
+ struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
+
+ if (TAILQ_FIRST(&pg->md.pv_list) != NULL)
+ panic("pmap_zero_page: page has mappings");
+#endif
+
+
+ /*
+ * Hook in the page, zero it, and purge the cache for that
+ * zeroed page. Invalidate the TLB as needed.
+ */
+ *cdst_pte = L2_S_PROTO | phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ if (off || size)
+ bzero((void *)(cdstp + off), size);
+ else
+ bzero_page(cdstp);
+ cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+}
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if ARM_MMU_XSCALE == 1
+void
+pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
+{
+#ifdef DEBUG
+ struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
+
+ if (TAILQ_FIRST(&pg->md.pv_list) != NULL)
+ panic("pmap_zero_page: page has mappings");
+#endif
+
+
+ /*
+ * Hook in the page, zero it, and purge the cache for that
+ * zeroed page. Invalidate the TLB as needed.
+ */
+ *cdst_pte = L2_S_PROTO | phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ if (off || size)
+ bzero((void *)(cdstp + off), size);
+ else
+ bzero_page(cdstp);
+ xscale_cache_clean_minidata();
+}
+
+/*
+ * Change the PTEs for the specified kernel mappings such that they
+ * will use the mini data cache instead of the main data cache.
+ */
+void
+pmap_uarea(vm_offset_t va)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, *sptep, pte;
+ vm_offset_t next_bucket, eva;
+
+#if (ARM_NMMUS > 1)
+ if (xscale_use_minidata == 0)
+ return;
+#endif
+
+ eva = va + USPACE;
+
+ while (va < eva) {
+ next_bucket = L2_NEXT_BUCKET(va);
+ if (next_bucket > eva)
+ next_bucket = eva;
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+
+ sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
+
+ while (va < next_bucket) {
+ pte = *ptep;
+ if (!l2pte_minidata(pte)) {
+ cpu_dcache_wbinv_range(va, PAGE_SIZE);
+ cpu_tlb_flushD_SE(va);
+ *ptep = pte & ~L2_B;
+ }
+ ptep++;
+ va += PAGE_SIZE;
+ }
+ PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
+ }
+ cpu_cpwait();
+}
+#endif /* ARM_MMU_XSCALE == 1 */
+
+/*
+ * pmap_zero_page zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+ pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, 0);
+}
+
+
+/*
+ * pmap_zero_page_area zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.
+ *
+ * off and size may not cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+
+ pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size);
+}
+
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents. This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+ pmap_zero_page(m);
+#if 0 /* Unreachable alternative that zeroes through the hook point. */
+ vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
+ unsigned int i;
+ int *ptr;
+
+ *cdst_pte = L2_S_PROTO | phys |
+     L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ for (i = 0, ptr = (int *)cdstp;
+     i < (PAGE_SIZE / sizeof(int)); i++)
+	*ptr++ = 0;
+ cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+#endif
+}
+
+/*
+ * pmap_clean_page()
+ *
+ * This is a local function used to work out the best strategy to clean
+ * a single page referenced by its entry in the PV table. It's used by
+ * pmap_copy_page, pmap_zero page and maybe some others later on.
+ *
+ * Its policy is effectively:
+ * o If there are no mappings, we don't bother doing anything with the cache.
+ * o If there is one mapping, we clean just that page.
+ * o If there are multiple mappings, we clean the entire cache.
+ *
+ * So that some functions can be further optimised, it returns 0 if it didn't
+ * clean the entire cache, or 1 if it did.
+ *
+ * XXX One bug in this routine is that if the pv_entry has a single page
+ * mapped at 0x00000000 a whole cache clean will be performed rather than
+ * just the 1 page. This should not occur in everyday use, and if it
+ * does it merely results in a less efficient clean for the page.
+ */
+static int
+pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
+{
+ pmap_t pm, pm_to_clean = NULL;
+ struct pv_entry *npv;
+ u_int cache_needs_cleaning = 0;
+ u_int flags = 0;
+ vm_offset_t page_to_clean = 0;
+
+ if (pv == NULL) {
+ /* nothing mapped in so nothing to flush */
+ return (0);
+ }
+
+ /*
+ * Since we flush the cache each time we change to a different
+ * user vmspace, we only need to flush the page if it is in the
+ * current pmap.
+ */
+ if (curproc)
+ pm = curproc->p_vmspace->vm_map.pmap;
+ else
+ pm = pmap_kernel();
+
+ for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
+ if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
+ flags |= npv->pv_flags;
+ /*
+ * The page is mapped non-cacheable in
+ * this map. No need to flush the cache.
+ */
+ if (npv->pv_flags & PVF_NC) {
+#ifdef DIAGNOSTIC
+ if (cache_needs_cleaning)
+ panic("pmap_clean_page: "
+ "cache inconsistency");
+#endif
+ break;
+ } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
+ continue;
+ if (cache_needs_cleaning) {
+ page_to_clean = 0;
+ break;
+ } else {
+ page_to_clean = npv->pv_va;
+ pm_to_clean = npv->pv_pmap;
+ }
+ cache_needs_cleaning = 1;
+ }
+ }
+ if (page_to_clean) {
+ if (PV_BEEN_EXECD(flags))
+ pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
+ PAGE_SIZE);
+ else
+ pmap_dcache_wb_range(pm_to_clean, page_to_clean,
+ PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
+ } else if (cache_needs_cleaning) {
+ if (PV_BEEN_EXECD(flags))
+ pmap_idcache_wbinv_all(pm);
+ else
+ pmap_dcache_wbinv_all(pm);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * pmap_copy_page()
+ *
+ * Copy the specified (machine independent) page by mapping both pages
+ * into the page hook points and using bcopy, one machine dependent
+ * page at a time. The same comment regarding cacheability as in
+ * pmap_zero_page also applies here.
+ */
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void
+pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
+{
+ struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
+#ifdef DEBUG
+ struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
+
+ if (TAILQ_FIRST(&dst_pg->md.pv_list) != NULL)
+ panic("pmap_copy_page: dst page has mappings");
+#endif
+
+
+ /*
+ * Clean the source page. Hold the source page's lock for
+ * the duration of the copy so that no other mappings can
+ * be created while we have a potentially aliased mapping.
+ */
+#if 0
+ mtx_lock(&src_pg->md.pvh_mtx);
+#endif
+ (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
+
+ /*
+ * Map the pages into the page hook points, copy them, and purge
+ * the cache for the appropriate page. Invalidate the TLB
+ * as required.
+ */
+ *csrc_pte = L2_S_PROTO | src |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | dst |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy_page(csrcp, cdstp);
+ cpu_dcache_inv_range(csrcp, PAGE_SIZE);
+#if 0
+ mtx_unlock(&src_pg->md.pvh_mtx);
+#endif
+ cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+}
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if ARM_MMU_XSCALE == 1
+void
+pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
+{
+ struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
+#ifdef DEBUG
+ struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
+
+ if (TAILQ_FIRST(&dst_pg->md.pv_list) != NULL)
+ panic("pmap_copy_page: dst page has mappings");
+#endif
+
+
+ /*
+ * Clean the source page. Hold the source page's lock for
+ * the duration of the copy so that no other mappings can
+ * be created while we have a potentially aliased mapping.
+ */
+ (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
+
+ /*
+ * Map the pages into the page hook points, copy them, and purge
+ * the cache for the appropriate page. Invalidate the TLB
+ * as required.
+ */
+ *csrc_pte = L2_S_PROTO | src |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | dst |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy_page(csrcp, cdstp);
+ xscale_cache_clean_minidata();
+}
+#endif /* ARM_MMU_XSCALE == 1 */
+
+void
+pmap_copy_page(vm_page_t src, vm_page_t dst)
+{
+ pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
+}
+
+
+
+
+/*
+ * this routine returns true if a physical page resides
+ * in the given pmap.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ pv_entry_t pv;
+ int loops = 0;
+ int s;
+
+ if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ return (FALSE);
+
+ s = splvm();
+
+ /*
+ * Check current mappings, returning immediately if one is found
+ */
+ for (pv = TAILQ_FIRST(&m->md.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pv->pv_pmap == pmap) {
+ splx(s);
+ return (TRUE);
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ splx(s);
+ return (FALSE);
+}
+
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return the count of reference bits for a page, clearing all of them.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ printf("pmap_ts_referenced()\n");
+
+ return (0);
+}
+
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+ printf("pmap_is_modified()\n");
+
+ return(FALSE);
+}
+
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+
+ if (m->md.pvh_attrs & PVF_MOD)
+ pmap_clearbit(m, PVF_MOD);
+}
+
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+void
+pmap_clear_reference(vm_page_t m)
+{
+
+ if (m->md.pvh_attrs & PVF_REF)
+ pmap_clearbit(m, PVF_REF);
+}
+
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr)
+{
+ printf("pmap_mincore()\n");
+
+ return (0);
+}
+
+
+vm_offset_t
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
+
+ return(addr);
+}
+
+
+static void
+arm_protection_init(void)
+{
+ int *kp, prot;
+
+ kp = protection_codes;
+ for (prot = 0; prot < 8; prot++) {
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ *kp++ = 0;
+ break;
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = PT_AP(AP_KRW);
+ }
+ }
+}
+
+
+/*
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev(vm_offset_t pa, vm_size_t size)
+{
+ vm_offset_t va, tmpva, offset;
+ pt_entry_t *pte;
+
+ /* XXX: pmap_mapdev is wrong. */
+ offset = pa & PAGE_MASK;
+ size = roundup(offset + size, PAGE_SIZE);
+
+ GIANT_REQUIRED;
+
+ va = kmem_alloc_pageable(kernel_map, size);
+ if (!va)
+ panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
+
+ pa = pa & PG_FRAME;
+ for (tmpva = va; size > 0;) {
+ pte = vtopte(tmpva);
+ *pte = L2_PTE(pa, AP_KRW);
+ size -= PAGE_SIZE;
+ tmpva += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ }
+ pmap_invalidate_tlb_all(kernel_pmap);
+
+ return ((void *)(va + offset));
+}
+
+#define BOOTSTRAP_DEBUG
+
+/*
+ * pmap_map_section:
+ *
+ * Create a single section mapping.
+ */
+void
+pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
+ int prot, int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pd_entry_t fl;
+
+ KASSERT(((va | pa) & L1_S_OFFSET) == 0,
+     ("pmap_map_section: va or pa not section-aligned"));
+
+ switch (cache) {
+ case PTE_NOCACHE:
+ default:
+ fl = 0;
+ break;
+
+ case PTE_CACHE:
+ fl = pte_l1_s_cache_mode;
+ break;
+
+ case PTE_PAGETABLE:
+ fl = pte_l1_s_cache_mode_pt;
+ break;
+ }
+
+ pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
+ L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
+ PTE_SYNC(&pde[va >> L1_S_SHIFT]);
+
+}
+
+/*
+ * pmap_link_l2pt:
+ *
+ * Link the L2 page table specified by "pa" into the L1
+ * page table at the slot for "va".
+ */
+void
+pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
+ u_int slot = va >> L1_S_SHIFT;
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0,
+     ("pmap_link_l2pt: va not 4MB-aligned"));
+ KASSERT((l2pv->pv_pa & PAGE_MASK) == 0,
+     ("pmap_link_l2pt: L2 table pa not page-aligned"));
+#endif
+
+ proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
+
+ pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
+#ifdef ARM32_NEW_VM_LAYOUT
+ PTE_SYNC(&pde[slot]);
+#else
+ pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
+ pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
+ pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
+ PTE_SYNC_RANGE(&pde[slot + 0], 4);
+#endif
+
+ SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
+
+
+}
+
+/*
+ * pmap_map_entry
+ *
+ * Create a single page mapping.
+ */
+void
+pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
+ int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pt_entry_t fl;
+ pt_entry_t *pte;
+
+ KASSERT(((va | pa) & PAGE_MASK) == 0,
+     ("pmap_map_entry: va or pa not page-aligned"));
+
+ switch (cache) {
+ case PTE_NOCACHE:
+ default:
+ fl = 0;
+ break;
+
+ case PTE_CACHE:
+ fl = pte_l2_s_cache_mode;
+ break;
+
+ case PTE_PAGETABLE:
+ fl = pte_l2_s_cache_mode_pt;
+ break;
+ }
+
+ if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
+ panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte = (pt_entry_t *)
+ kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+#else
+ pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+#endif
+
+ if (pte == NULL)
+ panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte[(va >> PAGE_SHIFT) & 0x3ff] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
+ PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
+#else
+ pte[l2pte_index(va)] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
+ PTE_SYNC(&pte[l2pte_index(va)]);
+#endif
+}
+
+/*
+ * pmap_map_chunk:
+ *
+ * Map a chunk of memory using the most efficient mappings
+ * possible (section, large page, small page) into the
+ * provided L1 and L2 tables at the specified virtual address.
+ */
+vm_size_t
+pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
+ vm_size_t size, int prot, int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pt_entry_t *pte, f1, f2s, f2l;
+ vm_size_t resid;
+ int i;
+
+ resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+
+ if (l1pt == 0)
+ panic("pmap_map_chunk: no L1 table provided");
+
+#ifdef VERBOSE_INIT_ARM
+ printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
+ "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
+#endif
+
+ switch (cache) {
+ case PTE_NOCACHE:
+ default:
+ f1 = 0;
+ f2l = 0;
+ f2s = 0;
+ break;
+
+ case PTE_CACHE:
+ f1 = pte_l1_s_cache_mode;
+ f2l = pte_l2_l_cache_mode;
+ f2s = pte_l2_s_cache_mode;
+ break;
+
+ case PTE_PAGETABLE:
+ f1 = pte_l1_s_cache_mode_pt;
+ f2l = pte_l2_l_cache_mode_pt;
+ f2s = pte_l2_s_cache_mode_pt;
+ break;
+ }
+
+ size = resid;
+
+ while (resid > 0) {
+ /* See if we can use a section mapping. */
+ if (L1_S_MAPPABLE_P(va, pa, resid)) {
+#ifdef VERBOSE_INIT_ARM
+ printf("S");
+#endif
+ pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
+ L1_S_PROT(PTE_KERNEL, prot) | f1 |
+ L1_S_DOM(PMAP_DOMAIN_KERNEL);
+ PTE_SYNC(&pde[va >> L1_S_SHIFT]);
+ va += L1_S_SIZE;
+ pa += L1_S_SIZE;
+ resid -= L1_S_SIZE;
+ continue;
+ }
+
+ /*
+ * Ok, we're going to use an L2 table. Make sure
+ * one is actually in the corresponding L1 slot
+ * for the current VA.
+ */
+ if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
+ panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte = (pt_entry_t *)
+ kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+#else
+ pte = (pt_entry_t *) kernel_pt_lookup(
+ pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+#endif
+ if (pte == NULL)
+ panic("pmap_map_chunk: can't find L2 table for VA"
+ "0x%08x", va);
+
+ /* See if we can use a L2 large page mapping. */
+ if (L2_L_MAPPABLE_P(va, pa, resid)) {
+#ifdef VERBOSE_INIT_ARM
+ printf("L");
+#endif
+ for (i = 0; i < 16; i++) {
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte[((va >> PAGE_SHIFT) & 0x3f0) + i] =
+ L2_L_PROTO | pa |
+ L2_L_PROT(PTE_KERNEL, prot) | f2l;
+ PTE_SYNC(&pte[((va >> PAGE_SHIFT) & 0x3f0) + i]);
+#else
+ pte[l2pte_index(va) + i] =
+ L2_L_PROTO | pa |
+ L2_L_PROT(PTE_KERNEL, prot) | f2l;
+ PTE_SYNC(&pte[l2pte_index(va) + i]);
+#endif
+ }
+ va += L2_L_SIZE;
+ pa += L2_L_SIZE;
+ resid -= L2_L_SIZE;
+ continue;
+ }
+
+ /* Use a small page mapping. */
+#ifdef VERBOSE_INIT_ARM
+ printf("P");
+#endif
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte[(va >> PAGE_SHIFT) & 0x3ff] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
+ PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
+#else
+ pte[l2pte_index(va)] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
+ PTE_SYNC(&pte[l2pte_index(va)]);
+#endif
+ va += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ resid -= PAGE_SIZE;
+ }
+#ifdef VERBOSE_INIT_ARM
+ printf("\n");
+#endif
+ return (size);
+
+}
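+
+/*
+ * Example (illustrative): a port's initarm() can map the whole kernel
+ * image in one call; the symbols below are hypothetical:
+ *
+ *	pmap_map_chunk(kernel_l1pt.pv_va, KERNBASE, physaddr,
+ *	    kernel_size, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
+ *
+ * Any 1MB-aligned spans become section mappings, with 64KB large
+ * pages and 4KB small pages covering the remainder.
+ */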
+
diff --git a/sys/arm/arm/setcpsr.S b/sys/arm/arm/setcpsr.S
new file mode 100644
index 0000000..fa70e1b
--- /dev/null
+++ b/sys/arm/arm/setcpsr.S
@@ -0,0 +1,80 @@
+/* $NetBSD: setcpsr.S,v 1.2 2002/08/15 01:37:02 briggs Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * setcpsr.S
+ *
+ * Miscellaneous routines to play with the CPSR register
+ *
+ * Eventually this routine can be inline assembly.
+ *
+ * Created : 12/09/94
+ *
+ * Based on kate/display/setcpsr.s
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/* Sets and clears bits in the CPSR register
+ *
+ * r0 - bic mask
+ * r1 - eor mask
+ */
+
+ENTRY_NP(SetCPSR)
+ mrs r3, cpsr /* Get the current CPSR */
+ bic r2, r3, r0
+ eor r2, r2, r1
+ msr cpsr_all, r2
+
+ mov r0, r3 /* Return the old CPSR */
+
+ mov pc, lr
+
+
+/* Gets the CPSR register
+ *
+ * Returns the CPSR in r0
+ */
+
+ENTRY_NP(GetCPSR)
+ mrs r0, cpsr /* Get the CPSR */
+
+ mov pc, lr
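+
+/* Example (illustrative): C-side usage.  Interrupt enable/disable
+ * wrappers can be built on SetCPSR; this sketch assumes the I32_bit
+ * definition from armreg.h:
+ *
+ *	u_int SetCPSR(u_int bic, u_int eor);
+ *
+ *	oldcpsr = SetCPSR(I32_bit, I32_bit);	@ mask (disable) IRQs
+ *	(void)SetCPSR(I32_bit, 0);		@ unmask (enable) IRQs
+ */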
+
diff --git a/sys/arm/arm/setstack.s b/sys/arm/arm/setstack.s
new file mode 100644
index 0000000..dc754d5
--- /dev/null
+++ b/sys/arm/arm/setstack.s
@@ -0,0 +1,94 @@
+/* $NetBSD: setstack.S,v 1.1 2001/07/28 13:28:03 chris Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * setstack.S
+ *
+ * Miscellaneous routine to play with the stack pointer in different CPU modes
+ *
+ * Eventually this routine can be inline assembly.
+ *
+ * Created : 17/09/94
+ *
+ * Based on kate/display/setstack.s
+ *
+ */
+
+#include <machine/armreg.h>
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/* To set the stack pointer for a particular mode we must switch
+ * to that mode, update the banked r13, and then switch back.
+ * This routine provides an easy way of doing this for any mode.
+ *
+ * r0 = CPU mode
+ * r1 = stackptr
+ */
+
+ENTRY(set_stackptr)
+ mrs r3, cpsr_all /* Switch to the appropriate mode */
+ bic r2, r3, #(PSR_MODE)
+ orr r2, r2, r0
+ msr cpsr_all, r2
+
+ mov sp, r1 /* Set the stack pointer */
+
+ msr cpsr_all, r3 /* Restore the old mode */
+
+ mov pc, lr /* Exit */
+
+/* To get the stack pointer for a particular mode we must switch
+ * to that mode, copy the banked r13, and then switch back.
+ * This routine provides an easy way of doing this for any mode.
+ *
+ * r0 = CPU mode
+ */
+
+ENTRY(get_stackptr)
+ mrs r3, cpsr_all /* Switch to the appropriate mode */
+ bic r2, r3, #(PSR_MODE)
+ orr r2, r2, r0
+ msr cpsr_all, r2
+
+	mov	r0, sp		/* Get the stack pointer */
+
+ msr cpsr_all, r3 /* Restore the old mode */
+
+ mov pc, lr /* Exit */
+
+/* End of setstack.S */
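
set_stackptr() is typically called during early machine-dependent startup to
give each exception mode its own stack. A sketch of such a call; the stack
symbols here are assumed names, not taken from this import:

	/*
	 * Point the UND32-mode sp at the top of a dedicated stack before
	 * any undefined-instruction exception can be taken.
	 * undstack_va and UND_STACK_SIZE are assumed names.
	 */
	set_stackptr(PSR_UND32_MODE,
	    undstack_va + UND_STACK_SIZE * PAGE_SIZE);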
diff --git a/sys/arm/arm/support.S b/sys/arm/arm/support.S
new file mode 100644
index 0000000..bfca271
--- /dev/null
+++ b/sys/arm/arm/support.S
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+#include <machine/asmacros.h>
+__FBSDID("$FreeBSD$");
+
+#include "assym.s"
+
+ENTRY(casuptr)
+	mov	r1, r2
+	b	suword		/* XXX stub: tail call, so suword returns to our caller */
+/*
+ * New experimental definitions of IRQdisable and IRQenable
+ * These keep FIQs enabled, since FIQs are special.
+ */
+
+#define IRQdisable \
+ mrs r14, cpsr ; \
+ orr r14, r14, #(I32_bit) ; \
+ msr cpsr_c, r14 ; \
+
+#define IRQenable \
+ mrs r14, cpsr ; \
+ bic r14, r14, #(I32_bit) ; \
+ msr cpsr_c, r14 ; \
+
+/*
+ * These are used for switching the translation table/DACR.
+ * Since the vector page can be invalid for a short time, we must
+ * disable both regular IRQs *and* FIQs.
+ *
+ * XXX: This is not necessary if the vector table is relocated.
+ */
+#define IRQdisableALL \
+ mrs r14, cpsr ; \
+	orr	r14, r14, #(I32_bit | F32_bit) ; \
+ msr cpsr_all, r14
+
+#define IRQenableALL \
+ mrs r14, cpsr ; \
+	bic	r14, r14, #(I32_bit | F32_bit) ; \
+ msr cpsr_all, r14
+
+ENTRY(disable_intr)
+	IRQdisableALL
+	mov	pc, lr
+
+ENTRY(enable_intr)
+	IRQenableALL
+	mov	pc, lr
+
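
The masking that disable_intr/enable_intr perform can also be expressed at
the C level with the SetCPSR() primitive from setcpsr.S above. A sketch of
the usual save-and-restore pattern (I32_bit/F32_bit come from armreg.h):

	u_int32_t saved;

	/* Set I and F: mask further IRQs and FIQs, remembering old state. */
	saved = SetCPSR(I32_bit | F32_bit, I32_bit | F32_bit);

	/* ... critical section ... */

	/* Clear I and F, then put back whichever bits were set before. */
	SetCPSR(I32_bit | F32_bit, saved & (I32_bit | F32_bit));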
diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S
new file mode 100644
index 0000000..b823709
--- /dev/null
+++ b/sys/arm/arm/swtch.S
@@ -0,0 +1,543 @@
+/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpuswitch.S
+ *
+ * cpu switching functions
+ *
+ * Created : 15/10/94
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/asmacros.h>
+#include <machine/armreg.h>
+__FBSDID("$FreeBSD$");
+
+#include "assym.s"
+
+/*
+ * New experimental definitions of IRQdisable and IRQenable
+ * These keep FIQs enabled, since FIQs are special.
+ */
+
+#define DOMAIN_CLIENT 0x01
+#define IRQdisable \
+ mrs r14, cpsr ; \
+ orr r14, r14, #(I32_bit) ; \
+ msr cpsr_c, r14 ; \
+
+#define IRQenable \
+ mrs r14, cpsr ; \
+ bic r14, r14, #(I32_bit) ; \
+ msr cpsr_c, r14 ; \
+
+/*
+ * These are used for switching the translation table/DACR.
+ * Since the vector page can be invalid for a short time, we must
+ * disable both regular IRQs *and* FIQs.
+ *
+ * XXX: This is not necessary if the vector table is relocated.
+ */
+#define IRQdisableALL \
+ mrs r14, cpsr ; \
+ orr r14, r14, #(I32_bit | F32_bit) ; \
+ msr cpsr_c, r14
+
+#define IRQenableALL \
+ mrs r14, cpsr ; \
+ bic r14, r14, #(I32_bit | F32_bit) ; \
+ msr cpsr_c, r14
+
+.Lpcpu:
+ .word _C_LABEL(__pcpu)
+.Lcurthread:
+ .word _C_LABEL(__pcpu) + PC_CURTHREAD
+.Lcurpcb:
+ .word _C_LABEL(__pcpu) + PC_CURPCB
+.Lcpufuncs:
+ .word _C_LABEL(cpufuncs)
+.Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+.Lcpu_do_powersave:
+ .word _C_LABEL(cpu_do_powersave)
+
+.Lpmap_kernel_cstate:
+ .word (kernel_pmap_store + PMAP_CSTATE)
+
+.Llast_cache_state_ptr:
+ .word _C_LABEL(pmap_cache_state)
+
+/* XXX: wow */
+ENTRY(cpu_throw)
+ENTRY(cpu_switch)
+ stmfd sp!, {r4-r7, lr}
+ mov r6, r1
+ mov r1, r0
+
+ .Lswitch_resume:
+ /* rem: r1 = old lwp */
+ /* rem: r4 = return value [not used if came from cpu_switchto()] */
+ /* rem: r6 = new process */
+ /* rem: interrupts are disabled */
+
+#ifdef MULTIPROCESSOR
+ /* XXX use curcpu() */
+ ldr r0, .Lcpu_info_store
+ str r0, [r6, #(L_CPU)]
+#else
+ /* l->l_cpu initialized in fork1() for single-processor */
+#endif
+
+ /* Process is now on a processor. */
+
+	/* We have a new curthread now so make a note of it */
+ ldr r7, .Lcurthread
+ str r6, [r7]
+
+ /* Hook in a new pcb */
+ ldr r7, .Lcurpcb
+ ldr r0, [r6, #(TD_PCB)]
+ str r0, [r7]
+
+ /* At this point we can allow IRQ's again. */
+ /* rem: r1 = old lwp */
+ /* rem: r4 = return value */
+ /* rem: r6 = new process */
+ /* rem: interrupts are enabled */
+
+ /* Remember the old lwp in r0 */
+ mov r0, r1
+
+ /*
+ * If the old lwp on entry to cpu_switch was zero then the
+ * process that called it was exiting. This means that we do
+ * not need to save the current context. Instead we can jump
+ * straight to restoring the context for the new process.
+ */
+ teq r0, #0x00000000
+ beq .Lswitch_exited
+
+ /* rem: r0 = old lwp */
+ /* rem: r4 = return value */
+ /* rem: r6 = new process */
+ /* rem: interrupts are enabled */
+
+ /* Stage two : Save old context */
+
+ /* Get the user structure for the old lwp. */
+ ldr r1, [r0, #(TD_PCB)]
+
+ /* Save all the registers in the old lwp's pcb */
+#ifndef __XSCALE__
+ add r7, r1, #(PCB_R8)
+ stmia r7, {r8-r13}
+#else
+ strd r8, [r1, #(PCB_R8)]
+ strd r10, [r1, #(PCB_R10)]
+ strd r12, [r1, #(PCB_R12)]
+#endif
+
+ /*
+ * NOTE: We can now use r8-r13 until it is time to restore
+ * them for the new process.
+ */
+
+ /* Remember the old PCB. */
+ mov r8, r1
+
+ /* r1 now free! */
+
+ /* Get the user structure for the new process in r9 */
+ ldr r9, [r6, #(TD_PCB)]
+
+ /*
+ * This can be optimised... We know we want to go from SVC32
+ * mode to UND32 mode
+ */
+ mrs r3, cpsr
+ bic r2, r3, #(PSR_MODE)
+ orr r2, r2, #(PSR_UND32_MODE | I32_bit)
+ msr cpsr_c, r2
+
+ str sp, [r8, #(PCB_UND_SP)]
+
+ msr cpsr_c, r3 /* Restore the old mode */
+
+ /* rem: r0 = old lwp */
+ /* rem: r4 = return value */
+ /* rem: r6 = new process */
+ /* rem: r8 = old PCB */
+ /* rem: r9 = new PCB */
+ /* rem: interrupts are enabled */
+
+	/* What else needs to be saved? Only FPA stuff, when that is supported. */
+
+ /* Third phase : restore saved context */
+
+ /* rem: r0 = old lwp */
+ /* rem: r4 = return value */
+ /* rem: r6 = new lwp */
+ /* rem: r8 = old PCB */
+ /* rem: r9 = new PCB */
+ /* rem: interrupts are enabled */
+
+ /*
+ * Get the new L1 table pointer into r11. If we're switching to
+ * an LWP with the same address space as the outgoing one, we can
+ * skip the cache purge and the TTB load.
+ *
+ * To avoid data dep stalls that would happen anyway, we try
+	 * to get some useful work done in the meantime.
+ */
+ ldr r10, [r8, #(PCB_PAGEDIR)] /* r10 = old L1 */
+ ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
+
+ ldr r0, [r8, #(PCB_DACR)] /* r0 = old DACR */
+ ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
+ ldr r8, [r9, #(PCB_CSTATE)] /* r8 = &new_pmap->pm_cstate */
+ ldr r5, .Llast_cache_state_ptr /* Previous thread's cstate */
+
+ teq r10, r11 /* Same L1? */
+ ldr r5, [r5]
+ cmpeq r0, r1 /* Same DACR? */
+ beq .Lcs_context_switched /* yes! */
+ ldr r3, .Lblock_userspace_access
+ mov r12, #0
+ cmp r5, #0 /* No last vm? (switch_exit) */
+	beq	.Lcs_cache_purge_skipped /* No, we can skip cache flush */
+
+ mov r2, #DOMAIN_CLIENT
+ cmp r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
+ beq .Lcs_cache_purge_skipped /* Yup. Don't flush cache */
+
+ cmp r5, r8 /* Same userland VM space? */
+ ldrneb r12, [r5, #(CS_CACHE_ID)] /* Last VM space cache state */
+
+ /*
+	 * We're definitely switching to a new userland VM space,
+ * and the previous userland VM space has yet to be flushed
+ * from the cache/tlb.
+ *
+ * r12 holds the previous VM space's cs_cache_id state
+ */
+ tst r12, #0xff /* Test cs_cache_id */
+ beq .Lcs_cache_purge_skipped /* VM space is not in cache */
+
+ /*
+	 * Definitely need to flush the cache.
+ * Mark the old VM space as NOT being resident in the cache.
+ */
+ mov r2, #0x00000000
+ strb r2, [r5, #(CS_CACHE_ID)]
+ strb r2, [r5, #(CS_CACHE_D)]
+
+ /*
+ * Don't allow user space access between the purge and the switch.
+ */
+ mov r2, #0x00000001
+ str r2, [r3]
+
+ stmfd sp!, {r0-r3}
+ ldr r1, .Lcpufuncs
+ mov lr, pc
+ ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
+ ldmfd sp!, {r0-r3}
+
+.Lcs_cache_purge_skipped:
+ /* rem: r1 = new DACR */
+ /* rem: r3 = &block_userspace_access */
+ /* rem: r4 = return value */
+ /* rem: r5 = &old_pmap->pm_cstate (or NULL) */
+ /* rem: r6 = new lwp */
+ /* rem: r8 = &new_pmap->pm_cstate */
+ /* rem: r9 = new PCB */
+ /* rem: r10 = old L1 */
+ /* rem: r11 = new L1 */
+
+ mov r2, #0x00000000
+ ldr r7, [r9, #(PCB_PL1VEC)]
+
+ /*
+ * At this point we need to kill IRQ's again.
+ *
+ * XXXSCW: Don't need to block FIQs if vectors have been relocated
+ */
+#if 0
+ IRQdisableALL
+#endif
+
+ /*
+ * Interrupts are disabled so we can allow user space accesses again
+ * as none will occur until interrupts are re-enabled after the
+ * switch.
+ */
+ str r2, [r3]
+
+ /*
+ * Ensure the vector table is accessible by fixing up the L1
+ */
+ cmp r7, #0 /* No need to fixup vector table? */
+ ldrne r2, [r7] /* But if yes, fetch current value */
+ ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
+ mcr p15, 0, r1, c3, c0, 0 /* Update DACR for new context */
+ cmpne r2, r0 /* Stuffing the same value? */
+#if 0
+ strne r0, [r7] /* Nope, update it */
+#else
+ beq .Lcs_same_vector
+ str r0, [r7] /* Otherwise, update it */
+
+ /*
+ * Need to sync the cache to make sure that last store is
+ * visible to the MMU.
+ */
+ ldr r2, .Lcpufuncs
+ mov r0, r7
+ mov r1, #4
+ mov lr, pc
+ ldr pc, [r2, #CF_DCACHE_WB_RANGE]
+
+.Lcs_same_vector:
+#endif
+
+ cmp r10, r11 /* Switching to the same L1? */
+ ldr r10, .Lcpufuncs
+ beq .Lcs_same_l1 /* Yup. */
+ /*
+ * Do a full context switch, including full TLB flush.
+ */
+ mov r0, r11
+ mov lr, pc
+ ldr pc, [r10, #CF_CONTEXT_SWITCH]
+
+ /*
+ * Mark the old VM space as NOT being resident in the TLB
+ */
+ mov r2, #0x00000000
+ cmp r5, #0
+ strneh r2, [r5, #(CS_TLB_ID)]
+ b .Lcs_context_switched
+
+ /*
+ * We're switching to a different process in the same L1.
+ * In this situation, we only need to flush the TLB for the
+ * vector_page mapping, and even then only if r7 is non-NULL.
+ */
+.Lcs_same_l1:
+ cmp r7, #0
+ movne r0, #0 /* We *know* vector_page's VA is 0x0 */
+ movne lr, pc
+ ldrne pc, [r10, #CF_TLB_FLUSHID_SE]
+
+.Lcs_context_switched:
+ /* rem: r8 = &new_pmap->pm_cstate */
+
+ /* XXXSCW: Safe to re-enable FIQs here */
+
+ /*
+ * The new VM space is live in the cache and TLB.
+ * Update its cache/tlb state, and if it's not the kernel
+ * pmap, update the 'last cache state' pointer.
+ */
+ mov r2, #-1
+ ldr r5, .Lpmap_kernel_cstate
+ ldr r0, .Llast_cache_state_ptr
+ str r2, [r8, #(CS_ALL)]
+ cmp r5, r8
+ strne r8, [r0]
+
+ /* rem: r4 = return value */
+ /* rem: r6 = new lwp */
+ /* rem: r9 = new PCB */
+
+ /*
+ * This can be optimised... We know we want to go from SVC32
+ * mode to UND32 mode
+ */
+ mrs r3, cpsr
+ bic r2, r3, #(PSR_MODE)
+ orr r2, r2, #(PSR_UND32_MODE)
+ msr cpsr_c, r2
+
+ ldr sp, [r9, #(PCB_UND_SP)]
+
+ msr cpsr_c, r3 /* Restore the old mode */
+
+ /* Restore all the save registers */
+#ifndef __XSCALE__
+ add r7, r9, #PCB_R8
+ ldmia r7, {r8-r13}
+ sub r7, r7, #PCB_R8 /* restore PCB pointer */
+#else
+ mov r7, r9
+ ldr r8, [r7, #(PCB_R8)]
+ ldr r9, [r7, #(PCB_R9)]
+ ldr r10, [r7, #(PCB_R10)]
+ ldr r11, [r7, #(PCB_R11)]
+ ldr r12, [r7, #(PCB_R12)]
+ ldr r13, [r7, #(PCB_SP)]
+#endif
+
+ ldr r5, [r6, #(TD_PROC)] /* fetch the proc for below */
+
+ /* rem: r4 = return value */
+ /* rem: r5 = new lwp's proc */
+ /* rem: r6 = new lwp */
+ /* rem: r7 = new pcb */
+
+#ifdef ARMFPE
+ add r0, r7, #(USER_SIZE) & 0x00ff
+ add r0, r0, #(USER_SIZE) & 0xff00
+ bl _C_LABEL(arm_fpe_core_changecontext)
+#endif
+
+ /* We can enable interrupts again */
+#if 0
+ IRQenableALL
+#endif
+ /* rem: r4 = return value */
+ /* rem: r5 = new lwp's proc */
+ /* rem: r6 = new lwp */
+ /* rem: r7 = new PCB */
+
+.Lswitch_return:
+
+ /*
+ * Pull the registers that got pushed when either savectx() or
+ * cpu_switch() was called and return.
+ */
+ ldmfd sp!, {r4-r7, pc}
+.Lswitch_exited:
+ /*
+ * We skip the cache purge because switch_exit() already did it.
+ * Load up registers the way .Lcs_cache_purge_skipped expects.
+	 * Userspace access already blocked by switch_exit().
+ */
+ ldr r9, [r6, #(TD_PCB)] /* r9 = new PCB */
+ ldr r3, .Lblock_userspace_access
+ mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
+ mov r5, #0 /* No previous cache state */
+ ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
+ ldr r8, [r9, #(PCB_CSTATE)] /* r8 = new cache state */
+ ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
+ b .Lcs_cache_purge_skipped
+#ifdef DIAGNOSTIC
+.Lswitch_bogons:
+ adr r0, .Lswitch_panic_str
+ bl _C_LABEL(panic)
+1: nop
+ b 1b
+
+.Lswitch_panic_str:
+ .asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
+#endif
+ENTRY(savectx)
+ mov pc, lr
+ENTRY(fork_trampoline)
+ mov r1, r5
+ mov r2, sp
+ mov r0, r4
+ mov lr, pc
+ #if 0
+ mov r2, sp
+ #endif
+ #if 0
+ mov pc, r4
+ #endif
+ bl _C_LABEL(fork_exit)
+ /* Kill irq's */
+ mrs r0, cpsr
+ orr r0, r0, #(I32_bit)
+ msr cpsr_c, r0
+
+ PULLFRAME
+
+ movs pc, lr /* Exit */
+
+#ifndef __XSCALE__
+ .type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
+.Lcpu_switch_ffs_table:
+/* Same as the ffs table, but each entry is one less. */
+/* 0 1 2 3 4 5 6 7 */
+ .byte 0, 0, 1, 12, 2, 6, 0, 13 /* 0- 7 */
+ .byte 3, 0, 7, 0, 0, 0, 0, 14 /* 8-15 */
+ .byte 10, 4, 0, 0, 8, 0, 0, 25 /* 16-23 */
+ .byte 0, 0, 0, 0, 0, 21, 27, 15 /* 24-31 */
+ .byte 31, 11, 5, 0, 0, 0, 0, 0 /* 32-39 */
+ .byte 9, 0, 0, 24, 0, 0, 20, 26 /* 40-47 */
+ .byte 30, 0, 0, 0, 0, 23, 0, 19 /* 48-55 */
+ .byte 29, 0, 22, 18, 28, 17, 16, 0 /* 56-63 */
+#endif /* !__XSCALE__ */
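
Tables like the one above implement a branch-free ffs(): isolate the lowest
set bit, multiply by a de Bruijn-style constant, and use the top bits of the
product as a table index. The consumer of this particular table is not in
this hunk, so the sketch below shows the idiom with the standard 32-bit
constant and its matching table rather than the exact values used here:

	/* Table matching the 0x077CB531 de Bruijn constant. */
	static const unsigned char ffs_tab[32] = {
		 0,  1, 28,  2, 29, 14, 24,  3,
		30, 22, 20, 15, 25, 17,  4,  8,
		31, 27, 13, 23, 21, 19, 16,  7,
		26, 12, 18,  6, 11,  5, 10,  9
	};

	static __inline int
	ffs32(u_int32_t v)
	{

		if (v == 0)
			return (-1);
		/* (v & -v) isolates the lowest set bit of v. */
		return (ffs_tab[((v & -v) * 0x077CB531U) >> 27]);
	}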
diff --git a/sys/arm/arm/sys_machdep.c b/sys/arm/arm/sys_machdep.c
new file mode 100644
index 0000000..fab6164
--- /dev/null
+++ b/sys/arm/arm/sys_machdep.c
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_kstack_pages.h"
+#include "opt_mac.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mac.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/sysproto.h>
+#include <sys/user.h>
+#include <sys/syscall.h>
+#include <sys/sysent.h>
+
+#ifndef _SYS_SYSPROTO_H_
+struct sysarch_args {
+ int op;
+ char *parms;
+};
+#endif
+
+int
+sysarch(struct thread *td, struct sysarch_args *uap)
+{
+ return (0);
+}
+
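
sysarch() is only a stub in this import. Once ARM-specific operations exist
it will grow the usual op-switch shape; a sketch, where ARM_EXAMPLE_OP is a
placeholder and not a real operation:

	int
	sysarch(struct thread *td, struct sysarch_args *uap)
	{
		int error = 0;

		switch (uap->op) {
		case ARM_EXAMPLE_OP:	/* placeholder op */
			/* copyin(uap->parms, ...) and do the work here */
			break;
		default:
			error = EINVAL;
			break;
		}
		return (error);
	}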
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
new file mode 100644
index 0000000..066a0ca
--- /dev/null
+++ b/sys/arm/arm/trap.c
@@ -0,0 +1,917 @@
+/* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */
+
+/*
+ * Copyright 2004 Olivier Houchard
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * fault.c
+ *
+ * Fault handlers
+ *
+ * Created : 28/11/94
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/syscall.h>
+#include <sys/sysent.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <machine/cpuconf.h>
+#include <machine/vmparam.h>
+#include <machine/frame.h>
+#include <machine/katelib.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+#include <machine/proc.h>
+#include <machine/swi.h>
+#if !defined(DDB)
+#define kdb_trap kgdb_trap
+#endif
+
+void swi_handler(trapframe_t *);
+void undefinedinstruction(trapframe_t *);
+
+#include <machine/disassem.h>
+#include <machine/machdep.h>
+
+extern char fusubailout[];
+
+#ifdef DEBUG
+int last_fault_code; /* For the benefit of pmap_fault_fixup() */
+#endif
+
+#if defined(CPU_ARM3) || defined(CPU_ARM6) || \
+ defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
+/* These CPUs may need data/prefetch abort fixups */
+#define CPU_ABORT_FIXUP_REQUIRED
+#endif
+
+struct ksig {
+ int signb;
+ u_long code;
+};
+struct data_abort {
+ int (*func)(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
+ const char *desc;
+};
+
+static int dab_fatal(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
+static int dab_align(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
+static int dab_buserr(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
+
+static const struct data_abort data_aborts[] = {
+ {dab_fatal, "Vector Exception"},
+ {dab_align, "Alignment Fault 1"},
+ {dab_fatal, "Terminal Exception"},
+ {dab_align, "Alignment Fault 3"},
+ {dab_buserr, "External Linefetch Abort (S)"},
+ {NULL, "Translation Fault (S)"},
+ {dab_buserr, "External Linefetch Abort (P)"},
+ {NULL, "Translation Fault (P)"},
+ {dab_buserr, "External Non-Linefetch Abort (S)"},
+ {NULL, "Domain Fault (S)"},
+ {dab_buserr, "External Non-Linefetch Abort (P)"},
+ {NULL, "Domain Fault (P)"},
+ {dab_buserr, "External Translation Abort (L1)"},
+ {NULL, "Permission Fault (S)"},
+ {dab_buserr, "External Translation Abort (L2)"},
+ {NULL, "Permission Fault (P)"}
+};
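+/*
+ * A NULL func above means the abort is left to the normal VM path:
+ * translation, domain, and permission faults fall through to
+ * pmap_fault_fixup()/vm_fault() in data_abort_handler() below.
+ */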
+
+/* Determine if a fault came from user mode */
+#define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE)
+
+/* Determine if 'x' is a permission fault */
+#define IS_PERMISSION_FAULT(x) \
+ (((1 << ((x) & FAULT_TYPE_MASK)) & \
+ ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0)
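+/*
+ * I.e. turn the 4-bit fault type into a one-hot mask and test, with a
+ * single AND, membership in the set { FAULT_PERM_P, FAULT_PERM_S }.
+ */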
+
+static __inline void
+call_trapsignal(struct thread *td, int sig, u_long code)
+{
+
+ trapsignal(td, sig, code);
+}
+
+static __inline int
+data_abort_fixup(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
+{
+#ifdef CPU_ABORT_FIXUP_REQUIRED
+ int error;
+
+ /* Call the cpu specific data abort fixup routine */
+ error = cpu_dataabt_fixup(tf);
+ if (__predict_true(error != ABORT_FIXUP_FAILED))
+ return (error);
+
+ /*
+ * Oops, couldn't fix up the instruction
+ */
+ printf("data_abort_fixup: fixup for %s mode data abort failed.\n",
+ TRAP_USERMODE(tf) ? "user" : "kernel");
+ printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
+ *((u_int *)tf->tf_pc));
+ disassemble(tf->tf_pc);
+
+ /* Die now if this happened in kernel mode */
+ if (!TRAP_USERMODE(tf))
+		dab_fatal(tf, fsr, far, td, ksig);
+
+ return (error);
+#else
+ return (ABORT_FIXUP_OK);
+#endif /* CPU_ABORT_FIXUP_REQUIRED */
+}
+
+extern int curpid;
+void
+data_abort_handler(trapframe_t *tf)
+{
+ struct vm_map *map;
+ struct pcb *pcb;
+ struct thread *td;
+ u_int user, far, fsr;
+ vm_prot_t ftype;
+ void *onfault;
+ vm_offset_t va;
+ u_int sticks = 0;
+ int error = 0;
+ struct ksig ksig;
+
+ /* Grab FAR/FSR before enabling interrupts */
+ far = cpu_faultaddress();
+ fsr = cpu_faultstatus();
+
+#if 0
+ /* Update vmmeter statistics */
+ vmexp.traps++;
+#endif
+ /* Re-enable interrupts if they were enabled previously */
+ if (__predict_true((tf->tf_spsr & I32_bit) == 0))
+ enable_interrupts(I32_bit);
+
+	/* Get the current thread structure, or thread0 if there is none */
+ td = (curthread != NULL) ? curthread : &thread0;
+
+ /* Data abort came from user mode? */
+ user = TRAP_USERMODE(tf);
+
+ /* Grab the current pcb */
+ pcb = td->td_pcb;
+ /* Invoke the appropriate handler, if necessary */
+ if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
+ if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
+ td, &ksig))
+ goto do_trapsignal;
+ goto out;
+ }
+
+ /*
+ * At this point, we're dealing with one of the following data aborts:
+ *
+ * FAULT_TRANS_S - Translation -- Section
+ * FAULT_TRANS_P - Translation -- Page
+ * FAULT_DOMAIN_S - Domain -- Section
+ * FAULT_DOMAIN_P - Domain -- Page
+ * FAULT_PERM_S - Permission -- Section
+ * FAULT_PERM_P - Permission -- Page
+ *
+ * These are the main virtual memory-related faults signalled by
+ * the MMU.
+ */
+
+ /* fusubailout is used by [fs]uswintr to avoid page faulting */
+ if (__predict_false(pcb->pcb_onfault == fusubailout)) {
+ tf->tf_r0 = EFAULT;
+ tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
+ return;
+ }
+
+ if (user) {
+ sticks = td->td_sticks;
+ td->td_frame = tf;
+ }
+
+ /*
+ * Make sure the Program Counter is sane. We could fall foul of
+ * someone executing Thumb code, in which case the PC might not
+ * be word-aligned. This would cause a kernel alignment fault
+ * further down if we have to decode the current instruction.
+ * XXX: It would be nice to be able to support Thumb at some point.
+ */
+ if (__predict_false((tf->tf_pc & 3) != 0)) {
+ if (user) {
+ /*
+ * Give the user an illegal instruction signal.
+ */
+ /* Deliver a SIGILL to the process */
+ ksig.signb = SIGILL;
+ ksig.code = 0;
+ goto do_trapsignal;
+ }
+
+ /*
+ * The kernel never executes Thumb code.
+ */
+ printf("\ndata_abort_fault: Misaligned Kernel-mode "
+ "Program Counter\n");
+ dab_fatal(tf, fsr, far, td, &ksig);
+ }
+
+ /* See if the cpu state needs to be fixed up */
+ switch (data_abort_fixup(tf, fsr, far, td, &ksig)) {
+ case ABORT_FIXUP_RETURN:
+ return;
+ case ABORT_FIXUP_FAILED:
+ /* Deliver a SIGILL to the process */
+ ksig.signb = SIGILL;
+ ksig.code = 0;
+ goto do_trapsignal;
+ default:
+ break;
+ }
+
+ va = trunc_page((vm_offset_t)far);
+
+ /*
+ * It is only a kernel address space fault iff:
+ * 1. user == 0 and
+ * 2. pcb_onfault not set or
+ * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
+ */
+ if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
+ (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
+ __predict_true((pcb->pcb_onfault == NULL ||
+ (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
+ map = kernel_map;
+
+ /* Was the fault due to the FPE/IPKDB ? */
+ if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {
+
+ /*
+ * Force exit via userret()
+ * This is necessary as the FPE is an extension to
+			 * userland that actually runs in a privileged mode
+ * but uses USR mode permissions for its accesses.
+ */
+ user = 1;
+ ksig.signb = SIGSEGV;
+ ksig.code = 0;
+ goto do_trapsignal;
+ }
+ } else {
+ map = &td->td_proc->p_vmspace->vm_map;
+ }
+
+ /*
+ * We need to know whether the page should be mapped
+ * as R or R/W. The MMU does not give us the info as
+ * to whether the fault was caused by a read or a write.
+ *
+ * However, we know that a permission fault can only be
+ * the result of a write to a read-only location, so
+ * we can deal with those quickly.
+ *
+ * Otherwise we need to disassemble the instruction
+ * responsible to determine if it was a write.
+ */
+ if (IS_PERMISSION_FAULT(fsr)) {
+ ftype = VM_PROT_WRITE;
+ } else {
+ u_int insn = ReadWord(tf->tf_pc);
+
+ if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */
+ ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */
+ ((insn & 0x0a100000) == 0x08000000)) /* STM/CDT */
+ {
+ ftype = VM_PROT_WRITE;
+ }
+ else
+ if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+ }
+
+ /*
+ * See if the fault is as a result of ref/mod emulation,
+ * or domain mismatch.
+ */
+#ifdef DEBUG
+ last_fault_code = fsr;
+#endif
+ if (pmap_fault_fixup(map->pmap, va, ftype, user)) {
+ goto out;
+ }
+
+ onfault = pcb->pcb_onfault;
+ pcb->pcb_onfault = NULL;
+ error = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE) ?
+ VM_FAULT_DIRTY : VM_FAULT_NORMAL);
+ pcb->pcb_onfault = onfault;
+ if (__predict_true(error == 0)) {
+ goto out;
+ }
+
+ if (user == 0) {
+ if (pcb->pcb_onfault) {
+ tf->tf_r0 = error;
+ tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
+ return;
+ }
+
+ printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
+ error);
+ dab_fatal(tf, fsr, far, td, &ksig);
+ }
+
+ if (error == ENOMEM) {
+ printf("VM: pid %d (%s), uid %d killed: "
+ "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm,
+ (td->td_proc->p_ucred) ?
+ td->td_proc->p_ucred->cr_uid : -1);
+ ksig.signb = SIGKILL;
+ } else {
+ ksig.signb = SIGSEGV;
+ }
+ ksig.code = 0;
+do_trapsignal:
+ call_trapsignal(td, ksig.signb, ksig.code);
+out:
+ /* If returning to user mode, make sure to invoke userret() */
+ if (user)
+ userret(td, tf, sticks);
+}
+
+/*
+ * dab_fatal() handles the following data aborts:
+ *
+ * FAULT_WRTBUF_0 - Vector Exception
+ * FAULT_WRTBUF_1 - Terminal Exception
+ *
+ * We should never see these on a properly functioning system.
+ *
+ * This function is also called by the other handlers if they
+ * detect a fatal problem.
+ *
+ * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
+ */
+static int
+dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
+{
+ const char *mode;
+
+ mode = TRAP_USERMODE(tf) ? "user" : "kernel";
+
+ if (td != NULL) {
+ printf("Fatal %s mode data abort: '%s'\n", mode,
+ data_aborts[fsr & FAULT_TYPE_MASK].desc);
+ printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
+ if ((fsr & FAULT_IMPRECISE) == 0)
+ printf("%08x, ", far);
+ else
+ printf("Invalid, ");
+ printf("spsr=%08x\n", tf->tf_spsr);
+ } else {
+ printf("Fatal %s mode prefetch abort at 0x%08x\n",
+ mode, tf->tf_pc);
+ printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
+ }
+
+ printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
+ tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
+ printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
+ tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
+ printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
+ tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
+ printf("r12=%08x, ", tf->tf_r12);
+
+ if (TRAP_USERMODE(tf))
+ printf("usp=%08x, ulr=%08x",
+ tf->tf_usr_sp, tf->tf_usr_lr);
+ else
+ printf("ssp=%08x, slr=%08x",
+ tf->tf_svc_sp, tf->tf_svc_lr);
+ printf(", pc =%08x\n\n", tf->tf_pc);
+
+#if defined(DDB) || defined(KGDB)
+ kdb_trap(T_FAULT, tf);
+#endif
+ panic("Fatal abort");
+ /*NOTREACHED*/
+}
+
+/*
+ * dab_align() handles the following data aborts:
+ *
+ * FAULT_ALIGN_0 - Alignment fault
+ *	FAULT_ALIGN_1 - Alignment fault
+ *
+ * These faults are fatal if they happen in kernel mode. Otherwise, we
+ * deliver a bus error to the process.
+ */
+static int
+dab_align(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
+{
+
+ /* Alignment faults are always fatal if they occur in kernel mode */
+ if (!TRAP_USERMODE(tf))
+ dab_fatal(tf, fsr, far, td, ksig);
+
+ /* pcb_onfault *must* be NULL at this point */
+
+ /* See if the cpu state needs to be fixed up */
+ (void) data_abort_fixup(tf, fsr, far, td, ksig);
+
+ /* Deliver a bus error signal to the process */
+ ksig->code = 0;
+ ksig->signb = SIGBUS;
+ td->td_frame = tf;
+
+ return (1);
+}
+
+/*
+ * dab_buserr() handles the following data aborts:
+ *
+ * FAULT_BUSERR_0 - External Abort on Linefetch -- Section
+ * FAULT_BUSERR_1 - External Abort on Linefetch -- Page
+ * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
+ * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
+ * FAULT_BUSTRNL1 - External abort on Translation -- Level 1
+ * FAULT_BUSTRNL2 - External abort on Translation -- Level 2
+ *
+ * If pcb_onfault is set, flag the fault and return to the handler.
+ * If the fault occurred in user mode, give the process a SIGBUS.
+ *
+ * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
+ * can be flagged as imprecise in the FSR. This causes a real headache
+ * since some of the machine state is lost. In this case, tf->tf_pc
+ * may not actually point to the offending instruction. In fact, if
+ * we've taken a double abort fault, it generally points somewhere near
+ * the top of "data_abort_entry" in exception.S.
+ *
+ * In all other cases, these data aborts are considered fatal.
+ */
+static int
+dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
+{
+ struct pcb *pcb = td->td_pcb;
+
+#ifdef __XSCALE__
+ if ((fsr & FAULT_IMPRECISE) != 0 &&
+ (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
+ /*
+ * Oops, an imprecise, double abort fault. We've lost the
+ * r14_abt/spsr_abt values corresponding to the original
+ * abort, and the spsr saved in the trapframe indicates
+ * ABT mode.
+ */
+ tf->tf_spsr &= ~PSR_MODE;
+
+ /*
+ * We use a simple heuristic to determine if the double abort
+ * happened as a result of a kernel or user mode access.
+ * If the current trapframe is at the top of the kernel stack,
+ * the fault _must_ have come from user mode.
+ */
+ if (tf != ((trapframe_t *)pcb->un_32.pcb32_sp) - 1) {
+ /*
+ * Kernel mode. We're either about to die a
+ * spectacular death, or pcb_onfault will come
+ * to our rescue. Either way, the current value
+ * of tf->tf_pc is irrelevant.
+ */
+ tf->tf_spsr |= PSR_SVC32_MODE;
+ if (pcb->pcb_onfault == NULL)
+ printf("\nKernel mode double abort!\n");
+ } else {
+ /*
+ * User mode. We've lost the program counter at the
+ * time of the fault (not that it was accurate anyway;
+ * it's not called an imprecise fault for nothing).
+ * About all we can do is copy r14_usr to tf_pc and
+ * hope for the best. The process is about to get a
+ * SIGBUS, so it's probably history anyway.
+ */
+ tf->tf_spsr |= PSR_USR32_MODE;
+ tf->tf_pc = tf->tf_usr_lr;
+ }
+ }
+
+ /* FAR is invalid for imprecise exceptions */
+ if ((fsr & FAULT_IMPRECISE) != 0)
+ far = 0;
+#endif /* __XSCALE__ */
+
+ if (pcb->pcb_onfault) {
+ tf->tf_r0 = EFAULT;
+ tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
+ return (0);
+ }
+
+ /* See if the cpu state needs to be fixed up */
+ (void) data_abort_fixup(tf, fsr, far, td, ksig);
+
+ /*
+ * At this point, if the fault happened in kernel mode, we're toast
+ */
+ if (!TRAP_USERMODE(tf))
+ dab_fatal(tf, fsr, far, td, ksig);
+
+ /* Deliver a bus error signal to the process */
+ ksig->signb = SIGBUS;
+ ksig->code = 0;
+ td->td_frame = tf;
+
+ return (1);
+}
+
+static __inline int
+prefetch_abort_fixup(trapframe_t *tf, struct ksig *ksig)
+{
+#ifdef CPU_ABORT_FIXUP_REQUIRED
+ int error;
+
+ /* Call the cpu specific prefetch abort fixup routine */
+ error = cpu_prefetchabt_fixup(tf);
+ if (__predict_true(error != ABORT_FIXUP_FAILED))
+ return (error);
+
+ /*
+ * Oops, couldn't fix up the instruction
+ */
+ printf(
+ "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n",
+ TRAP_USERMODE(tf) ? "user" : "kernel");
+ printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
+ *((u_int *)tf->tf_pc));
+ disassemble(tf->tf_pc);
+
+ /* Die now if this happened in kernel mode */
+ if (!TRAP_USERMODE(tf))
+ dab_fatal(tf, 0, tf->tf_pc, NULL, ksig);
+
+ return (error);
+#else
+ return (ABORT_FIXUP_OK);
+#endif /* CPU_ABORT_FIXUP_REQUIRED */
+}
+
+/*
+ * void prefetch_abort_handler(trapframe_t *tf)
+ *
+ * Abort handler called when instruction execution occurs at
+ * a non-existent or restricted (access permissions) memory page.
+ * If the address is invalid and we were in SVC mode then panic as
+ * the kernel should never prefetch abort.
+ * If the address is invalid and the page is mapped then the user process
+ * does not have read permission, so send it a signal.
+ * Otherwise fault the page in and try again.
+ */
+void
+prefetch_abort_handler(trapframe_t *tf)
+{
+ struct thread *td;
+ struct vm_map *map;
+ vm_offset_t fault_pc, va;
+ int error = 0;
+ u_int sticks = 0;
+ struct ksig ksig;
+
+#if 0
+ /* Update vmmeter statistics */
+ uvmexp.traps++;
+#endif
+ /*
+	 * Enable IRQs (disabled by the abort). This always comes
+	 * from user mode, so we know interrupts were not disabled,
+	 * but we check anyway.
+ */
+ if (__predict_true((tf->tf_spsr & I32_bit) == 0))
+ enable_interrupts(I32_bit);
+
+ /* See if the cpu state needs to be fixed up */
+ switch (prefetch_abort_fixup(tf, &ksig)) {
+ case ABORT_FIXUP_RETURN:
+ return;
+ case ABORT_FIXUP_FAILED:
+ /* Deliver a SIGILL to the process */
+ ksig.signb = SIGILL;
+ ksig.code = 0;
+ td = curthread;
+ td->td_frame = tf;
+ goto do_trapsignal;
+ default:
+ break;
+ }
+
+ /* Prefetch aborts cannot happen in kernel mode */
+ if (__predict_false(!TRAP_USERMODE(tf)))
+ dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
+
+ /* Get fault address */
+ fault_pc = tf->tf_pc;
+ td = curthread;
+ td->td_frame = tf;
+ sticks = td->td_sticks;
+
+	/* OK, validate the address; we can only execute in USER space */
+ if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
+ (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
+ ksig.signb = SIGSEGV;
+ ksig.code = 0;
+ goto do_trapsignal;
+ }
+
+ map = &td->td_proc->p_vmspace->vm_map;
+ va = trunc_page(fault_pc);
+
+ /*
+ * See if the pmap can handle this fault on its own...
+ */
+#ifdef DEBUG
+ last_fault_code = -1;
+#endif
+ if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
+ goto out;
+
+ error = vm_fault(map, va, VM_PROT_READ /*| VM_PROT_EXECUTE*/,
+ VM_FAULT_NORMAL);
+ if (__predict_true(error == 0))
+ goto out;
+
+ if (error == ENOMEM) {
+ printf("VM: pid %d (%s), uid %d killed: "
+ "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm,
+ (td->td_proc->p_ucred) ?
+ td->td_proc->p_ucred->cr_uid : -1);
+ ksig.signb = SIGKILL;
+ } else {
+ ksig.signb = SIGSEGV;
+ }
+ ksig.code = 0;
+
+do_trapsignal:
+ call_trapsignal(td, ksig.signb, ksig.code);
+
+out:
+ userret(td, tf, sticks);
+}
+
+extern int badaddr_read_1(const uint8_t *, uint8_t *);
+extern int badaddr_read_2(const uint16_t *, uint16_t *);
+extern int badaddr_read_4(const uint32_t *, uint32_t *);
+/*
+ * Tentatively read an 8, 16, or 32-bit value from 'addr'.
+ * If the read succeeds, the value is written to 'rptr' and zero is returned.
+ * Else, return EFAULT.
+ */
+int
+badaddr_read(void *addr, size_t size, void *rptr)
+{
+ union {
+ uint8_t v1;
+ uint16_t v2;
+ uint32_t v4;
+ } u;
+ int rv;
+
+ cpu_drain_writebuf();
+
+ /* Read from the test address. */
+ switch (size) {
+ case sizeof(uint8_t):
+ rv = badaddr_read_1(addr, &u.v1);
+ if (rv == 0 && rptr)
+ *(uint8_t *) rptr = u.v1;
+ break;
+
+ case sizeof(uint16_t):
+ rv = badaddr_read_2(addr, &u.v2);
+ if (rv == 0 && rptr)
+ *(uint16_t *) rptr = u.v2;
+ break;
+
+ case sizeof(uint32_t):
+ rv = badaddr_read_4(addr, &u.v4);
+ if (rv == 0 && rptr)
+ *(uint32_t *) rptr = u.v4;
+ break;
+
+ default:
+ panic("badaddr: invalid size (%lu)", (u_long) size);
+ }
+
+ /* Return EFAULT if the address was invalid, else zero */
+ return (rv);
+}
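
badaddr_read() is the usual bus-probe helper: it attempts the access with a
fault catcher in place instead of trusting the address. An illustrative use
(dev_regs_va is an assumed address, not from this import):

	uint32_t id;

	/* Does anything answer at this register window? */
	if (badaddr_read((void *)dev_regs_va, sizeof(uint32_t), &id) == 0)
		printf("device present, id=0x%08x\n", id);
	else
		printf("no device (read faulted)\n");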
+
+#define MAXARGS 8
+static void
+syscall(struct thread *td, trapframe_t *frame, u_int32_t insn)
+{
+ struct proc *p = td->td_proc;
+ int code, error;
+ u_int nap, nargs;
+ register_t *ap, *args, copyargs[MAXARGS];
+ struct sysent *callp;
+ int locked = 0;
+
+ switch (insn & SWI_OS_MASK) {
+ case 0: /* XXX: we need our own one. */
+ nap = 4;
+ break;
+ default:
+ trapsignal(td, SIGILL, 0);
+ userret(td, frame, td->td_sticks);
+ return;
+ }
+ code = insn & 0x000fffff;
+ ap = &frame->tf_r0;
+ if (code == SYS_syscall) {
+ code = *ap++;
+
+ nap--;
+ } else if (code == SYS___syscall) {
+ code = *ap++;
+ nap -= 2;
+ ap++;
+ }
+ if (p->p_sysent->sv_mask)
+ code &= p->p_sysent->sv_mask;
+ if (code >= p->p_sysent->sv_size)
+ callp = &p->p_sysent->sv_table[0];
+ else
+ callp = &p->p_sysent->sv_table[code];
+ nargs = callp->sy_narg & SYF_ARGMASK;
+ if (nargs <= nap)
+ args = ap;
+ else {
+ memcpy(copyargs, ap, nap * sizeof(register_t));
+ error = copyin((void *)frame->tf_usr_sp, copyargs + nap,
+ (nargs - nap) * sizeof(register_t));
+ if (error)
+ goto bad;
+ args = copyargs;
+ }
+
+ error = 0;
+ if ((callp->sy_narg & SYF_MPSAFE) == 0)
+ mtx_lock(&Giant);
+ locked = 1;
+ if (error == 0) {
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+ error = (*callp->sy_call)(td, args);
+ }
+#if 0
+ printf("code %d error %d\n", code, error);
+#endif
+ switch (error) {
+ case 0:
+ frame->tf_r0 = td->td_retval[0];
+ frame->tf_r1 = td->td_retval[1];
+
+ frame->tf_spsr &= ~PSR_C_bit; /* carry bit */
+ break;
+
+ case ERESTART:
+ /*
+ * Reconstruct the pc to point at the swi.
+ */
+ frame->tf_pc -= INSN_SIZE;
+ break;
+ case EJUSTRETURN:
+ /* nothing to do */
+ break;
+ default:
+bad:
+ frame->tf_r0 = error;
+ frame->tf_spsr |= PSR_C_bit; /* carry bit */
+ break;
+ }
+ if (locked && (callp->sy_narg & SYF_MPSAFE) == 0)
+ mtx_unlock(&Giant);
+
+ userret(td, frame, td->td_sticks);
+ mtx_assert(&sched_lock, MA_NOTOWNED);
+ mtx_assert(&Giant, MA_NOTOWNED);
+
+}
+
+void
+swi_handler(trapframe_t *frame)
+{
+ struct thread *td = curthread;
+ uint32_t insn;
+
+ /*
+ * Enable interrupts if they were enabled before the exception.
+ * Since all syscalls *should* come from user mode it will always
+ * be safe to enable them, but check anyway.
+	 */
+ if (!(frame->tf_spsr & I32_bit))
+ enable_interrupts(I32_bit);
+ /*
+ * Make sure the program counter is correctly aligned so we
+ * don't take an alignment fault trying to read the opcode.
+ */
+ if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) {
+ trapsignal(td, SIGILL, 0);
+ userret(td, frame, td->td_sticks);
+ return;
+ }
+ insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
+ td->td_frame = frame;
+ syscall(td, frame, insn);
+}
+
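
The syscall number lives in the immediate field of the SWI instruction
itself, which is why swi_handler() reads the opcode at tf_pc - INSN_SIZE.
A small sketch of the decode performed by syscall() above; the example
opcode is illustrative:

	uint32_t insn = 0xEF0000A5;		/* "swi 0xA5" in ARM mode */
	uint32_t os   = insn & SWI_OS_MASK;	/* OS/vendor field (0 here) */
	uint32_t code = insn & 0x000fffff;	/* syscall number, 0xA5 */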
diff --git a/sys/arm/arm/uio_machdep.c b/sys/arm/arm/uio_machdep.c
new file mode 100644
index 0000000..5b9a736
--- /dev/null
+++ b/sys/arm/arm/uio_machdep.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/vmparam.h>
+
+/*
+ * Implement uiomove(9) from physical memory using the direct map to
+ * avoid the creation and destruction of ephemeral mappings.
+ */
+int
+uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
+{
+ struct thread *td = curthread;
+ struct iovec *iov;
+ void *cp;
+ vm_offset_t page_offset;
+ size_t cnt;
+ int error = 0;
+ int save = 0;
+
+ KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+ ("uiomove_fromphys: mode"));
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+ ("uiomove_fromphys proc"));
+ if (td != NULL) {
+ mtx_lock_spin(&sched_lock);
+ save = td->td_flags & TDF_DEADLKTREAT;
+ td->td_flags |= TDF_DEADLKTREAT;
+ mtx_unlock_spin(&sched_lock);
+ }
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > n)
+ cnt = n;
+ page_offset = offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - page_offset);
+ cp = (char *)VM_PAGE_TO_PHYS(ma[offset >> PAGE_SHIFT]) +
+ page_offset;
+ switch (uio->uio_segflg) {
+ case UIO_USERSPACE:
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
+ uio_yield();
+ if (uio->uio_rw == UIO_READ)
+ error = copyout(cp, iov->iov_base, cnt);
+ else
+ error = copyin(iov->iov_base, cp, cnt);
+ if (error)
+ goto out;
+ break;
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ bcopy(cp, iov->iov_base, cnt);
+ else
+ bcopy(iov->iov_base, cp, cnt);
+ break;
+ case UIO_NOCOPY:
+ break;
+ }
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ offset += cnt;
+ n -= cnt;
+ }
+out:
+ if (td != NULL && save == 0) {
+ mtx_lock_spin(&sched_lock);
+ td->td_flags &= ~TDF_DEADLKTREAT;
+ mtx_unlock_spin(&sched_lock);
+ }
+ return (error);
+}
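
uiomove_fromphys() is meant to be called with an array of vm_page_t and a
byte offset into that array; an illustrative call, with assumed names:

	/* Move one page's worth starting at byte 'off' within pages[]. */
	error = uiomove_fromphys(pages, off, PAGE_SIZE, uio);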
diff --git a/sys/arm/arm/undefined.c b/sys/arm/arm/undefined.c
new file mode 100644
index 0000000..6a62e9c
--- /dev/null
+++ b/sys/arm/arm/undefined.c
@@ -0,0 +1,291 @@
+/* $NetBSD: undefined.c,v 1.22 2003/11/29 22:21:29 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 2001 Ben Harris.
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * undefined.c
+ *
+ * Fault handler
+ *
+ * Created : 06/01/95
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/signal.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/syslog.h>
+#include <sys/vmmeter.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#ifdef FAST_FPE
+#include <sys/acct.h>
+#endif
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/undefined.h>
+#include <machine/trap.h>
+
+#include <machine/disassem.h>
+
+#ifdef DDB
+#include <ddb/db_output.h>
+#include <machine/db_machdep.h>
+#endif
+
+#ifdef acorn26
+#include <machine/machdep.h>
+#endif
+
+static int gdb_trapper(u_int, u_int, struct trapframe *, int);
+#ifdef FAST_FPE
+extern int want_resched;
+#endif
+
+LIST_HEAD(, undefined_handler) undefined_handlers[MAX_COPROCS];
+
+
+void *
+install_coproc_handler(int coproc, undef_handler_t handler)
+{
+ struct undefined_handler *uh;
+
+ KASSERT(coproc >= 0 && coproc < MAX_COPROCS, ("bad coproc"));
+ KASSERT(handler != NULL, ("handler is NULL")); /* Used to be legal. */
+
+ /* XXX: M_TEMP??? */
+ MALLOC(uh, struct undefined_handler *, sizeof(*uh), M_TEMP, M_WAITOK);
+ uh->uh_handler = handler;
+ install_coproc_handler_static(coproc, uh);
+ return uh;
+}
+
+void
+install_coproc_handler_static(int coproc, struct undefined_handler *uh)
+{
+
+ LIST_INSERT_HEAD(&undefined_handlers[coproc], uh, uh_link);
+}
+
+void
+remove_coproc_handler(void *cookie)
+{
+ struct undefined_handler *uh = cookie;
+
+ LIST_REMOVE(uh, uh_link);
+ FREE(uh, M_TEMP);
+}
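
The three routines above are the whole coprocessor-handler API: a handler
returns 0 to claim the faulting instruction and nonzero to decline it. A
minimal sketch of a client, assuming a hypothetical FPE module on
coprocessor 1 (none of these names exist in this patch):

	static int
	fpe_handler(u_int addr, u_int insn, struct trapframe *frame, int code)
	{
		/* Return 0 to claim the instruction, nonzero to decline. */
		return (1);
	}

	static void *fpe_cookie;

	static void
	fpe_attach(void)
	{
		fpe_cookie = install_coproc_handler(1, fpe_handler);
	}

	static void
	fpe_detach(void)
	{
		remove_coproc_handler(fpe_cookie);
	}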
+
+
+static int
+gdb_trapper(u_int addr, u_int insn, struct trapframe *frame, int code)
+{
+ struct thread *td;
+ td = (curthread == NULL) ? &thread0 : curthread;
+
+#if 0
+ if (insn == GDB_BREAKPOINT || insn == GDB5_BREAKPOINT) {
+ if (code == FAULT_USER) {
+ ksiginfo_t ksi;
+
+ KSI_INIT_TRAP(&ksi);
+ ksi.ksi_signo = SIGTRAP;
+ ksi.ksi_code = TRAP_BRKPT;
+ ksi.ksi_addr = (u_int32_t *)addr;
+ ksi.ksi_trap = 0;
+ PROC_LOCK(td->td_proc);
+ trapsignal(td, &ksi);
+ PROC_UNLOCK(td->td_proc);
+ return 0;
+ }
+#ifdef KGDB
+ return !kgdb_trap(T_BREAKPOINT, frame);
+#endif
+ }
+#endif
+ return 1;
+}
+
+static struct undefined_handler gdb_uh;
+
+void
+undefined_init(void)
+{
+ int loop;
+
+ /* Not actually necessary -- the initialiser is just NULL */
+ for (loop = 0; loop < MAX_COPROCS; ++loop)
+ LIST_INIT(&undefined_handlers[loop]);
+
+ /* Install handler for GDB breakpoints */
+ gdb_uh.uh_handler = gdb_trapper;
+ install_coproc_handler_static(0, &gdb_uh);
+}
+
+
+void
+undefinedinstruction(trapframe_t *frame)
+{
+ struct thread *td;
+ u_int fault_pc;
+ int fault_instruction;
+ int fault_code;
+ int coprocessor;
+ struct undefined_handler *uh;
+#ifdef VERBOSE_ARM32
+ int s;
+#endif
+
+ /* Enable interrupts if they were enabled before the exception. */
+ if (!(frame->tf_spsr & I32_bit))
+ enable_interrupts(I32_bit);
+
+ frame->tf_pc -= INSN_SIZE;
+
+ fault_pc = frame->tf_pc;
+
+ /*
+ * Get the current thread/proc structure or thread0/proc0 if there is
+ * none.
+ */
+ td = curthread == NULL ? &thread0 : curthread;
+
+ /*
+ * Make sure the program counter is correctly aligned so we
+ * don't take an alignment fault trying to read the opcode.
+ */
+ if (__predict_false((fault_pc & 3) != 0)) {
+ trapsignal(td, SIGILL, 0);
+ userret(td, frame, 0);
+ return;
+ }
+
+	/*
+	 * We should use fuword() here, but in the interests of squeezing
+	 * every bit of speed we just read the word directly.  We know the
+	 * instruction can be read, since it was just executed, so this can
+	 * only fail if the kernel itself is corrupt, in which case it
+	 * hardly matters.
+	 */
+
+ fault_instruction = *(u_int32_t *)fault_pc;
+
+ /* Update vmmeter statistics */
+#if 0
+ uvmexp.traps++;
+#endif
+ /* Check for coprocessor instruction */
+
+	/*
+	 * According to the datasheets you only need to look at bit 27 of the
+	 * instruction to tell the difference between an undefined
+	 * instruction and a coprocessor instruction following an undefined
+	 * instruction trap.
+	 */
+
+ if ((fault_instruction & (1 << 27)) != 0)
+ coprocessor = (fault_instruction >> 8) & 0x0f;
+ else
+ coprocessor = 0;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE) {
+ /*
+ * Modify the fault_code to reflect the USR/SVC state at
+ * time of fault.
+ */
+ fault_code = FAULT_USER;
+ td->td_frame = frame;
+ } else
+ fault_code = 0;
+
+	/* OK, this is where we do something about the instruction. */
+ LIST_FOREACH(uh, &undefined_handlers[coprocessor], uh_link)
+ if (uh->uh_handler(fault_pc, fault_instruction, frame,
+ fault_code) == 0)
+ break;
+
+ if (uh == NULL) {
+ /* Fault has not been handled */
+ trapsignal(td, SIGILL, 0);
+ }
+
+ if ((fault_code & FAULT_USER) == 0)
+ return;
+
+#ifdef FAST_FPE
+ /* Optimised exit code */
+ {
+
+		/*
+		 * Check for a reschedule request; at the moment there is
+		 * only one AST, so this code should always run.
+		 */
+
+ if (want_resched) {
+ /*
+ * We are being preempted.
+ */
+ preempt(0);
+ }
+
+ /* Invoke MI userret code */
+ mi_userret(td);
+
+#if 0
+ l->l_priority = l->l_usrpri;
+
+ curcpu()->ci_schedstate.spc_curpriority = l->l_priority;
+#endif
+ }
+
+#else
+ userret(td, frame, 0);
+#endif
+}
diff --git a/sys/arm/arm/vectors.S b/sys/arm/arm/vectors.S
new file mode 100644
index 0000000..2947a44
--- /dev/null
+++ b/sys/arm/arm/vectors.S
@@ -0,0 +1,104 @@
+/* $NetBSD: vectors.S,v 1.4 2002/08/17 16:36:32 thorpej Exp $ */
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * These are the exception vectors copied down to page 0.
+ *
+ * Note that FIQs are special; rather than using a level of
+ * indirection, we actually copy the FIQ code down into the
+ * vector page.
+ */
+
+ .text
+ .align 0
+ .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
+ .global _C_LABEL(fiqvector)
+
+_C_LABEL(page0):
+ ldr pc, .Lreset_target
+ ldr pc, .Lundefined_target
+ ldr pc, .Lswi_target
+ ldr pc, .Lprefetch_abort_target
+ ldr pc, .Ldata_abort_target
+ ldr pc, .Laddress_exception_target
+ ldr pc, .Lirq_target
+#ifdef __ARM_FIQ_INDIRECT
+ ldr pc, .Lfiq_target
+#else
+.Lfiqvector:
+ .set _C_LABEL(fiqvector), . - _C_LABEL(page0)
+ subs pc, lr, #4
+ .org .Lfiqvector + 0x100
+#endif
+
+_C_LABEL(page0_data):
+.Lreset_target:
+ .word reset_entry
+
+.Lundefined_target:
+ .word undefined_entry
+
+.Lswi_target:
+ .word swi_entry
+
+.Lprefetch_abort_target:
+ .word prefetch_abort_entry
+
+.Ldata_abort_target:
+ .word data_abort_entry
+
+.Laddress_exception_target:
+ .word address_exception_entry
+
+.Lirq_target:
+ .word irq_entry
+
+#ifdef __ARM_FIQ_INDIRECT
+.Lfiq_target:
+ .word _C_LABEL(fiqvector)
+#else
+ .word 0 /* pad it out */
+#endif
+_C_LABEL(page0_end):
+
+#ifdef __ARM_FIQ_INDIRECT
+ .data
+ .align 0
+_C_LABEL(fiqvector):
+ subs pc, lr, #4
+ .org _C_LABEL(fiqvector) + 0x100
+#endif
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
new file mode 100644
index 0000000..ec5be4e
--- /dev/null
+++ b/sys/arm/arm/vm_machdep.c
@@ -0,0 +1,348 @@
+/*-
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * Copyright (c) 1994 John Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/socketvar.h>
+#include <sys/sf_buf.h>
+#include <sys/user.h>
+#include <machine/cpu.h>
+#include <machine/pcb.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_param.h>
+
+static void sf_buf_init(void *arg);
+SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
+
+LIST_HEAD(sf_head, sf_buf);
+
+
+/*
+ * A hash table of active sendfile(2) buffers
+ */
+static struct sf_head *sf_buf_active;
+static u_long sf_buf_hashmask;
+
+#define SF_BUF_HASH(m) (((m) - vm_page_array) & sf_buf_hashmask)
+
+static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
+static u_int sf_buf_alloc_want;
+
+/*
+ * A lock used to synchronize access to the hash table and free list
+ */
+static struct mtx sf_buf_lock;
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, set up the stack so that the child is
+ * ready to run and return to user mode.
+ */
+void
+cpu_fork(register struct thread *td1, register struct proc *p2,
+ struct thread *td2, int flags)
+{
+ struct pcb *pcb1, *pcb2;
+ struct trapframe *tf;
+ struct switchframe *sf;
+ struct mdproc *mdp2;
+
+ pcb1 = td1->td_pcb;
+ pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
+ td2->td_pcb = pcb2;
+ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
+ mdp2 = &p2->p_md;
+ bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
+ pcb2->un_32.pcb32_und_sp = (u_int)td2->td_kstack + USPACE_UNDEF_STACK_TOP;
+ pcb2->un_32.pcb32_sp = (u_int)td2->td_kstack +
+ USPACE_SVC_STACK_TOP;
+ pmap_activate(td2);
+ td2->td_frame = tf =
+ (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
+ *tf = *td1->td_frame;
+ sf = (struct switchframe *)tf - 1;
+ sf->sf_r4 = (u_int)fork_return;
+ sf->sf_r5 = (u_int)td2;
+ sf->sf_pc = (u_int)fork_trampoline;
+ tf->tf_spsr &= ~PSR_C_bit;
+ tf->tf_r0 = 0;
+ pcb2->un_32.pcb32_sp = (u_int)sf;
+}
+
+void
+cpu_thread_swapin(struct thread *td)
+{
+}
+
+void
+cpu_thread_swapout(struct thread *td)
+{
+}
+
+/*
+ * Detach the mapped page and release its resources back to the system.
+ */
+void
+sf_buf_free(struct sf_buf *sf)
+{
+ mtx_lock(&sf_buf_lock);
+ sf->ref_count--;
+ if (sf->ref_count == 0) {
+ TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
+ nsfbufsused--;
+ if (sf_buf_alloc_want > 0)
+ wakeup_one(&sf_buf_freelist);
+ }
+ mtx_unlock(&sf_buf_lock);
+}
+
+/*
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
+static void
+sf_buf_init(void *arg)
+{
+ struct sf_buf *sf_bufs;
+ vm_offset_t sf_base;
+ int i;
+
+ sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
+ TAILQ_INIT(&sf_buf_freelist);
+ sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
+ M_NOWAIT | M_ZERO);
+ for (i = 0; i < nsfbufs; i++) {
+ sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
+ TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
+ }
+ sf_buf_alloc_want = 0;
+ mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
+}
+
+/*
+ * Get an sf_buf from the freelist. Will block if none are available.
+ */
+struct sf_buf *
+sf_buf_alloc(struct vm_page *m, int pri)
+{
+ struct sf_head *hash_list;
+ struct sf_buf *sf;
+ int error;
+
+ hash_list = &sf_buf_active[SF_BUF_HASH(m)];
+ mtx_lock(&sf_buf_lock);
+ LIST_FOREACH(sf, hash_list, list_entry) {
+ if (sf->m == m) {
+ sf->ref_count++;
+ if (sf->ref_count == 1) {
+ TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+ nsfbufsused++;
+ nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
+ }
+ goto done;
+ }
+ }
+ while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
+ sf_buf_alloc_want++;
+ mbstat.sf_allocwait++;
+ error = msleep(&sf_buf_freelist, &sf_buf_lock, PVM | pri,
+ "sfbufa", 0);
+ sf_buf_alloc_want--;
+
+ /*
+ * If we got a signal, don't risk going back to sleep.
+ */
+ if (error)
+ goto done;
+ }
+ TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+ if (sf->m != NULL)
+ LIST_REMOVE(sf, list_entry);
+ LIST_INSERT_HEAD(hash_list, sf, list_entry);
+ sf->ref_count = 1;
+ sf->m = m;
+ nsfbufsused++;
+ nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
+ pmap_qenter(sf->kva, &sf->m, 1);
+done:
+ mtx_unlock(&sf_buf_lock);
+ return (sf);
+}
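
Together, sf_buf_alloc() and sf_buf_free() hand callers a reference-counted
kernel mapping of a vm_page. A usage sketch (the page "m" and its locking
are assumed):

	struct sf_buf *sf;

	sf = sf_buf_alloc(m, 0);	/* may sleep on the free list */
	if (sf != NULL) {
		bzero((void *)sf->kva, PAGE_SIZE);	/* use the mapping */
		sf_buf_free(sf);	/* drop the ref; recycled at zero */
	}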
+
+/*
+ * Initialize machine state (pcb and trap frame) for a new thread about to
+ * upcall. Put enough state in the new thread's PCB to get it to go back to
+ * userret(), where we can intercept it again to set the return (upcall)
+ * address and stack, along with those from upcalls that come from other
+ * sources, such as those generated in thread_userret() itself.
+ */
+void
+cpu_set_upcall(struct thread *td, struct thread *td0)
+{
+ panic("set upcall\n");
+}
+
+/*
+ * Set up the machine state for performing an upcall that has to
+ * be done in thread_userret(), so that upcalls generated in
+ * thread_userret() itself can be handled as well.
+ */
+void
+cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
+{
+ panic("setupcallkse\n");
+}
+
+void
+cpu_thread_exit(struct thread *td)
+{
+}
+
+void
+cpu_thread_setup(struct thread *td)
+{
+ td->td_pcb = (struct pcb *)(td->td_kstack + KSTACK_PAGES *
+ PAGE_SIZE) - 1;
+ td->td_frame = (struct trapframe *)
+ ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP) - 1;
+}
+
+void
+cpu_thread_clean(struct thread *td)
+{
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ */
+void
+cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
+{
+ struct switchframe *sf;
+ struct trapframe *tf;
+
+
+ tf = td->td_frame;
+ sf = (struct switchframe *)tf - 1;
+ sf->sf_r4 = (u_int)func;
+ sf->sf_r5 = (u_int)arg;
+ td->td_pcb->un_32.pcb32_sp = (u_int)sf;
+}
+
+/*
+ * Software interrupt handler for queued VM system processing.
+ */
+void
+swi_vm(void *dummy)
+{
+}
+
+void
+cpu_exit(struct thread *td)
+{
+}
+
+void
+cpu_sched_exit(struct thread *td)
+{
+}
diff --git a/sys/arm/conf/SIMICS b/sys/arm/conf/SIMICS
new file mode 100644
index 0000000..2937b32
--- /dev/null
+++ b/sys/arm/conf/SIMICS
@@ -0,0 +1,83 @@
+# SIMICS -- Kernel configuration file for FreeBSD/arm (SA-1110 under Simics)
+#
+# For more information on this file, please read the handbook section on
+# Kernel Configuration Files:
+#
+# http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES.
+#
+# $FreeBSD$
+
+machine arm
+ident SIMICS
+
+options KERNPHYSADDR=0xc0000000
+include "../sa11x0/std.sa11x0"
+#To statically compile in device wiring instead of /boot/device.hints
+#hints "GENERIC.hints" #Default places to look for devices.
+
+makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
+makeoptions CONF_CFLAGS=-mcpu=strongarm
+options DDB
+
+options SCHED_4BSD #4BSD scheduler
+options INET #InterNETworking
+options INET6 #IPv6 communications protocols
+options FFS #Berkeley Fast Filesystem
+options SOFTUPDATES #Enable FFS soft updates support
+options UFS_ACL #Support for access control lists
+options UFS_DIRHASH #Improve performance on big directories
+options MD_ROOT #MD is a potential root device
+options ROOTDEVNAME=\"ufs:md0\"
+options NFSCLIENT #Network Filesystem Client
+options NFSSERVER #Network Filesystem Server
+options NFS_ROOT #NFS usable as /, requires NFSCLIENT
+#options MSDOSFS #MSDOS Filesystem
+options CD9660 #ISO 9660 Filesystem
+#options PROCFS #Process filesystem (requires PSEUDOFS)
+options PSEUDOFS #Pseudo-filesystem framework
+options COMPAT_43 #Compatible with BSD 4.3 [KEEP THIS!]
+options SCSI_DELAY=15000 #Delay (in ms) before probing SCSI
+#options KTRACE #ktrace(1) support
+options SYSVSHM #SYSV-style shared memory
+options SYSVMSG #SYSV-style message queues
+options SYSVSEM #SYSV-style semaphores
+options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions
+options KBD_INSTALL_CDEV # install a CDEV entry in /dev
+device genclock
+device loop
+device ether
+device saip
+device assabet
+device nexus
+#device saarm
+device rl
+device uart
+#options AHC_REG_PRETTY_PRINT # Print register bitfields in debug
+ # output. Adds ~128k to driver.
+#options AHD_REG_PRETTY_PRINT # Print register bitfields in debug
+ # output. Adds ~215k to driver.
+
+# Debugging for use in -current
+options DDB #Enable the kernel debugger
+#options INVARIANTS #Enable calls of extra sanity checking
+#options INVARIANT_SUPPORT #Extra sanity checks of internal structures, required by INVARIANTS
+#options WITNESS #Enable checks to detect deadlocks and cycles
+#options WITNESS_SKIPSPIN #Don't run witness on spinlocks for speed
+
+# To make an SMP kernel, the next two are needed
+#options SMP # Symmetric MultiProcessor Kernel
+#options APIC_IO # Symmetric (APIC) I/O
+
+device md
+
diff --git a/sys/arm/include/_inttypes.h b/sys/arm/include/_inttypes.h
new file mode 100644
index 0000000..7da589d
--- /dev/null
+++ b/sys/arm/include/_inttypes.h
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: $NetBSD: int_fmtio.h,v 1.2 2001/04/26 16:25:21 kleink Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTTYPES_H_
+#define _MACHINE_INTTYPES_H_
+
+/*
+ * Macros for format specifiers.
+ */
+
+/* fprintf(3) macros for signed integers. */
+
+#define PRId8 "d" /* int8_t */
+#define PRId16 "d" /* int16_t */
+#define PRId32 "d" /* int32_t */
+#define PRId64 "lld" /* int64_t */
+#define PRIdLEAST8 "d" /* int_least8_t */
+#define PRIdLEAST16 "d" /* int_least16_t */
+#define PRIdLEAST32 "d" /* int_least32_t */
+#define PRIdLEAST64 "lld" /* int_least64_t */
+#define PRIdFAST8 "d" /* int_fast8_t */
+#define PRIdFAST16 "d" /* int_fast16_t */
+#define PRIdFAST32 "d" /* int_fast32_t */
+#define PRIdFAST64 "lld" /* int_fast64_t */
+#define PRIdMAX "jd" /* intmax_t */
+#define PRIdPTR "d" /* intptr_t */
+
+#define PRIi8 "i" /* int8_t */
+#define PRIi16 "i" /* int16_t */
+#define PRIi32 "i" /* int32_t */
+#define PRIi64 "lli" /* int64_t */
+#define PRIiLEAST8 "i" /* int_least8_t */
+#define PRIiLEAST16 "i" /* int_least16_t */
+#define PRIiLEAST32 "i" /* int_least32_t */
+#define PRIiLEAST64 "lli" /* int_least64_t */
+#define PRIiFAST8 "i" /* int_fast8_t */
+#define PRIiFAST16 "i" /* int_fast16_t */
+#define PRIiFAST32 "i" /* int_fast32_t */
+#define PRIiFAST64 "lli" /* int_fast64_t */
+#define PRIiMAX "ji" /* intmax_t */
+#define PRIiPTR "i" /* intptr_t */
+
+/* fprintf(3) macros for unsigned integers. */
+
+#define PRIo8 "o" /* uint8_t */
+#define PRIo16 "o" /* uint16_t */
+#define PRIo32 "o" /* uint32_t */
+#define PRIo64 "llo" /* uint64_t */
+#define PRIoLEAST8 "o" /* uint_least8_t */
+#define PRIoLEAST16 "o" /* uint_least16_t */
+#define PRIoLEAST32 "o" /* uint_least32_t */
+#define PRIoLEAST64 "llo" /* uint_least64_t */
+#define PRIoFAST8 "o" /* uint_fast8_t */
+#define PRIoFAST16 "o" /* uint_fast16_t */
+#define PRIoFAST32 "o" /* uint_fast32_t */
+#define PRIoFAST64 "llo" /* uint_fast64_t */
+#define PRIoMAX "jo" /* uintmax_t */
+#define PRIoPTR "o" /* uintptr_t */
+
+#define PRIu8 "u" /* uint8_t */
+#define PRIu16 "u" /* uint16_t */
+#define PRIu32 "u" /* uint32_t */
+#define PRIu64 "llu" /* uint64_t */
+#define PRIuLEAST8 "u" /* uint_least8_t */
+#define PRIuLEAST16 "u" /* uint_least16_t */
+#define PRIuLEAST32 "u" /* uint_least32_t */
+#define PRIuLEAST64 "llu" /* uint_least64_t */
+#define PRIuFAST8 "u" /* uint_fast8_t */
+#define PRIuFAST16 "u" /* uint_fast16_t */
+#define PRIuFAST32 "u" /* uint_fast32_t */
+#define PRIuFAST64 "llu" /* uint_fast64_t */
+#define PRIuMAX "ju" /* uintmax_t */
+#define PRIuPTR "u" /* uintptr_t */
+
+#define PRIx8 "x" /* uint8_t */
+#define PRIx16 "x" /* uint16_t */
+#define PRIx32 "x" /* uint32_t */
+#define PRIx64 "llx" /* uint64_t */
+#define PRIxLEAST8 "x" /* uint_least8_t */
+#define PRIxLEAST16 "x" /* uint_least16_t */
+#define PRIxLEAST32 "x" /* uint_least32_t */
+#define PRIxLEAST64 "llx" /* uint_least64_t */
+#define PRIxFAST8 "x" /* uint_fast8_t */
+#define PRIxFAST16 "x" /* uint_fast16_t */
+#define PRIxFAST32 "x" /* uint_fast32_t */
+#define PRIxFAST64 "llx" /* uint_fast64_t */
+#define PRIxMAX "jx" /* uintmax_t */
+#define PRIxPTR "x" /* uintptr_t */
+
+#define PRIX8 "X" /* uint8_t */
+#define PRIX16 "X" /* uint16_t */
+#define PRIX32 "X" /* uint32_t */
+#define PRIX64 "llX" /* uint64_t */
+#define PRIXLEAST8 "X" /* uint_least8_t */
+#define PRIXLEAST16 "X" /* uint_least16_t */
+#define PRIXLEAST32 "X" /* uint_least32_t */
+#define PRIXLEAST64 "llX" /* uint_least64_t */
+#define PRIXFAST8 "X" /* uint_fast8_t */
+#define PRIXFAST16 "X" /* uint_fast16_t */
+#define PRIXFAST32 "X" /* uint_fast32_t */
+#define PRIXFAST64 "llX" /* uint_fast64_t */
+#define PRIXMAX "jX" /* uintmax_t */
+#define PRIXPTR "X" /* uintptr_t */
+
+/* fscanf(3) macros for signed integers. */
+
+#define SCNd8 "hhd" /* int8_t */
+#define SCNd16 "hd" /* int16_t */
+#define SCNd32 "d" /* int32_t */
+#define SCNd64 "lld" /* int64_t */
+#define SCNdLEAST8 "hhd" /* int_least8_t */
+#define SCNdLEAST16 "hd" /* int_least16_t */
+#define SCNdLEAST32 "d" /* int_least32_t */
+#define SCNdLEAST64 "lld" /* int_least64_t */
+#define SCNdFAST8 "d" /* int_fast8_t */
+#define SCNdFAST16 "d" /* int_fast16_t */
+#define SCNdFAST32 "d" /* int_fast32_t */
+#define SCNdFAST64 "lld" /* int_fast64_t */
+#define SCNdMAX "jd" /* intmax_t */
+#define SCNdPTR "d" /* intptr_t */
+
+#define SCNi8 "hhi" /* int8_t */
+#define SCNi16 "hi" /* int16_t */
+#define SCNi32 "i" /* int32_t */
+#define SCNi64 "lli" /* int64_t */
+#define SCNiLEAST8 "hhi" /* int_least8_t */
+#define SCNiLEAST16 "hi" /* int_least16_t */
+#define SCNiLEAST32 "i" /* int_least32_t */
+#define SCNiLEAST64 "lli" /* int_least64_t */
+#define SCNiFAST8 "i" /* int_fast8_t */
+#define SCNiFAST16 "i" /* int_fast16_t */
+#define SCNiFAST32 "i" /* int_fast32_t */
+#define SCNiFAST64 "lli" /* int_fast64_t */
+#define SCNiMAX "ji" /* intmax_t */
+#define SCNiPTR "i" /* intptr_t */
+
+/* fscanf(3) macros for unsigned integers. */
+
+#define SCNo8 "hho" /* uint8_t */
+#define SCNo16 "ho" /* uint16_t */
+#define SCNo32 "o" /* uint32_t */
+#define SCNo64 "llo" /* uint64_t */
+#define SCNoLEAST8 "hho" /* uint_least8_t */
+#define SCNoLEAST16 "ho" /* uint_least16_t */
+#define SCNoLEAST32 "o" /* uint_least32_t */
+#define SCNoLEAST64 "llo" /* uint_least64_t */
+#define SCNoFAST8 "o" /* uint_fast8_t */
+#define SCNoFAST16 "o" /* uint_fast16_t */
+#define SCNoFAST32 "o" /* uint_fast32_t */
+#define SCNoFAST64 "llo" /* uint_fast64_t */
+#define SCNoMAX "jo" /* uintmax_t */
+#define SCNoPTR "o" /* uintptr_t */
+
+#define SCNu8 "hhu" /* uint8_t */
+#define SCNu16 "hu" /* uint16_t */
+#define SCNu32 "u" /* uint32_t */
+#define SCNu64 "llu" /* uint64_t */
+#define SCNuLEAST8 "hhu" /* uint_least8_t */
+#define SCNuLEAST16 "hu" /* uint_least16_t */
+#define SCNuLEAST32 "u" /* uint_least32_t */
+#define SCNuLEAST64 "llu" /* uint_least64_t */
+#define SCNuFAST8 "u" /* uint_fast8_t */
+#define SCNuFAST16 "u" /* uint_fast16_t */
+#define SCNuFAST32 "u" /* uint_fast32_t */
+#define SCNuFAST64 "llu" /* uint_fast64_t */
+#define SCNuMAX "ju" /* uintmax_t */
+#define SCNuPTR "u" /* uintptr_t */
+
+#define SCNx8 "hhx" /* uint8_t */
+#define SCNx16 "hx" /* uint16_t */
+#define SCNx32 "x" /* uint32_t */
+#define SCNx64 "llx" /* uint64_t */
+#define SCNxLEAST8 "hhx" /* uint_least8_t */
+#define SCNxLEAST16 "hx" /* uint_least16_t */
+#define SCNxLEAST32 "x" /* uint_least32_t */
+#define SCNxLEAST64 "llx" /* uint_least64_t */
+#define SCNxFAST8 "x" /* uint_fast8_t */
+#define SCNxFAST16 "x" /* uint_fast16_t */
+#define SCNxFAST32 "x" /* uint_fast32_t */
+#define SCNxFAST64 "llx" /* uint_fast64_t */
+#define SCNxMAX "jx" /* uintmax_t */
+#define SCNxPTR "x" /* uintptr_t */
+
+#endif /* !_MACHINE_INTTYPES_H_ */
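
These macros expand to the plain printf(3)/scanf(3) length modifiers: on
this 32-bit ABI the 64-bit types need "ll" while intmax_t uses "j". A
small illustration (a userland sketch, values hypothetical):

	#include <inttypes.h>
	#include <stdio.h>

	void
	show(uint64_t bytes, uintptr_t p)
	{
		/* PRIu64 == "llu" and PRIxPTR == "x", per the table above. */
		printf("len %" PRIu64 " at 0x%" PRIxPTR "\n", bytes, p);
	}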
diff --git a/sys/arm/include/armreg.h b/sys/arm/include/armreg.h
new file mode 100644
index 0000000..001c997
--- /dev/null
+++ b/sys/arm/include/armreg.h
@@ -0,0 +1,299 @@
+/* $NetBSD: armreg.h,v 1.28 2003/10/31 16:30:15 scw Exp $ */
+
+/*
+ * Copyright (c) 1998, 2001 Ben Harris
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef MACHINE_ARMREG_H
+#define MACHINE_ARMREG_H
+#define PSR_MODE 0x0000001f /* mode mask */
+#define PSR_USR26_MODE 0x00000000
+#define PSR_FIQ26_MODE 0x00000001
+#define PSR_IRQ26_MODE 0x00000002
+#define PSR_SVC26_MODE 0x00000003
+#define PSR_USR32_MODE 0x00000010
+#define PSR_FIQ32_MODE 0x00000011
+#define PSR_IRQ32_MODE 0x00000012
+#define PSR_SVC32_MODE 0x00000013
+#define PSR_ABT32_MODE 0x00000017
+#define PSR_UND32_MODE 0x0000001b
+#define PSR_SYS32_MODE 0x0000001f
+#define PSR_32_MODE 0x00000010
+#define PSR_FLAGS 0xf0000000 /* flags */
+
+#define PSR_C_bit (1 << 29) /* carry */
+
+/* The high-order byte is always the implementor */
+#define CPU_ID_IMPLEMENTOR_MASK 0xff000000
+#define CPU_ID_ARM_LTD 0x41000000 /* 'A' */
+#define CPU_ID_DEC 0x44000000 /* 'D' */
+#define CPU_ID_INTEL 0x69000000 /* 'i' */
+#define CPU_ID_TI 0x54000000 /* 'T' */
+
+/* How to decide what format the CPUID is in. */
+#define CPU_ID_ISOLD(x) (((x) & 0x0000f000) == 0x00000000)
+#define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000)
+#define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x))
+
+/* On ARM3 and ARM6, this byte holds the foundry ID. */
+#define CPU_ID_FOUNDRY_MASK 0x00ff0000
+#define CPU_ID_FOUNDRY_VLSI 0x00560000
+
+/* On ARM7 it holds the architecture and variant (sub-model) */
+#define CPU_ID_7ARCH_MASK 0x00800000
+#define CPU_ID_7ARCH_V3 0x00000000
+#define CPU_ID_7ARCH_V4T 0x00800000
+#define CPU_ID_7VARIANT_MASK 0x007f0000
+
+/* On more recent ARMs, it does the same, but in a different format */
+#define CPU_ID_ARCH_MASK 0x000f0000
+#define CPU_ID_ARCH_V3 0x00000000
+#define CPU_ID_ARCH_V4 0x00010000
+#define CPU_ID_ARCH_V4T 0x00020000
+#define CPU_ID_ARCH_V5 0x00030000
+#define CPU_ID_ARCH_V5T 0x00040000
+#define CPU_ID_ARCH_V5TE 0x00050000
+#define CPU_ID_VARIANT_MASK 0x00f00000
+
+/* Next three nybbles are part number */
+#define CPU_ID_PARTNO_MASK 0x0000fff0
+
+/* Intel XScale has sub fields in part number */
+#define CPU_ID_XSCALE_COREGEN_MASK 0x0000e000 /* core generation */
+#define CPU_ID_XSCALE_COREREV_MASK 0x00001c00 /* core revision */
+#define CPU_ID_XSCALE_PRODUCT_MASK 0x000003f0 /* product number */
+
+/* And finally, the revision number. */
+#define CPU_ID_REVISION_MASK 0x0000000f
+
+/* Individual CPUs are probably best IDed by everything but the revision. */
+#define CPU_ID_CPU_MASK 0xfffffff0
+
+/* Fake CPU IDs for ARMs without CP15 */
+#define CPU_ID_ARM2 0x41560200
+#define CPU_ID_ARM250 0x41560250
+
+/* Pre-ARM7 CPUs -- [15:12] == 0 */
+#define CPU_ID_ARM3 0x41560300
+#define CPU_ID_ARM600 0x41560600
+#define CPU_ID_ARM610 0x41560610
+#define CPU_ID_ARM620 0x41560620
+
+/* ARM7 CPUs -- [15:12] == 7 */
+#define CPU_ID_ARM700 0x41007000 /* XXX This is a guess. */
+#define CPU_ID_ARM710 0x41007100
+#define CPU_ID_ARM7500 0x41027100 /* XXX This is a guess. */
+#define CPU_ID_ARM710A 0x41047100 /* inc ARM7100 */
+#define CPU_ID_ARM7500FE 0x41077100
+#define CPU_ID_ARM710T 0x41807100
+#define CPU_ID_ARM720T 0x41807200
+#define CPU_ID_ARM740T8K 0x41807400 /* XXX no MMU, 8KB cache */
+#define CPU_ID_ARM740T4K 0x41817400 /* XXX no MMU, 4KB cache */
+
+/* Post-ARM7 CPUs */
+#define CPU_ID_ARM810 0x41018100
+#define CPU_ID_ARM920T 0x41129200
+#define CPU_ID_ARM922T 0x41029220
+#define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */
+#define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */
+#define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */
+#define CPU_ID_ARM966ESR1 0x41059660 /* XXX no MMU */
+#define CPU_ID_ARM1020E 0x4115a200 /* (AKA arm10 rev 1) */
+#define CPU_ID_ARM1022ES 0x4105a220
+#define CPU_ID_SA110 0x4401a100
+#define CPU_ID_SA1100 0x4401a110
+#define CPU_ID_TI925T 0x54029250
+#define CPU_ID_SA1110 0x6901b110
+#define CPU_ID_IXP1200 0x6901c120
+#define CPU_ID_80200 0x69052000
+#define CPU_ID_PXA250 0x69052100 /* sans core revision */
+#define CPU_ID_PXA210 0x69052120
+#define CPU_ID_PXA250A 0x69052100 /* 1st version Core */
+#define CPU_ID_PXA210A 0x69052120 /* 1st version Core */
+#define CPU_ID_PXA250B 0x69052900 /* 3rd version Core */
+#define CPU_ID_PXA210B 0x69052920 /* 3rd version Core */
+#define CPU_ID_PXA250C 0x69052d00 /* 4th version Core */
+#define CPU_ID_PXA210C 0x69052d20 /* 4th version Core */
+#define CPU_ID_80321_400 0x69052420
+#define CPU_ID_80321_600 0x69052430
+#define CPU_ID_80321_400_B0 0x69052c20
+#define CPU_ID_80321_600_B0 0x69052c30
+#define CPU_ID_IXP425_533 0x690541c0
+#define CPU_ID_IXP425_400 0x690541d0
+#define CPU_ID_IXP425_266 0x690541f0
+
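
The masks above slice the CP15 ID register into implementor, architecture,
part and revision fields; CPU_ID_CPU_MASK keeps everything but the
revision. A sketch of how identification code can match on it (the
function and its small table are illustrative only):

	static const char *
	cpu_name(u_int cpuid)
	{
		switch (cpuid & CPU_ID_CPU_MASK) {	/* ignore revision */
		case CPU_ID_SA1110:
			return ("SA-1110");
		case CPU_ID_80200:
			return ("i80200 (XScale)");
		default:
			if ((cpuid & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_INTEL)
				return ("unknown Intel core");
			return ("unknown");
		}
	}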
+/* ARM3-specific coprocessor 15 registers */
+#define ARM3_CP15_FLUSH 1
+#define ARM3_CP15_CONTROL 2
+#define ARM3_CP15_CACHEABLE 3
+#define ARM3_CP15_UPDATEABLE 4
+#define ARM3_CP15_DISRUPTIVE 5
+
+/* ARM3 Control register bits */
+#define ARM3_CTL_CACHE_ON 0x00000001
+#define ARM3_CTL_SHARED 0x00000002
+#define ARM3_CTL_MONITOR 0x00000004
+
+/*
+ * Post-ARM3 CP15 registers:
+ *
+ *	1	Control register
+ *	2	Translation Table Base
+ *	3	Domain Access Control
+ *	4	Reserved
+ *	5	Fault Status
+ *	6	Fault Address
+ *	7	Cache/write-buffer Control
+ *	8	TLB Control
+ *	9	Cache Lockdown
+ *	10	TLB Lockdown
+ *	11	Reserved
+ *	12	Reserved
+ *	13	Process ID (for FCSE)
+ *	14	Reserved
+ *	15	Implementation Dependent
+ */
+
+/* Some of the definitions below need cleaning up for V3/V4 architectures */
+
+/* CPU control register (CP15 register 1) */
+#define CPU_CONTROL_MMU_ENABLE 0x00000001 /* M: MMU/Protection unit enable */
+#define CPU_CONTROL_AFLT_ENABLE 0x00000002 /* A: Alignment fault enable */
+#define CPU_CONTROL_DC_ENABLE 0x00000004 /* C: IDC/DC enable */
+#define CPU_CONTROL_WBUF_ENABLE 0x00000008 /* W: Write buffer enable */
+#define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */
+#define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */
+#define CPU_CONTROL_LABT_ENABLE 0x00000040 /* L: Late abort enable */
+#define CPU_CONTROL_BEND_ENABLE 0x00000080 /* B: Big-endian mode */
+#define CPU_CONTROL_SYST_ENABLE 0x00000100 /* S: System protection bit */
+#define CPU_CONTROL_ROM_ENABLE 0x00000200 /* R: ROM protection bit */
+#define CPU_CONTROL_CPCLK 0x00000400 /* F: Implementation defined */
+#define CPU_CONTROL_BPRD_ENABLE 0x00000800 /* Z: Branch prediction enable */
+#define CPU_CONTROL_IC_ENABLE 0x00001000 /* I: IC enable */
+#define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */
+#define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */
+#define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */
+
+#define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE
+
+/* XScale Auxiliary Control Register (CP15 register 1, opcode2 1) */
+#define XSCALE_AUXCTL_K 0x00000001 /* dis. write buffer coalescing */
+#define XSCALE_AUXCTL_P 0x00000002 /* ECC protect page table access */
+#define XSCALE_AUXCTL_MD_WB_RA 0x00000000 /* mini-D$ wb, read-allocate */
+#define XSCALE_AUXCTL_MD_WB_RWA 0x00000010 /* mini-D$ wb, read/write-allocate */
+#define XSCALE_AUXCTL_MD_WT 0x00000020 /* mini-D$ wt, read-allocate */
+#define XSCALE_AUXCTL_MD_MASK 0x00000030
+
+/* Cache type register definitions */
+#define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */
+#define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */
+#define CPU_CT_S (1U << 24) /* split cache */
+#define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */
+
+#define CPU_CT_CTYPE_WT 0 /* write-through */
+#define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */
+#define CPU_CT_CTYPE_WB2 2 /* w/b, clean w/ cp15,7 */
+#define CPU_CT_CTYPE_WB6 6 /* w/b, cp15,7, lockdown fmt A */
+#define CPU_CT_CTYPE_WB7 7 /* w/b, cp15,7, lockdown fmt B */
+
+#define CPU_CT_xSIZE_LEN(x) ((x) & 0x3) /* line size */
+#define CPU_CT_xSIZE_M (1U << 2) /* multiplier */
+#define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */
+#define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */
+
+/* Fault status register definitions */
+
+#define FAULT_TYPE_MASK 0x0f
+#define FAULT_USER 0x10
+
+#define FAULT_WRTBUF_0 0x00 /* Vector Exception */
+#define FAULT_WRTBUF_1 0x02 /* Terminal Exception */
+#define FAULT_BUSERR_0 0x04 /* External Abort on Linefetch -- Section */
+#define FAULT_BUSERR_1 0x06 /* External Abort on Linefetch -- Page */
+#define FAULT_BUSERR_2 0x08 /* External Abort on Non-linefetch -- Section */
+#define FAULT_BUSERR_3 0x0a /* External Abort on Non-linefetch -- Page */
+#define FAULT_BUSTRNL1 0x0c /* External abort on Translation -- Level 1 */
+#define FAULT_BUSTRNL2 0x0e /* External abort on Translation -- Level 2 */
+#define FAULT_ALIGN_0 0x01 /* Alignment */
+#define FAULT_ALIGN_1 0x03 /* Alignment */
+#define FAULT_TRANS_S 0x05 /* Translation -- Section */
+#define FAULT_TRANS_P 0x07 /* Translation -- Page */
+#define FAULT_DOMAIN_S 0x09 /* Domain -- Section */
+#define FAULT_DOMAIN_P 0x0b /* Domain -- Page */
+#define FAULT_PERM_S 0x0d /* Permission -- Section */
+#define FAULT_PERM_P 0x0f /* Permission -- Page */
+
+#define FAULT_IMPRECISE 0x400 /* Imprecise exception (XSCALE) */
+
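
FAULT_TYPE_MASK selects the low four hardware status bits, while FAULT_USER
is a software flag (set in undefinedinstruction() above, for example). A
hedged sketch of the kind of dispatch an abort handler performs with these
values (the signal choices are illustrative):

	static int
	fault_to_signal(int fsr)
	{
		switch (fsr & FAULT_TYPE_MASK) {
		case FAULT_ALIGN_0:
		case FAULT_ALIGN_1:
			return (SIGBUS);
		case FAULT_PERM_S:
		case FAULT_PERM_P:
			return (SIGSEGV);
		default:
			/* Translation faults normally go to vm_fault(). */
			return (SIGSEGV);
		}
	}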
+/*
+ * Address of the vector page, low and high versions.
+ */
+#define ARM_VECTORS_LOW 0x00000000U
+#define ARM_VECTORS_HIGH 0xffff0000U
+
+/*
+ * ARM Instructions
+ *
+ * 3 3 2 2 2
+ * 1 0 9 8 7 0
+ * +-------+-------------------------------------------------------+
+ * | cond  |              instruction dependent                    |
+ * |c c c c| |
+ * +-------+-------------------------------------------------------+
+ */
+
+#define INSN_SIZE 4 /* Always 4 bytes */
+#define INSN_COND_MASK 0xf0000000 /* Condition mask */
+#define INSN_COND_AL 0xe0000000 /* Always condition */
+
+#endif /* !MACHINE_ARMREG_H */
diff --git a/sys/arm/include/asm.h b/sys/arm/include/asm.h
new file mode 100644
index 0000000..dc1dd7e
--- /dev/null
+++ b/sys/arm/include/asm.h
@@ -0,0 +1,147 @@
+/* $NetBSD: asm.h,v 1.5 2003/08/07 16:26:53 agc Exp $ */
+
+/*
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)asm.h 5.5 (Berkeley) 5/7/91
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASM_H_
+#define _MACHINE_ASM_H_
+#include <sys/cdefs.h>
+
+#ifdef __ELF__
+# define _C_LABEL(x) x
+#else
+# ifdef __STDC__
+# define _C_LABEL(x) _ ## x
+# else
+# define _C_LABEL(x) _/**/x
+# endif
+#endif
+#define _ASM_LABEL(x) x
+
+#ifndef _JB_MAGIC__SETJMP
+#define _JB_MAGIC__SETJMP 0x4278f500
+#define _JB_MAGIC_SETJMP 0x4278f501
+#endif
+#if 0
+#ifdef __STDC__
+# define __CONCAT(x,y) x ## y
+# define __STRING(x) #x
+#else
+# define __CONCAT(x,y) x/**/y
+# define __STRING(x) "x"
+#endif
+#endif
+
+#define I32_bit (1 << 7) /* IRQ disable */
+#define F32_bit (1 << 6) /* FIQ disable */
+
+#define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */
+#define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */
+
+#ifndef _ALIGN_TEXT
+# define _ALIGN_TEXT .align 0
+#endif
+
+/*
+ * gas/arm uses @ as a single-line comment character, so it cannot be used
+ * here.  Instead, gas recognises # in place of the @ symbol in .type
+ * directives.  We define a couple of macros so that assembly code will not
+ * be dependent on one or the other.
+ */
+#define _ASM_TYPE_FUNCTION #function
+#define _ASM_TYPE_OBJECT #object
+#define GLOBAL(x)	.globl	x
+#define _ENTRY(x) \
+ .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x:
+
+#ifdef GPROF
+# ifdef __ELF__
+# define _PROF_PROLOGUE \
+ mov ip, lr; bl __mcount
+# else
+# define _PROF_PROLOGUE \
+ mov ip,lr; bl mcount
+# endif
+#else
+# define _PROF_PROLOGUE
+#endif
+
+#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
+#define ENTRY_NP(y) _ENTRY(_C_LABEL(y))
+#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
+#define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y))
+
+#define ASMSTR .asciz
+
+#if defined(__ELF__) && defined(PIC)
+#ifdef __STDC__
+#define PIC_SYM(x,y) x ## ( ## y ## )
+#else
+#define PIC_SYM(x,y) x/**/(/**/y/**/)
+#endif
+#else
+#define PIC_SYM(x,y) x
+#endif
+
+#undef __FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s) .ident s
+#else
+#define __FBSDID(s) /* nothing */
+#endif
+
+
+#ifdef __ELF__
+#define WEAK_ALIAS(alias,sym) \
+ .weak alias; \
+ alias = sym
+#endif
+
+#ifdef __STDC__
+#define WARN_REFERENCES(sym,msg) \
+ .stabs msg ## ,30,0,0,0 ; \
+ .stabs __STRING(_C_LABEL(sym)) ## ,1,0,0,0
+#elif defined(__ELF__)
+#define WARN_REFERENCES(sym,msg) \
+ .stabs msg,30,0,0,0 ; \
+ .stabs __STRING(sym),1,0,0,0
+#else
+#define WARN_REFERENCES(sym,msg) \
+ .stabs msg,30,0,0,0 ; \
+ .stabs __STRING(_/**/sym),1,0,0,0
+#endif /* __STDC__ */
+
+#endif /* !_MACHINE_ASM_H_ */
diff --git a/sys/arm/include/asmacros.h b/sys/arm/include/asmacros.h
new file mode 100644
index 0000000..c12260f
--- /dev/null
+++ b/sys/arm/include/asmacros.h
@@ -0,0 +1,204 @@
+/* $NetBSD: frame.h,v 1.6 2003/10/05 19:44:58 matt Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASMACROS_H_
+#define _MACHINE_ASMACROS_H_
+
+#ifdef _KERNEL
+
+#ifdef LOCORE
+
+/*
+ * ASM macros for pushing and pulling trapframes from the stack
+ *
+ * These macros are used to handle the irqframe and trapframe structures
+ * defined above.
+ */
+
+/*
+ * PUSHFRAME - macro to push a trap frame on the stack in the current mode
+ * Since the current mode is used, the SVC lr field is not defined.
+ *
+ * NOTE: r13 and r14 are stored separately as a workaround for the
+ * SA110 rev 2 STM^ bug.
+ */
+
+#define PUSHFRAME \
+ str lr, [sp, #-4]!; /* Push the return address */ \
+ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
+ stmia sp, {r0-r12}; /* Push the user mode registers */ \
+ add r0, sp, #(4*13); /* Adjust the stack pointer */ \
+ stmia r0, {r13-r14}^; /* Push the user mode registers */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ mrs r0, spsr_all; /* Put the SPSR on the stack */ \
+ str r0, [sp, #-4]!;
+
+/*
+ * PULLFRAME - macro to pull a trap frame from the stack in the current mode
+ * Since the current mode is used, the SVC lr field is ignored.
+ */
+
+#define PULLFRAME \
+ ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
+ msr spsr_all, r0; \
+ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ add sp, sp, #(4*17); /* Adjust the stack pointer */ \
+ ldr lr, [sp], #0x0004; /* Pull the return address */
+
+/*
+ * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
+ * This should only be used if the processor is not currently in SVC32
+ * mode. The processor mode is switched to SVC mode and the trap frame is
+ * stored. The SVC lr field is used to store the previous value of
+ * lr in SVC mode.
+ *
+ * NOTE: r13 and r14 are stored separately as a workaround for the
+ * SA110 rev 2 STM^ bug.
+ */
+
+#define PUSHFRAMEINSVC \
+ stmdb sp, {r0-r3}; /* Save 4 registers */ \
+ mov r0, lr; /* Save xxx32 r14 */ \
+ mov r1, sp; /* Save xxx32 sp */ \
+ mrs r3, spsr; /* Save xxx32 spsr */ \
+ mrs r2, cpsr; /* Get the CPSR */ \
+ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \
+ orr r2, r2, #(PSR_SVC32_MODE); \
+ msr cpsr_c, r2; /* Punch into SVC mode */ \
+ mov r2, sp; /* Save SVC sp */ \
+ str r0, [sp, #-4]!; /* Push return address */ \
+ str lr, [sp, #-4]!; /* Push SVC lr */ \
+ str r2, [sp, #-4]!; /* Push SVC sp */ \
+ msr spsr_all, r3; /* Restore correct spsr */ \
+ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
+ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \
+ stmia sp, {r0-r12}; /* Push the user mode registers */ \
+ add r0, sp, #(4*13); /* Adjust the stack pointer */ \
+ stmia r0, {r13-r14}^; /* Push the user mode registers */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ mrs r0, spsr_all; /* Put the SPSR on the stack */ \
+ str r0, [sp, #-4]!
+
+/*
+ * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
+ * in SVC32 mode and restore the saved processor mode and PC.
+ * This should be used when the SVC lr register needs to be restored on
+ * exit.
+ */
+
+#define PULLFRAMEFROMSVCANDEXIT \
+ ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
+ msr spsr_all, r0; /* restore SPSR */ \
+ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ add sp, sp, #(4*15); /* Adjust the stack pointer */ \
+ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
+
+#define DATA(name) \
+ .data ; \
+ _ALIGN_DATA ; \
+ .globl name ; \
+ .type name, %object ; \
+name:
+
+#define EMPTY
+
+
+#define GET_CURPCB_ENTER \
+ ldr r1, .Laflt_curpcb ;\
+ ldr r1, [r1]
+
+/*
+ * This macro must be invoked following PUSHFRAMEINSVC or PUSHFRAME at
+ * the top of interrupt/exception handlers.
+ *
+ * When invoked, r0 *must* contain the value of SPSR on the current
+ * trap/interrupt frame. This is always the case if ENABLE_ALIGNMENT_FAULTS
+ * is invoked immediately after PUSHFRAMEINSVC or PUSHFRAME.
+ */
+#define ENABLE_ALIGNMENT_FAULTS \
+ and r0, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\
+ teq r0, #(PSR_USR32_MODE) ;\
+ bne 1f /* Not USR mode skip AFLT */ ;\
+ GET_CURPCB_ENTER /* r1 = curpcb */ ;\
+ cmp r1, #0x00 /* curpcb NULL? */ ;\
+ ldrne r1, [r1, #PCB_FLAGS] /* Fetch curpcb->pcb_flags */ ;\
+ tstne r1, #PCB_NOALIGNFLT ;\
+ beq 1f /* AFLTs already enabled */ ;\
+ ldr r2, .Laflt_cpufuncs ;\
+ mov lr, pc ;\
+ ldr pc, [r2, #CF_CONTROL] /* Enable alignment faults */ ;\
+1:
+
+#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
+ ldr r0, [sp] /* Get the SPSR from stack */ ;\
+ mrs r4, cpsr /* save CPSR */ ;\
+ and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\
+ teq r0, #(PSR_USR32_MODE) ;\
+ bne 2f /* Nope, get out now */ ;\
+ bic r4, r4, #(I32_bit) ;\
+1: orr r0, r4, #(I32_bit) /* Disable IRQs */ ;\
+ msr cpsr_c, r0 ;\
+ ldr r5, .Laflt_curthread ;\
+ ldr r5, [r5] ;\
+ ldr r5, [r5, #(TD_FLAGS)] ;\
+ and r5, r5, #(TDF_ASTPENDING) ;\
+ teq r5, #0x00000000 ;\
+ beq 2f /* Nope. Just bail */ ;\
+ msr cpsr_c, r4 /* Restore interrupts */ ;\
+ mov r0, sp ;\
+ adr lr, 1b ;\
+ b _C_LABEL(ast) /* ast(frame) */ ;\
+2:
+
+#define AST_ALIGNMENT_FAULT_LOCALS ;\
+.Laflt_curpcb: ;\
+ .word _C_LABEL(__pcpu) + PC_CURPCB ;\
+.Laflt_cpufuncs: ;\
+ .word _C_LABEL(cpufuncs) ;\
+.Laflt_curthread: ;\
+ .word _C_LABEL(__pcpu) + PC_CURTHREAD
+
+
+#endif /* LOCORE */
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_ASMACROS_H_ */
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
new file mode 100644
index 0000000..f1725d2
--- /dev/null
+++ b/sys/arm/include/atomic.h
@@ -0,0 +1,197 @@
+/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */
+
+/*
+ * Copyright (C) 2003-2004 Olivier Houchard
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+#ifndef _LOCORE
+
+#include <sys/types.h>
+
+#ifndef I32_bit
+#define I32_bit (1 << 7) /* IRQ disable */
+#endif
+#ifndef F32_bit
+#define F32_bit (1 << 6) /* FIQ disable */
+#endif
+
+#define __with_interrupts_disabled(expr) \
+ do { \
+ u_int cpsr_save, tmp; \
+ \
+ __asm __volatile( \
+ "mrs %0, cpsr;" \
+ "orr %1, %0, %2;" \
+ "msr cpsr_all, %1;" \
+ : "=r" (cpsr_save), "=r" (tmp) \
+ : "I" (I32_bit) \
+ : "cc" ); \
+ (expr); \
+ __asm __volatile( \
+ "msr cpsr_all, %0" \
+ : /* no output */ \
+ : "r" (cpsr_save) \
+ : "cc" ); \
+ } while(0)
+
+static __inline void
+atomic_set_32(volatile uint32_t *address, uint32_t setmask)
+{
+ __with_interrupts_disabled( *address |= setmask);
+}
+
+static __inline void
+atomic_set_ptr(volatile void *ptr, uint32_t src)
+{
+ atomic_set_32((volatile uint32_t *)ptr, (uint32_t)src);
+}
+
+#define atomic_set_rel_int atomic_set_32
+#define atomic_set_int atomic_set_32
+#define atomic_readandclear_int atomic_readandclear_32
+static __inline void
+atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
+{
+ __with_interrupts_disabled( *address &= ~clearmask);
+}
+
+static __inline void
+atomic_clear_ptr(volatile void *ptr, uint32_t src)
+{
+ atomic_clear_32((volatile uint32_t *)ptr, (uint32_t)src);
+}
+
+static __inline int
+atomic_load_acq_int(volatile uint32_t *v)
+{
+	int val;
+
+	__with_interrupts_disabled(val = *v);
+	return (val);
+}
+
+#define atomic_clear_int atomic_clear_32
+static __inline void
+atomic_store_32(volatile uint32_t *dst, uint32_t src)
+{
+ __with_interrupts_disabled(*dst = src);
+}
+
+static __inline void
+atomic_store_ptr(volatile void *dst, void *src)
+{
+ atomic_store_32((volatile uint32_t *)dst, (uint32_t) src);
+}
+
+#define atomic_store_rel_ptr atomic_store_ptr
+#define atomic_store_rel_int atomic_store_32
+
+static __inline uint32_t
+atomic_readandclear_32(volatile u_int32_t *p)
+{
+	uint32_t ret;
+
+	/* Snapshot the old value, then clear it only if it was nonzero. */
+	__with_interrupts_disabled((ret = *p) != 0 ? *p = 0 : 0);
+	return (ret);
+}
+
+static __inline u_int32_t
+atomic_cmpset_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+{
+	int done = 0;
+
+	/*
+	 * "newval + done++" evaluates to newval (done is still 0 when
+	 * read) while flagging the successful swap; on a mismatch *p
+	 * is stored back unchanged.
+	 */
+	__with_interrupts_disabled(*p = (*p == cmpval ? newval + done++ : *p));
+	return (done);
+}
+
+static __inline void
+atomic_add_32(volatile u_int32_t *p, u_int32_t val)
+{
+ __with_interrupts_disabled(*p += val);
+}
+
+static __inline void
+atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
+{
+ __with_interrupts_disabled(*p -= val);
+}
+
+#define atomic_subtract_int atomic_subtract_32
+#define atomic_subtract_rel_int atomic_subtract_32
+#define atomic_subtract_acq_int atomic_subtract_32
+#define atomic_add_int atomic_add_32
+#define atomic_add_rel_int atomic_add_32
+#define atomic_add_acq_int atomic_add_32
+#define atomic_cmpset_int atomic_cmpset_32
+#define atomic_cmpset_rel_int atomic_cmpset_32
+#define atomic_cmpset_acq_int atomic_cmpset_32
+
+static __inline u_int32_t
+atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
+{
+ return (atomic_cmpset_32((volatile u_int32_t *)dst, (u_int32_t)exp,
+ (u_int32_t)src));
+}
+
+static __inline u_int32_t
+atomic_cmpset_rel_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+{
+ return (atomic_cmpset_32(p, cmpval, newval));
+}
+
+static __inline u_int32_t
+atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
+{
+ return (atomic_cmpset_32((volatile u_int32_t *)dst,
+ (u_int32_t)exp, (u_int32_t)src));
+}
+
+#define atomic_cmpset_acq_ptr atomic_cmpset_ptr
+
+#if !defined(ATOMIC_SET_BIT_NOINLINE)
+
+#define atomic_set_bit(a,m) atomic_set_32(a,m)
+#define atomic_clear_bit(a,m) atomic_clear_32(a,m)
+
+#endif
+
+#undef __with_interrupts_disabled
+
+#endif /* _LOCORE */
+#endif /* _MACHINE_ATOMIC_H_ */
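
The compare-and-swap above rewards a closer look: on this uniprocessor port,
"atomicity" comes purely from masking interrupts, and the whole update must
fit inside the single expression that __with_interrupts_disabled() accepts.
A minimal user-space C model of the trick (cmpset_model and main are
illustrative names, not part of the header):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Model of atomic_cmpset_32: "newval + done++" evaluates to
	 * newval (done is still 0 when it is read) while recording the
	 * successful swap; on a mismatch *p is stored back unchanged.
	 */
	static uint32_t
	cmpset_model(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
	{
		int done = 0;

		*p = (*p == cmpval ? newval + done++ : *p);
		return (done);
	}

	int
	main(void)
	{
		volatile uint32_t word = 5;

		printf("%u %u\n", cmpset_model(&word, 5, 7), word); /* 1 7 */
		printf("%u %u\n", cmpset_model(&word, 5, 9), word); /* 0 7 */
		return (0);
	}
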
diff --git a/sys/arm/include/blockio.h b/sys/arm/include/blockio.h
new file mode 100644
index 0000000..05c35ae
--- /dev/null
+++ b/sys/arm/include/blockio.h
@@ -0,0 +1,56 @@
+/* $NetBSD: blockio.h,v 1.2 2001/06/02 10:44:56 bjh21 Exp $ */
+
+/*-
+ * Copyright (c) 2001 Ben Harris
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+/*
+ * blockio.h - low level functions for bulk PIO data transfer
+ */
+
+#ifndef _MACHINE_BLOCKIO_H_
+#define _MACHINE_BLOCKIO_H_
+
+/*
+ * All these take three arguments:
+ * I/O address
+ * Memory address
+ * Number of bytes to copy
+ */
+
+void read_multi_1(u_int, void *, u_int);
+void write_multi_1(u_int, const void *, u_int);
+#define read_multi_2 insw16
+#define write_multi_2 outsw16
+
+void insw(u_int, void *, u_int);
+void outsw(u_int, void *, u_int);
+void insw16(u_int, void *, u_int);
+void outsw16(u_int, void *, u_int);
+
+#endif /* !_MACHINE_BLOCKIO_H_ */
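
A sketch of how a driver might use these primitives to drain a byte-wide
FIFO in one call instead of looping over single-byte reads; the register
address and function name below are hypothetical:

	#include <sys/types.h>
	#include <machine/blockio.h>

	#define DATA_REG 0xd0000010	/* hypothetical device register */

	/* Pull len bytes from the device's data register into buf. */
	static void
	fifo_drain(u_int8_t *buf, u_int len)
	{
		read_multi_1(DATA_REG, buf, len);
	}
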
diff --git a/sys/arm/include/bootconfig.h b/sys/arm/include/bootconfig.h
new file mode 100644
index 0000000..4745ee2
--- /dev/null
+++ b/sys/arm/include/bootconfig.h
@@ -0,0 +1,58 @@
+/* $NetBSD: bootconfig.h,v 1.1 2001/05/13 13:46:23 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BOOTCONFIG_H_
+#define _MACHINE_BOOTCONFIG_H_
+
+#ifdef _KERNEL
+#define BOOTOPT_TYPE_BOOLEAN 0
+#define BOOTOPT_TYPE_STRING 1
+#define BOOTOPT_TYPE_INT 2
+#define BOOTOPT_TYPE_BININT 3
+#define BOOTOPT_TYPE_HEXINT 4
+#define BOOTOPT_TYPE_MASK 7
+
+int get_bootconf_option __P((char *, char *, int, void *));
+
+extern char *boot_args;
+extern char *boot_file;
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_BOOTCONFIG_H_ */
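
Assuming the NetBSD semantics these declarations were imported with, where
get_bootconf_option() returns nonzero when the named option is present on
the argument string and decodes it according to the BOOTOPT_TYPE_* code, a
caller might look like this (parse_boot_options is a hypothetical example):

	#include <machine/bootconfig.h>

	static void
	parse_boot_options(void)
	{
		int single;

		/* "single" is treated as a boolean flag on boot_args. */
		if (get_bootconf_option(boot_args, "single",
		    BOOTOPT_TYPE_BOOLEAN, &single) && single) {
			/* arrange to boot single-user */
		}
	}
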
diff --git a/sys/arm/include/bus.h b/sys/arm/include/bus.h
new file mode 100644
index 0000000..798f425
--- /dev/null
+++ b/sys/arm/include/bus.h
@@ -0,0 +1,769 @@
+/* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+/*
+ * Addresses (in bus space).
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus space.
+ */
+typedef struct bus_space *bus_space_tag_t;
+typedef u_long bus_space_handle_t;
+
+/*
+ * int bus_space_map (bus_space_tag_t t, bus_addr_t addr,
+ * bus_size_t size, int flags, bus_space_handle_t *bshp);
+ *
+ * Map a region of bus space.
+ */
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
+
+struct bus_space {
+ /* cookie */
+ void *bs_cookie;
+
+ /* mapping/unmapping */
+ int (*bs_map) (void *, bus_addr_t, bus_size_t,
+ int, bus_space_handle_t *);
+ void (*bs_unmap) (void *, bus_size_t);
+ int (*bs_subregion) (void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *);
+
+ /* allocation/deallocation */
+ int (*bs_alloc) (void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_size_t, int,
+ bus_addr_t *, bus_space_handle_t *);
+ void (*bs_free) (void *, bus_space_handle_t,
+ bus_size_t);
+
+ /* get kernel virtual address */
+ void * (*bs_vaddr) (void *, bus_space_handle_t);
+
+ /* mmap bus space for user */
+ int (*bs_mmap) (dev_t, vm_offset_t, vm_paddr_t *, int);
+
+ /* barrier */
+ void (*bs_barrier) (void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int);
+
+ /* read (single) */
+ u_int8_t (*bs_r_1) (void *, bus_space_handle_t, bus_size_t);
+ u_int16_t (*bs_r_2) (void *, bus_space_handle_t, bus_size_t);
+ u_int32_t (*bs_r_4) (void *, bus_space_handle_t, bus_size_t);
+ u_int64_t (*bs_r_8) (void *, bus_space_handle_t, bus_size_t);
+
+ /* read multiple */
+ void (*bs_rm_1) (void *, bus_space_handle_t, bus_size_t,
+ u_int8_t *, bus_size_t);
+ void (*bs_rm_2) (void *, bus_space_handle_t, bus_size_t,
+ u_int16_t *, bus_size_t);
+ void (*bs_rm_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region */
+ void (*bs_rr_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write (single) */
+ void (*bs_w_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple */
+ void (*bs_wm_1) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region */
+ void (*bs_wr_1) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* set multiple */
+ void (*bs_sm_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sm_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sm_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sm_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* set region */
+ void (*bs_sr_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sr_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sr_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sr_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* copy */
+ void (*bs_c_1) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_2) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_4) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_8) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+
+};
+
+
+/*
+ * Utility macros; INTERNAL USE ONLY.
+ */
+#define __bs_c(a,b) __CONCAT(a,b)
+#define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size)
+
+#define __bs_rs(sz, t, h, o) \
+ (*(t)->__bs_opname(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws(sz, t, h, o, v) \
+ (*(t)->__bs_opname(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+
+
+/*
+ * Mapping and unmapping operations.
+ */
+#define bus_space_map(t, a, s, c, hp) \
+ (*(t)->bs_map)((t)->bs_cookie, (a), (s), (c), (hp))
+#define bus_space_unmap(t, h, s) \
+ (*(t)->bs_unmap)((t)->bs_cookie, (h), (s))
+#define bus_space_subregion(t, h, o, s, hp) \
+ (*(t)->bs_subregion)((t)->bs_cookie, (h), (o), (s), (hp))
+
+
+/*
+ * Allocation and deallocation operations.
+ */
+#define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \
+ (*(t)->bs_alloc)((t)->bs_cookie, (rs), (re), (s), (a), (b), \
+ (c), (ap), (hp))
+#define bus_space_free(t, h, s) \
+ (*(t)->bs_free)((t)->bs_cookie, (h), (s))
+
+/*
+ * Get kernel virtual address for ranges mapped BUS_SPACE_MAP_LINEAR.
+ */
+#define bus_space_vaddr(t, h) \
+ (*(t)->bs_vaddr)((t)->bs_cookie, (h))
+
+/*
+ * MMap bus space for a user application.
+ */
+#define bus_space_mmap(t, a, o, p, f) \
+ (*(t)->bs_mmap)((t)->bs_cookie, (a), (o), (p), (f))
+
+/*
+ * Bus barrier operations.
+ */
+#define bus_space_barrier(t, h, o, l, f) \
+ (*(t)->bs_barrier)((t)->bs_cookie, (h), (o), (l), (f))
+
+#define BUS_SPACE_BARRIER_READ 0x01
+#define BUS_SPACE_BARRIER_WRITE 0x02
+
+/*
+ * Bus read (single) operations.
+ */
+#define bus_space_read_1(t, h, o) __bs_rs(1,(t),(h),(o))
+#define bus_space_read_2(t, h, o) __bs_rs(2,(t),(h),(o))
+#define bus_space_read_4(t, h, o) __bs_rs(4,(t),(h),(o))
+#define bus_space_read_8(t, h, o) __bs_rs(8,(t),(h),(o))
+
+
+/*
+ * Bus read multiple operations.
+ */
+#define bus_space_read_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Bus read region operations.
+ */
+#define bus_space_read_region_1(t, h, o, a, c) \
+ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_2(t, h, o, a, c) \
+ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_4(t, h, o, a, c) \
+ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_8(t, h, o, a, c) \
+ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Bus write (single) operations.
+ */
+#define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v))
+#define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v))
+#define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v))
+#define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v))
+
+
+/*
+ * Bus write multiple operations.
+ */
+#define bus_space_write_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Bus write region operations.
+ */
+#define bus_space_write_region_1(t, h, o, a, c) \
+ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_2(t, h, o, a, c) \
+ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_4(t, h, o, a, c) \
+ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_8(t, h, o, a, c) \
+ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Set multiple operations.
+ */
+#define bus_space_set_multi_1(t, h, o, v, c) \
+ __bs_set(sm,1,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_2(t, h, o, v, c) \
+ __bs_set(sm,2,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_4(t, h, o, v, c) \
+ __bs_set(sm,4,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_8(t, h, o, v, c) \
+ __bs_set(sm,8,(t),(h),(o),(v),(c))
+
+
+/*
+ * Set region operations.
+ */
+#define bus_space_set_region_1(t, h, o, v, c) \
+ __bs_set(sr,1,(t),(h),(o),(v),(c))
+#define bus_space_set_region_2(t, h, o, v, c) \
+ __bs_set(sr,2,(t),(h),(o),(v),(c))
+#define bus_space_set_region_4(t, h, o, v, c) \
+ __bs_set(sr,4,(t),(h),(o),(v),(c))
+#define bus_space_set_region_8(t, h, o, v, c) \
+ __bs_set(sr,8,(t),(h),(o),(v),(c))
+
+
+/*
+ * Copy operations.
+ */
+#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \
+ __bs_copy(1, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \
+ __bs_copy(2, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \
+ __bs_copy(4, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \
+ __bs_copy(8, t, h1, o1, h2, o2, c)
+
+/*
+ * Macros to provide prototypes for all the functions used in the
+ * bus_space structure
+ */
+
+#define bs_map_proto(f) \
+int __bs_c(f,_bs_map) (void *t, bus_addr_t addr, \
+ bus_size_t size, int cacheable, bus_space_handle_t *bshp);
+
+#define bs_unmap_proto(f) \
+void __bs_c(f,_bs_unmap) (void *t, bus_size_t size);
+
+#define bs_subregion_proto(f) \
+int __bs_c(f,_bs_subregion) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, bus_size_t size, \
+ bus_space_handle_t *nbshp);
+
+#define bs_alloc_proto(f) \
+int __bs_c(f,_bs_alloc) (void *t, bus_addr_t rstart, \
+ bus_addr_t rend, bus_size_t size, bus_size_t align, \
+ bus_size_t boundary, int cacheable, bus_addr_t *addrp, \
+ bus_space_handle_t *bshp);
+
+#define bs_free_proto(f) \
+void __bs_c(f,_bs_free) (void *t, bus_space_handle_t bsh, \
+ bus_size_t size);
+
+#define bs_vaddr_proto(f) \
+void * __bs_c(f,_bs_vaddr) (void *t, bus_space_handle_t bsh);
+
+#define bs_mmap_proto(f) \
+int __bs_c(f,_bs_mmap) (dev_t, vm_offset_t, vm_paddr_t *, int);
+
+#define bs_barrier_proto(f) \
+void __bs_c(f,_bs_barrier) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, bus_size_t len, int flags);
+
+#define bs_r_1_proto(f) \
+u_int8_t __bs_c(f,_bs_r_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_r_2_proto(f) \
+u_int16_t __bs_c(f,_bs_r_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_r_4_proto(f) \
+u_int32_t __bs_c(f,_bs_r_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_r_8_proto(f) \
+u_int64_t __bs_c(f,_bs_r_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_w_1_proto(f) \
+void __bs_c(f,_bs_w_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t value);
+
+#define bs_w_2_proto(f) \
+void __bs_c(f,_bs_w_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t value);
+
+#define bs_w_4_proto(f) \
+void __bs_c(f,_bs_w_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t value);
+
+#define bs_w_8_proto(f) \
+void __bs_c(f,_bs_w_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t value);
+
+#define bs_rm_1_proto(f) \
+void __bs_c(f,_bs_rm_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t *addr, bus_size_t count);
+
+#define bs_rm_2_proto(f) \
+void __bs_c(f,_bs_rm_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t *addr, bus_size_t count);
+
+#define bs_rm_4_proto(f) \
+void __bs_c(f,_bs_rm_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t *addr, bus_size_t count);
+
+#define bs_rm_8_proto(f) \
+void __bs_c(f,_bs_rm_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t *addr, bus_size_t count);
+
+#define bs_wm_1_proto(f) \
+void __bs_c(f,_bs_wm_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int8_t *addr, bus_size_t count);
+
+#define bs_wm_2_proto(f) \
+void __bs_c(f,_bs_wm_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int16_t *addr, bus_size_t count);
+
+#define bs_wm_4_proto(f) \
+void __bs_c(f,_bs_wm_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int32_t *addr, bus_size_t count);
+
+#define bs_wm_8_proto(f) \
+void __bs_c(f,_bs_wm_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int64_t *addr, bus_size_t count);
+
+#define bs_rr_1_proto(f) \
+void __bs_c(f, _bs_rr_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t *addr, bus_size_t count);
+
+#define bs_rr_2_proto(f) \
+void __bs_c(f, _bs_rr_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t *addr, bus_size_t count);
+
+#define bs_rr_4_proto(f) \
+void __bs_c(f, _bs_rr_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t *addr, bus_size_t count);
+
+#define bs_rr_8_proto(f) \
+void __bs_c(f, _bs_rr_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t *addr, bus_size_t count);
+
+#define bs_wr_1_proto(f) \
+void __bs_c(f, _bs_wr_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int8_t *addr, bus_size_t count);
+
+#define bs_wr_2_proto(f) \
+void __bs_c(f, _bs_wr_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int16_t *addr, bus_size_t count);
+
+#define bs_wr_4_proto(f) \
+void __bs_c(f, _bs_wr_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int32_t *addr, bus_size_t count);
+
+#define bs_wr_8_proto(f) \
+void __bs_c(f, _bs_wr_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int64_t *addr, bus_size_t count);
+
+#define bs_sm_1_proto(f) \
+void __bs_c(f,_bs_sm_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t value, bus_size_t count);
+
+#define bs_sm_2_proto(f) \
+void __bs_c(f,_bs_sm_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t value, bus_size_t count);
+
+#define bs_sm_4_proto(f) \
+void __bs_c(f,_bs_sm_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t value, bus_size_t count);
+
+#define bs_sm_8_proto(f) \
+void __bs_c(f,_bs_sm_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t value, bus_size_t count);
+
+#define bs_sr_1_proto(f) \
+void __bs_c(f,_bs_sr_1) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t value, bus_size_t count);
+
+#define bs_sr_2_proto(f) \
+void __bs_c(f,_bs_sr_2) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t value, bus_size_t count);
+
+#define bs_sr_4_proto(f) \
+void __bs_c(f,_bs_sr_4) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t value, bus_size_t count);
+
+#define bs_sr_8_proto(f) \
+void __bs_c(f,_bs_sr_8) (void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t value, bus_size_t count);
+
+#define bs_c_1_proto(f) \
+void __bs_c(f,_bs_c_1) (void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_c_2_proto(f) \
+void __bs_c(f,_bs_c_2) (void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_c_4_proto(f) \
+void __bs_c(f,_bs_c_4) (void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_c_8_proto(f) \
+void __bs_c(f,_bs_c_8) (void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_protos(f) \
+bs_map_proto(f); \
+bs_unmap_proto(f); \
+bs_subregion_proto(f); \
+bs_alloc_proto(f); \
+bs_free_proto(f); \
+bs_vaddr_proto(f); \
+bs_mmap_proto(f); \
+bs_barrier_proto(f); \
+bs_r_1_proto(f); \
+bs_r_2_proto(f); \
+bs_r_4_proto(f); \
+bs_r_8_proto(f); \
+bs_w_1_proto(f); \
+bs_w_2_proto(f); \
+bs_w_4_proto(f); \
+bs_w_8_proto(f); \
+bs_rm_1_proto(f); \
+bs_rm_2_proto(f); \
+bs_rm_4_proto(f); \
+bs_rm_8_proto(f); \
+bs_wm_1_proto(f); \
+bs_wm_2_proto(f); \
+bs_wm_4_proto(f); \
+bs_wm_8_proto(f); \
+bs_rr_1_proto(f); \
+bs_rr_2_proto(f); \
+bs_rr_4_proto(f); \
+bs_rr_8_proto(f); \
+bs_wr_1_proto(f); \
+bs_wr_2_proto(f); \
+bs_wr_4_proto(f); \
+bs_wr_8_proto(f); \
+bs_sm_1_proto(f); \
+bs_sm_2_proto(f); \
+bs_sm_4_proto(f); \
+bs_sm_8_proto(f); \
+bs_sr_1_proto(f); \
+bs_sr_2_proto(f); \
+bs_sr_4_proto(f); \
+bs_sr_8_proto(f); \
+bs_c_1_proto(f); \
+bs_c_2_proto(f); \
+bs_c_4_proto(f); \
+bs_c_8_proto(f);
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+/* Bus Space DMA macros */
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x000 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */
+#define BUS_DMA_ZERO 0x008 /* allocate zero'ed memory */
+#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x020
+#define BUS_DMA_BUS3 0x040
+#define BUS_DMA_BUS4 0x080
+
+/*
+ * Private flags stored in the DMA map.
+ */
+#define ARM32_DMAMAP_COHERENT 0x10000 /* no cache flush necessary on sync */
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * Operations performed by bus_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+typedef struct bus_dma_tag *bus_dma_tag_t;
+typedef struct bus_dmamap *bus_dmamap_t;
+
+#define BUS_DMA_TAG_VALID(t) ((t) != (bus_dma_tag_t)0)
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+struct bus_dma_segment {
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+};
+typedef struct bus_dma_segment bus_dma_segment_t;
+
+/*
+ * arm32_dma_range
+ *
+ * This structure describes a valid DMA range.
+ */
+struct arm32_dma_range {
+ bus_addr_t dr_sysbase; /* system base address */
+ bus_addr_t dr_busbase; /* appears here on bus */
+ bus_size_t dr_len; /* length of range */
+};
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the implementation of
+ * DMA for a given bus.
+ */
+
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+typedef int bus_dmasync_op_t;
+typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
+
+
+#ifdef _ARM32_BUS_DMA_PRIVATE
+
+/* _dm_buftype */
+#define ARM32_BUFTYPE_INVALID 0
+#define ARM32_BUFTYPE_LINEAR 1
+#define ARM32_BUFTYPE_MBUF 2
+#define ARM32_BUFTYPE_UIO 3
+#define ARM32_BUFTYPE_RAW 4
+
+struct arm32_dma_range *bus_dma_get_range(void);
+#endif /* _ARM32_BUS_DMA_PRIVATE */
+
+/*
+ * A function that returns 1 if the address cannot be accessed by
+ * a device and 0 if it can be.
+ */
+typedef int bus_dma_filter_t(void *, bus_addr_t);
+
+/*
+ * A function that performs driver-specific synchronization on behalf of
+ * busdma.
+ */
+typedef enum {
+ BUS_DMA_LOCK = 0x01,
+ BUS_DMA_UNLOCK = 0x02,
+} bus_dma_lock_op_t;
+
+typedef void bus_dma_lock_t(void *, bus_dma_lock_op_t);
+
+/*
+ * Allocate a device specific dma_tag encapsulating the constraints of
+ * the parent tag in addition to other restrictions specified:
+ *
+ * alignment: alignment for segments.
+ * boundary: Boundary that segments cannot cross.
+ * lowaddr: Low restricted address that cannot appear in a mapping.
+ * highaddr: High restricted address that cannot appear in a mapping.
+ * filtfunc: An optional function to further test if an address
+ * within the range of lowaddr and highaddr cannot appear
+ * in a mapping.
+ * filtfuncarg: An argument that will be passed to filtfunc in addition
+ * to the address to test.
+ * maxsize: Maximum mapping size supported by this tag.
+ * nsegments: Number of discontinuities allowed in maps.
+ * maxsegsz: Maximum size of a segment in the map.
+ * flags: Bus DMA flags.
+ * dmat: A pointer to set to a valid dma tag should the return
+ * value of this function indicate success.
+ */
+int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filtfunc,
+ void *filtfuncarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat);
+
+int bus_dma_tag_destroy(bus_dma_tag_t dmat);
+
+int bus_dmamap_create (bus_dma_tag_t, int, bus_dmamap_t *);
+int bus_dmamap_destroy (bus_dma_tag_t, bus_dmamap_t);
+int bus_dmamap_load (bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, bus_dmamap_callback_t *, void *, int);
+int bus_dmamap_load_mbuf (bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, bus_dmamap_callback2_t *, void *, int);
+int bus_dmamap_load_uio (bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, bus_dmamap_callback2_t *, void *, int);
+void bus_dmamap_unload (bus_dma_tag_t, bus_dmamap_t);
+void bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
+
+int bus_dmamem_alloc (bus_dma_tag_t tag, void **vaddr, int flag,
+ bus_dmamap_t *mapp);
+void bus_dmamem_free (bus_dma_tag_t tag, void *vaddr, bus_dmamap_t map);
+
+/*
+ * Generic helper function for manipulating mutexes.
+ */
+void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op);
+
+#endif /* _MACHINE_BUS_H_ */
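
The long parameter list of bus_dma_tag_create() is easiest to see in use.
One plausible shape for a caller, creating a tag for a single 64KB,
4-byte-aligned segment and loading a buffer through it; this is a hedged
sketch in which BUS_SPACE_MAXADDR and Giant are assumed to come from other
kernel headers, and dma_cb/setup_dma are hypothetical names:

	#include <machine/bus.h>

	/* busdma invokes this with the resolved segment list. */
	static void
	dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		if (error == 0)
			*(bus_addr_t *)arg = segs[0].ds_addr;
	}

	static int
	setup_dma(bus_dma_tag_t parent, void *buf, bus_size_t len,
	    bus_addr_t *physp)
	{
		bus_dma_tag_t tag;
		bus_dmamap_t map;
		int error;

		error = bus_dma_tag_create(parent,
		    4,			/* alignment */
		    0,			/* no boundary */
		    BUS_SPACE_MAXADDR,	/* lowaddr: no restriction */
		    BUS_SPACE_MAXADDR,	/* highaddr (both assumed
					   defined elsewhere) */
		    NULL, NULL,		/* no filter */
		    65536, 1, 65536,	/* maxsize, nsegments, maxsegsz */
		    0,			/* flags */
		    busdma_lock_mutex, &Giant,
		    &tag);
		if (error != 0)
			return (error);
		error = bus_dmamap_create(tag, 0, &map);
		if (error == 0)
			error = bus_dmamap_load(tag, map, buf, len,
			    dma_cb, physp, BUS_DMA_NOWAIT);
		return (error);
	}
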
diff --git a/sys/arm/include/clock.h b/sys/arm/include/clock.h
new file mode 100644
index 0000000..61922f4
--- /dev/null
+++ b/sys/arm/include/clock.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CLOCK_H_
+#define _MACHINE_CLOCK_H_
+
+#endif /* !_MACHINE_CLOCK_H_ */
diff --git a/sys/arm/include/cpu.h b/sys/arm/include/cpu.h
new file mode 100644
index 0000000..2130618
--- /dev/null
+++ b/sys/arm/include/cpu.h
@@ -0,0 +1,49 @@
+/* $NetBSD: cpu.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */
+/* $FreeBSD$ */
+
+#ifndef MACHINE_CPU_H
+#define MACHINE_CPU_H
+
+#include <machine/armreg.h>
+
+void cpu_halt(void);
+void swi_vm(void *);
+
+static __inline uint64_t
+get_cyclecount(void)
+{
+ return (0);
+}
+
+#define CPU_CONSDEV 1
+#define CPU_ADJKERNTZ 2 /* int: timezone offset (seconds) */
+#define CPU_DISRTCSET 3 /* int: disable resettodr() call */
+#define CPU_BOOTINFO 4 /* struct: bootinfo */
+#define CPU_WALLCLOCK 5 /* int: indicates wall CMOS clock */
+#define CPU_MAXID 6 /* number of valid machdep ids */
+
+
+#define CLKF_USERMODE(frame) (((frame)->if_spsr & PSR_MODE) == PSR_USR32_MODE)
+
+#define TRAPF_USERMODE(frame) (((frame)->tf_spsr & PSR_MODE) == PSR_USR32_MODE)
+#define CLKF_PC(frame) ((frame)->if_pc)
+
+#define TRAPF_PC(tfp) ((tfp)->tf_pc)
+
+#define cpu_getstack(td) ((td)->td_frame->tf_usr_sp)
+#define cpu_setstack(td, sp) ((td)->td_frame->tf_usr_sp = (sp))
+
+#define ARM_NVEC 8
+#define ARM_VEC_ALL 0xffffffff
+
+extern vm_offset_t vector_page;
+
+void fork_trampoline(void);
+void *initarm(void *, void *);
+void arm_vector_init(vm_offset_t, int);
+void identify_arm_cpu(void);
+
+extern char btext[];
+extern char etext[];
+int badaddr_read (void *, size_t, void *);
+#endif /* !MACHINE_CPU_H */
diff --git a/sys/arm/include/cpuconf.h b/sys/arm/include/cpuconf.h
new file mode 100644
index 0000000..1b7d81b
--- /dev/null
+++ b/sys/arm/include/cpuconf.h
@@ -0,0 +1,171 @@
+/* $NetBSD: cpuconf.h,v 1.8 2003/09/06 08:55:42 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _MACHINE_CPUCONF_H_
+#define _MACHINE_CPUCONF_H_
+
+/*
+ * IF YOU CHANGE THIS FILE, MAKE SURE TO UPDATE THE DEFINITION OF
+ * "PMAP_NEEDS_PTE_SYNC" IN <arm/arm32/pmap.h> FOR THE CPU TYPE
+ * YOU ARE ADDING SUPPORT FOR.
+ */
+
+/*
+ * Step 1: Count the number of CPU types configured into the kernel.
+ */
+#if defined(_KERNEL_OPT)
+#define CPU_NTYPES (defined(CPU_ARM2) + defined(CPU_ARM250) + \
+ defined(CPU_ARM3) + \
+ defined(CPU_ARM6) + defined(CPU_ARM7) + \
+ defined(CPU_ARM7TDMI) + \
+ defined(CPU_ARM8) + defined(CPU_ARM9) + \
+ defined(CPU_ARM10) + \
+ defined(CPU_SA110) + defined(CPU_SA1100) + \
+ defined(CPU_SA1110) + \
+ defined(CPU_IXP12X0) + \
+ defined(CPU_XSCALE_80200) + \
+ defined(CPU_XSCALE_80321) + \
+ defined(CPU_XSCALE_PXA2X0) + \
+ defined(CPU_XSCALE_IXP425))
+#else
+#define CPU_NTYPES 2
+#endif /* _KERNEL_OPT */
+
+/*
+ * Step 2: Determine which ARM architecture versions are configured.
+ */
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
+#define ARM_ARCH_2 1
+#else
+#define ARM_ARCH_2 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM6) || defined(CPU_ARM7))
+#define ARM_ARCH_3 1
+#else
+#define ARM_ARCH_3 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
+ defined(CPU_ARM10) || defined(CPU_SA110) || defined(CPU_SA1100) || \
+ defined(CPU_SA1110) || defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425))
+#define ARM_ARCH_4 1
+#else
+#define ARM_ARCH_4 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0))
+#define ARM_ARCH_5 1
+#else
+#define ARM_ARCH_5 0
+#endif
+
+#define ARM_NARCH (ARM_ARCH_2 + ARM_ARCH_3 + ARM_ARCH_4 + ARM_ARCH_5)
+#if ARM_NARCH == 0
+#error ARM_NARCH is 0
+#endif
+
+/*
+ * Step 3: Define which MMU classes are configured:
+ *
+ * ARM_MMU_MEMC Prehistoric, external memory controller
+ * and MMU for ARMv2 CPUs.
+ *
+ * ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
+ *
+ * ARM_MMU_SA1 StrongARM SA-1 MMU. Compatible with generic
+ * ARM MMU, but has no write-through cache mode.
+ *
+ * ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
+ * MMU, but also has several extensions which
+ * require different PTE layout to use.
+ */
+#if (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
+#define ARM_MMU_MEMC 1
+#else
+#define ARM_MMU_MEMC 0
+#endif
+
+#if (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+ defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM10))
+#define ARM_MMU_GENERIC 1
+#else
+#define ARM_MMU_GENERIC 0
+#endif
+
+#if (defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||\
+ defined(CPU_IXP12X0))
+#define ARM_MMU_SA1 1
+#else
+#define ARM_MMU_SA1 0
+#endif
+
+#if (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425))
+#define ARM_MMU_XSCALE 1
+#else
+#define ARM_MMU_XSCALE 0
+#endif
+
+#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + \
+ ARM_MMU_SA1 + ARM_MMU_XSCALE)
+#if ARM_NMMUS == 0
+#error ARM_NMMUS is 0
+#endif
+
+/*
+ * Step 4: Define features that may be present on a subset of CPUs
+ *
+ * ARM_XSCALE_PMU Performance Monitoring Unit on 80200 and 80321
+ */
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321))
+#define ARM_XSCALE_PMU 1
+#else
+#define ARM_XSCALE_PMU 0
+#endif
+
+#endif /* _MACHINE_CPUCONF_H_ */
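
The net effect of these counts is that MD code can dispatch on MMU class at
compile time when only one class is configured, and at run time otherwise.
A hypothetical fragment in the spirit of the PMAP_NEEDS_PTE_SYNC note above
(pmap_needs_pte_sync and do_pte_sync are illustrative names, not declared
in this header):

	#include <machine/cpuconf.h>

	static void
	pte_sync_if_needed(void)
	{
	#if ARM_MMU_XSCALE == 1 && ARM_NMMUS == 1
		/* XScale-only kernel: the test folds away entirely. */
		do_pte_sync();
	#elif ARM_NMMUS > 1
		if (pmap_needs_pte_sync)	/* decided at boot */
			do_pte_sync();
	#endif
	}
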
diff --git a/sys/arm/include/cpufunc.h b/sys/arm/include/cpufunc.h
new file mode 100644
index 0000000..8388046
--- /dev/null
+++ b/sys/arm/include/cpufunc.h
@@ -0,0 +1,531 @@
+/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc.h
+ *
+ * Prototypes for cpu, mmu and tlb related functions.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+#include <machine/cpuconf.h>
+
+void disable_intr(void);
+void enable_intr(void);
+
+static __inline register_t
+intr_disable(void)
+{
+ int s = 0, tmp;
+
+ __asm __volatile("mrs %0, cpsr; \
+ orr %1, %0, %2;\
+ msr cpsr_all, %1;"
+ : "=r" (s), "=r" (tmp)
+ : "I" (I32_bit)
+ : "cc");
+ return (s);
+}
+
+static __inline void
+intr_restore(int s)
+{
+ __asm __volatile("msr cpsr_all, %0 "
+ : /* no output */
+ : "r" (s)
+ : "cc");
+}
+
+struct cpu_functions {
+
+ /* CPU functions */
+
+ u_int (*cf_id) (void);
+ void (*cf_cpwait) (void);
+
+ /* MMU functions */
+
+ u_int (*cf_control) (u_int bic, u_int eor);
+ void (*cf_domains) (u_int domains);
+ void (*cf_setttb) (u_int ttb);
+ u_int (*cf_faultstatus) (void);
+ u_int (*cf_faultaddress) (void);
+
+ /* TLB functions */
+
+ void (*cf_tlb_flushID) (void);
+ void (*cf_tlb_flushID_SE) (u_int va);
+ void (*cf_tlb_flushI) (void);
+ void (*cf_tlb_flushI_SE) (u_int va);
+ void (*cf_tlb_flushD) (void);
+ void (*cf_tlb_flushD_SE) (u_int va);
+
+ /*
+ * Cache operations:
+ *
+ * We define the following primitives:
+ *
+ * icache_sync_all Synchronize I-cache
+ * icache_sync_range Synchronize I-cache range
+ *
+ * dcache_wbinv_all Write-back and Invalidate D-cache
+ * dcache_wbinv_range Write-back and Invalidate D-cache range
+ * dcache_inv_range Invalidate D-cache range
+ * dcache_wb_range Write-back D-cache range
+ *
+ * idcache_wbinv_all Write-back and Invalidate D-cache,
+ * Invalidate I-cache
+ * idcache_wbinv_range Write-back and Invalidate D-cache,
+ * Invalidate I-cache range
+ *
+ * Note that the ARM term for "write-back" is "clean". We use
+ * the term "write-back" since it's a more common way to describe
+ * the operation.
+ *
+ * There are some rules that must be followed:
+ *
+ * I-cache Synch (all or range):
+ * The goal is to synchronize the instruction stream,
+ * so you may need to write back dirty D-cache blocks
+ * first. If a range is requested, and you can't
+ * synchronize just a range, you have to hit the whole
+ * thing.
+ *
+ * D-cache Write-Back and Invalidate range:
+ * If you can't WB-Inv a range, you must WB-Inv the
+ * entire D-cache.
+ *
+ * D-cache Invalidate:
+ * If you can't Inv the D-cache, you must Write-Back
+ * and Invalidate. Code that uses this operation
+ * MUST NOT assume that the D-cache will not be written
+ * back to memory.
+ *
+ * D-cache Write-Back:
+ * If you can't Write-back without doing an Inv,
+ * that's fine. Then treat this as a WB-Inv.
+ * Skipping the invalidate is merely an optimization.
+ *
+ * All operations:
+ * Valid virtual addresses must be passed to each
+ * cache operation.
+ */
+ void (*cf_icache_sync_all) (void);
+ void (*cf_icache_sync_range) (vm_offset_t, vm_size_t);
+
+ void (*cf_dcache_wbinv_all) (void);
+ void (*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
+ void (*cf_dcache_inv_range) (vm_offset_t, vm_size_t);
+ void (*cf_dcache_wb_range) (vm_offset_t, vm_size_t);
+
+ void (*cf_idcache_wbinv_all) (void);
+ void (*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
+
+ /* Other functions */
+
+ void (*cf_flush_prefetchbuf) (void);
+ void (*cf_drain_writebuf) (void);
+ void (*cf_flush_brnchtgt_C) (void);
+ void (*cf_flush_brnchtgt_E) (u_int va);
+
+ void (*cf_sleep) (int mode);
+
+ /* Soft functions */
+
+ int (*cf_dataabt_fixup) (void *arg);
+ int (*cf_prefetchabt_fixup) (void *arg);
+
+ void (*cf_context_switch) (void);
+
+ void (*cf_setup) (char *string);
+};
+
+extern struct cpu_functions cpufuncs;
+extern u_int cputype;
+
+#define cpu_id() cpufuncs.cf_id()
+#define cpu_cpwait() cpufuncs.cf_cpwait()
+
+#define cpu_control(c, e) cpufuncs.cf_control(c, e)
+#define cpu_domains(d) cpufuncs.cf_domains(d)
+#define cpu_setttb(t) cpufuncs.cf_setttb(t)
+#define cpu_faultstatus() cpufuncs.cf_faultstatus()
+#define cpu_faultaddress() cpufuncs.cf_faultaddress()
+
+#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
+#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
+#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
+#define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
+#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
+#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
+
+#define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
+#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
+
+#define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all()
+#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
+#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
+#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
+
+#define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
+#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
+
+#define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
+#define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
+#define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
+#define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
+
+#define cpu_sleep(m) cpufuncs.cf_sleep(m)
+
+#define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
+#define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
+#define ABORT_FIXUP_OK 0 /* fixup succeeded */
+#define ABORT_FIXUP_FAILED 1 /* fixup failed */
+#define ABORT_FIXUP_RETURN 2 /* abort handler should return */
+
+#define cpu_setup(a) cpufuncs.cf_setup(a)
+
+int set_cpufuncs (void);
+#define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */
+#define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
+
+void cpufunc_nullop (void);
+int cpufunc_null_fixup (void *);
+int early_abort_fixup (void *);
+int late_abort_fixup (void *);
+u_int cpufunc_id (void);
+u_int cpufunc_control (u_int clear, u_int bic);
+void cpufunc_domains (u_int domains);
+u_int cpufunc_faultstatus (void);
+u_int cpufunc_faultaddress (void);
+
+#ifdef CPU_ARM3
+u_int arm3_control (u_int clear, u_int bic);
+void arm3_cache_flush (void);
+#endif /* CPU_ARM3 */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7)
+void arm67_setttb (u_int ttb);
+void arm67_tlb_flush (void);
+void arm67_tlb_purge (u_int va);
+void arm67_cache_flush (void);
+void arm67_context_switch (void);
+#endif /* CPU_ARM6 || CPU_ARM7 */
+
+#ifdef CPU_ARM6
+void arm6_setup (char *string);
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+void arm7_setup (char *string);
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+int arm7_dataabt_fixup (void *arg);
+void arm7tdmi_setup (char *string);
+void arm7tdmi_setttb (u_int ttb);
+void arm7tdmi_tlb_flushID (void);
+void arm7tdmi_tlb_flushID_SE (u_int va);
+void arm7tdmi_cache_flushID (void);
+void arm7tdmi_context_switch (void);
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+void arm8_setttb (u_int ttb);
+void arm8_tlb_flushID (void);
+void arm8_tlb_flushID_SE (u_int va);
+void arm8_cache_flushID (void);
+void arm8_cache_flushID_E (u_int entry);
+void arm8_cache_cleanID (void);
+void arm8_cache_cleanID_E (u_int entry);
+void arm8_cache_purgeID (void);
+void arm8_cache_purgeID_E (u_int entry);
+
+void arm8_cache_syncI (void);
+void arm8_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+
+void arm8_context_switch (void);
+
+void arm8_setup (char *string);
+
+u_int arm8_clock_config (u_int, u_int);
+#endif
+
+#ifdef CPU_SA110
+void sa110_setup (char *string);
+void sa110_context_switch (void);
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa11x0_drain_readbuf (void);
+
+void sa11x0_context_switch (void);
+void sa11x0_cpu_sleep (int mode);
+
+void sa11x0_setup (char *string);
+#endif
+
+#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa1_setttb (u_int ttb);
+
+void sa1_tlb_flushID_SE (u_int va);
+
+void sa1_cache_flushID (void);
+void sa1_cache_flushI (void);
+void sa1_cache_flushD (void);
+void sa1_cache_flushD_SE (u_int entry);
+
+void sa1_cache_cleanID (void);
+void sa1_cache_cleanD (void);
+void sa1_cache_cleanD_E (u_int entry);
+
+void sa1_cache_purgeID (void);
+void sa1_cache_purgeID_E (u_int entry);
+void sa1_cache_purgeD (void);
+void sa1_cache_purgeD_E (u_int entry);
+
+void sa1_cache_syncI (void);
+void sa1_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+
+#endif
+
+#ifdef CPU_ARM9
+void arm9_setttb (u_int);
+
+void arm9_tlb_flushID_SE (u_int va);
+
+void arm9_cache_flushID (void);
+void arm9_cache_flushID_SE (u_int);
+void arm9_cache_flushI (void);
+void arm9_cache_flushI_SE (u_int);
+void arm9_cache_flushD (void);
+void arm9_cache_flushD_SE (u_int);
+
+void arm9_cache_cleanID (void);
+
+void arm9_cache_syncI (void);
+void arm9_cache_flushID_rng (vm_offset_t, vm_size_t);
+void arm9_cache_flushD_rng (vm_offset_t, vm_size_t);
+void arm9_cache_syncI_rng (vm_offset_t, vm_size_t);
+
+void arm9_context_switch (void);
+
+void arm9_setup (char *string);
+#endif
+
+#ifdef CPU_ARM10
+void arm10_setttb (u_int);
+
+void arm10_tlb_flushID_SE (u_int);
+void arm10_tlb_flushI_SE (u_int);
+
+void arm10_icache_sync_all (void);
+void arm10_icache_sync_range (vm_offset_t, vm_size_t);
+
+void arm10_dcache_wbinv_all (void);
+void arm10_dcache_wbinv_range (vm_offset_t, vm_size_t);
+void arm10_dcache_inv_range (vm_offset_t, vm_size_t);
+void arm10_dcache_wb_range (vm_offset_t, vm_size_t);
+
+void arm10_idcache_wbinv_all (void);
+void arm10_idcache_wbinv_range (vm_offset_t, vm_size_t);
+
+void arm10_context_switch (void);
+
+void arm10_setup (char *string);
+
+extern unsigned arm10_dcache_sets_max;
+extern unsigned arm10_dcache_sets_inc;
+extern unsigned arm10_dcache_index_max;
+extern unsigned arm10_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
+ defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+
+void armv4_tlb_flushID (void);
+void armv4_tlb_flushI (void);
+void armv4_tlb_flushD (void);
+void armv4_tlb_flushD_SE (u_int va);
+
+void armv4_drain_writebuf (void);
+#endif
+
+#if defined(CPU_IXP12X0)
+void ixp12x0_drain_readbuf (void);
+void ixp12x0_context_switch (void);
+void ixp12x0_setup (char *string);
+#endif
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+void xscale_cpwait (void);
+
+void xscale_cpu_sleep (int mode);
+
+u_int xscale_control (u_int clear, u_int bic);
+
+void xscale_setttb (u_int ttb);
+
+void xscale_tlb_flushID_SE (u_int va);
+
+void xscale_cache_flushID (void);
+void xscale_cache_flushI (void);
+void xscale_cache_flushD (void);
+void xscale_cache_flushD_SE (u_int entry);
+
+void xscale_cache_cleanID (void);
+void xscale_cache_cleanD (void);
+void xscale_cache_cleanD_E (u_int entry);
+
+void xscale_cache_clean_minidata (void);
+
+void xscale_cache_purgeID (void);
+void xscale_cache_purgeID_E (u_int entry);
+void xscale_cache_purgeD (void);
+void xscale_cache_purgeD_E (u_int entry);
+
+void xscale_cache_syncI (void);
+void xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_flushD_rng (vm_offset_t start, vm_size_t end);
+
+void xscale_context_switch (void);
+
+void xscale_setup (char *string);
+#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
+
+#define tlb_flush cpu_tlb_flushID
+#define setttb cpu_setttb
+#define drain_writebuf cpu_drain_writebuf
+
+/*
+ * Macros for manipulating CPU interrupts
+ */
+static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));
+
+static __inline u_int32_t
+__set_cpsr_c(u_int bic, u_int eor)
+{
+ u_int32_t tmp, ret;
+
+ __asm __volatile(
+ "mrs %0, cpsr\n" /* Get the CPSR */
+ "bic %1, %0, %2\n" /* Clear bits */
+ "eor %1, %1, %3\n" /* XOR bits */
+ "msr cpsr_c, %1\n" /* Set the control field of CPSR */
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (bic), "r" (eor));
+
+ return ret;
+}
+
+#define disable_interrupts(mask) \
+ (__set_cpsr_c((mask) & (I32_bit | F32_bit), \
+ (mask) & (I32_bit | F32_bit)))
+
+#define enable_interrupts(mask) \
+ (__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))
+
+#define restore_interrupts(old_cpsr) \
+ (__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
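+
+/*
+ * Illustrative usage (a sketch, not part of the interface): the caller
+ * saves the previous CPSR value returned by disable_interrupts() and
+ * hands it back to restore_interrupts(), so nested critical regions do
+ * not unconditionally re-enable interrupts:
+ *
+ *	u_int savedints;
+ *
+ *	savedints = disable_interrupts(I32_bit | F32_bit);
+ *	... code that must run with IRQs and FIQs masked ...
+ *	restore_interrupts(savedints);
+ */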
+
+/* Functions to manipulate the CPSR. */
+u_int SetCPSR(u_int bic, u_int eor);
+u_int GetCPSR(void);
+
+/*
+ * Functions to manipulate cpu r13
+ * (in arm/arm/setstack.s)
+ */
+
+void set_stackptr __P((u_int mode, u_int address));
+u_int get_stackptr __P((u_int mode));
+
+/*
+ * Miscellany
+ */
+
+int get_pc_str_offset __P((void));
+
+/*
+ * CPU functions from locore.S
+ */
+
+void cpu_reset __P((void)) __attribute__((__noreturn__));
+
+/*
+ * Cache info variables.
+ */
+
+/* PRIMARY CACHE VARIABLES */
+extern int arm_picache_size;
+extern int arm_picache_line_size;
+extern int arm_picache_ways;
+
+extern int arm_pdcache_size; /* and unified */
+extern int arm_pdcache_line_size;
+extern int arm_pdcache_ways;
+
+extern int arm_pcache_type;
+extern int arm_pcache_unified;
+
+extern int arm_dcache_align;
+extern int arm_dcache_align_mask;
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_H_ */
+
+/* End of cpufunc.h */
diff --git a/sys/arm/include/critical.h b/sys/arm/include/critical.h
new file mode 100644
index 0000000..6d3d46d
--- /dev/null
+++ b/sys/arm/include/critical.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ * cpu_critical_enter() - inlined
+ * cpu_critical_exit() - inlined
+ * cpu_critical_fork_exit() - prototyped
+ * related support functions residing
+ * in <arch>/<arch>/critical.c - prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef MACHINE_CRITICAL_H
+#define MACHINE_CRITICAL_H
+
+void cpu_critical_fork_exit(void);
+
+static __inline void
+cpu_critical_enter(void)
+{
+ curthread->td_md.md_savecrit = disable_interrupts(I32_bit | F32_bit);
+}
+
+static __inline void
+cpu_critical_exit(void)
+{
+ restore_interrupts(curthread->td_md.md_savecrit);
+}
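+
+/*
+ * Illustrative note (an assumption about the MI layer, not stated in
+ * this file): the MI critical_enter()/critical_exit() routines invoke
+ * these inlines only at the outermost nesting level, so the single
+ * md_savecrit slot per thread is enough to hold the saved CPSR bits.
+ */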
+
+#endif /* MACHINE_CRITICAL_H */
diff --git a/sys/arm/include/db_machdep.h b/sys/arm/include/db_machdep.h
new file mode 100644
index 0000000..dc3e7ca
--- /dev/null
+++ b/sys/arm/include/db_machdep.h
@@ -0,0 +1,75 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ *
+ * from: FreeBSD: src/sys/i386/include/db_machdep.h,v 1.16 1999/10/04
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DB_MACHDEP_H_
+#define _MACHINE_DB_MACHDEP_H_
+
+#include <machine/frame.h>
+#include <machine/trap.h>
+#include <machine/armreg.h>
+
+#define BYTE_MSF (1)
+
+#define T_BREAKPOINT (1)
+typedef vm_offset_t db_addr_t;
+typedef int db_expr_t;
+
+typedef struct trapframe db_regs_t;
+extern db_regs_t ddb_regs;
+#define DDB_REGS (&ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_pc)
+
+#define BKPT_INST (KERNEL_BREAKPOINT)
+#define BKPT_SIZE (INSN_SIZE)
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define BKPT_SKIP do { \
+ ddb_regs.tf_pc -= BKPT_SIZE; \
+} while (0)
+
+#define db_clear_single_step(regs)
+#define db_set_single_step(regs)
+
+#define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code) (0)
+
+#define inst_trap_return(ins) (0)
+#define inst_return(ins) (0)
+#define inst_call(ins) (0)
+#define inst_load(ins) (0)
+#define inst_store(ins) (0)
+
+#define DB_SMALL_VALUE_MAX (0x7fffffff)
+#define DB_SMALL_VALUE_MIN (-0x40001)
+
+#define DB_ELFSIZE 64
+
+int db_validate_address(vm_offset_t);
+#endif /* !_MACHINE_DB_MACHDEP_H_ */
diff --git a/sys/arm/include/disassem.h b/sys/arm/include/disassem.h
new file mode 100644
index 0000000..05fdb89
--- /dev/null
+++ b/sys/arm/include/disassem.h
@@ -0,0 +1,54 @@
+/* $NetBSD: disassem.h,v 1.4 2001/03/04 04:15:58 matt Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Define the interface structure required by the disassembler.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DISASSEM_H_
+#define _MACHINE_DISASSEM_H_
+typedef struct {
+ u_int (*di_readword)(u_int);
+ void (*di_printaddr)(u_int);
+ void (*di_printf)(const char *, ...) __printflike(1, 2);
+} disasm_interface_t;
+
+/* Prototypes for callable functions */
+
+vm_offset_t disasm(const disasm_interface_t *, vm_offset_t, int);
+void disassemble(u_int);
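+
+/*
+ * Illustrative sketch of a client (the my_* names are hypothetical):
+ * the caller supplies the three callbacks and advances one instruction
+ * at a time using the address returned by disasm():
+ *
+ *	static const disasm_interface_t my_di = {
+ *		my_readword, my_printaddr, my_printf
+ *	};
+ *
+ *	loc = disasm(&my_di, loc, 0);
+ */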
+
+#endif /* !_MACHINE_DISASSEM_H_ */
diff --git a/sys/arm/include/fiq.h b/sys/arm/include/fiq.h
new file mode 100644
index 0000000..acc1ed8
--- /dev/null
+++ b/sys/arm/include/fiq.h
@@ -0,0 +1,71 @@
+/* $NetBSD: fiq.h,v 1.1 2001/12/20 01:20:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _MACHINE_FIQ_H_
+#define _MACHINE_FIQ_H_
+
+#include <sys/queue.h>
+
+struct fiqregs {
+ u_int fr_r8; /* FIQ mode r8 */
+ u_int fr_r9; /* FIQ mode r9 */
+ u_int fr_r10; /* FIQ mode r10 */
+ u_int fr_r11; /* FIQ mode r11 */
+ u_int fr_r12; /* FIQ mode r12 */
+ u_int fr_r13; /* FIQ mode r13 */
+};
+
+struct fiqhandler {
+ TAILQ_ENTRY(fiqhandler) fh_list;/* link in the FIQ handler stack */
+ void *fh_func; /* FIQ handler routine */
+ size_t fh_size; /* size of FIQ handler */
+ int fh_flags; /* flags; see below */
+ struct fiqregs *fh_regs; /* pointer to regs structure */
+};
+
+#define FH_CANPUSH 0x01 /* can push this handler out of the way */
+
+int fiq_claim(struct fiqhandler *);
+void fiq_release(struct fiqhandler *);
+
+void fiq_getregs(struct fiqregs *);
+void fiq_setregs(struct fiqregs *);
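+
+/*
+ * Illustrative sketch (the my_* names are hypothetical): a driver
+ * points fh_func at a self-contained FIQ routine, claims the FIQ, and
+ * releases it when done:
+ *
+ *	static struct fiqhandler my_fh = {
+ *		.fh_func = my_fiq_handler,
+ *		.fh_size = my_fiq_handler_size,
+ *		.fh_flags = FH_CANPUSH,
+ *		.fh_regs = &my_fiqregs,
+ *	};
+ *
+ *	if (fiq_claim(&my_fh) == 0)
+ *		... FIQ installed; later: fiq_release(&my_fh); ...
+ */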
+
+#endif /* _MACHINE_FIQ_H_ */
diff --git a/sys/arm/include/float.h b/sys/arm/include/float.h
new file mode 100644
index 0000000..2cbdcaf
--- /dev/null
+++ b/sys/arm/include/float.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)float.h 7.1 (Berkeley) 5/8/90
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_ 1
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS 1 /* FP addition rounds to nearest */
+#define FLT_EVAL_METHOD (-1) /* evaluation method is indeterminable */
+#define DECIMAL_DIG 21 /* max precision in decimal digits */
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG 64
+#define LDBL_EPSILON 1.0842021724855044340E-19L
+#define LDBL_DIG 18
+#define LDBL_MIN_EXP (-16381)
+#define LDBL_MIN 3.3621031431120935063E-4932L
+#define LDBL_MIN_10_EXP (-4931)
+#define LDBL_MAX_EXP 16384
+#define LDBL_MAX 1.1897314953572317650E+4932L
+#define LDBL_MAX_10_EXP 4932
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/sys/arm/include/floatingpoint.h b/sys/arm/include/floatingpoint.h
new file mode 100644
index 0000000..fd328a9
--- /dev/null
+++ b/sys/arm/include/floatingpoint.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 1993 Andrew Moore, Talke Studio
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#) floatingpoint.h 1.0 (Berkeley) 9/23/93
+ * $FreeBSD$
+ */
+
+#ifndef _FLOATINGPOINT_H_
+#define _FLOATINGPOINT_H_
+
+#include <machine/ieeefp.h>
+
+#endif /* !_FLOATINGPOINT_H_ */
diff --git a/sys/arm/include/fp.h b/sys/arm/include/fp.h
new file mode 100644
index 0000000..0626733
--- /dev/null
+++ b/sys/arm/include/fp.h
@@ -0,0 +1,88 @@
+/* $NetBSD: fp.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * fp.h
+ *
+ * FP info
+ *
+ * Created : 10/10/95
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FP_H_
+#define _MACHINE_FP_H_
+
+/*
+ * An extended precision floating point number
+ */
+
+typedef struct fp_extended_precision {
+ u_int32_t fp_exponent;
+ u_int32_t fp_mantissa_hi;
+ u_int32_t fp_mantissa_lo;
+} fp_extended_precision_t;
+
+typedef struct fp_extended_precision fp_reg_t;
+
+/*
+ * Information about the FPE-SP state that is stored in the pcb
+ *
+ * This needs to move and be hidden from userland.
+ */
+
+struct fpe_sp_state {
+ unsigned int fp_flags;
+ unsigned int fp_sr;
+ unsigned int fp_cr;
+ fp_reg_t fp_registers[16];
+};
+
+/*
+ * Type for a saved FP context, if we want to translate the context to a
+ * user-readable form
+ */
+
+typedef struct {
+ u_int32_t fpsr;
+ fp_extended_precision_t regs[8];
+} fp_state_t;
+
+#endif /* _MACHINE_FP_H_ */
+
+/* End of fp.h */
diff --git a/sys/arm/include/frame.h b/sys/arm/include/frame.h
new file mode 100644
index 0000000..6250bf5
--- /dev/null
+++ b/sys/arm/include/frame.h
@@ -0,0 +1,190 @@
+/* $NetBSD: frame.h,v 1.5 2002/10/19 00:10:54 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * frame.h
+ *
+ * Stack frames structures
+ *
+ * Created : 30/09/94
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _MACHINE_FRAME_H_
+#define _MACHINE_FRAME_H_
+
+#ifndef _LOCORE
+
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+
+
+/*
+ * Trap frame. Pushed onto the kernel stack on a trap (synchronous exception).
+ */
+
+typedef struct trapframe {
+ register_t tf_spsr; /* Zero on arm26 */
+ register_t tf_r0;
+ register_t tf_r1;
+ register_t tf_r2;
+ register_t tf_r3;
+ register_t tf_r4;
+ register_t tf_r5;
+ register_t tf_r6;
+ register_t tf_r7;
+ register_t tf_r8;
+ register_t tf_r9;
+ register_t tf_r10;
+ register_t tf_r11;
+ register_t tf_r12;
+ register_t tf_usr_sp;
+ register_t tf_usr_lr;
+ register_t tf_svc_sp; /* Not used on arm26 */
+ register_t tf_svc_lr; /* Not used on arm26 */
+ register_t tf_pc;
+} trapframe_t;
+
+/* Register numbers */
+#define tf_r13 tf_usr_sp
+#define tf_r14 tf_usr_lr
+#define tf_r15 tf_pc
+
+/*
+ * Scheduler activations upcall frame.  Pushed onto user stack before
+ * calling an SA upcall.
+ */
+
+struct saframe {
+#if 0 /* in registers on entry to upcall */
+ int sa_type;
+ struct sa_t ** sa_sas;
+ int sa_events;
+ int sa_interrupted;
+#endif
+ void * sa_arg;
+};
+
+/*
+ * Signal frame.  Pushed onto user stack before calling sigcode.
+ */
+
+/* The pointers are used in the trampoline code to locate the ucontext. */
+struct sigframe {
+ siginfo_t sf_si; /* actual saved siginfo */
+ ucontext_t sf_uc; /* actual saved ucontext */
+};
+
+/*
+ * System stack frames.
+ */
+
+
+typedef struct irqframe {
+ unsigned int if_spsr;
+ unsigned int if_r0;
+ unsigned int if_r1;
+ unsigned int if_r2;
+ unsigned int if_r3;
+ unsigned int if_r4;
+ unsigned int if_r5;
+ unsigned int if_r6;
+ unsigned int if_r7;
+ unsigned int if_r8;
+ unsigned int if_r9;
+ unsigned int if_r10;
+ unsigned int if_r11;
+ unsigned int if_r12;
+ unsigned int if_usr_sp;
+ unsigned int if_usr_lr;
+ unsigned int if_svc_sp;
+ unsigned int if_svc_lr;
+ unsigned int if_pc;
+} irqframe_t;
+
+typedef struct clockframe {
+ unsigned int if_spsr;
+ unsigned int if_r0;
+ unsigned int if_r1;
+ unsigned int if_r2;
+ unsigned int if_r3;
+ unsigned int if_r4;
+ unsigned int if_r5;
+ unsigned int if_r6;
+ unsigned int if_r7;
+ unsigned int if_r8;
+ unsigned int if_r9;
+ unsigned int if_r10;
+ unsigned int if_r11;
+ unsigned int if_r12;
+ unsigned int if_usr_sp;
+ unsigned int if_usr_lr;
+ unsigned int if_svc_sp;
+ unsigned int if_svc_lr;
+ unsigned int if_pc;
+} clockframe_t;
+
+int kdb_trap(int, struct trapframe *);
+
+/*
+ * Switch frame
+ */
+
+struct switchframe {
+ u_int sf_r4;
+ u_int sf_r5;
+ u_int sf_r6;
+ u_int sf_r7;
+ u_int sf_pc;
+};
+
+/*
+ * Stack frame. Used during stack traces (db_trace.c)
+ */
+struct frame {
+ u_int fr_fp;
+ u_int fr_sp;
+ u_int fr_lr;
+ u_int fr_pc;
+};
+
+#endif /* !_LOCORE */
+
+#endif /* _MACHINE_FRAME_H_ */
+
+/* End of frame.h */
diff --git a/sys/arm/include/ieee.h b/sys/arm/include/ieee.h
new file mode 100644
index 0000000..568bae3
--- /dev/null
+++ b/sys/arm/include/ieee.h
@@ -0,0 +1,154 @@
+/* $NetBSD: ieee754.h,v 1.4 2003/10/27 02:30:26 simonb Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ieee.h 8.1 (Berkeley) 6/11/93
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * NOTICE: This is not a standalone file. To use it, #include it in
+ * your port's ieee.h header.
+ */
+
+#include <machine/endian.h>
+
+/*
+ * <sys/ieee754.h> defines the layout of IEEE 754 floating point types.
+ * Only single-precision and double-precision types are defined here;
+ * extended types, if available, are defined in the machine-dependent
+ * header.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ * Note that 1.0 x 2^k == 0.1 x 2^(k+1) and that denorms are represented
+ * as fractions that look like 0.fffff x 2^(-exp_bias+1).  This means
+ * that the number 0.10000 x 2^(-126), for instance, is the same as the
+ * normalized float 1.0 x 2^(-127).  Thus, to represent 2^(-128), we
+ * need one leading zero in the fraction; to represent 2^(-129), we need
+ * two, and so on.  This implies that the smallest denormalized number
+ * is 2^(-exp_bias-fracbits+1) for whichever format we are talking
+ * about: for single precision, for instance, we get
+ * .00000000000000000000001 x 2^(-126), or 1.0 x 2^(-149), and
+ * -149 == -127 - 23 + 1.
+ */
+#define SNG_EXPBITS 8
+#define SNG_FRACBITS 23
+
+#define DBL_EXPBITS 11
+#define DBL_FRACBITS 52
+
+struct ieee_single {
+#if _BYTE_ORDER == _BIG_ENDIAN
+ u_int sng_sign:1;
+ u_int sng_exp:8;
+ u_int sng_frac:23;
+#else
+ u_int sng_frac:23;
+ u_int sng_exp:8;
+ u_int sng_sign:1;
+#endif
+};
+
+struct ieee_double {
+#if _BYTE_ORDER == _BIG_ENDIAN
+ u_int dbl_sign:1;
+ u_int dbl_exp:11;
+ u_int dbl_frach:20;
+ u_int dbl_fracl;
+#else
+ u_int dbl_fracl;
+ u_int dbl_frach:20;
+ u_int dbl_exp:11;
+ u_int dbl_sign:1;
+#endif
+};
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define SNG_EXP_INFNAN 255
+#define DBL_EXP_INFNAN 2047
+
+#if 0
+#define SNG_QUIETNAN (1 << 22)
+#define DBL_QUIETNAN (1 << 19)
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define SNG_EXP_BIAS 127
+#define DBL_EXP_BIAS 1023
+
+/*
+ * Convenience data structures.
+ */
+union ieee_single_u {
+ float sngu_f;
+ struct ieee_single sngu_sng;
+};
+
+union ieee_double_u {
+ double dblu_d;
+ struct ieee_double dblu_dbl;
+};
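+
+/*
+ * Illustrative sketch: the unions let C code classify a value by its
+ * bit fields, e.g. for a float f:
+ *
+ *	union ieee_single_u u;
+ *
+ *	u.sngu_f = f;
+ *	if (u.sngu_sng.sng_exp == SNG_EXP_INFNAN)
+ *		... f is Inf (frac == 0) or NaN (frac != 0) ...
+ *	else if (u.sngu_sng.sng_exp == 0 && u.sngu_sng.sng_frac != 0)
+ *		... f is subnormal ...
+ */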
diff --git a/sys/arm/include/ieeefp.h b/sys/arm/include/ieeefp.h
new file mode 100644
index 0000000..0d08686
--- /dev/null
+++ b/sys/arm/include/ieeefp.h
@@ -0,0 +1,51 @@
+/* $NetBSD: ieeefp.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+/* $FreeBSD$ */
+/*
+ * Based on ieeefp.h written by J.T. Conklin, Apr 28, 1995
+ * Public domain.
+ */
+
+#ifndef _MACHINE_IEEEFP_H_
+#define _MACHINE_IEEEFP_H_
+
+/* FP exception codes */
+#define FP_EXCEPT_INV 0
+#define FP_EXCEPT_DZ 1
+#define FP_EXCEPT_OFL 2
+#define FP_EXCEPT_UFL 3
+#define FP_EXCEPT_IMP 4
+
+/* Exception type (used by fpsetmask() et al.) */
+
+typedef int fp_except;
+
+/* Bit defines for fp_except */
+
+#define FP_X_INV (1 << FP_EXCEPT_INV) /* invalid operation exception */
+#define FP_X_DZ (1 << FP_EXCEPT_DZ) /* divide-by-zero exception */
+#define FP_X_OFL (1 << FP_EXCEPT_OFL) /* overflow exception */
+#define FP_X_UFL (1 << FP_EXCEPT_UFL) /* underflow exception */
+#define FP_X_IMP (1 << FP_EXCEPT_IMP) /* imprecise (loss of precision; "inexact") */
+
+/* Rounding modes */
+
+typedef enum {
+ FP_RN=0, /* round to nearest representable number */
+ FP_RP=1, /* round toward positive infinity */
+ FP_RM=2, /* round toward negative infinity */
+ FP_RZ=3 /* round to zero (truncate) */
+} fp_rnd_t;
+
+/*
+ * FP precision modes
+ */
+typedef enum {
+ FP_PS=0, /* 24 bit (single-precision) */
+ FP_PRS, /* reserved */
+ FP_PD, /* 53 bit (double-precision) */
+ FP_PE /* 64 bit (extended-precision) */
+} fp_prec_t;
+
+#define fp_except_t int
+
+#endif /* _MACHINE_IEEEFP_H_ */
diff --git a/sys/arm/include/in_cksum.h b/sys/arm/include/in_cksum.h
new file mode 100644
index 0000000..26f448a
--- /dev/null
+++ b/sys/arm/include/in_cksum.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#ifdef _KERNEL
+u_short in_cksum(struct mbuf *m, int len);
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
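+
+/*
+ * Illustrative note: in_cksum_hdr() checksums a standard 20-byte IP
+ * header in one call, while in_cksum_skip(m, len, skip) sums the bytes
+ * of the mbuf chain from offset skip up to len, e.g. skipping the IP
+ * header when checksumming a transport payload.
+ */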
+#endif /* _KERNEL */
+#endif /* _MACHINE_IN_CKSUM_H_ */
diff --git a/sys/arm/include/intr.h b/sys/arm/include/intr.h
new file mode 100644
index 0000000..30d5e6d
--- /dev/null
+++ b/sys/arm/include/intr.h
@@ -0,0 +1,83 @@
+/* $NetBSD: intr.h,v 1.7 2003/06/16 20:01:00 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+/* Define the various Interrupt Priority Levels */
+
+/* Hardware Interrupt Priority Levels are not mutually exclusive. */
+
+#ifdef CPU_SA1110
+#define IPL_SOFTCLOCK 0
+#define IPL_SOFTNET 1
+#define IPL_BIO 2 /* block I/O */
+#define IPL_NET 3 /* network */
+#define IPL_SOFTSERIAL 4
+#define IPL_TTY 5 /* terminal */
+#define IPL_VM 6 /* memory allocation */
+#define IPL_AUDIO 7 /* audio */
+#define IPL_CLOCK 8 /* clock */
+#define IPL_HIGH 9 /* everything */
+#define IPL_SERIAL 10 /* serial */
+#define IPL_NONE 11
+
+#define NIPL 12
+
+#endif
+
+#define IST_UNUSABLE -1 /* interrupt cannot be used */
+#define IST_NONE 0 /* none (dummy) */
+#define IST_PULSE 1 /* pulsed */
+#define IST_EDGE 2 /* edge-triggered */
+#define IST_LEVEL 3 /* level-triggered */
+
+/* Software interrupt priority levels */
+
+#define SOFTIRQ_CLOCK 0
+#define SOFTIRQ_NET 1
+#define SOFTIRQ_SERIAL 2
+
+#define SOFTIRQ_BIT(x) (1 << (x))
+
+#include <machine/psl.h>
+
+void set_splmasks(void);
+void arm_setup_irqhandler(const char *, void (*)(void*), void *, int, int,
+ void **);
+#endif /* _MACHINE_INTR_H_ */
diff --git a/sys/arm/include/katelib.h b/sys/arm/include/katelib.h
new file mode 100644
index 0000000..b449511
--- /dev/null
+++ b/sys/arm/include/katelib.h
@@ -0,0 +1,103 @@
+/* $NetBSD: katelib.h,v 1.3 2001/11/23 19:21:48 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * katelib.h
+ *
+ * Prototypes for machine specific functions. Most of these
+ * could be inlined.
+ *
+ * This should not really be a separate header file. Eventually I will merge
+ * this into other header files once I have decided where the declarations
+ * should go.
+ *
+ * Created : 18/09/94
+ *
+ * Based on kate/katelib/prototypes.h
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * USE OF THIS FILE IS DEPRECATED
+ */
+
+#ifndef _MACHINE_KATELIB_H_
+#define _MACHINE_KATELIB_H_
+#include <sys/types.h>
+#include <machine/cpufunc.h>
+
+#ifdef _KERNEL
+
+/* Assembly modules */
+
+/* In blockio.S */
+#include <machine/blockio.h>
+
+/* Macros for reading and writing words, shorts, bytes */
+
+#define WriteWord(a, b) \
+*((volatile unsigned int *)(a)) = (b)
+
+#define ReadWord(a) \
+(*((volatile unsigned int *)(a)))
+
+#define WriteShort(a, b) \
+*((volatile unsigned int *)(a)) = ((b) | ((b) << 16))
+
+#define ReadShort(a) \
+((*((volatile unsigned int *)(a))) & 0xffff)
+
+#define WriteByte(a, b) \
+*((volatile unsigned char *)(a)) = (b)
+
+#define ReadByte(a) \
+(*((volatile unsigned char *)(a)))
+
+/* Define in/out macros */
+
+#define inb(port) ReadByte((port))
+#define outb(port, byte) WriteByte((port), (byte))
+#define inw(port) ReadShort((port))
+#define outw(port, word) WriteShort((port), (word))
+#define inl(port) ReadWord((port))
+#define outl(port, lword) WriteWord((port), (lword))
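+
+/*
+ * Illustrative usage (the address below is hypothetical): these expand
+ * to simple volatile memory accesses, so a memory-mapped register at
+ * 0xd0000000 could be accessed as:
+ *
+ *	unsigned int v = inl(0xd0000000);
+ *	outb(0xd0000000, 0x5a);
+ */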
+
+#endif
+
+#endif /* !_MACHINE_KATELIB_H_ */
+/* End of katelib.h */
diff --git a/sys/arm/include/machdep.h b/sys/arm/include/machdep.h
new file mode 100644
index 0000000..4d201cc
--- /dev/null
+++ b/sys/arm/include/machdep.h
@@ -0,0 +1,13 @@
+/* $NetBSD: machdep.h,v 1.7 2002/02/21 02:52:21 thorpej Exp $ */
+/* $FreeBSD$ */
+
+#ifndef _MACHDEP_BOOT_MACHDEP_H_
+#define _MACHDEP_BOOT_MACHDEP_H_
+
+/* misc prototypes used by the many arm machdeps */
+void halt (void);
+void data_abort_handler (trapframe_t *);
+void prefetch_abort_handler (trapframe_t *);
+void undefinedinstruction_bounce (trapframe_t *);
+
+#endif /* !_MACHDEP_BOOT_MACHDEP_H_ */
diff --git a/sys/arm/include/md_var.h b/sys/arm/include/md_var.h
new file mode 100644
index 0000000..da7daa7
--- /dev/null
+++ b/sys/arm/include/md_var.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 1995 Bruce D. Evans.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/md_var.h,v 1.40 2001/07/12
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/arm/include/metadata.h b/sys/arm/include/metadata.h
new file mode 100644
index 0000000..32497a4
--- /dev/null
+++ b/sys/arm/include/metadata.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2003 Peter Wemm <peter@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_METADATA_H_
+#define _MACHINE_METADATA_H_
+
+#define MODINFOMD_SMAP 0x1001
+
+#endif /* !_MACHINE_METADATA_H_ */
diff --git a/sys/arm/include/mutex.h b/sys/arm/include/mutex.h
new file mode 100644
index 0000000..c9b2e1d
--- /dev/null
+++ b/sys/arm/include/mutex.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MUTEX_H_
+#define _MACHINE_MUTEX_H_
+
+#endif /* !_MACHINE_MUTEX_H_ */
diff --git a/sys/arm/include/param.h b/sys/arm/include/param.h
index eb3ae34..ae4a5f9 100644
--- a/sys/arm/include/param.h
+++ b/sys/arm/include/param.h
@@ -55,10 +55,10 @@
#endif
#ifndef _MACHINE
-#define _MACHIN "arm32"
+#define _MACHINE "arm"
#endif
#ifndef _MACHINE_ARCH
-#define _MACHINE_ARCH "arm32"
+#define _MACHINE_ARCH "arm"
#endif
#ifndef _NO_NAMESPACE_POLLUTION
@@ -67,14 +67,12 @@
#define _MACHINE_PARAM_H_
#ifndef MACHINE
-#define MACHINE "arm32"
+#define MACHINE "arm"
#endif
#ifndef MACHINE_ARCH
-#define MACHINE_ARCH "arm32"
+#define MACHINE_ARCH "arm"
#endif
-#define MID_MACHINE MID_ARM32
-
-#include <machine/cpu.h>
+#define MID_MACHINE MID_ARM6
#ifdef SMP
#define MAXCPU 2
@@ -90,12 +88,34 @@
#define PAGE_MASK (PAGE_SIZE - 1)
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
-#define KERNBASE 0x100000 /* start of kernel virtual */
-#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
+#define PDR_SHIFT 20 /* log2(NBPDR) */
+#define NBPDR (1 << PDR_SHIFT)
+#define NPDEPG (1 << (32 - PDR_SHIFT))
+
+#ifndef KSTACK_PAGES
+#define KSTACK_PAGES 4
+#endif /* !KSTACK_PAGES */
+
+#ifndef UAREA_PAGES
+#define UAREA_PAGES 2
+#endif /* !UAREA_PAGES */
+
+#ifndef USPACE
+#define USPACE (UAREA_PAGES * PAGE_SIZE) /* total size of u-area */
+#endif
+
+#ifndef FPCONTEXTSIZE
+#define FPCONTEXTSIZE (0x100)
+#endif
-#define UPAGES 2 /* pages of u-area */
-#define USPACE (UPAGES * PAGE_SIZE) /* total size of u-area */
+#ifndef KSTACK_GUARD_PAGES
+#define KSTACK_GUARD_PAGES 1
+#endif /* !KSTACK_GUARD_PAGES */
+#define USPACE_SVC_STACK_TOP (USPACE)
+#define USPACE_SVC_STACK_BOTTOM (USPACE_SVC_STACK_TOP - 0x1000)
+#define USPACE_UNDEF_STACK_TOP (USPACE_SVC_STACK_BOTTOM - 0x10)
+#define USPACE_UNDEF_STACK_BOTTOM (FPCONTEXTSIZE + 10)
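+
+/*
+ * Illustrative layout implied by the constants above, from the top of
+ * the u-area down: the SVC stack occupies the top 0x1000 bytes, the
+ * undefined-mode stack sits just below it (separated by a 0x10 gap),
+ * and the FP context lives at the bottom.
+ */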
/*
* Mach derived conversion macros
*/
diff --git a/sys/arm/include/pcb.h b/sys/arm/include/pcb.h
new file mode 100644
index 0000000..3919d23
--- /dev/null
+++ b/sys/arm/include/pcb.h
@@ -0,0 +1,99 @@
+/* $NetBSD: pcb.h,v 1.10 2003/10/13 21:46:39 scw Exp $ */
+
+/*
+ * Copyright (c) 2001 Matt Thomas <matt@3am-software.com>.
+ * Copyright (c) 1994 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name "RiscBSD" nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+#include <machine/frame.h>
+#include <machine/fp.h>
+
+
+struct trapframe;
+
+struct pcb_arm32 {
+ vm_offset_t pcb32_pagedir; /* PT hooks */
+ uint32_t *pcb32_pl1vec; /* PTR to vector_base L1 entry*/
+ uint32_t pcb32_l1vec; /* Value to stuff on ctx sw */
+ u_int pcb32_dacr; /* Domain Access Control Reg */
+ void *pcb32_cstate; /* &pmap->pm_cstate */
+ /*
+ * WARNING!
+ * cpuswitch.S relies on pcb32_r8 being quad-aligned in struct pcb
+ * (due to the use of "strd" when compiled for XSCALE)
+ */
+ u_int pcb32_r8; /* used */
+ u_int pcb32_r9; /* used */
+ u_int pcb32_r10; /* used */
+ u_int pcb32_r11; /* used */
+ u_int pcb32_r12; /* used */
+ u_int pcb32_sp; /* used */
+ u_int pcb32_lr;
+ u_int pcb32_pc;
+ u_int pcb32_und_sp;
+};
+#define pcb_pagedir un_32.pcb32_pagedir
+#define pcb_pl1vec un_32.pcb32_pl1vec
+#define pcb_l1vec un_32.pcb32_l1vec
+#define pcb_dacr un_32.pcb32_dacr
+#define pcb_cstate un_32.pcb32_cstate
+
+/*
+ * WARNING!
+ * See warning for struct pcb_arm32, above, before changing struct pcb!
+ */
+struct pcb {
+ u_int pcb_flags;
+#define PCB_OWNFPU 0x00000001
+#define PCB_NOALIGNFLT 0x00000002
+ caddr_t pcb_onfault; /* On fault handler */
+ struct pcb_arm32 un_32;
+ struct fpe_sp_state pcb_fpstate; /* Floating Point state */
+};
+
+/*
+ * No additional data for core dumps.
+ */
+struct md_coredump {
+ int md_empty;
+};
+
+#ifdef _KERNEL
+extern struct pcb *curpcb;
+void savectx(struct pcb *);
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PCB_H_ */
diff --git a/sys/arm/include/pcpu.h b/sys/arm/include/pcpu.h
new file mode 100644
index 0000000..a71eb79
--- /dev/null
+++ b/sys/arm/include/pcpu.h
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCPU_H_
+#define _MACHINE_PCPU_H_
+
+#ifdef _KERNEL
+
+#include <machine/asmacros.h>
+#include <machine/frame.h>
+
+#define ALT_STACK_SIZE 128
+
+struct vmspace;
+
+/*
+ * Inside the kernel, the per-CPU (globaldata) structure is reached
+ * through the global pcpup pointer declared below; no register is
+ * reserved for it on this port.
+ */
+#define PCPU_MD_FIELDS \
+	struct pcpu *pc_prvspace;
+
+struct pcb;
+struct pcpu;
+
+extern struct pcpu *pcpup;
+
+#define PCPU_GET(member) (pcpup->pc_ ## member)
+#define PCPU_PTR(member) (&pcpup->pc_ ## member)
+#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
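+
+/*
+ * Illustrative note: on this uniprocessor port the accessors expand to
+ * plain indirections through pcpup, e.g. PCPU_GET(curthread) is just
+ * pcpup->pc_curthread.
+ */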
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PCPU_H_ */
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
new file mode 100644
index 0000000..416c7f8
--- /dev/null
+++ b/sys/arm/include/pmap.h
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Derived from hp300 version by Mike Hibler, this version by William
+ * Jolitz uses a recursive map [a pde points to the page directory] to
+ * map the page tables using the pagetables themselves. This is done to
+ * reduce the impact on kernel virtual memory for lots of sparse address
+ * space, and to reduce the cost of memory to each process.
+ *
+ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
+ * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
+ * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#include <machine/pte.h>
+
+/*
+ * Pte related macros
+ */
+#define PTE_NOCACHE 0
+#define PTE_CACHE 1
+
+#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDR_SHIFT)+((pti)<<PAGE_SHIFT)))
+#define PTDIPDE(ptd) ((ptd)/1024)
+#define PTDIPTE(ptd) ((ptd)%256)
+
+#ifndef NKPT
+#define NKPT 120 /* actual number of kernel page tables */
+#endif
+
+#ifndef NKPDE
+#define NKPDE 1019 /* Maximum number of kernel PDE */
+#endif
+
+#define NPDEPTD 16 /* Number of PDE in each PTD */
+
+/*
+ * The *PTDI values control the layout of virtual memory
+ */
+
+#define KPTDI (NPDEPG-NKPDE) /* ptd entry for kernel space begin */
+#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */
+#define KPTPTDI (PTDPTDI-1) /* ptd entry for kernel PTEs */
+#define UPTPTDI (KPTPTDI-3) /* ptd entry for uspace PTEs */
+#define UMAXPTDI (UPTPTDI-1) /* ptd entry for user space end */
+#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */
+
+#ifndef LOCORE
+
+#include <sys/queue.h>
+
+#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
+#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
+
+#ifdef _KERNEL
+#define ARM_PTE_TO_PFN(pte) ((pt_entry_t)(pte) >> PAGE_SHIFT)
+#define ARM_PDE_TO_PFN(pde) ((pd_entry_t)(pde) >> 10)
+#define ARM_PHYS_TO_KSPACE(x) ((vm_offset_t) (x) | (UPTPTDI << PDR_SHIFT))
+#define ARM_KSPACE_TO_PHYS(x) ((vm_offset_t) (x) & ~(UPTPTDI << PDR_SHIFT))
+
+extern pt_entry_t PTmap[], APTmap;
+extern pd_entry_t PTD[], APTD, PTDpde, APTDpde;
+
+extern pd_entry_t IdlePTD; /* physical address of "Idle" state directory */
+
+
+
+#if 0
+static __inline vm_offset_t
+pmap_akextract(vm_offset_t va)
+{
+ vm_offset_t pa;
+ pa = *(vm_offset_t *)avtopte(va);
+ pa = (pa & PG_FRAME) | (va & PAGE_MASK);
+ return pa;
+}
+#endif
+#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+
+#define avtophys(va) pmap_akextract(((vm_offset_t) (va)))
+
+#endif
+
+/*
+ * Pmap stuff
+ */
+
+/*
+ * This structure is used to hold a virtual<->physical address
+ * association and is used mostly by bootstrap code
+ */
+struct pv_addr {
+ SLIST_ENTRY(pv_addr) pv_list;
+ vm_offset_t pv_va;
+ vm_paddr_t pv_pa;
+};
+
+struct pv_entry;
+
+struct md_page {
+ int pvh_attrs;
+ u_int uro_mappings;
+ u_int urw_mappings;
+ union {
+ u_short s_mappings[2]; /* Assume kernel count <= 65535 */
+ u_int i_mappings;
+ } k_u;
+#define kro_mappings k_u.s_mappings[0]
+#define krw_mappings k_u.s_mappings[1]
+#define k_mappings k_u.i_mappings
+ int pv_list_count;
+ TAILQ_HEAD(,pv_entry) pv_list;
+};
+
+#define VM_MDPAGE_INIT(pg) \
+do { \
+ TAILQ_INIT(&(pg)->mdpage.pv_list); \
+ mtx_init(&(pg)->mdpage.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEV);\
+ (pg)->mdpage.pvh_attrs = 0; \
+ (pg)->mdpage.uro_mappings = 0; \
+ (pg)->mdpage.urw_mappings = 0; \
+ (pg)->mdpage.k_mappings = 0; \
+} while (/*CONSTCOND*/0)
+
+struct l1_ttable;
+struct l2_dtable;
+
+/*
+ * Track cache/tlb occupancy using the following structure
+ */
+union pmap_cache_state {
+ struct {
+ union {
+ u_int8_t csu_cache_b[2];
+ u_int16_t csu_cache;
+ } cs_cache_u;
+
+ union {
+ u_int8_t csu_tlb_b[2];
+ u_int16_t csu_tlb;
+ } cs_tlb_u;
+ } cs_s;
+ u_int32_t cs_all;
+};
+#define cs_cache_id cs_s.cs_cache_u.csu_cache_b[0]
+#define cs_cache_d cs_s.cs_cache_u.csu_cache_b[1]
+#define cs_cache cs_s.cs_cache_u.csu_cache
+#define cs_tlb_id cs_s.cs_tlb_u.csu_tlb_b[0]
+#define cs_tlb_d cs_s.cs_tlb_u.csu_tlb_b[1]
+#define cs_tlb cs_s.cs_tlb_u.csu_tlb
+
+/*
+ * Assigned to cs_all to force cacheops to work for a particular pmap
+ */
+#define PMAP_CACHE_STATE_ALL 0xffffffffu
+
+/*
+ * The number of L2 descriptor tables which can be tracked by an l2_dtable.
+ * A bucket size of 16 provides for 16MB of contiguous virtual address
+ * space per l2_dtable. Most processes will, therefore, require only two or
+ * three of these to map their whole working set.
+ */
+#define L2_BUCKET_LOG2 4
+#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
+/*
+ * Given the above "L2-descriptors-per-l2_dtable" constant, the number
+ * of l2_dtable structures required to track all possible page descriptors
+ * mappable by an L1 translation table is given by the following constants:
+ */
+#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
+#define L2_SIZE (1 << L2_LOG2)
+
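With L1_S_SHIFT being 20 (1MB sections, per pte.h), these constants work out to 16MB of VA per l2_dtable and 256 l2_dtable slots for the full 4GB space. A stand-alone check, with the values restated locally so it compiles outside the kernel tree:

    #include <assert.h>

    #define L1_S_SHIFT     20       /* 1MB sections (pte.h) */
    #define L2_BUCKET_LOG2 4
    #define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
    #define L2_LOG2        ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
    #define L2_SIZE        (1 << L2_LOG2)

    int
    main(void)
    {
            /* 16 L2 tables x 1MB each = 16MB of VA per l2_dtable */
            assert(L2_BUCKET_SIZE * (1 << L1_S_SHIFT) == 16 * 1024 * 1024);
            /* 256 l2_dtable slots cover the whole 4GB address space */
            assert(L2_SIZE == 256);
            return (0);
    }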
+struct pmap {
+ u_int8_t pm_domain;
+ struct l1_ttable *pm_l1;
+ struct l2_dtable *pm_l2[L2_SIZE];
+ pd_entry_t *pm_pdir; /* KVA of page directory */
+ TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
+ struct pv_addr pm_ptpt; /* pagetable of pagetables */
+ int pm_count; /* reference count */
+ int pm_active; /* active on cpus */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ struct vm_page *pm_ptphint; /* pmap ptp hint */
+ union pmap_cache_state pm_cstate;
+ LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
+};
+
+typedef struct pmap *pmap_t;
+
+#ifdef _KERNEL
+extern pmap_t kernel_pmap;
+#define pmap_kernel() kernel_pmap
+#endif
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ pmap_t pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+ TAILQ_ENTRY(pv_entry) pv_list;
+ TAILQ_ENTRY(pv_entry) pv_plist;
+ vm_page_t pv_ptem; /* VM page for pte */
+ int pv_flags; /* flags (wired, etc...) */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+#define PV_CI 0x01 /* all entries must be cache inhibited */
+#define PV_PTPAGE 0x02 /* entry maps a page table page */
+
+/*
+ * Page hooks.
+ * For speed we store both the virtual address and the page table
+ * entry address for each page hook.
+ */
+typedef struct {
+ vm_offset_t va;
+ pt_entry_t *pte;
+} pagehook_t;
+
+
+#ifdef _KERNEL
+
+boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);
+
+/*
+ * virtual address to page table entry and
+ * to physical address. Likewise for alternate address space.
+ * Note: these work recursively, thus vtopte of a pte will give
+ * the corresponding pde that in turn maps it.
+ */
+
+void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
+/* Virtual address to page table entry */
+static __inline pt_entry_t *
+vtopte(vm_offset_t va)
+{
+ pd_entry_t *pdep;
+ pt_entry_t *ptep;
+
+ if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
+ return (NULL);
+ return (ptep);
+}
+
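A sketch of how a caller could combine vtopte() with the L2 helpers defined further down in this header to hand-roll a kernel VA-to-PA translation; kernel context and a valid small-page mapping are assumed, and pmap_kextract() below is the real interface for this job:

    static vm_paddr_t
    va_to_pa_sketch(vm_offset_t va)
    {
            pt_entry_t *ptep;

            ptep = vtopte(va);
            if (ptep == NULL || !l2pte_valid(*ptep))
                    return (0);             /* no mapping */
            /* frame bits from the PTE plus the offset within the page */
            return (l2pte_pa(*ptep) | (va & L2_S_OFFSET));
    }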
+extern vm_offset_t avail_end;
+extern vm_offset_t clean_eva;
+extern vm_offset_t clean_sva;
+extern vm_offset_t phys_avail[];
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+
+void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
+void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void pmap_kremove(vm_offset_t);
+void *pmap_mapdev(vm_offset_t, vm_size_t);
+void pmap_unmapdev(vm_offset_t, vm_size_t);
+vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
+void pmap_debug(int);
+void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
+void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
+vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
+void pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
+ int cache);
+int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
+
+/*
+ * Definitions for MMU domains
+ */
+#define PMAP_DOMAINS 15 /* 15 'user' domains (0-14) */
+#define PMAP_DOMAIN_KERNEL 15 /* The kernel uses domain #15 */
+
+/*
+ * The new pmap ensures that page tables are always mapped Write-Thru.
+ * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
+ * on every change.
+ *
+ * Unfortunately, not all CPUs have a write-through cache mode. So we
+ * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
+ * and if there is the chance for PTE syncs to be needed, we define
+ * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
+ * the code.
+ */
+extern int pmap_needs_pte_sync;
+
+/*
+ * These macros define the various bit masks in the PTE.
+ *
+ * We use these macros since we use different bits on different processor
+ * models.
+ */
+#define L1_S_PROT_U (L1_S_AP(AP_U))
+#define L1_S_PROT_W (L1_S_AP(AP_W))
+#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
+
+#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
+#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))
+
+#define L2_L_PROT_U (L2_AP(AP_U))
+#define L2_L_PROT_W (L2_AP(AP_W))
+#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
+
+#define L2_L_CACHE_MASK_generic (L2_B|L2_C)
+#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))
+
+#define L2_S_PROT_U_generic (L2_AP(AP_U))
+#define L2_S_PROT_W_generic (L2_AP(AP_W))
+#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W)
+
+#define L2_S_PROT_U_xscale (L2_AP0(AP_U))
+#define L2_S_PROT_W_xscale (L2_AP0(AP_W))
+#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W)
+
+#define L2_S_CACHE_MASK_generic (L2_B|L2_C)
+#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))
+
+#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
+#define L1_S_PROTO_xscale (L1_TYPE_S)
+
+#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
+#define L1_C_PROTO_xscale (L1_TYPE_C)
+
+#define L2_L_PROTO (L2_TYPE_L)
+
+#define L2_S_PROTO_generic (L2_TYPE_S)
+#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS)
+
+/*
+ * User-visible names for the ones that vary with MMU class.
+ */
+
+#if ARM_NMMUS > 1
+/* More than one MMU class configured; use variables. */
+#define L2_S_PROT_U pte_l2_s_prot_u
+#define L2_S_PROT_W pte_l2_s_prot_w
+#define L2_S_PROT_MASK pte_l2_s_prot_mask
+
+#define L1_S_CACHE_MASK pte_l1_s_cache_mask
+#define L2_L_CACHE_MASK pte_l2_l_cache_mask
+#define L2_S_CACHE_MASK pte_l2_s_cache_mask
+
+#define L1_S_PROTO pte_l1_s_proto
+#define L1_C_PROTO pte_l1_c_proto
+#define L2_S_PROTO pte_l2_s_proto
+
+#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+#define L2_S_PROT_U L2_S_PROT_U_generic
+#define L2_S_PROT_W L2_S_PROT_W_generic
+#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
+
+#define L1_S_PROTO L1_S_PROTO_generic
+#define L1_C_PROTO L1_C_PROTO_generic
+#define L2_S_PROTO L2_S_PROTO_generic
+
+#elif ARM_MMU_XSCALE == 1
+#define L2_S_PROT_U L2_S_PROT_U_xscale
+#define L2_S_PROT_W L2_S_PROT_W_xscale
+#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale
+
+#define L1_S_PROTO L1_S_PROTO_xscale
+#define L1_C_PROTO L1_C_PROTO_xscale
+#define L2_S_PROTO L2_S_PROTO_xscale
+
+#endif /* ARM_NMMUS > 1 */
+
+#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
+#define PMAP_NEEDS_PTE_SYNC 1
+#define PMAP_INCLUDE_PTE_SYNC
+#elif (ARM_MMU_SA1 == 0)
+#define PMAP_NEEDS_PTE_SYNC 0
+#endif
+
+/*
+ * These macros return various bits based on kernel/user and protection.
+ * Note that the compiler will usually fold these at compile time.
+ */
+#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
+
+#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
+
+#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
+
+/*
+ * Macros to test if a mapping is mappable with an L1 Section mapping
+ * or an L2 Large Page mapping.
+ */
+#define L1_S_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
+
+#define L2_L_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
+
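These predicates are what a chunk-mapping routine such as pmap_map_chunk() can build on to pick the largest mapping that fits; the loop below is an illustrative sketch of that selection, not the actual function body (va, pa and resid are assumed locals):

    while (resid > 0) {
            if (L1_S_MAPPABLE_P(va, pa, resid)) {
                    /* 1MB section: va, pa and size all section-aligned */
                    va += L1_S_SIZE; pa += L1_S_SIZE; resid -= L1_S_SIZE;
            } else if (L2_L_MAPPABLE_P(va, pa, resid)) {
                    /* 64KB large page */
                    va += L2_L_SIZE; pa += L2_L_SIZE; resid -= L2_L_SIZE;
            } else {
                    /* fall back to a 4KB small page */
                    va += L2_S_SIZE; pa += L2_S_SIZE; resid -= L2_S_SIZE;
            }
    }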
+/*
+ * Provide a fallback in case we were not able to determine it at
+ * compile-time.
+ */
+#ifndef PMAP_NEEDS_PTE_SYNC
+#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
+#define PMAP_INCLUDE_PTE_SYNC
+#endif
+
+#define PTE_SYNC(pte) \
+do { \
+ if (PMAP_NEEDS_PTE_SYNC) \
+ cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
+} while (/*CONSTCOND*/0)
+
+#define PTE_SYNC_RANGE(pte, cnt) \
+do { \
+ if (PMAP_NEEDS_PTE_SYNC) { \
+ cpu_dcache_wb_range((vm_offset_t)(pte), \
+ (cnt) << 2); /* * sizeof(pt_entry_t) */ \
+ } \
+} while (/*CONSTCOND*/0)
+
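The intended pattern is that every store to a PTE is followed by a PTE_SYNC() so that the table-walk hardware, which does not snoop the data cache on the affected CPUs, sees the update. A hedged sketch, with ptep and a page-aligned pa as assumed locals:

    /* install a kernel-writable, cacheable small-page mapping */
    *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
        pte_l2_s_cache_mode;
    PTE_SYNC(ptep);         /* compiles away unless PMAP_NEEDS_PTE_SYNC */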
+extern pt_entry_t pte_l1_s_cache_mode;
+extern pt_entry_t pte_l1_s_cache_mask;
+
+extern pt_entry_t pte_l2_l_cache_mode;
+extern pt_entry_t pte_l2_l_cache_mask;
+
+extern pt_entry_t pte_l2_s_cache_mode;
+extern pt_entry_t pte_l2_s_cache_mask;
+
+extern pt_entry_t pte_l1_s_cache_mode_pt;
+extern pt_entry_t pte_l2_l_cache_mode_pt;
+extern pt_entry_t pte_l2_s_cache_mode_pt;
+
+extern pt_entry_t pte_l2_s_prot_u;
+extern pt_entry_t pte_l2_s_prot_w;
+extern pt_entry_t pte_l2_s_prot_mask;
+
+extern pt_entry_t pte_l1_s_proto;
+extern pt_entry_t pte_l1_c_proto;
+extern pt_entry_t pte_l2_s_proto;
+
+extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
+
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
+void pmap_zero_page_generic(vm_paddr_t, int, int);
+
+void pmap_pte_init_generic(void);
+#if defined(CPU_ARM8)
+void pmap_pte_init_arm8(void);
+#endif
+#if defined(CPU_ARM9)
+void pmap_pte_init_arm9(void);
+#endif /* CPU_ARM9 */
+#if defined(CPU_ARM10)
+void pmap_pte_init_arm10(void);
+#endif /* CPU_ARM10 */
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if /* ARM_MMU_SA1 == */1
+void pmap_pte_init_sa1(void);
+#endif /* ARM_MMU_SA1 == 1 */
+
+#if ARM_MMU_XSCALE == 1
+void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
+void pmap_zero_page_xscale(vm_paddr_t, int, int);
+
+void pmap_pte_init_xscale(void);
+
+void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);
+
+#define PMAP_UAREA(va) pmap_uarea(va)
+void pmap_uarea(vm_offset_t);
+#endif /* ARM_MMU_XSCALE == 1 */
+#define PTE_KERNEL 0
+#define PTE_USER 1
+#define l1pte_valid(pde) ((pde) != 0)
+#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
+#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
+#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
+
+#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
+#define l2pte_valid(pte) ((pte) != 0)
+#define l2pte_pa(pte) ((pte) & L2_S_FRAME)
+#define l2pte_minidata(pte) (((pte) & \
+ (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
+ == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
+
+/* L1 and L2 page table macros */
+#define pmap_pde_v(pde) l1pte_valid(*(pde))
+#define pmap_pde_section(pde) l1pte_section_p(*(pde))
+#define pmap_pde_page(pde) l1pte_page_p(*(pde))
+#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde))
+
+#define pmap_pte_v(pte) l2pte_valid(*(pte))
+#define pmap_pte_pa(pte) l2pte_pa(*(pte))
+
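A sketch of how these predicates compose into the two-level walk that pmap_get_pde_pte() performs internally; pdep, ptep, l2table, va and pa are assumed locals and error handling is elided:

    if (pmap_pde_section(pdep)) {
            /* 1MB section mapping: PA comes straight from the PDE */
            pa = (*pdep & L1_S_ADDR_MASK) | (va & L1_S_OFFSET);
    } else if (pmap_pde_page(pdep)) {
            /* coarse L2 table: index it and test the small-page PTE */
            ptep = &l2table[l2pte_index(va)];
            if (pmap_pte_v(ptep))
                    pa = pmap_pte_pa(ptep) | (va & L2_S_OFFSET);
    }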
+/* Size of the kernel part of the L1 page table */
+#define KERNEL_PD_SIZE \
+ (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
+#define PTE_PAGETABLE 2
+
+/*
+ * Flags that indicate attributes of pages or mappings of pages.
+ *
+ * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
+ * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
+ * pv_entry's for each page. They live in the same "namespace" so
+ * that we can clear multiple attributes at a time.
+ *
+ * Note the "non-cacheable" flag generally means the page has
+ * multiple mappings in a given address space.
+ */
+#define PVF_MOD 0x01 /* page is modified */
+#define PVF_REF 0x02 /* page is referenced */
+#define PVF_WIRED 0x04 /* mapping is wired */
+#define PVF_WRITE 0x08 /* mapping is writable */
+#define PVF_EXEC 0x10 /* mapping is executable */
+#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */
+#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */
+#define PVF_NC (PVF_UNC|PVF_KNC)
+
+void vector_page_setprot(int);
+/*
+ * Routine: pmap_kextract
+ * Function:
+ * Extract the physical page address associated with the given
+ * kernel virtual address.
+ */
+
+vm_paddr_t pmap_kextract(vm_offset_t);
+
+void pmap_update(pmap_t);
+#endif /* _KERNEL */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/arm/include/proc.h b/sys/arm/include/proc.h
new file mode 100644
index 0000000..5d72258
--- /dev/null
+++ b/sys/arm/include/proc.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)proc.h 7.1 (Berkeley) 5/15/91
+ * from: FreeBSD: src/sys/i386/include/proc.h,v 1.11 2001/06/29
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_
+
+#include <machine/utrap.h>
+
+struct md_utrap {
+ utrap_entry_t *ut_precise[UT_MAX]; /* must be first */
+ int ut_refcnt;
+};
+
+struct mdthread {
+ register_t md_savecrit;
+};
+
+struct mdproc {
+ struct md_utrap *md_utrap;
+ void *md_sigtramp;
+};
+
+#endif /* !_MACHINE_PROC_H_ */
diff --git a/sys/arm/include/profile.h b/sys/arm/include/profile.h
new file mode 100644
index 0000000..65a1a9d
--- /dev/null
+++ b/sys/arm/include/profile.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)profile.h 8.1 (Berkeley) 6/11/93
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PROFILE_H_
+#define _MACHINE_PROFILE_H_
+
+#ifdef _KERNEL
+
+/*
+ * Config generates something to tell the compiler to align functions on 16
+ * byte boundaries. A strict alignment is good for keeping the tables small.
+ */
+#define FUNCTION_ALIGNMENT 16
+
+/*
+ * The kernel uses assembler stubs instead of unportable inlines.
+ * This is mainly to save a little time when profiling is not enabled,
+ * which is the usual case for the kernel.
+ */
+#define _MCOUNT_DECL void mcount
+#define MCOUNT
+
+#ifdef GUPROF
+#define CALIB_SCALE 1000
+#define KCOUNT(p,index) ((p)->kcount[(index) \
+ / (HISTFRACTION * sizeof(HISTCOUNTER))])
+#define MCOUNT_DECL(s)
+#define MCOUNT_ENTER(s)
+#define MCOUNT_EXIT(s)
+#define PC_TO_I(p, pc) ((uintfptr_t)(pc) - (uintfptr_t)(p)->lowpc)
+#else
+#define MCOUNT_DECL(s) u_long s;
+#ifdef SMP
+extern int mcount_lock;
+#define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); \
+ while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
+ /* nothing */ ; }
+#define MCOUNT_EXIT(s) { atomic_store_rel_int(&mcount_lock, 0); \
+ write_eflags(s); }
+#else
+#define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); }
+#define MCOUNT_EXIT(s) (write_eflags(s))
+#endif
+#endif /* GUPROF */
+
+#else /* !_KERNEL */
+
+#define FUNCTION_ALIGNMENT 4
+
+#define _MCOUNT_DECL static __inline void _mcount
+
+#define MCOUNT
+
+typedef unsigned int uintfptr_t;
+
+#endif /* _KERNEL */
+
+/*
+ * An unsigned integral type that can hold non-negative difference between
+ * function pointers.
+ */
+typedef u_int fptrdiff_t;
+
+#ifdef _KERNEL
+
+void mcount(uintfptr_t frompc, uintfptr_t selfpc);
+void kmupetext(uintfptr_t nhighpc);
+
+#ifdef GUPROF
+struct gmonparam;
+
+void nullfunc_loop_profiled(void);
+void nullfunc_profiled(void);
+void startguprof(struct gmonparam *p);
+void stopguprof(struct gmonparam *p);
+#else
+#define startguprof(p)
+#define stopguprof(p)
+#endif /* GUPROF */
+
+#else /* !_KERNEL */
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+#ifdef __GNUC__
+void mcount(void) __asm(".mcount");
+#endif
+__END_DECLS
+
+#endif /* _KERNEL */
+
+#ifdef GUPROF
+/* XXX doesn't quite work outside kernel yet. */
+extern int cputime_bias;
+
+__BEGIN_DECLS
+int cputime(void);
+void empty_loop(void);
+void mexitcount(uintfptr_t selfpc);
+void nullfunc(void);
+void nullfunc_loop(void);
+__END_DECLS
+#endif
+
+#endif /* !_MACHINE_PROFILE_H_ */
diff --git a/sys/arm/include/psl.h b/sys/arm/include/psl.h
new file mode 100644
index 0000000..7ee8e91
--- /dev/null
+++ b/sys/arm/include/psl.h
@@ -0,0 +1,83 @@
+/* $NetBSD: psl.h,v 1.6 2003/06/16 20:00:58 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * psl.h
+ *
+ * spl prototypes.
+ * Eventually this will become a set of defines.
+ *
+ * Created : 21/07/95
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PSL_H_
+#define _MACHINE_PSL_H_
+#include <machine/intr.h>
+
+/*
+ * These are the different SPL states
+ *
+ * Each state has an interrupt mask associated with it which
+ * indicate which interrupts are allowed.
+ */
+
+#define _SPL_0 0
+#define _SPL_SOFTCLOCK 1
+#define _SPL_SOFTNET 2
+#define _SPL_BIO 3
+#define _SPL_NET 4
+#define _SPL_SOFTSERIAL 5
+#define _SPL_TTY 6
+#define _SPL_VM 7
+#define _SPL_AUDIO 8
+#define _SPL_CLOCK 9
+#define _SPL_STATCLOCK 10
+#define _SPL_HIGH 11
+#define _SPL_SERIAL 12
+#define _SPL_LEVELS 13
+
+#ifdef _KERNEL
+#ifndef _LOCORE
+extern int current_spl_level;
+
+extern u_int spl_masks[_SPL_LEVELS + 1];
+extern u_int spl_smasks[_SPL_LEVELS];
+#endif /* _LOCORE */
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PSL_H_ */
+/* End of psl.h */
diff --git a/sys/arm/include/pte.h b/sys/arm/include/pte.h
new file mode 100644
index 0000000..cfc9c14
--- /dev/null
+++ b/sys/arm/include/pte.h
@@ -0,0 +1,335 @@
+/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name "RiscBSD" nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PTE_H_
+#define _MACHINE_PTE_H_
+
+#define PDSHIFT 20 /* LOG2(NBPDR) */
+#define NBPD (1 << PDSHIFT) /* bytes/page dir */
+#define NPTEPD (NBPD / PAGE_SIZE)
+
+#ifndef LOCORE
+typedef uint32_t pd_entry_t; /* page directory entry */
+typedef uint32_t pt_entry_t; /* page table entry */
+#endif
+
+#define PD_MASK 0xfff00000 /* page directory address bits */
+#define PT_MASK 0x000ff000 /* page table address bits */
+
+#define PG_FRAME 0xfffff000
+
+/* The PT_SIZE definition is misleading... A page table is only 0x400
+ * bytes long.  But since VM mapping can only be done in 0x1000-byte
+ * chunks, a single 1KB block cannot be steered to a va by itself.
+ * Therefore the page tables are allocated in blocks of 4, i.e. if a
+ * 1KB block is allocated for a PT then the other 3KB is also mapped
+ * whenever the 1KB is mapped.
+ */
+
+#define PT_RSIZE 0x0400 /* Real page table size */
+#define PT_SIZE 0x1000
+#define PD_SIZE 0x4000
+
+/* Access permissions for L1 sections and L2 pages */
+#define AP_KR 0x00
+#define AP_KRW 0x01
+#define AP_KRWUR 0x02
+#define AP_KRWURW 0x03
+
+#define AP_W 0x01
+#define AP_U 0x02
+
+/* Physical bits in a pte */
+#define PT_B 0x04 /* Phys - Buffered (write) */
+#define PT_C 0x08 /* Phys - Cacheable */
+#define PT_U 0x10 /* Phys - Updateable */
+
+#ifndef LOCORE
+extern pt_entry_t pte_cache_mode;
+
+#define PT_CACHEABLE (pte_cache_mode)
+#endif
+
+/* Page R/M attributes (in pmseg.attrs). */
+#define PT_M 0x01 /* Virt - Modified */
+#define PT_H 0x02 /* Virt - Handled (Used) */
+/* Mapping wired/writeable/cacheable attributes (in pv_flags). */
+#define PT_W 0x04 /* Virt - Wired */
+#define PT_Wr 0x08 /* Virt / Phys Write */
+#define PT_NC 0x10 /* Caching disabled (multi-mapped page) */
+
+/* access permissions for L2 pages (all sub pages have the same perms) */
+#define PT_AP(x) (((x) << 10) | ((x) << 8) | ((x) << 6) | ((x) << 4))
+
+/* shift for access permissions in a L1 section mapping */
+#define AP_SECTION_SHIFT 10
+
+/* Page table types and masks */
+#define L1_PAGE 0x01 /* L1 page table mapping */
+#define L1_SECTION 0x02 /* L1 section mapping */
+#define L1_FPAGE 0x03 /* L1 fine page mapping */
+#define L1_MASK 0x03 /* Mask for L1 entry type */
+#define L2_LPAGE 0x01 /* L2 large page (64KB) */
+#define L2_SPAGE 0x02 /* L2 small page (4KB) */
+#define L2_MASK 0x03 /* Mask for L2 entry type */
+#define L2_INVAL 0x00 /* L2 invalid type */
+
+/* PTE construction macros */
+#define L2_LPTE(p, a, f) ((p) | PT_AP(a) | L2_LPAGE | (f))
+#define L2_SPTE(p, a, f) ((p) | PT_AP(a) | L2_SPAGE | (f))
+#define L2_PTE(p, a) L2_SPTE((p), (a), PT_CACHEABLE)
+#define L2_PTE_NC(p, a) L2_SPTE((p), (a), PT_B)
+#define L2_PTE_NC_NB(p, a) L2_SPTE((p), (a), 0)
+#define L1_SECPTE(p, a, f) ((p) | ((a) << AP_SECTION_SHIFT) | (f) \
+ | L1_SECTION | PT_U)
+
+#define L1_PTE(p) ((p) | 0x00 | L1_PAGE | PT_U)
+#define L1_SEC(p, c) L1_SECPTE((p), AP_KRW, (c))
+
+#define L1_SEC_SIZE (1 << PDSHIFT)
+#define L2_LPAGE_SIZE (NBPG * 16)
+
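A hedged sketch of what these constructors produce; pa is an assumed page-aligned physical address, and PT_CACHEABLE resolves at runtime through pte_cache_mode:

    /* cacheable kernel read/write 4K page */
    pt_entry_t pte  = L2_PTE(pa & PG_FRAME, AP_KRW);
    /* uncached, unbuffered page, kernel r/w plus user read */
    pt_entry_t npte = L2_PTE_NC_NB(pa & PG_FRAME, AP_KRWUR);
    /* cacheable, bufferable 1MB kernel section */
    pd_entry_t pde  = L1_SEC(pa & PD_MASK, PT_B | PT_C);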
+/* Domain types */
+#define DOMAIN_FAULT 0x00
+#define DOMAIN_CLIENT 0x01
+#define DOMAIN_RESERVED 0x02
+#define DOMAIN_MANAGER 0x03
+
+/* L1 and L2 address masks */
+#define L1_ADDR_MASK 0xfffffc00
+#define L2_ADDR_MASK 0xfffff000
+
+/*
+ * The ARM MMU architecture was introduced with ARM v3 (previous ARM
+ * architecture versions used an optional off-CPU memory controller
+ * to perform address translation).
+ *
+ * The ARM MMU consists of a TLB and translation table walking logic.
+ * There is typically one TLB per memory interface (or, put another
+ * way, one TLB per software-visible cache).
+ *
+ * The ARM MMU is capable of mapping memory in the following chunks:
+ *
+ * 1M Sections (L1 table)
+ *
+ * 64K Large Pages (L2 table)
+ *
+ * 4K Small Pages (L2 table)
+ *
+ * 1K Tiny Pages (L2 table)
+ *
+ * There are two types of L2 tables: Coarse Tables and Fine Tables.
+ * Coarse Tables can map Large and Small Pages. Fine Tables can
+ * map Tiny Pages.
+ *
+ * Coarse Tables can define 4 Subpages within Large and Small pages.
+ * Subpages define different permissions for each Subpage within
+ * a Page.
+ *
+ * Coarse Tables are 1K in length. Fine tables are 4K in length.
+ *
+ * The Translation Table Base register holds the pointer to the
+ * L1 Table. The L1 Table is a 16K contiguous chunk of memory
+ * aligned to a 16K boundary. Each entry in the L1 Table maps
+ * 1M of virtual address space, either via a Section mapping or
+ * via an L2 Table.
+ *
+ * In addition, the Fast Context Switching Extension (FCSE) is available
+ * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating
+ * TLB/cache flushes on context switch by use of a smaller address space
+ * and a "process ID" that modifies the virtual address before being
+ * presented to the translation logic.
+ */
+
+#define L1_S_SIZE 0x00100000 /* 1M */
+#define L1_S_OFFSET (L1_S_SIZE - 1)
+#define L1_S_FRAME (~L1_S_OFFSET)
+#define L1_S_SHIFT 20
+
+#define L2_L_SIZE 0x00010000 /* 64K */
+#define L2_L_OFFSET (L2_L_SIZE - 1)
+#define L2_L_FRAME (~L2_L_OFFSET)
+#define L2_L_SHIFT 16
+
+#define L2_S_SIZE 0x00001000 /* 4K */
+#define L2_S_OFFSET (L2_S_SIZE - 1)
+#define L2_S_FRAME (~L2_S_OFFSET)
+#define L2_S_SHIFT 12
+
+#define L2_T_SIZE 0x00000400 /* 1K */
+#define L2_T_OFFSET (L2_T_SIZE - 1)
+#define L2_T_FRAME (~L2_T_OFFSET)
+#define L2_T_SHIFT 10
+
+/*
+ * The NetBSD VM implementation only works on whole pages (4K),
+ * whereas the ARM MMU's Coarse tables are sized in terms of 1K
+ * (16K L1 table, 1K L2 table).
+ *
+ * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2
+ * table.
+ */
+#define L1_ADDR_BITS 0xfff00000 /* L1 PTE address bits */
+#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */
+
+#define L1_TABLE_SIZE 0x4000 /* 16K */
+#define L2_TABLE_SIZE 0x1000 /* 4K */
+/*
+ * The new pmap deals with the 1KB coarse L2 tables by
+ * allocating them from a pool. Until every port has been converted,
+ * keep the old L2_TABLE_SIZE define lying around. Converted ports
+ * should use L2_TABLE_SIZE_REAL until then.
+ */
+#define L2_TABLE_SIZE_REAL 0x400 /* 1K */
+
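A stand-alone check of how a 32-bit VA splits across the two levels under these masks (constants restated locally so it compiles anywhere):

    #include <assert.h>
    #include <stdint.h>

    #define L1_ADDR_BITS 0xfff00000u  /* top 12 bits: L1 table index */
    #define L2_ADDR_BITS 0x000ff000u  /* next 8 bits: index into a 4K L2 table */
    #define L1_S_SHIFT   20
    #define L2_S_SHIFT   12

    int
    main(void)
    {
            uint32_t va = 0xc0123456;

            assert((va & L1_ADDR_BITS) >> L1_S_SHIFT == 0xc01); /* L1 index */
            assert((va & L2_ADDR_BITS) >> L2_S_SHIFT == 0x23);  /* L2 index */
            assert((va & 0xfff) == 0x456);                      /* page offset */
            return (0);
    }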
+/*
+ * ARM L1 Descriptors
+ */
+
+#define L1_TYPE_INV 0x00 /* Invalid (fault) */
+#define L1_TYPE_C 0x01 /* Coarse L2 */
+#define L1_TYPE_S 0x02 /* Section */
+#define L1_TYPE_F 0x03 /* Fine L2 */
+#define L1_TYPE_MASK 0x03 /* mask of type bits */
+
+/* L1 Section Descriptor */
+#define L1_S_B 0x00000004 /* bufferable Section */
+#define L1_S_C 0x00000008 /* cacheable Section */
+#define L1_S_IMP 0x00000010 /* implementation defined */
+#define L1_S_DOM(x) ((x) << 5) /* domain */
+#define L1_S_DOM_MASK L1_S_DOM(0xf)
+#define L1_S_AP(x) ((x) << 10) /* access permissions */
+#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */
+
+#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */
+#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */
+
+/* L1 Coarse Descriptor */
+#define L1_C_IMP0 0x00000004 /* implementation defined */
+#define L1_C_IMP1 0x00000008 /* implementation defined */
+#define L1_C_IMP2 0x00000010 /* implementation defined */
+#define L1_C_DOM(x) ((x) << 5) /* domain */
+#define L1_C_DOM_MASK L1_C_DOM(0xf)
+#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */
+
+#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */
+
+/* L1 Fine Descriptor */
+#define L1_F_IMP0 0x00000004 /* implementation defined */
+#define L1_F_IMP1 0x00000008 /* implementation defined */
+#define L1_F_IMP2 0x00000010 /* implementation defined */
+#define L1_F_DOM(x) ((x) << 5) /* domain */
+#define L1_F_DOM_MASK L1_F_DOM(0xf)
+#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */
+
+#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */
+
+/*
+ * ARM L2 Descriptors
+ */
+
+#define L2_TYPE_INV 0x00 /* Invalid (fault) */
+#define L2_TYPE_L 0x01 /* Large Page */
+#define L2_TYPE_S 0x02 /* Small Page */
+#define L2_TYPE_T 0x03 /* Tiny Page */
+#define L2_TYPE_MASK 0x03 /* mask of type bits */
+
+ /*
+ * This L2 Descriptor type is available on XScale processors
+ * when using a Coarse L1 Descriptor. The Extended Small
+ * Descriptor has the same format as the XScale Tiny Descriptor,
+ * but describes a 4K page, rather than a 1K page.
+ */
+#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */
+
+#define L2_B 0x00000004 /* Bufferable page */
+#define L2_C 0x00000008 /* Cacheable page */
+#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */
+#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */
+#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */
+#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */
+#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
+
+#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */
+#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */
+
+/*
+ * Access Permissions for L1 and L2 Descriptors.
+ */
+#define AP_W 0x01 /* writable */
+#define AP_U 0x02 /* user */
+
+/*
+ * Short-hand for common AP_* constants.
+ *
+ * Note: These values assume the S (System) bit is set and
+ * the R (ROM) bit is clear in CP15 register 1.
+ */
+#define AP_KR 0x00 /* kernel read */
+#define AP_KRW 0x01 /* kernel read/write */
+#define AP_KRWUR 0x02 /* kernel read/write usr read */
+#define AP_KRWURW 0x03 /* kernel read/write usr read/write */
+
+/*
+ * Domain Types for the Domain Access Control Register.
+ */
+#define DOMAIN_FAULT 0x00 /* no access */
+#define DOMAIN_CLIENT 0x01 /* client */
+#define DOMAIN_RESERVED 0x02 /* reserved */
+#define DOMAIN_MANAGER 0x03 /* manager */
+
+/*
+ * Type Extension bits for XScale processors.
+ *
+ * Behavior of C and B when X == 0:
+ *
+ * C B Cacheable Bufferable Write Policy Line Allocate Policy
+ * 0 0 N N - -
+ * 0 1 N Y - -
+ * 1 0 Y Y Write-through Read Allocate
+ * 1 1 Y Y Write-back Read Allocate
+ *
+ * Behavior of C and B when X == 1:
+ * C B Cacheable Bufferable Write Policy Line Allocate Policy
+ * 0 0 - - - - DO NOT USE
+ * 0 1 N Y - -
+ * 1 0 Mini-Data - - -
+ * 1 1 Y Y Write-back R/W Allocate
+ */
+#define TEX_XSCALE_X 0x01 /* X modifies C and B */
+#endif /* !_MACHINE_PTE_H_ */
+
+/* End of pte.h */
diff --git a/sys/arm/include/ptrace.h b/sys/arm/include/ptrace.h
new file mode 100644
index 0000000..d98aa14
--- /dev/null
+++ b/sys/arm/include/ptrace.h
@@ -0,0 +1,8 @@
+/* $NetBSD: ptrace.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_PTRACE_H_
+#define _MACHINE_PTRACE_H_
+
+#endif /* !_MACHINE_PTRACE_H_ */
+
diff --git a/sys/arm/include/reg.h b/sys/arm/include/reg.h
new file mode 100644
index 0000000..7cf6d21
--- /dev/null
+++ b/sys/arm/include/reg.h
@@ -0,0 +1,32 @@
+/* $NetBSD: reg.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */
+/* $FreeBSD$ */
+#ifndef MACHINE_REG_H
+#define MACHINE_REG_H
+
+#include <machine/fp.h>
+
+struct reg {
+ unsigned int r[13];
+ unsigned int r_sp;
+ unsigned int r_lr;
+ unsigned int r_pc;
+ unsigned int r_cpsr;
+};
+
+struct fpreg {
+ unsigned int fpr_fpsr;
+ fp_reg_t fpr[8];
+};
+
+struct dbreg {
+ unsigned int dr[8]; /* debug registers */
+};
+
+int fill_regs(struct thread *, struct reg *);
+int set_regs(struct thread *, struct reg *);
+int fill_fpregs(struct thread *, struct fpreg *);
+int set_fpregs(struct thread *, struct fpreg *);
+int fill_dbregs(struct thread *, struct dbreg *);
+int set_dbregs(struct thread *, struct dbreg *);
+
+#endif /* !MACHINE_REG_H */
diff --git a/sys/arm/include/reloc.h b/sys/arm/include/reloc.h
new file mode 100644
index 0000000..df4a126
--- /dev/null
+++ b/sys/arm/include/reloc.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)reloc.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RELOC_H_
+#define _MACHINE_RELOC_H_
+
+/* Relocation format. */
+struct relocation_info {
+ int r_address; /* offset in text or data segment */
+ unsigned int r_symbolnum : 24, /* ordinal number of add symbol */
+ r_pcrel : 1, /* 1 if value should be pc-relative */
+ r_length : 2, /* log base 2 of value's width */
+ r_extern : 1, /* 1 if need to add symbol to value */
+ r_baserel : 1, /* linkage table relative */
+ r_jmptable : 1, /* relocate to jump table */
+ r_relative : 1, /* load address relative */
+ r_copy : 1; /* run time copy */
+};
+
+#endif
diff --git a/sys/arm/include/resource.h b/sys/arm/include/resource.h
new file mode 100644
index 0000000..783a1c4
--- /dev/null
+++ b/sys/arm/include/resource.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define _MACHINE_RESOURCE_H_ 1
+
+/*
+ * Definitions of resource types for machines
+ * with support for legacy ISA devices and drivers.
+ */
+
+#define SYS_RES_IRQ 1 /* interrupt lines */
+#define SYS_RES_DRQ 2 /* isa dma lines */
+#define SYS_RES_MEMORY 3 /* i/o memory */
+#define SYS_RES_IOPORT 4 /* i/o ports */
+
+#endif /* !_MACHINE_RESOURCE_H_ */
diff --git a/sys/arm/include/runq.h b/sys/arm/include/runq.h
new file mode 100644
index 0000000..2a21bfa
--- /dev/null
+++ b/sys/arm/include/runq.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RUNQ_H_
+#define _MACHINE_RUNQ_H_
+
+#define RQB_LEN (2) /* Number of priority status words. */
+#define RQB_L2BPW (5) /* Log2(sizeof(rqb_word_t) * NBBY). */
+#define RQB_BPW (1<<RQB_L2BPW) /* Bits in an rqb_word_t. */
+
+#define RQB_BIT(pri) (1 << ((pri) & (RQB_BPW - 1)))
+#define RQB_WORD(pri) ((pri) >> RQB_L2BPW)
+
+#define RQB_FFS(word) (ffs(word) - 1)
+
+/*
+ * Type of run queue status word.
+ */
+typedef u_int32_t rqb_word_t;
+
+#endif
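The two RQB_* index macros just split a priority into a status-word number and a bit within that word; a stand-alone check with the constants restated locally:

    #include <assert.h>

    #define RQB_L2BPW     5
    #define RQB_BPW       (1 << RQB_L2BPW)
    #define RQB_BIT(pri)  (1 << ((pri) & (RQB_BPW - 1)))
    #define RQB_WORD(pri) ((pri) >> RQB_L2BPW)

    int
    main(void)
    {
            /* priority 37 lives in status word 1, bit 5 */
            assert(RQB_WORD(37) == 1);
            assert(RQB_BIT(37) == (1 << 5));
            return (0);
    }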
diff --git a/sys/arm/include/setjmp.h b/sys/arm/include/setjmp.h
new file mode 100644
index 0000000..79c1330
--- /dev/null
+++ b/sys/arm/include/setjmp.h
@@ -0,0 +1,93 @@
+/* $NetBSD: setjmp.h,v 1.2 2001/08/25 14:45:59 bjh21 Exp $ */
+/* $FreeBSD$ */
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+#ifdef __ELF__
+#define _JBLEN 64 /* size, in longs, of a jmp_buf */
+#else
+#define _JBLEN 29 /* size, in longs, of a jmp_buf */
+#endif
+
+/*
+ * NOTE: The internal structure of a jmp_buf is *PRIVATE*.
+ * This information is provided as there is software
+ * that fiddles with this to obtain the stack pointer
+ * (yes, really! and it's commercial!).
+ *
+ * Description of the setjmp buffer
+ *
+ * word 0 magic number (dependent on creator)
+ * 1 - 3 f4 fp register 4
+ * 4 - 6 f5 fp register 5
+ * 7 - 9 f6 fp register 6
+ * 10 - 12 f7 fp register 7
+ * 13 fpsr fp status register
+ * 14 r4 register 4
+ * 15 r5 register 5
+ * 16 r6 register 6
+ * 17 r7 register 7
+ * 18 r8 register 8
+ * 19 r9 register 9
+ * 20 r10 register 10 (sl)
+ * 21 r11 register 11 (fp)
+ * 22 r12 register 12 (ip)
+ * 23 r13 register 13 (sp)
+ * 24 r14 register 14 (lr)
+ * 25 signal mask (dependent on magic)
+ * 26 (con't)
+ * 27 (con't)
+ * 28 (con't)
+ *
+ * The magic number identifies the jmp_buf and
+ * how the buffer was created, as well as providing
+ * a sanity check.
+ *
+ * A side note I should mention - please do not tamper
+ * with the floating point fields.  While they are
+ * always saved and restored at the moment, this cannot
+ * be guaranteed, especially if the compiler happens
+ * to be generating soft-float code, in which case no fp
+ * registers will be used.
+ *
+ * Whilst this can be seen as encouraging people to
+ * use the setjmp buffer in this way, I think it is
+ * for the best: then, if changes occur, compiles will
+ * break rather than new builds just falling over
+ * mysteriously.
+ */
+
+#define _JB_MAGIC__SETJMP 0x4278f500
+#define _JB_MAGIC_SETJMP 0x4278f501
+
+/* Valid for all jmp_buf's */
+
+#define _JB_MAGIC 0
+#define _JB_REG_F4 1
+#define _JB_REG_F5 4
+#define _JB_REG_F6 7
+#define _JB_REG_F7 10
+#define _JB_REG_FPSR 13
+#define _JB_REG_R4 14
+#define _JB_REG_R5 15
+#define _JB_REG_R6 16
+#define _JB_REG_R7 17
+#define _JB_REG_R8 18
+#define _JB_REG_R9 19
+#define _JB_REG_R10 20
+#define _JB_REG_R11 21
+#define _JB_REG_R12 22
+#define _JB_REG_R13 23
+#define _JB_REG_R14 24
+
+/* Only valid with the _JB_MAGIC_SETJMP magic */
+
+#define _JB_SIGMASK 25
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XSI_VISIBLE
+typedef struct _sigjmp_buf { int _sjb[_JBLEN + 1]; } sigjmp_buf[1];
+#endif
+
+typedef struct _jmp_buf { int _jb[_JBLEN + 1]; } jmp_buf[1];
+
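Given the layout documented above, the (discouraged) stack-pointer trick the comment alludes to would look like the sketch below; it peeks at the private _JB_REG_R13 slot, so it is only meaningful against this ARM jmp_buf layout:

    #include <setjmp.h>
    #include <stdio.h>

    int
    main(void)
    {
            jmp_buf env;

            if (setjmp(env) == 0) {
                    /* fragile: depends on the private jmp_buf layout */
                    printf("saved sp = %#x\n",
                        (unsigned)((int *)env)[23]);    /* _JB_REG_R13 */
            }
            return (0);
    }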
diff --git a/sys/arm/include/sf_buf.h b/sys/arm/include/sf_buf.h
new file mode 100644
index 0000000..7d438f0
--- /dev/null
+++ b/sys/arm/include/sf_buf.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SF_BUF_H_
+#define _MACHINE_SF_BUF_H_
+
+#include <sys/queue.h>
+
+struct vm_page;
+
+struct sf_buf {
+ LIST_ENTRY(sf_buf) list_entry; /* list of buffers */
+ TAILQ_ENTRY(sf_buf) free_entry; /* list of buffers */
+ struct vm_page *m; /* currently mapped page */
+ vm_offset_t kva; /* va of mapping */
+ int ref_count; /* usage of this mapping */
+};
+
+static __inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+
+ return (sf->kva);
+}
+
+static __inline struct vm_page *
+sf_buf_page(struct sf_buf *sf)
+{
+
+ return (sf->m);
+}
+
+#endif /* !_MACHINE_SF_BUF_H_ */
diff --git a/sys/arm/include/sigframe.h b/sys/arm/include/sigframe.h
new file mode 100644
index 0000000..9787f57
--- /dev/null
+++ b/sys/arm/include/sigframe.h
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include <machine/frame.h>
diff --git a/sys/arm/include/smp.h b/sys/arm/include/smp.h
new file mode 100644
index 0000000..ca707e3
--- /dev/null
+++ b/sys/arm/include/smp.h
@@ -0,0 +1,6 @@
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_SMP_H_
+#define _MACHINE_SMP_H_
+
+#endif /* !_MACHINE_SMP_H_ */
diff --git a/sys/arm/include/stdarg.h b/sys/arm/include/stdarg.h
new file mode 100644
index 0000000..128fb33
--- /dev/null
+++ b/sys/arm/include/stdarg.h
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (c) 2002 David E. O'Brien. All rights reserved.
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdarg.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define _MACHINE_STDARG_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+#ifndef _VA_LIST_DECLARED
+#define _VA_LIST_DECLARED
+typedef __va_list va_list;
+#endif
+
+#if defined(__GNUC__) && (__GNUC__ == 2 && __GNUC_MINOR__ > 95 || __GNUC__ >= 3)
+
+#define va_start(ap, last) \
+ __builtin_stdarg_start((ap), (last))
+
+#define va_arg(ap, type) \
+ __builtin_va_arg((ap), type)
+
+#if __ISO_C_VISIBLE >= 1999
+#define va_copy(dest, src) \
+ __builtin_va_copy((dest), (src))
+#endif
+
+#define va_end(ap) \
+ __builtin_va_end(ap)
+
+#else /* ! __GNUC__ post GCC 2.95 */
+
+#define __va_size(type) \
+ (((sizeof(type) + sizeof(int) - 1) / sizeof(int)) * sizeof(int))
+
+#ifdef __GNUC__
+#define va_start(ap, last) \
+ ((ap) = (va_list)__builtin_next_arg(last))
+#else /* non-GNU compiler */
+#define va_start(ap, last) \
+ ((ap) = (va_list)&(last) + __va_size(last))
+#endif /* __GNUC__ */
+
+#define va_arg(ap, type) \
+ (*(type *)((ap) += __va_size(type), (ap) - __va_size(type)))
+
+#define va_end(ap)
+
+#endif /* __GNUC__ post GCC 2.95 */
+
+#endif /* !_MACHINE_STDARG_H_ */
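A minimal consumer of the va_* macros above; nothing here is ARM-specific, so the same code works against any <stdarg.h>:

    #include <stdarg.h>
    #include <stdio.h>

    /* sum "count" int arguments using the va_* interface */
    static int
    sum(int count, ...)
    {
            va_list ap;
            int i, total = 0;

            va_start(ap, count);
            for (i = 0; i < count; i++)
                    total += va_arg(ap, int);
            va_end(ap);
            return (total);
    }

    int
    main(void)
    {
            printf("%d\n", sum(3, 1, 2, 3));    /* prints 6 */
            return (0);
    }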
diff --git a/sys/arm/include/swi.h b/sys/arm/include/swi.h
new file mode 100644
index 0000000..fef56ea
--- /dev/null
+++ b/sys/arm/include/swi.h
@@ -0,0 +1,23 @@
+/* $NetBSD: swi.h,v 1.1 2002/01/13 15:03:06 bjh21 Exp $ */
+/* $FreeBSD$ */
+
+/*
+ * This file is in the Public Domain.
+ * Ben Harris, 2002.
+ */
+
+#ifndef _MACHINE_SWI_H_
+#define _MACHINE_SWI_H_
+
+#define SWI_OS_MASK 0xf00000
+#define SWI_OS_RISCOS 0x000000
+#define SWI_OS_RISCIX 0x800000
+#define SWI_OS_LINUX 0x900000
+#define SWI_OS_NETBSD 0xa00000
+#define SWI_OS_ARM 0xf00000
+
+#define SWI_IMB 0xf00000
+#define SWI_IMBrange 0xf00001
+
+#endif /* !_MACHINE_SWI_H_ */
+
diff --git a/sys/arm/include/trap.h b/sys/arm/include/trap.h
new file mode 100644
index 0000000..f8b28fa
--- /dev/null
+++ b/sys/arm/include/trap.h
@@ -0,0 +1,4 @@
+/* $NetBSD: trap.h,v 1.1 2001/02/23 03:48:19 ichiro Exp $ */
+/* $FreeBSD$ */
+
+#define KERNEL_BREAKPOINT 0xe7ffffff
diff --git a/sys/arm/include/ucontext.h b/sys/arm/include/ucontext.h
index d8411da..b1f1d63 100644
--- a/sys/arm/include/ucontext.h
+++ b/sys/arm/include/ucontext.h
@@ -1,10 +1,11 @@
-/*
- * Copyright (c) 2001 David O'Brien.
- * Copyright (c) 1994-1996 Mark Brinicombe.
- * Copyright (c) 1994 Brini.
+/* $NetBSD: mcontext.h,v 1.4 2003/10/08 22:43:01 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
- * This code is derived from software written for Brini by Mark Brinicombe
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein and by Jason R. Thorpe of Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -16,65 +17,99 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by Brini.
- * 4. The name of the company nor the name of the author may be used to
- * endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * RiscBSD kernel project
- *
- * signal.h
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * Architecture dependant signal types and structures
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*
- * Created : 30/09/94
- *
- * $NetBSD: signal.h,v 1.8 1998/09/14 02:48:33 thorpej Exp $
* $FreeBSD$
*/
-#ifndef _MACHINE_UCONTEXT_H_
-#define _MACHINE_UCONTEXT_H_
+#ifndef _MACHINE_MCONTEXT_H_
+#define _MACHINE_MCONTEXT_H_
+/*
+ * General register state
+ */
+#define _NGREG 17
+typedef unsigned int __greg_t;
+typedef __greg_t __gregset_t[_NGREG];
-typedef struct __mcontext {
- /*
- * The first 20 fields must match the definition of
- * sigcontext. So that we can support sigcontext
- * and ucontext_t at the same time.
- */
- unsigned int mc_onstack; /* XXX - sigcontext compat. */
- unsigned int mc_spsr;
- unsigned int mc_r0;
- unsigned int mc_r1;
- unsigned int mc_r2;
- unsigned int mc_r3;
- unsigned int mc_r4;
- unsigned int mc_r5;
- unsigned int mc_r6;
- unsigned int mc_r7;
- unsigned int mc_r8;
- unsigned int mc_r9;
- unsigned int mc_r10;
- unsigned int mc_r11;
- unsigned int mc_r12;
- unsigned int mc_usr_sp;
- unsigned int mc_usr_lr;
- unsigned int mc_svc_lr;
- unsigned int mc_pc;
+#define _REG_R0 0
+#define _REG_R1 1
+#define _REG_R2 2
+#define _REG_R3 3
+#define _REG_R4 4
+#define _REG_R5 5
+#define _REG_R6 6
+#define _REG_R7 7
+#define _REG_R8 8
+#define _REG_R9 9
+#define _REG_R10 10
+#define _REG_R11 11
+#define _REG_R12 12
+#define _REG_R13 13
+#define _REG_R14 14
+#define _REG_R15 15
+#define _REG_CPSR 16
+/* Convenience synonyms */
+#define _REG_FP _REG_R11
+#define _REG_SP _REG_R13
+#define _REG_LR _REG_R14
+#define _REG_PC _REG_R15
- unsigned int __spare__[1]; /* XXX fix the size later */
+/*
+ * Floating point register state
+ */
+/* Note: the storage layout of this structure must be identical to ARMFPE! */
+typedef struct {
+ unsigned int __fp_fpsr;
+ struct {
+ unsigned int __fp_exponent;
+ unsigned int __fp_mantissa_hi;
+ unsigned int __fp_mantissa_lo;
+ } __fp_fr[8];
+} __fpregset_t;
+
+typedef struct {
+ unsigned int __vfp_fpscr;
+ unsigned int __vfp_fstmx[33];
+ unsigned int __vfp_fpsid;
+} __vfpregset_t;
+
+typedef struct {
+ __gregset_t __gregs;
+ union {
+ __fpregset_t __fpregs;
+ __vfpregset_t __vfpregs;
+ } __fpu;
} mcontext_t;
-#endif /* !_MACHINE_UCONTEXT_H_ */
+/* Machine-dependent uc_flags */
+#define _UC_ARM_VFP 0x00010000 /* FPU field is VFP */
+
+/* used by signal delivery to indicate status of signal stack */
+#define _UC_SETSTACK 0x00020000
+#define _UC_CLRSTACK 0x00040000
+
+#define _UC_MACHINE_PAD 3 /* Padding appended to ucontext_t */
+
+#define _UC_MACHINE_SP(uc) ((uc)->uc_mcontext.__gregs[_REG_SP])
+#define _UC_MACHINE_PC(uc) ((uc)->uc_mcontext.__gregs[_REG_PC])
+#define _UC_MACHINE_INTRV(uc) ((uc)->uc_mcontext.__gregs[_REG_R0])
+
+#define _UC_MACHINE_SET_PC(uc, pc) _UC_MACHINE_PC(uc) = (pc)
+
+#endif /* !_MACHINE_MCONTEXT_H_ */
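[annotation] The accessor macros above treat the machine context as a flat register array, so machine-independent code can fetch the stack pointer, program counter, and syscall return value without knowing arm register names. A hedged userland sketch of the idea (assuming a libc that exposes this NetBSD-style layout; the handler itself is hypothetical):

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>

    static void
    segv_handler(int sig, siginfo_t *si, void *ctx)
    {
        ucontext_t *uc = ctx;

        /* _UC_MACHINE_PC/_UC_MACHINE_SP just index __gregs[]. */
        printf("fault at pc=%#x sp=%#x\n",
            (unsigned)_UC_MACHINE_PC(uc), (unsigned)_UC_MACHINE_SP(uc));
    }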
diff --git a/sys/arm/include/undefined.h b/sys/arm/include/undefined.h
new file mode 100644
index 0000000..4a0a136
--- /dev/null
+++ b/sys/arm/include/undefined.h
@@ -0,0 +1,90 @@
+/* $NetBSD: undefined.h,v 1.4 2001/12/20 01:20:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1995-1996 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * undefined.h
+ *
+ * Undefined instruction types, symbols and prototypes
+ *
+ * Created : 08/02/95
+ *
+ * $FreeBSD$
+ */
+
+
+#ifndef _MACHINE_UNDEFINED_H_
+#define _MACHINE_UNDEFINED_H_
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+typedef int (*undef_handler_t) __P((unsigned int, unsigned int, trapframe_t *, int));
+
+#define FP_COPROC 1
+#define FP_COPROC2 2
+#define MAX_COPROCS 16
+
+/* Prototypes for undefined.c */
+
+void *install_coproc_handler __P((int, undef_handler_t));
+void remove_coproc_handler __P((void *));
+void undefined_init __P((void));
+
+/*
+ * XXX Stuff below here is for use before malloc() is available. Most code
+ * shouldn't use it.
+ */
+
+struct undefined_handler {
+ LIST_ENTRY(undefined_handler) uh_link;
+ undef_handler_t uh_handler;
+};
+
+/*
+ * Handlers installed using install_coproc_handler_static shouldn't be
+ * removed.
+ */
+void install_coproc_handler_static __P((int, struct undefined_handler *));
+
+/* Calls up to undefined.c from trap handlers */
+void undefinedinstruction(struct trapframe *);
+
+#endif
+
+/* End of undefined.h */
+
+#endif /* _MACHINE_UNDEFINED_H_ */
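[annotation] install_coproc_handler() hangs a handler off the per-coprocessor list that undefinedinstruction() walks when an undefined instruction traps. In the NetBSD code this derives from, a handler returns 0 once it has dealt with the instruction and nonzero to let the next handler (and ultimately SIGILL delivery) run. A hedged sketch of registering one (handler body hypothetical):

    /* Claim nothing: every instruction falls through to the next handler. */
    static int
    my_fpa_handler(unsigned int addr, unsigned int insn,
        trapframe_t *frame, int fault_code)
    {
        return (1); /* not handled here */
    }

    static void *my_fpa_cookie;

    static void
    my_fpa_init(void)
    {
        my_fpa_cookie = install_coproc_handler(FP_COPROC, my_fpa_handler);
    }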
diff --git a/sys/arm/include/utrap.h b/sys/arm/include/utrap.h
new file mode 100644
index 0000000..38a40b0
--- /dev/null
+++ b/sys/arm/include/utrap.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_UTRAP_H_
+#define _MACHINE_UTRAP_H_
+
+#define UT_INSTRUCTION_EXCEPTION 1
+#define UT_INSTRUCTION_ERROR 2
+#define UT_INSTRUCTION_PROTECTION 3
+#define UT_ILLTRAP_INSTRUCTION 4
+#define UT_ILLEGAL_INSTRUCTION 5
+#define UT_PRIVILEGED_OPCODE 6
+#define UT_FP_DISABLED 7
+#define UT_FP_EXCEPTION_IEEE_754 8
+#define UT_FP_EXCEPTION_OTHER 9
+#define UT_TAG_OVERFLOW 10
+#define UT_DIVISION_BY_ZERO 11
+#define UT_DATA_EXCEPTION 12
+#define UT_DATA_ERROR 13
+#define UT_DATA_PROTECTION 14
+#define UT_MEM_ADDRESS_NOT_ALIGNED 15
+#define UT_PRIVILEGED_ACTION 16
+#define UT_ASYNC_DATA_ERROR 17
+#define UT_TRAP_INSTRUCTION_16 18
+#define UT_TRAP_INSTRUCTION_17 19
+#define UT_TRAP_INSTRUCTION_18 20
+#define UT_TRAP_INSTRUCTION_19 21
+#define UT_TRAP_INSTRUCTION_20 22
+#define UT_TRAP_INSTRUCTION_21 23
+#define UT_TRAP_INSTRUCTION_22 24
+#define UT_TRAP_INSTRUCTION_23 25
+#define UT_TRAP_INSTRUCTION_24 26
+#define UT_TRAP_INSTRUCTION_25 27
+#define UT_TRAP_INSTRUCTION_26 28
+#define UT_TRAP_INSTRUCTION_27 29
+#define UT_TRAP_INSTRUCTION_28 30
+#define UT_TRAP_INSTRUCTION_29 31
+#define UT_TRAP_INSTRUCTION_30 32
+#define UT_TRAP_INSTRUCTION_31 33
+#define UT_INSTRUCTION_MISS 34
+#define UT_DATA_MISS 35
+#define UT_MAX 36
+
+#define ST_SUNOS_SYSCALL 0
+#define ST_BREAKPOINT 1
+#define ST_DIVISION_BY_ZERO 2
+#define ST_FLUSH_WINDOWS 3 /* XXX implement! */
+#define ST_CLEAN_WINDOW 4
+#define ST_RANGE_CHECK 5
+#define ST_FIX_ALIGNMENT 6
+#define ST_INTEGER_OVERFLOW 7
+/* 8 is 32-bit ABI syscall (old solaris syscall?) */
+#define ST_BSD_SYSCALL 9
+#define ST_FP_RESTORE 10
+/* 11-15 are available */
+/* 16 is linux 32 bit syscall (but supposed to be reserved, grr) */
+/* 17 is old linux 64 bit syscall (but supposed to be reserved, grr) */
+/* 16-31 are reserved for user applications (utraps) */
+#define ST_GETCC 32 /* XXX implement! */
+#define ST_SETCC 33 /* XXX implement! */
+#define ST_GETPSR 34 /* XXX implement! */
+#define ST_SETPSR 35 /* XXX implement! */
+/* 36-63 are available */
+#define ST_SOLARIS_SYSCALL 64
+#define ST_SYSCALL 65
+#define ST_SYSCALL32 66
+/* 67 is reserved to OS source licensee */
+/* 68 is return from deferred trap (not supported) */
+/* 69-95 are reserved to SPARC international */
+/* 96-108 are available */
+/* 109 is linux 64 bit syscall */
+/* 110 is linux 64 bit getcontext (?) */
+/* 111 is linux 64 bit setcontext (?) */
+/* 112-255 are available */
+
+#define UTH_NOCHANGE (-1)
+
+#ifndef __ASM__
+
+typedef int utrap_entry_t;
+typedef void *utrap_handler_t;
+
+#endif
+
+#endif
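[annotation] These numbers follow the sparc64 utrap layout (the ST_* comments about register windows and %psr give away the header's origin): user trap handlers live in a UT_MAX-entry table indexed by trap type, and UTH_NOCHANGE leaves an existing entry alone when installing. A hedged sketch of that convention (table and function are hypothetical, not part of this header):

    static utrap_handler_t ut_table[UT_MAX];

    static int
    utrap_install(utrap_entry_t type, utrap_handler_t new,
        utrap_handler_t *oldp)
    {
        if (type < 0 || type >= UT_MAX)
            return (-1);
        if (oldp != NULL)
            *oldp = ut_table[type];
        if (new != (utrap_handler_t)UTH_NOCHANGE)
            ut_table[type] = new;
        return (0);
    }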
diff --git a/sys/arm/include/vmparam.h b/sys/arm/include/vmparam.h
new file mode 100644
index 0000000..c922e36
--- /dev/null
+++ b/sys/arm/include/vmparam.h
@@ -0,0 +1,129 @@
+/* $NetBSD: vmparam.h,v 1.26 2003/08/07 16:27:47 agc Exp $ */
+
+/*
+ * Copyright (c) 1988 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+
+
+/* #include <arm/arm32/vmparam.h> */
+/*
+ * Address space constants
+ */
+
+/*
+ * The line between user space and kernel space.
+ * Mappings >= KERNBASE are constant across all processes.
+ */
+#define KERNBASE 0xc0000000
+
+/*
+ * Override the default pager_map size, there's not enough KVA.
+ */
+/*
+ * Size of User Raw I/O map
+ */
+
+#define USRIOSIZE 300
+
+/* virtual sizes (bytes) for various kernel submaps */
+
+#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE)
+
+/*
+ * max number of non-contig chunks of physical RAM you can have
+ */
+
+#define VM_PHYSSEG_MAX 32
+
+/*
+ * when converting a physical address to a vm_page structure, we
+ * want to use a binary search on the chunks of physical memory
+ * to find our RAM
+ */
+
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+
+/*
+ * this indicates that we can't add RAM to the VM system after the
+ * vm system is init'd.
+ */
+
+#define VM_PHYSSEG_NOADD
+
+/*
+ * we support 2 free lists:
+ *
+ * - DEFAULT for all systems
+ * - ISADMA for the ISA DMA range on Sharks only
+ */
+
+#define VM_NFREELIST 2
+#define VM_FREELIST_DEFAULT 0
+#define VM_FREELIST_ISADMA 1
+
+#define UPT_MAX_ADDRESS VADDR(UPTPTDI + 3, 0)
+#define UPT_MIN_ADDRESS VADDR(UPTPTDI, 0)
+
+#define VM_MIN_ADDRESS (0x00001000)
+#define VM_MAXUSER_ADDRESS KERNBASE
+#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
+
+#define USRSTACK VM_MAXUSER_ADDRESS
+
+/* initial pagein size of beginning of executable file */
+#ifndef VM_INITIAL_PAGEIN
+#define VM_INITIAL_PAGEIN 16
+#endif
+
+#ifndef VM_MIN_KERNEL_ADDRESS
+#define VM_MIN_KERNEL_ADDRESS KERNBASE
+#endif
+
+#define VM_MAX_KERNEL_ADDRESS 0xffffffff
+/*
+ * Virtual size (bytes) for various kernel submaps.
+ */
+#ifndef VM_KMEM_SIZE
+#define VM_KMEM_SIZE (12*1024*1024)
+#endif
+
+#define MAXTSIZ (16*1024*1024)
+#define DFLDSIZ (128*1024*1024)
+#define MAXDSIZ (512*1024*1024)
+#define DFLSSIZ (2*1024*1024)
+#define MAXSSIZ (8*1024*1024)
+#define SGROWSIZ (128*1024)
+#define MAXSLP 20
+
+#endif /* _MACHINE_VMPARAM_H_ */
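[annotation] With the constants above, the 4GB address space splits at KERNBASE: user mappings run from VM_MIN_ADDRESS up to (but not including) VM_MAXUSER_ADDRESS, and everything at or above KERNBASE is shared kernel space. A minimal sketch of the range check this implies (helper name hypothetical):

    /* A va is a valid user address iff it lies below the user/kernel
     * split and above the unmapped page at virtual zero. */
    static __inline int
    is_user_va(vm_offset_t va)
    {
        return (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS);
    }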
diff --git a/sys/arm/sa11x0/assabet_machdep.c b/sys/arm/sa11x0/assabet_machdep.c
new file mode 100644
index 0000000..269e7e4
--- /dev/null
+++ b/sys/arm/sa11x0/assabet_machdep.c
@@ -0,0 +1,497 @@
+/* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * machdep.c
+ *
+ * Machine dependent functions for kernel setup
+ *
+ * This file needs a lot of work.
+ *
+ * Created : 17/09/94
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define _ARM32_BUS_DMA_PRIVATE
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/signalvar.h>
+#include <sys/imgact.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/cons.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/buf.h>
+#include <sys/exec.h>
+#include <machine/reg.h>
+#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_map.h>
+#include <vm/vnode_pager.h>
+#include <machine/pmap.h>
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <machine/undefined.h>
+#include <machine/machdep.h>
+#include <machine/metadata.h>
+#include <machine/armreg.h>
+#include <machine/bus.h>
+#include <sys/reboot.h>
+
+#define MDROOT_ADDR 0xd0400000
+
+#define KERNEL_PT_VMEM 0 /* Page table for mapping video memory */
+#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */
+#define KERNEL_PT_IO 3 /* Page table for mapping IO */
+#define KERNEL_PT_IRQ 2 /* Page table for mapping irq handler */
+#define KERNEL_PT_KERNEL 1 /* Page table for mapping kernel */
+#define KERNEL_PT_VMDATA 4 /* Page tables for mapping kernel VM */
+#define KERNEL_PT_VMDATA_NUM 12 /* start with 48MB of KVM */
+#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
+
+/* Define various stack sizes in pages */
+#define IRQ_STACK_SIZE 1
+#define ABT_STACK_SIZE 1
+#ifdef IPKDB
+#define UND_STACK_SIZE 2
+#else
+#define UND_STACK_SIZE 1
+#endif
+#define KERNEL_VM_BASE (KERNBASE + 0x00c00000)
+#define KERNEL_VM_SIZE 0x05000000
+
+extern u_int data_abort_handler_address;
+extern u_int prefetch_abort_handler_address;
+extern u_int undefined_handler_address;
+
+struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
+
+extern void *_end;
+
+int got_mmu = 0;
+
+extern vm_offset_t sa1_cache_clean_addr;
+
+extern int *end;
+
+struct pcpu __pcpu;
+struct pcpu *pcpup = &__pcpu;
+
+#define MDSIZE 8192
+/* Physical and virtual addresses for some global pages */
+
+vm_paddr_t phys_avail[10];
+vm_paddr_t physical_start;
+vm_paddr_t physical_end;
+vm_paddr_t physical_freestart;
+vm_offset_t physical_pages;
+vm_offset_t clean_sva, clean_eva;
+
+struct pv_addr systempage;
+struct pv_addr irqstack;
+struct pv_addr undstack;
+struct pv_addr abtstack;
+struct pv_addr kernelstack;
+void enable_mmu(vm_offset_t);
+static struct trapframe proc0_tf;
+
+struct arm32_dma_range *
+bus_dma_get_range(void)
+{
+
+ return (NULL);
+}
+
+#define CPU_SA110_CACHE_CLEAN_SIZE (0x4000 * 2)
+
+void *
+initarm(void *arg, void *arg2)
+{
+ struct pcpu *pc;
+ struct pv_addr kernel_l1pt;
+ struct pv_addr proc0_uarea;
+ struct pv_addr md_addr;
+ struct pv_addr md_bla;
+ int loop;
+ u_int kerneldatasize, symbolsize;
+ u_int l1pagetable;
+ vm_offset_t freemempos;
+ vm_size_t pt_size;
+ int i = 0;
+ uint32_t fake_preload[35];
+
+ boothowto = RB_VERBOSE | RB_SINGLE;
+ cninit();
+ set_cpufuncs();
+ fake_preload[i++] = MODINFO_NAME;
+ fake_preload[i++] = strlen("elf kernel") + 1;
+ strcpy((char*)&fake_preload[i++], "elf kernel");
+ i += 2;
+ fake_preload[i++] = MODINFO_TYPE;
+ fake_preload[i++] = strlen("elf kernel") + 1;
+ strcpy((char*)&fake_preload[i++], "elf kernel");
+ i += 2;
+ fake_preload[i++] = MODINFO_ADDR;
+ fake_preload[i++] = sizeof(vm_offset_t);
+ fake_preload[i++] = KERNBASE;
+ fake_preload[i++] = MODINFO_SIZE;
+ fake_preload[i++] = sizeof(uint32_t);
+ fake_preload[i++] = (uint32_t)&end - KERNBASE;
+ fake_preload[i++] = MODINFO_NAME;
+ fake_preload[i++] = strlen("md root") + 1;
+ strcpy((char*)&fake_preload[i++], "md root");
+ i += 1;
+ fake_preload[i++] = MODINFO_TYPE;
+ fake_preload[i++] = strlen("md_image") + 1;
+ strcpy((char*)&fake_preload[i++], "md_image");
+ i += 2;
+ fake_preload[i++] = MODINFO_ADDR;
+ fake_preload[i++] = sizeof(uint32_t);
+ fake_preload[i++] = MDROOT_ADDR;
+ fake_preload[i++] = MODINFO_SIZE;
+ fake_preload[i++] = sizeof(uint32_t);
+ fake_preload[i++] = MDSIZE * 1024;
+ fake_preload[i++] = 0;
+ fake_preload[i] = 0;
+ preload_metadata = (void *)fake_preload;
+
+ physmem = (16 * 1024 * 1024) / PAGE_SIZE;
+ pc = &__pcpu;
+ pcpu_init(pc, 0, sizeof(struct pcpu));
+ PCPU_SET(curthread, &thread0);
+
+ physical_start = (vm_offset_t) KERNBASE;
+ physical_end = (vm_offset_t) &end;
+ physical_freestart = (((vm_offset_t)physical_end) + PAGE_MASK) & ~PAGE_MASK;
+ md_addr.pv_va = md_addr.pv_pa = MDROOT_ADDR;
+#define KERNEL_TEXT_BASE (KERNBASE + 0x00040000)
+ kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;
+ symbolsize = 0;
+ freemempos = (vm_offset_t)round_page(physical_freestart);
+ memset((void *)freemempos, 0, 256*1024);
+ /* Define macros to simplify memory allocation */
+#define valloc_pages(var, np) \
+ alloc_pages((var).pv_pa, (np)); \
+ (var).pv_va = (var).pv_pa;
+
+#define alloc_pages(var, np) \
+ (var) = freemempos; \
+ freemempos += ((np) * PAGE_SIZE);\
+ memset((char *)(var), 0, ((np) * PAGE_SIZE));
+
+ while ((freemempos & (L1_TABLE_SIZE - 1)) != 0)
+ freemempos += PAGE_SIZE;
+ valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
+ valloc_pages(md_bla, L2_TABLE_SIZE / PAGE_SIZE);
+ alloc_pages(sa1_cache_clean_addr, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE);
+ for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
+ valloc_pages(kernel_pt_table[loop],
+ L2_TABLE_SIZE / PAGE_SIZE);
+ }
+
+ /*
+ * Allocate a page for the system page mapped to V0x00000000.
+ * This page will just contain the system vectors and can be
+ * shared by all processes.
+ */
+ valloc_pages(systempage, 1);
+
+ pt_size = round_page(freemempos) - physical_freestart;
+
+ /* Allocate stacks for all modes */
+ valloc_pages(irqstack, IRQ_STACK_SIZE);
+ valloc_pages(abtstack, ABT_STACK_SIZE);
+ valloc_pages(undstack, UND_STACK_SIZE);
+ valloc_pages(kernelstack, KSTACK_PAGES);
+
+
+#ifdef VERBOSE_INIT_ARM
+ printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
+ irqstack.pv_va);
+ printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
+ abtstack.pv_va);
+ printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
+ undstack.pv_va);
+ printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
+ kernelstack.pv_va);
+#endif
+ /*
+ * Allocate memory for the l1 and l2 page tables. The scheme to avoid
+ * wasting memory by allocating the l1pt on the first 16k memory was
+ * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
+ * this to work (which is supposed to be the case).
+ */
+
+ /* Allocate pages for process 0 kernel stack and uarea */
+ valloc_pages(proc0_uarea, UAREA_PAGES);
+
+ /*
+ * Now we start construction of the L1 page table
+ * We start by mapping the L2 page tables into the L1.
+ * This means that we can replace L1 mappings later on if necessary
+ */
+ l1pagetable = kernel_l1pt.pv_pa;
+
+
+ /* XXX bla */
+#if 0
+ bcopy((void*)0xd0300000, &mfs_root, MD_ROOT_SIZE*1024);
+#endif
+ /* Map the L2 pages tables in the L1 page table */
+ pmap_link_l2pt(l1pagetable, 0x00000000,
+ &kernel_pt_table[KERNEL_PT_SYS]);
+ pmap_link_l2pt(l1pagetable, KERNBASE,
+ &kernel_pt_table[KERNEL_PT_KERNEL]);
+ pmap_link_l2pt(l1pagetable, 0xd0000000,
+ &kernel_pt_table[KERNEL_PT_IO]);
+ pmap_link_l2pt(l1pagetable, 0x90000000, &kernel_pt_table[KERNEL_PT_IRQ]);
+ pmap_link_l2pt(l1pagetable, MDROOT_ADDR,
+ &md_bla);
+ for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
+ pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+ &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+ pmap_map_chunk(l1pagetable, KERNBASE, KERNBASE,
+ (uint32_t)&end - KERNBASE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ /* Map the stack pages */
+ pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
+ IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, md_addr.pv_va, md_addr.pv_pa,
+ MDSIZE * 1024, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
+ ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
+ UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+ KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, proc0_uarea.pv_va, proc0_uarea.pv_pa,
+ UAREA_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+
+
+ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+ L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
+
+ for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
+ pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
+ kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
+ }
+ pmap_map_chunk(l1pagetable, md_bla.pv_va, md_bla.pv_pa, L2_TABLE_SIZE,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
+ /* Map the vector page. */
+ pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ /* Map SACOM3. */
+ pmap_map_entry(l1pagetable, 0xd000d000, 0x80010000,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
+ pmap_map_entry(l1pagetable, 0x90050000, 0x90050000,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
+ pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xf0000000,
+ CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+
+ data_abort_handler_address = (u_int)data_abort_handler;
+ prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
+ undefined_handler_address = (u_int)undefinedinstruction_bounce;
+ undefined_init();
+ cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
+ setttb(kernel_l1pt.pv_pa);
+ cpu_tlb_flushID();
+ cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
+
+ /*
+ * Pages were allocated during the secondary bootstrap for the
+ * stacks for different CPU modes.
+ * We must now set the r13 registers in the different CPU modes to
+ * point to these stacks.
+ * Since the ARM stacks use STMFD etc. we must set r13 to the top end
+ * of the stack memory.
+ */
+ printf("init subsystems: stacks\n");
+
+ set_stackptr(PSR_IRQ32_MODE,
+ irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
+ set_stackptr(PSR_ABT32_MODE,
+ abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
+ set_stackptr(PSR_UND32_MODE,
+ undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
+
+
+
+ /*
+ * We must now clean the cache again.
+ * Cleaning may be done by reading new data to displace any
+ * dirty data in the cache. This will have happened in setttb(),
+ * but since we are bootstrapping, the addresses used for the read
+ * may have just been remapped and thus the cache could be out
+ * of sync. A re-clean after the switch will cure this.
+ * After booting there are no gross relocations of the kernel, so
+ * this problem will not occur after initarm().
+ */
+/* cpu_idcache_wbinv_all();*/
+
+
+ bootverbose = 1;
+
+#if 0
+ set_cpufuncs();
+#endif
+ /* Set stack for exception handlers */
+
+#if 0
+ printf("arm_init: physical_pages = %08x\n", physical_pages);
+ printf("arm_init: kernel_l1pt: pa = %08x, va = %08x\n",
+ kernel_l1pt.pv_pa, kernel_l1pt.pv_va);
+ printf("arm_init: proc0_uarea: pa = %08x, va = %08x\n",
+ proc0_uarea.pv_pa, proc0_uarea.pv_va);
+ printf("arm_init: proc0_kstack: pa = %08x, va = %08x\n",
+ proc0_kstack.pv_pa, proc0_kstack.pv_va);
+#endif
+
+/* printf("arm_init: physfree = %08x\n", physical_freestart);
+ printf("arm_init: first = %08x\n", first);
+ printf("arm_init: end = %08x\n", (uint32_t) &end);
+
+ printf("arm_init: params = %08x\n", params);
+ printf("arm_init: params: page_size = %08x\n", params->u1.s.page_size);
+ printf("arm_init: params: nrpages = %08x\n", params->u1.s.nr_pages);
+ printf("arm_init: params: ramdisk_size = %08x\n", params->u1.s.ramdisk_size);
+ printf("arm_init: params: flags = %08x\n", params->u1.s.flags);
+ printf("arm_init: params: rootdev = %08x\n", params->u1.s.rootdev);
+ printf("arm_init: params: video_num_cols = %08x\n", params->u1.s.video_num_cols);
+ printf("arm_init: params: video_num_rows = %08x\n", params->u1.s.video_num_rows);
+ printf("arm_init: params: video_x = %08x\n", params->u1.s.video_x);
+ printf("arm_init: params: video_y = %08x\n", params->u1.s.video_y);
+ printf("arm_init: params: memc_control_reg = %08x\n", params->u1.s.memc_control_reg);
+ printf("arm_init: params: sounddefault = %02x\n", params->u1.s.sounddefault);
+ printf("arm_init: params: adfsdrives = %02x\n", params->u1.s.adfsdrives);
+ printf("arm_init: params: bytes_per_char_h = %02x\n", params->u1.s.bytes_per_char_h);
+ printf("arm_init: params: bytes_per_char_v = %02x\n", params->u1.s.bytes_per_char_v);
+ for(i = 0; i < 4; i++) {
+ printf("arm_init: params: pages_in_bank[%d] = %08x\n", i, params->u1.s.pages_in_bank[i]);
+ }
+ printf("arm_init: params: pages_in_vram = %08x\n", params->u1.s.pages_in_vram);
+ printf("arm_init: params: initrd_start = %08x\n", params->u1.s.initrd_start);
+ printf("arm_init: params: initrd_size = %08x\n", params->u1.s.initrd_size);
+ printf("arm_init: params: rd_start = %08x\n", params->u1.s.rd_start);
+ printf("arm_init: params: system_options = %08x\n", params->u1.s.system_options);
+ printf("arm_init: params: system_serial_num = %08x\n", params->u1.s.system_serial_num);
+ for(i = 0; i < 8; i++) {
+ printf("arm_init: params: paths[%d] = %s\n", i, (params->u2.paths[i][0]) ? params->u2.paths[i] : "(null)");
+ }
+ printf("arm_init: params: magic = %08x\n", params->u2.s.magic);
+ printf("arm_init: params: commandline = %s\n", (params->commandline[0]) ? params->commandline : "(null)");
+ printf("arm_init: params: bootsetting = %s\n", (params->bootsetting[0]) ? params->bootsetting : "(null)");*/
+
+ proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
+ proc0.p_uarea = (struct user *) proc0_uarea.pv_va;
+ thread0.td_kstack = kernelstack.pv_va;
+ thread0.td_pcb = (struct pcb *)
+ (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
+ thread0.td_pcb->pcb_flags = 0;
+ thread0.td_frame = &proc0_tf;
+
+
+ /* Enable MMU, I-cache, D-cache, write buffer. */
+#if 0
+ printf("it was %p\n", (void *)cpufunc_control(0,0));
+ printf("ca c fait\n");
+ printf("before\n");
+ printf("mmu enabled\n");
+ printf("now we have %p\n", (void*)cpufunc_control(0,0));
+#endif
+
+ cpufunc_control(0x337f, 0x107d);
+ got_mmu = 1;
+ arm_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
+
+
+ pmap_bootstrap(KERNEL_VM_BASE,
+ KERNEL_VM_BASE + KERNEL_PT_VMDATA_NUM * 0x400000, &kernel_l1pt);
+
+
+ mutex_init();
+
+
+#if 0
+ phys_avail[0] = 0x00000000;
+ phys_avail[1] = physmem;
+ phys_avail[2] = 0;
+#endif
+#if 0
+ phys_avail[1] = physical_start;
+ phys_avail[2] = physical_freestart;
+ phys_avail[3] = physmem;
+#endif
+#if 0
+ phys_avail[3] = 0;
+#endif
+#if 0
+ phys_avail[1] = 0x01000000 - 1;
+#endif
+ phys_avail[0] = round_page(freemempos);
+ phys_avail[1] = 0xc0000000 + 0x02000000 - 1;
+ phys_avail[2] = 0;
+ phys_avail[3] = 0;
+#if 0
+ phys_avail[4] = 0x00000000;
+ phys_avail[5] = 0x00000000;
+#endif
+
+ /* Do basic tuning, hz etc */
+ init_param1();
+ init_param2(physmem);
+ printf("arm_init: done!\n");
+ avail_end = 0xc0000000 + 0x02000000 - 1;
+ return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP));
+}
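[annotation] The valloc_pages()/alloc_pages() macros in initarm() above implement a boot-time bump allocator: freemempos is a watermark that only moves forward, every carve-out is page-granular and zeroed, and (since the kernel runs 1:1 mapped at this point) the physical address doubles as the virtual one. A standalone, hedged sketch of the same technique against a static arena (all names illustrative):

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096
    #define ARENA_PAGES 64

    static unsigned char arena[ARENA_PAGES * PAGE_SIZE]; /* stand-in for RAM */
    static size_t freemempos; /* watermark: only ever advances */

    static void *
    bump_alloc_pages(size_t npages)
    {
        void *p = &arena[freemempos];

        freemempos += npages * PAGE_SIZE;
        memset(p, 0, npages * PAGE_SIZE); /* pages are handed out zeroed */
        return (p);
    }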
diff --git a/sys/arm/sa11x0/files.sa11x0 b/sys/arm/sa11x0/files.sa11x0
new file mode 100644
index 0000000..a704f53
--- /dev/null
+++ b/sys/arm/sa11x0/files.sa11x0
@@ -0,0 +1,11 @@
+# $FreeBSD$
+arm/sa11x0/assabet_machdep.c optional assabet
+arm/sa11x0/sa11x0.c optional saip
+arm/sa11x0/sa11x0_ost.c optional saip
+arm/sa11x0/sa11x0_io.c optional saip
+arm/sa11x0/sa11x0_io_asm.S optional saip
+arm/sa11x0/sa11x0_irq.S optional saip
+arm/sa11x0/sa11x0_irqhandler.c optional saip
+dev/uart/uart_cpu_sa1110.c optional uart saip
+dev/uart/uart_dev_sa1110.c optional uart saip
+dev/uart/uart_bus_sa1110.c optional uart saip
diff --git a/sys/arm/sa11x0/sa11x0.c b/sys/arm/sa11x0/sa11x0.c
new file mode 100644
index 0000000..9b89c8f
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0.c
@@ -0,0 +1,261 @@
+/* $NetBSD: sa11x0.c,v 1.14 2003/07/15 00:24:50 lukem Exp $ */
+
+/*-
+ * Copyright (c) 2001, The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by IWAMOTO Toshihiro and Ichiro FUKUHARA.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ */
+/*-
+ * Copyright (c) 1999
+ * Shin Takemura and PocketBSD Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the PocketBSD project
+ * and its contributors.
+ * 4. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/reboot.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <arm/sa11x0/sa11x0_reg.h>
+#include <arm/sa11x0/sa11x0_var.h>
+#include <arm/sa11x0/sa11x0_dmacreg.h>
+#include <arm/sa11x0/sa11x0_ppcreg.h>
+#include <arm/sa11x0/sa11x0_gpioreg.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+
+extern struct intrhand *irqhandlers[];
+extern u_int levels[];
+
+static struct resource *sa1110_alloc_resource(device_t, device_t, int, int *,
+ u_long, u_long, u_long, u_int);
+
+static int sa1110_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int sa1110_setup_intr(device_t, device_t, struct resource *, int,
+ driver_intr_t *, void *, void **);
+
+extern u_int irqmasks[];
+
+void irq_setmasks(void);
+void intr_calculatemasks(void);
+
+static int
+sa1110_setup_intr(device_t dev, device_t child,
+ struct resource *ires, int flags, driver_intr_t *intr, void *arg,
+ void **cookiep)
+{
+ int saved_cpsr;
+
+ if (flags & INTR_TYPE_TTY) {
+ ires->r_start = 15;
+ irqmasks[IPL_SERIAL] |= 1 << ires->r_start;
+ } else if (flags & INTR_TYPE_CLK) {
+ if (ires->r_start == 0)
+ ires->r_start = 26;
+ else
+ ires->r_start = 27;
+ irqmasks[IPL_SERIAL] |= 1 << ires->r_start;
+ }
+#if 0
+ intr_calculatemasks();
+#endif
+ saved_cpsr = SetCPSR(I32_bit, I32_bit);
+
+ set_splmasks();
+ irq_setmasks();
+ SetCPSR(I32_bit, saved_cpsr & I32_bit);
+ BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, intr, arg,
+ cookiep);
+ return (0);
+}
+
+static struct resource *
+sa1110_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource *res = malloc(sizeof(*res), M_DEVBUF, M_WAITOK);
+/* XXX */
+ res->r_start = *rid;
+ return (res);
+}
+
+static int
+sa1110_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ return (0);
+}
+
+/* prototypes */
+static int sa11x0_probe(device_t);
+static int sa11x0_attach(device_t);
+static void sa11x0_identify(driver_t *, device_t);
+
+extern vm_offset_t saipic_base;
+
+
+int
+sa11x0_probe(device_t dev)
+{
+ return 0;
+}
+
+void
+sa11x0_identify(driver_t *driver, device_t parent)
+{
+
+ BUS_ADD_CHILD(parent, 0, "saip", 0);
+}
+
+int
+sa11x0_attach(device_t dev)
+{
+ struct sa11x0_softc *sc = device_get_softc(dev);
+ int unit = device_get_unit(dev);
+ sc->sc_iot = &sa11x0_bs_tag;
+
+ /* Map the SAIP */
+
+ bzero(irqhandlers, 0x20 * sizeof(void*));
+ if (bus_space_map(sc->sc_iot, SAIPIC_BASE, SAIPIC_NPORTS,
+ 0, &sc->sc_ioh))
+ panic("saip%d: Cannot map registers", unit);
+ saipic_base = sc->sc_ioh;
+
+ /* Map the GPIO registers */
+ if (bus_space_map(sc->sc_iot, SAGPIO_BASE, SAGPIO_NPORTS,
+ 0, &sc->sc_gpioh))
+ panic("saip%d: unable to map GPIO registers", unit);
+ bus_space_write_4(sc->sc_iot, sc->sc_gpioh, SAGPIO_EDR, 0xffffffff);
+
+ /* Map the PPC registers */
+ if (bus_space_map(sc->sc_iot, SAPPC_BASE, SAPPC_NPORTS,
+ 0, &sc->sc_ppch))
+ panic("saip%d: unable to map PPC registers", unit);
+
+#if 0
+ /* Map the DMA controller registers */
+ if (bus_space_map(sc->sc_iot, SADMAC_BASE, SADMAC_NPORTS,
+ 0, &sc->sc_dmach))
+ panic("saip%d: unable to map DMAC registers", unit);
+#endif
+ /* Map the reset controller registers */
+ if (bus_space_map(sc->sc_iot, SARCR_BASE, PAGE_SIZE,
+ 0, &sc->sc_reseth))
+ panic("saip%d: unable to map reset registers", unit);
+ printf("\n");
+
+
+ /*
+ * Mask all interrupts.
+ * They are unmasked later in each device's attach routine.
+ */
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, SAIPIC_MR, 0);
+
+ /* Route all bits to IRQ */
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, SAIPIC_LR, 0);
+
+ /* Exit idle mode only when unmasked intr is received */
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, SAIPIC_CR, 1);
+#if 0
+ /* disable all DMAC channels */
+ bus_space_write_4(sc->sc_iot, sc->sc_dmach, SADMAC_DCR0_CLR, 1);
+ bus_space_write_4(sc->sc_iot, sc->sc_dmach, SADMAC_DCR1_CLR, 1);
+ bus_space_write_4(sc->sc_iot, sc->sc_dmach, SADMAC_DCR2_CLR, 1);
+ bus_space_write_4(sc->sc_iot, sc->sc_dmach, SADMAC_DCR3_CLR, 1);
+ bus_space_write_4(sc->sc_iot, sc->sc_dmach, SADMAC_DCR4_CLR, 1);
+ bus_space_write_4(sc->sc_iot, sc->sc_dmach, SADMAC_DCR5_CLR, 1);
+#endif
+ /*
+ * XXX this is probably a bad place, but the intr bit shouldn't be
+ * XXX enabled before the intr mask is set.
+ * XXX Would having a sane imask[] suffice?
+ */
+#if 0
+ SetCPSR(I32_bit, 0);
+#endif
+ /*
+ * Attach each device.
+ */
+ device_add_child(dev, "uart", 0);
+ device_add_child(dev, "saost", 0);
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+ return (0);
+}
+
+static device_method_t saip_methods[] = {
+ DEVMETHOD(device_probe, sa11x0_probe),
+ DEVMETHOD(device_attach, sa11x0_attach),
+ DEVMETHOD(device_identify, sa11x0_identify),
+ DEVMETHOD(bus_alloc_resource, sa1110_alloc_resource),
+ DEVMETHOD(bus_activate_resource, sa1110_activate_resource),
+ DEVMETHOD(bus_setup_intr, sa1110_setup_intr),
+ {0, 0},
+};
+
+static driver_t saip_driver = {
+ "saip",
+ saip_methods,
+ sizeof(struct sa11x0_softc),
+};
+static devclass_t saip_devclass;
+
+DRIVER_MODULE(saip, nexus, saip_driver, saip_devclass, 0, 0);
diff --git a/sys/arm/sa11x0/sa11x0_dmacreg.h b/sys/arm/sa11x0/sa11x0_dmacreg.h
new file mode 100644
index 0000000..9112e21
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_dmacreg.h
@@ -0,0 +1,97 @@
+/* $NetBSD: sa11x0_dmacreg.h,v 1.1 2001/07/08 23:37:53 rjs Exp $ */
+
+/*-
+ * Copyright (c) 2001, The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by IWAMOTO Toshihiro.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/* SA11[01]0 integrated DMA controller */
+
+#define SADMAC_NPORTS 40
+
+#define SADMAC_DAR0 0x00 /* DMA device address register */
+#define SADMAC_DCR0_SET 0x04 /* DMA control/status (set) */
+#define SADMAC_DCR0_CLR 0x08 /* DMA control/status (clear) */
+#define SADMAC_DCR0 0x0C /* DMA control/status (read only) */
+#define SADMAC_DBSA0 0x10 /* DMA Buffer A start address */
+#define SADMAC_DBTA0 0x14 /* DMA Buffer A transfer count */
+#define SADMAC_DBSB0 0x18 /* DMA Buffer B start address */
+#define SADMAC_DBTB0 0x1C /* DMA Buffer B transfer count */
+
+#define SADMAC_DAR1 0x20
+#define SADMAC_DCR1_SET 0x24
+#define SADMAC_DCR1_CLR 0x28
+#define SADMAC_DCR1 0x2C
+#define SADMAC_DBSA1 0x30
+#define SADMAC_DBTA1 0x34
+#define SADMAC_DBSB1 0x38
+#define SADMAC_DBTB1 0x3C
+
+#define SADMAC_DAR2 0x40
+#define SADMAC_DCR2_SET 0x44
+#define SADMAC_DCR2_CLR 0x48
+#define SADMAC_DCR2 0x4C
+#define SADMAC_DBSA2 0x50
+#define SADMAC_DBTA2 0x54
+#define SADMAC_DBSB2 0x58
+#define SADMAC_DBTB2 0x5C
+
+#define SADMAC_DAR3 0x60
+#define SADMAC_DCR3_SET 0x64
+#define SADMAC_DCR3_CLR 0x68
+#define SADMAC_DCR3 0x6C
+#define SADMAC_DBSA3 0x70
+#define SADMAC_DBTA3 0x74
+#define SADMAC_DBSB3 0x78
+#define SADMAC_DBTB3 0x7C
+
+#define SADMAC_DAR4 0x80
+#define SADMAC_DCR4_SET 0x84
+#define SADMAC_DCR4_CLR 0x88
+#define SADMAC_DCR4 0x8C
+#define SADMAC_DBSA4 0x90
+#define SADMAC_DBTA4 0x94
+#define SADMAC_DBSB4 0x98
+#define SADMAC_DBTB4 0x9C
+
+#define SADMAC_DAR5 0xA0
+#define SADMAC_DCR5_SET 0xA4
+#define SADMAC_DCR5_CLR 0xA8
+#define SADMAC_DCR5 0xAC
+#define SADMAC_DBSA5 0xB0
+#define SADMAC_DBTA5 0xB4
+#define SADMAC_DBSB5 0xB8
+#define SADMAC_DBTB5 0xBC
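[annotation] Each of the six DMA channels is an identical 0x20-byte register bank, so the per-channel constants above could equally be computed from the channel number. Hypothetical convenience macros (not part of the header) that reproduce the table:

    #define SADMAC_CH_SIZE 0x20
    #define SADMAC_DAR(n) (((n) * SADMAC_CH_SIZE) + 0x00)
    #define SADMAC_DCR_SET(n) (((n) * SADMAC_CH_SIZE) + 0x04)
    #define SADMAC_DCR_CLR(n) (((n) * SADMAC_CH_SIZE) + 0x08)
    /* e.g. SADMAC_DAR(5) == 0xA0 == SADMAC_DAR5 */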
diff --git a/sys/arm/sa11x0/sa11x0_gpioreg.h b/sys/arm/sa11x0/sa11x0_gpioreg.h
new file mode 100644
index 0000000..9f46e9d
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_gpioreg.h
@@ -0,0 +1,100 @@
+/* $NetBSD: sa11x0_gpioreg.h,v 1.2 2001/07/30 15:58:56 rjs Exp $ */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Ichiro FUKUHARA (ichiro@ichiro.org).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * SA-11x0 GPIO Register
+ */
+
+#define SAGPIO_NPORTS 8
+
+/* GPIO pin-level register */
+#define SAGPIO_PLR 0x00
+
+/* GPIO pin direction register */
+#define SAGPIO_PDR 0x04
+
+/* GPIO pin output set register */
+#define SAGPIO_PSR 0x08
+
+/* GPIO pin output clear register */
+#define SAGPIO_PCR 0x0C
+
+/* GPIO rising-edge detect register */
+#define SAGPIO_RER 0x10
+
+/* GPIO falling-edge detect register */
+#define SAGPIO_FER 0x14
+
+/* GPIO edge-detect status register */
+#define SAGPIO_EDR 0x18
+
+/* GPIO alternate function register */
+#define SAGPIO_AFR 0x1C
+
+/* XXX */
+#define GPIO(x) (0x00000001 << (x))
+
+/*
+ * SA-11x0 GPIOs parameter
+ */
+/*
+port name desc
+0 Reserved
+1 Reserved
+2...9 LDD{8..15} LCD DATA(8-15)
+10 SSP_TXD SSP transmit
+11 SSP_RXD SSP receive
+12 SSP_SCLK SSP serial clock
+13 SSP_SFRM SSP frameclock
+14 UART_TXD UART transmit
+15 UART_RXD UART receive
+16 GPCLK_OUT General-purpose clock out
+17 Reserved
+18 UART_SCLK Sample clock input
+19 SSP_CLK Sample clock input
+20 UART_SCLK3 Sample clock input
+21 MCP_CLK MCP clock in
+22 TREQA Either TIC request A
+23 TREQB Either TIC request B
+24 Reserved
+25 RTC Real Time Clock
+26 RCLK_OUT internal clock /2
+27 32KHZ_OUT Raw 32.768kHz osc output
+ */
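[annotation] The set (PSR) and clear (PCR) registers make pin updates race-free: writing a 1 bit sets or clears that pin and 0 bits are ignored, so no read-modify-write of the pin level is needed. A hedged usage sketch built on the GPIO() bit macro (function name hypothetical; tag and handle as mapped in a softc like sa11x0_attach()'s):

    static void
    gpio_pulse(bus_space_tag_t iot, bus_space_handle_t gpioh, int pin)
    {
        bus_space_write_4(iot, gpioh, SAGPIO_PSR, GPIO(pin)); /* drive high */
        bus_space_write_4(iot, gpioh, SAGPIO_PCR, GPIO(pin)); /* drive low */
    }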
diff --git a/sys/arm/sa11x0/sa11x0_io.c b/sys/arm/sa11x0/sa11x0_io.c
new file mode 100644
index 0000000..7b91d5f
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_io.c
@@ -0,0 +1,252 @@
+/* $NetBSD: sa11x0_io.c,v 1.12 2003/07/15 00:24:51 lukem Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Ichiro FUKUHARA.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * bus_space I/O functions for sa11x0
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+
+#include <machine/bus.h>
+#include <machine/pmap.h>
+
+/* Prototypes for all the bus_space structure functions */
+
+bs_protos(sa11x0);
+
+/* Declare the sa11x0 bus space tag */
+
+struct bus_space sa11x0_bs_tag = {
+ /* cookie */
+ NULL,
+
+ /* mapping/unmapping */
+ sa11x0_bs_map,
+ sa11x0_bs_unmap,
+ sa11x0_bs_subregion,
+
+ /* allocation/deallocation */
+ sa11x0_bs_alloc,
+ sa11x0_bs_free,
+
+ /* get kernel virtual address */
+ sa11x0_bs_vaddr,
+
+ /* mmap bus space for userland */
+ sa11x0_bs_mmap,
+
+ /* barrier */
+ sa11x0_bs_barrier,
+
+ /* read (single) */
+ sa11x0_bs_r_1,
+ sa11x0_bs_r_2,
+ sa11x0_bs_r_4,
+ NULL,
+
+ /* read multiple */
+ sa11x0_bs_rm_1,
+ sa11x0_bs_rm_2,
+ sa11x0_bs_rm_4,
+ NULL,
+
+ /* read region */
+ NULL,
+ sa11x0_bs_rr_2,
+ NULL,
+ NULL,
+ /* write (single) */
+ sa11x0_bs_w_1,
+ sa11x0_bs_w_2,
+ sa11x0_bs_w_4,
+ NULL,
+
+ /* write multiple */
+ sa11x0_bs_wm_1,
+ sa11x0_bs_wm_2,
+ sa11x0_bs_wm_4,
+ NULL,
+
+ /* write region */
+ NULL,
+ sa11x0_bs_wr_2,
+ NULL,
+ NULL,
+
+ /* set multiple */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+
+ /* set region */
+ NULL,
+ sa11x0_bs_sr_2,
+ NULL,
+ NULL,
+
+ /* copy */
+ NULL,
+ sa11x0_bs_c_2,
+ NULL,
+ NULL,
+};
+
+/* bus space functions */
+
+int
+sa11x0_bs_map(t, bpa, size, cacheable, bshp)
+ void *t;
+ bus_addr_t bpa;
+ bus_size_t size;
+ int cacheable;
+ bus_space_handle_t *bshp;
+{
+ u_long startpa, endpa, pa;
+ vm_offset_t va;
+ pt_entry_t *pte;
+
+ startpa = trunc_page(bpa);
+ endpa = round_page(bpa + size);
+
+ /* XXX use extent manager to check duplicate mapping */
+
+ va = kmem_alloc(kernel_map, endpa - startpa);
+ if (va == 0)
+ return(ENOMEM);
+
+ *bshp = (bus_space_handle_t)(va + (bpa - startpa));
+
+ for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
+ pmap_kenter(va, pa);
+ pte = vtopte(va);
+ if (cacheable == 0) {
+ *pte &= ~L2_S_CACHE_MASK;
+ PTE_SYNC(pte);
+ }
+ }
+ pmap_update(pmap_kernel());
+ return(0);
+}
+
+int
+sa11x0_bs_alloc(t, rstart, rend, size, alignment, boundary, cacheable,
+ bpap, bshp)
+ void *t;
+ bus_addr_t rstart, rend;
+ bus_size_t size, alignment, boundary;
+ int cacheable;
+ bus_addr_t *bpap;
+ bus_space_handle_t *bshp;
+{
+ panic("sa11x0_alloc(): Help!");
+}
+
+
+void
+sa11x0_bs_unmap(t, size)
+ void *t;
+ bus_size_t size;
+{
+ /*
+ * Temporary implementation
+ */
+}
+
+void
+sa11x0_bs_free(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+
+ panic("sa11x0_free(): Help!");
+ /* sa11x0_unmap() does all that we need to do. */
+/* sa11x0_unmap(t, bsh, size);*/
+}
+
+int
+sa11x0_bs_subregion(t, bsh, offset, size, nbshp)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, size;
+ bus_space_handle_t *nbshp;
+{
+
+ *nbshp = bsh + offset;
+ return (0);
+}
+
+int
+sa11x0_bs_mmap(dev_t t, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
+{
+ *paddr = offset;
+ return (0);
+}
+
+void *
+sa11x0_bs_vaddr(t, bsh)
+ void *t;
+ bus_space_handle_t bsh;
+{
+ return ((void *)bsh);
+}
+
+void
+sa11x0_bs_barrier(t, bsh, offset, len, flags)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, len;
+ int flags;
+{
+/* NULL */
+}
+
+/* End of sa11x0_io.c */
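[annotation] sa11x0_bs_map() above backs a handle with freshly allocated KVA, wires each page with pmap_kenter(), and clears the cache bits from the PTEs when an uncached device mapping is requested. A hedged consumer sketch, modelled on sa11x0_attach(): map the interrupt controller through the tag and read its mask register (SAIPIC_* as used earlier in sa11x0.c):

    static uint32_t
    read_intr_mask(void)
    {
        bus_space_handle_t ioh;

        if (bus_space_map(&sa11x0_bs_tag, SAIPIC_BASE, SAIPIC_NPORTS,
            0, &ioh))
            panic("cannot map interrupt controller");
        return (bus_space_read_4(&sa11x0_bs_tag, ioh, SAIPIC_MR));
    }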
diff --git a/sys/arm/sa11x0/sa11x0_io_asm.S b/sys/arm/sa11x0/sa11x0_io_asm.S
new file mode 100644
index 0000000..cb15cd0
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_io_asm.S
@@ -0,0 +1,290 @@
+/* $NetBSD: sa11x0_io_asm.S,v 1.1 2001/07/08 23:37:53 rjs Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * bus_space I/O functions for sa11x0
+ */
+
+/*
+ * read single
+ */
+
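+/*
+ * Arguments arrive in r0-r3 following the ARM procedure call
+ * standard: r0 = bus space tag (unused here), r1 = handle,
+ * r2 = offset.  Single reads return the value in r0; single
+ * writes take it in r3.
+ */
+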
+ENTRY(sa11x0_bs_r_1)
+ ldrb r0, [r1, r2]
+ mov pc, lr
+
+ENTRY(sa11x0_bs_r_2)
+ ldrh r0, [r1, r2]
+ mov pc, lr
+
+ENTRY(sa11x0_bs_r_4)
+ ldr r0, [r1, r2]
+ mov pc, lr
+
+/*
+ * write single
+ */
+
+ENTRY(sa11x0_bs_w_1)
+ strb r3, [r1, r2]
+ mov pc, lr
+
+ENTRY(sa11x0_bs_w_2)
+ strh r3, [r1, r2]
+ mov pc, lr
+
+ENTRY(sa11x0_bs_w_4)
+ str r3, [r1, r2]
+ mov pc, lr
+
+
+/*
+ * read multiple
+ */
+
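+/*
+ * The *_multiple and *_region routines take tag, handle, offset and
+ * buffer in r0-r3 and fetch the transfer count as the fifth argument
+ * from the stack; a count of zero or less returns immediately
+ * (the cmp/movle pairs below).
+ */
+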
+ENTRY(sa11x0_bs_rm_1)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_rm_1_loop:
+ ldrb r1, [r0]
+ subs r2, r2, #0x00000001
+ strb r1, [r3], #0x0001
+ bgt sa11x0_bs_rm_1_loop
+
+ mov pc, lr
+
+ENTRY(sa11x0_bs_rm_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+ tst r2, #0x00000007
+ tsteq r3, #0x00000003
+ beq sa11x0_bs_rm_2_fast
+
+sa11x0_bs_rm_2_loop:
+ ldrh r1, [r0]
+ subs r2, r2, #0x00000001
+ strh r1, [r3], #0x0002
+ bgt sa11x0_bs_rm_2_loop
+
+ mov pc, lr
+
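+/*
+ * Fast path, taken when the count is a multiple of eight halfwords
+ * and the buffer is word aligned (tested above): each pass reads
+ * eight halfwords from the device, packs them pairwise into four
+ * words, and flushes them with a single stmia.
+ */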
+sa11x0_bs_rm_2_fast:
+ stmfd sp!, {r4, r5, lr}
+
+sa11x0_bs_rm_2_fastloop:
+ ldrh r1, [r0]
+ ldrh lr, [r0]
+ orr r1, r1, lr, lsl #16
+
+ ldrh r4, [r0]
+ ldrh lr, [r0]
+ orr r4, r4, lr, lsl #16
+
+ ldrh r5, [r0]
+ ldrh lr, [r0]
+ orr r5, r5, lr, lsl #16
+
+ ldrh ip, [r0]
+ ldrh lr, [r0]
+ orr ip, ip, lr, lsl #16
+
+ stmia r3!, {r1, r4, r5, ip}
+ subs r2, r2, #8
+ bgt sa11x0_bs_rm_2_fastloop
+
+ ldmfd sp!, {r4, r5, pc}
+
+
+ENTRY(sa11x0_bs_rm_4)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_rm_4_loop:
+ ldr r1, [r0]
+ subs r2, r2, #0x00000001
+ str r1, [r3], #0x0004
+ bgt sa11x0_bs_rm_4_loop
+
+ mov pc, lr
+
+/*
+ * write multiple
+ */
+
+ENTRY(sa11x0_bs_wm_1)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_wm_1_loop:
+ ldrb r1, [r3], #0x0001
+ subs r2, r2, #0x00000001
+ strb r1, [r0]
+ bgt sa11x0_wm_1_loop
+
+ mov pc, lr
+
+ENTRY(sa11x0_bs_wm_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_wm_2_loop:
+ ldrh r1, [r3], #0x0002
+ subs r2, r2, #0x00000001
+ strh r1, [r0]
+ bgt sa11x0_bs_wm_2_loop
+
+ mov pc, lr
+
+ENTRY(sa11x0_bs_wm_4)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_wm_4_loop:
+ ldr r1, [r3], #0x0004
+ subs r2, r2, #0x00000001
+ str r1, [r0]
+ bgt sa11x0_bs_wm_4_loop
+
+ mov pc, lr
+
+/*
+ * read region
+ */
+
+ENTRY(sa11x0_bs_rr_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_rr_2_loop:
+ ldrh r1, [r0], #0x0002
+ strh r1, [r3], #0x0002
+ subs r2, r2, #0x00000001
+ bgt sa11x0_bs_rr_2_loop
+
+ mov pc, lr
+
+/*
+ * write region
+ */
+
+ENTRY(sa11x0_bs_wr_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_wr_2_loop:
+ ldrh r1, [r3], #0x0002
+ strh r1, [r0], #0x0002
+ subs r2, r2, #0x00000001
+ bgt sa11x0_bs_wr_2_loop
+
+ mov pc, lr
+
+/*
+ * set region
+ */
+
+ENTRY(sa11x0_bs_sr_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ cmp r2, #0x00000000
+ movle pc, lr
+
+sa11x0_bs_sr_2_loop:
+ strh r3, [r0], #0x0002
+ subs r2, r2, #0x00000001
+ bgt sa11x0_bs_sr_2_loop
+
+ mov pc, lr
+
+/*
+ * copy region
+ */
+
+ENTRY(sa11x0_bs_c_2)
+ add r0, r1, r2
+ ldr r2, [sp, #0]
+ add r1, r2, r3
+ ldr r2, [sp, #4]
+ cmp r2, #0x00000000
+ movle pc, lr
+
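+	/*
+	 * If the source lies below the destination the regions may
+	 * overlap, so copy backwards to avoid clobbering the tail of
+	 * the source before it has been read.
+	 */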
+ cmp r0, r1
+ blt sa11x0_bs_c_2_backwards
+
+sa11x0_bs_cf_2_loop:
+ ldrh r3, [r0], #0x0002
+ strh r3, [r1], #0x0002
+ subs r2, r2, #0x00000001
+ bgt sa11x0_bs_cf_2_loop
+
+ mov pc, lr
+
+sa11x0_bs_c_2_backwards:
+ add r0, r0, r2, lsl #1
+ add r1, r1, r2, lsl #1
+ sub r0, r0, #2
+ sub r1, r1, #2
+
+sa11x0_bs_cb_2_loop:
+ ldrh r3, [r0], #-2
+ strh r3, [r1], #-2
+ subs r2, r2, #1
+ bne sa11x0_bs_cb_2_loop
+
+ mov pc, lr
+
+/* end of sa11x0_io_asm.S */
diff --git a/sys/arm/sa11x0/sa11x0_irq.S b/sys/arm/sa11x0/sa11x0_irq.S
new file mode 100644
index 0000000..cabeb40
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_irq.S
@@ -0,0 +1,223 @@
+/* $NetBSD: sa11x0_irq.S,v 1.5 2003/03/31 19:52:35 chris Exp $ */
+
+/*
+ * Copyright (c) 1998 Mark Brinicombe.
+ * Copyright (c) 1998 Causality Limited
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to the NetBSD Foundation
+ * by IWAMOTO Toshihiro.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "assym.s"
+#include <machine/asm.h>
+#include <machine/armreg.h>
+#include <machine/asmacros.h>
+#include <arm/sa11x0/sa11x0_reg.h>
+__FBSDID("$FreeBSD$");
+Lcurrent_spl_level:
+ .word _C_LABEL(current_spl_level)
+Lcurrent_intr_depth:
+ .word _C_LABEL(current_intr_depth)
+
+Lspl_masks:
+ .word _C_LABEL(spl_masks)
+ .globl _C_LABEL(saipic_base)
+_C_LABEL(saipic_base):
+ .word 0x00000000
+
+#ifdef INTR_DEBUG
+Ldbg_str:
+ .asciz "irq_entry %x %x\n"
+#endif
+
+AST_ALIGNMENT_FAULT_LOCALS
+/*
+ * Register usage
+ *
+ * r6 - Address of current handler
+ * r7 - Pointer to handler pointer list
+ * r8 - Current IRQ requests.
+ * r9 - Used to count through possible IRQ bits.
+ * r10 - Base address of SAIP
+ */
+
+#define _SPL_LEVELS 13 /* XXX */
+ASENTRY_NP(irq_entry)
+ sub lr, lr, #0x00000004 /* Adjust the lr */
+
+ PUSHFRAMEINSVC /* Push an interrupt frame */
+ ENABLE_ALIGNMENT_FAULTS
+
+ /* Load r8 with the SAIPIC interrupt requests */
+
+ ldr r10, _C_LABEL(saipic_base)
+ ldr r8, [r10, #(SAIPIC_IP)] /* Load IRQ pending register */
+
+#ifdef INTR_DEBUG
+ ldr r2, [r10, #(SAIPIC_MR)]
+ adr r0, Ldbg_str
+ mov r1, r8
+ bl _C_LABEL(printf)
+#endif
+	/*
+	 * Note that we have entered the IRQ handler.  We are in SVC
+	 * mode so we cannot use the processor mode to determine
+	 * whether we are inside an IRQ; instead we keep an explicit
+	 * count of how deeply the interrupt handler is nested.
+	 */
+
+ ldr r0, Lcurrent_intr_depth
+ ldr r1, [r0]
+ add r1, r1, #1
+ str r1, [r0]
+
+	/*
+	 * We need to block all interrupts at or below the IPL of every
+	 * asserted interrupt; this emulates hardware interrupt priority
+	 * levels.  That means going through the interrupt mask and, for
+	 * every asserted interrupt, masking out all other interrupts at
+	 * the same or lower IPL.  Ideally this could wait until the
+	 * main loop, but it has to be sorted out first so that
+	 * interrupts can be re-enabled.
+	 *
+	 * This would benefit from a special ffs-type routine.
+	 */
+
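+	/*
+	 * Scan spl_masks from the highest level downward until a level
+	 * is found whose mask still contains one of the pending IRQs
+	 * in r8; the new spl level is one above it, so every asserted
+	 * source stays blocked while the handlers run.
+	 */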
+ mov r9, #(_SPL_LEVELS - 1)
+ ldr r7, Lspl_masks
+Lfind_highest_ipl:
+ ldr r2, [r7, r9, lsl #2]
+ tst r8, r2
+ subeq r9, r9, #1
+ beq Lfind_highest_ipl
+
+ /* r9 = SPL level of highest priority interrupt */
+ add r9, r9, #1
+ ldr r2, [r7, r9, lsl #2]
+ mvn r2, r2
+
+ ldr r0, Lcurrent_spl_level
+ ldr r1, [r0]
+ str r9, [r0]
+ stmfd sp!, {r1}
+
+ /* Update the SAIP irq masks */
+ bl _C_LABEL(irq_setmasks)
+#ifdef INTR_DEBUG
+ stmfd sp!, {r0,r1,r2}
+ adr r0, Ldbg_str
+ mov r2, r9
+ bl _C_LABEL(printf)
+ ldmia sp!, {r0,r1,r2}
+#endif
+	mrs	r0, cpsr_all		/* Enable IRQs */
+ bic r0, r0, #I32_bit
+ msr cpsr_all, r0
+ mov r0, r8
+ bl _C_LABEL(arm_handler_execute)
+
+ ldmfd sp!, {r2}
+ ldr r1, Lcurrent_spl_level
+ str r2, [r1]
+ /* Restore previous disabled mask */
+ bl _C_LABEL(irq_setmasks)
+ bl _C_LABEL(dosoftints) /* Handle the soft interrupts */
+
+	/* Disable IRQs in preparation for exit */
+ mrs r0, cpsr_all
+ orr r0, r0, #(I32_bit)
+ msr cpsr_all, r0
+
+#ifdef INTR_DEBUG
+ adr r0, Ldbg_str
+ mov r1, #3
+ ldr r2, [r10, #(SAIPIC_MR)]
+ bl _C_LABEL(printf)
+#endif
+
+ /* Decrement the nest count */
+ ldr r0, Lcurrent_intr_depth
+ ldr r1, [r0]
+ sub r1, r1, #1
+ str r1, [r0]
+
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAMEFROMSVCANDEXIT
+
+ /* NOT REACHED */
+ b . - 8
+ENTRY(irq_setmasks)
+ /* Disable interrupts */
+ mrs r3, cpsr_all
+ orr r1, r3, #(I32_bit)
+ msr cpsr_all, r1
+
+ /* Calculate interrupt mask */
+ ldr r0, Lspl_masks
+ ldr r2, Lcurrent_spl_level
+ ldr r2, [r2]
+ ldr r2, [r0, r2, lsl #2]
+ ldr r0, _C_LABEL(saipic_base)
+ str r2, [r0, #(SAIPIC_MR)] /* Set mask register */
+
+ /* Restore old cpsr and exit */
+ /* msr cpsr_all, r3 XXX: not now.*/
+ mov pc, lr
+Lcnt:
+ .word _C_LABEL(cnt)
+
+#ifdef IRQSTATS
+Lintrcnt:
+ .word _C_LABEL(intrcnt)
+#endif
+
+Lirqhandlers:
+ .word _C_LABEL(irqhandlers) /* Pointer to array of irqhandlers */
+
+
+
+
+ .global _C_LABEL(intrnames), _C_LABEL(eintrnames)
+ .global _C_LABEL(eintrcnt)
+_C_LABEL(intrnames):
+_C_LABEL(eintrnames):
+_C_LABEL(eintrcnt):
+
+ .globl _C_LABEL(intrcnt), _C_LABEL(sintrcnt)
+
+_C_LABEL(intrcnt):
+ .space ICU_LEN*4 /* XXX Should be linked to number of interrupts */
+
+_C_LABEL(sintrcnt):
+ .space 32*4
diff --git a/sys/arm/sa11x0/sa11x0_irqhandler.c b/sys/arm/sa11x0/sa11x0_irqhandler.c
new file mode 100644
index 0000000..5b7e430
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_irqhandler.c
@@ -0,0 +1,233 @@
+/* $NetBSD: sa11x0_irqhandler.c,v 1.5 2003/08/07 16:26:54 agc Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to the NetBSD Foundation
+ * by IWAMOTO Toshihiro.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
+ * Simulation Facility, NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)isa.c 7.2 (Berkeley) 5/13/91
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/syslog.h>
+#include <sys/malloc.h>
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
+#include <arm/sa11x0/sa11x0_reg.h>
+#include <arm/sa11x0/sa11x0_var.h>
+
+#include <machine/cpu.h>
+
+#define NIRQS 0x20
+struct intrhand *irqhandlers[NIRQS];
+
+int current_intr_depth;
+u_int actual_mask;
+#ifdef hpcarm
+#define IPL_LEVELS (NIPL+1)
+u_int imask[NIPL];
+#else
+#define IPL_LEVELS 13
+u_int spl_mask;
+u_int irqmasks[IPL_LEVELS + 1];	/* indexed 0..IPL_LEVELS inclusive */
+#endif
+u_int irqblock[NIRQS];
+u_int levels[NIRQS];
+
+
+extern void set_spl_masks(void);
+#if 0
+static int fakeintr(void *);
+#endif
+#ifdef DEBUG
+static int dumpirqhandlers(void);
+#endif
+
+/* Recalculate the interrupt masks from scratch.
+ * We could code special registry and deregistry versions of this function that
+ * would be faster, but the code would be nastier, and we don't expect this to
+ * happen very much anyway.
+ */
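+/*
+ * Example: an IRQ registered with bit 4 set in its levels[] entry gets
+ * its bit ORed into irqmasks[4] below; the hierarchy pass then folds
+ * each level's bits into every lower level as well.
+ */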
+void intr_calculatemasks(void);
+void
+intr_calculatemasks(void)
+{
+ int irq;
+ int intrlevel[ICU_LEN];
+ int level;
+
+ /* First, figure out which levels each IRQ uses. */
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ intrlevel[irq] = levels[irq];
+ }
+ /* Then figure out which IRQs use each level. */
+#ifdef hpcarm
+ for (level = 0; level < NIPL; level++) {
+#else
+ for (level = 0; level <= IPL_LEVELS; level++) {
+#endif
+ int irqs = 0;
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ if (intrlevel[irq] & (1 << level)) {
+ irqs |= 1 << irq;
+ }
+ }
+#ifdef hpcarm
+
+ imask[level] = irqs;
+#else
+ irqmasks[level] = irqs;
+ printf("level %d set to %x\n", level, irqs);
+#endif
+ }
+ /*
+ * Enforce a hierarchy that gives slow devices a better chance at not
+ * dropping data.
+ */
+#ifdef hpcarm
+ for (level = NIPL - 1; level > 0; level--)
+ imask[level - 1] |= imask[level];
+#else
+ for (level = IPL_LEVELS; level > 0; level--)
+ irqmasks[level - 1] |= irqmasks[level];
+#endif
+ /*
+ * Calculate irqblock[], which emulates hardware interrupt levels.
+ */
+#if 0
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ int irqs = 1 << irq;
+ for (q = irqhandlers[irq]; q; q = q->ih_next)
+#ifdef hpcarm
+ irqs |= ~imask[q->ih_level];
+#else
+ irqs |= ~irqmasks[q->ih_level];
+#endif
+ irqblock[irq] = irqs;
+ }
+#endif
+}
+
+const struct evcnt *sa11x0_intr_evcnt(sa11x0_chipset_tag_t, int);
+void stray_irqhandler(void *);
+
+
+const struct evcnt *
+sa11x0_intr_evcnt(sa11x0_chipset_tag_t ic, int irq)
+{
+
+ /* XXX for now, no evcnt parent reported */
+ return NULL;
+}
+
+
+void
+stray_irqhandler(void *p)
+{
+
+ printf("stray interrupt %p\n", p);
+}
+
+#if 0
+int
+fakeintr(void *p)
+{
+
+ return 0;
+}
+#endif
+#ifdef DEBUG
+int
+dumpirqhandlers()
+{
+ int irq;
+ struct irqhandler *p;
+
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ printf("irq %d:", irq);
+ p = irqhandlers[irq];
+ for (; p; p = p->ih_next)
+ printf("ih_func: 0x%lx, ", (unsigned long)p->ih_func);
+ printf("\n");
+ }
+ return 0;
+}
+#endif
+/* End of sa11x0_irqhandler.c */
diff --git a/sys/arm/sa11x0/sa11x0_ost.c b/sys/arm/sa11x0/sa11x0_ost.c
new file mode 100644
index 0000000..152d114
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_ost.c
@@ -0,0 +1,355 @@
+/* $NetBSD: sa11x0_ost.c,v 1.11 2003/07/15 00:24:51 lukem Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by IWAMOTO Toshihiro and Ichiro FUKUHARA.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+#include <sys/bus.h>
+#include <sys/clock.h>
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <machine/intr.h>
+
+#include <machine/cpufunc.h>
+
+#include <machine/katelib.h>
+
+#include <arm/sa11x0/sa11x0_reg.h>
+#include <arm/sa11x0/sa11x0_var.h>
+#include <arm/sa11x0/sa11x0_ostreg.h>
+
+static int saost_probe(device_t);
+static int saost_attach(device_t);
+
+int gettick(void);
+static void clockintr(void *);
+#if 0
+static void statintr(void *);
+#endif
+void rtcinit(void);
+
+#if 0
+static struct mtx clock_lock;
+#endif
+
+struct saost_softc {
+ device_t sc_dev;
+ bus_addr_t sc_baseaddr;
+ bus_space_tag_t sc_iot;
+ bus_space_handle_t sc_ioh;
+
+ u_int32_t sc_clock_count;
+ u_int32_t sc_statclock_count;
+ u_int32_t sc_statclock_step;
+};
+
+static struct saost_softc *saost_sc = NULL;
+
+#define TIMER_FREQUENCY 3686400 /* 3.6864MHz */
+#define TICKS_PER_MICROSECOND (TIMER_FREQUENCY/1000000)
+
+#ifndef STATHZ
+#define STATHZ 64
+#endif
+
+static device_method_t saost_methods[] = {
+ DEVMETHOD(device_probe, saost_probe),
+ DEVMETHOD(device_attach, saost_attach),
+ {0, 0},
+};
+
+static driver_t saost_driver = {
+ "saost",
+ saost_methods,
+ sizeof(struct saost_softc),
+};
+static devclass_t saost_devclass;
+
+DRIVER_MODULE(saost, saip, saost_driver, saost_devclass, 0, 0);
+static int
+saost_probe(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+saost_attach(device_t dev)
+{
+ struct saost_softc *sc = device_get_softc(dev);
+ struct sa11x0_softc *sa = device_get_softc(device_get_parent(dev));
+
+ sc->sc_dev = dev;
+ sc->sc_iot = sa->sc_iot;
+	sc->sc_baseaddr = SAOST_BASE;
+
+ saost_sc = sc;
+
+	/* Map enough to cover all OS timer registers (up to SAOST_IR). */
+	if (bus_space_map(sa->sc_iot, sc->sc_baseaddr, 0x20, 0,
+	    &sc->sc_ioh))
+		panic("%s: Cannot map registers", device_get_name(dev));
+
+	/* disable all channels and clear interrupt status */
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_IR, 0);
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_SR, 0xf);
+ return (0);
+
+}
+
+static void
+clockintr(arg)
+ void *arg;
+{
+ struct clockframe *frame = arg;
+ u_int32_t oscr, nextmatch, oldmatch;
+ int s;
+
+#if 0
+ mtx_lock_spin(&clock_lock);
+#endif
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_SR, 1);
+
+ /* schedule next clock intr */
+ oldmatch = saost_sc->sc_clock_count;
+ nextmatch = oldmatch + TIMER_FREQUENCY / hz;
+
+ oscr = bus_space_read_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_CR);
+
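+	/*
+	 * Detect a missed match, allowing for 32-bit counter wrap: the
+	 * counter is past nextmatch either directly (oscr beyond
+	 * nextmatch, or already wrapped behind oldmatch) or, when
+	 * nextmatch itself wrapped below oldmatch, while oscr sits
+	 * between the two.
+	 */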
+ if ((nextmatch > oldmatch &&
+ (oscr > nextmatch || oscr < oldmatch)) ||
+ (nextmatch < oldmatch && oscr > nextmatch && oscr < oldmatch)) {
+		/*
+		 * We couldn't set the match register in time; just set
+		 * it to a near-future value so that the next interrupt
+		 * happens.
+		 * XXX is it possible to compensate for lost interrupts?
+		 */
+
+ s = splhigh();
+ oscr = bus_space_read_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_CR);
+ nextmatch = oscr + 10;
+ splx(s);
+ }
+ saost_sc->sc_clock_count = nextmatch;
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_MR0,
+ nextmatch);
+ hardclock(frame);
+#if 0
+ mtx_unlock_spin(&clock_lock);
+#endif
+}
+
+#if 0
+static void
+statintr(arg)
+ void *arg;
+{
+ struct clockframe *frame = arg;
+ u_int32_t oscr, nextmatch, oldmatch;
+ int s;
+
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_SR, 2);
+
+ /* schedule next clock intr */
+ oldmatch = saost_sc->sc_statclock_count;
+ nextmatch = oldmatch + saost_sc->sc_statclock_step;
+
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_MR1,
+ nextmatch);
+ oscr = bus_space_read_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_CR);
+
+ if ((nextmatch > oldmatch &&
+ (oscr > nextmatch || oscr < oldmatch)) ||
+ (nextmatch < oldmatch && oscr > nextmatch && oscr < oldmatch)) {
+		/*
+		 * We couldn't set the match register in time; just set
+		 * it to a near-future value so that the next interrupt
+		 * happens.
+		 * XXX is it possible to compensate for lost interrupts?
+		 */
+
+ s = splhigh();
+ oscr = bus_space_read_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_CR);
+ nextmatch = oscr + 10;
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_MR1, nextmatch);
+ splx(s);
+ }
+
+ saost_sc->sc_statclock_count = nextmatch;
+ statclock(frame);
+
+}
+#endif
+
+#if 0
+void
+setstatclockrate(int hz)
+{
+ u_int32_t count;
+
+ saost_sc->sc_statclock_step = TIMER_FREQUENCY / hz;
+ count = bus_space_read_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_CR);
+ count += saost_sc->sc_statclock_step;
+ saost_sc->sc_statclock_count = count;
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_MR1, count);
+}
+#endif
+void
+cpu_initclocks()
+{
+	device_t dev = saost_sc->sc_dev;
+	struct resource *irq1, *irq2;
+	int rid = 0;
+	void *ih1/*, *ih2 */;
+
+	stathz = STATHZ;
+	profhz = stathz;
+#if 0
+	mtx_init(&clock_lock, "SA1110 Clock lock", NULL, MTX_SPIN);
+#endif
+	saost_sc->sc_statclock_step = TIMER_FREQUENCY / stathz;
+
+	printf("clock: hz=%d stathz = %d\n", hz, stathz);
+
+	/* Use channels 0 and 1 for hardclock and statclock, respectively. */
+ saost_sc->sc_clock_count = TIMER_FREQUENCY / hz;
+ saost_sc->sc_statclock_count = TIMER_FREQUENCY / stathz;
+
+ irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0,
+ ~0, 1, RF_ACTIVE);
+ rid = 1;
+ irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
+ RF_ACTIVE);
+ bus_setup_intr(dev, irq1, INTR_TYPE_CLK | INTR_FAST, clockintr, NULL,
+ &ih1);
+#if 0
+ bus_setup_intr(dev, irq2, INTR_TYPE_CLK | INTR_FAST, statintr, NULL
+ ,&ih2);
+#endif
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_SR, 0xf);
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_IR, 3);
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_MR0,
+ saost_sc->sc_clock_count);
+#if 0
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_MR1,
+ 0);
+#endif
+ /* Zero the counter value */
+ bus_space_write_4(saost_sc->sc_iot, saost_sc->sc_ioh, SAOST_CR, 0);
+}
+
+int
+gettick()
+{
+ int counter;
+ u_int savedints;
+ savedints = disable_interrupts(I32_bit);
+
+ counter = bus_space_read_4(saost_sc->sc_iot, saost_sc->sc_ioh,
+ SAOST_CR);
+
+ restore_interrupts(savedints);
+ return counter;
+}
+
+void
+DELAY(usecs)
+ int usecs;
+{
+ u_int32_t tick, otick, delta;
+ int j, csec, usec;
+
+ csec = usecs / 10000;
+ usec = usecs % 10000;
+
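+	/*
+	 * Convert to timer ticks in two steps to avoid 32-bit
+	 * overflow: at 3.6864 MHz one centisecond is 36864 ticks, so
+	 * e.g. DELAY(25000) yields 2*36864 + 36864*5000/10000 = 92160
+	 * ticks.
+	 */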
+ usecs = (TIMER_FREQUENCY / 100) * csec
+ + (TIMER_FREQUENCY / 100) * usec / 10000;
+
+ if (! saost_sc) {
+ /* clock isn't initialized yet */
+ for(; usecs > 0; usecs--)
+ for(j = 100; j > 0; j--)
+ ;
+ return;
+ }
+
+#if 0
+ mtx_lock_spin(&clock_lock);
+#endif
+ otick = gettick();
+
+ while (1) {
+ for(j = 100; j > 0; j--)
+ ;
+ tick = gettick();
+ delta = tick - otick;
+ if (delta > usecs) {
+ break;
+ }
+ usecs -= delta;
+ otick = tick;
+ }
+#if 0
+ mtx_unlock_spin(&clock_lock);
+#endif
+}
+
+void
+cpu_startprofclock(void)
+{
+ printf("STARTPROFCLOCK\n");
+}
+
+void
+cpu_stopprofclock(void)
+{
+}
diff --git a/sys/arm/sa11x0/sa11x0_ostreg.h b/sys/arm/sa11x0/sa11x0_ostreg.h
new file mode 100644
index 0000000..c03aea1
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_ostreg.h
@@ -0,0 +1,83 @@
+/* $NetBSD: sa11x0_ostreg.h,v 1.1 2001/07/08 23:37:53 rjs Exp $ */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Ichiro FUKUHARA.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * SA-11x0 OS Timer Register
+ */
+
+/* OS Timer Match Register */
+#define SAOST_MR0 0x00
+#define SAOST_MR1 0x04
+#define SAOST_MR2 0x08
+#define SAOST_MR3 0x0C
+
+/* OS Timer Count Register */
+#define SAOST_CR 0x10
+
+/* OS Timer Status Register */
+#define SAOST_SR 0x14
+#define SR_CH0 (1<<0)
+#define SR_CH1 (1<<1)
+#define SR_CH2 (1<<2)
+#define SR_CH3 (1<<3)
+
+/* OS Timer Watchdog Match Enable Register */
+#define SAOST_WR 0x18
+
+/* OS Timer Interrupt Enable Register */
+#define SAOST_IR 0x1C
+
+/*
+ * SA-1110 Real Time Clock
+ */
+
+/* RTC Alarm Register */
+#define SARTC_AR 0x00
+
+/* RTC Counter Register */
+#define SARTC_CR 0x04
+
+/* RTC Trim Register */
+#define SARTC_TR 0x08
+
+/* RTC Status Register */
+#define SARTC_SR 0x0C
+
+/* end of sa11x0_ostreg.h */
diff --git a/sys/arm/sa11x0/sa11x0_ppcreg.h b/sys/arm/sa11x0/sa11x0_ppcreg.h
new file mode 100644
index 0000000..52c129a
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_ppcreg.h
@@ -0,0 +1,67 @@
+/* $NetBSD: sa11x0_ppcreg.h,v 1.2 2001/07/30 12:19:04 rjs Exp $ */
+
+/*-
+ * Copyright (c) 2001, The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by IWAMOTO Toshihiro.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/* SA11[01]0 PPC (peripheral pin controller) */
+
+/* size of I/O space */
+#define SAPPC_NPORTS 13
+
+#define SAPPC_PDR 0x00 /* pin direction register */
+
+#define SAPPC_PSR 0x04 /* pin state register */
+
+#define SAPPC_PAR 0x08 /* pin assignment register */
+#define PAR_UPR 0x01000 /* UART pin assignment */
+#define PAR_SPR 0x40000 /* SSP pin assignment */
+
+#define SAPPC_SDR 0x0C /* sleep mode direction register */
+
+#define SAPPC_PFR 0x10 /* pin flag register */
+#define PFR_LCD 0x00001 /* LCD controller flag */
+#define PFR_SP1TX 0x01000 /* serial port 1 Tx flag */
+#define PFR_SP1RX 0x02000 /* serial port 1 Rx flag */
+#define PFR_SP2TX 0x04000 /* serial port 2 Tx flag */
+#define PFR_SP2RX 0x08000 /* serial port 2 Rx flag */
+#define PFR_SP3TX 0x10000 /* serial port 3 Tx flag */
+#define PFR_SP3RX 0x20000 /* serial port 3 Rx flag */
+#define PFR_SP4 0x40000 /* serial port 4 flag */
+
+/* MCP control register 1 */
+#define SAMCP_CR1 0x30 /* MCP control register 1 */
diff --git a/sys/arm/sa11x0/sa11x0_reg.h b/sys/arm/sa11x0/sa11x0_reg.h
new file mode 100644
index 0000000..86ad31d
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_reg.h
@@ -0,0 +1,81 @@
+/* $NetBSD: sa11x0_reg.h,v 1.4 2002/07/19 18:26:56 ichiro Exp $ */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by IWAMOTO Toshihiro.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _ARM_SA11X0_REG_H_
+#define _ARM_SA11X0_REG_H_
+
+/* Physical register base addresses */
+#define SAOST_BASE 0x90000000 /* OS Timer */
+#define SARTC_BASE 0x90010000 /* Real-Time Clock */
+#define SAPMR_BASE 0x90020000 /* Power Manager */
+#define SARCR_BASE 0x90030000 /* Reset Controller */
+#define SAGPIO_BASE 0x90040000 /* GPIO */
+#define SAIPIC_BASE 0x90050000 /* Interrupt Controller */
+#define SAPPC_BASE 0x90060000 /* Peripheral Pin Controller */
+#define SAUDC_BASE 0x80000000 /* USB Device Controller*/
+#define SACOM1_BASE 0x80010000 /* GPCLK/UART 1 */
+#define SACOM3_HW_BASE 0x80050000 /* UART 3 */
+#define SAMCP_BASE 0x80060000 /* MCP Controller */
+#define SASSP_BASE 0x80070000 /* Synchronous serial port */
+
+#define SADMAC_BASE 0xB0000000 /* DMA Controller */
+#define SALCD_BASE 0xB0100000 /* LCD */
+
+/* Register base virtual addresses mapped by initarm() */
+#define SACOM3_BASE 0xd000d000
+
+/* Interrupt controller registers */
+#define SAIPIC_NPORTS 9
+#define SAIPIC_IP	0x00	/* IRQ pending register */
+#define SAIPIC_MR	0x04	/* Mask register */
+#define SAIPIC_LR	0x08	/* Level register */
+#define SAIPIC_CR	0x0C	/* Control register */
+#define SAIPIC_FP	0x10	/* FIQ pending register */
+#define SAIPIC_PR	0x20	/* Pending register */
+
+/* width of interrupt controller */
+#define ICU_LEN 32
+
+/* Reset controller registers */
+#define SARCR_RSRR 0x0 /* Software reset register */
+#define SARCR_RCSR 0x4 /* Reset status register */
+#define SARCR_TUCR 0x8 /* Test Unit control reg */
+
+#endif /* _ARM_SA11X0_REG_H_ */
diff --git a/sys/arm/sa11x0/sa11x0_var.h b/sys/arm/sa11x0/sa11x0_var.h
new file mode 100644
index 0000000..b1bf2a0
--- /dev/null
+++ b/sys/arm/sa11x0/sa11x0_var.h
@@ -0,0 +1,79 @@
+/* $NetBSD: sa11x0_var.h,v 1.4 2003/04/14 14:18:41 rjs Exp $ */
+
+/*-
+ * Copyright (c) 2001, The NetBSD Foundation, Inc. All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by IWAMOTO Toshihiro and Ichiro FUKUHARA.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _SA11X0_VAR_H
+#define _SA11X0_VAR_H
+
+#include <sys/conf.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+
+struct sa11x0_softc {
+ device_t sc_dev;
+ bus_space_tag_t sc_iot;
+ bus_space_handle_t sc_ioh;
+ bus_space_handle_t sc_gpioh;
+ bus_space_handle_t sc_ppch;
+ bus_space_handle_t sc_dmach;
+ bus_space_handle_t sc_reseth;
+ u_int32_t sc_intrmask;
+};
+
+/* Attach args all devices */
+
+typedef void *sa11x0_chipset_tag_t;
+
+extern struct bus_space sa11x0_bs_tag;
+struct sa11x0_attach_args {
+ sa11x0_chipset_tag_t sa_sc;
+ bus_space_tag_t sa_iot; /* Bus tag */
+ bus_addr_t sa_addr; /* i/o address */
+ bus_size_t sa_size;
+
+ int sa_intr;
+ int sa_gpio;
+};
+
+void *sa11x0_intr_establish(sa11x0_chipset_tag_t, int, int, int,
+ int (*)(void *), void *);
+void sa11x0_intr_disestablish(sa11x0_chipset_tag_t, void *);
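+
+/*
+ * Hypothetical usage from a child driver's attach routine (the two
+ * int arguments are the interrupt type and priority level, as in the
+ * NetBSD original):
+ *
+ *	ih = sa11x0_intr_establish(sa->sa_sc, sa->sa_intr, type, level,
+ *	    mydev_intr, softc);
+ */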
+
+#endif /* _SA11X0_VAR_H */
diff --git a/sys/arm/sa11x0/std.sa11x0 b/sys/arm/sa11x0/std.sa11x0
new file mode 100644
index 0000000..f95a815
--- /dev/null
+++ b/sys/arm/sa11x0/std.sa11x0
@@ -0,0 +1,5 @@
+# StrongARM SA11x0 common options
+#$FreeBSD$
+files "../sa11x0/files.sa11x0"
+cpu CPU_SA1100
+cpu CPU_SA1110