author     ian <ian@FreeBSD.org>    2014-05-17 13:53:38 +0000
committer  ian <ian@FreeBSD.org>    2014-05-17 13:53:38 +0000
commit     f51629e24e56540c52920e2b0e3fa2eddb3c8bed (patch)
tree       21720b45d81bf613d158521e59271dd9e921cd76 /sys/arm/include
parent     6341a2d92b98c830653de9c38705ec3321702a1e (diff)
MFC 262952, 262958, 262966, 262979, 262980, 262986, 262987, 262995, 262997,
263030, 263033, 263034, 263056, 263057

Remove all the redundant external declarations of exception vectors and
runtime setting of the pointers that's scattered around various places.

Remove all traces of support for ARM chips prior to the arm9 series.

Make the default exception handler vectors point to where I thought they were
already pointing: the default handlers (not a panic that says there is no
default handler).

Eliminate irq_dispatch.S.  Move the data items it contained into arm/intr.c
and the functionality it provided into arm/exception.S.

Move the exception vector table (so-called "page0" data) into exception.S and
eliminate vectors.S.

Change the way the asm GET_CURTHREAD_PTR() macro is defined so that code using
it doesn't have to have an "AST_LOCALS" macro somewhere in the file.

Arrange for arm fork_trampoline() to return to userland via the standard
swi_exit code in exception.S instead of having its own inline expansion of the
DO_AST and PULLFRAME macros.

Now that the PUSHFRAME and PULLFRAME macros are used only in the swi entry/exit
code, they don't need to be macros.  Except that didn't work and the whole
change was reverted.

Remove some unnecessary indirection and jump right to the handler functions.

Use panic rather than printf to "handle" an arm26 address exception (should
never happen on arm32).

Remove the unreferenced DATA() macro.

Remove #include <machine/asmacros.h> from files that don't need it.
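
As an illustration of the GET_CURTHREAD_PTR() change described above (the new
pre-ARMv6 definition is visible in the asmacros.h hunk below), the following is
a minimal C sketch of what the macro now boils down to. The struct and variable
names with a _sketch suffix are stand-ins invented for illustration, not the
kernel's real declarations, and the single-pcpu simplification is an assumption.

```c
/*
 * Sketch only: rough C equivalent of the new pre-ARMv6 GET_CURTHREAD_PTR(tmp):
 *     ldr tmp, =_C_LABEL(__pcpu)        @ assembler materializes &__pcpu
 *     ldr tmp, [tmp, #PC_CURTHREAD]     @ load pcpu->pc_curthread
 * No per-file ".Lcurthread" literal (the old AST_LOCALS requirement) is
 * involved, so any .S file can use the macro directly.
 */
struct thread;                               /* opaque for this sketch */
struct pcpu_sketch { struct thread *pc_curthread; };

extern struct pcpu_sketch __pcpu_sketch;     /* stand-in for the kernel's __pcpu */

static inline struct thread *
curthread_sketch(void)
{
	return (__pcpu_sketch.pc_curthread);
}
```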
Diffstat (limited to 'sys/arm/include')
-rw-r--r--  sys/arm/include/armreg.h     52
-rw-r--r--  sys/arm/include/asmacros.h  251
-rw-r--r--  sys/arm/include/cpuconf.h    35
-rw-r--r--  sys/arm/include/cpufunc.h   106
-rw-r--r--  sys/arm/include/md_var.h      8
-rw-r--r--  sys/arm/include/param.h       1
-rw-r--r--  sys/arm/include/pmap.h       20
-rw-r--r--  sys/arm/include/sysarch.h     2
8 files changed, 25 insertions, 450 deletions
diff --git a/sys/arm/include/armreg.h b/sys/arm/include/armreg.h
index 7b52be1..8e02621 100644
--- a/sys/arm/include/armreg.h
+++ b/sys/arm/include/armreg.h
@@ -73,17 +73,7 @@
#define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000)
#define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x))
-/* On ARM3 and ARM6, this byte holds the foundry ID. */
-#define CPU_ID_FOUNDRY_MASK 0x00ff0000
-#define CPU_ID_FOUNDRY_VLSI 0x00560000
-
-/* On ARM7 it holds the architecture and variant (sub-model) */
-#define CPU_ID_7ARCH_MASK 0x00800000
-#define CPU_ID_7ARCH_V3 0x00000000
-#define CPU_ID_7ARCH_V4T 0x00800000
-#define CPU_ID_7VARIANT_MASK 0x007f0000
-
-/* On more recent ARMs, it does the same, but in a different format */
+/* On recent ARMs this byte holds the architecture and variant (sub-model) */
#define CPU_ID_ARCH_MASK 0x000f0000
#define CPU_ID_ARCH_V3 0x00000000
#define CPU_ID_ARCH_V4 0x00010000
@@ -110,29 +100,7 @@
/* Individual CPUs are probably best IDed by everything but the revision. */
#define CPU_ID_CPU_MASK 0xfffffff0
-/* Fake CPU IDs for ARMs without CP15 */
-#define CPU_ID_ARM2 0x41560200
-#define CPU_ID_ARM250 0x41560250
-
-/* Pre-ARM7 CPUs -- [15:12] == 0 */
-#define CPU_ID_ARM3 0x41560300
-#define CPU_ID_ARM600 0x41560600
-#define CPU_ID_ARM610 0x41560610
-#define CPU_ID_ARM620 0x41560620
-
-/* ARM7 CPUs -- [15:12] == 7 */
-#define CPU_ID_ARM700 0x41007000 /* XXX This is a guess. */
-#define CPU_ID_ARM710 0x41007100
-#define CPU_ID_ARM7500 0x41027100
-#define CPU_ID_ARM710A 0x41047100 /* inc ARM7100 */
-#define CPU_ID_ARM7500FE 0x41077100
-#define CPU_ID_ARM710T 0x41807100
-#define CPU_ID_ARM720T 0x41807200
-#define CPU_ID_ARM740T8K 0x41807400 /* XXX no MMU, 8KB cache */
-#define CPU_ID_ARM740T4K 0x41817400 /* XXX no MMU, 4KB cache */
-
-/* Post-ARM7 CPUs */
-#define CPU_ID_ARM810 0x41018100
+/* ARM9 and later CPUs */
#define CPU_ID_ARM920T 0x41129200
#define CPU_ID_ARM920T_ALT 0x41009200
#define CPU_ID_ARM922T 0x41029220
@@ -155,8 +123,6 @@
#define CPU_ID_CORTEXA9R2 0x412fc090
#define CPU_ID_CORTEXA9R3 0x413fc090
#define CPU_ID_CORTEXA15 0x410fc0f0
-#define CPU_ID_SA110 0x4401a100
-#define CPU_ID_SA1100 0x4401a110
#define CPU_ID_KRAIT 0x510f06f0 /* Snapdragon S4 Pro/APQ8064 */
#define CPU_ID_TI925T 0x54029250
#define CPU_ID_MV88FR131 0x56251310 /* Marvell Feroceon 88FR131 Core */
@@ -180,8 +146,6 @@
#define CPU_ID_FA526 0x66015260
#define CPU_ID_FA626TE 0x66056260
-#define CPU_ID_SA1110 0x6901b110
-#define CPU_ID_IXP1200 0x6901c120
#define CPU_ID_80200 0x69052000
#define CPU_ID_PXA250 0x69052100 /* sans core revision */
#define CPU_ID_PXA210 0x69052120
@@ -206,18 +170,6 @@
#define CPU_ID_IXP435 0x69054040
#define CPU_ID_IXP465 0x69054200
-/* ARM3-specific coprocessor 15 registers */
-#define ARM3_CP15_FLUSH 1
-#define ARM3_CP15_CONTROL 2
-#define ARM3_CP15_CACHEABLE 3
-#define ARM3_CP15_UPDATEABLE 4
-#define ARM3_CP15_DISRUPTIVE 5
-
-/* ARM3 Control register bits */
-#define ARM3_CTL_CACHE_ON 0x00000001
-#define ARM3_CTL_SHARED 0x00000002
-#define ARM3_CTL_MONITOR 0x00000004
-
/* CPUID registers */
#define ARM_PFR0_ARM_ISA_MASK 0x0000000f
diff --git a/sys/arm/include/asmacros.h b/sys/arm/include/asmacros.h
index 8796ae8..f785135 100644
--- a/sys/arm/include/asmacros.h
+++ b/sys/arm/include/asmacros.h
@@ -1,12 +1,7 @@
-/* $NetBSD: frame.h,v 1.6 2003/10/05 19:44:58 matt Exp $ */
-
/*-
- * Copyright (c) 1994-1997 Mark Brinicombe.
- * Copyright (c) 1994 Brini.
+ * Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
* All rights reserved.
*
- * This code is derived from software written for Brini by Mark Brinicombe
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -15,20 +10,14 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Brini.
- * 4. The name of the company nor the name of the author may be used to
- * endorse or promote products derived from this software without specific
- * prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
@@ -47,233 +36,15 @@
#ifdef LOCORE
#include "opt_global.h"
-/*
- * ASM macros for pushing and pulling trapframes from the stack
- *
- * These macros are used to handle the irqframe and trapframe structures
- * defined above.
- */
-
-/*
- * PUSHFRAME - macro to push a trap frame on the stack in the current mode
- * Since the current mode is used, the SVC lr field is not defined.
- *
- * NOTE: r13 and r14 are stored separately as a work around for the
- * SA110 rev 2 STM^ bug
- */
-#ifdef ARM_TP_ADDRESS
-#define PUSHFRAME \
- sub sp, sp, #4; /* Align the stack */ \
- str lr, [sp, #-4]!; /* Push the return address */ \
- sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
- stmia sp, {r0-r12}; /* Push the user mode registers */ \
- add r0, sp, #(4*13); /* Adjust the stack pointer */ \
- stmia r0, {r13-r14}^; /* Push the user mode registers */ \
- mov r0, r0; /* NOP for previous instruction */ \
- mrs r0, spsr; /* Put the SPSR on the stack */ \
- str r0, [sp, #-4]!; \
- ldr r0, =ARM_RAS_START; \
- mov r1, #0; \
- str r1, [r0]; \
- mov r1, #0xffffffff; \
- str r1, [r0, #4];
-#else
-#define PUSHFRAME \
- sub sp, sp, #4; /* Align the stack */ \
- str lr, [sp, #-4]!; /* Push the return address */ \
- sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
- stmia sp, {r0-r12}; /* Push the user mode registers */ \
- add r0, sp, #(4*13); /* Adjust the stack pointer */ \
- stmia r0, {r13-r14}^; /* Push the user mode registers */ \
- mov r0, r0; /* NOP for previous instruction */ \
- mrs r0, spsr; /* Put the SPSR on the stack */ \
- str r0, [sp, #-4]!;
-#endif
-
-/*
- * PULLFRAME - macro to pull a trap frame from the stack in the current mode
- * Since the current mode is used, the SVC lr field is ignored.
- */
-
-#ifdef ARM_TP_ADDRESS
-#define PULLFRAME \
- ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
- msr spsr_fsxc, r0; \
- ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
- mov r0, r0; /* NOP for previous instruction */ \
- add sp, sp, #(4*17); /* Adjust the stack pointer */ \
- ldr lr, [sp], #0x0004; /* Pull the return address */ \
- add sp, sp, #4 /* Align the stack */
-#else
-#define PULLFRAME \
- ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
- msr spsr_fsxc, r0; \
- clrex; \
- ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
- mov r0, r0; /* NOP for previous instruction */ \
- add sp, sp, #(4*17); /* Adjust the stack pointer */ \
- ldr lr, [sp], #0x0004; /* Pull the return address */ \
- add sp, sp, #4 /* Align the stack */
-#endif
-
-/*
- * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
- * This should only be used if the processor is not currently in SVC32
- * mode. The processor mode is switched to SVC mode and the trap frame is
- * stored. The SVC lr field is used to store the previous value of
- * lr in SVC mode.
- *
- * NOTE: r13 and r14 are stored separately as a work around for the
- * SA110 rev 2 STM^ bug
- */
-#ifdef ARM_TP_ADDRESS
-#define PUSHFRAMEINSVC \
- stmdb sp, {r0-r3}; /* Save 4 registers */ \
- mov r0, lr; /* Save xxx32 r14 */ \
- mov r1, sp; /* Save xxx32 sp */ \
- mrs r3, spsr; /* Save xxx32 spsr */ \
- mrs r2, cpsr; /* Get the CPSR */ \
- bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \
- orr r2, r2, #(PSR_SVC32_MODE); \
- msr cpsr_c, r2; /* Punch into SVC mode */ \
- mov r2, sp; /* Save SVC sp */ \
- bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \
- sub sp, sp, #4; /* Pad trapframe to keep alignment */ \
- str r0, [sp, #-4]!; /* Push return address */ \
- str lr, [sp, #-4]!; /* Push SVC lr */ \
- str r2, [sp, #-4]!; /* Push SVC sp */ \
- msr spsr_fsxc, r3; /* Restore correct spsr */ \
- ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
- sub sp, sp, #(4*15); /* Adjust the stack pointer */ \
- stmia sp, {r0-r12}; /* Push the user mode registers */ \
- add r0, sp, #(4*13); /* Adjust the stack pointer */ \
- stmia r0, {r13-r14}^; /* Push the user mode registers */ \
- mov r0, r0; /* NOP for previous instruction */ \
- ldr r5, =ARM_RAS_START; /* Check if there's any RAS */ \
- ldr r4, [r5, #4]; /* reset it to point at the */ \
- cmp r4, #0xffffffff; /* end of memory if necessary; */ \
- movne r1, #0xffffffff; /* leave value in r4 for later */ \
- strne r1, [r5, #4]; /* comparision against PC. */ \
- ldr r3, [r5]; /* Retrieve global RAS_START */ \
- cmp r3, #0; /* and reset it if non-zero. */ \
- movne r1, #0; /* If non-zero RAS_START and */ \
- strne r1, [r5]; /* PC was lower than RAS_END, */ \
- ldrne r1, [r0, #16]; /* adjust the saved PC so that */ \
- cmpne r4, r1; /* execution later resumes at */ \
- strhi r3, [r0, #16]; /* the RAS_START location. */ \
- mrs r0, spsr; \
- str r0, [sp, #-4]!
-#else
-#define PUSHFRAMEINSVC \
- stmdb sp, {r0-r3}; /* Save 4 registers */ \
- mov r0, lr; /* Save xxx32 r14 */ \
- mov r1, sp; /* Save xxx32 sp */ \
- mrs r3, spsr; /* Save xxx32 spsr */ \
- mrs r2, cpsr; /* Get the CPSR */ \
- bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \
- orr r2, r2, #(PSR_SVC32_MODE); \
- msr cpsr_c, r2; /* Punch into SVC mode */ \
- mov r2, sp; /* Save SVC sp */ \
- bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \
- sub sp, sp, #4; /* Pad trapframe to keep alignment */ \
- str r0, [sp, #-4]!; /* Push return address */ \
- str lr, [sp, #-4]!; /* Push SVC lr */ \
- str r2, [sp, #-4]!; /* Push SVC sp */ \
- msr spsr_fsxc, r3; /* Restore correct spsr */ \
- ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
- sub sp, sp, #(4*15); /* Adjust the stack pointer */ \
- stmia sp, {r0-r12}; /* Push the user mode registers */ \
- add r0, sp, #(4*13); /* Adjust the stack pointer */ \
- stmia r0, {r13-r14}^; /* Push the user mode registers */ \
- mov r0, r0; /* NOP for previous instruction */ \
- mrs r0, spsr; /* Put the SPSR on the stack */ \
- str r0, [sp, #-4]!
-#endif
-
-/*
- * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
- * in SVC32 mode and restore the saved processor mode and PC.
- * This should be used when the SVC lr register needs to be restored on
- * exit.
- */
-
-#ifdef ARM_TP_ADDRESS
-#define PULLFRAMEFROMSVCANDEXIT \
- ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
- msr spsr_fsxc, r0; /* restore SPSR */ \
- ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
- mov r0, r0; /* NOP for previous instruction */ \
- add sp, sp, #(4*15); /* Adjust the stack pointer */ \
- ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
-#else
-#define PULLFRAMEFROMSVCANDEXIT \
- ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
- msr spsr_fsxc, r0; /* restore SPSR */ \
- clrex; \
- ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
- mov r0, r0; /* NOP for previous instruction */ \
- add sp, sp, #(4*15); /* Adjust the stack pointer */ \
- ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
-#endif
-#if defined(__ARM_EABI__)
-/*
- * Unwind hints so we can unwind past functions that use
- * PULLFRAMEFROMSVCANDEXIT. They are run in reverse order.
- * As the last thing we do is restore the stack pointer
- * we can ignore the padding at the end of struct trapframe.
- */
-#define UNWINDSVCFRAME \
- .save {r13-r15}; /* Restore sp, lr, pc */ \
- .pad #(2*4); /* Skip user sp and lr */ \
- .save {r0-r12}; /* Restore r0-r12 */ \
- .pad #(4) /* Skip spsr */
-#else
-#define UNWINDSVCFRAME
-#endif
-
-#define DATA(name) \
- .data ; \
- _ALIGN_DATA ; \
- .globl name ; \
- .type name, %object ; \
-name:
-
#ifdef _ARM_ARCH_6
-#define AST_LOCALS
#define GET_CURTHREAD_PTR(tmp) \
mrc p15, 0, tmp, c13, c0, 4
#else
-#define AST_LOCALS ;\
-.Lcurthread: ;\
- .word _C_LABEL(__pcpu) + PC_CURTHREAD
-
-#define GET_CURTHREAD_PTR(tmp) \
- ldr tmp, .Lcurthread; \
- ldr tmp, [tmp]
+#define GET_CURTHREAD_PTR(tmp) \
+ ldr tmp, =_C_LABEL(__pcpu);\
+ ldr tmp, [tmp, #PC_CURTHREAD]
#endif
-#define DO_AST \
- ldr r0, [sp] /* Get the SPSR from stack */ ;\
- mrs r4, cpsr /* save CPSR */ ;\
- orr r1, r4, #(I32_bit|F32_bit) ;\
- msr cpsr_c, r1 /* Disable interrupts */ ;\
- and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\
- teq r0, #(PSR_USR32_MODE) ;\
- bne 2f /* Nope, get out now */ ;\
- bic r4, r4, #(I32_bit|F32_bit) ;\
-1: GET_CURTHREAD_PTR(r5) ;\
- ldr r1, [r5, #(TD_FLAGS)] ;\
- and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED) ;\
- teq r1, #0x00000000 ;\
- beq 2f /* Nope. Just bail */ ;\
- msr cpsr_c, r4 /* Restore interrupts */ ;\
- mov r0, sp ;\
- bl _C_LABEL(ast) /* ast(frame) */ ;\
- orr r0, r4, #(I32_bit|F32_bit) ;\
- msr cpsr_c, r0 ;\
- b 1b ;\
-2:
-
#endif /* LOCORE */
#endif /* _KERNEL */
diff --git a/sys/arm/include/cpuconf.h b/sys/arm/include/cpuconf.h
index 0c2faa3..b7cad0d 100644
--- a/sys/arm/include/cpuconf.h
+++ b/sys/arm/include/cpuconf.h
@@ -50,15 +50,11 @@
/*
* Step 1: Count the number of CPU types configured into the kernel.
*/
-#define CPU_NTYPES (defined(CPU_ARM7TDMI) + \
- defined(CPU_ARM8) + defined(CPU_ARM9) + \
+#define CPU_NTYPES (defined(CPU_ARM9) + \
defined(CPU_ARM9E) + \
defined(CPU_ARM10) + \
defined(CPU_ARM1136) + \
defined(CPU_ARM1176) + \
- defined(CPU_SA110) + defined(CPU_SA1100) + \
- defined(CPU_SA1110) + \
- defined(CPU_IXP12X0) + \
defined(CPU_XSCALE_80200) + \
defined(CPU_XSCALE_80321) + \
defined(CPU_XSCALE_PXA2X0) + \
@@ -72,9 +68,7 @@
/*
* Step 2: Determine which ARM architecture versions are configured.
*/
-#if (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
- defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
- defined(CPU_IXP12X0) || defined(CPU_FA526))
+#if defined(CPU_ARM9) || defined(CPU_FA526)
#define ARM_ARCH_4 1
#else
#define ARM_ARCH_4 0
@@ -123,27 +117,17 @@
* ARM_MMU_MEMC Prehistoric, external memory controller
* and MMU for ARMv2 CPUs.
*
- * ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
+ * ARM_MMU_GENERIC Generic ARM MMU, compatible with ARMv4 and v5.
*
* ARM_MMU_V6 ARMv6 MMU.
*
* ARM_MMU_V7 ARMv7 MMU.
*
- * ARM_MMU_SA1 StrongARM SA-1 MMU. Compatible with generic
- * ARM MMU, but has no write-through cache mode.
- *
* ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
* MMU, but also has several extensions which
* require different PTE layout to use.
*/
-#if (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
-#define ARM_MMU_MEMC 1
-#else
-#define ARM_MMU_MEMC 0
-#endif
-
-#if (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
- defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) || \
+#if (defined(CPU_ARM9) || defined(CPU_ARM9E) || \
defined(CPU_ARM10) || defined(CPU_FA526) || \
defined(CPU_FA626TE))
#define ARM_MMU_GENERIC 1
@@ -163,13 +147,6 @@
#define ARM_MMU_V7 0
#endif
-#if (defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||\
- defined(CPU_IXP12X0))
-#define ARM_MMU_SA1 1
-#else
-#define ARM_MMU_SA1 0
-#endif
-
#if (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342))
@@ -178,8 +155,8 @@
#define ARM_MMU_XSCALE 0
#endif
-#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_V6 + \
- ARM_MMU_V7 + ARM_MMU_SA1 + ARM_MMU_XSCALE)
+#define ARM_NMMUS (ARM_MMU_GENERIC + ARM_MMU_V6 + \
+ ARM_MMU_V7 + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NMMUS is 0
#endif
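
The CPU_NTYPES and ARM_NMMUS definitions changed above rely on summing
defined() tests that are evaluated inside later #if directives, as in the
"#if ARM_NMMUS == 0" check just above. The fragment below is a stripped-down
sketch of that counting idiom with invented option names (OPT_FOO, OPT_BAR,
OPT_BAZ, NEED_INDIRECTION); it is not the kernel's actual configuration logic.

```c
/*
 * Sketch of the counting idiom used by CPU_NTYPES / ARM_NMMUS: inside an
 * #if, each defined(X) evaluates to 1 or 0, so their sum is the number of
 * options the kernel config enabled.  (Having `defined` appear via macro
 * expansion in an #if is compiler-specific behavior that GCC and Clang
 * accept; the kernel relies on it here.)
 */
#include <stdio.h>

#define OPT_FOO 1                   /* pretend the config enabled two options */
#define OPT_BAR 1

#define N_OPTS (defined(OPT_FOO) + defined(OPT_BAR) + defined(OPT_BAZ))

#if N_OPTS > 1
#define NEED_INDIRECTION 1          /* several types configured: dispatch at runtime */
#else
#define NEED_INDIRECTION 0          /* exactly one type: calls can be direct */
#endif

int
main(void)
{
	printf("need indirection: %d\n", NEED_INDIRECTION);   /* prints 1 here */
	return (0);
}
```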
diff --git a/sys/arm/include/cpufunc.h b/sys/arm/include/cpufunc.h
index 68b15f5..49c08a3 100644
--- a/sys/arm/include/cpufunc.h
+++ b/sys/arm/include/cpufunc.h
@@ -284,63 +284,6 @@ u_int cpufunc_faultstatus (void);
u_int cpufunc_faultaddress (void);
u_int cpu_pfr (int);
-#ifdef CPU_ARM3
-u_int arm3_control (u_int clear, u_int bic);
-void arm3_cache_flush (void);
-#endif /* CPU_ARM3 */
-
-#if defined(CPU_ARM6) || defined(CPU_ARM7)
-void arm67_setttb (u_int ttb);
-void arm67_tlb_flush (void);
-void arm67_tlb_purge (u_int va);
-void arm67_cache_flush (void);
-void arm67_context_switch (void);
-#endif /* CPU_ARM6 || CPU_ARM7 */
-
-#ifdef CPU_ARM6
-void arm6_setup (char *string);
-#endif /* CPU_ARM6 */
-
-#ifdef CPU_ARM7
-void arm7_setup (char *string);
-#endif /* CPU_ARM7 */
-
-#ifdef CPU_ARM7TDMI
-int arm7_dataabt_fixup (void *arg);
-void arm7tdmi_setup (char *string);
-void arm7tdmi_setttb (u_int ttb);
-void arm7tdmi_tlb_flushID (void);
-void arm7tdmi_tlb_flushID_SE (u_int va);
-void arm7tdmi_cache_flushID (void);
-void arm7tdmi_context_switch (void);
-#endif /* CPU_ARM7TDMI */
-
-#ifdef CPU_ARM8
-void arm8_setttb (u_int ttb);
-void arm8_tlb_flushID (void);
-void arm8_tlb_flushID_SE (u_int va);
-void arm8_cache_flushID (void);
-void arm8_cache_flushID_E (u_int entry);
-void arm8_cache_cleanID (void);
-void arm8_cache_cleanID_E (u_int entry);
-void arm8_cache_purgeID (void);
-void arm8_cache_purgeID_E (u_int entry);
-
-void arm8_cache_syncI (void);
-void arm8_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
-void arm8_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
-void arm8_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
-void arm8_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
-void arm8_cache_syncI_rng (vm_offset_t start, vm_size_t end);
-
-void arm8_context_switch (void);
-
-void arm8_setup (char *string);
-
-u_int arm8_clock_config (u_int, u_int);
-#endif
-
-
#if defined(CPU_FA526) || defined(CPU_FA626TE)
void fa526_setup (char *arg);
void fa526_setttb (u_int ttb);
@@ -362,48 +305,6 @@ void fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
#endif
-#ifdef CPU_SA110
-void sa110_setup (char *string);
-void sa110_context_switch (void);
-#endif /* CPU_SA110 */
-
-#if defined(CPU_SA1100) || defined(CPU_SA1110)
-void sa11x0_drain_readbuf (void);
-
-void sa11x0_context_switch (void);
-void sa11x0_cpu_sleep (int mode);
-
-void sa11x0_setup (char *string);
-#endif
-
-#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
-void sa1_setttb (u_int ttb);
-
-void sa1_tlb_flushID_SE (u_int va);
-
-void sa1_cache_flushID (void);
-void sa1_cache_flushI (void);
-void sa1_cache_flushD (void);
-void sa1_cache_flushD_SE (u_int entry);
-
-void sa1_cache_cleanID (void);
-void sa1_cache_cleanD (void);
-void sa1_cache_cleanD_E (u_int entry);
-
-void sa1_cache_purgeID (void);
-void sa1_cache_purgeID_E (u_int entry);
-void sa1_cache_purgeD (void);
-void sa1_cache_purgeD_E (u_int entry);
-
-void sa1_cache_syncI (void);
-void sa1_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
-void sa1_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
-void sa1_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
-void sa1_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
-void sa1_cache_syncI_rng (vm_offset_t start, vm_size_t end);
-
-#endif
-
#ifdef CPU_ARM9
void arm9_setttb (u_int);
@@ -586,7 +487,6 @@ extern unsigned armv5_dcache_index_inc;
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
- defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_FA526) || defined(CPU_FA626TE) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
@@ -601,12 +501,6 @@ void armv4_drain_writebuf (void);
void armv4_idcache_inv_all (void);
#endif
-#if defined(CPU_IXP12X0)
-void ixp12x0_drain_readbuf (void);
-void ixp12x0_context_switch (void);
-void ixp12x0_setup (char *string);
-#endif
-
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
diff --git a/sys/arm/include/md_var.h b/sys/arm/include/md_var.h
index ef202f5..7337b33 100644
--- a/sys/arm/include/md_var.h
+++ b/sys/arm/include/md_var.h
@@ -50,13 +50,6 @@ extern int _min_bzero_size;
enum cpu_class {
CPU_CLASS_NONE,
- CPU_CLASS_ARM2,
- CPU_CLASS_ARM2AS,
- CPU_CLASS_ARM3,
- CPU_CLASS_ARM6,
- CPU_CLASS_ARM7,
- CPU_CLASS_ARM7TDMI,
- CPU_CLASS_ARM8,
CPU_CLASS_ARM9TDMI,
CPU_CLASS_ARM9ES,
CPU_CLASS_ARM9EJS,
@@ -64,7 +57,6 @@ enum cpu_class {
CPU_CLASS_ARM10EJ,
CPU_CLASS_CORTEXA,
CPU_CLASS_KRAIT,
- CPU_CLASS_SA1,
CPU_CLASS_XSCALE,
CPU_CLASS_ARM11J,
CPU_CLASS_MARVELL
diff --git a/sys/arm/include/param.h b/sys/arm/include/param.h
index 5cd0b7b..ab5b406 100644
--- a/sys/arm/include/param.h
+++ b/sys/arm/include/param.h
@@ -70,7 +70,6 @@
#endif
#endif
#endif
-#define MID_MACHINE MID_ARM6
#if defined(SMP) || defined(KLD_MODULE)
#ifndef MAXCPU
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index 74282cc..00080b9 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -355,7 +355,7 @@ extern int pmap_needs_pte_sync;
#define L1_C_PROTO pte_l1_c_proto
#define L2_S_PROTO pte_l2_s_proto
-#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+#elif ARM_MMU_GENERIC != 0
#define L2_S_PROT_U L2_S_PROT_U_generic
#define L2_S_PROT_W L2_S_PROT_W_generic
#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
@@ -488,13 +488,10 @@ extern int pmap_needs_pte_sync;
#endif /* SMP */
#endif /* ARM_NMMUS > 1 */
-#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
-#define PMAP_NEEDS_PTE_SYNC 1
-#define PMAP_INCLUDE_PTE_SYNC
-#elif defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A
+#if defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A
#define PMAP_NEEDS_PTE_SYNC 1
#define PMAP_INCLUDE_PTE_SYNC
-#elif (ARM_MMU_SA1 == 0)
+#else
#define PMAP_NEEDS_PTE_SYNC 0
#endif
@@ -616,14 +613,11 @@ extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
-#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
+#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 || defined(CPU_XSCALE_81342)
void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void pmap_zero_page_generic(vm_paddr_t, int, int);
void pmap_pte_init_generic(void);
-#if defined(CPU_ARM8)
-void pmap_pte_init_arm8(void);
-#endif
#if defined(CPU_ARM9)
void pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
@@ -633,11 +627,7 @@ void pmap_pte_init_arm10(void);
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
void pmap_pte_init_mmu_v6(void);
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
-#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
-
-#if /* ARM_MMU_SA1 == */1
-void pmap_pte_init_sa1(void);
-#endif /* ARM_MMU_SA1 == 1 */
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
#if ARM_MMU_XSCALE == 1
void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
diff --git a/sys/arm/include/sysarch.h b/sys/arm/include/sysarch.h
index 95d0211..71023e8 100644
--- a/sys/arm/include/sysarch.h
+++ b/sys/arm/include/sysarch.h
@@ -45,7 +45,7 @@
* The cpu_switch() code assumes ARM_RAS_START is ARM_TP_ADDRESS + 4, and
* ARM_RAS_END is ARM_TP_ADDRESS + 8, so if that ever changes, be sure to
* update the cpu_switch() (and cpu_throw()) code as well.
- * In addition, code in arm/include/atomic.h and arm/include/asmacros.h
+ * In addition, code in arm/include/atomic.h and arm/arm/exception.S
* assumes that ARM_RAS_END is at ARM_RAS_START+4, so be sure to update those
* if ARM_RAS_END moves in relation to ARM_RAS_START (look for occurrences
* of ldr/str rm,[rn, #4]).
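
The sysarch.h comment above pins ARM_RAS_START and ARM_RAS_END to fixed offsets
from ARM_TP_ADDRESS. A compile-time guard along the following lines would catch
an accidental change to that layout; this is only a sketch, assuming a
configuration where these constants are defined (they are meaningful on
pre-ARMv6 kernels), and it is not code from this commit.

```c
/*
 * Sketch: compile-time check for the layout that cpu_switch(), atomic.h,
 * and exception.S assume, per the comment in sys/arm/include/sysarch.h.
 */
#include <machine/sysarch.h>

_Static_assert(ARM_RAS_START == ARM_TP_ADDRESS + 4,
    "cpu_switch()/cpu_throw() expect ARM_RAS_START at ARM_TP_ADDRESS + 4");
_Static_assert(ARM_RAS_END == ARM_RAS_START + 4,
    "atomic.h and exception.S reach ARM_RAS_END with ldr/str rm, [rn, #4]");
```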