 sys/arm/arm/cpu_asm-v6.S  | 202 ++++++++++++++
 sys/arm/arm/cpuinfo.c     | 121 ++++++++
 sys/arm/arm/machdep.c     |   3 +
 sys/arm/include/asm.h     |   1 +
 sys/arm/include/cpu-v6.h  | 396 +++++++++++++++++++++++++++++
 sys/arm/include/cpufunc.h |   1 -
 sys/arm/include/cpuinfo.h |  91 ++++++
 sys/conf/files.arm        |   2 +
 8 files changed, 816 insertions(+), 1 deletion(-)
diff --git a/sys/arm/arm/cpu_asm-v6.S b/sys/arm/arm/cpu_asm-v6.S
new file mode 100644
index 0000000..b99401f
--- /dev/null
+++ b/sys/arm/arm/cpu_asm-v6.S
@@ -0,0 +1,202 @@
+/*-
+ * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
+ * Copyright 2014 Michal Meloun <meloun@miracle.cz>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/acle-compat.h>
+#include <machine/asm.h>
+#include <machine/asmacros.h>
+#include <machine/armreg.h>
+#include <machine/sysreg.h>
+
+#if __ARM_ARCH >= 7
+
+/*
+ * Define cache functions used by startup code, which counts on the fact that
+ * only r0-r3, r12 (ip) are modified and no stack space is used.  These
+ * functions must be called with interrupts disabled.  Moreover, they work only
+ * with caches integrated into the CPU (accessible via CP15); systems with an
+ * external L2 cache controller such as a PL310 need separate calls to that
+ * device driver to affect L2 caches.  This is not a factor during early kernel
+ * startup, as any external L2 cache controller has not been enabled yet.
+ */
+
+/* Invalidate D cache to PoC (aka all cache levels). */
+ASENTRY_NP(dcache_inv_poc_all)
+ mrc CP15_CLIDR(r0)
+ ands r0, r0, #0x07000000
+ mov r0, r0, lsr #23 /* Get LoC (naturally aligned) */
+ beq 4f
+
+1: mcr CP15_CSSELR(r0) /* set cache level */
+ isb
+ mrc CP15_CCSIDR(r0) /* read CCSIDR */
+
+ ubfx r2, r0, #13, #15 /* get num sets - 1 from CCSIDR */
+ ubfx r3, r0, #3, #10 /* get num ways - 1 from CCSIDR */
+ clz r1, r3 /* number of bits to MSB of way */
+ lsl r3, r3, r1 /* shift into position */
+ mov ip, #1
+ lsl ip, ip, r1 /* ip now contains the way decr */
+
+ ubfx r0, r0, #0, #3 /* get linesize from CCSIDR */
+ add r0, r0, #4 /* apply bias */
+ lsl r2, r2, r0 /* shift sets by log2(linesize) */
+ add r3, r3, r2 /* merge numsets - 1 with numways - 1 */
+ sub ip, ip, r2 /* subtract numsets - 1 from way decr */
+ mov r1, #1
+ lsl r1, r1, r0 /* r1 now contains the set decr */
+ mov r2, ip /* r2 now contains set way decr */
+
+ /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */
+2: mcr CP15_DCISW(r3) /* invalidate line */
+ movs r0, r3 /* get current way/set */
+ beq 3f /* at 0 means we are done */
+ movs r0, r0, lsl #10 /* clear way bits leaving only set bits */
+ subne r3, r3, r1 /* non-zero?, decrement set */
+ subeq r3, r3, r2 /* zero?, decrement way and restore set count */
+ b 2b
+
+3:
+ mrc CP15_CSSELR(r0) /* get cache level */
+ add r0, r0, #2 /* next level */
+ mrc CP15_CLIDR(r1)
+ ands r1, r1, #0x07000000
+ mov r1, r1, lsr #23 /* Get LoC (naturally aligned) */
+ cmp r1, r0
+ bgt 1b
+
+4: dsb /* wait for stores to finish */
+ mov r0, #0
+ mcr CP15_CSSELR(r0)
+ isb
+ bx lr
+END(dcache_inv_poc_all)
+
+/* Invalidate D cache to PoU (aka L1 cache only). */
+ASENTRY_NP(dcache_inv_pou_all)
+ mrc CP15_CLIDR(r0)
+ ands r0, r0, #0x38000000
+ mov r0, r0, lsr #26 /* Get LoUU (naturally aligned) */
+ beq 4f
+
+1: mcr CP15_CSSELR(r0) /* set cache level */
+ isb
+ mrc CP15_CCSIDR(r0) /* read CCSIDR */
+
+ ubfx r2, r0, #13, #15 /* get num sets - 1 from CCSIDR */
+ ubfx r3, r0, #3, #10 /* get num ways - 1 from CCSIDR */
+ clz r1, r3 /* number of bits to MSB of way */
+ lsl r3, r3, r1 /* shift into position */
+ mov ip, #1
+ lsl ip, ip, r1 /* ip now contains the way decr */
+
+ ubfx r0, r0, #0, #3 /* get linesize from CCSIDR */
+ add r0, r0, #4 /* apply bias */
+ lsl r2, r2, r0 /* shift sets by log2(linesize) */
+ add r3, r3, r2 /* merge numsets - 1 with numways - 1 */
+ sub ip, ip, r2 /* subtract numsets - 1 from way decr */
+ mov r1, #1
+ lsl r1, r1, r0 /* r1 now contains the set decr */
+ mov r2, ip /* r2 now contains set way decr */
+
+ /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */
+2: mcr CP15_DCISW(r3) /* invalidate line */
+ movs r0, r3 /* get current way/set */
+ beq 3f /* at 0 means we are done */
+ movs r0, r0, lsl #10 /* clear way bits leaving only set bits */
+ subne r3, r3, r1 /* non-zero?, decrement set */
+ subeq r3, r3, r2 /* zero?, decrement way and restore set count */
+ b 2b
+
+3:
+ mrc CP15_CSSELR(r0) /* get cache level */
+ add r0, r0, #2 /* next level */
+ mrc CP15_CLIDR(r1)
+ ands r1, r1, #0x38000000
+ mov r1, r1, lsr #26 /* Get LoUU (naturally aligned) */
+ cmp r1, r0
+ bgt 1b
+
+4: dsb /* wait for stores to finish */
+ mov r0, #0
+ mcr CP15_CSSELR(r0)
+ bx lr
+END(dcache_inv_pou_all)
+
+/* Write back and invalidate D cache to PoC. */
+ASENTRY_NP(dcache_wbinv_poc_all)
+ mrc CP15_CLIDR(r0)
+ ands r0, r0, #0x07000000
+ mov r0, r0, lsr #23 /* Get LoC (naturally aligned) */
+ beq 4f
+
+1: mcr CP15_CSSELR(r0) /* set cache level */
+ isb
+ mrc CP15_CCSIDR(r0) /* read CCSIDR */
+
+ ubfx r2, r0, #13, #15 /* get num sets - 1 from CCSIDR */
+ ubfx r3, r0, #3, #10 /* get num ways - 1 from CCSIDR */
+ clz r1, r3 /* number of bits to MSB of way */
+ lsl r3, r3, r1 /* shift into position */
+ mov ip, #1
+ lsl ip, ip, r1 /* ip now contains the way decr */
+
+ ubfx r0, r0, #0, #3 /* get linesize from CCSIDR */
+ add r0, r0, #4 /* apply bias */
+ lsl r2, r2, r0 /* shift sets by log2(linesize) */
+ add r3, r3, r2 /* merge numsets - 1 with numways - 1 */
+ sub ip, ip, r2 /* subtract numsets - 1 from way decr */
+ mov r1, #1
+ lsl r1, r1, r0 /* r1 now contains the set decr */
+ mov r2, ip /* r2 now contains set way decr */
+
+ /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */
+2: mcr CP15_DCCISW(r3) /* clean & invalidate line */
+ movs r0, r3 /* get current way/set */
+ beq 3f /* at 0 means we are done */
+ movs r0, r0, lsl #10 /* clear way bits leaving only set bits */
+ subne r3, r3, r1 /* non-zero?, decrement set */
+ subeq r3, r3, r2 /* zero?, decrement way and restore set count */
+ b 2b
+
+3:
+ mrc CP15_CSSELR(r0) /* get cache level */
+ add r0, r0, #2 /* next level */
+ mrc CP15_CLIDR(r1)
+ ands r1, r1, #0x07000000
+ mov r1, r1, lsr #23 /* Get LoC (naturally aligned) */
+ cmp r1, r0
+ bgt 1b
+
+4: dsb /* wait for stores to finish */
+ mov r0, #0
+ mcr CP15_CSSELR(r0)
+ bx lr
+END(dcache_wbinv_poc_all)
+
+#endif /* __ARM_ARCH >= 7 */
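The three routines above share one set/way walk. As a rough C model of that
walk (a sketch, not part of the patch: read_ccsidr() and dc_op_sw() are
hypothetical stand-ins for the CSSELR/CCSIDR access and the DCISW/DCCISW
write, and the CLIDR Ctype fields are not consulted, so every level below LoC
is assumed to hold a data cache), the operand each mcr receives packs way,
set and level fields as the ARMv7 ARM describes:

#include <stdint.h>

static void
dcache_op_all_model(uint32_t clidr, uint32_t (*read_ccsidr)(uint32_t csselr),
    void (*dc_op_sw)(uint32_t setway))
{
	uint32_t loc = (clidr >> 24) & 0x7;	/* Level of Coherency */

	for (uint32_t level = 0; level < loc; level++) {
		uint32_t ccsidr = read_ccsidr(level << 1);
		uint32_t nsets = ((ccsidr >> 13) & 0x7fff) + 1;
		uint32_t nways = ((ccsidr >> 3) & 0x3ff) + 1;
		uint32_t setshift = (ccsidr & 0x7) + 4;	/* log2(line bytes) */
		/* way field sits at the top of the word: 32 - bits(nways-1) */
		uint32_t wayshift = (nways > 1) ? __builtin_clz(nways - 1) : 0;

		for (uint32_t way = 0; way < nways; way++)
			for (uint32_t set = 0; set < nsets; set++)
				dc_op_sw((way << wayshift) |
				    (set << setshift) | (level << 1));
	}
}

The assembly collapses the two inner loops into one register: it merges
(nways - 1) << wayshift with (nsets - 1) << setshift, then counts down with
the precomputed "set decr" and "way decr" values, which is how it gets by
with only r0-r3 and ip.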
diff --git a/sys/arm/arm/cpuinfo.c b/sys/arm/arm/cpuinfo.c
new file mode 100644
index 0000000..d20c561
--- /dev/null
+++ b/sys/arm/arm/cpuinfo.c
@@ -0,0 +1,121 @@
+/*-
+ * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
+ * Copyright 2014 Michal Meloun <meloun@miracle.cz>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/cpuinfo.h>
+#include <machine/cpu-v6.h>
+
+struct cpuinfo cpuinfo;
+
+/* Read and parse CPU id scheme */
+void
+cpuinfo_init(void)
+{
+
+ cpuinfo.midr = cp15_midr_get();
+ /* Test old version id schemes first */
+ if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
+ if (CPU_ID_ISOLD(cpuinfo.midr)) {
+ /* obsolete ARMv2 or ARMv3 CPU */
+ cpuinfo.midr = 0;
+ return;
+ }
+ if (CPU_ID_IS7(cpuinfo.midr)) {
+ if ((cpuinfo.midr & (1 << 23)) == 0) {
+ /* obsolete ARMv3 CPU */
+ cpuinfo.midr = 0;
+ return;
+ }
+ /* ARMv4T CPU */
+ cpuinfo.architecture = 1;
+ cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
+ }
+ } else {
+ /* must be new id scheme */
+ cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
+ cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
+ }
+ /* Parse rest of MIDR */
+ cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
+ cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
+ cpuinfo.patch = cpuinfo.midr & 0x0F;
+
+ /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
+ cpuinfo.ctr = cp15_ctr_get();
+ cpuinfo.tcmtr = cp15_tcmtr_get();
+ cpuinfo.tlbtr = cp15_tlbtr_get();
+ cpuinfo.mpidr = cp15_mpidr_get();
+ cpuinfo.revidr = cp15_revidr_get();
+
+ /* Stop here if the CPU does not use the new (ARMv7) ID scheme. */
+ if (cpuinfo.architecture != 0xF)
+ return;
+
+ cpuinfo.id_pfr0 = cp15_id_pfr0_get();
+ cpuinfo.id_pfr1 = cp15_id_pfr1_get();
+ cpuinfo.id_dfr0 = cp15_id_dfr0_get();
+ cpuinfo.id_afr0 = cp15_id_afr0_get();
+ cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
+ cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
+ cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
+ cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
+ cpuinfo.id_isar0 = cp15_id_isar0_get();
+ cpuinfo.id_isar1 = cp15_id_isar1_get();
+ cpuinfo.id_isar2 = cp15_id_isar2_get();
+ cpuinfo.id_isar3 = cp15_id_isar3_get();
+ cpuinfo.id_isar4 = cp15_id_isar4_get();
+ cpuinfo.id_isar5 = cp15_id_isar5_get();
+
+/* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs
+ cpuinfo.cbar = cp15_cbar_get();
+*/
+
+ /* Test if revidr is implemented */
+ if (cpuinfo.revidr == cpuinfo.midr)
+ cpuinfo.revidr = 0;
+
+ /* parsed bits of above registers */
+ /* id_mmfr0 */
+ cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
+ cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
+ cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
+ cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
+ /* id_mmfr2 */
+ cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
+ /* id_mmfr3 */
+ cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
+ cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
+ /* id_pfr1 */
+ cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
+ cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
+ cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
+}
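For the new-scheme branch above, a worked decode keeps the shifts honest. The
value is illustrative only (a Cortex-A9 r3p0 MIDR), not anything the code
depends on:

/*
 * Example: MIDR = 0x413FC090 (Cortex-A9 r3p0).
 *
 *	implementer  = (midr >> 24) & 0xFF  = 0x41	'A', ARM Ltd.
 *	revision     = (midr >> 20) & 0x0F  = 0x3	variant, the "r3"
 *	architecture = (midr >> 16) & 0x0F  = 0xF	new CPUID scheme
 *	part_number  = (midr >>  4) & 0xFFF = 0xC09	Cortex-A9
 *	patch        =  midr        & 0x0F  = 0x0	the "p0"
 *
 * Since architecture == 0xF, cpuinfo_init() continues and reads the
 * ID_PFR/ID_DFR/ID_MMFR/ID_ISAR registers.
 */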
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
index 685a059..c739ae9 100644
--- a/sys/arm/arm/machdep.c
+++ b/sys/arm/arm/machdep.c
@@ -90,6 +90,7 @@ __FBSDID("$FreeBSD$");
#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
+#include <machine/cpuinfo.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/intr.h>
@@ -1058,6 +1059,8 @@ initarm(struct arm_boot_params *abp)
arm_physmem_kernaddr = abp->abp_physaddr;
memsize = 0;
+
+ cpuinfo_init();
set_cpufuncs();
/*
diff --git a/sys/arm/include/asm.h b/sys/arm/include/asm.h
index deaccec..9122e6e 100644
--- a/sys/arm/include/asm.h
+++ b/sys/arm/include/asm.h
@@ -39,6 +39,7 @@
#ifndef _MACHINE_ASM_H_
#define _MACHINE_ASM_H_
#include <sys/cdefs.h>
+#include <machine/acle-compat.h>
#include <machine/sysreg.h>
#define _C_LABEL(x) x
diff --git a/sys/arm/include/cpu-v6.h b/sys/arm/include/cpu-v6.h
new file mode 100644
index 0000000..550fa47
--- /dev/null
+++ b/sys/arm/include/cpu-v6.h
@@ -0,0 +1,396 @@
+/*-
+ * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
+ * Copyright 2014 Michal Meloun <meloun@miracle.cz>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef MACHINE_CPU_V6_H
+#define MACHINE_CPU_V6_H
+
+#include "machine/atomic.h"
+#include "machine/cpufunc.h"
+#include "machine/cpuinfo.h"
+#include "machine/sysreg.h"
+
+
+#define CPU_ASID_KERNEL 0
+
+/*
+ * Macros to generate CP15 (system control processor) read/write functions.
+ */
+#define _FX(s...) #s
+
+#define _RF0(fname, aname...) \
+static __inline register_t \
+fname(void) \
+{ \
+ register_t reg; \
+ __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \
+ return(reg); \
+}
+
+#define _WF0(fname, aname...) \
+static __inline void \
+fname(void) \
+{ \
+ __asm __volatile("mcr\t" _FX(aname)); \
+}
+
+#define _WF1(fname, aname...) \
+static __inline void \
+fname(register_t reg) \
+{ \
+ __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \
+}
+
+/*
+ * Raw CP15 maintenance operations
+ * !!! not for external use !!!
+ */
+
+/* TLB */
+
+_WF0(_CP15_TLBIALL, CP15_TLBIALL) /* Invalidate entire unified TLB */
+#if __ARM_ARCH >= 7 && defined SMP
+_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS) /* Invalidate entire unified TLB IS */
+#endif
+_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0)) /* Invalidate unified TLB by ASID */
+#if __ARM_ARCH >= 7 && defined SMP
+_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0)) /* Invalidate unified TLB by ASID IS */
+#endif
+_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0)) /* Invalidate unified TLB by MVA, all ASID */
+#if __ARM_ARCH >= 7 && defined SMP
+_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0)) /* Invalidate unified TLB by MVA, all ASID IS */
+#endif
+_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0)) /* Invalidate unified TLB by MVA */
+
+_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))
+
+/* Cache and Branch predictor */
+
+_WF0(_CP15_BPIALL, CP15_BPIALL) /* Branch predictor invalidate all */
+#if __ARM_ARCH >= 7 && defined SMP
+_WF0(_CP15_BPIALLIS, CP15_BPIALLIS) /* Branch predictor invalidate all IS */
+#endif
+_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0)) /* Branch predictor invalidate by MVA */
+_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0)) /* Data cache clean and invalidate by MVA PoC */
+_WF1(_CP15_DCCISW, CP15_DCCISW(%0)) /* Data cache clean and invalidate by set/way */
+_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0)) /* Data cache clean by MVA PoC */
+#if __ARM_ARCH >= 7
+_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0)) /* Data cache clean by MVA PoU */
+#endif
+_WF1(_CP15_DCCSW, CP15_DCCSW(%0)) /* Data cache clean by set/way */
+_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0)) /* Data cache invalidate by MVA PoC */
+_WF1(_CP15_DCISW, CP15_DCISW(%0)) /* Data cache invalidate by set/way */
+_WF0(_CP15_ICIALLU, CP15_ICIALLU) /* Instruction cache invalidate all PoU */
+#if __ARM_ARCH >= 7 && defined SMP
+_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS) /* Instruction cache invalidate all PoU IS */
+#endif
+_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0)) /* Instruction cache invalidate by MVA PoU */
+
+/*
+ * Publicly accessible functions
+ */
+
+/* Various control registers */
+
+_RF0(cp15_dfsr_get, CP15_DFSR(%0))
+_RF0(cp15_ifsr_get, CP15_IFSR(%0))
+_WF1(cp15_prrr_set, CP15_PRRR(%0))
+_WF1(cp15_nmrr_set, CP15_NMRR(%0))
+_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
+_RF0(cp15_dfar_get, CP15_DFAR(%0))
+#if __ARM_ARCH >= 7
+_RF0(cp15_ifar_get, CP15_IFAR(%0))
+#endif
+
+/* CPU id registers */
+_RF0(cp15_midr_get, CP15_MIDR(%0))
+_RF0(cp15_ctr_get, CP15_CTR(%0))
+_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
+_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
+_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
+_RF0(cp15_revidr_get, CP15_REVIDR(%0))
+_RF0(cp15_aidr_get, CP15_AIDR(%0))
+_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
+_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
+_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
+_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
+_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
+_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
+_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
+_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
+_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
+_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
+_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
+_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
+_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
+_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
+_RF0(cp15_cbar_get, CP15_CBAR(%0))
+
+#undef _FX
+#undef _RF0
+#undef _WF0
+#undef _WF1
+
+/*
+ * TLB maintenance operations.
+ */
+
+/* Local (i.e. not broadcasting) operations. */
+
+/* Flush all TLB entries (even global). */
+static __inline void
+tlb_flush_all_local(void)
+{
+
+ dsb();
+ _CP15_TLBIALL();
+ dsb();
+}
+
+/* Flush all non-global TLB entries. */
+static __inline void
+tlb_flush_all_ng_local(void)
+{
+
+ dsb();
+ _CP15_TLBIASID(CPU_ASID_KERNEL);
+ dsb();
+}
+
+/* Flush single TLB entry (even global). */
+static __inline void
+tlb_flush_local(vm_offset_t sva)
+{
+
+ dsb();
+ _CP15_TLBIMVA((sva & ~PAGE_MASK) | CPU_ASID_KERNEL);
+ dsb();
+}
+
+/* Flush range of TLB entries (even global). */
+static __inline void
+tlb_flush_range_local(vm_offset_t sva, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ dsb();
+ for (va = sva; va < eva; va += PAGE_SIZE)
+ _CP15_TLBIMVA((va & ~PAGE_MASK) | CPU_ASID_KERNEL);
+ dsb();
+}
+
+/* Broadcasting operations. */
+#ifndef SMP
+
+#define tlb_flush_all() tlb_flush_all_local()
+#define tlb_flush_all_ng() tlb_flush_all_ng_local()
+#define tlb_flush(sva) tlb_flush_local(sva)
+#define tlb_flush_range(sva, size) tlb_flush_range_local(sva, size)
+
+#else /* SMP */
+
+static __inline void
+tlb_flush_all(void)
+{
+
+ dsb();
+ _CP15_TLBIALLIS();
+ dsb();
+}
+
+static __inline void
+tlb_flush_all_ng(void)
+{
+
+ dsb();
+ _CP15_TLBIASIDIS(CPU_ASID_KERNEL);
+ dsb();
+}
+
+static __inline void
+tlb_flush(vm_offset_t sva)
+{
+
+ dsb();
+ _CP15_TLBIMVAAIS(sva);
+ dsb();
+}
+
+static __inline void
+tlb_flush_range(vm_offset_t sva, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ dsb();
+ for (va = sva; va < eva; va += PAGE_SIZE)
+ _CP15_TLBIMVAAIS(va);
+ dsb();
+}
+#endif /* SMP */
+
+/*
+ * Cache maintenance operations.
+ */
+
+/* Sync I and D caches to PoU */
+static __inline void
+icache_sync(vm_offset_t sva, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ dsb();
+ for (va = sva; va < eva; va += arm_dcache_align) {
+#ifdef SMP
+ _CP15_DCCMVAU(va);
+#else
+ _CP15_DCCMVAC(va);
+#endif
+ }
+ dsb();
+#ifdef SMP
+ _CP15_ICIALLUIS();
+#else
+ _CP15_ICIALLU();
+#endif
+ dsb();
+ isb();
+}
+
+/* Invalidate I cache */
+static __inline void
+icache_inv_all(void)
+{
+#ifdef SMP
+ _CP15_ICIALLUIS();
+#else
+ _CP15_ICIALLU();
+#endif
+ dsb();
+ isb();
+}
+
+/* Write back D-cache to PoU */
+static __inline void
+dcache_wb_pou(vm_offset_t sva, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ dsb();
+ for (va = sva; va < eva; va += arm_dcache_align) {
+#ifdef SMP
+ _CP15_DCCMVAU(va);
+#else
+ _CP15_DCCMVAC(va);
+#endif
+ }
+ dsb();
+}
+
+/* Invalidate D-cache to PoC */
+static __inline void
+dcache_inv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ /* invalidate L1 first */
+ for (va = sva; va < eva; va += arm_dcache_align) {
+ _CP15_DCIMVAC(va);
+ }
+ dsb();
+
+ /* then L2 */
+ cpu_l2cache_inv_range(pa, size);
+ dsb();
+
+ /* then L1 again */
+ for (va = sva; va < eva; va += arm_dcache_align) {
+ _CP15_DCIMVAC(va);
+ }
+ dsb();
+}
+
+/* Write back D-cache to PoC */
+static __inline void
+dcache_wb_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ dsb();
+
+ for (va = sva; va < eva; va += arm_dcache_align) {
+ _CP15_DCCMVAC(va);
+ }
+ dsb();
+
+ cpu_l2cache_wb_range(pa, size);
+}
+
+/* Write back and invalidate D-cache to PoC */
+static __inline void
+dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
+{
+ vm_offset_t va;
+ vm_offset_t eva = sva + size;
+
+ dsb();
+
+ /* write back L1 first */
+ for (va = sva; va < eva; va += arm_dcache_align) {
+ _CP15_DCCMVAC(va);
+ }
+ dsb();
+
+ /* then write back and invalidate L2 */
+ cpu_l2cache_wbinv_range(pa, size);
+
+ /* then invalidate L1 */
+ for (va = sva; va < eva; va += arm_dcache_align) {
+ _CP15_DCIMVAC(va);
+ }
+ dsb();
+}
+
+/* Set TTB0 register */
+static __inline void
+cp15_ttbr_set(uint32_t reg)
+{
+ dsb();
+ _CP15_TTB_SET(reg);
+ dsb();
+ _CP15_BPIALL();
+ dsb();
+ isb();
+ tlb_flush_all_ng_local();
+}
+
+#endif /* !MACHINE_CPU_V6_H */
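Two usage sketches for the new inlines (assumptions for illustration, not code
from this commit; patch_insn() and demap_range() are hypothetical callers).
The first is the classic pairing for publishing a rewritten instruction to the
I-stream; the second is the TLB flush that follows a PTE update:

#include <machine/cpu-v6.h>

static void
patch_insn(vm_offset_t va, uint32_t insn)
{
	*(uint32_t *)va = insn;		/* D-side store of the new opcode */
	icache_sync(va, sizeof(insn));	/* D-cache WB to PoU, then I-inv */
}

static void
demap_range(vm_offset_t sva, vm_size_t size)
{
	/* ... PTE updates elided ... */
	tlb_flush_range(sva, size);	/* TLBIMVA loop, or TLBIMVAAIS on SMP */
}

Note that UP builds get strictly local operations while SMP builds get the
inner-shareable broadcast variants; callers use the same names either way.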
diff --git a/sys/arm/include/cpufunc.h b/sys/arm/include/cpufunc.h
index 16887b0..de5c8a8 100644
--- a/sys/arm/include/cpufunc.h
+++ b/sys/arm/include/cpufunc.h
@@ -572,7 +572,6 @@ void xscalec3_context_switch (void);
#endif /* CPU_XSCALE_81342 */
-#define tlb_flush cpu_tlb_flushID
#define setttb cpu_setttb
#define drain_writebuf cpu_drain_writebuf
diff --git a/sys/arm/include/cpuinfo.h b/sys/arm/include/cpuinfo.h
new file mode 100644
index 0000000..f347294
--- /dev/null
+++ b/sys/arm/include/cpuinfo.h
@@ -0,0 +1,91 @@
+/*-
+ * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
+ * Copyright 2014 Michal Meloun <meloun@miracle.cz>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUINFO_H_
+#define _MACHINE_CPUINFO_H_
+
+#include <sys/types.h>
+
+struct cpuinfo {
+ /* raw id registers */
+ uint32_t midr;
+ uint32_t ctr;
+ uint32_t tcmtr;
+ uint32_t tlbtr;
+ uint32_t mpidr;
+ uint32_t revidr;
+ uint32_t id_pfr0;
+ uint32_t id_pfr1;
+ uint32_t id_dfr0;
+ uint32_t id_afr0;
+ uint32_t id_mmfr0;
+ uint32_t id_mmfr1;
+ uint32_t id_mmfr2;
+ uint32_t id_mmfr3;
+ uint32_t id_isar0;
+ uint32_t id_isar1;
+ uint32_t id_isar2;
+ uint32_t id_isar3;
+ uint32_t id_isar4;
+ uint32_t id_isar5;
+ uint32_t cbar;
+
+ /* Parsed bits of above registers... */
+
+ /* midr */
+ int implementer;
+ int revision;
+ int architecture;
+ int part_number;
+ int patch;
+
+ /* id_mmfr0 */
+ int outermost_shareability;
+ int shareability_levels;
+ int auxiliary_registers;
+ int innermost_shareability;
+
+ /* id_mmfr2 */
+ int mem_barrier;
+
+ /* id_mmfr3 */
+ int coherent_walk;
+ int maintenance_broadcast;
+
+ /* id_pfr1 */
+ int generic_timer_ext;
+ int virtualization_ext;
+ int security_ext;
+};
+
+extern struct cpuinfo cpuinfo;
+
+void cpuinfo_init(void);
+
+#endif /* _MACHINE_CPUINFO_H_ */
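A consumer sketch (an assumption about usage, not code from this commit):
once initarm() has run cpuinfo_init(), later code can key off the parsed
fields, as in the hypothetical Generic Timer check below.

#include <machine/cpuinfo.h>

static int
have_generic_timer(void)
{
	/*
	 * generic_timer_ext is parsed from ID_PFR1[19:16]; the parsed
	 * fields are only filled in for new-scheme (architecture == 0xF)
	 * CPUs, so gate on that first.
	 */
	return (cpuinfo.architecture == 0xF &&
	    cpuinfo.generic_timer_ext != 0);
}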
diff --git a/sys/conf/files.arm b/sys/conf/files.arm
index 1a703e2..ac1fb33 100644
--- a/sys/conf/files.arm
+++ b/sys/conf/files.arm
@@ -11,6 +11,8 @@ arm/arm/copystr.S standard
arm/arm/cpufunc.c standard
arm/arm/cpufunc_asm.S standard
arm/arm/cpufunc_asm_armv4.S standard
+arm/arm/cpuinfo.c standard
+arm/arm/cpu_asm-v6.S optional armv6
arm/arm/db_disasm.c optional ddb
arm/arm/db_interface.c optional ddb
arm/arm/db_trace.c optional ddb