path: root/arch
author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-09-25 15:35:28 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-09-25 15:35:28 +0100
commit    c0e9587841a0fd79bbf8296034faefb9afe72fb4 (patch)
tree      b82e0e79706f9f63985b4591e1fa02eaa2df73d2 /arch
parent    90f1e084783be9bbff4861fa8e460b76de2787f4 (diff)
[ARM] Introduce new bitmask based cache type macros
Rather than trying to (inaccurately) decode the cache type from the registers each time we need to decide what type of cache we have, use a bitmask initialized early during boot. Since the setup is a one-off initialization, we can be a little more clever and take account of the CPU architecture as well.

Note that we continue to achieve compactness on optimised kernels by forcing tests to always-false or always-true as appropriate, thereby allowing the compiler to do build-time code elimination.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
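To make the build-time elimination concrete, the following is a minimal, standalone sketch of the same idea (not kernel code; the ALWAYS/NEVER names stand in for the CONFIG_*/__LINUX_ARM_ARCH__-driven masks in the patch, and the arch-minimum term is omitted for brevity): cacheid_is() masks the boot-time cacheid word with compile-time constants, so any test that a configuration rules out or guarantees folds to a constant and the dead branch disappears.

    /*
     * Userspace sketch of the bitmask approach (assumed names, not the
     * kernel's own code).  The "always" and "never" masks are
     * compile-time constants, so cacheid_is() reduces to 0 or a nonzero
     * constant whenever a cache type is ruled out or guaranteed by the
     * build configuration.
     */
    #include <stdio.h>

    #define CACHEID_VIVT              (1 << 0)
    #define CACHEID_VIPT_NONALIASING  (1 << 1)

    /* Example: a build that only supports VIVT caches. */
    #define CACHEID_ALWAYS  CACHEID_VIVT
    #define CACHEID_NEVER   (~CACHEID_VIVT)

    static unsigned int cacheid;    /* would be set once during boot */

    static inline unsigned int cacheid_is(unsigned int mask)
    {
            return (CACHEID_ALWAYS & mask) |
                   (~CACHEID_NEVER & mask & cacheid);
    }

    int main(void)
    {
            cacheid = CACHEID_VIVT;  /* what boot-time detection would set */

            /* Folds to a nonzero constant: the ALWAYS mask contains the bit. */
            if (cacheid_is(CACHEID_VIVT))
                    printf("VIVT handling compiled in\n");

            /* Folds to 0: the NEVER mask removes the bit at compile time. */
            if (cacheid_is(CACHEID_VIPT_NONALIASING))
                    printf("VIPT handling compiled in\n");

            return 0;
    }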
Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/include/asm/cachetype.h | 118
-rw-r--r-- arch/arm/kernel/setup.c          |  22
2 files changed, 59 insertions(+), 81 deletions(-)
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index b52386b..d3a4c2c 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -1,96 +1,52 @@
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H
-#include <asm/cputype.h>
+#define CACHEID_VIVT (1 << 0)
+#define CACHEID_VIPT_NONALIASING (1 << 1)
+#define CACHEID_VIPT_ALIASING (1 << 2)
+#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
+#define CACHEID_ASID_TAGGED (1 << 3)
-#define __cacheid_present(val) (val != read_cpuid_id())
-#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29))
+extern unsigned int cacheid;
-#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25))
-#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25))
-#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25))
-#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
+#define cache_is_vivt() cacheid_is(CACHEID_VIVT)
+#define cache_is_vipt() cacheid_is(CACHEID_VIPT)
+#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
+#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
+#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
-#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
-#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
-#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
-#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
-#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
-
-#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
/*
- * VIVT caches only
+ * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
+ * Mask out support which will never be present on newer CPUs.
+ * - v6+ is never VIVT
+ * - v7+ VIPT never aliases
*/
-#define cache_is_vivt() 1
-#define cache_is_vipt() 0
-#define cache_is_vipt_nonaliasing() 0
-#define cache_is_vipt_aliasing() 0
-#define icache_is_vivt_asid_tagged() 0
+#if __LINUX_ARM_ARCH__ >= 7
+#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#elif __LINUX_ARM_ARCH__ >= 6
+#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
+#else
+#define __CACHEID_ARCH_MIN (~0)
+#endif
-#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
/*
- * VIPT caches only
+ * Mask out support which isn't configured
*/
-#define cache_is_vivt() 0
-#define cache_is_vipt() 1
-#define cache_is_vipt_nonaliasing() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_vipt_nonaliasing(__val); \
- })
-
-#define cache_is_vipt_aliasing() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_vipt_aliasing(__val); \
- })
-
-#define icache_is_vivt_asid_tagged() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_vivt_asid_tagged_instr(__val); \
- })
-
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS (CACHEID_VIVT)
+#define __CACHEID_NEVER (~CACHEID_VIVT)
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS (0)
+#define __CACHEID_NEVER (CACHEID_VIVT)
#else
-/*
- * VIVT or VIPT caches. Note that this is unreliable since ARM926
- * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
- * There's no way to tell from the CacheType register what type (!)
- * the cache is.
- */
-#define cache_is_vivt() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- (!__cacheid_present(__val)) || __cacheid_vivt(__val); \
- })
-
-#define cache_is_vipt() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_present(__val) && __cacheid_vipt(__val); \
- })
-
-#define cache_is_vipt_nonaliasing() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_present(__val) && \
- __cacheid_vipt_nonaliasing(__val); \
- })
-
-#define cache_is_vipt_aliasing() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_present(__val) && \
- __cacheid_vipt_aliasing(__val); \
- })
-
-#define icache_is_vivt_asid_tagged() \
- ({ \
- unsigned int __val = read_cpuid_cachetype(); \
- __cacheid_present(__val) && \
- __cacheid_vivt_asid_tagged_instr(__val); \
- })
-
+#define __CACHEID_ALWAYS (0)
+#define __CACHEID_NEVER (0)
#endif
+static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
+{
+ return (__CACHEID_ALWAYS & mask) |
+ (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
+}
+
#endif
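For context, a hypothetical caller of the new predicates might look like the sketch below (illustration only, not part of this patch; flush_by_vaddr() is an invented stand-in for whatever flush primitive the real code path would use). On a kernel configured for a single cache type, cacheid_is() folds to a constant and the unused branch is eliminated entirely.

    /* Hypothetical caller, for illustration only -- not from this patch.
     * Assumes the <asm/cachetype.h> introduced above is in scope.
     */
    #include <asm/cachetype.h>

    static void flush_by_vaddr(unsigned long vaddr) { /* stand-in helper */ }

    static void example_flush_user_page(void *kaddr, unsigned long uaddr)
    {
            if (cache_is_vivt()) {
                    /* VIVT: lines are tagged by the user's virtual address. */
                    flush_by_vaddr(uaddr);
            } else if (cache_is_vipt_aliasing()) {
                    /* Aliasing VIPT: the kernel alias must be flushed too. */
                    flush_by_vaddr((unsigned long)kaddr);
            }
            /* Non-aliasing VIPT needs nothing extra here. */
    }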
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1939c90..5b121d8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -67,6 +67,8 @@ unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
+unsigned int cacheid;
+EXPORT_SYMBOL(cacheid);
unsigned int __atags_pointer __initdata;
@@ -229,6 +231,25 @@ int cpu_architecture(void)
return cpu_arch;
}
+static void __init cacheid_init(void)
+{
+ unsigned int cachetype = read_cpuid_cachetype();
+ unsigned int arch = cpu_architecture();
+
+ if (arch >= CPU_ARCH_ARMv7) {
+ cacheid = CACHEID_VIPT_NONALIASING;
+ if ((cachetype & (3 << 14)) == 1 << 14)
+ cacheid |= CACHEID_ASID_TAGGED;
+ } else if (arch >= CPU_ARCH_ARMv6) {
+ if (cachetype & (1 << 23))
+ cacheid = CACHEID_VIPT_ALIASING;
+ else
+ cacheid = CACHEID_VIPT_NONALIASING;
+ } else {
+ cacheid = CACHEID_VIVT;
+ }
+}
+
/*
* These functions re-use the assembly code in head.S, which
* already provide the required functionality.
@@ -278,6 +299,7 @@ static void __init setup_processor(void)
elf_hwcap &= ~HWCAP_THUMB;
#endif
+ cacheid_init();
cpu_proc_init();
}
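The register tests in cacheid_init() are terse; the sketch below spells out the same bit fields with named constants. The bit positions are exactly the ones the patch tests (bits [15:14] for the v7 L1 instruction-cache policy, bit 23 for a v6 D-cache large enough to alias); the constant and helper names themselves are invented for readability and are not part of the patch.

    /* Illustrative decode of the bits tested in cacheid_init() above.
     * Names are invented; the bit positions are the ones the patch uses.
     */
    #define CTR_L1IP_MASK         (3 << 14)  /* v7: L1 I-cache policy field  */
    #define CTR_L1IP_ASID_TAGGED  (1 << 14)  /* v7: ASID-tagged VIVT I-cache */
    #define CTR_V6_DCACHE_ALIAS   (1 << 23)  /* v6: D-cache may alias        */

    static int ctr_icache_is_asid_tagged(unsigned int cachetype)
    {
            return (cachetype & CTR_L1IP_MASK) == CTR_L1IP_ASID_TAGGED;
    }

    static int ctr_v6_dcache_may_alias(unsigned int cachetype)
    {
            return (cachetype & CTR_V6_DCACHE_ALIAS) != 0;
    }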