author    Thomas Petazzoni <thomas.petazzoni@free-electrons.com>  2008-08-18 12:33:20 +0200
committer Ingo Molnar <mingo@elte.hu>  2008-08-18 16:05:45 +0200
commit    8bfcb3960fde049b863266dab8c3617bb5a541aa (patch)
tree      b658f2337d46d7f818eecc1be1e2ce76f57a32d0 /arch
parent    1a10390708d675ebf1a2f5e169a5165626afbd88 (diff)
x86: make movsl_mask definition non-CPU specific
movsl_mask is currently defined in arch/x86/kernel/cpu/intel.c, which
contains code specific to Intel CPUs. However, movsl_mask is used in
the non-CPU-specific code in arch/x86/lib/usercopy_32.c, which breaks
the compilation when support for Intel CPUs is compiled out.

This patch solves the problem by moving the definition of movsl_mask
close to its users in arch/x86/lib/usercopy_32.c.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: michael@free-electrons.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
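For illustration (not part of the commit), a minimal sketch of the failure mode, using hypothetical file and symbol names: a symbol defined only in a conditionally built object file, but referenced from code that is always built, leaves an unresolved reference once the config option is disabled.

/* vendor.c - built only when CONFIG_VENDOR_CPU=y (hypothetical name) */
int tuning_mask;                 /* the definition lives in vendor-specific code */

/* generic.c - always built; declaration comes from a shared header */
extern int tuning_mask;

int is_aligned_enough(unsigned long a)
{
	/* links with CONFIG_VENDOR_CPU=y; "undefined reference" without it */
	return !(a & tuning_mask);
}

Moving the definition into generic.c, as this patch does for movsl_mask, makes the always-built code self-contained regardless of the config option.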
Diffstat (limited to 'arch')
 arch/x86/kernel/cpu/intel.c | 7 -------
 arch/x86/lib/usercopy_32.c  | 7 +++++++
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f256..5c8959b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -23,13 +23,6 @@
 #include <mach_apic.h>
 #endif
 
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 24e6094..9e68075 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -14,6 +14,13 @@
 #include <asm/uaccess.h>
 #include <asm/mmx.h>
 
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask __read_mostly;
+#endif
+
 static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
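The hunk above is truncated at the start of __movsl_is_ok(). For context, the body of that helper in kernels of this vintage reads roughly as follows (a from-memory sketch, not part of this patch): it rejects movsl for large copies whose source and destination alignments differ in the bits selected by movsl_mask.mask.

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	/* refuse movsl when src/dst alignment differs in the masked bits */
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}

This is why the variable must be visible here: the generic user-copy path consults it on every bulk copy, whether or not the Intel-specific CPU setup code was built.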