Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/irqflags.h  |  29
-rw-r--r--  include/asm-x86/linkage.h   |  35
-rw-r--r--  include/asm-x86/nops.h      |  20
3 files changed, 73 insertions(+), 11 deletions(-)
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 92021c1..0e22924 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -70,6 +70,26 @@ static inline void raw_local_irq_restore(unsigned long flags)
native_restore_fl(flags);
}
+#ifdef CONFIG_X86_VSMP
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+ raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+ raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+}
+
+#else
+
static inline void raw_local_irq_disable(void)
{
native_irq_disable();
@@ -80,6 +100,8 @@ static inline void raw_local_irq_enable(void)
native_irq_enable();
}
+#endif
+
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
@@ -137,10 +159,17 @@ static inline unsigned long __raw_local_irq_save(void)
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
+#ifdef CONFIG_X86_VSMP
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
+}
+#else
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}
+#endif
static inline int raw_irqs_disabled(void)
{
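
The VSMP variants above encode "interrupts disabled" redundantly: disabling
clears IF and sets AC in a single EFLAGS update, and raw_irqs_disabled_flags()
treats either condition as disabled. A minimal user-space sketch of that flag
arithmetic (not kernel code; it assumes only the architectural EFLAGS bit
positions, IF = bit 9 and AC = bit 18):

#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200 /* interrupt enable flag */
#define X86_EFLAGS_AC 0x00040000 /* alignment check flag, reused as a marker */

static int vsmp_irqs_disabled(unsigned long flags)
{
	/* mirrors the CONFIG_X86_VSMP raw_irqs_disabled_flags() above */
	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
}

int main(void)
{
	unsigned long flags = X86_EFLAGS_IF;              /* enabled state */
	printf("%d\n", vsmp_irqs_disabled(flags));        /* prints 0 */

	flags = (flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC; /* disable, as above */
	printf("%d\n", vsmp_irqs_disabled(flags));        /* prints 1 */
	return 0;
}
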
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 31739c7..c048353 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -8,12 +8,45 @@
#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
/*
* For 32-bit UML - mark functions implemented in assembly that use
* regparm input parameters:
*/
#define asmregparm __attribute__((regparm(3)))
+
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ *
+ * NOTE! On x86-64, all the arguments are in registers, so this
+ * only matters on a 32-bit kernel.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "g" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+ "g" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+ "g" (arg4), "g" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+ "g" (arg4), "g" (arg5), "g" (arg6))
+
#endif
#ifdef CONFIG_X86_ALIGNMENT_16
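
On 32-bit kernels, asmlinkage functions take their arguments on the caller's
stack, so a syscall body invokes asmlinkage_protect() on its own arguments
just before returning. A sketch of the intended usage (sys_example() and
do_example() are hypothetical names; real callers follow the same shape,
e.g. asmlinkage_protect(3, ret, a, b, c) for three arguments):

asmlinkage long sys_example(unsigned int fd, unsigned long arg)
{
	long ret = do_example(fd, arg);

	/* pin the caller-owned stack slots of fd and arg so gcc
	 * neither spills into them nor emits a tail call */
	asmlinkage_protect(2, ret, fd, arg);
	return ret;
}
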
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index e3b2bce..b3930ae 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -73,16 +73,7 @@
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
-#if defined(CONFIG_MK8)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
+#if defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
@@ -100,6 +91,15 @@
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
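
The ASM_NOP* macros are plain string literals, so inline assembly can splice
in whichever NOP encoding the #if chain above selects. A minimal sketch of a
consumer, assuming a kernel translation unit where <asm/nops.h> resolves as
shown:

#include <asm/nops.h>

static inline void pad_5_bytes(void)
{
	/* emits the 5-byte NOP picked by the configuration:
	 * K7, K8 (now on any 64-bit build), P6, or generic */
	asm volatile(ASM_NOP5);
}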