Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/a.out.h              |  1 +
-rw-r--r--  include/asm-i386/cmpxchg.h            | 14 +++++---------
-rw-r--r--  include/asm-i386/kprobes.h            |  1 -
-rw-r--r--  include/asm-i386/percpu.h             |  5 +++++
-rw-r--r--  include/asm-i386/required-features.h  |  2 +-
-rw-r--r--  include/asm-i386/system.h             |  9 ---------
-rw-r--r--  include/asm-i386/tsc.h                |  1 +
7 files changed, 13 insertions, 20 deletions
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
index ab17bb8..851a60f 100644
--- a/include/asm-i386/a.out.h
+++ b/include/asm-i386/a.out.h
@@ -20,6 +20,7 @@ struct exec
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
#endif
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
index 7adcef0..64dcdf4 100644
--- a/include/asm-i386/cmpxchg.h
+++ b/include/asm-i386/cmpxchg.h
@@ -3,14 +3,16 @@
#include <linux/bitops.h> /* for LOCK_PREFIX */
+/*
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ *       need to test for the feature in boot_cpu_data.
+ */
+
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
-
-#ifdef CONFIG_X86_CMPXCHG64
-
/*
* The semantics of XCHGCMP8B are a bit strange, this is why
* there is a loop and the loading of %%eax and %%edx has to
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
__set_64bit(ptr, ll_low(value), ll_high(value)) )
-#endif
-
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
})
#endif
-#ifdef CONFIG_X86_CMPXCHG64
-
static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
unsigned long long new)
{
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
(unsigned long long)(n)))
#endif
-
-#endif
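
The new comment at the top of cmpxchg.h shifts responsibility to callers: with the CONFIG_X86_CMPXCHG64 guards removed from this header, any code that reaches set64_bit() or __cmpxchg64() must verify at runtime that the CPU actually implements cmpxchg8b. A minimal caller sketch, assuming the era's boot_cpu_has()/X86_FEATURE_CX8 helpers from <asm/cpufeature.h>; the helper function and its error handling are hypothetical, not part of this diff:

#include <linux/errno.h>
#include <asm/cpufeature.h>     /* boot_cpu_has(), X86_FEATURE_CX8 */
#include <asm/cmpxchg.h>

/* Hypothetical example: atomically swap a 64-bit value, but only on
 * CPUs that actually have CMPXCHG8B (an i486 does not). */
static int try_update64(unsigned long long *p,
                        unsigned long long old, unsigned long long new)
{
        if (!boot_cpu_has(X86_FEATURE_CX8))
                return -ENODEV;         /* the feature test the comment asks for */

        /* __cmpxchg64() returns the previous value; equality means the
         * compare-and-swap took effect. */
        return __cmpxchg64(p, old, new) == old ? 0 : -EAGAIN;
}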
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 8774d06..06f7303 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t;
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
-#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
#define ARCH_INACTIVE_KPROBE_COUNT 0
#define flush_insn_slot(p) do { } while (0)
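
With JPROBE_ENTRY() removed, a jprobe's entry point is assigned directly instead of being cast through kprobe_opcode_t *. A hedged sketch of what registration looks like after this change, modelled on the kernel's documented jprobe example; the probed symbol, handler name, and argument list (mirroring do_fork() of that era) are illustrative only:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative handler: receives the probed function's arguments. */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
                     struct pt_regs *regs, unsigned long stack_size,
                     int __user *parent_tidptr, int __user *child_tidptr)
{
        printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
        jprobe_return();        /* mandatory: control never passes this call */
        return 0;
}

static struct jprobe my_jprobe = {
        .entry = jdo_fork,      /* previously JPROBE_ENTRY(jdo_fork) */
        .kp = { .symbol_name = "do_fork" },
};

static int __init jprobe_example_init(void)
{
        return register_jprobe(&my_jprobe);     /* arm the probe */
}

static void __exit jprobe_example_exit(void)
{
        unregister_jprobe(&my_jprobe);
}

module_init(jprobe_example_init);
module_exit(jprobe_example_exit);
MODULE_LICENSE("GPL");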
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b..a7ebd43 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
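
DEFINE_PER_CPU_SHARED_ALIGNED exists so that per-CPU data which is written frequently can be grouped in .data.percpu.shared_aligned and padded to its own cache line, rather than false-sharing a line with neighbouring per-CPU variables. A small usage sketch; the statistics structure and variable names below are hypothetical:

#include <linux/percpu.h>
#include <linux/cache.h>

/* Hypothetical hot structure: updated on every interrupt, so keep each
 * CPU's copy on its own cache line to avoid false sharing on SMP. */
struct irq_hot_stats {
        unsigned long handled;
        unsigned long spurious;
};

DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_hot_stats, irq_hot_stats);

/* Rarely written data can stay with the plain, densely packed variant. */
DEFINE_PER_CPU(unsigned long, boot_stamp);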
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
index 65848a0..618feb9 100644
--- a/include/asm-i386/required-features.h
+++ b/include/asm-i386/required-features.h
@@ -29,7 +29,7 @@
# define NEED_CMOV 0
#endif
-#ifdef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_PAE
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
#else
# define NEED_CX8 0
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686..609756c 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@ void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
- wbinvd();
-}
-
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 62c091f..a4d8066 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -63,6 +63,7 @@ extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern void init_tsc_clocksource(void);
+int check_tsc_unstable(void);
/*
* Boot-time check whether the TSCs are synchronized across
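
The newly exported check_tsc_unstable() lets callers ask whether the TSC has been flagged unusable (for example via mark_tsc_unstable()) before trusting rdtsc-based timestamps. A hedged consumer sketch; the sampling helper below is invented for illustration and is not part of this commit:

#include <asm/tsc.h>

/* Hypothetical helper: return a TSC timestamp, or 0 so the caller can
 * fall back to another clocksource when the TSC has been flagged bad. */
static cycles_t sample_tsc_or_zero(void)
{
        if (check_tsc_unstable())
                return 0;

        return get_cycles();    /* rdtsc-backed on i386 when a TSC exists */
}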