author    Dmitry Torokhov <dtor_core@ameritech.net>    2006-04-29 01:11:23 -0400
committer Dmitry Torokhov <dtor_core@ameritech.net>    2006-04-29 01:11:23 -0400
commit    7b7e394185014e0f3bd8989cac937003f20ef9ce (patch)
tree      3beda5f979bba0aa9822534e239cf1b45f3be69c /include/asm-x86_64
parent    ddc5d3414593e4d7ad7fbd33e7f7517fcc234544 (diff)
parent    693f7d362055261882659475d2ef022e32edbff1 (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/cache.h        |  4
-rw-r--r--  include/asm-x86_64/cpufeature.h   |  1
-rw-r--r--  include/asm-x86_64/e820.h         |  3
-rw-r--r--  include/asm-x86_64/hpet.h         |  2
-rw-r--r--  include/asm-x86_64/i387.h         | 20
-rw-r--r--  include/asm-x86_64/ia32_unistd.h  |  2
-rw-r--r--  include/asm-x86_64/io.h           |  2
-rw-r--r--  include/asm-x86_64/mce.h          |  7
-rw-r--r--  include/asm-x86_64/mmzone.h       |  3
-rw-r--r--  include/asm-x86_64/numa.h         |  3
-rw-r--r--  include/asm-x86_64/numnodes.h     | 12
-rw-r--r--  include/asm-x86_64/percpu.h       |  2
-rw-r--r--  include/asm-x86_64/timex.h        |  2
-rw-r--r--  include/asm-x86_64/unistd.h       | 10
14 files changed, 48 insertions, 25 deletions
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index c8043a1..f8dff1c 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -20,8 +20,8 @@
__attribute__((__section__(".data.page_aligned")))
#endif
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
#endif
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
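
Note: this hunk moves the __read_mostly definition past the enclosing #endif (the opening conditional is not visible here), so the annotation is defined in all x86-64 configurations rather than only in the one guarded block. A tiny, purely illustrative use of the annotation (the variable name is made up):

/* Illustration only: a rarely-written, frequently-read variable is placed in
 * .data.read_mostly so it does not share a cache line with hot write traffic. */
static int example_tunable __read_mostly = 1;   /* hypothetical variable */
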
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 76bb619..662964b 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -64,6 +64,7 @@
#define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index 8dcc326..93b51df 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -47,7 +47,8 @@ extern void contig_e820_setup(void);
extern unsigned long e820_end_of_ram(void);
extern void e820_reserve_resources(void);
extern void e820_print_map(char *who);
-extern int e820_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
extern void e820_setup_gap(void);
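
Note: the old e820_mapped() is split into two predicates with different semantics: e820_any_mapped() asks whether any part of the range carries the given type, e820_all_mapped() whether the whole range does. A minimal sketch of that distinction over a simplified map structure (names, layout and logic here are illustrative, not the kernel's actual implementation):

#include <stddef.h>

struct range { unsigned long start, end; unsigned type; };

/* Nonzero if any byte of [start, end) falls inside an entry of 'type'. */
static int any_mapped(const struct range *map, size_t n,
                      unsigned long start, unsigned long end, unsigned type)
{
        for (size_t i = 0; i < n; i++)
                if (map[i].type == type && map[i].start < end && map[i].end > start)
                        return 1;
        return 0;
}

/* Nonzero only if every byte of [start, end) is covered by entries of 'type';
 * assumes the map is sorted and non-overlapping. */
static int all_mapped(const struct range *map, size_t n,
                      unsigned long start, unsigned long end, unsigned type)
{
        for (size_t i = 0; i < n && start < end; i++)
                if (map[i].type == type && map[i].start <= start && map[i].end > start)
                        start = map[i].end;     /* extend the covered prefix */
        return start >= end;
}
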
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index 08b75c1..18ff7ee 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -51,6 +51,8 @@
#define HPET_TN_ROUTE_SHIFT 9
+#define HPET_TICK_RATE (HZ * 100000UL)
+
extern int is_hpet_enabled(void);
extern int hpet_rtc_timer_init(void);
extern int oem_force_hpet_timer(void);
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 876eb9a..cba8a3b 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -72,6 +72,23 @@ extern int set_fpregs(struct task_struct *tsk,
#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
+#define X87_FSW_ES (1 << 7) /* Exception Summary */
+
+/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. The kernel data segment can sometimes be 0 and sometimes
+ the new user value; both should be OK.
+ Use the PDA as a safe address because it should already be in L1. */
+static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+{
+ if (unlikely(fx->swd & X87_FSW_ES))
+ asm volatile("fnclex");
+ alternative_input(ASM_NOP8 ASM_NOP2,
+ " emms\n" /* clear stack tags */
+ " fildl %%gs:0", /* load to clear state */
+ X86_FEATURE_FXSAVE_LEAK);
+}
+
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
int err;
@@ -119,6 +136,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
#endif
if (unlikely(err))
__clear_user(fx, sizeof(struct i387_fxsave_struct));
+ /* No need to clear here because the caller clears USED_MATH */
return err;
}
@@ -149,7 +167,7 @@ static inline void __fxsave_clear(struct task_struct *tsk)
"i" (offsetof(__typeof__(*tsk),
thread.i387.fxsave)));
#endif
- __asm__ __volatile__("fnclex");
+ clear_fpu_state(&tsk->thread.i387.fxsave);
}
static inline void kernel_fpu_begin(void)
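
Note: the new clear_fpu_state() works around an AMD quirk flagged by X86_FEATURE_FXSAVE_LEAK above: FXSAVE does not refresh the saved FIP/FOP/FDP fields unless an x87 exception is pending, so stale instruction/data pointers from one task could leak to another. The alternative_input() site is patched at boot according to that feature bit; a rough plain-C rendering of the resulting behaviour, for illustration only (a runtime branch standing in for boot-time instruction patching):

/* Illustration only: the real site is patched in place by the alternatives
 * mechanism, not branched at runtime. */
if (boot_cpu_has(X86_FEATURE_FXSAVE_LEAK)) {
        asm volatile("emms");                       /* clear x87 stack tags */
        asm volatile("fildl %%gs:0" ::: "memory");  /* dummy load from the PDA resets FIP/FDP/FOP */
}
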
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index eeb2bcd..b4f4b17 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -317,6 +317,4 @@
#define __NR_ia32_ppoll 309
#define __NR_ia32_unshare 310
-#define IA32_NR_syscalls 315 /* must be > than biggest syscall! */
-
#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index cafdfb3..a05da8a 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -177,7 +177,7 @@ static inline __u16 __readw(const volatile void __iomem *addr)
{
return *(__force volatile __u16 *)addr;
}
-static inline __u32 __readl(const volatile void __iomem *addr)
+static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
return *(__force volatile __u32 *)addr;
}
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 5d298b7..7229785 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -70,6 +70,9 @@ struct mce_log {
#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */
#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4
+#ifdef __KERNEL__
+#include <asm/atomic.h>
+
void mce_log(struct mce *m);
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
@@ -87,4 +90,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
}
#endif
+extern atomic_t mce_entry;
+
+#endif
+
#endif
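
Note: mce_entry is exported so code running in other contexts (the NMI watchdog, for instance) can tell whether a CPU is currently inside the machine-check handler. A hedged sketch of the presumed usage pattern; the handler name and placement are assumptions, not taken from this diff:

/* Sketch: bracket the machine-check handler with the counter so that code in
 * NMI context can test atomic_read(&mce_entry) != 0 and back off. */
void do_machine_check_sketch(void)
{
        atomic_inc(&mce_entry);
        /* ... decode and log the machine check ... */
        atomic_dec(&mce_entry);
}
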
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 6b18cd8..6944e71 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,7 +12,8 @@
#include <asm/smp.h>
-#define NODEMAPSIZE 0xfff
+/* Should really switch to dynamic allocation at some point */
+#define NODEMAPSIZE 0x4fff
/* Simple perfect hash to map physical addresses to node numbers */
struct memnode {
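
Note: NODEMAPSIZE bounds the boot-time table that hashes physical addresses to node numbers; raising it from 0xfff to 0x4fff lets the table cover larger address ranges until the promised dynamic allocation happens. For orientation, the lookup this table serves reduces to one shift and one index, roughly as below (field names follow this header but treat the sketch as illustrative):

/* Roughly what the phys-to-node lookup amounts to. */
static inline int phys_to_nid_sketch(unsigned long addr)
{
        return memnode.map[addr >> memnode.shift];
}
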
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index f6cbb4c..1cc92fe 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -2,7 +2,6 @@
#define _ASM_X8664_NUMA_H 1
#include <linux/nodemask.h>
-#include <asm/numnodes.h>
struct bootnode {
u64 start,end;
@@ -18,6 +17,8 @@ extern void numa_init_array(void);
extern int numa_off;
extern void numa_set_node(int cpu, int node);
+extern void srat_reserve_add_area(int nodeid);
+extern int hotadd_percent;
extern unsigned char apicid_to_node[256];
#ifdef CONFIG_NUMA
diff --git a/include/asm-x86_64/numnodes.h b/include/asm-x86_64/numnodes.h
deleted file mode 100644
index 32be16b..0000000
--- a/include/asm-x86_64/numnodes.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X8664_NUMNODES_H
-#define _ASM_X8664_NUMNODES_H 1
-
-#include <linux/config.h>
-
-#ifdef CONFIG_NUMA
-#define NODES_SHIFT 6
-#else
-#define NODES_SHIFT 0
-#endif
-
-#endif
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 4405b4a..7f33aaf 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,7 +26,7 @@
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for_each_cpu(__i) \
+ for_each_possible_cpu(__i) \
memcpy((pcpudst)+__per_cpu_offset(__i), \
(src), (size)); \
} while (0)
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f18443f..b9e5320 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -33,7 +33,7 @@ static __always_inline cycles_t get_cycles_sync(void)
unsigned eax;
/* Don't do an additional sync on CPUs where we know
RDTSC is already synchronous. */
- alternative_io(ASM_NOP2, "cpuid", X86_FEATURE_SYNC_RDTSC,
+ alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
"=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
rdtscll(ret);
return ret;
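
Note: swapping the first two arguments is the actual fix here: alternative_io() takes the default instruction first and the feature-gated replacement second, so the serializing CPUID must be the default and is patched to a NOP only on CPUs whose TSC is already synchronous. The old order inverted that, serializing exactly the CPUs that did not need it and skipping the ones that did. A plain-C rendering of the corrected behaviour, illustrative only (the real code is patched at boot, not branched):

/* Illustration: serialize with CPUID unless the CPU keeps RDTSC in sync. */
unsigned int eax = 1, ebx, ecx, edx;
if (!boot_cpu_has(X86_FEATURE_SYNC_RDTSC))
        asm volatile("cpuid"
                     : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     :: "memory");
rdtscll(ret);
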
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index f21ff2c..feb77cb 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -611,8 +611,14 @@ __SYSCALL(__NR_set_robust_list, sys_set_robust_list)
__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
#define __NR_splice 275
__SYSCALL(__NR_splice, sys_splice)
-
-#define __NR_syscall_max __NR_splice
+#define __NR_tee 276
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_sync_file_range 277
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
+#define __NR_vmsplice 278
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
+
+#define __NR_syscall_max __NR_vmsplice
#ifndef __NO_STUBS
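
Note: the new numbers wire up sys_tee, sys_sync_file_range and sys_vmsplice and move __NR_syscall_max forward accordingly. Before libc grows wrappers, a new number can be exercised directly via syscall(2); a minimal userspace example for tee() (error handling trimmed, number taken from this header for x86-64):

#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

#ifndef __NR_tee
#define __NR_tee 276
#endif

int main(void)
{
        int src[2], dst[2];
        if (pipe(src) || pipe(dst))
                return 1;
        write(src[1], "hi\n", 3);
        /* Duplicate the pipe contents into the second pipe without consuming them. */
        long n = syscall(__NR_tee, src[0], dst[1], 3, 0);
        printf("tee copied %ld bytes\n", n);
        return 0;
}
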