Diffstat (limited to 'include/asm-x86')
-rw-r--r-- | include/asm-x86/bitops.h | 37
-rw-r--r-- | include/asm-x86/geode.h | 12
-rw-r--r-- | include/asm-x86/i387.h | 10
-rw-r--r-- | include/asm-x86/kvm_x86_emulate.h | 1
-rw-r--r-- | include/asm-x86/pat.h | 8
-rw-r--r-- | include/asm-x86/pgtable.h | 16
-rw-r--r-- | include/asm-x86/spinlock.h | 18
-rw-r--r-- | include/asm-x86/topology.h | 18
8 files changed, 67 insertions, 53 deletions
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index b81a4d4..ee4b3ea 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,13 +23,10 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *)addr)
-#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
+#define ADDR "=m" (*(volatile long *) addr)
 #else
 #define ADDR "+m" (*(volatile long *) addr)
-#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
 #endif
-#define BASE_ADDR "m" (*(volatile int *)addr)
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -77,7 +74,7 @@ static inline void __set_bit(int nr, volatile void *addr)
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
 }
 
 /*
@@ -96,7 +93,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
 
 static inline void __clear_bit(int nr, volatile void *addr)
 {
-	asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
 
 /*
@@ -131,7 +128,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  */
 static inline void __change_bit(int nr, volatile void *addr)
 {
-	asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
 
 /**
@@ -145,7 +142,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }
 
 /**
@@ -191,9 +188,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-	asm volatile("bts %2,%3\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm("bts %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
 	return oldbit;
 }
 
@@ -229,9 +227,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-	asm volatile("btr %2,%3\n\t"
+	asm volatile("btr %2,%1\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr));
 	return oldbit;
 }
 
@@ -240,9 +239,10 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-	asm volatile("btc %2,%3\n\t"
+	asm volatile("btc %2,%1\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -276,11 +276,10 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
 {
 	int oldbit;
 
-	asm volatile("bt %2,%3\n\t"
+	asm volatile("bt %2,%1\n\t"
 		     "sbb %0,%0"
 		     : "=r" (oldbit)
-		     : "m" (((volatile const int *)addr)[nr >> 5]),
-		       "Ir" (nr), BASE_ADDR);
+		     : "m" (*(unsigned long *)addr), "Ir" (nr));
 
 	return oldbit;
 }
@@ -397,8 +396,6 @@ static inline int fls(int x)
 }
 #endif /* __KERNEL__ */
 
-#undef BASE_ADDR
-#undef BIT_ADDR
 #undef ADDR
 
 static inline void set_bit_string(unsigned long *bitmap,
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 7154dc4..6e64588 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -185,16 +185,14 @@ static inline int is_geode(void)
 {
 	return (is_geode_gx() || is_geode_lx());
 }
 
-/*
- * The VSA has virtual registers that we can query for a signature.
- */
+#ifdef CONFIG_MGEODE_LX
+extern int geode_has_vsa2(void);
+#else
 static inline int geode_has_vsa2(void)
 {
-	outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
-	outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
-
-	return (inw(VSA_VRC_DATA) == VSA_SIG);
+	return 0;
 }
+#endif
 
 /* MFGPTs */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index da2adb4..6b722d3 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -175,7 +175,15 @@ static inline int save_i387(struct _fpstate __user *buf)
  */
 static inline int restore_i387(struct _fpstate __user *buf)
 {
-	set_used_math();
+	struct task_struct *tsk = current;
+	int err;
+
+	if (!used_math()) {
+		err = init_fpu(tsk);
+		if (err)
+			return err;
+	}
+
 	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
 		clts();
 		task_thread_info(current)->status |= TS_USEDFPU;
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index d6337f94..b877bbd 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -135,6 +135,7 @@ struct decode_cache {
 	u8 modrm_rm;
 	u8 use_modrm_ea;
 	unsigned long modrm_ea;
+	void *modrm_ptr;
 	unsigned long modrm_val;
 	struct fetch_cache fetch;
 };
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 8b822b5..88f60cc 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -4,7 +4,13 @@
 
 #include <linux/types.h>
 
+#ifdef CONFIG_X86_PAT
 extern int pat_wc_enabled;
+extern void validate_pat_support(struct cpuinfo_x86 *c);
+#else
+static const int pat_wc_enabled = 0;
+static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
+#endif
 
 extern void pat_init(void);
 
@@ -12,5 +18,7 @@ extern int reserve_memtype(u64 start, u64 end,
 		unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
+extern void pat_disable(char *reason);
+
 #endif
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 801b31f..55c3a0e 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -57,7 +57,8 @@
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
 			 _PAGE_DIRTY)
 
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_PCD | _PAGE_PWT |		\
+			 _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
@@ -288,12 +289,21 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	 * Chop off the NX bit (if present), and add the NX portion of
 	 * the newprot (if present):
 	 */
-	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
-	val |= pgprot_val(newprot) & __supported_pte_mask;
+	val &= _PAGE_CHG_MASK;
+	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
 
 	return __pte(val);
 }
 
+/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
+	pgprotval_t addbits = pgprot_val(newprot);
+	return __pgprot(preservebits | addbits);
+}
+
 #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index bc6376f..21e89bf 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -20,18 +20,8 @@
  */
 
 #ifdef CONFIG_X86_32
-typedef char _slock_t;
-# define LOCK_INS_DEC "decb"
-# define LOCK_INS_XCH "xchgb"
-# define LOCK_INS_MOV "movb"
-# define LOCK_INS_CMP "cmpb"
 # define LOCK_PTR_REG "a"
 #else
-typedef int _slock_t;
-# define LOCK_INS_DEC "decl"
-# define LOCK_INS_XCH "xchgl"
-# define LOCK_INS_MOV "movl"
-# define LOCK_INS_CMP "cmpl"
 # define LOCK_PTR_REG "D"
 #endif
 
@@ -66,14 +56,14 @@ typedef int _slock_t;
 #if (NR_CPUS < 256)
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
 static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
@@ -130,14 +120,14 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
 }
 
 static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 4f35a0f..dcf3f81 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -25,6 +25,16 @@
 #ifndef _ASM_X86_TOPOLOGY_H
 #define _ASM_X86_TOPOLOGY_H
 
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_HT
+# define ENABLE_TOPO_DEFINES
+# endif
+#else
+# ifdef CONFIG_SMP
+# define ENABLE_TOPO_DEFINES
+# endif
+#endif
+
 #ifdef CONFIG_NUMA
 #include <linux/cpumask.h>
 #include <asm/mpspec.h>
@@ -130,10 +140,6 @@ extern unsigned long node_end_pfn[];
 extern unsigned long node_remap_size[];
 #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])
 
-# ifdef CONFIG_X86_HT
-# define ENABLE_TOPO_DEFINES
-# endif
-
 # define SD_CACHE_NICE_TRIES 1
 # define SD_IDLE_IDX 1
 # define SD_NEWIDLE_IDX 2
@@ -141,10 +147,6 @@ extern unsigned long node_remap_size[];
 
 #else
 
-# ifdef CONFIG_SMP
-# define ENABLE_TOPO_DEFINES
-# endif
-
 # define SD_CACHE_NICE_TRIES 2
 # define SD_IDLE_IDX 2
 # define SD_NEWIDLE_IDX 2
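
The pgtable.h hunks above widen _PAGE_CHG_MASK so that the PAT cache-attribute bits (_PAGE_PCD/_PAGE_PWT) survive pte_modify()/pgprot_modify(); without that, an mprotect() on a write-combining mapping would silently fall back to write-back caching. The following standalone sketch is not kernel code: the constants only mirror the x86 bit layout and the modify() helper is a made-up stand-in for the pte_modify() mask logic, shown here just to contrast the old and new masks.

#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT  0x001ULL
#define _PAGE_RW       0x002ULL
#define _PAGE_PWT      0x008ULL
#define _PAGE_PCD      0x010ULL
#define _PAGE_ACCESSED 0x020ULL
#define _PAGE_DIRTY    0x040ULL
#define PTE_MASK       0x000ffffffffff000ULL	/* physical address bits */

/* old mask loses PCD/PWT across a protection change; the new mask keeps them */
#define CHG_MASK_OLD	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define CHG_MASK_NEW	(PTE_MASK | _PAGE_PCD | _PAGE_PWT | _PAGE_ACCESSED | _PAGE_DIRTY)

/* same shape as the new pte_modify(): keep masked bits, take the rest from newprot */
static uint64_t modify(uint64_t pte, uint64_t newprot, uint64_t chg_mask)
{
	return (pte & chg_mask) | (newprot & ~chg_mask);
}

int main(void)
{
	/* a write-combining style PTE: PWT set so the PAT index selects WC */
	uint64_t pte = 0x12345000ULL | _PAGE_PRESENT | _PAGE_RW | _PAGE_PWT;
	uint64_t ro  = _PAGE_PRESENT;	/* new protection: present, read-only */

	printf("old mask: PWT %s\n",
	       (modify(pte, ro, CHG_MASK_OLD) & _PAGE_PWT) ? "kept" : "lost");
	printf("new mask: PWT %s\n",
	       (modify(pte, ro, CHG_MASK_NEW) & _PAGE_PWT) ? "kept" : "lost");
	return 0;
}

Run as a normal user-space program, this prints "lost" for the old mask and "kept" for the new one, which is the whole point of the _PAGE_CHG_MASK change.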
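
The spinlock.h hunks only change how the ticket-lock word is read (a single ACCESS_ONCE() load instead of an open-coded volatile cast); the head/tail comparison itself is unchanged. A plain-C sketch of that comparison for the NR_CPUS < 256 layout follows; it is not kernel code and the helper names are invented for illustration. For that layout the next ticket lives in the high byte of slock and the currently served ticket in the low byte, so the lock is held while the two differ and contended when they differ by more than one.

#include <stdio.h>

static int is_locked(unsigned int slock)
{
	return ((slock >> 8) & 0xff) != (slock & 0xff);
}

static int is_contended(unsigned int slock)
{
	return (((slock >> 8) & 0xff) - (slock & 0xff)) > 1;
}

int main(void)
{
	unsigned int idle   = (3 << 8) | 3;	/* next == owner: unlocked        */
	unsigned int held   = (4 << 8) | 3;	/* one ticket out: locked         */
	unsigned int queued = (6 << 8) | 3;	/* several tickets out: contended */

	printf("idle:   locked=%d contended=%d\n", is_locked(idle), is_contended(idle));
	printf("held:   locked=%d contended=%d\n", is_locked(held), is_contended(held));
	printf("queued: locked=%d contended=%d\n", is_locked(queued), is_contended(queued));
	return 0;
}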