author     James Bottomley <James.Bottomley@HansenPartnership.com>  2016-07-06 07:25:55 -0700
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2016-07-06 07:25:55 -0700
commit     ea1a25c3348abc33d7d94db28501766adf3d1c7d (patch)
tree       ca3c5c8b72532117034dd2d57a2a85eead13db7b /arch/x86/include
parent     5e7ff2ca7f2da55fe777167849d0c93403bd0dc8 (diff)
parent     262e2bfd7d1e1f1ee48b870e5dfabb87c06b975e (diff)
download   op-kernel-dev-ea1a25c3348abc33d7d94db28501766adf3d1c7d.zip
           op-kernel-dev-ea1a25c3348abc33d7d94db28501766adf3d1c7d.tar.gz
Merge branch 'jejb-fixes' into fixes
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/bugs.h               |  8
-rw-r--r--  arch/x86/include/asm/cpufeature.h         | 12
-rw-r--r--  arch/x86/include/asm/disabled-features.h  |  6
-rw-r--r--  arch/x86/include/asm/intel-family.h       | 68
-rw-r--r--  arch/x86/include/asm/intel_telemetry.h    |  2
-rw-r--r--  arch/x86/include/asm/kprobes.h            | 11
-rw-r--r--  arch/x86/include/asm/kvm_host.h           | 43
-rw-r--r--  arch/x86/include/asm/msr.h                |  4
-rw-r--r--  arch/x86/include/asm/pgalloc.h            |  4
-rw-r--r--  arch/x86/include/asm/pgtable.h            |  1
-rw-r--r--  arch/x86/include/asm/pmc_core.h           | 27
-rw-r--r--  arch/x86/include/asm/stacktrace.h         |  6
-rw-r--r--  arch/x86/include/asm/svm.h                | 12
-rw-r--r--  arch/x86/include/asm/uaccess.h            |  5
-rw-r--r--  arch/x86/include/asm/uaccess_32.h         | 62
-rw-r--r--  arch/x86/include/asm/uaccess_64.h         |  7
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h           |  6
-rw-r--r--  arch/x86/include/uapi/asm/svm.h           | 49
18 files changed, 237 insertions, 96 deletions
diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h
index 08abf63..5490bba 100644
--- a/arch/x86/include/asm/bugs.h
+++ b/arch/x86/include/asm/bugs.h
@@ -1,8 +1,16 @@
 #ifndef _ASM_X86_BUGS_H
 #define _ASM_X86_BUGS_H
 
+#include <asm/processor.h>
+
 extern void check_bugs(void);
 
+#if defined(CONFIG_CPU_SUP_INTEL)
+void check_mpx_erratum(struct cpuinfo_x86 *c);
+#else
+static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {}
+#endif
+
 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 25ebb54..483fb54 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -64,9 +64,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 	 (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
 	 (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
 	 (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
-	 (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
-	 (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
-	 (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
+	 (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
+	 (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
+	 (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
 
 #define DISABLED_MASK_BIT_SET(bit) \
 	( (((bit)>>5)==0  && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \
@@ -83,9 +83,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 	 (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
 	 (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
 	 (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
-	 (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
-	 (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
-	 (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
+	 (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
+	 (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
+	 (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
 
 #define cpu_has(c, bit) \
 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 39343be..911e935 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -29,11 +29,11 @@
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-# define DISABLE_PKU	(1<<(X86_FEATURE_PKU))
-# define DISABLE_OSPKE	(1<<(X86_FEATURE_OSPKE))
-#else
 # define DISABLE_PKU	0
 # define DISABLE_OSPKE	0
+#else
+# define DISABLE_PKU	(1<<(X86_FEATURE_PKU & 31))
+# define DISABLE_OSPKE	(1<<(X86_FEATURE_OSPKE & 31))
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
 /*
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644
index 0000000..6999f7d
--- /dev/null
+++ b/arch/x86/include/asm/intel-family.h
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them.  There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH		0x0E
+#define INTEL_FAM6_CORE2_MEROM		0x0F
+#define INTEL_FAM6_CORE2_MEROM_L	0x16
+#define INTEL_FAM6_CORE2_PENRYN		0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
+
+#define INTEL_FAM6_NEHALEM		0x1E
+#define INTEL_FAM6_NEHALEM_EP		0x1A
+#define INTEL_FAM6_NEHALEM_EX		0x2E
+#define INTEL_FAM6_WESTMERE		0x25
+#define INTEL_FAM6_WESTMERE2		0x1F
+#define INTEL_FAM6_WESTMERE_EP		0x2C
+#define INTEL_FAM6_WESTMERE_EX		0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE		0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X	0x2D
+#define INTEL_FAM6_IVYBRIDGE		0x3A
+#define INTEL_FAM6_IVYBRIDGE_X		0x3E
+
+#define INTEL_FAM6_HASWELL_CORE		0x3C
+#define INTEL_FAM6_HASWELL_X		0x3F
+#define INTEL_FAM6_HASWELL_ULT		0x45
+#define INTEL_FAM6_HASWELL_GT3E		0x46
+
+#define INTEL_FAM6_BROADWELL_CORE	0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D	0x56
+#define INTEL_FAM6_BROADWELL_GT3E	0x47
+#define INTEL_FAM6_BROADWELL_X		0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
+#define INTEL_FAM6_SKYLAKE_X		0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
+#define INTEL_FAM6_ATOM_LINCROFT	0x26
+#define INTEL_FAM6_ATOM_PENWELL		0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
+#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h
index ed65fe7..85029b5 100644
--- a/arch/x86/include/asm/intel_telemetry.h
+++ b/arch/x86/include/asm/intel_telemetry.h
@@ -99,7 +99,7 @@ struct telemetry_core_ops {
 	int (*reset_events)(void);
 };
 
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
 			  struct telemetry_plt_config *pltconfig);
 
 int telemetry_clear_pltdata(void);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4421b5d..d1d1e50 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) \
-	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-			      THREAD_SIZE - (unsigned long)(ADDR))) \
-	 ? (MAX_STACK_SIZE) \
-	 : (((unsigned long)current_thread_info()) + \
-	    THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+	(current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR) \
+	(MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
+	 MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
 
 #define flush_insn_slot(p)	do { } while (0)
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b7e3944..69e62862 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
 #include <linux/irqbypass.h>
 #include <linux/hyperv.h>
 
+#include <asm/apic.h>
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 #include <asm/mtrr.h>
@@ -562,7 +563,6 @@ struct kvm_vcpu_arch {
 	struct {
 		u64 msr_val;
 		u64 last_steal;
-		u64 accum_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
 	} st;
@@ -774,6 +774,11 @@ struct kvm_arch {
 	u8 nr_reserved_ioapic_pins;
 
 	bool disabled_lapic_found;
+
+	/* Struct members for AVIC */
+	u32 ldr_mode;
+	struct page *avic_logical_id_table_page;
+	struct page *avic_physical_id_table_page;
 };
 
 struct kvm_vm_stat {
@@ -804,6 +809,7 @@ struct kvm_vcpu_stat {
 	u32 halt_exits;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
 	u32 irq_exits;
@@ -848,6 +854,9 @@ struct kvm_x86_ops {
 	bool (*cpu_has_high_real_mode_segbase)(void);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
+	int (*vm_init)(struct kvm *kvm);
+	void (*vm_destroy)(struct kvm *kvm);
+
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
@@ -914,7 +923,7 @@ struct kvm_x86_ops {
 	bool (*get_enable_apicv)(void);
 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
-	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -990,8 +999,13 @@ struct kvm_x86_ops {
 	 */
 	int (*pre_block)(struct kvm_vcpu *vcpu);
 	void (*post_block)(struct kvm_vcpu *vcpu);
+
+	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
+	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
+
 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
+	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1341,7 +1355,28 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
 void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
 		     struct kvm_lapic_irq *irq);
 
-static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->vcpu_blocking)
+		kvm_x86_ops->vcpu_blocking(vcpu);
+}
+
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->vcpu_unblocking)
+		kvm_x86_ops->vcpu_unblocking(vcpu);
+}
+
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	return __default_cpu_present_to_apicid(mps_cpu);
+#else
+	WARN_ON_ONCE(1);
+	return BAD_APICID;
+#endif
+}
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 7dc1d8f..b5fee97 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
 		     "2:\n"
 		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
 		     : : "c" (msr), "a"(low), "d" (high) : "memory");
-	if (msr_tracepoint_active(__tracepoint_read_msr))
+	if (msr_tracepoint_active(__tracepoint_write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 		     : "c" (msr), "0" (low), "d" (high),
 		       [fault] "i" (-EIO)
 		     : "memory");
-	if (msr_tracepoint_active(__tracepoint_read_msr))
+	if (msr_tracepoint_active(__tracepoint_write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
 	return err;
 }
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index bf7f8b5..574c23c 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index f86491a..1a27396 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -181,6 +181,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
 	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 }
 
+#define has_transparent_hugepage has_transparent_hugepage
 static inline int has_transparent_hugepage(void)
 {
 	return boot_cpu_has(X86_FEATURE_PSE);
diff --git a/arch/x86/include/asm/pmc_core.h b/arch/x86/include/asm/pmc_core.h
new file mode 100644
index 0000000..d4855f1
--- /dev/null
+++ b/arch/x86/include/asm/pmc_core.h
@@ -0,0 +1,27 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ASM_PMC_CORE_H
+#define _ASM_PMC_CORE_H
+
+/* API to read SLP_S0_RESIDENCY counter */
+int intel_pmc_slp_s0_counter_read(u32 *data);
+
+#endif /* _ASM_PMC_CORE_H */
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 7c247e7..0944218 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
 struct thread_info;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
 				      unsigned long *stack,
 				      unsigned long bp,
 				      const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
 				      int *graph);
 
 extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		    unsigned long *stack, unsigned long bp,
 		    const struct stacktrace_ops *ops, void *data,
 		    unsigned long *end, int *graph);
 
 extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 6136d99..d0fe23e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -78,7 +78,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 exit_int_info;
 	u32 exit_int_info_err;
 	u64 nested_ctl;
-	u8 reserved_4[16];
+	u64 avic_vapic_bar;
+	u8 reserved_4[8];
 	u32 event_inj;
 	u32 event_inj_err;
 	u64 nested_cr3;
@@ -88,7 +89,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u64 next_rip;
 	u8 insn_len;
 	u8 insn_bytes[15];
-	u8 reserved_6[800];
+	u64 avic_backing_page;	/* Offset 0xe0 */
+	u8 reserved_6[8];	/* Offset 0xe8 */
+	u64 avic_logical_id;	/* Offset 0xf0 */
+	u64 avic_physical_id;	/* Offset 0xf8 */
+	u8 reserved_7[768];
 };
 
 
@@ -111,6 +116,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
+#define AVIC_ENABLE_SHIFT 31
+#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
+
 #define SVM_INTERRUPT_SHADOW_MASK 1
 
 #define SVM_IOIO_STR_SHIFT 2
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 12f9653..2982387 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -5,6 +5,7 @@
  */
 #include <linux/errno.h>
 #include <linux/compiler.h>
+#include <linux/kasan-checks.h>
 #include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm.h>
@@ -721,6 +722,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
 	might_fault();
 
+	kasan_check_write(to, n);
+
 	/*
 	 * While we would like to have the compiler do the checking for us
 	 * even in the non-constant size case, any false positives there are
@@ -754,6 +757,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(from);
 
+	kasan_check_read(from, n);
+
 	might_fault();
 
 	/* See the comment in copy_from_user() above. */
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 3fe0eac..4b32da24 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,46 +33,10 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
- *
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
  */
-
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__put_user_size(*(u8 *)from, (u8 __user *)to,
-					1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__put_user_size(*(u16 *)from, (u16 __user *)to,
-					2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__put_user_size(*(u32 *)from, (u32 __user *)to,
-					4, ret, 4);
-			__uaccess_end();
-			return ret;
-		case 8:
-			__uaccess_begin();
-			__put_user_size(*(u64 *)from, (u64 __user *)to,
-					8, ret, 8);
-			__uaccess_end();
-			return ret;
-		}
-	}
 	return __copy_to_user_ll(to, from, n);
 }
 
@@ -101,32 +65,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-	/* Avoid zeroing the tail if the copy fails..
-	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
-	 * but as the zeroing behaviour is only significant when n is not
-	 * constant, that shouldn't be a problem.
-	 */
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			__uaccess_end();
-			return ret;
-		}
-	}
 	return __copy_from_user_ll_nozero(to, from, n);
 }
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 3076986..2eac2aa 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/lockdep.h>
+#include <linux/kasan-checks.h>
 #include <asm/alternative.h>
 #include <asm/cpufeatures.h>
 #include <asm/page.h>
@@ -109,6 +110,7 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	might_fault();
+	kasan_check_write(dst, size);
 	return __copy_from_user_nocheck(dst, src, size);
 }
 
@@ -175,6 +177,7 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	might_fault();
+	kasan_check_read(src, size);
 	return __copy_to_user_nocheck(dst, src, size);
 }
 
@@ -242,12 +245,14 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
+	kasan_check_write(dst, size);
 	return __copy_from_user_nocheck(dst, src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
+	kasan_check_read(src, size);
 	return __copy_to_user_nocheck(dst, src, size);
 }
 
@@ -258,6 +263,7 @@ static inline int
 __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
 	might_fault();
+	kasan_check_write(dst, size);
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
@@ -265,6 +271,7 @@ static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
 {
+	kasan_check_write(dst, size);
 	return __copy_user_nocache(dst, src, size, 0);
 }
 
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index cd54147..739c0c5 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -216,9 +216,9 @@ struct kvm_cpuid_entry2 {
 	__u32 padding[3];
 };
 
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX	BIT(0)
-#define KVM_CPUID_FLAG_STATEFUL_FUNC	BIT(1)
-#define KVM_CPUID_FLAG_STATE_READ_NEXT	BIT(2)
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX	(1 << 0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC	(1 << 1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT	(1 << 2)
 
 /* for KVM_SET_CPUID2 */
 struct kvm_cpuid2 {
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 8a4add8..3725e14 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -2,10 +2,12 @@
 #define _UAPI__SVM_H
 
 #define SVM_EXIT_READ_CR0	0x000
+#define SVM_EXIT_READ_CR2	0x002
 #define SVM_EXIT_READ_CR3	0x003
 #define SVM_EXIT_READ_CR4	0x004
 #define SVM_EXIT_READ_CR8	0x008
 #define SVM_EXIT_WRITE_CR0	0x010
+#define SVM_EXIT_WRITE_CR2	0x012
 #define SVM_EXIT_WRITE_CR3	0x013
 #define SVM_EXIT_WRITE_CR4	0x014
 #define SVM_EXIT_WRITE_CR8	0x018
@@ -73,15 +75,19 @@
 #define SVM_EXIT_MWAIT_COND	0x08c
 #define SVM_EXIT_XSETBV		0x08d
 #define SVM_EXIT_NPF		0x400
+#define SVM_EXIT_AVIC_INCOMPLETE_IPI		0x401
+#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
 
 #define SVM_EXIT_ERR		-1
 
 #define SVM_EXIT_REASONS \
 	{ SVM_EXIT_READ_CR0,	"read_cr0" }, \
+	{ SVM_EXIT_READ_CR2,	"read_cr2" }, \
 	{ SVM_EXIT_READ_CR3,	"read_cr3" }, \
 	{ SVM_EXIT_READ_CR4,	"read_cr4" }, \
 	{ SVM_EXIT_READ_CR8,	"read_cr8" }, \
 	{ SVM_EXIT_WRITE_CR0,	"write_cr0" }, \
+	{ SVM_EXIT_WRITE_CR2,	"write_cr2" }, \
 	{ SVM_EXIT_WRITE_CR3,	"write_cr3" }, \
 	{ SVM_EXIT_WRITE_CR4,	"write_cr4" }, \
 	{ SVM_EXIT_WRITE_CR8,	"write_cr8" }, \
@@ -89,32 +95,66 @@
 	{ SVM_EXIT_READ_DR1,	"read_dr1" }, \
 	{ SVM_EXIT_READ_DR2,	"read_dr2" }, \
 	{ SVM_EXIT_READ_DR3,	"read_dr3" }, \
+	{ SVM_EXIT_READ_DR4,	"read_dr4" }, \
+	{ SVM_EXIT_READ_DR5,	"read_dr5" }, \
+	{ SVM_EXIT_READ_DR6,	"read_dr6" }, \
+	{ SVM_EXIT_READ_DR7,	"read_dr7" }, \
	{ SVM_EXIT_WRITE_DR0,	"write_dr0" }, \
 	{ SVM_EXIT_WRITE_DR1,	"write_dr1" }, \
 	{ SVM_EXIT_WRITE_DR2,	"write_dr2" }, \
 	{ SVM_EXIT_WRITE_DR3,	"write_dr3" }, \
+	{ SVM_EXIT_WRITE_DR4,	"write_dr4" }, \
 	{ SVM_EXIT_WRITE_DR5,	"write_dr5" }, \
+	{ SVM_EXIT_WRITE_DR6,	"write_dr6" }, \
 	{ SVM_EXIT_WRITE_DR7,	"write_dr7" }, \
+	{ SVM_EXIT_EXCP_BASE + DE_VECTOR,	"DE excp" }, \
 	{ SVM_EXIT_EXCP_BASE + DB_VECTOR,	"DB excp" }, \
 	{ SVM_EXIT_EXCP_BASE + BP_VECTOR,	"BP excp" }, \
+	{ SVM_EXIT_EXCP_BASE + OF_VECTOR,	"OF excp" }, \
+	{ SVM_EXIT_EXCP_BASE + BR_VECTOR,	"BR excp" }, \
 	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,	"UD excp" }, \
-	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp" }, \
 	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,	"NM excp" }, \
+	{ SVM_EXIT_EXCP_BASE + DF_VECTOR,	"DF excp" }, \
+	{ SVM_EXIT_EXCP_BASE + TS_VECTOR,	"TS excp" }, \
+	{ SVM_EXIT_EXCP_BASE + NP_VECTOR,	"NP excp" }, \
+	{ SVM_EXIT_EXCP_BASE + SS_VECTOR,	"SS excp" }, \
+	{ SVM_EXIT_EXCP_BASE + GP_VECTOR,	"GP excp" }, \
+	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp" }, \
+	{ SVM_EXIT_EXCP_BASE + MF_VECTOR,	"MF excp" }, \
 	{ SVM_EXIT_EXCP_BASE + AC_VECTOR,	"AC excp" }, \
 	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,	"MC excp" }, \
+	{ SVM_EXIT_EXCP_BASE + XM_VECTOR,	"XF excp" }, \
 	{ SVM_EXIT_INTR,	"interrupt" }, \
 	{ SVM_EXIT_NMI,		"nmi" }, \
 	{ SVM_EXIT_SMI,		"smi" }, \
 	{ SVM_EXIT_INIT,	"init" }, \
 	{ SVM_EXIT_VINTR,	"vintr" }, \
+	{ SVM_EXIT_CR0_SEL_WRITE, "cr0_sel_write" }, \
+	{ SVM_EXIT_IDTR_READ,	"read_idtr" }, \
+	{ SVM_EXIT_GDTR_READ,	"read_gdtr" }, \
+	{ SVM_EXIT_LDTR_READ,	"read_ldtr" }, \
+	{ SVM_EXIT_TR_READ,	"read_rt" }, \
+	{ SVM_EXIT_IDTR_WRITE,	"write_idtr" }, \
+	{ SVM_EXIT_GDTR_WRITE,	"write_gdtr" }, \
+	{ SVM_EXIT_LDTR_WRITE,	"write_ldtr" }, \
+	{ SVM_EXIT_TR_WRITE,	"write_rt" }, \
+	{ SVM_EXIT_RDTSC,	"rdtsc" }, \
+	{ SVM_EXIT_RDPMC,	"rdpmc" }, \
+	{ SVM_EXIT_PUSHF,	"pushf" }, \
+	{ SVM_EXIT_POPF,	"popf" }, \
 	{ SVM_EXIT_CPUID,	"cpuid" }, \
+	{ SVM_EXIT_RSM,		"rsm" }, \
+	{ SVM_EXIT_IRET,	"iret" }, \
+	{ SVM_EXIT_SWINT,	"swint" }, \
 	{ SVM_EXIT_INVD,	"invd" }, \
+	{ SVM_EXIT_PAUSE,	"pause" }, \
 	{ SVM_EXIT_HLT,		"hlt" }, \
 	{ SVM_EXIT_INVLPG,	"invlpg" }, \
 	{ SVM_EXIT_INVLPGA,	"invlpga" }, \
 	{ SVM_EXIT_IOIO,	"io" }, \
 	{ SVM_EXIT_MSR,		"msr" }, \
 	{ SVM_EXIT_TASK_SWITCH,	"task_switch" }, \
+	{ SVM_EXIT_FERR_FREEZE,	"ferr_freeze" }, \
 	{ SVM_EXIT_SHUTDOWN,	"shutdown" }, \
 	{ SVM_EXIT_VMRUN,	"vmrun" }, \
 	{ SVM_EXIT_VMMCALL,	"hypercall" }, \
@@ -123,11 +163,16 @@
 	{ SVM_EXIT_STGI,	"stgi" }, \
 	{ SVM_EXIT_CLGI,	"clgi" }, \
 	{ SVM_EXIT_SKINIT,	"skinit" }, \
+	{ SVM_EXIT_RDTSCP,	"rdtscp" }, \
+	{ SVM_EXIT_ICEBP,	"icebp" }, \
 	{ SVM_EXIT_WBINVD,	"wbinvd" }, \
 	{ SVM_EXIT_MONITOR,	"monitor" }, \
 	{ SVM_EXIT_MWAIT,	"mwait" }, \
 	{ SVM_EXIT_XSETBV,	"xsetbv" }, \
-	{ SVM_EXIT_NPF,		"npf" }
+	{ SVM_EXIT_NPF,		"npf" }, \
+	{ SVM_EXIT_AVIC_INCOMPLETE_IPI,		"avic_incomplete_ipi" }, \
+	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS,	"avic_unaccelerated_access" }, \
+	{ SVM_EXIT_ERR,		"invalid_guest_state" }
 
 
 #endif /* _UAPI__SVM_H */