Diffstat (limited to 'arch')
-rw-r--r--  arch/microblaze/include/asm/uaccess.h      |  87
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c         |   3
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S       |   2
-rw-r--r--  arch/microblaze/kernel/microblaze_ksyms.c  |  11
-rw-r--r--  arch/microblaze/kernel/module.c            |   2
-rw-r--r--  arch/microblaze/mm/init.c                  |   1
-rw-r--r--  arch/microblaze/mm/pgtable.c               |   1
-rw-r--r--  arch/microblaze/pci/pci-common.c           |   2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h          |  38
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   1
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c          |   5
-rw-r--r--  arch/powerpc/kernel/entry_64.S             |   9
-rw-r--r--  arch/powerpc/kernel/irq.c                  |   6
-rw-r--r--  arch/powerpc/kernel/time.c                 |  60
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c                 |   2
-rw-r--r--  arch/s390/kernel/head31.S                  |   2
-rw-r--r--  arch/s390/kernel/head64.S                  |   2
-rw-r--r--  arch/s390/kernel/ptrace.c                  |   5
-rw-r--r--  arch/x86/kernel/kprobes.c                  |  27
-rw-r--r--  arch/x86/kvm/svm.c                         |   8
-rw-r--r--  arch/x86/kvm/vmx.c                         |   3
-rw-r--r--  arch/x86/kvm/x86.c                         |   4
22 files changed, 167 insertions(+), 114 deletions(-)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 446bec2..26460d1 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -182,6 +182,39 @@ extern long __user_bad(void);
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ unsigned long __gu_val = 0; \
+ const typeof(*(ptr)) __user *__gu_addr = (ptr); \
+ int __gu_err = 0; \
+ \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) { \
+ switch (size) { \
+ case 1: \
+ __get_user_asm("lbu", __gu_addr, __gu_val, \
+ __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("lhu", __gu_addr, __gu_val, \
+ __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("lw", __gu_addr, __gu_val, \
+ __gu_err); \
+ break; \
+ default: \
+ __gu_err = __user_bad(); \
+ break; \
+ } \
+ } else { \
+ __gu_err = -EFAULT; \
+ } \
+ x = (typeof(*(ptr)))__gu_val; \
+ __gu_err; \
+})
#define __get_user(x, ptr) \
({ \
@@ -206,12 +239,6 @@ extern long __user_bad(void);
})
-#define get_user(x, ptr) \
-({ \
- access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
- ? __get_user((x), (ptr)) : -EFAULT; \
-})
-
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
@@ -266,6 +293,42 @@ extern long __user_bad(void);
*
* Returns zero on success, or -EFAULT on error.
*/
+#define put_user(x, ptr) \
+ __put_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ typeof(*(ptr)) __pu_val; \
+ typeof(*(ptr)) __user *__pu_addr = (ptr); \
+ int __pu_err = 0; \
+ \
+ __pu_val = (x); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
+ switch (size) { \
+ case 1: \
+ __put_user_asm("sb", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("sh", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("sw", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 8: \
+ __put_user_asm_8(__pu_addr, __pu_val, __pu_err);\
+ break; \
+ default: \
+ __pu_err = __user_bad(); \
+ break; \
+ } \
+ } else { \
+ __pu_err = -EFAULT; \
+ } \
+ __pu_err; \
+})
#define __put_user(x, ptr) \
({ \
@@ -290,18 +353,6 @@ extern long __user_bad(void);
__gu_err; \
})
-#ifndef CONFIG_MMU
-
-#define put_user(x, ptr) __put_user((x), (ptr))
-
-#else /* CONFIG_MMU */
-
-#define put_user(x, ptr) \
-({ \
- access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
- ? __put_user((x), (ptr)) : -EFAULT; \
-})
-#endif /* CONFIG_MMU */
/* copy_to_from_user */
#define __copy_from_user(to, from, n) \
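The reworked get_user()/put_user() above fold the access_ok() check and the size dispatch into a single macro, instead of wrapping __get_user()/__put_user(). A minimal caller sketch (read_u32_from_user() is illustrative, not part of the patch):

	static int read_u32_from_user(u32 __user *uptr, u32 *out)
	{
		u32 val;

		/* get_user() now performs the access_ok() check itself
		 * and returns -EFAULT on failure, zeroing the destination. */
		if (get_user(val, uptr))
			return -EFAULT;

		*out = val;
		return 0;
	}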
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 21c3a92..109876e 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -137,8 +137,9 @@ do { \
do { \
int step = -line_length; \
int align = ~(line_length - 1); \
+ int count; \
end = ((end & align) == end) ? end - line_length : end & align; \
- int count = end - start; \
+ count = end - start; \
WARN_ON(count < 0); \
\
__asm__ __volatile__ (" 1: " #op " %0, %1; \
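The hunk above hoists the declaration of count because the kernel is built as gnu89, where a declaration may not follow a statement inside a block. Rewritten as a plain function, the shape of the fix is (sketch only; the real code is the macro body above):

	int step = -line_length;
	int align = ~(line_length - 1);
	int count;				/* declared up front...     */

	end = ((end & align) == end) ? end - line_length : end & align;
	count = end - start;			/* ...assigned after the
						 * intervening statement    */
	WARN_ON(count < 0);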
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 391d619..8cc18cd 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -476,6 +476,8 @@ ENTRY(ret_from_fork)
nop
work_pending:
+ enable_irq
+
andi r11, r19, _TIF_NEED_RESCHED
beqi r11, 1f
bralid r15, schedule
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bc4dcb7..ff85f77 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -52,3 +52,14 @@ EXPORT_SYMBOL_GPL(_ebss);
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__strncpy_user);
+
+#ifdef CONFIG_OPT_LIB_ASM
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+#endif
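These exports matter for loadable modules: with CONFIG_OPT_LIB_ASM, memcpy()/memmove() are assembly routines, and a module referencing them fails to load with "unknown symbol" unless they are exported. A hypothetical module fragment that depends on them:

	#include <linux/module.h>
	#include <linux/string.h>

	static int __init demo_init(void)
	{
		char buf[8];

		memcpy(buf, "test", 5);	/* resolves to the exported asm memcpy */
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");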
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index cbecf11..0e73f66 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
void *module_alloc(unsigned long size)
{
@@ -151,6 +152,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
+ flush_dcache();
return 0;
}
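The likely rationale: apply_relocate_add() patches module text through the data cache, and the instruction side does not snoop it, so the d-cache must be flushed before the module's code runs. Annotated, the new finalize step reads:

	int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
			    struct module *module)
	{
		/* Push patched relocations out of the d-cache so the
		 * non-coherent i-cache fetches the updated code. */
		flush_dcache();
		return 0;
	}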
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index f42c2dd..cca3579 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -47,6 +47,7 @@ unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
+EXPORT_SYMBOL(memory_size);
/*
* paging_init() sets up the page tables - in fact we've already done this.
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 784557f..59bf233 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -42,6 +42,7 @@
unsigned long ioremap_base;
unsigned long ioremap_bot;
+EXPORT_SYMBOL(ioremap_bot);
/* The maximum lowmem defaults to 768Mb, but this can be configured to
* another value.
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 01c8c97..9cb782b 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1507,7 +1507,7 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
pci_bus_add_devices(bus);
/* Fixup EEH */
- eeh_add_device_tree_late(bus);
+ /* eeh_add_device_tree_late(bus); */
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 9f4c9d4f..bd100fc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -130,43 +130,5 @@ static inline int irqs_disabled_flags(unsigned long flags)
*/
struct irq_chip;
-#ifdef CONFIG_PERF_EVENTS
-
-#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
-{
- unsigned long x;
-
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_event_pending)));
- return x;
-}
-
-static inline void set_perf_event_pending(void)
-{
- asm volatile("stb %0,%1(13)" : :
- "r" (1),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-
-static inline void clear_perf_event_pending(void)
-{
- asm volatile("stb %0,%1(13)" : :
- "r" (0),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-#endif /* CONFIG_PPC64 */
-
-#else /* CONFIG_PERF_EVENTS */
-
-static inline unsigned long test_perf_event_pending(void)
-{
- return 0;
-}
-
-static inline void clear_perf_event_pending(void) {}
-#endif /* CONFIG_PERF_EVENTS */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 957ceb7..c09138d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,6 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 59c9285..4ff4da2c 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -1,7 +1,8 @@
/*
* Contains routines needed to support swiotlb for ppc.
*
- * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
+ * Author: Becky Bruce
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -70,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
sd->max_direct_dma_addr = 0;
/* May need to bounce if the device can't address all of DRAM */
- if (dma_get_mask(dev) < lmb_end_of_DRAM())
+ if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
set_dma_ops(dev, &swiotlb_dma_ops);
return NOTIFY_DONE;
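The +1 matters because dma_get_mask() returns an inclusive address mask while lmb_end_of_DRAM() returns the first byte past memory. A worked case, assuming a 32-bit-capable device on a machine with exactly 4 GiB of RAM:

	u64 mask = DMA_BIT_MASK(32);	/* 0x00000000ffffffff */
	u64 end  = 0x100000000ULL;	/* first address past DRAM */

	/* old test:  mask < end      -> true:  swiotlb bounce, needlessly */
	/* new test: (mask + 1) < end -> false: device reaches all of DRAM */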
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 07109d8..42e9d90 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
TRACE_AND_RESTORE_IRQ(r5);
-#ifdef CONFIG_PERF_EVENTS
- /* check paca->perf_event_pending if we're enabling ints */
- lbz r3,PACAPERFPEND(r13)
- and. r3,r3,r5
- beq 27f
- bl .perf_event_do_pending
-27:
-#endif /* CONFIG_PERF_EVENTS */
-
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 64f6f20..066bd31 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,6 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
-#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -145,11 +144,6 @@ notrace void raw_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_PPC_STD_MMU_64 */
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
-
/*
* if (get_paca()->hard_enabled) return;
* But again we need to take care that gcc gets hard_enabled directly
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1b16b9a..0441bbd 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_event_pending);
+#ifdef CONFIG_PERF_EVENTS
-void set_perf_event_pending(void)
+/*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+ */
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
{
- get_cpu_var(perf_event_pending) = 1;
- set_dec(1);
- put_cpu_var(perf_event_pending);
+ unsigned long x;
+
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, perf_event_pending)));
+ return x;
}
+static inline void set_perf_event_pending_flag(void)
+{
+ asm volatile("stb %0,%1(13)" : :
+ "r" (1),
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+static inline void clear_perf_event_pending(void)
+{
+ asm volatile("stb %0,%1(13)" : :
+ "r" (0),
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+#else /* 32-bit */
+
+DEFINE_PER_CPU(u8, perf_event_pending);
+
+#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
-#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* 32 vs 64 bit */
+
+void set_perf_event_pending(void)
+{
+ preempt_disable();
+ set_perf_event_pending_flag();
+ set_dec(1);
+ preempt_enable();
+}
+
+#else /* CONFIG_PERF_EVENTS */
#define test_perf_event_pending() 0
#define clear_perf_event_pending()
-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -582,10 +617,6 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
@@ -604,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs)
calculate_steal_time();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
+ }
+
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES))
get_lppaca()->int_dword.fields.decr_int = 0;
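The preempt_disable()/preempt_enable() bracket in the new set_perf_event_pending() is what makes the consolidation safe: both steps must hit the same CPU, or the flag could be set on one CPU while the decrementer is poked on another, leaving the event undelivered. Annotated:

	void set_perf_event_pending(void)
	{
		preempt_disable();		/* pin to the current CPU   */
		set_perf_event_pending_flag();	/* mark this CPU as pending */
		set_dec(1);			/* force an imminent
						 * decrementer interrupt on
						 * this same CPU            */
		preempt_enable();
	}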
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2570fcc..8123125 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -440,7 +440,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
unsigned int gtlb_index;
gtlb_index = kvmppc_get_gpr(vcpu, ra);
- if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+ if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
printk("%s: index %d\n", __func__, gtlb_index);
kvmppc_dump_vcpu(vcpu);
return EMULATE_FAIL;
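This is the usual inclusive/exclusive bounds slip: a table of KVM44x_GUEST_TLB_SIZE entries has valid indices 0 .. SIZE-1, so index == SIZE must be rejected as well (illustration only):

	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE)	/* rejects SIZE itself */
		return EMULATE_FAIL;	/* the old '>' let SIZE through,
					 * indexing one past the end */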
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 1bbcc49..b8f8dc1 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -82,7 +82,7 @@ startup_continue:
_ehead:
#ifdef CONFIG_SHARED_KERNEL
- .org 0x100000
+ .org 0x100000 - 0x11000 # head.o ends at 0x11000
#endif
#
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 1f70970..cdef687 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -80,7 +80,7 @@ startup_continue:
_ehead:
#ifdef CONFIG_SHARED_KERNEL
- .org 0x100000
+ .org 0x100000 - 0x11000 # head.o ends at 0x11000
#endif
#
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 33fdc5a..9f654da 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -640,7 +640,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret;
+ long ret = 0;
/* Do the secure computing check first. */
secure_computing(regs->gprs[2]);
@@ -649,7 +649,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
* The sysc_tracesys code in entry.S stored the system
* call number to gprs[2].
*/
- ret = regs->gprs[2];
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
(tracehook_report_syscall_entry(regs) ||
regs->gprs[2] >= NR_syscalls)) {
@@ -671,7 +670,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
regs->gprs[2], regs->orig_gpr2,
regs->gprs[3], regs->gprs[4],
regs->gprs[5]);
- return ret;
+ return ret ?: regs->gprs[2];
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
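The `?:` above is the GNU "elvis" extension: `a ?: b` evaluates a once and yields it if non-zero, otherwise b. Here it returns the abort marker if tracing cancelled the syscall, and otherwise the (possibly tracer-rewritten) syscall number from gprs[2]. Spelled out:

	long tmp = ret;
	return tmp ? tmp : regs->gprs[2];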
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index b43bbae..1658efd 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -534,20 +534,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
struct kprobe_ctlblk *kcb;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- * Back up over the (now missing) int3 and run
- * the original instruction.
- */
- regs->ip = (unsigned long)addr;
- return 1;
- }
-
/*
* We don't want to be preempted for the entire
* duration of kprobe processing. We conditionally
@@ -579,6 +565,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
setup_singlestep(p, regs, kcb, 0);
return 1;
}
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
+ */
+ regs->ip = (unsigned long)addr;
+ preempt_enable_no_resched();
+ return 1;
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
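The subtlety in this move: the relocated branch now executes after the handler's preempt_disable(), so it must balance it before bailing out. The resulting control flow, sketched with surrounding code elided (get_kprobe() is the real lookup helper):

	preempt_disable();
	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/* int3 vanished under us: back up, re-execute the
			 * original insn, and undo the preempt_disable(). */
			regs->ip = (unsigned long)addr;
			preempt_enable_no_resched();
			return 1;
		}
		/* otherwise: not our breakpoint, fall through */
	}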
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ba5820..737361f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2067,7 +2067,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
static int iret_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.nmi_window_exits;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
return 1;
}
@@ -2479,7 +2479,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
@@ -2539,10 +2539,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
} else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
}
}
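vmcb->control.intercept is a u64, so 1UL is only safe on 64-bit hosts. On a 32-bit host the complement is computed in 32 bits and then zero-extended, silently clearing the entire upper half of the intercept mask (VMRUN, VMMCALL, STGI, ...). With bit 20 standing in for INTERCEPT_IRET:

	u64 a = ~0ULL, b = ~0ULL;

	a &= ~(1UL  << 20);	/* ~ computed in 32 bits, zero-extended:
				 * a == 0x00000000ffefffff - upper half wiped */
	b &= ~(1ULL << 20);	/* b == 0xffffffffffefffff - only bit 20 clear */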
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bc933cf..2f8db0e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2703,8 +2703,7 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
return 0;
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
- GUEST_INTR_STATE_NMI));
+ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
}
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c4ca98..c4f35b5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1712,6 +1712,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
+ vcpu_load(vcpu);
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -1729,6 +1730,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
r = 0;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ vcpu_put(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1749,9 +1751,11 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
+ vcpu_load(vcpu);
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ vcpu_put(vcpu);
return 0;
out:
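The vcpu_load()/vcpu_put() bracket added in both ioctls pins the vcpu's hardware state to the current CPU for the duration of the update; kvm_x86_ops->cpuid_update() touches VMCS/VMCB fields, which is only valid while the vcpu is loaded. The pattern, annotated:

	vcpu_load(vcpu);			/* bind vcpu state to this CPU */
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);	/* safe: vcpu is loaded */
	vcpu_put(vcpu);				/* release it again */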