Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/gaccess.h   | 62
-rw-r--r--  arch/s390/kvm/intercept.c | 14
-rw-r--r--  arch/s390/kvm/interrupt.c | 21
-rw-r--r--  arch/s390/kvm/kvm-s390.c  |  9
-rw-r--r--  arch/s390/kvm/sigp.c      |  5
5 files changed, 60 insertions, 51 deletions
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4e0633c..ed60f3a 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,11 +18,11 @@
 #include <asm/uaccess.h>
 
 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-                                               u64 guestaddr)
+                                               unsigned long guestaddr)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
@@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
         return (void __user *) guestaddr;
 }
 
-static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
         if (IS_ERR((void __force *) uptr))
                 return PTR_ERR((void __force *) uptr);
 
-        return get_user(*result, (u64 __user *) uptr);
+        return get_user(*result, (unsigned long __user *) uptr);
 }
 
-static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u32 __user *) uptr);
 }
 
-static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u16 __user *) uptr);
 }
 
-static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u8 __user *) uptr);
 }
 
-static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u64 __user *) uptr);
 }
 
-static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u32 __user *) uptr);
 }
 
-static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u16 __user *) uptr);
 }
 
-static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 }
 
 
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
+                                       unsigned long guestdest,
                                        const void *from, unsigned long n)
 {
         int rc;
@@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
         return 0;
 }
 
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                 const void *from, unsigned long n)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -189,7 +190,8 @@ slowpath:
 }
 
 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-                                         u64 guestsrc, unsigned long n)
+                                         unsigned long guestsrc,
+                                         unsigned long n)
 {
         int rc;
         unsigned long i;
@@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
 }
 
 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  u64 guestsrc, unsigned long n)
+                                  unsigned long guestsrc, unsigned long n)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -238,11 +240,12 @@ slowpath:
         return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+                                         unsigned long guestdest,
                                          const void *from, unsigned long n)
 {
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if (guestdest + n > memsize)
                 return -EFAULT;
@@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-                                           u64 guestsrc, unsigned long n)
+                                           unsigned long guestsrc,
+                                           unsigned long n)
 {
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if (guestsrc + n > memsize)
                 return -EFAULT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 47a0b64..6123610 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -20,7 +20,7 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 
-static int handle_lctg(struct kvm_vcpu *vcpu)
+static int handle_lctlg(struct kvm_vcpu *vcpu)
 {
         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
@@ -30,7 +30,7 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
         u64 useraddr;
         int reg, rc;
 
-        vcpu->stat.instruction_lctg++;
+        vcpu->stat.instruction_lctlg++;
 
         if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
                 return -ENOTSUPP;
@@ -38,9 +38,12 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
         if (base2)
                 useraddr += vcpu->arch.guest_gprs[base2];
 
+        if (useraddr & 7)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
         reg = reg1;
 
-        VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
                    disp2);
 
         do {
@@ -74,6 +77,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
         if (base2)
                 useraddr += vcpu->arch.guest_gprs[base2];
 
+        if (useraddr & 3)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
         VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
                    disp2);
 
@@ -99,7 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
         [0xae] = kvm_s390_handle_sigp,
         [0xb2] = kvm_s390_handle_priv,
         [0xb7] = handle_lctl,
-        [0xeb] = handle_lctg,
+        [0xeb] = handle_lctlg,
 };
 
 static int handle_noop(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 11230b0..2960702 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -13,6 +13,7 @@
 #include <asm/lowcore.h>
 #include <asm/uaccess.h>
 #include <linux/kvm_host.h>
+#include <linux/signal.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -246,15 +247,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
         default:
                 BUG();
         }
-
         if (exception) {
-                VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
-                           " interrupt");
-                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                if (inti->type == KVM_S390_PROGRAM_INT) {
-                        printk(KERN_WARNING "kvm: recursive program check\n");
-                        BUG();
-                }
+                printk("kvm: The guest lowcore is not mapped during interrupt "
+                       "delivery, killing userspace\n");
+                do_exit(SIGKILL);
         }
 }
 
@@ -277,14 +273,11 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
                            __LC_EXT_NEW_PSW, sizeof(psw_t));
         if (rc == -EFAULT)
                 exception = 1;
-
         if (exception) {
-                VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
-                           " ckc interrupt");
-                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                return 0;
+                printk("kvm: The guest lowcore is not mapped during interrupt "
+                       "delivery, killing userspace\n");
+                do_exit(SIGKILL);
         }
-
         return 1;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1782cbc..8b00eb2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -39,7 +39,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "exit_instruction", VCPU_STAT(exit_instruction) },
         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
-        { "instruction_lctg", VCPU_STAT(instruction_lctg) },
+        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
         { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
         { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
@@ -112,7 +112,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
 
 int kvm_dev_ioctl_check_extension(long ext)
 {
-        return 0;
+        switch (ext) {
+        case KVM_CAP_USER_MEMORY:
+                return 1;
+        default:
+                return 0;
+        }
 }
 
 /* Section: vm related */
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 5a55611..1703926 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -43,7 +43,8 @@
 #define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
 
 
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+                        unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         int rc;
@@ -167,7 +168,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 }
 
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-                             u64 *reg)
+                             unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;