From 462fce46065ec4b200c08619c047b9e5a8fd154a Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Wed, 27 Feb 2013 19:41:56 +0900 Subject: KVM: set_memory_region: Drop user_alloc from prepare/commit_memory_region() X86 does not use this any more. The remaining user, s390's !user_alloc check, can be simply removed since KVM_SET_MEMORY_REGION ioctl is no longer supported. Note: fixed powerpc's indentations with spaces to suppress checkpatch errors. Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/kvm-s390.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4cf35a0..07ac302 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -975,8 +975,7 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, - struct kvm_userspace_memory_region *mem, - bool user_alloc) + struct kvm_userspace_memory_region *mem) { /* A few sanity checks. We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a @@ -997,16 +996,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (mem->memory_size & 0xffffful) return -EINVAL; - if (!user_alloc) - return -EINVAL; - return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - bool user_alloc) + struct kvm_memory_slot old) { int rc; -- cgit v1.1 From 7b6195a91d60909a2834ab7181e2b9476e6fe749 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Wed, 27 Feb 2013 19:44:34 +0900 Subject: KVM: set_memory_region: Refactor prepare_memory_region() This patch drops the parameter old, a copy of the old memory slot, and adds a new parameter named change to know the change being requested. This not only cleans up the code but also removes extra copying of the memory slot structure. Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/kvm-s390.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 07ac302..4288780 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -974,8 +974,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, - struct kvm_memory_slot old, - struct kvm_userspace_memory_region *mem) + struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) { /* A few sanity checks. We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a -- cgit v1.1 From 8482644aea11e0647867732319ccf35879a9acc2 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Wed, 27 Feb 2013 19:45:25 +0900 Subject: KVM: set_memory_region: Refactor commit_memory_region() This patch makes the parameter old a const pointer to the old memory slot and adds a new parameter named change to know the change being requested: the former is for removing extra copying and the latter is for cleaning up the code. 
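Passing an explicit change type means the architecture hooks can branch on what is being requested instead of diffing two memory-slot copies, and making the old slot a const pointer avoids copying the structure. A condensed user-space sketch of that idea (illustrative only; the enumerator and struct names below are stand-ins, not necessarily the kernel's):

#include <stdio.h>

/* illustrative change types, mirroring the description above */
enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };

struct memslot { unsigned long base_gfn, npages, flags; };

/* old is read-only now -- no copy of the slot is handed to the hook */
static void commit_region(const struct memslot *old, enum mr_change change)
{
	if (change == MR_FLAGS_ONLY) {
		printf("flags-only update, nothing to remap\n");
		return;
	}
	printf("remap region starting at gfn %#lx\n", old->base_gfn);
}

int main(void)
{
	struct memslot old = { .base_gfn = 0x1000, .npages = 256, .flags = 0 };

	commit_region(&old, MR_FLAGS_ONLY);
	commit_region(&old, MR_MOVE);
	return 0;
}
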
Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/kvm-s390.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4288780..6cae4ad 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1001,7 +1001,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old) + const struct kvm_memory_slot *old, + enum kvm_mr_change change) { int rc; -- cgit v1.1 From 10ccaa1e7057d8a9dc3e9ce833af40ec8187b25e Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 28 Feb 2013 12:33:21 +0100 Subject: KVM: s390: Wire up ioeventfd. Enable ioeventfd support on s390 and hook up diagnose 500 virtio-ccw notifications. Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/Kconfig | 1 + arch/s390/kvm/Makefile | 2 +- arch/s390/kvm/diag.c | 26 ++++++++++++++++++++++++++ arch/s390/kvm/kvm-s390.c | 1 + 4 files changed, 29 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 60f9f8a..70b46ea 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -22,6 +22,7 @@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select HAVE_KVM_CPU_RELAX_INTERCEPT + select HAVE_KVM_EVENTFD ---help--- Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. This should work diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index 3975722..8fe9d65 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile @@ -6,7 +6,7 @@ # it under the terms of the GNU General Public License (version 2 only) # as published by the Free Software Foundation. -common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o) +common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o eventfd.o) ccflags-y := -Ivirt/kvm -Iarch/s390/kvm diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index a390687..1c01a99 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -13,6 +13,7 @@ #include #include +#include #include "kvm-s390.h" #include "trace.h" #include "trace-s390.h" @@ -104,6 +105,29 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) return -EREMOTE; } +static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) +{ + int ret, idx; + + /* No virtio-ccw notification? Get out quickly. */ + if (!vcpu->kvm->arch.css_support || + (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) + return -EOPNOTSUPP; + + idx = srcu_read_lock(&vcpu->kvm->srcu); + /* + * The layout is as follows: + * - gpr 2 contains the subchannel id (passed as addr) + * - gpr 3 contains the virtqueue index (passed as datamatch) + */ + ret = kvm_io_bus_write(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, + vcpu->run->s.regs.gprs[2], + 8, &vcpu->run->s.regs.gprs[3]); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + /* kvm_io_bus_write returns -EOPNOTSUPP if it found no match. */ + return ret < 0 ? 
ret : 0; +} + int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) { int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; @@ -118,6 +142,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) return __diag_time_slice_end_directed(vcpu); case 0x308: return __diag_ipl_functions(vcpu); + case 0x500: + return __diag_virtio_hypercall(vcpu); default: return -EOPNOTSUPP; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 6cae4ad..33161b4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -142,6 +142,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: case KVM_CAP_S390_CSS_SUPPORT: + case KVM_CAP_IOEVENTFD: r = 1; break; case KVM_CAP_NR_VCPUS: -- cgit v1.1 From 744b37fb5a63d45e92e590967bae82d8ac62e950 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:40 +0100 Subject: s390/kvm,gaccess: fix guest access return code handling Guest access functions like copy_to/from_guest() call __guestaddr_to_user() which in turn call gmap_fault() in order to translate a guest address to a user space address. In error case __guest_addr_to_user() returns either -EFAULT or -ENOMEM. The copy_to/from_guest functions just pass these return values down to the callers. The -ENOMEM case however is problematic since there are several places which access guest memory like: rc = copy_to_guest(...); if (rc == -EFAULT) error_handling(); So in case of -ENOMEM the code assumes that the guest memory access succeeded even though it failed. This can cause guest data or state corruption. If __guestaddr_to_user() returns -ENOMEM the meaning is that a valid user space mapping exists, but there was not enough memory available when trying to build the guest mapping. In other words an out-of-memory situation occured. For normal user space accesses an out-of-memory situation causes the page fault handler to map -ENOMEM to -EFAULT (see fixup code in do_no_context()). We need to do exactly the same for the kvm gaccess functions. So __guestaddr_to_user() should just map all error codes to -EFAULT. Signed-off-by: Heiko Carstens Reviewed-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/gaccess.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 4703f12..84d01dd 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -22,13 +22,16 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, unsigned long guestaddr) { unsigned long prefix = vcpu->arch.sie_block->prefix; + unsigned long uaddress; if (guestaddr < 2 * PAGE_SIZE) guestaddr += prefix; else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE)) guestaddr -= prefix; - - return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap); + uaddress = gmap_fault(guestaddr, vcpu->arch.gmap); + if (IS_ERR_VALUE(uaddress)) + uaddress = -EFAULT; + return (void __user *)uaddress; } static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, -- cgit v1.1 From 59a1fa2d80c0d351755cb29273b2b256dc4b3a11 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:42 +0100 Subject: s390/kvm,tprot: use new gmap_translate() function When out-of-memory the tprot code incorrectly injected a program check for the guest which reported an addressing exception even if the guest address was valid. 
Let's use the new gmap_translate() which translates a guest address to a user space address whithout the chance of running into an out-of-memory situation. Also make it more explicit that for -EFAULT we won't find a vma. Signed-off-by: Heiko Carstens Reviewed-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/priv.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 0ef9894..75ad91e 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -575,20 +575,13 @@ static int handle_tprot(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) return -EOPNOTSUPP; - - /* we must resolve the address without holding the mmap semaphore. - * This is ok since the userspace hypervisor is not supposed to change - * the mapping while the guest queries the memory. Otherwise the guest - * might crash or get wrong info anyway. */ - user_address = (unsigned long) __guestaddr_to_user(vcpu, address1); - down_read(¤t->mm->mmap_sem); + user_address = __gmap_translate(address1, vcpu->arch.gmap); + if (IS_ERR_VALUE(user_address)) + goto out_inject; vma = find_vma(current->mm, user_address); - if (!vma) { - up_read(¤t->mm->mmap_sem); - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - } - + if (!vma) + goto out_inject; vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); @@ -597,6 +590,10 @@ static int handle_tprot(struct kvm_vcpu *vcpu) up_read(¤t->mm->mmap_sem); return 0; + +out_inject: + up_read(¤t->mm->mmap_sem); + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) -- cgit v1.1 From dc5008b9bf6adb0c0a5afba6fb376a85451b2697 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:43 +0100 Subject: s390/kvm: remove explicit -EFAULT return code checking on guest access Let's change to the paradigm that every return code from guest memory access functions that is not zero translates to -EFAULT and do not explictly compare. Explictly comparing the return value with -EFAULT has already shown to be a bit fragile. In addition this is closer to the handling of copy_to/from_user functions, which imho is in general a good idea. Also shorten the return code handling in interrupt.c a bit. 
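The shortened handling described above works because the guest access helpers now return 0 on success and a negative value (always -EFAULT) on failure, so several accesses can be collected with |= and checked once at the end. A minimal user-space sketch of that pattern (illustrative only, not the kernel code itself; the two helpers are stand-ins):

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the guest access helpers: 0 on success, -EFAULT on error. */
static int put_u16(unsigned long addr, unsigned short val)
{
	(void)val;			/* a real helper would store val at addr */
	return addr ? 0 : -EFAULT;	/* pretend a zero address faults */
}

static int copy_block(unsigned long addr, const void *src, unsigned long len)
{
	return (addr && src && len) ? 0 : -EFAULT;
}

int main(void)
{
	char psw[16] = { 0 };
	int rc;

	/* Collect all return codes; any failure leaves rc non-zero. */
	rc  = put_u16(0x100, 0x1201);
	rc |= put_u16(0x0, 0x0d00);		/* this one "faults" */
	rc |= copy_block(0x110, psw, sizeof(psw));

	if (rc)
		printf("delivery failed, would kill userspace\n");
	return 0;
}
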
Signed-off-by: Heiko Carstens Acked-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/intercept.c | 4 +- arch/s390/kvm/interrupt.c | 241 +++++++++++++--------------------------------- arch/s390/kvm/priv.c | 6 +- 3 files changed, 74 insertions(+), 177 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index f26ff1e..9b22047 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -45,7 +45,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) do { rc = get_guest_u64(vcpu, useraddr, &vcpu->arch.sie_block->gcr[reg]); - if (rc == -EFAULT) { + if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; } @@ -79,7 +79,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) reg = reg1; do { rc = get_guest_u32(vcpu, useraddr, &val); - if (rc == -EFAULT) { + if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 37116a7..5afa931 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -180,7 +180,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt_info *inti) { const unsigned short table[] = { 2, 4, 4, 6 }; - int rc, exception = 0; + int rc = 0; switch (inti->type) { case KVM_S390_INT_EMERGENCY: @@ -188,74 +188,38 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_emergency_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->emerg.code, 0); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_EXT_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; + rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201); + rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code); + rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_EXT_NEW_PSW, sizeof(psw_t)); break; - case KVM_S390_INT_EXTERNAL_CALL: VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); vcpu->stat.deliver_external_call++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->extcall.code, 0); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_EXT_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; + rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202); + rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code); + rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_EXT_NEW_PSW, sizeof(psw_t)); break; - case KVM_S390_INT_SERVICE: VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", inti->ext.ext_params); vcpu->stat.deliver_service_signal++; 
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->ext.ext_params, 0); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_EXT_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); - if (rc == -EFAULT) - exception = 1; + rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401); + rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_EXT_NEW_PSW, sizeof(psw_t)); + rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); break; - case KVM_S390_INT_VIRTIO: VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", inti->ext.ext_params, inti->ext.ext_params2); @@ -263,34 +227,16 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->ext.ext_params, inti->ext.ext_params2); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_EXT_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2, - inti->ext.ext_params2); - if (rc == -EFAULT) - exception = 1; + rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); + rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00); + rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_EXT_NEW_PSW, sizeof(psw_t)); + rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); + rc |= put_guest_u64(vcpu, __LC_EXT_PARAMS2, + inti->ext.ext_params2); break; - case KVM_S390_SIGP_STOP: VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); vcpu->stat.deliver_stop_signal++; @@ -313,18 +259,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_restart_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, 0); - rc = copy_to_guest(vcpu, offsetof(struct _lowcore, - restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - offsetof(struct _lowcore, restart_psw), sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; + rc = copy_to_guest(vcpu, + offsetof(struct _lowcore, restart_old_psw), + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + offsetof(struct _lowcore, restart_psw), + sizeof(psw_t)); atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); break; - case KVM_S390_PROGRAM_INT: VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", inti->pgm.code, @@ -332,24 +274,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->pgm.code, 0); - rc = 
put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u16(vcpu, __LC_PGM_ILC, - table[vcpu->arch.sie_block->ipa >> 14]); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_PGM_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; + rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code); + rc |= put_guest_u16(vcpu, __LC_PGM_ILC, + table[vcpu->arch.sie_block->ipa >> 14]); + rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_PGM_NEW_PSW, sizeof(psw_t)); break; case KVM_S390_MCHK: @@ -358,24 +289,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->mchk.cr14, inti->mchk.mcic); - rc = kvm_s390_vcpu_store_status(vcpu, - KVM_S390_STORE_STATUS_PREFIXED); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_MCK_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; + rc = kvm_s390_vcpu_store_status(vcpu, + KVM_S390_STORE_STATUS_PREFIXED); + rc |= put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic); + rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_MCK_NEW_PSW, sizeof(psw_t)); break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: @@ -388,67 +308,44 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_io_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, param0, param1); - rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID, - inti->io.subchannel_id); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR, - inti->io.subchannel_nr); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u32(vcpu, __LC_IO_INT_PARM, - inti->io.io_int_parm); - if (rc == -EFAULT) - exception = 1; - - rc = put_guest_u32(vcpu, __LC_IO_INT_WORD, - inti->io.io_int_word); - if (rc == -EFAULT) - exception = 1; - - rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_IO_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; + rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID, + inti->io.subchannel_id); + rc |= put_guest_u16(vcpu, __LC_SUBCHANNEL_NR, + inti->io.subchannel_nr); + rc |= put_guest_u32(vcpu, __LC_IO_INT_PARM, + inti->io.io_int_parm); + rc |= put_guest_u32(vcpu, __LC_IO_INT_WORD, + inti->io.io_int_word); + rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_IO_NEW_PSW, sizeof(psw_t)); break; } default: BUG(); } - if (exception) { + if (rc) { printk("kvm: The guest lowcore is not mapped during interrupt " - "delivery, killing userspace\n"); + "delivery, killing userspace\n"); do_exit(SIGKILL); } } static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) { - int rc, exception 
= 0; + int rc; if (psw_extint_disabled(vcpu)) return 0; if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) return 0; - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004); - if (rc == -EFAULT) - exception = 1; - rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, - __LC_EXT_NEW_PSW, sizeof(psw_t)); - if (rc == -EFAULT) - exception = 1; - if (exception) { + rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004); + rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_EXT_NEW_PSW, sizeof(psw_t)); + if (rc) { printk("kvm: The guest lowcore is not mapped during interrupt " "delivery, killing userspace\n"); do_exit(SIGKILL); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 75ad91e..34b42dc 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -108,7 +108,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) } rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id); - if (rc == -EFAULT) { + if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } @@ -230,7 +230,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), &facility_list, sizeof(facility_list)); - if (rc == -EFAULT) + if (rc) kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); else { VCPU_EVENT(vcpu, 5, "store facility list value %x", @@ -348,7 +348,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu) } rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data); - if (rc == -EFAULT) { + if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } -- cgit v1.1 From 396083a964aa4e86061d0e3449b1e0548a8197a9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:44 +0100 Subject: s390/kvm,gaccess: shorten put/get_guest code The put_guest_u*/get_guest_u* are nothing but wrappers for the regular put_user/get_user uaccess functions. The only difference is that before accessing user space the guest address must be translated to a user space address. Change the order of arguments for the guest access functions so they match their uaccess parts. Also remove the u* suffix, so we simply have put_guest/get_guest which will automatically use the right size dependent on pointer type of the destination/source that now must be correct. In result the same behaviour as put_user/get_user except that accesses must be aligned. 
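The automatic size selection works because put_user()/get_user() dispatch on the pointee type of the pointer they are given; the new macros only translate the guest address first and then hand the typed pointer on. A rough user-space analogue of a type-sized accessor (illustrative only; memcpy stands in for the real uaccess primitive):

#include <stdio.h>
#include <string.h>

/*
 * Store x at the location gptr points to, using the size implied by the
 * pointer type -- roughly what put_guest() does after translating the
 * guest address to a user space address.
 */
#define put_val(x, gptr)						\
({									\
	__typeof__(*(gptr)) __val = (x);				\
	memcpy((gptr), &__val, sizeof(__val));				\
	0;	/* 0 == success, mirroring put_user() */		\
})

int main(void)
{
	unsigned short hw;
	unsigned int w;

	put_val(0x1201, &hw);		/* 2-byte store, from the u16 pointer */
	put_val(0xdeadbeef, &w);	/* 4-byte store, from the u32 pointer */
	printf("%x %x\n", hw, w);
	return 0;
}
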
Signed-off-by: Heiko Carstens Acked-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/gaccess.h | 153 ++++++++++++---------------------------------- arch/s390/kvm/intercept.c | 6 +- arch/s390/kvm/interrupt.c | 52 ++++++++-------- arch/s390/kvm/priv.c | 22 +++---- 4 files changed, 81 insertions(+), 152 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 84d01dd..82f450e 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -18,122 +18,47 @@ #include #include "kvm-s390.h" -static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, - unsigned long guestaddr) +static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) { unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long uaddress; - - if (guestaddr < 2 * PAGE_SIZE) - guestaddr += prefix; - else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE)) - guestaddr -= prefix; - uaddress = gmap_fault(guestaddr, vcpu->arch.gmap); - if (IS_ERR_VALUE(uaddress)) - uaddress = -EFAULT; - return (void __user *)uaddress; -} - -static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u64 *result) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - BUG_ON(guestaddr & 7); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return get_user(*result, (unsigned long __user *) uptr); -} - -static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u32 *result) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - BUG_ON(guestaddr & 3); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return get_user(*result, (u32 __user *) uptr); -} - -static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u16 *result) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - BUG_ON(guestaddr & 1); - - if (IS_ERR(uptr)) - return PTR_ERR(uptr); - - return get_user(*result, (u16 __user *) uptr); -} - -static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u8 *result) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return get_user(*result, (u8 __user *) uptr); -} - -static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u64 value) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - BUG_ON(guestaddr & 7); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return put_user(value, (u64 __user *) uptr); -} - -static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u32 value) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - BUG_ON(guestaddr & 3); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return put_user(value, (u32 __user *) uptr); -} - -static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u16 value) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - BUG_ON(guestaddr & 1); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return put_user(value, (u16 __user *) uptr); -} - -static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, - u8 value) -{ - void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); - - if 
(IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - return put_user(value, (u8 __user *) uptr); + unsigned long gaddr = (unsigned long) gptr; + unsigned long uaddr; + + if (gaddr < 2 * PAGE_SIZE) + gaddr += prefix; + else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE)) + gaddr -= prefix; + uaddr = gmap_fault(gaddr, vcpu->arch.gmap); + if (IS_ERR_VALUE(uaddr)) + uaddr = -EFAULT; + return (void *)uaddr; } +#define get_guest(vcpu, x, gptr) \ +({ \ + __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr); \ + int __mask = sizeof(__typeof__(*(gptr))) - 1; \ + int __ret = PTR_RET(__uptr); \ + \ + if (!__ret) { \ + BUG_ON((unsigned long)__uptr & __mask); \ + __ret = get_user(x, __uptr); \ + } \ + __ret; \ +}) + +#define put_guest(vcpu, x, gptr) \ +({ \ + __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr); \ + int __mask = sizeof(__typeof__(*(gptr))) - 1; \ + int __ret = PTR_RET(__uptr); \ + \ + if (!__ret) { \ + BUG_ON((unsigned long)__uptr & __mask); \ + __ret = put_user(x, __uptr); \ + } \ + __ret; \ +}) static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, unsigned long guestdest, @@ -144,7 +69,7 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u8 *data = from; for (i = 0; i < n; i++) { - rc = put_guest_u8(vcpu, guestdest++, *(data++)); + rc = put_guest(vcpu, *(data++), (u8 *)guestdest++); if (rc < 0) return rc; } @@ -270,7 +195,7 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, u8 *data = to; for (i = 0; i < n; i++) { - rc = get_guest_u8(vcpu, guestsrc++, data++); + rc = get_guest(vcpu, *(data++), (u8 *)guestsrc++); if (rc < 0) return rc; } diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 9b22047..6474400 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -43,8 +43,8 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr); do { - rc = get_guest_u64(vcpu, useraddr, - &vcpu->arch.sie_block->gcr[reg]); + rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg], + (u64 *) useraddr); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; @@ -78,7 +78,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) reg = reg1; do { - rc = get_guest_u32(vcpu, useraddr, &val); + rc = get_guest(vcpu, val, (u32 *) useraddr); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 5afa931..d78824b 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -188,8 +188,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_emergency_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->emerg.code, 0); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201); - rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code); + rc = put_guest(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest(vcpu, inti->emerg.code, + (u16 *)__LC_EXT_CPU_ADDR); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -200,8 +201,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_external_call++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->extcall.code, 0); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202); - rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code); + rc = put_guest(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); + rc |= 
put_guest(vcpu, inti->extcall.code, + (u16 *)__LC_EXT_CPU_ADDR); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -213,12 +215,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_service_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->ext.ext_params, 0); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401); + rc = put_guest(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, __LC_EXT_NEW_PSW, sizeof(psw_t)); - rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); + rc |= put_guest(vcpu, inti->ext.ext_params, + (u32 *)__LC_EXT_PARAMS); break; case KVM_S390_INT_VIRTIO: VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", @@ -227,15 +230,16 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->ext.ext_params, inti->ext.ext_params2); - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); - rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00); + rc = put_guest(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, __LC_EXT_NEW_PSW, sizeof(psw_t)); - rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); - rc |= put_guest_u64(vcpu, __LC_EXT_PARAMS2, - inti->ext.ext_params2); + rc |= put_guest(vcpu, inti->ext.ext_params, + (u32 *)__LC_EXT_PARAMS); + rc |= put_guest(vcpu, inti->ext.ext_params2, + (u64 *)__LC_EXT_PARAMS2); break; case KVM_S390_SIGP_STOP: VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); @@ -274,9 +278,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->pgm.code, 0); - rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code); - rc |= put_guest_u16(vcpu, __LC_PGM_ILC, - table[vcpu->arch.sie_block->ipa >> 14]); + rc = put_guest(vcpu, inti->pgm.code, (u16 *)__LC_PGM_INT_CODE); + rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14], + (u16 *)__LC_PGM_ILC); rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -291,7 +295,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, inti->mchk.mcic); rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); - rc |= put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic); + rc |= put_guest(vcpu, inti->mchk.mcic, (u64 *) __LC_MCCK_CODE); rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -308,14 +312,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_io_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, param0, param1); - rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID, - inti->io.subchannel_id); - rc |= put_guest_u16(vcpu, __LC_SUBCHANNEL_NR, - inti->io.subchannel_nr); - rc |= put_guest_u32(vcpu, __LC_IO_INT_PARM, - inti->io.io_int_parm); - rc |= put_guest_u32(vcpu, __LC_IO_INT_WORD, - inti->io.io_int_word); + rc = put_guest(vcpu, inti->io.subchannel_id, + (u16 *) __LC_SUBCHANNEL_ID); + rc |= 
put_guest(vcpu, inti->io.subchannel_nr, + (u16 *) __LC_SUBCHANNEL_NR); + rc |= put_guest(vcpu, inti->io.io_int_parm, + (u32 *) __LC_IO_INT_PARM); + rc |= put_guest(vcpu, inti->io.io_int_word, + (u32 *) __LC_IO_INT_WORD); rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -340,7 +344,7 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) return 0; if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) return 0; - rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004); + rc = put_guest(vcpu, 0x1004, (u16 *)__LC_EXT_INT_CODE); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 34b42dc..cb07147 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -41,7 +41,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) } /* get the value */ - if (get_guest_u32(vcpu, operand2, &address)) { + if (get_guest(vcpu, address, (u32 *) operand2)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } @@ -82,7 +82,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) address = address & 0x7fffe000u; /* get the value */ - if (put_guest_u32(vcpu, operand2, address)) { + if (put_guest(vcpu, address, (u32 *)operand2)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } @@ -107,7 +107,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) goto out; } - rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id); + rc = put_guest(vcpu, vcpu->vcpu_id, (u16 *)useraddr); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; @@ -142,18 +142,18 @@ static int handle_tpi(struct kvm_vcpu *vcpu) * Store the two-word I/O interruption code into the * provided area. */ - put_guest_u16(vcpu, addr, inti->io.subchannel_id); - put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr); - put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm); + put_guest(vcpu, inti->io.subchannel_id, (u16 *) addr); + put_guest(vcpu, inti->io.subchannel_nr, (u16 *) (addr + 2)); + put_guest(vcpu, inti->io.io_int_parm, (u32 *) (addr + 4)); } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ - put_guest_u16(vcpu, 184, inti->io.subchannel_id); - put_guest_u16(vcpu, 186, inti->io.subchannel_nr); - put_guest_u32(vcpu, 188, inti->io.io_int_parm); - put_guest_u32(vcpu, 192, inti->io.io_int_word); + put_guest(vcpu, inti->io.subchannel_id, (u16 *) 184); + put_guest(vcpu, inti->io.subchannel_nr, (u16 *) 186); + put_guest(vcpu, inti->io.io_int_parm, (u32 *) 188); + put_guest(vcpu, inti->io.io_int_word, (u32 *) 192); } cc = 1; } else @@ -347,7 +347,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu) goto out; } - rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data); + rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 *)operand2); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; -- cgit v1.1 From f9dc72e82d32cc9fe40d1dea7709d434bba2d4a9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:45 +0100 Subject: s390/kvm,gaccess: shorten copy_to/from_guest code The code can be significantly shortened. There is no functional change, except that for large (> PAGE_SIZE) copies the guest translation would be done more frequently. However, there is not a single user which does this currently. If one gets added later on this functionality can be added easily again. 
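The replacement __copy_guest() walks the range one page at a time and translates each page separately; the chunk length is the distance to the next page boundary, capped by the remaining length, which is why large copies now translate more often. A self-contained sketch of that chunking calculation (illustrative only, the copy itself is omitted):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long addr = 0x12345f00;	/* arbitrary start address */
	unsigned long len = 10000;		/* spans three pages */

	while (len) {
		/* bytes left until the next page boundary, capped by len */
		unsigned long chunk = PAGE_SIZE - (addr & (PAGE_SIZE - 1));

		chunk = min_ul(chunk, len);
		printf("copy %5lu bytes at %#lx\n", chunk, addr);
		/* a real implementation would translate addr and copy here */
		addr += chunk;
		len -= chunk;
	}
	return 0;
}
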
Signed-off-by: Heiko Carstens Reviewed-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/gaccess.h | 294 +++++++----------------------------------------- 1 file changed, 41 insertions(+), 253 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 82f450e..8608d7e 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -18,16 +18,19 @@ #include #include "kvm-s390.h" -static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) +static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr, + int prefixing) { unsigned long prefix = vcpu->arch.sie_block->prefix; unsigned long gaddr = (unsigned long) gptr; unsigned long uaddr; - if (gaddr < 2 * PAGE_SIZE) - gaddr += prefix; - else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE)) - gaddr -= prefix; + if (prefixing) { + if (gaddr < 2 * PAGE_SIZE) + gaddr += prefix; + else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE)) + gaddr -= prefix; + } uaddr = gmap_fault(gaddr, vcpu->arch.gmap); if (IS_ERR_VALUE(uaddr)) uaddr = -EFAULT; @@ -36,7 +39,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) #define get_guest(vcpu, x, gptr) \ ({ \ - __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr); \ + __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ int __mask = sizeof(__typeof__(*(gptr))) - 1; \ int __ret = PTR_RET(__uptr); \ \ @@ -49,7 +52,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) #define put_guest(vcpu, x, gptr) \ ({ \ - __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr); \ + __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ int __mask = sizeof(__typeof__(*(gptr))) - 1; \ int __ret = PTR_RET(__uptr); \ \ @@ -60,255 +63,40 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) __ret; \ }) -static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, - unsigned long guestdest, - void *from, unsigned long n) -{ - int rc; - unsigned long i; - u8 *data = from; - - for (i = 0; i < n; i++) { - rc = put_guest(vcpu, *(data++), (u8 *)guestdest++); - if (rc < 0) - return rc; - } - return 0; -} - -static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu, - unsigned long guestdest, - void *from, unsigned long n) -{ - int r; - void __user *uptr; - unsigned long size; - - if (guestdest + n < guestdest) - return -EFAULT; - - /* simple case: all within one segment table entry? 
*/ - if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) { - uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - r = copy_to_user(uptr, from, n); - - if (r) - r = -EFAULT; - - goto out; - } - - /* copy first segment */ - uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - size = PMD_SIZE - (guestdest & ~PMD_MASK); - - r = copy_to_user(uptr, from, size); - - if (r) { - r = -EFAULT; - goto out; - } - from += size; - n -= size; - guestdest += size; - - /* copy full segments */ - while (n >= PMD_SIZE) { - uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - r = copy_to_user(uptr, from, PMD_SIZE); - - if (r) { - r = -EFAULT; - goto out; - } - from += PMD_SIZE; - n -= PMD_SIZE; - guestdest += PMD_SIZE; - } - - /* copy the tail segment */ - if (n) { - uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - r = copy_to_user(uptr, from, n); - - if (r) - r = -EFAULT; - } -out: - return r; -} - -static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, - unsigned long guestdest, - void *from, unsigned long n) -{ - return __copy_to_guest_fast(vcpu, guestdest, from, n); -} - -static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, - void *from, unsigned long n) -{ - unsigned long prefix = vcpu->arch.sie_block->prefix; - - if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) - goto slowpath; - - if ((guestdest < prefix) && (guestdest + n > prefix)) - goto slowpath; - - if ((guestdest < prefix + 2 * PAGE_SIZE) - && (guestdest + n > prefix + 2 * PAGE_SIZE)) - goto slowpath; - - if (guestdest < 2 * PAGE_SIZE) - guestdest += prefix; - else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE)) - guestdest -= prefix; - - return __copy_to_guest_fast(vcpu, guestdest, from, n); -slowpath: - return __copy_to_guest_slow(vcpu, guestdest, from, n); -} - -static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, - unsigned long guestsrc, - unsigned long n) +static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to, + unsigned long from, unsigned long len, + int to_guest, int prefixing) { - int rc; - unsigned long i; - u8 *data = to; - - for (i = 0; i < n; i++) { - rc = get_guest(vcpu, *(data++), (u8 *)guestsrc++); - if (rc < 0) - return rc; + unsigned long _len, rc; + void *uptr; + + while (len) { + uptr = to_guest ? (void *)to : (void *)from; + uptr = __gptr_to_uptr(vcpu, uptr, prefixing); + if (IS_ERR(uptr)) + return -EFAULT; + _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1)); + _len = min(_len, len); + if (to_guest) + rc = copy_to_user(uptr, (void *)from, _len); + else + rc = copy_from_user((void *)to, uptr, _len); + if (rc) + return -EFAULT; + len -= _len; + from += _len; + to += _len; } return 0; } -static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to, - unsigned long guestsrc, - unsigned long n) -{ - int r; - void __user *uptr; - unsigned long size; - - if (guestsrc + n < guestsrc) - return -EFAULT; - - /* simple case: all within one segment table entry? 
*/ - if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) { - uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - r = copy_from_user(to, uptr, n); - - if (r) - r = -EFAULT; - - goto out; - } - - /* copy first segment */ - uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - size = PMD_SIZE - (guestsrc & ~PMD_MASK); - - r = copy_from_user(to, uptr, size); - - if (r) { - r = -EFAULT; - goto out; - } - to += size; - n -= size; - guestsrc += size; - - /* copy full segments */ - while (n >= PMD_SIZE) { - uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); +#define copy_to_guest(vcpu, to, from, size) \ + __copy_guest(vcpu, to, (unsigned long)from, size, 1, 1) +#define copy_from_guest(vcpu, to, from, size) \ + __copy_guest(vcpu, (unsigned long)to, from, size, 0, 1) +#define copy_to_guest_absolute(vcpu, to, from, size) \ + __copy_guest(vcpu, to, (unsigned long)from, size, 1, 0) +#define copy_from_guest_absolute(vcpu, to, from, size) \ + __copy_guest(vcpu, (unsigned long)to, from, size, 0, 0) - r = copy_from_user(to, uptr, PMD_SIZE); - - if (r) { - r = -EFAULT; - goto out; - } - to += PMD_SIZE; - n -= PMD_SIZE; - guestsrc += PMD_SIZE; - } - - /* copy the tail segment */ - if (n) { - uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - - if (IS_ERR((void __force *) uptr)) - return PTR_ERR((void __force *) uptr); - - r = copy_from_user(to, uptr, n); - - if (r) - r = -EFAULT; - } -out: - return r; -} - -static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, - unsigned long guestsrc, - unsigned long n) -{ - return __copy_from_guest_fast(vcpu, to, guestsrc, n); -} - -static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, - unsigned long guestsrc, unsigned long n) -{ - unsigned long prefix = vcpu->arch.sie_block->prefix; - - if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) - goto slowpath; - - if ((guestsrc < prefix) && (guestsrc + n > prefix)) - goto slowpath; - - if ((guestsrc < prefix + 2 * PAGE_SIZE) - && (guestsrc + n > prefix + 2 * PAGE_SIZE)) - goto slowpath; - - if (guestsrc < 2 * PAGE_SIZE) - guestsrc += prefix; - else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE)) - guestsrc -= prefix; - - return __copy_from_guest_fast(vcpu, to, guestsrc, n); -slowpath: - return __copy_from_guest_slow(vcpu, to, guestsrc, n); -} -#endif +#endif /* __KVM_S390_GACCESS_H */ -- cgit v1.1 From 7c959e82ac331396d05e7118a48c7c1debbefdf8 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:46 +0100 Subject: s390/kvm: cleanup/fix handle_tpi() - add missing specification exception check - remove one level of indentation - use defines instead of magic numbers Signed-off-by: Heiko Carstens Reviewed-by: Cornelia Huck Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/priv.c | 54 +++++++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 24 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index cb07147..d64382c 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -129,39 +130,44 @@ static int handle_skey(struct kvm_vcpu 
*vcpu) static int handle_tpi(struct kvm_vcpu *vcpu) { - u64 addr; struct kvm_s390_interrupt_info *inti; + u64 addr; int cc; addr = kvm_s390_get_base_disp_s(vcpu); - + if (addr & 3) { + kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + goto out; + } + cc = 0; inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); - if (inti) { - if (addr) { - /* - * Store the two-word I/O interruption code into the - * provided area. - */ - put_guest(vcpu, inti->io.subchannel_id, (u16 *) addr); - put_guest(vcpu, inti->io.subchannel_nr, (u16 *) (addr + 2)); - put_guest(vcpu, inti->io.io_int_parm, (u32 *) (addr + 4)); - } else { - /* - * Store the three-word I/O interruption code into - * the appropriate lowcore area. - */ - put_guest(vcpu, inti->io.subchannel_id, (u16 *) 184); - put_guest(vcpu, inti->io.subchannel_nr, (u16 *) 186); - put_guest(vcpu, inti->io.io_int_parm, (u32 *) 188); - put_guest(vcpu, inti->io.io_int_word, (u32 *) 192); - } - cc = 1; - } else - cc = 0; + if (!inti) + goto no_interrupt; + cc = 1; + if (addr) { + /* + * Store the two-word I/O interruption code into the + * provided area. + */ + put_guest(vcpu, inti->io.subchannel_id, (u16 *) addr); + put_guest(vcpu, inti->io.subchannel_nr, (u16 *) (addr + 2)); + put_guest(vcpu, inti->io.io_int_parm, (u32 *) (addr + 4)); + } else { + /* + * Store the three-word I/O interruption code into + * the appropriate lowcore area. + */ + put_guest(vcpu, inti->io.subchannel_id, (u16 *) __LC_SUBCHANNEL_ID); + put_guest(vcpu, inti->io.subchannel_nr, (u16 *) __LC_SUBCHANNEL_NR); + put_guest(vcpu, inti->io.io_int_parm, (u32 *) __LC_IO_INT_PARM); + put_guest(vcpu, inti->io.io_int_word, (u32 *) __LC_IO_INT_WORD); + } kfree(inti); +no_interrupt: /* Set condition code and we're done. */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; +out: return 0; } -- cgit v1.1 From 0a75ca277c9f1145df37f8bbad10aecf0049a554 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 5 Mar 2013 13:14:47 +0100 Subject: s390/kvm,gaccess: add address space annotations Add missing address space annotations to all put_guest()/get_guest() callers. 
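The __user annotation only matters to sparse: under __CHECKER__ it places the pointer in a separate address space so that direct dereferences and unannotated assignments are flagged, while for the compiler it expands to nothing. A condensed illustration of how such an annotation is typically defined and consumed (simplified; see the kernel's compiler headers for the real definitions):

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/*
 * A pointer into the guest/user address space: not to be dereferenced
 * directly, only passed on to the uaccess helpers. Sparse checks that
 * callers really pass an annotated pointer.
 */
static int store_id(unsigned short __user *uaddr, unsigned short id)
{
	(void)uaddr;
	(void)id;
	return 0;	/* the kernel version would be: put_user(id, uaddr); */
}

int main(void)
{
	unsigned short id_slot;

	/* without the annotation on &id_slot, sparse would warn here */
	return store_id(&id_slot, 42);
}
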
Signed-off-by: Heiko Carstens Acked-by: Christian Borntraeger Acked-by: Martin Schwidefsky Signed-off-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/gaccess.h | 21 +++++++++++---------- arch/s390/kvm/intercept.c | 4 ++-- arch/s390/kvm/interrupt.c | 36 ++++++++++++++++++------------------ arch/s390/kvm/priv.c | 22 +++++++++++----------- 4 files changed, 42 insertions(+), 41 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 8608d7e..302e0e5 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -18,8 +18,9 @@ #include #include "kvm-s390.h" -static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr, - int prefixing) +static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu, + void __user *gptr, + int prefixing) { unsigned long prefix = vcpu->arch.sie_block->prefix; unsigned long gaddr = (unsigned long) gptr; @@ -34,14 +35,14 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr, uaddr = gmap_fault(gaddr, vcpu->arch.gmap); if (IS_ERR_VALUE(uaddr)) uaddr = -EFAULT; - return (void *)uaddr; + return (void __user *)uaddr; } #define get_guest(vcpu, x, gptr) \ ({ \ __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ int __mask = sizeof(__typeof__(*(gptr))) - 1; \ - int __ret = PTR_RET(__uptr); \ + int __ret = PTR_RET((void __force *)__uptr); \ \ if (!__ret) { \ BUG_ON((unsigned long)__uptr & __mask); \ @@ -54,7 +55,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr, ({ \ __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ int __mask = sizeof(__typeof__(*(gptr))) - 1; \ - int __ret = PTR_RET(__uptr); \ + int __ret = PTR_RET((void __force *)__uptr); \ \ if (!__ret) { \ BUG_ON((unsigned long)__uptr & __mask); \ @@ -68,19 +69,19 @@ static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to, int to_guest, int prefixing) { unsigned long _len, rc; - void *uptr; + void __user *uptr; while (len) { - uptr = to_guest ? (void *)to : (void *)from; + uptr = to_guest ? 
(void __user *)to : (void __user *)from; uptr = __gptr_to_uptr(vcpu, uptr, prefixing); - if (IS_ERR(uptr)) + if (IS_ERR((void __force *)uptr)) return -EFAULT; _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1)); _len = min(_len, len); if (to_guest) - rc = copy_to_user(uptr, (void *)from, _len); + rc = copy_to_user((void __user *) uptr, (void *)from, _len); else - rc = copy_from_user((void *)to, uptr, _len); + rc = copy_from_user((void *)to, (void __user *)uptr, _len); if (rc) return -EFAULT; len -= _len; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 6474400..c6ba4df 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -44,7 +44,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) do { rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg], - (u64 *) useraddr); + (u64 __user *) useraddr); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; @@ -78,7 +78,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) reg = reg1; do { - rc = get_guest(vcpu, val, (u32 *) useraddr); + rc = get_guest(vcpu, val, (u32 __user *) useraddr); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index d78824b..5c94817 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -188,9 +188,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_emergency_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->emerg.code, 0); - rc = put_guest(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); + rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE); rc |= put_guest(vcpu, inti->emerg.code, - (u16 *)__LC_EXT_CPU_ADDR); + (u16 __user *)__LC_EXT_CPU_ADDR); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -201,9 +201,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_external_call++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->extcall.code, 0); - rc = put_guest(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); + rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE); rc |= put_guest(vcpu, inti->extcall.code, - (u16 *)__LC_EXT_CPU_ADDR); + (u16 __user *)__LC_EXT_CPU_ADDR); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -215,13 +215,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_service_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->ext.ext_params, 0); - rc = put_guest(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); + rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, __LC_EXT_NEW_PSW, sizeof(psw_t)); rc |= put_guest(vcpu, inti->ext.ext_params, - (u32 *)__LC_EXT_PARAMS); + (u32 __user *)__LC_EXT_PARAMS); break; case KVM_S390_INT_VIRTIO: VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", @@ -230,16 +230,16 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->ext.ext_params, inti->ext.ext_params2); - rc = put_guest(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); + rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE); + rc 
|= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, __LC_EXT_NEW_PSW, sizeof(psw_t)); rc |= put_guest(vcpu, inti->ext.ext_params, - (u32 *)__LC_EXT_PARAMS); + (u32 __user *)__LC_EXT_PARAMS); rc |= put_guest(vcpu, inti->ext.ext_params2, - (u64 *)__LC_EXT_PARAMS2); + (u64 __user *)__LC_EXT_PARAMS2); break; case KVM_S390_SIGP_STOP: VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); @@ -278,9 +278,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->pgm.code, 0); - rc = put_guest(vcpu, inti->pgm.code, (u16 *)__LC_PGM_INT_CODE); + rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE); rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14], - (u16 *)__LC_PGM_ILC); + (u16 __user *)__LC_PGM_ILC); rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -295,7 +295,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, inti->mchk.mcic); rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); - rc |= put_guest(vcpu, inti->mchk.mcic, (u64 *) __LC_MCCK_CODE); + rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE); rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -313,13 +313,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, param0, param1); rc = put_guest(vcpu, inti->io.subchannel_id, - (u16 *) __LC_SUBCHANNEL_ID); + (u16 __user *) __LC_SUBCHANNEL_ID); rc |= put_guest(vcpu, inti->io.subchannel_nr, - (u16 *) __LC_SUBCHANNEL_NR); + (u16 __user *) __LC_SUBCHANNEL_NR); rc |= put_guest(vcpu, inti->io.io_int_parm, - (u32 *) __LC_IO_INT_PARM); + (u32 __user *) __LC_IO_INT_PARM); rc |= put_guest(vcpu, inti->io.io_int_word, - (u32 *) __LC_IO_INT_WORD); + (u32 __user *) __LC_IO_INT_WORD); rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, @@ -344,7 +344,7 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) return 0; if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) return 0; - rc = put_guest(vcpu, 0x1004, (u16 *)__LC_EXT_INT_CODE); + rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d64382c..7db2ad0 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -42,7 +42,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) } /* get the value */ - if (get_guest(vcpu, address, (u32 *) operand2)) { + if (get_guest(vcpu, address, (u32 __user *) operand2)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } @@ -83,7 +83,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) address = address & 0x7fffe000u; /* get the value */ - if (put_guest(vcpu, address, (u32 *)operand2)) { + if (put_guest(vcpu, address, (u32 __user *)operand2)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } @@ -108,7 +108,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) goto out; } - rc = 
put_guest(vcpu, vcpu->vcpu_id, (u16 *)useraddr); + rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; @@ -149,18 +149,18 @@ static int handle_tpi(struct kvm_vcpu *vcpu) * Store the two-word I/O interruption code into the * provided area. */ - put_guest(vcpu, inti->io.subchannel_id, (u16 *) addr); - put_guest(vcpu, inti->io.subchannel_nr, (u16 *) (addr + 2)); - put_guest(vcpu, inti->io.io_int_parm, (u32 *) (addr + 4)); + put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr); + put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2)); + put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4)); } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ - put_guest(vcpu, inti->io.subchannel_id, (u16 *) __LC_SUBCHANNEL_ID); - put_guest(vcpu, inti->io.subchannel_nr, (u16 *) __LC_SUBCHANNEL_NR); - put_guest(vcpu, inti->io.io_int_parm, (u32 *) __LC_IO_INT_PARM); - put_guest(vcpu, inti->io.io_int_word, (u32 *) __LC_IO_INT_WORD); + put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID); + put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR); + put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM); + put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD); } kfree(inti); no_interrupt: @@ -353,7 +353,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu) goto out; } - rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 *)operand2); + rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2); if (rc) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; -- cgit v1.1 From 2cef4deb4018c02fb3cd08f76c8a988f7ddee480 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 25 Mar 2013 17:22:48 +0100 Subject: KVM: s390: Dont do a gmap update on minor memslot changes Some memslot updates dont affect the gmap implementation, e.g. setting/unsetting dirty tracking. Since a gmap update will cause tlb flushes and segment table invalidations we want to avoid that. Signed-off-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/kvm-s390.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 33161b4..f241e33 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1007,6 +1007,16 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, { int rc; + /* If the basics of the memslot do not change, we do not want + * to update the gmap. Every update causes several unnecessary + * segment translation exceptions. This is usually handled just + * fine by the normal fault handler + gmap, but it will also + * cause faults on the prefix page of running guest CPUs. + */ + if (old->userspace_addr == mem->userspace_addr && + old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && + old->npages * PAGE_SIZE == mem->memory_size) + return; rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, mem->guest_phys_addr, mem->memory_size); -- cgit v1.1 From d21683ea1f1b03823928a98b6380332b9385e3a7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:49 +0100 Subject: KVM: s390: fix 24 bit psw handling in lpsw/lpswe handler When checking for validity the lpsw/lpswe handler check that only the lower 20 bits instead of 24 bits have a non-zero value. There handling valid psws as invalid ones. Fix the 24 bit psw mask. 
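As a side note for readers of this archive, here is a minimal standalone program (not part of the patch) showing why the narrower mask misclassified legal 24 bit addresses; the two constants mirror PSW_ADDR_24 before and after the fix, everything else is made up for the demonstration.

#include <stdio.h>

#define PSW_ADDR_24_BUGGY 0x00000000000fffffUL	/* only 20 address bits */
#define PSW_ADDR_24_FIXED 0x0000000000ffffffUL	/* full 24 address bits */

int main(void)
{
	unsigned long addr = 0x00abcdefUL;	/* a legal 24 bit address */

	/* The validity checks test (addr & ~mask); non-zero means the psw
	 * is rejected.  With the 20 bit mask a legal address is rejected. */
	printf("buggy mask rejects: %d\n", (addr & ~PSW_ADDR_24_BUGGY) != 0);
	printf("fixed mask rejects: %d\n", (addr & ~PSW_ADDR_24_FIXED) != 0);
	return 0;
}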
Signed-off-by: Heiko Carstens Acked-by: Cornelia Huck Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/priv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 7db2ad0..7b397b3 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -255,7 +255,7 @@ static void handle_new_psw(struct kvm_vcpu *vcpu) #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL -#define PSW_ADDR_24 0x00000000000fffffUL +#define PSW_ADDR_24 0x0000000000ffffffUL #define PSW_ADDR_31 0x000000007fffffffUL int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) -- cgit v1.1 From ace5058763b72d128efcbe27969e89226c9c593a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:50 +0100 Subject: KVM: s390: fix psw conversion in lpsw handler When converting a 64 bit psw to a 128 bit psw the addressing mode bit of the "addr" part of the 64 bit psw must be moved to the basic addressing mode bit of the "mask" part of the 128 bit psw. In addition the addressing mode bit must be cleared when moved to the "addr" part of the 128 bit psw. Otherwise an invalid psw would be generated if the orginal psw was in the 31 bit addressing mode. Signed-off-by: Heiko Carstens Acked-by: Cornelia Huck Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/priv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 7b397b3..844a2b9 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -286,7 +286,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->gpsw.mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; - vcpu->arch.sie_block->gpsw.addr = new_psw.addr; + vcpu->arch.sie_block->gpsw.mask |= new_psw.addr & PSW32_ADDR_AMODE; + vcpu->arch.sie_block->gpsw.addr = new_psw.addr & ~PSW32_ADDR_AMODE; if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) || (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && -- cgit v1.1 From 6fd0fcc93b1eaf82911782de5c7aa35c174bf620 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:51 +0100 Subject: KVM: s390: fix return code handling in lpsw/lpswe handlers kvm_s390_inject_program_int() may return with a non-zero return value, in case of an error (out of memory). Report that to the calling functions instead of ignoring the error case. 
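To make the intent easier to see than in the unified diff, a tiny self-contained demo of the control-flow change follows; every name in it (fake_inject_program_int and the two handler variants) is a hypothetical stand-in, only the pattern matches the patch.

#include <errno.h>
#include <stdio.h>

static int fake_inject_program_int(int fail)
{
	return fail ? -ENOMEM : 0;	/* injection can fail on allocation */
}

static int handler_before(int fail)
{
	int rc = 0;

	fake_inject_program_int(fail);	/* return value silently dropped */
	return rc;			/* caller always sees 0 */
}

static int handler_after(int fail)
{
	return fake_inject_program_int(fail);	/* -ENOMEM reaches the caller */
}

int main(void)
{
	printf("before: %d, after: %d\n", handler_before(1), handler_after(1));
	return 0;
}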
Signed-off-by: Heiko Carstens Acked-by: Cornelia Huck Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/priv.c | 44 ++++++++++++++------------------------------ 1 file changed, 14 insertions(+), 30 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 844a2b9..9d32c56 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -269,20 +269,14 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) addr = kvm_s390_get_base_disp_s(vcpu); - if (addr & 7) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (addr & 7) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - if (!(new_psw.mask & PSW32_MASK_BASE)) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (!(new_psw.mask & PSW32_MASK_BASE)) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); vcpu->arch.sie_block->gpsw.mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; @@ -293,13 +287,10 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) || ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == - PSW_MASK_EA)) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + PSW_MASK_EA)) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); handle_new_psw(vcpu); -out: return 0; } @@ -310,15 +301,11 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) addr = kvm_s390_get_base_disp_s(vcpu); - if (addr & 7) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (addr & 7) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); vcpu->arch.sie_block->gpsw.mask = new_psw.mask; vcpu->arch.sie_block->gpsw.addr = new_psw.addr; @@ -330,13 +317,10 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) || ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == - PSW_MASK_EA)) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + PSW_MASK_EA)) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); handle_new_psw(vcpu); -out: return 0; } -- cgit v1.1 From 3736b874a39a1df2a94186c357aabeb6a7d7d4f6 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:52 +0100 Subject: KVM: s390: make if statements in lpsw/lpswe handlers readable Being unable to parse the 5- and 8-line if statements I had to split them to be able to make any sense of them and verify that they match the architecture. So change the code since I guess that other people will also have a hard time parsing such long conditional statements with line breaks. Introduce a common is_valid_psw() function which does all the checks needed. In case of lpsw (64 bit psw -> 128 bit psw conversion) it will do some not needed additional checks, since a couple of bits can't be set anyway, but that doesn't hurt. 
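Because the combined conditions are easy to misread, here is a restatement of what the checks accept, written as a standalone sketch; the PSW_MASK_EA and PSW_MASK_BA values are assumptions taken from the usual s390 PSW definitions rather than from this diff, and psw_addr_fits_mode() is an invented name.

#define PSW_MASK_EA	0x0000000100000000UL	/* extended addressing bit */
#define PSW_MASK_BA	0x0000000080000000UL	/* basic addressing bit */
#define PSW_ADDR_24	0x0000000000ffffffUL
#define PSW_ADDR_31	0x000000007fffffffUL

static int psw_addr_fits_mode(unsigned long mask, unsigned long addr)
{
	unsigned long mode = mask & (PSW_MASK_EA | PSW_MASK_BA);

	if (mode == (PSW_MASK_EA | PSW_MASK_BA))
		return 1;				/* 64 bit mode */
	if (mode == PSW_MASK_BA)
		return !(addr & ~PSW_ADDR_31);		/* 31 bit mode */
	if (mode == 0)
		return !(addr & ~PSW_ADDR_24);		/* 24 bit mode */
	return 0;					/* EA alone is invalid */
}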
Signed-off-by: Heiko Carstens Acked-by: Cornelia Huck Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/priv.c | 58 ++++++++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 34 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 9d32c56..05d186c 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -258,68 +258,58 @@ static void handle_new_psw(struct kvm_vcpu *vcpu) #define PSW_ADDR_24 0x0000000000ffffffUL #define PSW_ADDR_31 0x000000007fffffffUL +static int is_valid_psw(psw_t *psw) { + if (psw->mask & PSW_MASK_UNASSIGNED) + return 0; + if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { + if (psw->addr & ~PSW_ADDR_31) + return 0; + } + if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24)) + return 0; + if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) + return 0; + return 1; +} + int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) { - u64 addr; + psw_t *gpsw = &vcpu->arch.sie_block->gpsw; psw_compat_t new_psw; + u64 addr; - if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + if (gpsw->mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); - addr = kvm_s390_get_base_disp_s(vcpu); - if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - if (!(new_psw.mask & PSW32_MASK_BASE)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - - vcpu->arch.sie_block->gpsw.mask = - (new_psw.mask & ~PSW32_MASK_BASE) << 32; - vcpu->arch.sie_block->gpsw.mask |= new_psw.addr & PSW32_ADDR_AMODE; - vcpu->arch.sie_block->gpsw.addr = new_psw.addr & ~PSW32_ADDR_AMODE; - - if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) || - (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && - (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) || - ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == - PSW_MASK_EA)) + gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; + gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE; + gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; + if (!is_valid_psw(gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - handle_new_psw(vcpu); return 0; } static int handle_lpswe(struct kvm_vcpu *vcpu) { - u64 addr; psw_t new_psw; + u64 addr; addr = kvm_s390_get_base_disp_s(vcpu); - if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - - vcpu->arch.sie_block->gpsw.mask = new_psw.mask; - vcpu->arch.sie_block->gpsw.addr = new_psw.addr; - - if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) || - (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == - PSW_MASK_BA) && - (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) || - (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && - (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) || - ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == - PSW_MASK_EA)) + vcpu->arch.sie_block->gpsw = new_psw; + if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - handle_new_psw(vcpu); return 0; } -- cgit v1.1 From db4a29cb6ac7b2fda505923bdbc58fc35a719f62 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:53 +0100 Subject: KVM: s390: fix and enforce return code handling for irq 
injections kvm_s390_inject_program_int() and friends may fail if no memory is available. This must be reported to the calling functions, so that this gets passed down to user space which should fix the situation. Alternatively we end up with guest state corruption. So fix this and enforce return value checking by adding a __must_check annotation to all of these function prototypes. Signed-off-by: Heiko Carstens Acked-by: Cornelia Huck Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/intercept.c | 12 +++---- arch/s390/kvm/kvm-s390.c | 3 +- arch/s390/kvm/kvm-s390.h | 12 +++---- arch/s390/kvm/priv.c | 83 +++++++++++++++-------------------------------- 4 files changed, 37 insertions(+), 73 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index c6ba4df..b7d1b2e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -45,10 +45,8 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) do { rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg], (u64 __user *) useraddr); - if (rc) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - break; - } + if (rc) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); useraddr += 8; if (reg == reg3) break; @@ -79,10 +77,8 @@ static int handle_lctl(struct kvm_vcpu *vcpu) reg = reg1; do { rc = get_guest(vcpu, val, (u32 __user *) useraddr); - if (rc) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - break; - } + if (rc) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; vcpu->arch.sie_block->gcr[reg] |= val; useraddr += 4; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f241e33..d05a59c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -633,8 +633,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) } else { VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); trace_kvm_s390_sie_fault(vcpu); - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - rc = 0; + rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 4d89d64..efc14f6 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -110,12 +110,12 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); void kvm_s390_tasklet(unsigned long parm); void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); -int kvm_s390_inject_vm(struct kvm *kvm, - struct kvm_s390_interrupt *s390int); -int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int); -int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); -int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); +int __must_check kvm_s390_inject_vm(struct kvm *kvm, + struct kvm_s390_interrupt *s390int); +int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int); +int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); +int __must_check kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 05d186c..23a8370 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -36,31 +36,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) operand2 = 
kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ - if (operand2 & 3) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (operand2 & 3) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* get the value */ - if (get_guest(vcpu, address, (u32 __user *) operand2)) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + if (get_guest(vcpu, address, (u32 __user *) operand2)) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); address = address & 0x7fffe000u; /* make sure that the new value is valid memory */ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || - (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_set_prefix(vcpu, address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); -out: return 0; } @@ -74,49 +67,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ - if (operand2 & 3) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (operand2 & 3) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); address = vcpu->arch.sie_block->prefix; address = address & 0x7fffe000u; /* get the value */ - if (put_guest(vcpu, address, (u32 __user *)operand2)) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + if (put_guest(vcpu, address, (u32 __user *)operand2)) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 0, address); -out: return 0; } static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { u64 useraddr; - int rc; vcpu->stat.instruction_stap++; useraddr = kvm_s390_get_base_disp_s(vcpu); - if (useraddr & 1) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (useraddr & 1) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr); - if (rc) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr)) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); trace_kvm_s390_handle_stap(vcpu, useraddr); -out: return 0; } @@ -135,10 +116,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu) int cc; addr = kvm_s390_get_base_disp_s(vcpu); - if (addr & 3) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (addr & 3) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); cc = 0; inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); if (!inti) @@ -167,7 +146,6 @@ no_interrupt: /* Set condition code and we're done. 
*/ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; -out: return 0; } @@ -237,12 +215,9 @@ static int handle_stfl(struct kvm_vcpu *vcpu) rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), &facility_list, sizeof(facility_list)); if (rc) - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - else { - VCPU_EVENT(vcpu, 5, "store facility list value %x", - facility_list); - trace_kvm_s390_handle_stfl(vcpu, facility_list); - } + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list); + trace_kvm_s390_handle_stfl(vcpu, facility_list); return 0; } @@ -317,25 +292,18 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) static int handle_stidp(struct kvm_vcpu *vcpu) { u64 operand2; - int rc; vcpu->stat.instruction_stidp++; operand2 = kvm_s390_get_base_disp_s(vcpu); - if (operand2 & 7) { - kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - goto out; - } + if (operand2 & 7) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2); - if (rc) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out; - } + if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2)) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); -out: return 0; } @@ -377,6 +345,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; u64 operand2; unsigned long mem; + int rc = 0; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); @@ -412,7 +381,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) } if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out_mem; } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); @@ -425,7 +394,7 @@ out_mem: out_fail: /* condition code 3 */ vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; - return 0; + return rc; } static const intercept_handler_t b2_handlers[256] = { -- cgit v1.1 From c51f068c23c76a86d427260b8219430ee6f99516 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:54 +0100 Subject: KVM: s390: fix stsi exception handling In case of an exception the guest psw condition code should be left alone. 
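The guest condition code sits in two bits of the PSW mask, which is what the "3ul << 44" manipulation in the handlers encodes; the helper below is only an illustration of that update (set_guest_cc is an invented name), and the point of this fix is that the update is skipped altogether when a program exception is injected instead.

/* Sketch: the cc occupies two bits at position 44 of the 64 bit PSW mask. */
static inline void set_guest_cc(unsigned long *psw_mask, unsigned int cc)
{
	*psw_mask &= ~(3UL << 44);			/* clear the old cc */
	*psw_mask |= ((unsigned long)cc & 3) << 44;	/* store the new cc */
}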
Signed-off-by: Heiko Carstens Acked-By: Cornelia Huck Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/priv.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 23a8370..de1b1b6 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -343,8 +343,8 @@ static int handle_stsi(struct kvm_vcpu *vcpu) int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; + unsigned long mem = 0; u64 operand2; - unsigned long mem; int rc = 0; vcpu->stat.instruction_stsi++; @@ -364,36 +364,36 @@ static int handle_stsi(struct kvm_vcpu *vcpu) case 2: mem = get_zeroed_page(GFP_KERNEL); if (!mem) - goto out_fail; + goto out_no_data; if (stsi((void *) mem, fc, sel1, sel2)) - goto out_mem; + goto out_no_data; break; case 3: if (sel1 != 2 || sel2 != 2) - goto out_fail; + goto out_no_data; mem = get_zeroed_page(GFP_KERNEL); if (!mem) - goto out_fail; + goto out_no_data; handle_stsi_3_2_2(vcpu, (void *) mem); break; default: - goto out_fail; + goto out_no_data; } if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - goto out_mem; + goto out_exception; } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); free_page(mem); vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->run->s.regs.gprs[0] = 0; return 0; -out_mem: - free_page(mem); -out_fail: +out_no_data: /* condition code 3 */ vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; +out_exception: + free_page(mem); return rc; } -- cgit v1.1 From b13b5dc7c96d40ebdadbdb752a92ecde5a9f2914 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 25 Mar 2013 17:22:55 +0100 Subject: KVM: s390: fix compile with !CONFIG_COMPAT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/s390/kvm/priv.c should include both linux/compat.h and asm/compat.h. Fixes this one: In file included from arch/s390/kvm/priv.c:23:0: arch/s390/include/asm/compat.h: In function ‘arch_compat_alloc_user_space’: arch/s390/include/asm/compat.h:258:2: error: implicit declaration of function ‘is_compat_task’ Signed-off-by: Heiko Carstens Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/priv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index de1b1b6..6bbd7b5 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include -- cgit v1.1 From dd2887e7c36d0be986ef17a9dbec904e3e334566 Mon Sep 17 00:00:00 2001 From: Nick Wang Date: Mon, 25 Mar 2013 17:22:57 +0100 Subject: KVM: s390: Remove the sanity checks for kvm memory slot To model the standby memory with memory_region_add_subregion and friends, the guest would have one or more regions of ram. Remove the check allowing only one memory slot and the check requiring the real address of memory slot starts at zero. 
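Expressed as a standalone sketch, the checks that remain after this patch only require the slot to be aligned to the 1 MB segment size (the 0xfffff mask in the code); memslot_segment_aligned() and SEGMENT_SIZE are invented names for the illustration.

#include <stdbool.h>

#define SEGMENT_SIZE (1UL << 20)	/* 1 MB, i.e. the 0xfffff mask */

static bool memslot_segment_aligned(unsigned long userspace_addr,
				    unsigned long memory_size)
{
	/* the start address in userland and the size must sit on segment
	 * boundaries; fragmentation into several vmas stays allowed */
	return !(userspace_addr & (SEGMENT_SIZE - 1)) &&
	       !(memory_size & (SEGMENT_SIZE - 1));
}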
Signed-off-by: Nick Wang Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/kvm-s390.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d05a59c..b322ff1 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -977,18 +977,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { - /* A few sanity checks. We can have exactly one memory slot which has - to start at guest virtual zero and which has to be located at a - page boundary in userland and which has to end at a page boundary. - The memory in userland is ok to be fragmented into various different - vmas. It is okay to mmap() and munmap() stuff in this slot after - doing this call at any time */ - - if (mem->slot) - return -EINVAL; - - if (mem->guest_phys_addr) - return -EINVAL; + /* A few sanity checks. We can have memory slots which have to be + located/ended at a segment boundary (1MB). The memory in userland is + ok to be fragmented into various different vmas. It is okay to mmap() + and munmap() stuff in this slot after doing this call at any time */ if (mem->userspace_addr & 0xffffful) return -EINVAL; -- cgit v1.1 From e1e2e605c2ad6791ce6346b22443ce611709fa65 Mon Sep 17 00:00:00 2001 From: Nick Wang Date: Mon, 25 Mar 2013 17:22:58 +0100 Subject: KVM: s390: Enable KVM_CAP_NR_MEMSLOTS on s390 Return KVM_USER_MEM_SLOTS in kvm_dev_ioctl_check_extension(). Signed-off-by: Nick Wang Reviewed-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov --- arch/s390/kvm/kvm-s390.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b322ff1..c1c7c68 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -149,6 +149,9 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; case KVM_CAP_S390_COW: r = MACHINE_HAS_ESOP; break; -- cgit v1.1
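To close out the memslot series, a small userspace example (an assumed demo program, not part of the patch) shows how the newly advertised capability is consumed; on a kernel with this change the ioctl returns KVM_USER_MEM_SLOTS, while older kernels report 0 for unknown capabilities.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* KVM_CHECK_EXTENSION returns the capability value, 0 if unknown. */
	printf("user memory slots supported: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS));
	return 0;
}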