author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-05 14:47:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-05 14:47:31 -0700
commit     01227a889ed56ae53aeebb9f93be9d54dd8b2de8 (patch)
tree       d5eba9359a9827e84d4112b84d48c54df5c5acde /arch/s390/kvm
parent     9e6879460c8edb0cd3c24c09b83d06541b5af0dc (diff)
parent     db6ae6158186a17165ef990bda2895ae7594b039 (diff)
Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Gleb Natapov:
 "Highlights of the updates are:

  general:
   - new emulated device API
   - legacy device assignment is now optional
   - irqfd interface is more generic and can be shared between arches

  x86:
   - VMCS shadow support and other nested VMX improvements
   - APIC virtualization and Posted Interrupt hardware support
   - Optimize mmio spte zapping

  ppc:
   - BookE: in-kernel MPIC emulation with irqfd support
   - Book3S: in-kernel XICS emulation (incomplete)
   - Book3S: HV: migration fixes
   - BookE: more debug support preparation
   - BookE: e6500 support

  ARM:
   - reworking of Hyp idmaps

  s390:
   - ioeventfd for virtio-ccw

  And many other bug fixes, cleanups and improvements"

* tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
  kvm: Add compat_ioctl for device control API
  KVM: x86: Account for failing enable_irq_window for NMI window request
  KVM: PPC: Book3S: Add API for in-kernel XICS emulation
  kvm/ppc/mpic: fix missing unlock in set_base_addr()
  kvm/ppc: Hold srcu lock when calling kvm_io_bus_read/write
  kvm/ppc/mpic: remove users
  kvm/ppc/mpic: fix mmio region lists when multiple guests used
  kvm/ppc/mpic: remove default routes from documentation
  kvm: KVM_CAP_IOMMU only available with device assignment
  ARM: KVM: iterate over all CPUs for CPU compatibility check
  KVM: ARM: Fix spelling in error message
  ARM: KVM: define KVM_ARM_MAX_VCPUS unconditionally
  KVM: ARM: Fix API documentation for ONE_REG encoding
  ARM: KVM: promote vfp_host pointer to generic host cpu context
  ARM: KVM: add architecture specific hook for capabilities
  ARM: KVM: perform HYP initilization for hotplugged CPUs
  ARM: KVM: switch to a dual-step HYP init code
  ARM: KVM: rework HYP page table freeing
  ARM: KVM: enforce maximum size for identity mapped code
  ARM: KVM: move to a KVM provided HYP idmap
  ...
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/Kconfig     |   1
-rw-r--r--  arch/s390/kvm/Makefile    |   2
-rw-r--r--  arch/s390/kvm/diag.c      |  26
-rw-r--r--  arch/s390/kvm/gaccess.h   | 429
-rw-r--r--  arch/s390/kvm/intercept.c |  18
-rw-r--r--  arch/s390/kvm/interrupt.c | 245
-rw-r--r--  arch/s390/kvm/kvm-s390.c  |  43
-rw-r--r--  arch/s390/kvm/kvm-s390.h  |  12
-rw-r--r--  arch/s390/kvm/priv.c      | 270
9 files changed, 318 insertions(+), 728 deletions(-)
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 60f9f8a..70b46ea 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_EVENTFD
---help---
Support hosting paravirtualized guest machines using the SIE
virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 3975722..8fe9d65 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -6,7 +6,7 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o eventfd.o)
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a390687..1c01a99 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -13,6 +13,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
@@ -104,6 +105,29 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
return -EREMOTE;
}
+static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
+{
+ int ret, idx;
+
+ /* No virtio-ccw notification? Get out quickly. */
+ if (!vcpu->kvm->arch.css_support ||
+ (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
+ return -EOPNOTSUPP;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ /*
+ * The layout is as follows:
+ * - gpr 2 contains the subchannel id (passed as addr)
+ * - gpr 3 contains the virtqueue index (passed as datamatch)
+ */
+ ret = kvm_io_bus_write(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+ vcpu->run->s.regs.gprs[2],
+ 8, &vcpu->run->s.regs.gprs[3]);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ /* kvm_io_bus_write returns -EOPNOTSUPP if it found no match. */
+ return ret < 0 ? ret : 0;
+}
+
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
@@ -118,6 +142,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
return __diag_time_slice_end_directed(vcpu);
case 0x308:
return __diag_ipl_functions(vcpu);
+ case 0x500:
+ return __diag_virtio_hypercall(vcpu);
default:
return -EOPNOTSUPP;
}
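
[Editor's note] For context, a guest-side sketch of how this diag 0x500 hypercall is issued, modeled on the virtio-ccw guest driver. The register usage follows the comment in __diag_virtio_hypercall() above; the subcode value 3 for KVM_S390_VIRTIO_CCW_NOTIFY is assumed from <asm/virtio-ccw.h>, not shown in this diff.

/*
 * Sketch only: issue the diag 0x500 virtio-ccw notify from a guest.
 * gpr 1 = subcode, gpr 2 = subchannel id, gpr 3 = virtqueue index,
 * exactly as the handler above reads them from vcpu->run->s.regs.
 */
static inline long guest_virtio_ccw_notify(unsigned long schid,
					   unsigned long vq_index)
{
	register unsigned long nr asm("1") = 3; /* KVM_S390_VIRTIO_CCW_NOTIFY, assumed */
	register unsigned long sch asm("2") = schid;
	register unsigned long idx asm("3") = vq_index;
	register long rc asm("2");

	asm volatile("diag 2,4,0x500\n"
		     : "=d" (rc)
		     : "d" (nr), "d" (sch), "d" (idx)
		     : "memory", "cc");
	return rc;
}
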
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4703f12..302e0e5 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,369 +18,86 @@
#include <asm/uaccess.h>
#include "kvm-s390.h"
-static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
- unsigned long guestaddr)
+static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
+ void __user *gptr,
+ int prefixing)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
-
- if (guestaddr < 2 * PAGE_SIZE)
- guestaddr += prefix;
- else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
- guestaddr -= prefix;
-
- return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
-}
-
-static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u64 *result)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- BUG_ON(guestaddr & 7);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return get_user(*result, (unsigned long __user *) uptr);
-}
-
-static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u32 *result)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- BUG_ON(guestaddr & 3);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return get_user(*result, (u32 __user *) uptr);
-}
-
-static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u16 *result)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- BUG_ON(guestaddr & 1);
-
- if (IS_ERR(uptr))
- return PTR_ERR(uptr);
-
- return get_user(*result, (u16 __user *) uptr);
-}
-
-static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u8 *result)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return get_user(*result, (u8 __user *) uptr);
-}
-
-static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u64 value)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- BUG_ON(guestaddr & 7);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return put_user(value, (u64 __user *) uptr);
-}
-
-static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u32 value)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- BUG_ON(guestaddr & 3);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return put_user(value, (u32 __user *) uptr);
-}
-
-static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u16 value)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- BUG_ON(guestaddr & 1);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return put_user(value, (u16 __user *) uptr);
-}
-
-static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
- u8 value)
-{
- void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- return put_user(value, (u8 __user *) uptr);
-}
-
-
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
- unsigned long guestdest,
- void *from, unsigned long n)
-{
- int rc;
- unsigned long i;
- u8 *data = from;
-
- for (i = 0; i < n; i++) {
- rc = put_guest_u8(vcpu, guestdest++, *(data++));
- if (rc < 0)
- return rc;
+ unsigned long gaddr = (unsigned long) gptr;
+ unsigned long uaddr;
+
+ if (prefixing) {
+ if (gaddr < 2 * PAGE_SIZE)
+ gaddr += prefix;
+ else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
+ gaddr -= prefix;
}
- return 0;
-}
-
-static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
- unsigned long guestdest,
- void *from, unsigned long n)
-{
- int r;
+ uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
+ if (IS_ERR_VALUE(uaddr))
+ uaddr = -EFAULT;
+ return (void __user *)uaddr;
+}
+
+#define get_guest(vcpu, x, gptr) \
+({ \
+ __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
+ int __mask = sizeof(__typeof__(*(gptr))) - 1; \
+ int __ret = PTR_RET((void __force *)__uptr); \
+ \
+ if (!__ret) { \
+ BUG_ON((unsigned long)__uptr & __mask); \
+ __ret = get_user(x, __uptr); \
+ } \
+ __ret; \
+})
+
+#define put_guest(vcpu, x, gptr) \
+({ \
+ __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
+ int __mask = sizeof(__typeof__(*(gptr))) - 1; \
+ int __ret = PTR_RET((void __force *)__uptr); \
+ \
+ if (!__ret) { \
+ BUG_ON((unsigned long)__uptr & __mask); \
+ __ret = put_user(x, __uptr); \
+ } \
+ __ret; \
+})
+
+static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
+ unsigned long from, unsigned long len,
+ int to_guest, int prefixing)
+{
+ unsigned long _len, rc;
void __user *uptr;
- unsigned long size;
-
- if (guestdest + n < guestdest)
- return -EFAULT;
-
- /* simple case: all within one segment table entry? */
- if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
- uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- r = copy_to_user(uptr, from, n);
-
- if (r)
- r = -EFAULT;
-
- goto out;
- }
-
- /* copy first segment */
- uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
- size = PMD_SIZE - (guestdest & ~PMD_MASK);
-
- r = copy_to_user(uptr, from, size);
-
- if (r) {
- r = -EFAULT;
- goto out;
- }
- from += size;
- n -= size;
- guestdest += size;
-
- /* copy full segments */
- while (n >= PMD_SIZE) {
- uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- r = copy_to_user(uptr, from, PMD_SIZE);
-
- if (r) {
- r = -EFAULT;
- goto out;
- }
- from += PMD_SIZE;
- n -= PMD_SIZE;
- guestdest += PMD_SIZE;
- }
-
- /* copy the tail segment */
- if (n) {
- uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- r = copy_to_user(uptr, from, n);
-
- if (r)
- r = -EFAULT;
- }
-out:
- return r;
-}
-
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
- unsigned long guestdest,
- void *from, unsigned long n)
-{
- return __copy_to_guest_fast(vcpu, guestdest, from, n);
-}
-
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
- void *from, unsigned long n)
-{
- unsigned long prefix = vcpu->arch.sie_block->prefix;
-
- if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
- goto slowpath;
-
- if ((guestdest < prefix) && (guestdest + n > prefix))
- goto slowpath;
-
- if ((guestdest < prefix + 2 * PAGE_SIZE)
- && (guestdest + n > prefix + 2 * PAGE_SIZE))
- goto slowpath;
-
- if (guestdest < 2 * PAGE_SIZE)
- guestdest += prefix;
- else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
- guestdest -= prefix;
-
- return __copy_to_guest_fast(vcpu, guestdest, from, n);
-slowpath:
- return __copy_to_guest_slow(vcpu, guestdest, from, n);
-}
-
-static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
- unsigned long guestsrc,
- unsigned long n)
-{
- int rc;
- unsigned long i;
- u8 *data = to;
-
- for (i = 0; i < n; i++) {
- rc = get_guest_u8(vcpu, guestsrc++, data++);
- if (rc < 0)
- return rc;
+ while (len) {
+ uptr = to_guest ? (void __user *)to : (void __user *)from;
+ uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
+ if (IS_ERR((void __force *)uptr))
+ return -EFAULT;
+ _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
+ _len = min(_len, len);
+ if (to_guest)
+ rc = copy_to_user((void __user *) uptr, (void *)from, _len);
+ else
+ rc = copy_from_user((void *)to, (void __user *)uptr, _len);
+ if (rc)
+ return -EFAULT;
+ len -= _len;
+ from += _len;
+ to += _len;
}
return 0;
}
-static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
- unsigned long guestsrc,
- unsigned long n)
-{
- int r;
- void __user *uptr;
- unsigned long size;
-
- if (guestsrc + n < guestsrc)
- return -EFAULT;
-
- /* simple case: all within one segment table entry? */
- if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
- uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- r = copy_from_user(to, uptr, n);
-
- if (r)
- r = -EFAULT;
-
- goto out;
- }
-
- /* copy first segment */
- uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- size = PMD_SIZE - (guestsrc & ~PMD_MASK);
-
- r = copy_from_user(to, uptr, size);
-
- if (r) {
- r = -EFAULT;
- goto out;
- }
- to += size;
- n -= size;
- guestsrc += size;
-
- /* copy full segments */
- while (n >= PMD_SIZE) {
- uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- r = copy_from_user(to, uptr, PMD_SIZE);
-
- if (r) {
- r = -EFAULT;
- goto out;
- }
- to += PMD_SIZE;
- n -= PMD_SIZE;
- guestsrc += PMD_SIZE;
- }
-
- /* copy the tail segment */
- if (n) {
- uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
- if (IS_ERR((void __force *) uptr))
- return PTR_ERR((void __force *) uptr);
-
- r = copy_from_user(to, uptr, n);
-
- if (r)
- r = -EFAULT;
- }
-out:
- return r;
-}
-
-static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
- unsigned long guestsrc,
- unsigned long n)
-{
- return __copy_from_guest_fast(vcpu, to, guestsrc, n);
-}
-
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
- unsigned long guestsrc, unsigned long n)
-{
- unsigned long prefix = vcpu->arch.sie_block->prefix;
-
- if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
- goto slowpath;
+#define copy_to_guest(vcpu, to, from, size) \
+ __copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
+#define copy_from_guest(vcpu, to, from, size) \
+ __copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
+#define copy_to_guest_absolute(vcpu, to, from, size) \
+ __copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
+#define copy_from_guest_absolute(vcpu, to, from, size) \
+ __copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
- if ((guestsrc < prefix) && (guestsrc + n > prefix))
- goto slowpath;
-
- if ((guestsrc < prefix + 2 * PAGE_SIZE)
- && (guestsrc + n > prefix + 2 * PAGE_SIZE))
- goto slowpath;
-
- if (guestsrc < 2 * PAGE_SIZE)
- guestsrc += prefix;
- else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
- guestsrc -= prefix;
-
- return __copy_from_guest_fast(vcpu, to, guestsrc, n);
-slowpath:
- return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
-#endif
+#endif /* __KVM_S390_GACCESS_H */
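
[Editor's note] A minimal usage sketch for the new accessors (not part of the patch): the __user pointer's pointee type now selects the access size and alignment check, collapsing the old get_guest_u16/u32/u64 family into one macro pair. The function name is hypothetical and gaddr stands in for any word-aligned guest real address.

/* Sketch, assuming gaddr is a 4-byte-aligned guest real address. */
static int example_accessor_usage(struct kvm_vcpu *vcpu, u64 gaddr)
{
	u32 val;

	/* 4-byte read: size and the BUG_ON alignment mask come from the cast */
	if (get_guest(vcpu, val, (u32 __user *) gaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/* 2-byte write of the low halfword back to the same address */
	if (put_guest(vcpu, (u16) val, (u16 __user *) gaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	return 0;
}
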
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f26ff1e..b7d1b2e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -43,12 +43,10 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
do {
- rc = get_guest_u64(vcpu, useraddr,
- &vcpu->arch.sie_block->gcr[reg]);
- if (rc == -EFAULT) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- break;
- }
+ rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
+ (u64 __user *) useraddr);
+ if (rc)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
useraddr += 8;
if (reg == reg3)
break;
@@ -78,11 +76,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
reg = reg1;
do {
- rc = get_guest_u32(vcpu, useraddr, &val);
- if (rc == -EFAULT) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- break;
- }
+ rc = get_guest(vcpu, val, (u32 __user *) useraddr);
+ if (rc)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
vcpu->arch.sie_block->gcr[reg] |= val;
useraddr += 4;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 37116a7..5c94817 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -180,7 +180,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
const unsigned short table[] = { 2, 4, 4, 6 };
- int rc, exception = 0;
+ int rc = 0;
switch (inti->type) {
case KVM_S390_INT_EMERGENCY:
@@ -188,74 +188,41 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_emergency_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->emerg.code, 0);
- rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_EXT_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
+ rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= put_guest(vcpu, inti->emerg.code,
+ (u16 __user *)__LC_EXT_CPU_ADDR);
+ rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
break;
-
case KVM_S390_INT_EXTERNAL_CALL:
VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
vcpu->stat.deliver_external_call++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->extcall.code, 0);
- rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_EXT_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
+ rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= put_guest(vcpu, inti->extcall.code,
+ (u16 __user *)__LC_EXT_CPU_ADDR);
+ rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
break;
-
case KVM_S390_INT_SERVICE:
VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
inti->ext.ext_params);
vcpu->stat.deliver_service_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->ext.ext_params, 0);
- rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_EXT_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
- if (rc == -EFAULT)
- exception = 1;
+ rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ rc |= put_guest(vcpu, inti->ext.ext_params,
+ (u32 __user *)__LC_EXT_PARAMS);
break;
-
case KVM_S390_INT_VIRTIO:
VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
inti->ext.ext_params, inti->ext.ext_params2);
@@ -263,34 +230,17 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->ext.ext_params,
inti->ext.ext_params2);
- rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_EXT_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
- inti->ext.ext_params2);
- if (rc == -EFAULT)
- exception = 1;
+ rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
+ rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ rc |= put_guest(vcpu, inti->ext.ext_params,
+ (u32 __user *)__LC_EXT_PARAMS);
+ rc |= put_guest(vcpu, inti->ext.ext_params2,
+ (u64 __user *)__LC_EXT_PARAMS2);
break;
-
case KVM_S390_SIGP_STOP:
VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
vcpu->stat.deliver_stop_signal++;
@@ -313,18 +263,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_restart_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
0, 0);
- rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
- restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
+ rc = copy_to_guest(vcpu,
+ offsetof(struct _lowcore, restart_old_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ offsetof(struct _lowcore, restart_psw),
+ sizeof(psw_t));
atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
break;
-
case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
inti->pgm.code,
@@ -332,24 +278,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_program_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->pgm.code, 0);
- rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u16(vcpu, __LC_PGM_ILC,
- table[vcpu->arch.sie_block->ipa >> 14]);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_PGM_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
+ rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
+ rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
+ (u16 __user *)__LC_PGM_ILC);
+ rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_PGM_NEW_PSW, sizeof(psw_t));
break;
case KVM_S390_MCHK:
@@ -358,24 +293,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->mchk.cr14,
inti->mchk.mcic);
- rc = kvm_s390_vcpu_store_status(vcpu,
- KVM_S390_STORE_STATUS_PREFIXED);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_MCK_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
+ rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_MCK_NEW_PSW, sizeof(psw_t));
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -388,67 +312,44 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_io_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
param0, param1);
- rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
- inti->io.subchannel_id);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
- inti->io.subchannel_nr);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
- inti->io.io_int_parm);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
- inti->io.io_int_word);
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
-
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_IO_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
+ rc = put_guest(vcpu, inti->io.subchannel_id,
+ (u16 __user *) __LC_SUBCHANNEL_ID);
+ rc |= put_guest(vcpu, inti->io.subchannel_nr,
+ (u16 __user *) __LC_SUBCHANNEL_NR);
+ rc |= put_guest(vcpu, inti->io.io_int_parm,
+ (u32 __user *) __LC_IO_INT_PARM);
+ rc |= put_guest(vcpu, inti->io.io_int_word,
+ (u32 __user *) __LC_IO_INT_WORD);
+ rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_IO_NEW_PSW, sizeof(psw_t));
break;
}
default:
BUG();
}
- if (exception) {
+ if (rc) {
printk("kvm: The guest lowcore is not mapped during interrupt "
- "delivery, killing userspace\n");
+ "delivery, killing userspace\n");
do_exit(SIGKILL);
}
}
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
- int rc, exception = 0;
+ int rc;
if (psw_extint_disabled(vcpu))
return 0;
if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
return 0;
- rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
- if (rc == -EFAULT)
- exception = 1;
- rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
- rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
- __LC_EXT_NEW_PSW, sizeof(psw_t));
- if (rc == -EFAULT)
- exception = 1;
- if (exception) {
+ rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc) {
printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n");
do_exit(SIGKILL);
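
[Editor's note] The conversion above leans on one property: every accessor returns either 0 or -EFAULT, so results can be OR-ed together and tested once, replacing the per-call "exception = 1" bookkeeping. OR-ing different negative errnos would produce a garbage value, but here any nonzero rc simply means "at least one access faulted". A condensed, hypothetical sketch of the pattern:

/* Sketch: accumulate 0/-EFAULT results and check once at the end. */
static int deliver_ext_interrupt_sketch(struct kvm_vcpu *vcpu, u16 code)
{
	int rc;

	rc = put_guest(vcpu, code, (u16 __user *) __LC_EXT_INT_CODE);
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	return rc;	/* 0, or -EFAULT if any access faulted */
}
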
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4cf35a0..c1c7c68 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -142,12 +142,16 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_S390_CSS_SUPPORT:
+ case KVM_CAP_IOEVENTFD:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_USER_MEM_SLOTS;
+ break;
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
@@ -632,8 +636,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
} else {
VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
trace_kvm_s390_sie_fault(vcpu);
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- rc = 0;
+ rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
@@ -974,22 +977,13 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- bool user_alloc)
+ enum kvm_mr_change change)
{
- /* A few sanity checks. We can have exactly one memory slot which has
- to start at guest virtual zero and which has to be located at a
- page boundary in userland and which has to end at a page boundary.
- The memory in userland is ok to be fragmented into various different
- vmas. It is okay to mmap() and munmap() stuff in this slot after
- doing this call at any time */
-
- if (mem->slot)
- return -EINVAL;
-
- if (mem->guest_phys_addr)
- return -EINVAL;
+ /* A few sanity checks. We can have memory slots which have to be
+ located/ended at a segment boundary (1MB). The memory in userland is
+ ok to be fragmented into various different vmas. It is okay to mmap()
+ and munmap() stuff in this slot after doing this call at any time */
if (mem->userspace_addr & 0xffffful)
return -EINVAL;
@@ -997,19 +991,26 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->memory_size & 0xffffful)
return -EINVAL;
- if (!user_alloc)
- return -EINVAL;
-
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- bool user_alloc)
+ const struct kvm_memory_slot *old,
+ enum kvm_mr_change change)
{
int rc;
+ /* If the basics of the memslot do not change, we do not want
+ * to update the gmap. Every update causes several unnecessary
+ * segment translation exceptions. This is usually handled just
+ * fine by the normal fault handler + gmap, but it will also
+ * cause faults on the prefix page of running guest CPUs.
+ */
+ if (old->userspace_addr == mem->userspace_addr &&
+ old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
+ old->npages * PAGE_SIZE == mem->memory_size)
+ return;
rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
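
[Editor's note] With KVM_CAP_IOEVENTFD now advertised, userspace can wire the diag 0x500 notify path from diag.c to an eventfd so the hypercall completes in the kernel. A hedged userspace sketch; the virtio-ccw notify flag name is assumed from the uapi headers of this same series.

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: attach an eventfd that fires when the guest notifies
 * (schid, vq_index); addr/datamatch mirror gprs 2 and 3 as written
 * by kvm_io_bus_write() in __diag_virtio_hypercall(). */
static int attach_ccw_notify(int vm_fd, __u64 schid, __u64 vq_index)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	struct kvm_ioeventfd ioe;

	if (efd < 0)
		return -1;
	memset(&ioe, 0, sizeof(ioe));
	ioe.addr = schid;
	ioe.len = 8;			/* matches the 8-byte bus write */
	ioe.datamatch = vq_index;
	ioe.fd = efd;
	ioe.flags = KVM_IOEVENTFD_FLAG_DATAMATCH |
		    KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY; /* assumed flag */
	return ioctl(vm_fd, KVM_IOEVENTFD, &ioe) ? -1 : efd;
}
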
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 4d89d64..efc14f6 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -110,12 +110,12 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
-int kvm_s390_inject_vm(struct kvm *kvm,
- struct kvm_s390_interrupt *s390int);
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt *s390int);
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
-int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+int __must_check kvm_s390_inject_vm(struct kvm *kvm,
+ struct kvm_s390_interrupt *s390int);
+int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt *s390int);
+int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+int __must_check kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0ef9894..6bbd7b5 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -14,6 +14,8 @@
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
+#include <linux/compat.h>
+#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
@@ -35,31 +37,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */
- if (operand2 & 3) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
+ if (operand2 & 3)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* get the value */
- if (get_guest_u32(vcpu, operand2, &address)) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
+ if (get_guest(vcpu, address, (u32 __user *) operand2))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
address = address & 0x7fffe000u;
/* make sure that the new value is valid memory */
if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
- (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
+ (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
kvm_s390_set_prefix(vcpu, address);
VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
trace_kvm_s390_handle_prefix(vcpu, 1, address);
-out:
return 0;
}
@@ -73,49 +68,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */
- if (operand2 & 3) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
+ if (operand2 & 3)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
address = vcpu->arch.sie_block->prefix;
address = address & 0x7fffe000u;
/* get the value */
- if (put_guest_u32(vcpu, operand2, address)) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
+ if (put_guest(vcpu, address, (u32 __user *)operand2))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
trace_kvm_s390_handle_prefix(vcpu, 0, address);
-out:
return 0;
}
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
u64 useraddr;
- int rc;
vcpu->stat.instruction_stap++;
useraddr = kvm_s390_get_base_disp_s(vcpu);
- if (useraddr & 1) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
+ if (useraddr & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
- if (rc == -EFAULT) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
+ if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
trace_kvm_s390_handle_stap(vcpu, useraddr);
-out:
return 0;
}
@@ -129,36 +112,38 @@ static int handle_skey(struct kvm_vcpu *vcpu)
static int handle_tpi(struct kvm_vcpu *vcpu)
{
- u64 addr;
struct kvm_s390_interrupt_info *inti;
+ u64 addr;
int cc;
addr = kvm_s390_get_base_disp_s(vcpu);
-
+ if (addr & 3)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ cc = 0;
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
- if (inti) {
- if (addr) {
- /*
- * Store the two-word I/O interruption code into the
- * provided area.
- */
- put_guest_u16(vcpu, addr, inti->io.subchannel_id);
- put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
- put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
- } else {
- /*
- * Store the three-word I/O interruption code into
- * the appropriate lowcore area.
- */
- put_guest_u16(vcpu, 184, inti->io.subchannel_id);
- put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
- put_guest_u32(vcpu, 188, inti->io.io_int_parm);
- put_guest_u32(vcpu, 192, inti->io.io_int_word);
- }
- cc = 1;
- } else
- cc = 0;
+ if (!inti)
+ goto no_interrupt;
+ cc = 1;
+ if (addr) {
+ /*
+ * Store the two-word I/O interruption code into the
+ * provided area.
+ */
+ put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
+ put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
+ put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
+ } else {
+ /*
+ * Store the three-word I/O interruption code into
+ * the appropriate lowcore area.
+ */
+ put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
+ put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
+ put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
+ put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
+ }
kfree(inti);
+no_interrupt:
/* Set condition code and we're done. */
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
@@ -230,13 +215,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list));
- if (rc == -EFAULT)
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- else {
- VCPU_EVENT(vcpu, 5, "store facility list value %x",
- facility_list);
- trace_kvm_s390_handle_stfl(vcpu, facility_list);
- }
+ if (rc)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
+ trace_kvm_s390_handle_stfl(vcpu, facility_list);
return 0;
}
@@ -249,112 +231,80 @@ static void handle_new_psw(struct kvm_vcpu *vcpu)
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
-#define PSW_ADDR_24 0x00000000000fffffUL
+#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL
+static int is_valid_psw(psw_t *psw) {
+ if (psw->mask & PSW_MASK_UNASSIGNED)
+ return 0;
+ if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
+ if (psw->addr & ~PSW_ADDR_31)
+ return 0;
+ }
+ if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
+ return 0;
+ if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
+ return 0;
+ return 1;
+}
+
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
- u64 addr;
+ psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
psw_compat_t new_psw;
+ u64 addr;
- if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu,
PGM_PRIVILEGED_OPERATION);
-
addr = kvm_s390_get_base_disp_s(vcpu);
-
- if (addr & 7) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
-
- if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
-
- if (!(new_psw.mask & PSW32_MASK_BASE)) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
-
- vcpu->arch.sie_block->gpsw.mask =
- (new_psw.mask & ~PSW32_MASK_BASE) << 32;
- vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
-
- if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
- (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
- (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
- ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
- PSW_MASK_EA)) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
-
+ if (addr & 7)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (!(new_psw.mask & PSW32_MASK_BASE))
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
+ gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
+ gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
+ if (!is_valid_psw(gpsw))
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
handle_new_psw(vcpu);
-out:
return 0;
}
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
- u64 addr;
psw_t new_psw;
+ u64 addr;
addr = kvm_s390_get_base_disp_s(vcpu);
-
- if (addr & 7) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
-
- if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
-
- vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
- vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
-
- if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
- (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
- PSW_MASK_BA) &&
- (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
- (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
- (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
- ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
- PSW_MASK_EA)) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
-
+ if (addr & 7)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ vcpu->arch.sie_block->gpsw = new_psw;
+ if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
handle_new_psw(vcpu);
-out:
return 0;
}
static int handle_stidp(struct kvm_vcpu *vcpu)
{
u64 operand2;
- int rc;
vcpu->stat.instruction_stidp++;
operand2 = kvm_s390_get_base_disp_s(vcpu);
- if (operand2 & 7) {
- kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- goto out;
- }
+ if (operand2 & 7)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
- if (rc == -EFAULT) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out;
- }
+ if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
-out:
return 0;
}
@@ -394,8 +344,9 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
+ unsigned long mem = 0;
u64 operand2;
- unsigned long mem;
+ int rc = 0;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -414,37 +365,37 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
case 2:
mem = get_zeroed_page(GFP_KERNEL);
if (!mem)
- goto out_fail;
+ goto out_no_data;
if (stsi((void *) mem, fc, sel1, sel2))
- goto out_mem;
+ goto out_no_data;
break;
case 3:
if (sel1 != 2 || sel2 != 2)
- goto out_fail;
+ goto out_no_data;
mem = get_zeroed_page(GFP_KERNEL);
if (!mem)
- goto out_fail;
+ goto out_no_data;
handle_stsi_3_2_2(vcpu, (void *) mem);
break;
default:
- goto out_fail;
+ goto out_no_data;
}
if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
- kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- goto out_mem;
+ rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out_exception;
}
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
vcpu->run->s.regs.gprs[0] = 0;
return 0;
-out_mem:
- free_page(mem);
-out_fail:
+out_no_data:
/* condition code 3 */
vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
- return 0;
+out_exception:
+ free_page(mem);
+ return rc;
}
static const intercept_handler_t b2_handlers[256] = {
@@ -575,20 +526,13 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
return -EOPNOTSUPP;
-
- /* we must resolve the address without holding the mmap semaphore.
- * This is ok since the userspace hypervisor is not supposed to change
- * the mapping while the guest queries the memory. Otherwise the guest
- * might crash or get wrong info anyway. */
- user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
-
down_read(&current->mm->mmap_sem);
+ user_address = __gmap_translate(address1, vcpu->arch.gmap);
+ if (IS_ERR_VALUE(user_address))
+ goto out_inject;
vma = find_vma(current->mm, user_address);
- if (!vma) {
- up_read(&current->mm->mmap_sem);
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
-
+ if (!vma)
+ goto out_inject;
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
@@ -597,6 +541,10 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
up_read(&current->mm->mmap_sem);
return 0;
+
+out_inject:
+ up_read(&current->mm->mmap_sem);
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
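
[Editor's note] The PSW checks that handle_lpsw()/handle_lpswe() now share can be exercised standalone. A sketch re-deriving the masks locally so it compiles outside the kernel: PSW_MASK_UNASSIGNED, PSW_ADDR_24 and PSW_ADDR_31 mirror the defines in the hunk above, while the EA/BA bit values are assumed from <asm/ptrace.h>.

#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_EA		0x0000000100000000ULL	/* assumed */
#define PSW_MASK_BA		0x0000000080000000ULL	/* assumed */
#define PSW_MASK_ADDR_MODE	(PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED	0xb80800fe7fffffffULL
#define PSW_ADDR_24		0x0000000000ffffffULL
#define PSW_ADDR_31		0x000000007fffffffULL

struct psw { uint64_t mask, addr; };

/* Mirrors is_valid_psw() from the priv.c hunk above. */
static int is_valid_psw(const struct psw *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA &&
	    (psw->addr & ~PSW_ADDR_31))
		return 0;	/* 31-bit mode, address too wide */
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;	/* 24-bit mode, address too wide */
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;	/* EA without BA is invalid */
	return 1;
}

int main(void)
{
	struct psw ok  = { PSW_MASK_BA, 0x7ffffffeULL };
	struct psw bad = { PSW_MASK_BA, 0x80000000ULL };

	printf("%d %d\n", is_valid_psw(&ok), is_valid_psw(&bad)); /* 1 0 */
	return 0;
}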