From a6b7e459ff6d569227980f711664f927100c73a8 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 1 Oct 2014 14:48:42 +0200 Subject: KVM: s390: Make the simple ipte mutex specific to a VM instead of global The ipte-locking should be done for each VM separately, not globally. This way we avoid possible contention when the simple ipte-lock is used and multiple VMs are running. Suggested-by: Heiko Carstens Signed-off-by: Thomas Huth Acked-by: Heiko Carstens Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 20 +++++++++----------- arch/s390/kvm/kvm-s390.c | 1 + 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 0f961a1..c1424e8 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -207,8 +207,6 @@ union raddress { unsigned long pfra : 52; /* Page-Frame Real Address */ }; -static int ipte_lock_count; -static DEFINE_MUTEX(ipte_mutex); int ipte_lock_held(struct kvm_vcpu *vcpu) { @@ -216,16 +214,16 @@ int ipte_lock_held(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->eca & 1) return ic->kh != 0; - return ipte_lock_count != 0; + return vcpu->kvm->arch.ipte_lock_count != 0; } static void ipte_lock_simple(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; - mutex_lock(&ipte_mutex); - ipte_lock_count++; - if (ipte_lock_count > 1) + mutex_lock(&vcpu->kvm->arch.ipte_mutex); + vcpu->kvm->arch.ipte_lock_count++; + if (vcpu->kvm->arch.ipte_lock_count > 1) goto out; ic = &vcpu->kvm->arch.sca->ipte_control; do { @@ -238,16 +236,16 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu) new.k = 1; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); out: - mutex_unlock(&ipte_mutex); + mutex_unlock(&vcpu->kvm->arch.ipte_mutex); } static void ipte_unlock_simple(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; - mutex_lock(&ipte_mutex); - ipte_lock_count--; - if (ipte_lock_count) + mutex_lock(&vcpu->kvm->arch.ipte_mutex); + vcpu->kvm->arch.ipte_lock_count--; + if (vcpu->kvm->arch.ipte_lock_count) goto out; ic = &vcpu->kvm->arch.sca->ipte_control; do { @@ -256,7 +254,7 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu) } while (cmpxchg(&ic->val, old.val, new.val) != old.val); wake_up(&vcpu->kvm->arch.ipte_wq); out: - mutex_unlock(&ipte_mutex); + mutex_unlock(&vcpu->kvm->arch.ipte_mutex); } static void ipte_lock_siif(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 55aade4..3e83d4b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -453,6 +453,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) spin_lock_init(&kvm->arch.float_int.lock); INIT_LIST_HEAD(&kvm->arch.float_int.list); init_waitqueue_head(&kvm->arch.ipte_wq); + mutex_init(&kvm->arch.ipte_mutex); debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); -- cgit v1.1 From a36c5393266222129ce6f622e3bc3fb5463f290c Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 16 Oct 2014 14:31:53 +0200 Subject: KVM: s390: Fix size of monitor-class number field The monitor-class number field is only 16 bits, so we have to use a u16 pointer to access it.
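The pointer width matters because put_guest_lc() derives the store size from the pointed-to type: casting the lowcore offset to (u64 *) copies eight bytes instead of two, and on big-endian s390 the two significant bytes then land at the wrong offset while neighbouring bytes get clobbered. A standalone sketch of the effect (plain userspace C; the lowcore buffer and offsets are illustrative, not the real lowcore layout):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for the lowcore bytes around __LC_MON_CLASS_NR;
 * offsets here are illustrative only. */
static uint8_t lowcore[8];

/* Big-endian store of the low 'len' bytes of val, as on s390. */
static void store_be(uint8_t *dst, uint64_t val, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = val >> (8 * (len - 1 - i));
}

int main(void)
{
	uint16_t mon_class_nr = 0x0102;

	/* correct: a 2-byte store puts 01 02 at offsets 0-1 */
	store_be(lowcore, mon_class_nr, sizeof(uint16_t));
	printf("u16 store: %02x %02x\n", lowcore[0], lowcore[1]);

	/* buggy u64-sized store: offsets 0-5 become zero and the value
	 * ends up at offsets 6-7, i.e. in a neighbouring field */
	store_be(lowcore, mon_class_nr, sizeof(uint64_t));
	printf("u64 store: %02x %02x .. %02x %02x\n",
	       lowcore[0], lowcore[1], lowcore[6], lowcore[7]);
	return 0;
}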
Signed-off-by: Thomas Huth Reviewed-by: David Hildenbrand CC: stable@vger.kernel.org # v3.16+ Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index a398384..4fc3fed 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -270,7 +270,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu, break; case PGM_MONITOR: rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, - (u64 *)__LC_MON_CLASS_NR); + (u16 *)__LC_MON_CLASS_NR); rc |= put_guest_lc(vcpu, pgm_info->mon_code, (u64 *)__LC_MON_CODE); break; -- cgit v1.1 From 3526a66b66e3a997fb7d77af2f65dbdd77d2556e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 19 Mar 2014 14:57:49 +0100 Subject: KVM: s390: sigp: dispatch orders with one target in a separate function All sigp orders except SIGP SET ARCHITECTURE target exactly one vcpu. Let's move the dispatch code for these orders into a separate function to prepare for cleaner target availability checks. Signed-off-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/sigp.c | 74 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 31 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index cf243ba..5e259bd 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -349,32 +349,15 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) return rc; } -int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) +static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, + u16 cpu_addr, u32 parameter, u64 *status_reg) { - int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; - int r3 = vcpu->arch.sie_block->ipa & 0x000f; - u32 parameter; - u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; - u8 order_code; int rc; - /* sigp in userspace can exit */ - if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) - return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - - order_code = kvm_s390_get_base_disp_rs(vcpu); - - if (r1 % 2) - parameter = vcpu->run->s.regs.gprs[r1]; - else - parameter = vcpu->run->s.regs.gprs[r1 + 1]; - - trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter); switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; - rc = __sigp_sense(vcpu, cpu_addr, - &vcpu->run->s.regs.gprs[r1]); + rc = __sigp_sense(vcpu, cpu_addr, status_reg); break; case SIGP_EXTERNAL_CALL: vcpu->stat.instruction_sigp_external_call++; @@ -395,25 +378,19 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) break; case SIGP_STORE_STATUS_AT_ADDRESS: rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter, - &vcpu->run->s.regs.gprs[r1]); - break; - case SIGP_SET_ARCHITECTURE: - vcpu->stat.instruction_sigp_arch++; - rc = __sigp_set_arch(vcpu, parameter); + status_reg); break; case SIGP_SET_PREFIX: vcpu->stat.instruction_sigp_prefix++; - rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, - &vcpu->run->s.regs.gprs[r1]); + rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, status_reg); break; case SIGP_COND_EMERGENCY_SIGNAL: rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter, - &vcpu->run->s.regs.gprs[r1]); + status_reg); break; case SIGP_SENSE_RUNNING: vcpu->stat.instruction_sigp_sense_running++; - rc = __sigp_sense_running(vcpu, cpu_addr, - &vcpu->run->s.regs.gprs[r1]); + rc = __sigp_sense_running(vcpu, cpu_addr, status_reg); break; case 
SIGP_START: rc = sigp_check_callable(vcpu, cpu_addr); @@ -432,7 +409,42 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) } break; default: - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; + } + + return rc; +} + +int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) +{ + int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; + int r3 = vcpu->arch.sie_block->ipa & 0x000f; + u32 parameter; + u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; + u8 order_code; + int rc; + + /* sigp in userspace can exit */ + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + + order_code = kvm_s390_get_base_disp_rs(vcpu); + + if (r1 % 2) + parameter = vcpu->run->s.regs.gprs[r1]; + else + parameter = vcpu->run->s.regs.gprs[r1 + 1]; + + trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter); + switch (order_code) { + case SIGP_SET_ARCHITECTURE: + vcpu->stat.instruction_sigp_arch++; + rc = __sigp_set_arch(vcpu, parameter); + break; + default: + rc = handle_sigp_dst(vcpu, order_code, cpu_addr, + parameter, + &vcpu->run->s.regs.gprs[r1]); } if (rc < 0) -- cgit v1.1 From 3d95c7d2d7cb9a1f47e9a46473413844c6245c15 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 19 Mar 2014 16:59:39 +0100 Subject: KVM: s390: sigp: move target cpu checks into dispatcher All sigp orders targeting one VCPU have to verify that the target is valid and available. Let's move the check from the single functions to the dispatcher. The destination VCPU is directly passed as a pointer - instead of the cpu address of the target. Please note that all SIGP orders except SIGP SET ARCHITECTURE - even unknown ones - will now check for the availability of the target VCPU. This is what the architecture documentation specifies. Signed-off-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/sigp.c | 139 ++++++++++++++++++--------------------------------- 1 file changed, 48 insertions(+), 91 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 5e259bd..660a945 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -20,20 +20,13 @@ #include "kvm-s390.h" #include "trace.h" -static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, +static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u64 *reg) { struct kvm_s390_local_interrupt *li; - struct kvm_vcpu *dst_vcpu = NULL; int cpuflags; int rc; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; li = &dst_vcpu->arch.local_int; cpuflags = atomic_read(li->cpuflags); @@ -48,44 +41,36 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, rc = SIGP_CC_STATUS_STORED; } - VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); + VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id, + rc); return rc; } -static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) +static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { struct kvm_s390_interrupt s390int = { .type = KVM_S390_INT_EMERGENCY, .parm = vcpu->vcpu_id, }; - struct kvm_vcpu *dst_vcpu = NULL; int rc = 0; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); if (!rc) - VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); + VCPU_EVENT(vcpu, 4, "sent sigp 
emerg to cpu %x", + dst_vcpu->vcpu_id); return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; } -static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, +static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u16 asn, u64 *reg) { - struct kvm_vcpu *dst_vcpu = NULL; const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; u16 p_asn, s_asn; psw_t *psw; u32 flags; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags); psw = &dst_vcpu->arch.sie_block->gpsw; p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ @@ -96,7 +81,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, || (psw->mask & psw_int_mask) != psw_int_mask || ((flags & CPUSTAT_WAIT) && psw->addr != 0) || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) { - return __sigp_emergency(vcpu, cpu_addr); + return __sigp_emergency(vcpu, dst_vcpu); } else { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; @@ -104,23 +89,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, } } -static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) +static int __sigp_external_call(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu) { struct kvm_s390_interrupt s390int = { .type = KVM_S390_INT_EXTERNAL_CALL, .parm = vcpu->vcpu_id, }; - struct kvm_vcpu *dst_vcpu = NULL; int rc; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); if (!rc) - VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); + VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", + dst_vcpu->vcpu_id); return rc ? 
rc : SIGP_CC_ORDER_CODE_ACCEPTED; } @@ -160,21 +141,13 @@ out: return rc; } -static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) +static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, + int action) { - struct kvm_vcpu *dst_vcpu = NULL; int rc; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - rc = __inject_sigp_stop(dst_vcpu, action); - - VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); + VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id); if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { /* If the CPU has already been stopped, we still have @@ -212,18 +185,13 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) return rc; } -static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, - u64 *reg) +static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, + u32 address, u64 *reg) { struct kvm_s390_local_interrupt *li; - struct kvm_vcpu *dst_vcpu = NULL; struct kvm_s390_interrupt_info *inti; int rc; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; li = &dst_vcpu->arch.local_int; /* @@ -260,24 +228,20 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, kvm_s390_vcpu_wakeup(dst_vcpu); rc = SIGP_CC_ORDER_CODE_ACCEPTED; - VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); + VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id, + address); out_li: spin_unlock(&li->lock); return rc; } -static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, - u32 addr, u64 *reg) +static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, + u32 addr, u64 *reg) { - struct kvm_vcpu *dst_vcpu = NULL; int flags; int rc; - if (cpu_id < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - spin_lock(&dst_vcpu->arch.local_int.lock); flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); spin_unlock(&dst_vcpu->arch.local_int.lock); @@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, return rc; } -static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, - u64 *reg) +static int __sigp_sense_running(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u64 *reg) { struct kvm_s390_local_interrupt *li; - struct kvm_vcpu *dst_vcpu = NULL; int rc; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; li = &dst_vcpu->arch.local_int; if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { /* running */ @@ -321,26 +278,18 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, rc = SIGP_CC_STATUS_STORED; } - VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr, - rc); + VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", + dst_vcpu->vcpu_id, rc); return rc; } /* Test whether the destination CPU is available and not busy */ -static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) +static int sigp_check_callable(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { - struct kvm_s390_local_interrupt *li; + struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; int rc = SIGP_CC_ORDER_CODE_ACCEPTED; - struct 
kvm_vcpu *dst_vcpu = NULL; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - li = &dst_vcpu->arch.local_int; spin_lock(&li->lock); if (li->action_bits & ACTION_STOP_ON_STOP) rc = SIGP_CC_BUSY; @@ -353,53 +302,61 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, u16 cpu_addr, u32 parameter, u64 *status_reg) { int rc; + struct kvm_vcpu *dst_vcpu; + + if (cpu_addr >= KVM_MAX_VCPUS) + return SIGP_CC_NOT_OPERATIONAL; + + dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + if (!dst_vcpu) + return SIGP_CC_NOT_OPERATIONAL; switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; - rc = __sigp_sense(vcpu, cpu_addr, status_reg); + rc = __sigp_sense(vcpu, dst_vcpu, status_reg); break; case SIGP_EXTERNAL_CALL: vcpu->stat.instruction_sigp_external_call++; - rc = __sigp_external_call(vcpu, cpu_addr); + rc = __sigp_external_call(vcpu, dst_vcpu); break; case SIGP_EMERGENCY_SIGNAL: vcpu->stat.instruction_sigp_emergency++; - rc = __sigp_emergency(vcpu, cpu_addr); + rc = __sigp_emergency(vcpu, dst_vcpu); break; case SIGP_STOP: vcpu->stat.instruction_sigp_stop++; - rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP); + rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STOP_ON_STOP); break; case SIGP_STOP_AND_STORE_STATUS: vcpu->stat.instruction_sigp_stop++; - rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP | + rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STORE_ON_STOP | ACTION_STOP_ON_STOP); break; case SIGP_STORE_STATUS_AT_ADDRESS: - rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter, + rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter, status_reg); break; case SIGP_SET_PREFIX: vcpu->stat.instruction_sigp_prefix++; - rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, status_reg); + rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg); break; case SIGP_COND_EMERGENCY_SIGNAL: - rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter, + rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter, status_reg); break; case SIGP_SENSE_RUNNING: vcpu->stat.instruction_sigp_sense_running++; - rc = __sigp_sense_running(vcpu, cpu_addr, status_reg); + rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg); break; case SIGP_START: - rc = sigp_check_callable(vcpu, cpu_addr); + rc = sigp_check_callable(vcpu, dst_vcpu); if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) rc = -EOPNOTSUPP; /* Handle START in user space */ break; case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; - rc = sigp_check_callable(vcpu, cpu_addr); + rc = sigp_check_callable(vcpu, dst_vcpu); if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) { VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace", -- cgit v1.1 From b8983830826f3b0747a6d1c1f351121b9cc93276 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 May 2014 12:22:56 +0200 Subject: KVM: s390: sigp: separate preparation handlers In preparation for further code changes, this patch introduces separate handler functions for: - SIGP (RE)START - will not be allowed to terminate pending orders - SIGP (INITIAL) CPU RESET - will be allowed to terminate certain pending orders - unknown sigp orders All sigp orders that require user space intervention are logged.
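The contract behind the preparation handlers is that each one performs whatever checks are possible in the kernel and then returns -EOPNOTSUPP so the dispatcher can forward the order to user space; only (RE)START has a kernel-side condition to report. A compact userspace model of that contract (simplified types and stand-in constants, not the kernel code):

#include <errno.h>
#include <stdio.h>

#define ACTION_STOP_ON_STOP	1	/* stand-in for the kernel flag */
#define SIGP_CC_BUSY		2

struct vcpu {
	unsigned int action_bits;
};

/* (RE)START must not terminate a pending stop order: report BUSY while
 * a stop is in flight, otherwise hand the order to user space. */
static int prepare_sigp_re_start(struct vcpu *dst)
{
	if (dst->action_bits & ACTION_STOP_ON_STOP)
		return SIGP_CC_BUSY;
	return -EOPNOTSUPP;		/* handled in user space */
}

/* (INITIAL) CPU RESET may terminate pending orders and is forwarded
 * unconditionally, as is every unknown order. */
static int prepare_sigp_cpu_reset(struct vcpu *dst)
{
	(void)dst;
	return -EOPNOTSUPP;		/* handled in user space */
}

int main(void)
{
	struct vcpu stopping = { .action_bits = ACTION_STOP_ON_STOP };
	struct vcpu idle = { 0 };

	printf("restart, stop pending: %d\n", prepare_sigp_re_start(&stopping));
	printf("restart, idle target:  %d\n", prepare_sigp_re_start(&idle));
	printf("cpu reset:             %d\n", prepare_sigp_cpu_reset(&idle));
	return 0;
}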
Signed-off-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/sigp.c | 47 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 660a945..a9e1739 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -284,11 +284,12 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, return rc; } -/* Test whether the destination CPU is available and not busy */ -static int sigp_check_callable(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) +static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u8 order_code) { struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; - int rc = SIGP_CC_ORDER_CODE_ACCEPTED; + /* handle (RE)START in user space */ + int rc = -EOPNOTSUPP; spin_lock(&li->lock); if (li->action_bits & ACTION_STOP_ON_STOP) @@ -298,6 +299,20 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) return rc; } +static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u8 order_code) +{ + /* handle (INITIAL) CPU RESET in user space */ + return -EOPNOTSUPP; +} + +static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu) +{ + /* handle unknown orders in user space */ + return -EOPNOTSUPP; +} + static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, u16 cpu_addr, u32 parameter, u64 *status_reg) { @@ -350,25 +365,27 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg); break; case SIGP_START: - rc = sigp_check_callable(vcpu, dst_vcpu); - if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) - rc = -EOPNOTSUPP; /* Handle START in user space */ + rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); break; case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; - rc = sigp_check_callable(vcpu, dst_vcpu); - if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) { - VCPU_EVENT(vcpu, 4, - "sigp restart %x to handle userspace", - cpu_addr); - /* user space must know about restart */ - rc = -EOPNOTSUPP; - } + rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); + break; + case SIGP_INITIAL_CPU_RESET: + rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); + break; + case SIGP_CPU_RESET: + rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); break; default: - rc = -EOPNOTSUPP; + rc = __prepare_sigp_unknown(vcpu, dst_vcpu); } + if (rc == -EOPNOTSUPP) + VCPU_EVENT(vcpu, 4, + "sigp order %u -> cpu %x: handled in user space", + order_code, dst_vcpu->vcpu_id); + return rc; } -- cgit v1.1 From 42cb0c9ff92eba2168d1b8f69d6e62d2af608a13 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 May 2014 12:25:11 +0200 Subject: KVM: s390: sigp: instruction counters for all sigp orders This patch introduces instruction counters for all known sigp orders and also a separate one for unknown orders that are passed to user space. 
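The counting itself is just one increment per dispatcher arm plus a catch-all, so even orders the kernel cannot handle remain visible in the statistics. A minimal standalone sketch of the scheme (order-code values as defined by the architecture; the stat struct is an illustrative subset of the real one):

#include <stdio.h>

/* Order codes as defined by the architecture. */
#define SIGP_START	0x04
#define SIGP_CPU_RESET	0x0c

/* Illustrative subset of the per-vcpu statistics counters. */
struct vcpu_stat {
	unsigned long instruction_sigp_start;
	unsigned long instruction_sigp_cpu_reset;
	unsigned long instruction_sigp_unknown;
};

static void count_order(struct vcpu_stat *stat, unsigned char order_code)
{
	switch (order_code) {
	case SIGP_START:
		stat->instruction_sigp_start++;
		break;
	case SIGP_CPU_RESET:
		stat->instruction_sigp_cpu_reset++;
		break;
	default:
		/* unrecognized orders are still accounted for */
		stat->instruction_sigp_unknown++;
	}
}

int main(void)
{
	struct vcpu_stat stat = { 0 };

	count_order(&stat, SIGP_START);
	count_order(&stat, 0xff);	/* unknown order */
	printf("start=%lu unknown=%lu\n",
	       stat.instruction_sigp_start, stat.instruction_sigp_unknown);
	return 0;
}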
Signed-off-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 7 +++++++ arch/s390/kvm/sigp.c | 8 +++++++- 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3e83d4b..06878bd 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -81,10 +81,17 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) }, { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, + { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) }, + { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) }, { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, + { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, + { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, + { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) }, + { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) }, + { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) }, { "diagnose_10", VCPU_STAT(diagnose_10) }, { "diagnose_44", VCPU_STAT(diagnose_44) }, { "diagnose_9c", VCPU_STAT(diagnose_9c) }, diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index a9e1739..9ee63e4 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -344,11 +344,12 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STOP_ON_STOP); break; case SIGP_STOP_AND_STORE_STATUS: - vcpu->stat.instruction_sigp_stop++; + vcpu->stat.instruction_sigp_stop_store_status++; rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STORE_ON_STOP | ACTION_STOP_ON_STOP); break; case SIGP_STORE_STATUS_AT_ADDRESS: + vcpu->stat.instruction_sigp_store_status++; rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter, status_reg); break; @@ -357,6 +358,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg); break; case SIGP_COND_EMERGENCY_SIGNAL: + vcpu->stat.instruction_sigp_cond_emergency++; rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter, status_reg); break; @@ -365,6 +367,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg); break; case SIGP_START: + vcpu->stat.instruction_sigp_start++; rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); break; case SIGP_RESTART: @@ -372,12 +375,15 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); break; case SIGP_INITIAL_CPU_RESET: + vcpu->stat.instruction_sigp_init_cpu_reset++; rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); break; case SIGP_CPU_RESET: + vcpu->stat.instruction_sigp_cpu_reset++; rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); break; default: + vcpu->stat.instruction_sigp_unknown++; rc = __prepare_sigp_unknown(vcpu, dst_vcpu); } -- cgit v1.1 From 07b0303540e1951c75c98b7dd729ff1851a0049f Mon Sep 17 00:00:00 2001 From: David 
Hildenbrand Date: Fri, 6 Jun 2014 10:24:15 +0200 Subject: KVM: s390: sigp: inject emergency calls in a separate function In preparation for further code changes, this patch moves the injection of emergency calls into a separate function and uses it for the processing of SIGP EMERGENCY CALL and SIGP CONDITIONAL EMERGENCY CALL. Signed-off-by: David Hildenbrand Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/sigp.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 9ee63e4..1b330d4 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -46,7 +46,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, return rc; } -static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) +static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu) { struct kvm_s390_interrupt s390int = { .type = KVM_S390_INT_EMERGENCY, @@ -62,6 +63,11 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; } +static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) +{ + return __inject_sigp_emergency(vcpu, dst_vcpu); +} + static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u16 asn, u64 *reg) @@ -76,12 +82,12 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ - /* Deliver the emergency signal? */ + /* Inject the emergency signal? */ if (!(flags & CPUSTAT_STOPPED) || (psw->mask & psw_int_mask) != psw_int_mask || ((flags & CPUSTAT_WAIT) && psw->addr != 0) || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) { - return __sigp_emergency(vcpu, dst_vcpu); + return __inject_sigp_emergency(vcpu, dst_vcpu); } else { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; -- cgit v1.1 From a6cc3108567e0adc06c4a8031186f84ad1e1e194 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 6 Jun 2014 10:25:09 +0200 Subject: KVM: s390: sigp: split handling of SIGP STOP (AND STORE STATUS) In preparation for further code changes (e.g. getting rid of action_flags), this patch splits the handling of the two sigp orders SIGP STOP and SIGP STOP AND STORE STATUS by introducing a separate handler function for SIGP STOP AND STORE STATUS. 
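After the split, both wrappers still funnel into __inject_sigp_stop() and differ only in the action bits they pass, which keeps the -ESHUTDOWN/store-status handling local to the one order that needs it. A small standalone sketch of the flag composition (the bit values are stand-ins, not the kernel's):

#include <stdio.h>

/* Stand-ins for the kernel's action bits. */
#define ACTION_STORE_ON_STOP	(1 << 0)
#define ACTION_STOP_ON_STOP	(1 << 1)

/* Stub: the real helper queues a stop interrupt for the target and
 * records the action bits in its local interrupt state. */
static int inject_sigp_stop(int action)
{
	printf("stop injected, action bits 0x%x\n", action);
	return 0;
}

static int sigp_stop(void)
{
	return inject_sigp_stop(ACTION_STOP_ON_STOP);
}

static int sigp_stop_and_store_status(void)
{
	/* only this variant has to handle an already-stopped target
	 * (-ESHUTDOWN) by storing the status afterwards */
	return inject_sigp_stop(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
}

int main(void)
{
	sigp_stop();
	sigp_stop_and_store_status();
	return 0;
}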
Signed-off-by: David Hildenbrand Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/sigp.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 1b330d4..f7cd3f7 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -147,15 +147,27 @@ out: return rc; } -static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, - int action) +static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { int rc; - rc = __inject_sigp_stop(dst_vcpu, action); + rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP); VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id); - if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { + return rc; +} + +static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u64 *reg) +{ + int rc; + + rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP | + ACTION_STORE_ON_STOP); + VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x", + dst_vcpu->vcpu_id); + + if (rc == -ESHUTDOWN) { /* If the CPU has already been stopped, we still have * to save the status when doing stop-and-store. This * has to be done after unlocking all spinlocks. */ @@ -347,12 +359,11 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, break; case SIGP_STOP: vcpu->stat.instruction_sigp_stop++; - rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STOP_ON_STOP); + rc = __sigp_stop(vcpu, dst_vcpu); break; case SIGP_STOP_AND_STORE_STATUS: vcpu->stat.instruction_sigp_stop_store_status++; - rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STORE_ON_STOP | - ACTION_STOP_ON_STOP); + rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg); break; case SIGP_STORE_STATUS_AT_ADDRESS: vcpu->stat.instruction_sigp_store_status++; -- cgit v1.1 From 1365039d0cb32c0cf96eb9f750f4277c9a90f87d Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 4 Nov 2014 08:31:16 +0100 Subject: KVM: s390: Fix ipte locking ipte_unlock_siif uses cmpxchg to replace the in-memory data of the ipte lock together with ACCESS_ONCE for the initial read. union ipte_control { unsigned long val; struct { unsigned long k : 1; unsigned long kh : 31; unsigned long kg : 32; }; }; [...] static void ipte_unlock_siif(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; ic = &vcpu->kvm->arch.sca->ipte_control; do { new = old = ACCESS_ONCE(*ic); new.kh--; if (!new.kh) new.k = 0; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); if (!new.kh) wake_up(&vcpu->kvm->arch.ipte_wq); } The new value is loaded twice from memory with gcc 4.7.2 on Fedora 18, despite the ACCESS_ONCE: ---> l %r4,0(%r3) <--- load first 32 bit of lock (k and kh) in r4 alfi %r4,2147483647 <--- add -1 to r4 llgtr %r4,%r4 <--- zero out the sign bit of r4 lg %r1,0(%r3) <--- load all 64 bit of lock into new lgr %r2,%r1 <--- load the same into old risbg %r1,%r4,1,31,32 <--- shift and insert r4 into the bits 1-31 of new llihf %r4,2147483647 ngrk %r4,%r1,%r4 jne aa0 nihh %r1,32767 lgr %r4,%r2 csg %r4,%r1,0(%r3) cgr %r2,%r4 jne a70 If the memory value changes between the first load (l) and the second load (lg) we are broken. If that happens VCPU threads will hang (unkillable) in handle_ipte_interlock. Andreas Krebbel analyzed this and tracked it down to a compiler bug in that version: "while it is not that obvious the C99 standard basically forbids duplicating the memory access also in that case.
For an argumentation of a similar case please see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=22278#c43 For the implementation-defined cases regarding volatile there are some GCC-specific clarifications which can be found here: https://gcc.gnu.org/onlinedocs/gcc/Volatiles.html#Volatiles I've tracked down the problem with a reduced testcase. The problem was that during a tree level optimization (SRA - scalar replacement of aggregates) the volatile marker is lost. And an RTL level optimizer (CSE - common subexpression elimination) then propagated the memory read into its second use introducing another access to the memory location. So indeed Christian's suspicion that the union access has something to do with it is correct (since it triggered the SRA optimization). This issue has been reported and fixed in the GCC 4.8 development cycle: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145" This patch replaces the ACCESS_ONCE scheme with a barrier() based scheme that should work for all supported compilers. Signed-off-by: Christian Borntraeger Cc: stable@vger.kernel.org # v3.16+ --- arch/s390/kvm/gaccess.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 0f961a1..6dc0ad9 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -229,10 +229,12 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu) goto out; ic = &vcpu->kvm->arch.sca->ipte_control; do { - old = ACCESS_ONCE(*ic); + old = *ic; + barrier(); while (old.k) { cond_resched(); - old = ACCESS_ONCE(*ic); + old = *ic; + barrier(); } new = old; new.k = 1; @@ -251,7 +253,9 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu) goto out; ic = &vcpu->kvm->arch.sca->ipte_control; do { - new = old = ACCESS_ONCE(*ic); + old = *ic; + barrier(); + new = old; new.k = 0; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); wake_up(&vcpu->kvm->arch.ipte_wq); @@ -265,10 +269,12 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu) ic = &vcpu->kvm->arch.sca->ipte_control; do { - old = ACCESS_ONCE(*ic); + old = *ic; + barrier(); while (old.kg) { cond_resched(); - old = ACCESS_ONCE(*ic); + old = *ic; + barrier(); } new = old; new.k = 1; @@ -282,7 +288,9 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu) ic = &vcpu->kvm->arch.sca->ipte_control; do { - new = old = ACCESS_ONCE(*ic); + old = *ic; + barrier(); + new = old; new.kh--; if (!new.kh) new.k = 0; -- cgit v1.1 From 2dca485f8740208604543c3960be31a5dd3ea603 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 31 Oct 2014 09:24:20 +0100 Subject: KVM: s390: flush CPU on load control Some control register changes will flush some aspects of the CPU, e.g. POP explicitly mentions that for CR9-CR11 "TLBs may be cleared". Instead of trying to be clever and only flush on specific CRs, let's play it safe and flush on all lctl(g) as future machines might define new bits in CRs. Load control intercept should not happen that often.
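Note that the handler does not flush synchronously: kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu) only sets a request bit that is consumed on the next guest entry. A rough standalone model of that deferred-request pattern (names are illustrative; GCC atomic builtins stand in for the kernel's bit operations):

#include <stdio.h>

#define REQ_TLB_FLUSH	0	/* illustrative request number */

struct vcpu {
	unsigned long requests;
};

/* Set a request bit; may be called from any thread. */
static void make_request(struct vcpu *vcpu, int req)
{
	__atomic_fetch_or(&vcpu->requests, 1UL << req, __ATOMIC_SEQ_CST);
}

/* Test-and-clear a request bit on the way back into the guest. */
static int check_request(struct vcpu *vcpu, int req)
{
	unsigned long mask = 1UL << req;

	return (__atomic_fetch_and(&vcpu->requests, ~mask,
				   __ATOMIC_SEQ_CST) & mask) != 0;
}

static void vcpu_enter_guest(struct vcpu *vcpu)
{
	if (check_request(vcpu, REQ_TLB_FLUSH))
		printf("flushing guest TLB before reentry\n");
}

int main(void)
{
	struct vcpu vcpu = { 0 };

	make_request(&vcpu, REQ_TLB_FLUSH);	/* e.g. after lctl(g) */
	vcpu_enter_guest(&vcpu);		/* flush happens here */
	vcpu_enter_guest(&vcpu);		/* request already consumed */
	return 0;
}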
Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck Reviewed-by: David Hildenbrand Cc: stable@vger.kernel.org --- arch/s390/kvm/priv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 72bb2dd..9c565b6 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -791,7 +791,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) break; reg = (reg + 1) % 16; } while (1); - + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } @@ -863,7 +863,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) break; reg = (reg + 1) % 16; } while (1); - + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } -- cgit v1.1 From fc56eb66c348febef6c7bbd2c3918410cafd6313 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 29 Oct 2014 10:07:16 +0100 Subject: KVM: s390: fix handling of lctl[g]/stctl[g] According to the architecture all instructions are suppressing if memory access is prohibited due to DAT protection, unless stated otherwise for an instruction. The lctl[g]/stctl[g] implementations handled this incorrectly since control register handling was done piecemeal, which means they had terminating instead of suppressing semantics. This patch fixes this. Signed-off-by: Heiko Carstens Reviewed-by: Thomas Huth Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 68 +++++++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 36 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 9c565b6..9bde32f 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -762,8 +762,8 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - u32 val = 0; - int reg, rc; + int reg, rc, nr_regs; + u32 ctl_array[16]; u64 ga; vcpu->stat.instruction_lctl++; @@ -779,14 +779,15 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); + nr_regs = ((reg3 - reg1) & 0xf) + 1; + rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); reg = reg1; + nr_regs = 0; do { - rc = read_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; - vcpu->arch.sie_block->gcr[reg] |= val; - ga += 4; + vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; if (reg == reg3) break; reg = (reg + 1) % 16; @@ -799,9 +800,9 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; + int reg, rc, nr_regs; + u32 ctl_array[16]; u64 ga; - u32 val; - int reg, rc; vcpu->stat.instruction_stctl++; @@ -817,26 +818,24 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); reg = reg1; + nr_regs = 0; do { - val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; - rc = write_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); - ga += 4; + ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); - - return 0; + rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + return rc ? 
kvm_s390_inject_prog_cond(vcpu, rc) : 0; } static int handle_lctlg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - u64 ga, val; - int reg, rc; + int reg, rc, nr_regs; + u64 ctl_array[16]; + u64 ga; vcpu->stat.instruction_lctlg++; @@ -848,17 +847,17 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - reg = reg1; - VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); + nr_regs = ((reg3 - reg1) & 0xf) + 1; + rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); + reg = reg1; + nr_regs = 0; do { - rc = read_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); - vcpu->arch.sie_block->gcr[reg] = val; - ga += 8; + vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; if (reg == reg3) break; reg = (reg + 1) % 16; @@ -871,8 +870,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - u64 ga, val; - int reg, rc; + int reg, rc, nr_regs; + u64 ctl_array[16]; + u64 ga; vcpu->stat.instruction_stctg++; @@ -884,23 +884,19 @@ static int handle_stctg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - reg = reg1; - VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); + reg = reg1; + nr_regs = 0; do { - val = vcpu->arch.sie_block->gcr[reg]; - rc = write_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); - ga += 8; + ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); - - return 0; + rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; } static const intercept_handler_t eb_handlers[256] = { -- cgit v1.1 From a02689fecdbc36503b1496a5d36707bb4559db63 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Mon, 10 Nov 2014 15:59:32 +0100 Subject: KVM: s390: Small fixes for the PFMF handler This patch includes two small fixes for the PFMF handler: First, the start address for PFMF has to be masked according to the current addressing mode, which is now done with kvm_s390_logical_to_effective(). Second, the protection exceptions have a lower priority than the specification exceptions, so the check for low-address protection has to be moved after the last spot where we inject a specification exception. 
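kvm_s390_logical_to_effective() truncates the address according to the PSW addressing mode, so a PFMF issued in 24- or 31-bit mode cannot carry high address bits past the checks. A standalone sketch of the masking step (the mode enum is a simplified stand-in for the PSW EA/BA bits):

#include <stdio.h>

/* Stand-in for the PSW EA/BA addressing-mode bits. */
enum psw_mode { MODE_24, MODE_31, MODE_64 };

/* Truncate a logical address to the effective width, as
 * kvm_s390_logical_to_effective() derives it from the PSW. */
static unsigned long logical_to_effective(unsigned long ga, enum psw_mode mode)
{
	switch (mode) {
	case MODE_24:
		return ga & 0xffffffUL;		/* low 24 bits */
	case MODE_31:
		return ga & 0x7fffffffUL;	/* low 31 bits */
	default:
		return ga;			/* full 64-bit address */
	}
}

int main(void)
{
	unsigned long start = 0x123456789000UL;

	printf("24-bit: %#lx\n", logical_to_effective(start, MODE_24));
	printf("31-bit: %#lx\n", logical_to_effective(start, MODE_31));
	printf("64-bit: %#lx\n", logical_to_effective(start, MODE_64));
	return 0;
}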
Signed-off-by: Thomas Huth Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 9bde32f..04f70fd 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -646,10 +646,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; - if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { - if (kvm_s390_check_low_addr_protection(vcpu, start)) - return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); - } + start = kvm_s390_logical_to_effective(vcpu, start); switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { case 0x00000000: @@ -665,6 +662,12 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) default: return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); } + + if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { + if (kvm_s390_check_low_addr_protection(vcpu, start)) + return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); + } + while (start < end) { unsigned long useraddr, abs_addr; -- cgit v1.1 From 04b41acd060541fa76407d4de1e0acf0edd57c2a Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 12 Nov 2014 17:13:29 +0100 Subject: KVM: s390: Fix rewinding of the PSW pointing to an EXECUTE instruction A couple of our interception handlers rewind the PSW to the beginning of the instruction to run the intercepted instruction again during the next SIE entry. This normally works fine, but there is also the possibility that the instruction did not get run directly but via an EXECUTE instruction. In this case, the PSW does not point to the instruction that caused the interception, but to the EXECUTE instruction! So we've got to rewind the PSW to the beginning of the EXECUTE instruction instead. This is now accomplished with a new helper function kvm_s390_rewind_psw(). 
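The length selection itself is small: the low bit of icptstatus flags that an EXECUTE was involved, and the ILC field then gives the length of the EXECUTE instruction in bytes. A standalone model of the logic from the patch below (field layout simplified, and without the addressing-mode wraparound the real __rewind_psw applies):

#include <stdio.h>

/* Simplified model of kvm_s390_rewind_psw(): if the low bit of
 * icptstatus is set, the interception came in via EXECUTE, so rewind
 * by the length of the EXECUTE instruction instead. */
static unsigned long rewind_psw(unsigned long addr, int ilc,
				unsigned char icptstatus)
{
	if (icptstatus & 1) {
		ilc = (icptstatus >> 4) & 0x6;	/* length in bytes */
		if (!ilc)
			ilc = 4;
	}
	/* the real __rewind_psw also wraps per addressing mode */
	return addr - ilc;
}

int main(void)
{
	/* direct interception of a 4-byte instruction */
	printf("%#lx\n", rewind_psw(0x1004, 4, 0x00));
	/* the same instruction reached via a 4-byte EXECUTE */
	printf("%#lx\n", rewind_psw(0x2004, 4, 0x41));
	return 0;
}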
Signed-off-by: Thomas Huth Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/intercept.c | 16 ++++++++++++++-- arch/s390/kvm/kvm-s390.h | 6 ++++-- arch/s390/kvm/priv.c | 12 ++++-------- 3 files changed, 22 insertions(+), 12 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index eaf4629..1d244df 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -38,6 +38,19 @@ static const intercept_handler_t instruction_handlers[256] = { [0xeb] = kvm_s390_handle_eb, }; +void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc) +{ + struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; + + /* Use the length of the EXECUTE instruction if necessary */ + if (sie_block->icptstatus & 1) { + ilc = (sie_block->icptstatus >> 4) & 0x6; + if (!ilc) + ilc = 4; + } + sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc); +} + static int handle_noop(struct kvm_vcpu *vcpu) { switch (vcpu->arch.sie_block->icptcode) { @@ -288,7 +301,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) */ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; unsigned long srcaddr, dstaddr; int reg1, reg2, rc; @@ -310,7 +322,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) if (rc != 0) return rc; - psw->addr = __rewind_psw(*psw, 4); + kvm_s390_rewind_psw(vcpu, 4); return 0; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 244d023..ff8d977 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -24,8 +24,6 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); /* declare vfacilities extern */ extern unsigned long *vfacilities; -int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); - /* Transactional Memory Execution related macros */ #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) #define TDB_FORMAT1 1 @@ -152,6 +150,10 @@ void kvm_s390_reinject_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *inti); int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); +/* implemented in intercept.c */ +void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc); +int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); + /* implemented in priv.c */ int is_valid_psw(psw_t *psw); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 04f70fd..b37db1a 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -176,21 +176,18 @@ static int handle_skey(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - vcpu->arch.sie_block->gpsw.addr = - __rewind_psw(vcpu->arch.sie_block->gpsw, 4); + kvm_s390_rewind_psw(vcpu, 4); VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return 0; } static int handle_ipte_interlock(struct kvm_vcpu *vcpu) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; - vcpu->stat.instruction_ipte_interlock++; - if (psw_bits(*psw).p) + if (psw_bits(vcpu->arch.sie_block->gpsw).p) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); - psw->addr = __rewind_psw(*psw, 4); + kvm_s390_rewind_psw(vcpu, 4); VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); return 0; } @@ -721,8 +718,7 @@ static int handle_essa(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* Rewind PSW to repeat the ESSA 
instruction */ - vcpu->arch.sie_block->gpsw.addr = - __rewind_psw(vcpu->arch.sie_block->gpsw, 4); + kvm_s390_rewind_psw(vcpu, 4); vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); down_read(&gmap->mm->mmap_sem); -- cgit v1.1 From da00fcbdac1b00bf33b71093047e975cc1f68779 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 21 Nov 2014 09:38:12 +0100 Subject: KVM: s390: trigger the right CPU exit for floating interrupts When injecting a floating interrupt and no CPU is idle we kick one CPU to do an external exit. In case of I/O we should trigger an I/O exit instead. This does not matter for Linux guests as external and I/O interrupts are enabled/disabled at the same time, but play safe anyway. The same holds true for machine checks. Since there is no special exit, just reuse the generic stop exit. The injection code inside the VCPU loop will recheck anyway and rearm the proper exits (e.g. control registers) if necessary. Signed-off-by: Christian Borntraeger Reviewed-by: Thomas Huth Reviewed-by: David Hildenbrand --- arch/s390/kvm/interrupt.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 4fc3fed..ead52bf 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -851,7 +851,17 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) dst_vcpu = kvm_get_vcpu(kvm, sigcpu); li = &dst_vcpu->arch.local_int; spin_lock(&li->lock); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + switch (inti->type) { + case KVM_S390_MCHK: + atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); + break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); + break; + default: + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + break; + } spin_unlock(&li->lock); kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); unlock_fi: -- cgit v1.1 From 0146a7b0b0e8691e74d6c8d1d82ad40e3d526ac2 Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Mon, 28 Jul 2014 15:37:58 +0200 Subject: KVM: s390: refactor interrupt injection code In preparation for the rework of the local interrupt injection code, factor out injection routines from kvm_s390_inject_vcpu(). 
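The resulting shape is one allocation and one lock acquisition in kvm_s390_inject_vcpu(), type-specific helpers that fill in the payload and may reject bad parameters, a single kfree() on any failure, and a wakeup only on success. A condensed standalone model of that control flow (types and the elided locking are placeholders):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder interrupt descriptor. */
struct inti {
	int type;
	unsigned int parm;
};

enum { INT_EMERGENCY = 1 };

/* Type-specific helper: validates its parameter and queues the
 * interrupt (queuing is stubbed out here). */
static int inject_emergency(struct inti *inti, unsigned int parm)
{
	if (parm & 0xffff0000)
		return -EINVAL;
	inti->parm = parm;
	printf("queued emergency, code %u\n", parm);
	return 0;
}

static int inject_vcpu(int type, unsigned int parm)
{
	struct inti *inti = calloc(1, sizeof(*inti));
	int rc;

	if (!inti)
		return -ENOMEM;
	inti->type = type;

	/* the real code takes li->lock once, around the whole switch */
	switch (type) {
	case INT_EMERGENCY:
		rc = inject_emergency(inti, parm);
		break;
	default:
		rc = -EINVAL;
	}

	if (rc)
		free(inti);	/* single cleanup path on any failure */
	/* on success the real code wakes the target VCPU instead */
	return rc;
}

int main(void)
{
	printf("rc=%d\n", inject_vcpu(INT_EMERGENCY, 7));
	printf("rc=%d\n", inject_vcpu(INT_EMERGENCY, 0x10000));
	return 0;
}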
Signed-off-by: Jens Freimann Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 221 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 167 insertions(+), 54 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index ead52bf..8f50f8c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -719,6 +719,16 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) return rc; } +static int __inject_prog_irq(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + list_add(&inti->list, &li->list); + atomic_set(&li->active, 1); + return 0; +} + int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -746,6 +756,7 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_interrupt_info *inti; + int rc; inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) @@ -759,10 +770,133 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, inti->type = KVM_S390_PROGRAM_INT; memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); spin_lock(&li->lock); - list_add(&inti->list, &li->list); - atomic_set(&li->active, 1); + rc = __inject_prog_irq(vcpu, inti); BUG_ON(waitqueue_active(li->wq)); spin_unlock(&li->lock); + return rc; +} + +static int __inject_pfault_init(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + inti->ext.ext_params2 = s390int->parm64; + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + return 0; +} + +static int __inject_extcall(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", + s390int->parm); + if (s390int->parm & 0xffff0000) + return -EINVAL; + inti->extcall.code = s390int->parm; + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + return 0; +} + +static int __inject_set_prefix(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", + s390int->parm); + inti->prefix.address = s390int->parm; + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + return 0; +} + +static int __inject_sigp_stop(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + li->action_bits |= ACTION_STOP_ON_STOP; + return 0; +} + +static int __inject_sigp_restart(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + return 0; +} + +static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, + struct 
kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); + if (s390int->parm & 0xffff0000) + return -EINVAL; + inti->emerg.code = s390int->parm; + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + return 0; +} + +static int __inject_mchk(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", + s390int->parm64); + inti->mchk.mcic = s390int->parm64; + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + return 0; +} + +static int __inject_ckc(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + return 0; +} + +static int __inject_cpu_timer(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt *s390int, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); + list_add_tail(&inti->list, &li->list); + atomic_set(&li->active, 1); + atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -933,89 +1067,68 @@ void kvm_s390_reinject_io_int(struct kvm *kvm, int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt *s390int) { - struct kvm_s390_local_interrupt *li; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_interrupt_info *inti; + int rc; inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) return -ENOMEM; - switch (s390int->type) { + inti->type = s390int->type; + + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, + s390int->parm, 0, 2); + spin_lock(&li->lock); + switch (inti->type) { case KVM_S390_PROGRAM_INT: - if (s390int->parm & 0xffff0000) { - kfree(inti); - return -EINVAL; - } - inti->type = s390int->type; - inti->pgm.code = s390int->parm; VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", s390int->parm); + inti->pgm.code = s390int->parm; + if (s390int->parm & 0xffff0000) + rc = -EINVAL; + else + rc = __inject_prog_irq(vcpu, inti); break; case KVM_S390_SIGP_SET_PREFIX: - inti->prefix.address = s390int->parm; - inti->type = s390int->type; - VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", - s390int->parm); + rc = __inject_set_prefix(vcpu, s390int, inti); break; case KVM_S390_SIGP_STOP: + rc = __inject_sigp_stop(vcpu, s390int, inti); + break; case KVM_S390_RESTART: + rc = __inject_sigp_restart(vcpu, s390int, inti); + break; case KVM_S390_INT_CLOCK_COMP: + rc = __inject_ckc(vcpu, s390int, inti); + break; case KVM_S390_INT_CPU_TIMER: - VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); - inti->type = s390int->type; + rc = __inject_cpu_timer(vcpu, s390int, inti); break; case KVM_S390_INT_EXTERNAL_CALL: - if (s390int->parm & 0xffff0000) { - kfree(inti); - return -EINVAL; - } - VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", - s390int->parm); - inti->type = s390int->type; - inti->extcall.code = s390int->parm; + rc = __inject_extcall(vcpu, s390int, inti); break; case KVM_S390_INT_EMERGENCY: - if 
(s390int->parm & 0xffff0000) { - kfree(inti); - return -EINVAL; - } - VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); - inti->type = s390int->type; - inti->emerg.code = s390int->parm; + rc = __inject_sigp_emergency(vcpu, s390int, inti); break; case KVM_S390_MCHK: - VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", - s390int->parm64); - inti->type = s390int->type; - inti->mchk.mcic = s390int->parm64; + rc = __inject_mchk(vcpu, s390int, inti); break; case KVM_S390_INT_PFAULT_INIT: - inti->type = s390int->type; - inti->ext.ext_params2 = s390int->parm64; + rc = __inject_pfault_init(vcpu, s390int, inti); break; case KVM_S390_INT_VIRTIO: case KVM_S390_INT_SERVICE: case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: default: - kfree(inti); - return -EINVAL; + rc = -EINVAL; } - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm, - s390int->parm64, 2); - - li = &vcpu->arch.local_int; - spin_lock(&li->lock); - if (inti->type == KVM_S390_PROGRAM_INT) - list_add(&inti->list, &li->list); - else - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); - if (inti->type == KVM_S390_SIGP_STOP) - li->action_bits |= ACTION_STOP_ON_STOP; - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); spin_unlock(&li->lock); - kvm_s390_vcpu_wakeup(vcpu); - return 0; + if (!rc) + kvm_s390_vcpu_wakeup(vcpu); + else + kfree(inti); + return rc; } void kvm_s390_clear_float_irqs(struct kvm *kvm) -- cgit v1.1 From af43eb2fd76a275a14d0fcbed43dbf650f2c315b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 7 Nov 2014 14:35:55 +0100 Subject: KVM: s390: external param not valid for cpu timer and ckc The 32bit external interrupt parameter is only valid for timing-alert and service-signal interrupts. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 8f50f8c..bccda76 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -345,12 +345,12 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, break; case KVM_S390_INT_CLOCK_COMP: trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->ext.ext_params, 0); + 0, 0); rc = deliver_ckc_interrupt(vcpu); break; case KVM_S390_INT_CPU_TIMER: trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->ext.ext_params, 0); + 0, 0); rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, (u16 *)__LC_EXT_INT_CODE); rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, @@ -358,8 +358,6 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params, - (u32 *)__LC_EXT_PARAMS); break; case KVM_S390_INT_SERVICE: VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", -- cgit v1.1 From 60f90a14dd3e675adfa5c3e0a153696a0230e725 Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Mon, 10 Nov 2014 17:20:07 +0100 Subject: KVM: s390: add defines for virtio and pfault interrupt code Get rid of open coded value for virtio and pfault completion interrupts. 
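The subcodes travel in the external-interrupt CPU-address field of the lowcore, so naming them documents what the magic numbers meant. A tiny standalone illustration (constants copied from the patch below; put_ext_cpu_addr() is a stand-in for put_guest_lc()):

#include <stdint.h>
#include <stdio.h>

/* Subcodes delivered in the external-interrupt CPU-address field,
 * as defined in the patch below. */
#define PFAULT_INIT	0x0600
#define PFAULT_DONE	0x0680
#define VIRTIO_PARAM	0x0d00

/* Stand-in for put_guest_lc(vcpu, val, (u16 *)__LC_EXT_CPU_ADDR). */
static void put_ext_cpu_addr(const char *what, uint16_t val)
{
	printf("%s: ext cpu addr <- 0x%04x\n", what, val);
}

int main(void)
{
	put_ext_cpu_addr("pfault done", PFAULT_DONE);	/* was 0x0680 */
	put_ext_cpu_addr("virtio", VIRTIO_PARAM);	/* was 0x0d00 */
	return 0;
}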
Signed-off-by: Jens Freimann Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index bccda76..481f136 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -27,6 +27,8 @@ #define IOINT_CSSID_MASK 0x03fc0000 #define IOINT_AI_MASK 0x04000000 #define PFAULT_INIT 0x0600 +#define PFAULT_DONE 0x0680 +#define VIRTIO_PARAM 0x0d00 static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu); @@ -391,7 +393,7 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, inti->ext.ext_params2); rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR); + rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); @@ -408,7 +410,7 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, inti->ext.ext_params, inti->ext.ext_params2); rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); + rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); -- cgit v1.1 From 0fb97abe050348bf3bc1796329e75ac522de6b14 Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Tue, 29 Jul 2014 13:45:21 +0200 Subject: KVM: s390: refactor interrupt delivery code Move the delivery code for cpu-local interrupts from the huge do_deliver_interrupt() into smaller functions, each handling one type of interrupt.
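For illustration, a generic self-contained sketch of the refactoring pattern (plain C, all names made up; not the kernel code): each case body becomes a small single-purpose handler, and the former catch-all keeps only the dispatch.

    #include <stdio.h>

    enum irq_type { IRQ_CKC, IRQ_CPU_TIMER };

    /* One small handler per interrupt type... */
    static int deliver_ckc(void)
    {
        puts("deliver clock comparator");
        return 0;
    }

    static int deliver_cpu_timer(void)
    {
        puts("deliver cpu timer");
        return 0;
    }

    /* ...so the big delivery function reduces to pure dispatch. */
    static int do_deliver(enum irq_type type)
    {
        switch (type) {
        case IRQ_CKC:
            return deliver_ckc();
        case IRQ_CPU_TIMER:
            return deliver_cpu_timer();
        }
        return -1;
    }

    int main(void)
    {
        return do_deliver(IRQ_CKC) | do_deliver(IRQ_CPU_TIMER);
    }

The diff below applies the same split to __do_deliver_interrupt(), and a later patch in this series turns the dispatch into a function-pointer table indexed by interrupt type.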
Signed-off-by: Jens Freimann Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 459 ++++++++++++++++++++++++++++------------------ 1 file changed, 282 insertions(+), 177 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 481f136..0d7f0a7 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -30,8 +30,6 @@ #define PFAULT_DONE 0x0680 #define VIRTIO_PARAM 0x0d00 -static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu); - static int is_ioint(u64 type) { return ((type & 0xfffe0000u) != 0xfffe0000u); @@ -228,12 +226,183 @@ static u16 get_ilc(struct kvm_vcpu *vcpu) } } -static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_pgm_info *pgm_info) +static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) +{ + int rc; + + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, + 0, 0); + + rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, + (u16 *)__LC_EXT_INT_CODE); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) +{ + int rc; + + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, + 0, 0); + + rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, + (u16 __user *)__LC_EXT_INT_CODE); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_ext_info *ext = &inti->ext; + int rc; + + VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", + 0, ext->ext_params2); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_INT_PFAULT_INIT, + 0, ext->ext_params2); + + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= put_guest_lc(vcpu, ext->ext_params2, (u64 *) __LC_EXT_PARAMS2); + return rc; +} + +static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_mchk_info *mchk = &inti->mchk; + int rc; + + VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", + mchk->mcic); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, + mchk->cr14, mchk->mcic); + + rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); + rc |= put_guest_lc(vcpu, mchk->mcic, + (u64 __user *) __LC_MCCK_CODE); + rc |= put_guest_lc(vcpu, mchk->failing_storage_address, + (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); + rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, + &mchk->fixed_logout, sizeof(mchk->fixed_logout)); + rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) +{ + int rc; + + VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu 
restart"); + vcpu->stat.deliver_restart_signal++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); + + rc = write_guest_lc(vcpu, + offsetof(struct _lowcore, restart_old_psw), + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_stop(struct kvm_vcpu *vcpu) +{ + VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); + vcpu->stat.deliver_stop_signal++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP, + 0, 0); + + __set_cpuflag(vcpu, CPUSTAT_STOP_INT); + return 0; +} + +static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_prefix_info *prefix = &inti->prefix; + + VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix->address); + vcpu->stat.deliver_prefix_signal++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_SIGP_SET_PREFIX, + prefix->address, 0); + + kvm_s390_set_prefix(vcpu, prefix->address); + return 0; +} + +static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_emerg_info *emerg = &inti->emerg; + int rc; + + VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); + vcpu->stat.deliver_emergency_signal++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, + inti->emerg.code, 0); + + rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, + (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, emerg->code, (u16 *)__LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_extcall_info *extcall = &inti->extcall; + int rc; + + VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); + vcpu->stat.deliver_external_call++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_INT_EXTERNAL_CALL, + extcall->code, 0); + + rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, + (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, extcall->code, (u16 *)__LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) { + struct kvm_s390_pgm_info *pgm_info = &inti->pgm; int rc = 0; u16 ilc = get_ilc(vcpu); + VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", + pgm_info->code, ilc); + vcpu->stat.deliver_program_int++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, + pgm_info->code, 0); + switch (pgm_info->code & ~PGM_PER) { case PGM_AFX_TRANSLATION: case PGM_ASX_TRANSLATION: @@ -306,202 +475,151 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +static int __must_check __deliver_service(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + int rc; + + VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", + inti->ext.ext_params); + vcpu->stat.deliver_service_signal++; + 
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, + inti->ext.ext_params, 0); + + rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= put_guest_lc(vcpu, inti->ext.ext_params, + (u32 *)__LC_EXT_PARAMS); + return rc; +} + +static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + int rc; + + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_INT_PFAULT_DONE, 0, + inti->ext.ext_params2); + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= put_guest_lc(vcpu, inti->ext.ext_params2, + (u64 *)__LC_EXT_PARAMS2); + return rc; +} + +static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + int rc; + + VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", + inti->ext.ext_params, inti->ext.ext_params2); + vcpu->stat.deliver_virtio_interrupt++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, + inti->ext.ext_params, + inti->ext.ext_params2); + + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= put_guest_lc(vcpu, inti->ext.ext_params, + (u32 *)__LC_EXT_PARAMS); + rc |= put_guest_lc(vcpu, inti->ext.ext_params2, + (u64 *)__LC_EXT_PARAMS2); + return rc; +} + +static int __must_check __deliver_io(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + int rc; + + VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); + vcpu->stat.deliver_io_int++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, + ((__u32)inti->io.subchannel_id << 16) | + inti->io.subchannel_nr, + ((__u64)inti->io.io_int_parm << 32) | + inti->io.io_int_word); + + rc = put_guest_lc(vcpu, inti->io.subchannel_id, + (u16 *)__LC_SUBCHANNEL_ID); + rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, + (u16 *)__LC_SUBCHANNEL_NR); + rc |= put_guest_lc(vcpu, inti->io.io_int_parm, + (u32 *)__LC_IO_INT_PARM); + rc |= put_guest_lc(vcpu, inti->io.io_int_word, + (u32 *)__LC_IO_INT_WORD); + rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); return rc; } static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt_info *inti) { - const unsigned short table[] = { 2, 4, 4, 6 }; - int rc = 0; + int rc; switch (inti->type) { case KVM_S390_INT_EMERGENCY: - VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); - vcpu->stat.deliver_emergency_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->emerg.code, 0); - rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, inti->emerg.code, - (u16 *)__LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= 
read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc = __deliver_emergency_signal(vcpu, inti); break; case KVM_S390_INT_EXTERNAL_CALL: - VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); - vcpu->stat.deliver_external_call++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->extcall.code, 0); - rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, inti->extcall.code, - (u16 *)__LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); + rc = __deliver_external_call(vcpu, inti); break; case KVM_S390_INT_CLOCK_COMP: - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - 0, 0); - rc = deliver_ckc_interrupt(vcpu); + rc = __deliver_ckc(vcpu); break; case KVM_S390_INT_CPU_TIMER: - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - 0, 0); - rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, - (u16 *)__LC_EXT_INT_CODE); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc = __deliver_cpu_timer(vcpu); break; case KVM_S390_INT_SERVICE: - VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", - inti->ext.ext_params); - vcpu->stat.deliver_service_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->ext.ext_params, 0); - rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params, - (u32 *)__LC_EXT_PARAMS); + rc = __deliver_service(vcpu, inti); break; case KVM_S390_INT_PFAULT_INIT: - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, - inti->ext.ext_params2); - rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, - (u16 *) __LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params2, - (u64 *) __LC_EXT_PARAMS2); + rc = __deliver_pfault_init(vcpu, inti); break; case KVM_S390_INT_PFAULT_DONE: - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, - inti->ext.ext_params2); - rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params2, - (u64 *)__LC_EXT_PARAMS2); + rc = __deliver_pfault_done(vcpu, inti); break; case KVM_S390_INT_VIRTIO: - VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", - inti->ext.ext_params, inti->ext.ext_params2); - vcpu->stat.deliver_virtio_interrupt++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->ext.ext_params, - inti->ext.ext_params2); - rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, 
- sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params, - (u32 *)__LC_EXT_PARAMS); - rc |= put_guest_lc(vcpu, inti->ext.ext_params2, - (u64 *)__LC_EXT_PARAMS2); + rc = __deliver_virtio(vcpu, inti); break; case KVM_S390_SIGP_STOP: - VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); - vcpu->stat.deliver_stop_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - 0, 0); - __set_intercept_indicator(vcpu, inti); + rc = __deliver_stop(vcpu); break; - case KVM_S390_SIGP_SET_PREFIX: - VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", - inti->prefix.address); - vcpu->stat.deliver_prefix_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->prefix.address, 0); - kvm_s390_set_prefix(vcpu, inti->prefix.address); + rc = __deliver_set_prefix(vcpu, inti); break; - case KVM_S390_RESTART: - VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); - vcpu->stat.deliver_restart_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - 0, 0); - rc = write_guest_lc(vcpu, - offsetof(struct _lowcore, restart_old_psw), - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); + rc = __deliver_restart(vcpu); break; case KVM_S390_PROGRAM_INT: - VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", - inti->pgm.code, - table[vcpu->arch.sie_block->ipa >> 14]); - vcpu->stat.deliver_program_int++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->pgm.code, 0); - rc = __deliver_prog_irq(vcpu, &inti->pgm); + rc = __deliver_prog(vcpu, inti); break; - case KVM_S390_MCHK: - VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", - inti->mchk.mcic); - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->mchk.cr14, - inti->mchk.mcic); - rc = kvm_s390_vcpu_store_status(vcpu, - KVM_S390_STORE_STATUS_PREFIXED); - rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE); - rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc = __deliver_machine_check(vcpu, inti); break; - case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - { - __u32 param0 = ((__u32)inti->io.subchannel_id << 16) | - inti->io.subchannel_nr; - __u64 param1 = ((__u64)inti->io.io_int_parm << 32) | - inti->io.io_int_word; - VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); - vcpu->stat.deliver_io_int++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - param0, param1); - rc = put_guest_lc(vcpu, inti->io.subchannel_id, - (u16 *)__LC_SUBCHANNEL_ID); - rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, - (u16 *)__LC_SUBCHANNEL_NR); - rc |= put_guest_lc(vcpu, inti->io.io_int_parm, - (u32 *)__LC_IO_INT_PARM); - rc |= put_guest_lc(vcpu, inti->io.io_int_word, - (u32 *)__LC_IO_INT_WORD); - rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); + rc = __deliver_io(vcpu, inti); break; - } default: BUG(); } @@ -509,19 +627,6 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, return rc; } -static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu) -{ - int rc; - - rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); - rc |= 
write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, - sizeof(psw_t)); - return rc; -} - /* Check whether SIGP interpretation facility has an external call pending */ int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) { @@ -691,7 +796,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) } if (!rc && kvm_cpu_has_pending_timer(vcpu)) - rc = deliver_ckc_interrupt(vcpu); + rc = __deliver_ckc(vcpu); if (!rc && atomic_read(&fi->active)) { do { -- cgit v1.1 From 383d0b050106abecb82f43101cac94fa423af5cd Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Tue, 29 Jul 2014 15:11:49 +0200 Subject: KVM: s390: handle pending local interrupts via bitmap This patch adapts handling of local interrupts to be more compliant with the z/Architecture Principles of Operation and introduces a data structure which allows more efficient handling of interrupts. * get rid of li->active flag, use bitmap instead * Keep interrupts in a bitmap instead of a list * Deliver interrupts in the order of their priority as defined in the PoP * Use a second bitmap for sigp emergency requests, as a CPU can have one request pending from every other CPU in the system. Signed-off-by: Jens Freimann Reviewed-by: Cornelia Huck Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/intercept.c | 4 +- arch/s390/kvm/interrupt.c | 601 +++++++++++++++++++++++++++------------------- arch/s390/kvm/kvm-s390.c | 14 +- arch/s390/kvm/kvm-s390.h | 5 +- arch/s390/kvm/sigp.c | 36 +-- 5 files changed, 380 insertions(+), 280 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 1d244df..81c77ab 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -257,7 +257,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) static int handle_external_interrupt(struct kvm_vcpu *vcpu) { u16 eic = vcpu->arch.sie_block->eic; - struct kvm_s390_interrupt irq; + struct kvm_s390_irq irq; psw_t newpsw; int rc; @@ -282,7 +282,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) if (kvm_s390_si_ext_call_pending(vcpu)) return 0; irq.type = KVM_S390_INT_EXTERNAL_CALL; - irq.parm = vcpu->arch.sie_block->extcpuaddr; + irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr; break; default: return -EOPNOTSUPP; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 0d7f0a7..1aa7f28 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include "kvm-s390.h" @@ -136,6 +137,31 @@ static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu, return 0; } +static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.local_int.pending_irqs; +} + +static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) +{ + unsigned long active_mask = pending_local_irqs(vcpu); + + if (psw_extint_disabled(vcpu)) + active_mask &= ~IRQ_PEND_EXT_MASK; + if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) + __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); + if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) + __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); + if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) + __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); + if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) + __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); + if 
(psw_mchk_disabled(vcpu)) + active_mask &= ~IRQ_PEND_MCHK_MASK; + + return active_mask; +} + static void __set_cpu_idle(struct kvm_vcpu *vcpu) { atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); @@ -170,26 +196,45 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); } +static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) +{ + if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) + return; + if (psw_extint_disabled(vcpu)) + __set_cpuflag(vcpu, CPUSTAT_EXT_INT); + else + vcpu->arch.sie_block->lctl |= LCTL_CR0; +} + +static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) +{ + if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) + return; + if (psw_mchk_disabled(vcpu)) + vcpu->arch.sie_block->ictl |= ICTL_LPSW; + else + vcpu->arch.sie_block->lctl |= LCTL_CR14; +} + +/* Set interception request for non-deliverable local interrupts */ +static void set_intercept_indicators_local(struct kvm_vcpu *vcpu) +{ + set_intercept_indicators_ext(vcpu); + set_intercept_indicators_mchk(vcpu); +} + static void __set_intercept_indicator(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt_info *inti) { switch (inti->type) { - case KVM_S390_INT_EXTERNAL_CALL: - case KVM_S390_INT_EMERGENCY: case KVM_S390_INT_SERVICE: - case KVM_S390_INT_PFAULT_INIT: case KVM_S390_INT_PFAULT_DONE: case KVM_S390_INT_VIRTIO: - case KVM_S390_INT_CLOCK_COMP: - case KVM_S390_INT_CPU_TIMER: if (psw_extint_disabled(vcpu)) __set_cpuflag(vcpu, CPUSTAT_EXT_INT); else vcpu->arch.sie_block->lctl |= LCTL_CR0; break; - case KVM_S390_SIGP_STOP: - __set_cpuflag(vcpu, CPUSTAT_STOP_INT); - break; case KVM_S390_MCHK: if (psw_mchk_disabled(vcpu)) vcpu->arch.sie_block->ictl |= ICTL_LPSW; @@ -228,6 +273,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu) static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) { + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, @@ -239,11 +285,13 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); return rc; } static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) { + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, @@ -255,20 +303,27 @@ static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); return rc; } -static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) { - struct kvm_s390_ext_info *ext = &inti->ext; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_ext_info ext; int rc; + spin_lock(&li->lock); + ext = li->irq.ext; + clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); + li->irq.ext.ext_params2 = 0; + spin_unlock(&li->lock); + VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", - 0, ext->ext_params2); + 0, ext.ext_params2); trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, - 0, ext->ext_params2); + 0, ext.ext_params2); rc = 
put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); @@ -276,28 +331,40 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, ext->ext_params2, (u64 *) __LC_EXT_PARAMS2); + rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2); return rc; } -static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) { - struct kvm_s390_mchk_info *mchk = &inti->mchk; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_mchk_info mchk; int rc; + spin_lock(&li->lock); + mchk = li->irq.mchk; + /* + * If there was an exigent machine check pending, then any repressible + * machine checks that might have been pending are indicated along + * with it, so always clear both bits + */ + clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); + clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); + memset(&li->irq.mchk, 0, sizeof(mchk)); + spin_unlock(&li->lock); + VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", - mchk->mcic); + mchk.mcic); trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, - mchk->cr14, mchk->mcic); + mchk.cr14, mchk.mcic); rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); - rc |= put_guest_lc(vcpu, mchk->mcic, + rc |= put_guest_lc(vcpu, mchk.mcic, (u64 __user *) __LC_MCCK_CODE); - rc |= put_guest_lc(vcpu, mchk->failing_storage_address, + rc |= put_guest_lc(vcpu, mchk.failing_storage_address, (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, - &mchk->fixed_logout, sizeof(mchk->fixed_logout)); + &mchk.fixed_logout, sizeof(mchk.fixed_logout)); rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, @@ -307,6 +374,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu, static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) { + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); @@ -318,6 +386,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); return rc; } @@ -329,38 +398,52 @@ static int __must_check __deliver_stop(struct kvm_vcpu *vcpu) 0, 0); __set_cpuflag(vcpu, CPUSTAT_STOP_INT); + clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs); return 0; } -static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) { - struct kvm_s390_prefix_info *prefix = &inti->prefix; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_prefix_info prefix; + + spin_lock(&li->lock); + prefix = li->irq.prefix; + li->irq.prefix.address = 0; + clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); + spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix->address); + VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address); vcpu->stat.deliver_prefix_signal++; 
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, - prefix->address, 0); + prefix.address, 0); - kvm_s390_set_prefix(vcpu, prefix->address); + kvm_s390_set_prefix(vcpu, prefix.address); return 0; } -static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) { - struct kvm_s390_emerg_info *emerg = &inti->emerg; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; + int cpu_addr; + + spin_lock(&li->lock); + cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS); + clear_bit(cpu_addr, li->sigp_emerg_pending); + if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS)) + clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); + spin_unlock(&li->lock); VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); vcpu->stat.deliver_emergency_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->emerg.code, 0); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, + cpu_addr, 0); rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, emerg->code, (u16 *)__LC_EXT_CPU_ADDR); + rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR); rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, @@ -368,21 +451,27 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu, return rc; } -static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) { - struct kvm_s390_extcall_info *extcall = &inti->extcall; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_extcall_info extcall; int rc; + spin_lock(&li->lock); + extcall = li->irq.extcall; + li->irq.extcall.code = 0; + clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); + spin_unlock(&li->lock); + VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); vcpu->stat.deliver_external_call++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, - extcall->code, 0); + extcall.code, 0); rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, extcall->code, (u16 *)__LC_EXT_CPU_ADDR); + rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR); rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, @@ -390,20 +479,26 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu, return rc; } -static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) { - struct kvm_s390_pgm_info *pgm_info = &inti->pgm; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_pgm_info pgm_info; int rc = 0; u16 ilc = get_ilc(vcpu); + spin_lock(&li->lock); + pgm_info = li->irq.pgm; + clear_bit(IRQ_PEND_PROG, &li->pending_irqs); + memset(&li->irq.pgm, 0, sizeof(pgm_info)); + spin_unlock(&li->lock); + VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", - pgm_info->code, ilc); + pgm_info.code, ilc); vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, - pgm_info->code, 0); + pgm_info.code, 0); - switch 
(pgm_info->code & ~PGM_PER) { + switch (pgm_info.code & ~PGM_PER) { case PGM_AFX_TRANSLATION: case PGM_ASX_TRANSLATION: case PGM_EX_TRANSLATION: @@ -414,7 +509,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, case PGM_PRIMARY_AUTHORITY: case PGM_SECONDARY_AUTHORITY: case PGM_SPACE_SWITCH: - rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, + rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, (u64 *)__LC_TRANS_EXC_CODE); break; case PGM_ALEN_TRANSLATION: @@ -423,7 +518,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, case PGM_ASTE_SEQUENCE: case PGM_ASTE_VALIDITY: case PGM_EXTENDED_AUTHORITY: - rc = put_guest_lc(vcpu, pgm_info->exc_access_id, + rc = put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); break; case PGM_ASCE_TYPE: @@ -432,44 +527,44 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu, case PGM_REGION_SECOND_TRANS: case PGM_REGION_THIRD_TRANS: case PGM_SEGMENT_TRANSLATION: - rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, + rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, (u64 *)__LC_TRANS_EXC_CODE); - rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, + rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); - rc |= put_guest_lc(vcpu, pgm_info->op_access_id, + rc |= put_guest_lc(vcpu, pgm_info.op_access_id, (u8 *)__LC_OP_ACCESS_ID); break; case PGM_MONITOR: - rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, + rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, (u16 *)__LC_MON_CLASS_NR); - rc |= put_guest_lc(vcpu, pgm_info->mon_code, + rc |= put_guest_lc(vcpu, pgm_info.mon_code, (u64 *)__LC_MON_CODE); break; case PGM_DATA: - rc = put_guest_lc(vcpu, pgm_info->data_exc_code, + rc = put_guest_lc(vcpu, pgm_info.data_exc_code, (u32 *)__LC_DATA_EXC_CODE); break; case PGM_PROTECTION: - rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, + rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, (u64 *)__LC_TRANS_EXC_CODE); - rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, + rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); break; } - if (pgm_info->code & PGM_PER) { - rc |= put_guest_lc(vcpu, pgm_info->per_code, + if (pgm_info.code & PGM_PER) { + rc |= put_guest_lc(vcpu, pgm_info.per_code, (u8 *) __LC_PER_CODE); - rc |= put_guest_lc(vcpu, pgm_info->per_atmid, + rc |= put_guest_lc(vcpu, pgm_info.per_atmid, (u8 *)__LC_PER_ATMID); - rc |= put_guest_lc(vcpu, pgm_info->per_address, + rc |= put_guest_lc(vcpu, pgm_info.per_address, (u64 *) __LC_PER_ADDRESS); - rc |= put_guest_lc(vcpu, pgm_info->per_access_id, + rc |= put_guest_lc(vcpu, pgm_info.per_access_id, (u8 *) __LC_PER_ACCESS_ID); } rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); - rc |= put_guest_lc(vcpu, pgm_info->code, + rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_INT_CODE); rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); @@ -572,50 +667,63 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu, return rc; } -static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_mchk_info *mchk = &inti->mchk; + int rc; + + VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", + mchk->mcic); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, + mchk->cr14, mchk->mcic); + + rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); + rc |= put_guest_lc(vcpu, mchk->mcic, + (u64 
__user *) __LC_MCCK_CODE); + rc |= put_guest_lc(vcpu, mchk->failing_storage_address, + (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); + rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, + &mchk->fixed_logout, sizeof(mchk->fixed_logout)); + rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + return rc; +} + +typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); + +static const deliver_irq_t deliver_irq_funcs[] = { + [IRQ_PEND_MCHK_EX] = __deliver_machine_check, + [IRQ_PEND_PROG] = __deliver_prog, + [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, + [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, + [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, + [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, + [IRQ_PEND_RESTART] = __deliver_restart, + [IRQ_PEND_SIGP_STOP] = __deliver_stop, + [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, + [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, +}; + +static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, + struct kvm_s390_interrupt_info *inti) { int rc; switch (inti->type) { - case KVM_S390_INT_EMERGENCY: - rc = __deliver_emergency_signal(vcpu, inti); - break; - case KVM_S390_INT_EXTERNAL_CALL: - rc = __deliver_external_call(vcpu, inti); - break; - case KVM_S390_INT_CLOCK_COMP: - rc = __deliver_ckc(vcpu); - break; - case KVM_S390_INT_CPU_TIMER: - rc = __deliver_cpu_timer(vcpu); - break; case KVM_S390_INT_SERVICE: rc = __deliver_service(vcpu, inti); break; - case KVM_S390_INT_PFAULT_INIT: - rc = __deliver_pfault_init(vcpu, inti); - break; case KVM_S390_INT_PFAULT_DONE: rc = __deliver_pfault_done(vcpu, inti); break; case KVM_S390_INT_VIRTIO: rc = __deliver_virtio(vcpu, inti); break; - case KVM_S390_SIGP_STOP: - rc = __deliver_stop(vcpu); - break; - case KVM_S390_SIGP_SET_PREFIX: - rc = __deliver_set_prefix(vcpu, inti); - break; - case KVM_S390_RESTART: - rc = __deliver_restart(vcpu); - break; - case KVM_S390_PROGRAM_INT: - rc = __deliver_prog(vcpu, inti); - break; case KVM_S390_MCHK: - rc = __deliver_machine_check(vcpu, inti); + rc = __deliver_mchk_floating(vcpu, inti); break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: rc = __deliver_io(vcpu, inti); @@ -643,20 +751,11 @@ int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) { - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; struct kvm_s390_interrupt_info *inti; - int rc = 0; + int rc; - if (atomic_read(&li->active)) { - spin_lock(&li->lock); - list_for_each_entry(inti, &li->list, list) - if (__interrupt_is_deliverable(vcpu, inti)) { - rc = 1; - break; - } - spin_unlock(&li->lock); - } + rc = !!deliverable_local_irqs(vcpu); if ((!rc) && atomic_read(&fi->active)) { spin_lock(&fi->lock); @@ -748,18 +847,15 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_interrupt_info *n, *inti = NULL; spin_lock(&li->lock); - list_for_each_entry_safe(inti, n, &li->list, list) { - list_del(&inti->list); - kfree(inti); - } - atomic_set(&li->active, 0); + li->pending_irqs = 0; + bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS); + memset(&li->irq, 0, sizeof(li->irq)); spin_unlock(&li->lock); /* clear pending external calls set by sigp interpretation facility */ - 
atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); + atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); atomic_clear_mask(SIGP_CTRL_C, &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); } @@ -769,34 +865,35 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; struct kvm_s390_interrupt_info *n, *inti = NULL; + deliver_irq_t func; int deliver; int rc = 0; + unsigned long irq_type; + unsigned long deliverable_irqs; __reset_intercept_indicators(vcpu); - if (atomic_read(&li->active)) { - do { - deliver = 0; - spin_lock(&li->lock); - list_for_each_entry_safe(inti, n, &li->list, list) { - if (__interrupt_is_deliverable(vcpu, inti)) { - list_del(&inti->list); - deliver = 1; - break; - } - __set_intercept_indicator(vcpu, inti); - } - if (list_empty(&li->list)) - atomic_set(&li->active, 0); - spin_unlock(&li->lock); - if (deliver) { - rc = __do_deliver_interrupt(vcpu, inti); - kfree(inti); - } - } while (!rc && deliver); - } - if (!rc && kvm_cpu_has_pending_timer(vcpu)) - rc = __deliver_ckc(vcpu); + /* pending ckc conditions might have been invalidated */ + clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); + if (kvm_cpu_has_pending_timer(vcpu)) + set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); + + do { + deliverable_irqs = deliverable_local_irqs(vcpu); + /* bits are in the order of interrupt priority */ + irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT); + if (irq_type == IRQ_PEND_COUNT) + break; + func = deliver_irq_funcs[irq_type]; + if (!func) { + WARN_ON_ONCE(func == NULL); + clear_bit(irq_type, &li->pending_irqs); + continue; + } + rc = func(vcpu); + } while (!rc && irq_type != IRQ_PEND_COUNT); + + set_intercept_indicators_local(vcpu); if (!rc && atomic_read(&fi->active)) { do { @@ -815,7 +912,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) atomic_set(&fi->active, 0); spin_unlock(&fi->lock); if (deliver) { - rc = __do_deliver_interrupt(vcpu, inti); + rc = __deliver_floating_interrupt(vcpu, inti); kfree(inti); } } while (!rc && deliver); @@ -824,33 +921,26 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) return rc; } -static int __inject_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - list_add(&inti->list, &li->list); - atomic_set(&li->active, 1); + li->irq.pgm = irq->u.pgm; + __set_bit(IRQ_PEND_PROG, &li->pending_irqs); return 0; } int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_interrupt_info *inti; - - inti = kzalloc(sizeof(*inti), GFP_KERNEL); - if (!inti) - return -ENOMEM; - - inti->type = KVM_S390_PROGRAM_INT; - inti->pgm.code = code; + struct kvm_s390_irq irq; VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code, + 0, 1); spin_lock(&li->lock); - list_add(&inti->list, &li->list); - atomic_set(&li->active, 1); + irq.u.pgm.code = code; + __inject_prog(vcpu, &irq); BUG_ON(waitqueue_active(li->wq)); spin_unlock(&li->lock); return 0; @@ -860,151 +950,158 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, struct 
kvm_s390_pgm_info *pgm_info) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_interrupt_info *inti; + struct kvm_s390_irq irq; int rc; - inti = kzalloc(sizeof(*inti), GFP_KERNEL); - if (!inti) - return -ENOMEM; - VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", pgm_info->code); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, pgm_info->code, 0, 1); - - inti->type = KVM_S390_PROGRAM_INT; - memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); spin_lock(&li->lock); - rc = __inject_prog_irq(vcpu, inti); + irq.u.pgm = *pgm_info; + rc = __inject_prog(vcpu, &irq); BUG_ON(waitqueue_active(li->wq)); spin_unlock(&li->lock); return rc; } -static int __inject_pfault_init(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - inti->ext.ext_params2 = s390int->parm64; - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx", + irq->u.ext.ext_params, irq->u.ext.ext_params2); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, + irq->u.ext.ext_params, + irq->u.ext.ext_params2, 2); + + li->irq.ext = irq->u.ext; + set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } -static int __inject_extcall(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_extcall_info *extcall = &li->irq.extcall; VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", - s390int->parm); - if (s390int->parm & 0xffff0000) - return -EINVAL; - inti->extcall.code = s390int->parm; - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + irq->u.extcall.code); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, + irq->u.extcall.code, 0, 2); + + *extcall = irq->u.extcall; + __set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } -static int __inject_set_prefix(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_prefix_info *prefix = &li->irq.prefix; VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", - s390int->parm); - inti->prefix.address = s390int->parm; - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + prefix->address); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, + prefix->address, 0, 2); + + *prefix = irq->u.prefix; + set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); return 0; } -static int __inject_sigp_stop(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2); + li->action_bits |= ACTION_STOP_ON_STOP; + set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); return 0; } 
static int __inject_sigp_restart(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) + struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2); + + set_bit(IRQ_PEND_RESTART, &li->pending_irqs); return 0; } static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) + struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_emerg_info *emerg = &li->irq.emerg; - VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); - if (s390int->parm & 0xffff0000) - return -EINVAL; - inti->emerg.code = s390int->parm; - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", + irq->u.emerg.code); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, + emerg->code, 0, 2); + + set_bit(emerg->code, li->sigp_emerg_pending); + set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } -static int __inject_mchk(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_mchk_info *mchk = &li->irq.mchk; VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", - s390int->parm64); - inti->mchk.mcic = s390int->parm64; - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + mchk->mcic); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, + mchk->mcic, 2); + + /* + * Combine mcic with previously injected machine checks and + * indicate them all together as described in the Principles + * of Operation, Chapter 11, Interruption action + */ + mchk->mcic |= irq->u.mchk.mcic; + if (mchk->mcic & MCHK_EX_MASK) + set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); + else if (mchk->mcic & MCHK_REP_MASK) + set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); return 0; } -static int __inject_ckc(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +static int __inject_ckc(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, + 0, 0, 2); + + set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } -static int __inject_cpu_timer(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int, - struct kvm_s390_interrupt_info *inti) +static int __inject_cpu_timer(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, + 0, 0, 2); + + 
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } + struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid) { @@ -1169,58 +1266,74 @@ void kvm_s390_reinject_io_int(struct kvm *kvm, __inject_vm(kvm, inti); } -int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int) +int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, + struct kvm_s390_irq *irq) +{ + irq->type = s390int->type; + switch (irq->type) { + case KVM_S390_PROGRAM_INT: + if (s390int->parm & 0xffff0000) + return -EINVAL; + irq->u.pgm.code = s390int->parm; + break; + case KVM_S390_SIGP_SET_PREFIX: + irq->u.prefix.address = s390int->parm; + break; + case KVM_S390_INT_EXTERNAL_CALL: + if (irq->u.extcall.code & 0xffff0000) + return -EINVAL; + irq->u.extcall.code = s390int->parm; + break; + case KVM_S390_INT_EMERGENCY: + if (irq->u.emerg.code & 0xffff0000) + return -EINVAL; + irq->u.emerg.code = s390int->parm; + break; + case KVM_S390_MCHK: + irq->u.mchk.mcic = s390int->parm64; + break; + } + return 0; +} + +int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_interrupt_info *inti; int rc; - inti = kzalloc(sizeof(*inti), GFP_KERNEL); - if (!inti) - return -ENOMEM; - - inti->type = s390int->type; - - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, - s390int->parm, 0, 2); spin_lock(&li->lock); - switch (inti->type) { + switch (irq->type) { case KVM_S390_PROGRAM_INT: VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", - s390int->parm); - inti->pgm.code = s390int->parm; - if (s390int->parm & 0xffff0000) - rc = -EINVAL; - else - rc = __inject_prog_irq(vcpu, inti); + irq->u.pgm.code); + rc = __inject_prog(vcpu, irq); break; case KVM_S390_SIGP_SET_PREFIX: - rc = __inject_set_prefix(vcpu, s390int, inti); + rc = __inject_set_prefix(vcpu, irq); break; case KVM_S390_SIGP_STOP: - rc = __inject_sigp_stop(vcpu, s390int, inti); + rc = __inject_sigp_stop(vcpu, irq); break; case KVM_S390_RESTART: - rc = __inject_sigp_restart(vcpu, s390int, inti); + rc = __inject_sigp_restart(vcpu, irq); break; case KVM_S390_INT_CLOCK_COMP: - rc = __inject_ckc(vcpu, s390int, inti); + rc = __inject_ckc(vcpu); break; case KVM_S390_INT_CPU_TIMER: - rc = __inject_cpu_timer(vcpu, s390int, inti); + rc = __inject_cpu_timer(vcpu); break; case KVM_S390_INT_EXTERNAL_CALL: - rc = __inject_extcall(vcpu, s390int, inti); + rc = __inject_extcall(vcpu, irq); break; case KVM_S390_INT_EMERGENCY: - rc = __inject_sigp_emergency(vcpu, s390int, inti); + rc = __inject_sigp_emergency(vcpu, irq); break; case KVM_S390_MCHK: - rc = __inject_mchk(vcpu, s390int, inti); + rc = __inject_mchk(vcpu, irq); break; case KVM_S390_INT_PFAULT_INIT: - rc = __inject_pfault_init(vcpu, s390int, inti); + rc = __inject_pfault_init(vcpu, irq); break; case KVM_S390_INT_VIRTIO: case KVM_S390_INT_SERVICE: @@ -1231,8 +1344,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, spin_unlock(&li->lock); if (!rc) kvm_s390_vcpu_wakeup(vcpu); - else - kfree(inti); return rc; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 06878bd..f66591e 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -719,7 +719,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, } spin_lock_init(&vcpu->arch.local_int.lock); - INIT_LIST_HEAD(&vcpu->arch.local_int.list); vcpu->arch.local_int.float_int = &kvm->arch.float_int; vcpu->arch.local_int.wq = &vcpu->wq; 
vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; @@ -1122,13 +1121,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, unsigned long token) { struct kvm_s390_interrupt inti; - inti.parm64 = token; + struct kvm_s390_irq irq; if (start_token) { - inti.type = KVM_S390_INT_PFAULT_INIT; - WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); + irq.u.ext.ext_params2 = token; + irq.type = KVM_S390_INT_PFAULT_INIT; + WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); } else { inti.type = KVM_S390_INT_PFAULT_DONE; + inti.parm64 = token; WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); } } @@ -1622,11 +1623,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, switch (ioctl) { case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; + struct kvm_s390_irq s390irq; r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) break; - r = kvm_s390_inject_vcpu(vcpu, &s390int); + if (s390int_to_s390irq(&s390int, &s390irq)) + return -EINVAL; + r = kvm_s390_inject_vcpu(vcpu, &s390irq); break; } case KVM_S390_STORE_STATUS: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index ff8d977..a8f3d9b 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -142,7 +142,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm); int __must_check kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int); int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int); + struct kvm_s390_irq *irq); int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid); @@ -224,6 +224,9 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); } +int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, + struct kvm_s390_irq *s390irq); + /* implemented in interrupt.c */ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int psw_extint_disabled(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index f7cd3f7..6651f9f 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -49,13 +49,13 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { - struct kvm_s390_interrupt s390int = { + struct kvm_s390_irq irq = { .type = KVM_S390_INT_EMERGENCY, - .parm = vcpu->vcpu_id, + .u.emerg.code = vcpu->vcpu_id, }; int rc = 0; - rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); if (!rc) VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", dst_vcpu->vcpu_id); @@ -98,13 +98,13 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, static int __sigp_external_call(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { - struct kvm_s390_interrupt s390int = { + struct kvm_s390_irq irq = { .type = KVM_S390_INT_EXTERNAL_CALL, - .parm = vcpu->vcpu_id, + .u.extcall.code = vcpu->vcpu_id, }; int rc; - rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); if (!rc) VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", dst_vcpu->vcpu_id); @@ -115,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) { struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; - struct kvm_s390_interrupt_info *inti; int rc = SIGP_CC_ORDER_CODE_ACCEPTED; - inti = 
From fc2020cfe9f8102d17dad79ed96dc68a9d84b19e Mon Sep 17 00:00:00 2001
From: Jens Freimann
Date: Wed, 13 Aug 2014 10:09:04 +0200
Subject: KVM: s390: allow injecting all kinds of machine checks

Allow specifying CR14, the logout area, the external damage code
and the failing storage address. Since more than one machine check
can be indicated to the guest at a time, we need to combine all
indication bits with the already pending requests.

Signed-off-by: Jens Freimann
Reviewed-by: Cornelia Huck
Reviewed-by: David Hildenbrand
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/interrupt.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

(limited to 'arch/s390/kvm')

diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 1aa7f28..b3d4409 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1063,11 +1063,19 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 				   mchk->mcic, 2);
 
 	/*
-	 * Combine mcic with previously injected machine checks and
-	 * indicate them all together as described in the Principles
-	 * of Operation, Chapter 11, Interruption action
+	 * Because repressible machine checks can be indicated along with
+	 * exigent machine checks (PoP, Chapter 11, Interruption action)
+	 * we need to combine cr14, mcic and external damage code.
+	 * Failing storage address and the logout area should not be or'ed
+	 * together, we just indicate the last occurrence of the corresponding
+	 * machine check
 	 */
+	mchk->cr14 |= irq->u.mchk.cr14;
 	mchk->mcic |= irq->u.mchk.mcic;
+	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
+	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
+	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
+	       sizeof(mchk->fixed_logout));
 	if (mchk->mcic & MCHK_EX_MASK)
 		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
 	else if (mchk->mcic & MCHK_REP_MASK)
-- cgit v1.1
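
The hunk above encodes one accumulation rule per field class: indication bit masks are or'ed so that no pending machine check is lost, while single-valued fields simply keep the most recent occurrence. A compact stand-alone illustration of the same logic; struct mchk_state and combine_mchk() are simplified stand-ins invented here, not the kernel's types:

#include <stdint.h>
#include <string.h>

struct mchk_state {
	uint64_t cr14;                    /* indication bits    */
	uint64_t mcic;                    /* indication bits    */
	uint32_t ext_damage_code;         /* indication bits    */
	uint64_t failing_storage_address; /* single value       */
	uint8_t  fixed_logout[16];        /* single logout area */
};

/* bit-mask fields accumulate across pending machine checks;
 * single-valued fields keep only the last occurrence */
static void combine_mchk(struct mchk_state *pending,
			 const struct mchk_state *new_mchk)
{
	pending->cr14 |= new_mchk->cr14;
	pending->mcic |= new_mchk->mcic;
	pending->ext_damage_code |= new_mchk->ext_damage_code;
	pending->failing_storage_address = new_mchk->failing_storage_address;
	memcpy(pending->fixed_logout, new_mchk->fixed_logout,
	       sizeof(pending->fixed_logout));
}

int main(void)
{
	struct mchk_state pend = { .mcic = 1ULL << 46 };
	struct mchk_state next = { .mcic = 1ULL << 20,
				   .failing_storage_address = 0x2000 };

	combine_mchk(&pend, &next);
	/* both mcic bits remain indicated; the address is the newest one */
	return pend.mcic == ((1ULL << 46) | (1ULL << 20)) ? 0 : 1;
}
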
From 467fc29892b8d563592d17d7128296495b6cf335 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Mon, 1 Dec 2014 12:02:44 +0100
Subject: KVM: s390: some ext irqs have to clear the ext cpu addr

The cpu address of a source cpu (responsible for an external irq) is
only to be stored if bit 6 of the ext irq code is set. If bit 6 is
not set, it is to be zeroed out. The special external irq code used
for virtio and pfault uses the cpu addr as a parameter field. As bit
6 is set, this implementation is correct.

Reviewed-by: Thomas Huth
Signed-off-by: David Hildenbrand
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/interrupt.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/s390/kvm')

diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b3d4409..6c0d14b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -281,6 +281,7 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
 	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
 			   (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
@@ -299,6 +300,7 @@ static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
 	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
 			   (u16 __user *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
@@ -585,6 +587,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
 					  inti->ext.ext_params, 0);
 	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG,
 			   (u16 *)__LC_EXT_INT_CODE);
+	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-- cgit v1.1
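
The "bit 6" in the message above uses the s390 convention of numbering bits from the most significant bit down, so bit 6 of the 16-bit external-interrupt code corresponds to mask 0x0200. A small self-contained sketch of the resulting rule; the helper name and the example code values (0x1202 external call, 0x1005 cpu timer, per the architecture definitions) are our additions, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* bit 6 of 16, counted from the MSB: 0x8000 >> 6 == 0x0200 */
#define EXT_CODE_HAS_CPU_ADDR 0x0200

/* value that should land in the ext cpu addr lowcore field for a
 * given external-interrupt code and source cpu address */
static uint16_t ext_cpu_addr_to_store(uint16_t ext_int_code,
				      uint16_t src_cpu_addr)
{
	return (ext_int_code & EXT_CODE_HAS_CPU_ADDR) ? src_cpu_addr : 0;
}

int main(void)
{
	printf("%#x\n", ext_cpu_addr_to_store(0x1202, 5)); /* 0x5: stored */
	printf("%#x\n", ext_cpu_addr_to_store(0x1005, 5)); /* 0: zeroed  */
	return 0;
}
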
From 9185124e8754e54f3ae03ef3b3eab628aa066ef6 Mon Sep 17 00:00:00 2001
From: Jens Freimann
Date: Mon, 1 Dec 2014 16:43:40 +0100
Subject: KVM: s390: use atomic bitops to access pending_irqs bitmap

Currently we use a mixture of atomic/non-atomic bitops and the
local_int spin lock to protect the pending_irqs bitmap and interrupt
payload data. We need to use atomic bitops for the pending_irqs
bitmap everywhere and in addition acquire the local_int lock where
interrupt data needs to be protected.

Signed-off-by: Jens Freimann
Reviewed-by: David Hildenbrand
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/interrupt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/s390/kvm')

diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 6c0d14b..86bc89a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -929,7 +929,7 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
 	li->irq.pgm = irq->u.pgm;
-	__set_bit(IRQ_PEND_PROG, &li->pending_irqs);
+	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
 	return 0;
 }
 
@@ -995,7 +995,7 @@ int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 				   irq->u.extcall.code, 0, 2);
 
 	*extcall = irq->u.extcall;
-	__set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+	set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
-- cgit v1.1
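
For readers outside the kernel tree: __set_bit() is the non-atomic variant (a plain read-modify-write, so callers must provide their own exclusion), while set_bit() performs an atomic read-modify-write and is safe against concurrent setters of other bits in the same word. A userspace C11 analogue of the distinction; the sketch_* names are our own, and the real kernel helpers operate on arrays of unsigned long:

#include <stdatomic.h>
#include <stdio.h>

/* non-atomic: racy unless the caller holds a lock that excludes
 * all other writers of the word */
static void sketch__set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

/* atomic: safe against concurrent setters on other cpus */
static void sketch_set_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

int main(void)
{
	unsigned long plain = 0;
	_Atomic unsigned long shared = 0;

	sketch__set_bit(3, &plain);
	sketch_set_bit(3, &shared);
	printf("%lx %lx\n", plain, atomic_load(&shared)); /* 8 8 */
	return 0;
}
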
From 99e20009aeee47049900ac152d7a88d4f68697d3 Mon Sep 17 00:00:00 2001
From: Jens Freimann
Date: Mon, 1 Dec 2014 17:05:39 +0100
Subject: KVM: s390: clean up return code handling in irq delivery code

Instead of returning a possibly random or'ed-together value, let's
always return -EFAULT if rc is set.

Signed-off-by: Jens Freimann
Reviewed-by: David Hildenbrand
Acked-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/interrupt.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

(limited to 'arch/s390/kvm')

diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 86bc89a..f00f31e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -287,7 +287,7 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
@@ -306,7 +306,7 @@ static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
@@ -334,7 +334,7 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
@@ -371,7 +371,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
@@ -389,7 +389,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
@@ -450,7 +450,7 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
@@ -478,7 +478,7 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
@@ -572,7 +572,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
@@ -594,7 +594,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
 			   (u32 *)__LC_EXT_PARAMS);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
@@ -614,7 +614,7 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
 			   (u64 *)__LC_EXT_PARAMS2);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
@@ -639,7 +639,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
 			   (u32 *)__LC_EXT_PARAMS);
 	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
 			   (u64 *)__LC_EXT_PARAMS2);
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
@@ -667,7 +667,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
@@ -692,7 +692,7 @@ static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	return rc;
+	return rc ? -EFAULT : 0;
 }
 
 typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
-- cgit v1.1
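
A closing note on the pattern this last commit removes: the __deliver_*() helpers accumulate step results with rc |= ..., and or'ing two distinct negative errnos yields a meaningless number (for example, -EFAULT | -EINVAL is -6, which reads as -ENXIO), so callers could previously only test rc for zero versus non-zero. Collapsing to a fixed -EFAULT on the return path restores a well-defined error code. A stand-alone illustration; write_step() is a made-up stand-in for the put/read/write guest-lowcore helpers, not a kernel function:

#include <errno.h>
#include <stdio.h>

/* stand-in for put_guest_lc()/write_guest_lc(): fails with a given errno */
static int write_step(int fail_errno)
{
	return fail_errno ? -fail_errno : 0;
}

static int deliver(void)
{
	int rc;

	rc  = write_step(EFAULT);
	rc |= write_step(EINVAL);  /* rc is now -EFAULT | -EINVAL: garbage */
	return rc ? -EFAULT : 0;   /* collapse to one well-defined code */
}

int main(void)
{
	printf("%d\n", deliver()); /* always -14 (-EFAULT) on failure */
	return 0;
}
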