From b75f4c9afac2604feb971441116c07a24ecca1ec Mon Sep 17 00:00:00 2001 From: Ekaterina Tumanova Date: Tue, 3 Mar 2015 09:54:41 +0100 Subject: KVM: s390: Zero out current VMDB of STSI before including level3 data. s390 documentation requires words 0 and 10-15 to be reserved and stored as zeros. As we fill out all other fields, we can memset the full structure. Signed-off-by: Ekaterina Tumanova Cc: stable@vger.kernel.org Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 3511169..c7fee9d 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -467,6 +467,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) for (n = mem->count - 1; n > 0 ; n--) memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); + memset(&mem->vm[0], 0, sizeof(mem->vm[0])); mem->vm[0].cpus_total = cpus; mem->vm[0].cpus_configured = cpus; mem->vm[0].cpus_standby = 0; -- cgit v1.1 From 261520dcfcba93ca5dfe671b88ffab038cd940c8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Feb 2015 15:53:42 +0100 Subject: KVM: s390: fix handling of write errors in the tpi handler If the I/O interrupt could not be written to the guest provided area (e.g. access exception), a program exception was injected into the guest but "inti" wasn't freed, therefore resulting in a memory leak. In addition, the I/O interrupt wasn't reinjected. Therefore the dequeued interrupt is lost. This patch fixes the problem while cleaning up the function and making the cc and rc logic easier to handle. Signed-off-by: David Hildenbrand Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c7fee9d..be7138e 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu) struct kvm_s390_interrupt_info *inti; unsigned long len; u32 tpi_data[3]; - int cc, rc; + int rc; u64 addr; - rc = 0; addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - cc = 0; + inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); - if (!inti) - goto no_interrupt; - cc = 1; + if (!inti) { + kvm_s390_set_psw_cc(vcpu, 0); + return 0; + } + tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; tpi_data[1] = inti->io.io_int_parm; tpi_data[2] = inti->io.io_int_word; @@ -251,30 +252,35 @@ static int handle_tpi(struct kvm_vcpu *vcpu) */ len = sizeof(tpi_data) - 4; rc = write_guest(vcpu, addr, &tpi_data, len); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); + if (rc) { + rc = kvm_s390_inject_prog_cond(vcpu, rc); + goto reinject_interrupt; + } } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. 
*/ len = sizeof(tpi_data); - if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) + if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { + /* failed writes to the low core are not recoverable */ rc = -EFAULT; + goto reinject_interrupt; + } } + + /* irq was successfully handed to the guest */ + kfree(inti); + kvm_s390_set_psw_cc(vcpu, 1); + return 0; +reinject_interrupt: /* * If we encounter a problem storing the interruption code, the * instruction is suppressed from the guest's view: reinject the * interrupt. */ - if (!rc) - kfree(inti); - else - kvm_s390_reinject_io_int(vcpu->kvm, inti); -no_interrupt: - /* Set condition code and we're done. */ - if (!rc) - kvm_s390_set_psw_cc(vcpu, cc); + kvm_s390_reinject_io_int(vcpu->kvm, inti); + /* don't set the cc, a pgm irq was injected or we drop to user space */ return rc ? -EFAULT : 0; } -- cgit v1.1 From 15462e37ca848abac7477dece65f8af25febd744 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Feb 2015 15:59:11 +0100 Subject: KVM: s390: reinjection of irqs can fail in the tpi handler The reinjection of an I/O interrupt can fail if the list is at the limit and between the dequeue and the reinjection, another I/O interrupt is injected (e.g. if user space floods kvm with I/O interrupts). This patch avoids this memory leak and returns -EFAULT in this special case. This error is not recoverable, so let's fail hard. This can later be avoided by not dequeuing the interrupt but working directly on the locked list. Signed-off-by: David Hildenbrand Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 4 ++-- arch/s390/kvm/kvm-s390.h | 4 ++-- arch/s390/kvm/priv.c | 5 ++++- 3 files changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 073b5f3..e7a46e8 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1332,10 +1332,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, return rc; } -void kvm_s390_reinject_io_int(struct kvm *kvm, +int kvm_s390_reinject_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { - __inject_vm(kvm, inti); + return __inject_vm(kvm, inti); } int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c34109a..6995a30 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid); -void kvm_s390_reinject_io_int(struct kvm *kvm, - struct kvm_s390_interrupt_info *inti); +int kvm_s390_reinject_io_int(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti); int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); /* implemented in intercept.c */ diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index be7138e..b982fbc 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -279,7 +279,10 @@ reinject_interrupt: * instruction is suppressed from the guest's view: reinject the * interrupt. */ - kvm_s390_reinject_io_int(vcpu->kvm, inti); + if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { + kfree(inti); + rc = -EFAULT; + } /* don't set the cc, a pgm irq was injected or we drop to user space */ return rc ? 
-EFAULT : 0; } -- cgit v1.1 From a9a846fd5c1723820c97cef56989ea14eea4b30e Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 5 Feb 2015 09:06:56 +0100 Subject: KVM: s390: Nullify instruction for certain program exceptions When certain program exceptions (e.g. DAT access exceptions) occur, the current instruction has to be nullified, i.e. the old PSW that gets written into the low-core has to point to the beginning of the instruction again, and not to the beginning of the next instruction. Thus we have to rewind the PSW before writing it into the low-core. The list of nullifying exceptions can be found in the POP, chapter 6, figure 6-1 ("Interruption Action"). Signed-off-by: Thomas Huth Reviewed-by: Jens Freimann Reviewed-by: David Hildenbrand Acked-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index e7a46e8..98a3131 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -484,7 +484,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_pgm_info pgm_info; - int rc = 0; + int rc = 0, nullifying = false; u16 ilc = get_ilc(vcpu); spin_lock(&li->lock); @@ -509,6 +509,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) case PGM_LX_TRANSLATION: case PGM_PRIMARY_AUTHORITY: case PGM_SECONDARY_AUTHORITY: + nullifying = true; + /* fall through */ case PGM_SPACE_SWITCH: rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, (u64 *)__LC_TRANS_EXC_CODE); @@ -521,6 +523,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) case PGM_EXTENDED_AUTHORITY: rc = put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); + nullifying = true; break; case PGM_ASCE_TYPE: case PGM_PAGE_TRANSLATION: @@ -534,6 +537,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) (u8 *)__LC_EXC_ACCESS_ID); rc |= put_guest_lc(vcpu, pgm_info.op_access_id, (u8 *)__LC_OP_ACCESS_ID); + nullifying = true; break; case PGM_MONITOR: rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, @@ -551,6 +555,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); break; + case PGM_STACK_FULL: + case PGM_STACK_EMPTY: + case PGM_STACK_SPECIFICATION: + case PGM_STACK_TYPE: + case PGM_STACK_OPERATION: + case PGM_TRACE_TABEL: + case PGM_CRYPTO_OPERATION: + nullifying = true; + break; } if (pgm_info.code & PGM_PER) { @@ -564,6 +577,9 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) (u8 *) __LC_PER_ACCESS_ID); } + if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) + kvm_s390_rewind_psw(vcpu, ilc); + rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_INT_CODE); -- cgit v1.1 From 492d8642eaefbd47f6fb0e8265f058c02720e5c8 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 10 Feb 2015 16:11:01 +0100 Subject: KVM: s390: Forward PSW to next instruction for addressing exceptions When the SIE exited by a DAT access exceptions which we can not resolve, the guest tried to access a page which is out of bounds and can not be paged-in. In this case we have to signal the bad access by injecting an address exception. However, address exceptions are either suppressing or terminating, i.e. 
the PSW has to point to the next instruction when the exception is delivered. Since the originating DAT access exception is nullifying, the PSW still points to the offending instruction instead, so we've got to forward the PSW to the next instruction. Having fixed this issue, we can now also enable the TPROT interpretation facility again which had been disabled because of this problem. Signed-off-by: Thomas Huth Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f6579cf..7ac40aa 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1148,8 +1148,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= 1; if (sclp_has_sigpif()) vcpu->arch.sie_block->eca |= 0x10000000U; - vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | - ICTL_TPROT; + vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; if (kvm_s390_cmma_enabled(vcpu->kvm)) { rc = kvm_s390_vcpu_setup_cmma(vcpu); @@ -1726,6 +1725,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) return 0; } +static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) +{ + psw_t *psw = &vcpu->arch.sie_block->gpsw; + u8 opcode; + int rc; + + VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); + trace_kvm_s390_sie_fault(vcpu); + + /* + * We want to inject an addressing exception, which is defined as a + * suppressing or terminating exception. However, since we came here + * by a DAT access exception, the PSW still points to the faulting + * instruction since DAT exceptions are nullifying. So we've got + * to look up the current opcode to get the length of the instruction + * to be able to forward the PSW. + */ + rc = read_guest(vcpu, psw->addr, &opcode, 1); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); + psw->addr = __rewind_psw(*psw, -insn_length(opcode)); + + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); +} + static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) { int rc = -1; @@ -1757,11 +1781,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) } } - if (rc == -1) { - VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); - trace_kvm_s390_sie_fault(vcpu); - rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - } + if (rc == -1) + rc = vcpu_post_run_fault_in_sie(vcpu); memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); -- cgit v1.1 From 33b412acd32d403a8de9511f236f9b4f31551868 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 11 Feb 2015 10:38:46 +0100 Subject: KVM: s390: Use insn_length() to calculate length of instruction The common s390 function insn_length() results in slightly smaller (and thus hopefully faster) code than the calculation of the instruction length via a lookup-table. So let's use that function in the interrupt delivery code, too. Signed-off-by: Thomas Huth Reviewed-by: Jens Freimann Reviewed-by: David Hildenbrand Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 98a3131..9561e1d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1,7 +1,7 @@ /* * handling kvm guest interrupts * - * Copyright IBM Corp. 2008,2014 + * Copyright IBM Corp. 
2008, 2015 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include "kvm-s390.h" @@ -265,8 +266,6 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, static u16 get_ilc(struct kvm_vcpu *vcpu) { - const unsigned short table[] = { 2, 4, 4, 6 }; - switch (vcpu->arch.sie_block->icptcode) { case ICPT_INST: case ICPT_INSTPROGI: @@ -274,7 +273,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu) case ICPT_PARTEXEC: case ICPT_IOINST: /* last instruction only stored for these icptcodes */ - return table[vcpu->arch.sie_block->ipa >> 14]; + return insn_length(vcpu->arch.sie_block->ipa >> 8); case ICPT_PROGI: return vcpu->arch.sie_block->pgmilc; default: -- cgit v1.1 From 91520f1af8a01d349d19911238fc3dbed3fa58d2 Mon Sep 17 00:00:00 2001 From: Michael Mueller Date: Fri, 27 Feb 2015 14:32:11 +0100 Subject: KVM: s390: perform vcpu model setup in a function The function kvm_s390_vcpu_setup_model() now performs all cpu model realated setup tasks for a vcpu. Besides cpuid and ibc initialization, facility list assignment takes place during the setup step as well. The model setup has been pulled to the begin of vcpu setup to allow kvm facility tests. There is no need to protect the cpu model setup with a lock since the attributes can't be changed anymore as soon the first vcpu is online. Signed-off-by: Michael Mueller Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7ac40aa..f3517e7 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1130,6 +1130,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) return 0; } +static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; + + vcpu->arch.cpu_id = model->cpu_id; + vcpu->arch.sie_block->ibc = model->ibc; + vcpu->arch.sie_block->fac = (int) (long) model->fac->list; +} + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int rc = 0; @@ -1138,6 +1147,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_SM | CPUSTAT_STOPPED | CPUSTAT_GED); + kvm_s390_vcpu_setup_model(vcpu); + vcpu->arch.sie_block->ecb = 6; if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) vcpu->arch.sie_block->ecb |= 0x10; @@ -1158,11 +1169,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; - mutex_lock(&vcpu->kvm->lock); - vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; - vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; - mutex_unlock(&vcpu->kvm->lock); - kvm_s390_vcpu_crypto_setup(vcpu); return rc; @@ -1205,7 +1211,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); } - vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; spin_lock_init(&vcpu->arch.local_int.lock); vcpu->arch.local_int.float_int = &kvm->arch.float_int; -- cgit v1.1 From 16b0fc13d60293ce073c8f9baa53469744b4d74a Mon Sep 17 00:00:00 2001 From: Yannick Guerrini Date: Thu, 26 Feb 2015 23:16:44 +0100 Subject: KVM: s390: Fix trivial typo in comments Change 'architecuture' to 'architecture' 
Signed-off-by: Yannick Guerrini Message-Id: <1424989004-14412-1-git-send-email-yguerrini@tomshardware.fr> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 267523c..633fe9b 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -333,7 +333,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) * @write: indicates if access is a write access * * Translate a guest virtual address into a guest absolute address by means - * of dynamic address translation as specified by the architecuture. + * of dynamic address translation as specified by the architecture. * If the resulting absolute address is not available in the configuration * an addressing exception is indicated and @gpa will not be changed. * -- cgit v1.1 From 16a0c4c3aa68423b306fb82f6b9a2794e76f6fc0 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 21 Nov 2014 15:39:06 +0100 Subject: KVM: s390: fix instruction interception trace point trace-cmd fails to parse the instruction interception trace point: "Error: expected type 5 but read 4 failed to read event print fmt for kvm_s390_intercept_instruction" The result is an unformatted string in the output, with a warning: "kvm_s390_intercept_instruction: [FAILED TO PARSE]..." So let's add parentheses around the instruction parser macro to fix the format parsing. Acked-by: Christian Borntraeger Acked-by: Cornelia Huck Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/sie.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index d4096fd..ee69c08 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h @@ -230,7 +230,7 @@ * and returns a key, which can be used to find a mnemonic name * of the instruction in the icpt_insn_codes table. */ -#define icpt_insn_decoder(insn) \ +#define icpt_insn_decoder(insn) ( \ INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \ INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \ INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \ @@ -239,6 +239,6 @@ INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \ INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \ INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \ - INSN_DECODE(insn) + INSN_DECODE(insn)) #endif /* _UAPI_ASM_S390_SIE_H */ -- cgit v1.1 From 1f289a8429022f112be9817a81ff07308eb78a9c Mon Sep 17 00:00:00 2001 From: Alexander Yarygin Date: Tue, 3 Mar 2015 19:05:43 +0300 Subject: KVM: s390: Use the read_guest_abs() in guest debug functions The guest debug functions work on absolute addresses and should use the read_guest_abs() function rather than general read_guest() that works with logical addresses. 
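For context, a logical address is still subject to dynamic address translation and prefixing, while an absolute address is the final view after both steps. The prefixing step alone looks like this (a simplified sketch of kvm_s390_real_to_abs() from arch/s390/kvm/gaccess.h):

	static inline unsigned long real_to_abs(struct kvm_vcpu *vcpu,
						unsigned long gra)
	{
		unsigned long prefix = kvm_s390_get_prefix(vcpu);

		/* guest pages 0-1 swap with the prefix area */
		if (gra < 2 * PAGE_SIZE)
			gra += prefix;
		else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
			gra -= prefix;
		return gra;
	}

Since wp_info->phys_addr already is an absolute address, passing it to read_guest() would wrongly translate it a second time whenever DAT is enabled or the prefix is non-zero.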
Cc: David Hildenbrand Signed-off-by: Alexander Yarygin Reviewed-by: David Hildenbrand Reviewed-by: Thomas Huth Signed-off-by: Christian Borntraeger --- arch/s390/kvm/guestdbg.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index 3e8d409..e97b345 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu, if (!wp_info->old_data) return -ENOMEM; /* try to backup the original value */ - ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data, - wp_info->len); + ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data, + wp_info->len); if (ret) { kfree(wp_info->old_data); wp_info->old_data = NULL; @@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu) continue; /* refetch the wp data and compare it to the old value */ - if (!read_guest(vcpu, wp_info->phys_addr, temp, - wp_info->len)) { + if (!read_guest_abs(vcpu, wp_info->phys_addr, temp, + wp_info->len)) { if (memcmp(temp, wp_info->old_data, wp_info->len)) { kfree(temp); return wp_info; -- cgit v1.1 From 68c557501b008515cb86c9a36c75f4e82e14a819 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Mon, 9 Jun 2014 10:57:26 -0400 Subject: KVM: s390: Allocate and save/restore vector registers Define and allocate space for both the host and guest views of the vector registers for a given vcpu. The 32 vector registers occupy 128 bits each (512 bytes total), but architecturally are paired with 512 additional bytes of reserved space for future expansion. The kvm_sync_regs structs containing the registers are union'ed with 1024 bytes of padding in the common kvm_run struct. The addition of 1024 bytes of new register information clearly exceeds the existing union, so an expansion of that padding is required. When changing environments, we need to appropriately save and restore the vector registers viewed by both the host and guest, into and out of the sync_regs space. The floating point registers overlay the upper half of vector registers 0-15, so there's a bit of data duplication here that needs to be carefully avoided. 
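To illustrate the overlay (a hypothetical type for illustration only, not part of the patch):

	union vreg {
		__u8  byte[16];
		__u64 dword[2];	/* dword[0] aliases FPR n for n = 0..15 */
	};

Because FPR n and the leftmost doubleword of vector register n are the same storage, the load/put paths below save and restore either the FP copy or the VX copy of a register, never both.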
Signed-off-by: Eric Farman Reviewed-by: Thomas Huth Acked-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 10 +++++++++- arch/s390/include/uapi/asm/kvm.h | 4 ++++ arch/s390/kvm/kvm-s390.c | 39 +++++++++++++++++++++++++++++++++------ 3 files changed, 46 insertions(+), 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index f407bbf..3449a38 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -183,11 +183,17 @@ struct kvm_s390_itdb { __u8 data[256]; } __packed; +struct kvm_s390_vregs { + __vector128 vrs[32]; + __u8 reserved200[512]; /* for future vector expansion */ +} __packed; + struct sie_page { struct kvm_s390_sie_block sie_block; __u8 reserved200[1024]; /* 0x0200 */ struct kvm_s390_itdb itdb; /* 0x0600 */ - __u8 reserved700[2304]; /* 0x0700 */ + __u8 reserved700[1280]; /* 0x0700 */ + struct kvm_s390_vregs vregs; /* 0x0c00 */ } __packed; struct kvm_vcpu_stat { @@ -465,6 +471,7 @@ struct kvm_vcpu_arch { s390_fp_regs host_fpregs; unsigned int host_acrs[NUM_ACRS]; s390_fp_regs guest_fpregs; + struct kvm_s390_vregs *host_vregs; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; @@ -551,6 +558,7 @@ struct kvm_arch{ int css_support; int use_irqchip; int use_cmma; + int use_vectors; int user_cpu_state_ctrl; int user_sigp; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9c77e60..ef1a5fc 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -150,6 +150,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_CRS (1UL << 3) #define KVM_SYNC_ARCH0 (1UL << 4) #define KVM_SYNC_PFAULT (1UL << 5) +#define KVM_SYNC_VRS (1UL << 6) /* definition of registers in kvm_run */ struct kvm_sync_regs { __u64 prefix; /* prefix register */ @@ -164,6 +165,9 @@ struct kvm_sync_regs { __u64 pft; /* pfault token [PFAULT] */ __u64 pfs; /* pfault select [PFAULT] */ __u64 pfc; /* pfault compare [PFAULT] */ + __u64 vrs[32][2]; /* vector registers */ + __u8 reserved[512]; /* for future vector expansion */ + __u32 fpc; /* only valid with vector registers */ }; #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f3517e7..a8fe3ab 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -185,6 +185,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_COW: r = MACHINE_HAS_ESOP; break; + case KVM_CAP_S390_VECTOR_REGISTERS: + r = MACHINE_HAS_VX; + break; default: r = 0; } @@ -265,6 +268,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) kvm->arch.user_sigp = 1; r = 0; break; + case KVM_CAP_S390_VECTOR_REGISTERS: + kvm->arch.use_vectors = MACHINE_HAS_VX; + r = MACHINE_HAS_VX ? 
0 : -EINVAL; + break; default: r = -EINVAL; break; @@ -942,6 +949,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.css_support = 0; kvm->arch.use_irqchip = 0; + kvm->arch.use_vectors = 0; kvm->arch.epoch = 0; spin_lock_init(&kvm->arch.start_stop_lock); @@ -1035,6 +1043,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) KVM_SYNC_CRS | KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT; + if (test_kvm_facility(vcpu->kvm, 129)) + vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; if (kvm_is_ucontrol(vcpu->kvm)) return __kvm_ucontrol_vcpu_init(vcpu); @@ -1045,10 +1055,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { save_fp_ctl(&vcpu->arch.host_fpregs.fpc); - save_fp_regs(vcpu->arch.host_fpregs.fprs); + if (vcpu->kvm->arch.use_vectors) + save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); + else + save_fp_regs(vcpu->arch.host_fpregs.fprs); save_access_regs(vcpu->arch.host_acrs); - restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - restore_fp_regs(vcpu->arch.guest_fpregs.fprs); + if (vcpu->kvm->arch.use_vectors) { + restore_fp_ctl(&vcpu->run->s.regs.fpc); + restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + } else { + restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); + restore_fp_regs(vcpu->arch.guest_fpregs.fprs); + } restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); @@ -1058,11 +1076,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); + if (vcpu->kvm->arch.use_vectors) { + save_fp_ctl(&vcpu->run->s.regs.fpc); + save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + } else { + save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); + save_fp_regs(vcpu->arch.guest_fpregs.fprs); + } save_access_regs(vcpu->run->s.regs.acrs); restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); - restore_fp_regs(vcpu->arch.host_fpregs.fprs); + if (vcpu->kvm->arch.use_vectors) + restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); + else + restore_fp_regs(vcpu->arch.host_fpregs.fprs); restore_access_regs(vcpu->arch.host_acrs); } @@ -1196,6 +1222,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block = &sie_page->sie_block; vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; + vcpu->arch.host_vregs = &sie_page->vregs; vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { -- cgit v1.1 From 403c8648cb191ef88555bfa72deaa969c0367f41 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Mon, 2 Feb 2015 15:01:06 -0500 Subject: KVM: s390: Vector exceptions A new exception type for vector instructions is introduced with the new processor, but is handled exactly like a Data Exception which is already handled by the system. 
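Both exception types report their details through the data-exception code (DXC) field, so the intercept parser and the delivery code can share the existing PGM_DATA path; condensed from the diff below:

	switch (pgm_info->code) {
	case PGM_VECTOR_PROCESSING:	/* 0x1b, new with the vector facility */
	case PGM_DATA:			/* 0x07 */
		pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	}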
Signed-off-by: Eric Farman Reviewed-by: David Hildenbrand Acked-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/intercept.c | 1 + arch/s390/kvm/interrupt.c | 1 + 3 files changed, 3 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3449a38..fb1fd39 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -276,6 +276,7 @@ struct kvm_vcpu_stat { #define PGM_SPECIAL_OPERATION 0x13 #define PGM_OPERAND 0x15 #define PGM_TRACE_TABEL 0x16 +#define PGM_VECTOR_PROCESSING 0x1b #define PGM_SPACE_SWITCH 0x1c #define PGM_HFP_SQUARE_ROOT 0x1d #define PGM_PC_TRANSLATION_SPEC 0x1f diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index bebd215..08ae10a 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu, pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; pgm_info->mon_code = vcpu->arch.sie_block->tecmc; break; + case PGM_VECTOR_PROCESSING: case PGM_DATA: pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; break; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 9561e1d..036d375 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -544,6 +544,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) rc |= put_guest_lc(vcpu, pgm_info.mon_code, (u64 *)__LC_MON_CODE); break; + case PGM_VECTOR_PROCESSING: case PGM_DATA: rc = put_guest_lc(vcpu, pgm_info.data_exc_code, (u32 *)__LC_DATA_EXC_CODE); -- cgit v1.1 From cd7b4b61063eb55ab7a5f96523e028c9e0914694 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Thu, 12 Feb 2015 09:06:34 -0500 Subject: KVM: s390: Add new SIGP order to kernel counters The new SIGP order Store Additional Status at Address is totally handled by user space, but we should still record the occurrence of this order in the kernel code. 
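A condensed sketch of the bookkeeping (the full switch is in the diff below); the order itself is still forwarded, since handle_sigp_order_in_user_space() makes its caller return -EOPNOTSUPP:

	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;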
Signed-off-by: Eric Farman Reviewed-by: Thomas Huth Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/kvm-s390.c | 1 + arch/s390/kvm/sigp.c | 3 +++ 3 files changed, 5 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index fb1fd39..3fe2597 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -244,6 +244,7 @@ struct kvm_vcpu_stat { u32 instruction_sigp_stop; u32 instruction_sigp_stop_store_status; u32 instruction_sigp_store_status; + u32 instruction_sigp_store_adtl_status; u32 instruction_sigp_arch; u32 instruction_sigp_prefix; u32 instruction_sigp_restart; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index a8fe3ab..c0ae03a 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -87,6 +87,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, + { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) }, { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 23b1e86..755a733 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) case SIGP_STORE_STATUS_AT_ADDRESS: vcpu->stat.instruction_sigp_store_status++; break; + case SIGP_STORE_ADDITIONAL_STATUS: + vcpu->stat.instruction_sigp_store_adtl_status++; + break; case SIGP_SET_PREFIX: vcpu->stat.instruction_sigp_prefix++; break; -- cgit v1.1 From bc17de7c966504b287a1dceb76a523d8b7816731 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Mon, 14 Apr 2014 16:01:09 -0400 Subject: KVM: s390: Machine Check Store additional status in the machine check handler, in order to collect status (such as vector registers) that is not defined by store status. 
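The address of the designated area is fetched from the lowcore; only bits 0-53 participate in address formation, so the area is 1 K aligned, and a resulting address of zero means nothing is stored. Condensed from kvm_s390_store_adtl_status_unloaded() in the diff below:

	gpa &= ~0x3ffUL;	/* ignore the low 10 bits */
	if (!gpa)
		return 0;	/* no additional-status area designated */
	return write_guest_abs(vcpu, gpa, (void *)&vcpu->run->s.regs.vrs, 512);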
Signed-off-by: Eric Farman Reviewed-by: Thomas Huth Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kernel/asm-offsets.c | 1 + arch/s390/kvm/interrupt.c | 4 ++++ arch/s390/kvm/kvm-s390.c | 29 +++++++++++++++++++++++++++++ arch/s390/kvm/kvm-s390.h | 3 +++ 4 files changed, 37 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index e07e9160..8dc4db1 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -171,6 +171,7 @@ int main(void) #else /* CONFIG_32BIT */ DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); + DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 036d375..2afec60 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -351,6 +351,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_mchk_info mchk; + unsigned long adtl_status_addr; int rc; spin_lock(&li->lock); @@ -371,6 +372,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) mchk.cr14, mchk.mcic); rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); + rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, + &adtl_status_addr, sizeof(unsigned long)); + rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr); rc |= put_guest_lc(vcpu, mchk.mcic, (u64 __user *) __LC_MCCK_CODE); rc |= put_guest_lc(vcpu, mchk.failing_storage_address, diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c0ae03a..0c045cf 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2031,6 +2031,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return kvm_s390_store_status_unloaded(vcpu, addr); } +/* + * store additional status at address + */ +int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, + unsigned long gpa) +{ + /* Only bits 0-53 are used for address formation */ + if (!(gpa & ~0x3ff)) + return 0; + + return write_guest_abs(vcpu, gpa & ~0x3ff, + (void *)&vcpu->run->s.regs.vrs, 512); +} + +int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) +{ + if (!test_kvm_facility(vcpu->kvm, 129)) + return 0; + + /* + * The guest VXRS are in the host VXRs due to the lazy + * copying in vcpu load/put. Let's update our copies before we save + * it into the save area. 
+ */ + save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + + return kvm_s390_store_adtl_status_unloaded(vcpu, addr); +} + static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) { kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 6995a30..fda3f31 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -177,7 +177,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); +int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, + unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); +int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); void s390_vcpu_block(struct kvm_vcpu *vcpu); -- cgit v1.1 From 13211ea7b47db3d8ee2ff258a9a973a6d3aa3d43 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Wed, 30 Apr 2014 13:39:46 -0400 Subject: KVM: s390: Enable vector support for capable guest We finally have all the pieces in place, so let's include the vector facility bit in the mask of available hardware facilities for the guest to recognize. Also, enable the vector functionality in the guest control blocks, to avoid a possible vector data exception that would otherwise occur when a vector instruction is issued by the guest operating system. Signed-off-by: Eric Farman Reviewed-by: Thomas Huth Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 4 +++- arch/s390/kvm/kvm-s390.c | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3fe2597..347a333 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -172,7 +172,9 @@ struct kvm_s390_sie_block { __u32 fac; /* 0x01a0 */ __u8 reserved1a4[20]; /* 0x01a4 */ __u64 cbrlo; /* 0x01b8 */ - __u8 reserved1c0[30]; /* 0x01c0 */ + __u8 reserved1c0[8]; /* 0x01c0 */ + __u32 ecd; /* 0x01c8 */ + __u8 reserved1cc[18]; /* 0x01cc */ __u64 pp; /* 0x01de */ __u8 reserved1e6[2]; /* 0x01e6 */ __u64 itdba; /* 0x01e8 */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c045cf..02e03c8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -104,6 +104,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { unsigned long kvm_s390_fac_list_mask[] = { 0xff82fffbf4fc2000UL, 0x005c000000000000UL, + 0x4000000000000000UL, }; unsigned long kvm_s390_fac_list_mask_size(void) @@ -1186,6 +1187,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= 1; if (sclp_has_sigpif()) vcpu->arch.sie_block->eca |= 0x10000000U; + if (vcpu->kvm->arch.use_vectors) { + vcpu->arch.sie_block->eca |= 0x00020000; + vcpu->arch.sie_block->ecd |= 0x20000000; + } vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; if (kvm_s390_cmma_enabled(vcpu->kvm)) { -- cgit v1.1 From 1e8d242478a471b92b07e7966faa9f618df98e61 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 9 Mar 2015 21:27:12 +0100 Subject: KVM: s390: Spelling s/intance/instance/ Signed-off-by: Geert Uytterhoeven Message-Id: <1425932832-6244-1-git-send-email-geert+renesas@glider.be> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.h | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index fda3f31..83f32a1 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -125,7 +125,7 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) vcpu->arch.sie_block->gpsw.mask |= cc << 44; } -/* test availability of facility in a kvm intance */ +/* test availability of facility in a kvm instance */ static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) { return __test_facility(nr, kvm->arch.model.fac->mask) && -- cgit v1.1 From 40f5b735e867b8fd3e6090f5a184950c68d227bb Mon Sep 17 00:00:00 2001 From: Dominik Dingel Date: Thu, 12 Mar 2015 13:55:53 +0100 Subject: KVM: s390: cleanup jump lables in kvm_arch_init_vm As all cleanup functions can handle their respective NULL case there is no need to have more than one error jump label. Signed-off-by: Dominik Dingel Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 02e03c8..4075acb 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -897,7 +897,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); if (!kvm->arch.dbf) - goto out_nodbf; + goto out_err; /* * The architectural maximum amount of facilities is 16 kbit. To store @@ -909,7 +909,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.model.fac = (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!kvm->arch.model.fac) - goto out_nofac; + goto out_err; /* Populate the facility mask initially. */ memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, @@ -929,7 +929,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; if (kvm_s390_crypto_init(kvm) < 0) - goto out_crypto; + goto out_err; spin_lock_init(&kvm->arch.float_int.lock); INIT_LIST_HEAD(&kvm->arch.float_int.list); @@ -944,7 +944,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) } else { kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); if (!kvm->arch.gmap) - goto out_nogmap; + goto out_err; kvm->arch.gmap->private = kvm; kvm->arch.gmap->pfault_enabled = 0; } @@ -957,15 +957,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) spin_lock_init(&kvm->arch.start_stop_lock); return 0; -out_nogmap: +out_err: kfree(kvm->arch.crypto.crycb); -out_crypto: free_page((unsigned long)kvm->arch.model.fac); -out_nofac: debug_unregister(kvm->arch.dbf); -out_nodbf: free_page((unsigned long)(kvm->arch.sca)); -out_err: return rc; } -- cgit v1.1 From dd9e5b7bdba3250c075a212ff632d31edfa91ae7 Mon Sep 17 00:00:00 2001 From: Alexander Yarygin Date: Tue, 3 Mar 2015 14:26:14 +0300 Subject: KVM: s390: Fix low-address protection for real addresses The kvm_s390_check_low_addr_protection() function is used only with real addresses. According to the POP (the "Low-Address Protection" paragraph in chapter 3), if the effective address is real or absolute, the low-address protection procedure should raise a PROTECTION exception only when the low-address protection is enabled in the control register 0 and the address is low. This patch removes ASCE checks from the function and renames it to better reflect its behavior. 
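For reference, the two ranges guarded by low-address protection (0-511 and 4096-4607) can be tested with a single mask, as the existing helper in arch/s390/kvm/gaccess.c does:

	static int is_low_address(unsigned long ga)
	{
		/* check for address ranges 0..511 and 4096..4607 */
		return (ga & ~0x11fful) == 0;
	}

With the ASCE checks removed, the renamed function only consults the LAP bit in control register 0 and this range test.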
Cc: Thomas Huth Signed-off-by: Alexander Yarygin Reviewed-by: Thomas Huth Reviewed-by: David Hildenbrand Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 11 ++++++----- arch/s390/kvm/gaccess.h | 2 +- arch/s390/kvm/priv.c | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 633fe9b..c230904 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -697,28 +697,29 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, } /** - * kvm_s390_check_low_addr_protection - check for low-address protection - * @ga: Guest address + * kvm_s390_check_low_addr_prot_real - check for low-address protection + * @gra: Guest real address * * Checks whether an address is subject to low-address protection and set * up vcpu->arch.pgm accordingly if necessary. * * Return: 0 if no protection exception, or PGM_PROTECTION if protected. */ -int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga) +int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; struct trans_exc_code_bits *tec_bits; + union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; - if (!is_low_address(ga) || !low_address_protection_enabled(vcpu)) + if (!ctlreg0.lap || !is_low_address(gra)) return 0; memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; tec_bits->fsi = FSI_STORE; tec_bits->as = psw_bits(*psw).as; - tec_bits->addr = ga >> PAGE_SHIFT; + tec_bits->addr = gra >> PAGE_SHIFT; pgm->code = PGM_PROTECTION; return pgm->code; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 0149cf1..20de77e 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -330,6 +330,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, void ipte_lock(struct kvm_vcpu *vcpu); void ipte_unlock(struct kvm_vcpu *vcpu); int ipte_lock_held(struct kvm_vcpu *vcpu); -int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); +int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra); #endif /* __KVM_S390_GACCESS_H */ diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index b982fbc..5f26425 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -207,7 +207,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, NULL, ®2); addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; addr = kvm_s390_logical_to_effective(vcpu, addr); - if (kvm_s390_check_low_addr_protection(vcpu, addr)) + if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); addr = kvm_s390_real_to_abs(vcpu, addr); @@ -680,7 +680,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) } if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { - if (kvm_s390_check_low_addr_protection(vcpu, start)) + if (kvm_s390_check_low_addr_prot_real(vcpu, start)) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); } -- cgit v1.1 From 8ae04b8f500b9f46652c63431bf658223d875597 Mon Sep 17 00:00:00 2001 From: Alexander Yarygin Date: Mon, 19 Jan 2015 13:24:51 +0300 Subject: KVM: s390: Guest's memory access functions get access registers In access register mode, the write_guest() read_guest() and other functions will invoke the access register translation, which requires an ar, designated by one of the instruction 
fields. Signed-off-by: Alexander Yarygin Reviewed-by: Thomas Huth Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/diag.c | 4 +-- arch/s390/kvm/gaccess.c | 4 +-- arch/s390/kvm/gaccess.h | 14 +++++---- arch/s390/kvm/intercept.c | 4 +-- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/kvm/kvm-s390.h | 25 +++++++++++++--- arch/s390/kvm/priv.c | 72 ++++++++++++++++++++++++++++------------------- arch/s390/kvm/sigp.c | 4 +-- 8 files changed, 81 insertions(+), 48 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 9254aff..89140dd 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) if (vcpu->run->s.regs.gprs[rx] & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); + rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) @@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) { - int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff; + int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index c230904..494131e 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -578,7 +578,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, return 0; } -int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len, int write) { psw_t *psw = &vcpu->arch.sie_block->gpsw; @@ -652,7 +652,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * Note: The IPTE lock is not taken during this function, so the caller * has to take care of this. */ -int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, +int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, unsigned long *gpa, int write) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 20de77e..7c2866b 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -156,9 +156,9 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, } int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, - unsigned long *gpa, int write); + ar_t ar, unsigned long *gpa, int write); -int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len, int write); int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, @@ -168,6 +168,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * write_guest - copy data from kernel space to guest space * @vcpu: virtual cpu * @ga: guest address + * @ar: access register * @data: source address in kernel space * @len: number of bytes to copy * @@ -210,16 +211,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * if data has been changed in guest space in case of an exception. 
*/ static inline __must_check -int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len) { - return access_guest(vcpu, ga, data, len, 1); + return access_guest(vcpu, ga, ar, data, len, 1); } /** * read_guest - copy data from guest space to kernel space * @vcpu: virtual cpu * @ga: guest address + * @ar: access register * @data: destination address in kernel space * @len: number of bytes to copy * @@ -229,10 +231,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, * data will be copied from guest space to kernel space. */ static inline __must_check -int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len) { - return access_guest(vcpu, ga, data, len, 0); + return access_guest(vcpu, ga, ar, data, len, 0); } /** diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 08ae10a..9e3779e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -320,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) /* Make sure that the source is paged-in */ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], - &srcaddr, 0); + reg2, &srcaddr, 0); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); @@ -329,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) /* Make sure that the destination is paged-in */ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], - &dstaddr, 1); + reg1, &dstaddr, 1); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4075acb..610e90a 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1776,7 +1776,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) * to look up the current opcode to get the length of the instruction * to be able to forward the PSW. */ - rc = read_guest(vcpu, psw->addr, &opcode, 1); + rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); psw->addr = __rewind_psw(*psw, -insn_length(opcode)); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 83f32a1..5d54191 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); } -static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) +typedef u8 __bitwise ar_t; + +static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar) { u32 base2 = vcpu->arch.sie_block->ipb >> 28; u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + if (ar) + *ar = base2; + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; } static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, - u64 *address1, u64 *address2) + u64 *address1, u64 *address2, + ar_t *ar_b1, ar_t *ar_b2) { u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; @@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; *address2 = (base2 ? 
vcpu->run->s.regs.gprs[base2] : 0) + disp2; + + if (ar_b1) + *ar_b1 = base1; + if (ar_b2) + *ar_b2 = base2; } static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) @@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2 *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; } -static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) +static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar) { u32 base2 = vcpu->arch.sie_block->ipb >> 28; u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + @@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) if (disp2 & 0x80000) disp2+=0xfff00000; + if (ar) + *ar = base2; + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; } -static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) +static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar) { u32 base2 = vcpu->arch.sie_block->ipb >> 28; u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + if (ar) + *ar = base2; + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; } diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 5f26425..f4fe02e 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) struct kvm_vcpu *cpup; s64 hostclk, val; int i, rc; + ar_t ar; u64 op2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - op2 = kvm_s390_get_base_disp_s(vcpu); + op2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (op2 & 7) /* Operand must be on a doubleword boundary */ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, op2, &val, sizeof(val)); + rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) u64 operand2; u32 address; int rc; + ar_t ar; vcpu->stat.instruction_spx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* get the value */ - rc = read_guest(vcpu, operand2, &address, sizeof(address)); + rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) u64 operand2; u32 address; int rc; + ar_t ar; vcpu->stat.instruction_stpx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); /* must be word boundary */ if (operand2 & 3) @@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) address = kvm_s390_get_prefix(vcpu); /* get the value */ - rc = write_guest(vcpu, operand2, &address, sizeof(address)); + rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) u16 vcpu_id = vcpu->vcpu_id; u64 ga; int rc; + ar_t ar; vcpu->stat.instruction_stap++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_s(vcpu); + ga = kvm_s390_get_base_disp_s(vcpu, &ar); if (ga & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); + rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -231,8 +235,9 @@ static int handle_tpi(struct kvm_vcpu *vcpu) u32 tpi_data[3]; int rc; u64 addr; + ar_t ar; - addr = kvm_s390_get_base_disp_s(vcpu); + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -251,7 +256,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu) * provided area. */ len = sizeof(tpi_data) - 4; - rc = write_guest(vcpu, addr, &tpi_data, len); + rc = write_guest(vcpu, addr, ar, &tpi_data, len); if (rc) { rc = kvm_s390_inject_prog_cond(vcpu, rc); goto reinject_interrupt; @@ -395,15 +400,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) psw_compat_t new_psw; u64 addr; int rc; + ar_t ar; if (gpsw->mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - addr = kvm_s390_get_base_disp_s(vcpu); + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); + rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); if (!(new_psw.mask & PSW32_MASK_BASE)) @@ -421,14 +427,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) psw_t new_psw; u64 addr; int rc; + ar_t ar; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - addr = kvm_s390_get_base_disp_s(vcpu); + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); + rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gpsw = new_psw; @@ -442,18 +449,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu) u64 stidp_data = vcpu->arch.stidp_data; u64 operand2; int rc; + ar_t ar; vcpu->stat.instruction_stidp++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (operand2 & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); + rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -496,6 +504,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) unsigned long mem = 0; u64 operand2; int rc = 0; + ar_t ar; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); @@ -518,7 +527,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) return 0; } - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (operand2 & 0xfff) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -542,7 +551,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) break; } - rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); + rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); if (rc) { rc = kvm_s390_inject_prog_cond(vcpu, rc); goto out; @@ -786,13 
+795,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u32 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_lctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rs(vcpu); + ga = kvm_s390_get_base_disp_rs(vcpu, &ar); if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -801,7 +811,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; - rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); reg = reg1; @@ -824,13 +834,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u32 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_stctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rs(vcpu); + ga = kvm_s390_get_base_disp_rs(vcpu, &ar); if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -846,7 +857,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) break; reg = (reg + 1) % 16; } while (1); - rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; } @@ -857,13 +868,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u64 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_lctlg++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rsy(vcpu); + ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -872,7 +884,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; - rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); reg = reg1; @@ -894,13 +906,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u64 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_stctg++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rsy(vcpu); + ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -916,7 +929,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu) break; reg = (reg + 1) % 16; } while (1); - rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); return rc ? 
kvm_s390_inject_prog_cond(vcpu, rc) : 0; } @@ -941,13 +954,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu) unsigned long hva, gpa; int ret = 0, cc = 0; bool writable; + ar_t ar; vcpu->stat.instruction_tprot++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); + kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); /* we only handle the Linux memory detection case: * access key == 0 @@ -956,11 +970,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) ipte_lock(vcpu); - ret = guest_translate_address(vcpu, address1, &gpa, 1); + ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); if (ret == PGM_PROTECTION) { /* Write protected? Try again with read-only... */ cc = 1; - ret = guest_translate_address(vcpu, address1, &gpa, 0); + ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); } if (ret) { if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 755a733..72e58bd 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -434,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - order_code = kvm_s390_get_base_disp_rs(vcpu); + order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); if (handle_sigp_order_in_user_space(vcpu, order_code)) return -EOPNOTSUPP; @@ -476,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) int r3 = vcpu->arch.sie_block->ipa & 0x000f; u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; struct kvm_vcpu *dest_vcpu; - u8 order_code = kvm_s390_get_base_disp_rs(vcpu); + u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); -- cgit v1.1 From 75a1812230ad7ad16e5a06b5ef2220f765b12da5 Mon Sep 17 00:00:00 2001 From: Alexander Yarygin Date: Thu, 22 Jan 2015 12:44:11 +0300 Subject: KVM: s390: Optimize paths where get_vcpu_asce() is invoked During dynamic address translation the get_vcpu_asce() function can be invoked several times. It's ok for usual modes, but will be slow if CPUs are in AR mode. Let's call the get_vcpu_asce() once and pass the result to the called functions. 
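The change is the usual hoisting of a loop-invariant lookup: derive the ASCE once in the outermost access function and hand it down by value instead of re-deriving it for every page. A minimal sketch of the pattern, with hypothetical helper names (the real changes to guest_translate() and its callers follow below):

    /* before: every page translation re-derived the asce */
    static int translate_page(struct kvm_vcpu *vcpu, unsigned long ga)
    {
            union asce asce;

            asce.val = get_vcpu_asce(vcpu);        /* repeated per page */
            /* ... walk the DAT tables using asce ... */
            return 0;
    }

    /* after: the asce is handed in by the caller */
    static int translate_page(struct kvm_vcpu *vcpu, unsigned long ga,
                              const union asce asce)
    {
            /* ... walk the DAT tables using asce ... */
            return 0;
    }

    static int access_pages(struct kvm_vcpu *vcpu, unsigned long ga,
                            unsigned long nr_pages)
    {
            union asce asce;
            unsigned long i;
            int rc;

            asce.val = get_vcpu_asce(vcpu);        /* once per access */
            for (i = 0; i < nr_pages; i++) {
                    rc = translate_page(vcpu, ga + i * PAGE_SIZE, asce);
                    if (rc)
                            return rc;
            }
            return 0;
    }

Passing the asce as a const value also documents that the translation must not change it in the middle of an access.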
Signed-off-by: Alexander Yarygin Reviewed-by: Thomas Huth Reviewed-by: David Hildenbrand Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 494131e..c74462a 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -330,6 +330,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) * @vcpu: virtual cpu * @gva: guest virtual address * @gpa: points to where guest physical (absolute) address should be stored + * @asce: effective asce * @write: indicates if access is a write access * * Translate a guest virtual address into a guest absolute address by means @@ -345,7 +346,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) * by the architecture */ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, - unsigned long *gpa, int write) + unsigned long *gpa, const union asce asce, + int write) { union vaddress vaddr = {.addr = gva}; union raddress raddr = {.addr = gva}; @@ -354,12 +356,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, union ctlreg0 ctlreg0; unsigned long ptr; int edat1, edat2; - union asce asce; ctlreg0.val = vcpu->arch.sie_block->gcr[0]; edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); - asce.val = get_vcpu_asce(vcpu); if (asce.r) goto real_address; ptr = asce.origin * 4096; @@ -506,15 +506,14 @@ static inline int is_low_address(unsigned long ga) return (ga & ~0x11fful) == 0; } -static int low_address_protection_enabled(struct kvm_vcpu *vcpu) +static int low_address_protection_enabled(struct kvm_vcpu *vcpu, + const union asce asce) { union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; psw_t *psw = &vcpu->arch.sie_block->gpsw; - union asce asce; if (!ctlreg0.lap) return 0; - asce.val = get_vcpu_asce(vcpu); if (psw_bits(*psw).t && asce.p) return 0; return 1; @@ -536,7 +535,7 @@ enum { static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, unsigned long *pages, unsigned long nr_pages, - int write) + const union asce asce, int write) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; @@ -547,7 +546,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; tec_bits->fsi = write ? 
FSI_STORE : FSI_FETCH; tec_bits->as = psw_bits(*psw).as; - lap_enabled = low_address_protection_enabled(vcpu); + lap_enabled = low_address_protection_enabled(vcpu, asce); while (nr_pages) { ga = kvm_s390_logical_to_effective(vcpu, ga); tec_bits->addr = ga >> PAGE_SHIFT; @@ -557,7 +556,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, } ga &= PAGE_MASK; if (psw_bits(*psw).t) { - rc = guest_translate(vcpu, ga, pages, write); + rc = guest_translate(vcpu, ga, pages, asce, write); if (rc < 0) return rc; if (rc == PGM_PROTECTION) @@ -604,7 +603,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, need_ipte_lock = psw_bits(*psw).t && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); - rc = guest_page_range(vcpu, ga, pages, nr_pages, write); + rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); for (idx = 0; idx < nr_pages && !rc; idx++) { gpa = *(pages + idx) + (ga & ~PAGE_MASK); _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); @@ -671,16 +670,16 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, tec->as = psw_bits(*psw).as; tec->fsi = write ? FSI_STORE : FSI_FETCH; tec->addr = gva >> PAGE_SHIFT; - if (is_low_address(gva) && low_address_protection_enabled(vcpu)) { + asce.val = get_vcpu_asce(vcpu); + if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { if (write) { rc = pgm->code = PGM_PROTECTION; return rc; } } - asce.val = get_vcpu_asce(vcpu); if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ - rc = guest_translate(vcpu, gva, gpa, write); + rc = guest_translate(vcpu, gva, gpa, asce, write); if (rc > 0) { if (rc == PGM_PROTECTION) tec->b61 = 1; -- cgit v1.1 From 664b4973537068402954bee6e2959b858f263a6f Mon Sep 17 00:00:00 2001 From: Alexander Yarygin Date: Mon, 9 Mar 2015 14:17:25 +0300 Subject: KVM: s390: Add access register mode Access register mode is one of the modes that control dynamic address translation. In this mode the address space is specified by values of the access registers. The effective address-space-control element is obtained from the result of the access register translation. See the "Access-Register Introduction" section of the chapter 5 "Program Execution" in "Principles of Operations" for more details. 
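For orientation, the access-register translation added below reduces to the following selection; fetch_ale() and fetch_aste() stand in for the guest reads plus the validity and sequence-number checks performed by the new ar_translation():

    u32 alet = acrs[ar];            /* the ALET sits in the access register */

    if (ar == 0 || alet == 0)
            asce = gcr[1];          /* ALET 0: primary address space */
    else if (alet == 1)
            asce = gcr[7];          /* ALET 1: secondary address space */
    else {
            /*
             * Any other ALET designates an access-list entry: locate the
             * access-list designation via CR5 (P bit set) or CR2 (P bit
             * clear), verify the ALE and ASTE sequence numbers and the
             * authorization index, then take the ASCE from the
             * ASN-second-table entry.
             */
            ale  = fetch_ale(alet);
            aste = fetch_aste(ale);
            asce = aste.asce;
    }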
Signed-off-by: Alexander Yarygin Reviewed-by: Thomas Huth Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 234 +++++++++++++++++++++++++++++++++++++++++------- arch/s390/kvm/gaccess.h | 3 +- 2 files changed, 202 insertions(+), 35 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index c74462a..ea38d71 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -10,6 +10,7 @@ #include #include "kvm-s390.h" #include "gaccess.h" +#include union asce { unsigned long val; @@ -207,6 +208,54 @@ union raddress { unsigned long pfra : 52; /* Page-Frame Real Address */ }; +union alet { + u32 val; + struct { + u32 reserved : 7; + u32 p : 1; + u32 alesn : 8; + u32 alen : 16; + }; +}; + +union ald { + u32 val; + struct { + u32 : 1; + u32 alo : 24; + u32 all : 7; + }; +}; + +struct ale { + unsigned long i : 1; /* ALEN-Invalid Bit */ + unsigned long : 5; + unsigned long fo : 1; /* Fetch-Only Bit */ + unsigned long p : 1; /* Private Bit */ + unsigned long alesn : 8; /* Access-List-Entry Sequence Number */ + unsigned long aleax : 16; /* Access-List-Entry Authorization Index */ + unsigned long : 32; + unsigned long : 1; + unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */ + unsigned long : 6; + unsigned long astesn : 32; /* ASTE Sequence Number */ +} __packed; + +struct aste { + unsigned long i : 1; /* ASX-Invalid Bit */ + unsigned long ato : 29; /* Authority-Table Origin */ + unsigned long : 1; + unsigned long b : 1; /* Base-Space Bit */ + unsigned long ax : 16; /* Authorization Index */ + unsigned long atl : 12; /* Authority-Table Length */ + unsigned long : 2; + unsigned long ca : 1; /* Controlled-ASN Bit */ + unsigned long ra : 1; /* Reusable-ASN Bit */ + unsigned long asce : 64; /* Address-Space-Control Element */ + unsigned long ald : 32; + unsigned long astesn : 32; + /* .. 
more fields there */ +} __packed; int ipte_lock_held(struct kvm_vcpu *vcpu) { @@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu) ipte_unlock_simple(vcpu); } -static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) +static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, + int write) +{ + union alet alet; + struct ale ale; + struct aste aste; + unsigned long ald_addr, authority_table_addr; + union ald ald; + int eax, rc; + u8 authority_table; + + if (ar >= NUM_ACRS) + return -EINVAL; + + save_access_regs(vcpu->run->s.regs.acrs); + alet.val = vcpu->run->s.regs.acrs[ar]; + + if (ar == 0 || alet.val == 0) { + asce->val = vcpu->arch.sie_block->gcr[1]; + return 0; + } else if (alet.val == 1) { + asce->val = vcpu->arch.sie_block->gcr[7]; + return 0; + } + + if (alet.reserved) + return PGM_ALET_SPECIFICATION; + + if (alet.p) + ald_addr = vcpu->arch.sie_block->gcr[5]; + else + ald_addr = vcpu->arch.sie_block->gcr[2]; + ald_addr &= 0x7fffffc0; + + rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald)); + if (rc) + return rc; + + if (alet.alen / 8 > ald.all) + return PGM_ALEN_TRANSLATION; + + if (0x7fffffff - ald.alo * 128 < alet.alen * 16) + return PGM_ADDRESSING; + + rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale, + sizeof(struct ale)); + if (rc) + return rc; + + if (ale.i == 1) + return PGM_ALEN_TRANSLATION; + if (ale.alesn != alet.alesn) + return PGM_ALE_SEQUENCE; + + rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste)); + if (rc) + return rc; + + if (aste.i) + return PGM_ASTE_VALIDITY; + if (aste.astesn != ale.astesn) + return PGM_ASTE_SEQUENCE; + + if (ale.p == 1) { + eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; + if (ale.aleax != eax) { + if (eax / 16 > aste.atl) + return PGM_EXTENDED_AUTHORITY; + + authority_table_addr = aste.ato * 4 + eax / 4; + + rc = read_guest_real(vcpu, authority_table_addr, + &authority_table, + sizeof(u8)); + if (rc) + return rc; + + if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0) + return PGM_EXTENDED_AUTHORITY; + } + } + + if (ale.fo == 1 && write) + return PGM_PROTECTION; + + asce->val = aste.asce; + return 0; +} + +struct trans_exc_code_bits { + unsigned long addr : 52; /* Translation-exception Address */ + unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ + unsigned long : 6; + unsigned long b60 : 1; + unsigned long b61 : 1; + unsigned long as : 2; /* ASCE Identifier */ +}; + +enum { + FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ + FSI_STORE = 1, /* Exception was due to store operation */ + FSI_FETCH = 2 /* Exception was due to fetch operation */ +}; + +static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, + ar_t ar, int write) { + int rc; + psw_t *psw = &vcpu->arch.sie_block->gpsw; + struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; + struct trans_exc_code_bits *tec_bits; + + memset(pgm, 0, sizeof(*pgm)); + tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; + tec_bits->fsi = write ? 
FSI_STORE : FSI_FETCH; + tec_bits->as = psw_bits(*psw).as; + + if (!psw_bits(*psw).t) { + asce->val = 0; + asce->r = 1; + return 0; + } + switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { case PSW_AS_PRIMARY: - return vcpu->arch.sie_block->gcr[1]; + asce->val = vcpu->arch.sie_block->gcr[1]; + return 0; case PSW_AS_SECONDARY: - return vcpu->arch.sie_block->gcr[7]; + asce->val = vcpu->arch.sie_block->gcr[7]; + return 0; case PSW_AS_HOME: - return vcpu->arch.sie_block->gcr[13]; + asce->val = vcpu->arch.sie_block->gcr[13]; + return 0; + case PSW_AS_ACCREG: + rc = ar_translation(vcpu, asce, ar, write); + switch (rc) { + case PGM_ALEN_TRANSLATION: + case PGM_ALE_SEQUENCE: + case PGM_ASTE_VALIDITY: + case PGM_ASTE_SEQUENCE: + case PGM_EXTENDED_AUTHORITY: + vcpu->arch.pgm.exc_access_id = ar; + break; + case PGM_PROTECTION: + tec_bits->b60 = 1; + tec_bits->b61 = 1; + break; + } + if (rc > 0) + pgm->code = rc; + return rc; } return 0; } @@ -519,20 +710,6 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu, return 1; } -struct trans_exc_code_bits { - unsigned long addr : 52; /* Translation-exception Address */ - unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ - unsigned long : 7; - unsigned long b61 : 1; - unsigned long as : 2; /* ASCE Identifier */ -}; - -enum { - FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ - FSI_STORE = 1, /* Exception was due to store operation */ - FSI_FETCH = 2 /* Exception was due to fetch operation */ -}; - static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, unsigned long *pages, unsigned long nr_pages, const union asce asce, int write) @@ -542,10 +719,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, struct trans_exc_code_bits *tec_bits; int lap_enabled, rc; - memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; - tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; - tec_bits->as = psw_bits(*psw).as; lap_enabled = low_address_protection_enabled(vcpu, asce); while (nr_pages) { ga = kvm_s390_logical_to_effective(vcpu, ga); @@ -590,16 +764,15 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, if (!len) return 0; - /* Access register mode is not supported yet. */ - if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) - return -EOPNOTSUPP; + rc = get_vcpu_asce(vcpu, &asce, ar, write); + if (rc) + return rc; nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; pages = pages_array; if (nr_pages > ARRAY_SIZE(pages_array)) pages = vmalloc(nr_pages * sizeof(unsigned long)); if (!pages) return -ENOMEM; - asce.val = get_vcpu_asce(vcpu); need_ipte_lock = psw_bits(*psw).t && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); @@ -660,17 +833,12 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, union asce asce; int rc; - /* Access register mode is not supported yet. */ - if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) - return -EOPNOTSUPP; - gva = kvm_s390_logical_to_effective(vcpu, gva); - memset(pgm, 0, sizeof(*pgm)); tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; - tec->as = psw_bits(*psw).as; - tec->fsi = write ? 
FSI_STORE : FSI_FETCH; + rc = get_vcpu_asce(vcpu, &asce, ar, write); tec->addr = gva >> PAGE_SHIFT; - asce.val = get_vcpu_asce(vcpu); + if (rc) + return rc; if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { if (write) { rc = pgm->code = PGM_PROTECTION; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 7c2866b..835e557 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -177,8 +177,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * If DAT is off data will be copied to guest real or absolute memory. * If DAT is on data will be copied to the address space as specified by * the address space bits of the PSW: - * Primary, secondory or home space (access register mode is currently not - * implemented). + * Primary, secondary, home space or access register mode. * The addressing mode of the PSW is also inspected, so that address wrap * around is taken into account for 24-, 31- and 64-bit addressing mode, * if the to be copied data crosses page boundaries in guest address space. -- cgit v1.1 From 41408c28f283b49202ae374b1c42bc8e9b33a048 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Fri, 6 Feb 2015 15:01:21 +0100 Subject: KVM: s390: Add MEMOP ioctls for reading/writing guest memory On s390, we've got to make sure to hold the IPTE lock while accessing logical memory. So let's add an ioctl for reading and writing logical memory to provide this feature for userspace, too. The maximum transfer size of this call is limited to 64kB to prevent that the guest can trigger huge copy_from/to_user transfers. QEMU currently only requests up to one or two pages so far, so 16*4kB seems to be a reasonable limit here. Signed-off-by: Thomas Huth Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 22 ++++++++++++++ arch/s390/kvm/gaccess.h | 2 ++ arch/s390/kvm/kvm-s390.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index ea38d71..a7559f7 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -864,6 +864,28 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, } /** + * check_gva_range - test a range of guest virtual addresses for accessibility + */ +int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, + unsigned long length, int is_write) +{ + unsigned long gpa; + unsigned long currlen; + int rc = 0; + + ipte_lock(vcpu); + while (length > 0 && !rc) { + currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); + rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); + gva += currlen; + length -= currlen; + } + ipte_unlock(vcpu); + + return rc; +} + +/** * kvm_s390_check_low_addr_prot_real - check for low-address protection * @gra: Guest real address * diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 835e557..ef03726 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -157,6 +157,8 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, unsigned long *gpa, int write); +int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, + unsigned long length, int is_write); int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len, int write); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 610e90a..b7ecef9 100644 --- 
a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,8 @@ #include "trace.h" #include "trace-s390.h" +#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */ + #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU struct kvm_stats_debugfs_item debugfs_entries[] = { @@ -177,6 +180,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_USER_SIGP: r = 1; break; + case KVM_CAP_S390_MEM_OP: + r = MEM_OP_MAX_SIZE; + break; case KVM_CAP_NR_VCPUS: case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; @@ -2185,6 +2191,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return r; } +static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, + struct kvm_s390_mem_op *mop) +{ + void __user *uaddr = (void __user *)mop->buf; + void *tmpbuf = NULL; + int r, srcu_idx; + const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION + | KVM_S390_MEMOP_F_CHECK_ONLY; + + if (mop->flags & ~supported_flags) + return -EINVAL; + + if (mop->size > MEM_OP_MAX_SIZE) + return -E2BIG; + + if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { + tmpbuf = vmalloc(mop->size); + if (!tmpbuf) + return -ENOMEM; + } + + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + + switch (mop->op) { + case KVM_S390_MEMOP_LOGICAL_READ: + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); + break; + } + r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); + if (r == 0) { + if (copy_to_user(uaddr, tmpbuf, mop->size)) + r = -EFAULT; + } + break; + case KVM_S390_MEMOP_LOGICAL_WRITE: + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); + break; + } + if (copy_from_user(tmpbuf, uaddr, mop->size)) { + r = -EFAULT; + break; + } + r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); + break; + default: + r = -EINVAL; + } + + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + + if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) + kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); + + vfree(tmpbuf); + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2284,6 +2349,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } + case KVM_S390_MEM_OP: { + struct kvm_s390_mem_op mem_op; + + if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0) + r = kvm_s390_guest_mem_op(vcpu, &mem_op); + else + r = -EFAULT; + break; + } default: r = -ENOTTY; } -- cgit v1.1 From e44fc8c9dab215ac0e398622a05574cffd5f5184 Mon Sep 17 00:00:00 2001 From: Ekaterina Tumanova Date: Fri, 30 Jan 2015 16:55:56 +0100 Subject: KVM: s390: introduce post handlers for STSI The Store System Information (STSI) instruction currently collects all information it relays to the caller in the kernel. Some information, however, is only available in user space. An example of this is the guest name: The kernel always sets "KVMGuest", but user space knows the actual guest name. This patch introduces a new exit, KVM_EXIT_S390_STSI, guarded by a capability that can be enabled by user space if it wants to be able to insert such data. User space will be provided with the target buffer and the requested STSI function code. 
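User space consumes this in two steps: enable the capability once per VM, then patch the buffer whenever the new exit is taken. The kernel stores its own data before exiting, so user space only overwrites what it knows better. A rough sketch in two fragments, where insert_guest_name() is a hypothetical helper and only the s390_stsi fields added to struct kvm_run below are real:

    /* once, at VM creation */
    struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_STSI };
    ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

    /* in the vcpu run loop, after KVM_RUN returns */
    static void handle_stsi_exit(int vcpu_fd, struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_S390_STSI)
                    return;
            /* fc/sel1/sel2 identify the STSI function, addr/ar the buffer */
            if (run->s390_stsi.fc == 3 && run->s390_stsi.sel1 == 2 &&
                run->s390_stsi.sel2 == 2)
                    insert_guest_name(vcpu_fd, run->s390_stsi.addr,
                                      run->s390_stsi.ar);
    }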
Reviewed-by: Eric Farman Reviewed-by: Christian Borntraeger Signed-off-by: Ekaterina Tumanova Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/kvm-s390.c | 5 +++++ arch/s390/kvm/priv.c | 17 ++++++++++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 347a333..2356a8c 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -565,6 +565,7 @@ struct kvm_arch{ int use_vectors; int user_cpu_state_ctrl; int user_sigp; + int user_stsi; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; wait_queue_head_t ipte_wq; int ipte_lock_count; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b7ecef9..fdfa106 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -178,6 +178,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VM_ATTRIBUTES: case KVM_CAP_MP_STATE: case KVM_CAP_S390_USER_SIGP: + case KVM_CAP_S390_USER_STSI: r = 1; break; case KVM_CAP_S390_MEM_OP: @@ -280,6 +281,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) kvm->arch.use_vectors = MACHINE_HAS_VX; r = MACHINE_HAS_VX ? 0 : -EINVAL; break; + case KVM_CAP_S390_USER_STSI: + kvm->arch.user_stsi = 1; + r = 0; + break; default: r = -EINVAL; break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index f4fe02e..5e4658d 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -496,6 +496,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) ASCEBC(mem->vm[0].cpi, 16); } +static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, + u8 fc, u8 sel1, u16 sel2) +{ + vcpu->run->exit_reason = KVM_EXIT_S390_STSI; + vcpu->run->s390_stsi.addr = addr; + vcpu->run->s390_stsi.ar = ar; + vcpu->run->s390_stsi.fc = fc; + vcpu->run->s390_stsi.sel1 = sel1; + vcpu->run->s390_stsi.sel2 = sel2; +} + static int handle_stsi(struct kvm_vcpu *vcpu) { int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; @@ -556,11 +567,15 @@ static int handle_stsi(struct kvm_vcpu *vcpu) rc = kvm_s390_inject_prog_cond(vcpu, rc); goto out; } + if (vcpu->kvm->arch.user_stsi) { + insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); + rc = -EREMOTE; + } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); free_page(mem); kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; - return 0; + return rc; out_no_data: kvm_s390_set_psw_cc(vcpu, 3); out: -- cgit v1.1 From 30ee2a984f07b00895e0e01d78859b3aff9307c7 Mon Sep 17 00:00:00 2001 From: "Jason J. Herne" Date: Tue, 23 Sep 2014 09:23:01 -0400 Subject: KVM: s390: Create ioctl for Getting/Setting guest storage keys Provide the KVM_S390_GET_SKEYS and KVM_S390_SET_SKEYS ioctl which can be used to get/set guest storage keys. This functionality is needed for live migration of s390 guests that use storage keys. Signed-off-by: Jason J. 
Herne Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 123 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index fdfa106..0dc22ba 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -179,6 +179,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MP_STATE: case KVM_CAP_S390_USER_SIGP: case KVM_CAP_S390_USER_STSI: + case KVM_CAP_S390_SKEYS: r = 1; break; case KVM_CAP_S390_MEM_OP: @@ -729,6 +730,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } +static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) +{ + uint8_t *keys; + uint64_t hva; + unsigned long curkey; + int i, r = 0; + + if (args->flags != 0) + return -EINVAL; + + /* Is this guest using storage keys? */ + if (!mm_use_skey(current->mm)) + return KVM_S390_GET_SKEYS_NONE; + + /* Enforce sane limit on memory allocation */ + if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) + return -EINVAL; + + keys = kmalloc_array(args->count, sizeof(uint8_t), + GFP_KERNEL | __GFP_NOWARN); + if (!keys) + keys = vmalloc(sizeof(uint8_t) * args->count); + if (!keys) + return -ENOMEM; + + for (i = 0; i < args->count; i++) { + hva = gfn_to_hva(kvm, args->start_gfn + i); + if (kvm_is_error_hva(hva)) { + r = -EFAULT; + goto out; + } + + curkey = get_guest_storage_key(current->mm, hva); + if (IS_ERR_VALUE(curkey)) { + r = curkey; + goto out; + } + keys[i] = curkey; + } + + r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, + sizeof(uint8_t) * args->count); + if (r) + r = -EFAULT; +out: + kvfree(keys); + return r; +} + +static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) +{ + uint8_t *keys; + uint64_t hva; + int i, r = 0; + + if (args->flags != 0) + return -EINVAL; + + /* Enforce sane limit on memory allocation */ + if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) + return -EINVAL; + + keys = kmalloc_array(args->count, sizeof(uint8_t), + GFP_KERNEL | __GFP_NOWARN); + if (!keys) + keys = vmalloc(sizeof(uint8_t) * args->count); + if (!keys) + return -ENOMEM; + + r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, + sizeof(uint8_t) * args->count); + if (r) { + r = -EFAULT; + goto out; + } + + /* Enable storage key handling for the guest */ + s390_enable_skey(); + + for (i = 0; i < args->count; i++) { + hva = gfn_to_hva(kvm, args->start_gfn + i); + if (kvm_is_error_hva(hva)) { + r = -EFAULT; + goto out; + } + + /* Lowest order bit is reserved */ + if (keys[i] & 0x01) { + r = -EINVAL; + goto out; + } + + r = set_guest_storage_key(current->mm, hva, + (unsigned long)keys[i], 0); + if (r) + goto out; + } +out: + kvfree(keys); + return r; +} + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -788,6 +891,26 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_s390_vm_has_attr(kvm, &attr); break; } + case KVM_S390_GET_SKEYS: { + struct kvm_s390_skeys args; + + r = -EFAULT; + if (copy_from_user(&args, argp, + sizeof(struct kvm_s390_skeys))) + break; + r = kvm_s390_get_skeys(kvm, &args); + break; + } + case KVM_S390_SET_SKEYS: { + struct kvm_s390_skeys args; + + r = -EFAULT; + if (copy_from_user(&args, argp, + sizeof(struct kvm_s390_skeys))) + break; + r = kvm_s390_set_skeys(kvm, &args); + break; + } default: r = -ENOTTY; } -- cgit v1.1 From 400ac6cd73633f61e42a035b910c3db2b590b9d5 Mon Sep 17 
00:00:00 2001 From: Michael Mueller Date: Tue, 17 Mar 2015 11:03:07 +0100 Subject: KVM: s390: drop SIMD bit from kvm_s390_fac_list_mask Setting the SIMD bit in the KVM mask is a problem because it makes the facility visible to the guest but not usable, so it needs to be removed again. Signed-off-by: Michael Mueller Reviewed-by: Eric Farman Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0dc22ba..42b8a25 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -107,7 +107,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { unsigned long kvm_s390_fac_list_mask[] = { 0xff82fffbf4fc2000UL, 0x005c000000000000UL, - 0x4000000000000000UL, }; unsigned long kvm_s390_fac_list_mask_size(void) -- cgit v1.1 From 18280d8b4bcd4a2b174ee3cd748166c6190acacb Mon Sep 17 00:00:00 2001 From: Michael Mueller Date: Mon, 16 Mar 2015 16:05:41 +0100 Subject: KVM: s390: represent SIMD cap in kvm facility The patch represents the capability KVM_CAP_S390_VECTOR_REGISTERS by means of the SIMD facility bit. This allows user space to a) keep SIMD disabled when running with a non-SIMD-aware QEMU, b) enable SIMD when running with a SIMD-aware QEMU, and c) eventually control SIMD by means of a QEMU version using the future cpu model ioctls. Signed-off-by: Michael Mueller Reviewed-by: Eric Farman Tested-by: Eric Farman Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 - arch/s390/kvm/kvm-s390.c | 19 +++++++++++-------- arch/s390/kvm/kvm-s390.h | 11 +++++++++++ 3 files changed, 22 insertions(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 2356a8c..b8d1e97 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -562,7 +562,6 @@ struct kvm_arch{ int css_support; int use_irqchip; int use_cmma; - int use_vectors; int user_cpu_state_ctrl; int user_sigp; int user_stsi; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 42b8a25..9072127 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -278,8 +278,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = 0; break; case KVM_CAP_S390_VECTOR_REGISTERS: - kvm->arch.use_vectors = MACHINE_HAS_VX; - r = MACHINE_HAS_VX ?
0 : -EINVAL; + if (MACHINE_HAS_VX) { + set_kvm_facility(kvm->arch.model.fac->mask, 129); + set_kvm_facility(kvm->arch.model.fac->list, 129); + r = 0; + } else + r = -EINVAL; break; case KVM_CAP_S390_USER_STSI: kvm->arch.user_stsi = 1; @@ -1084,7 +1088,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.css_support = 0; kvm->arch.use_irqchip = 0; - kvm->arch.use_vectors = 0; kvm->arch.epoch = 0; spin_lock_init(&kvm->arch.start_stop_lock); @@ -1186,12 +1189,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { save_fp_ctl(&vcpu->arch.host_fpregs.fpc); - if (vcpu->kvm->arch.use_vectors) + if (test_kvm_facility(vcpu->kvm, 129)) save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); else save_fp_regs(vcpu->arch.host_fpregs.fprs); save_access_regs(vcpu->arch.host_acrs); - if (vcpu->kvm->arch.use_vectors) { + if (test_kvm_facility(vcpu->kvm, 129)) { restore_fp_ctl(&vcpu->run->s.regs.fpc); restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); } else { @@ -1207,7 +1210,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); - if (vcpu->kvm->arch.use_vectors) { + if (test_kvm_facility(vcpu->kvm, 129)) { save_fp_ctl(&vcpu->run->s.regs.fpc); save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); } else { @@ -1216,7 +1219,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) } save_access_regs(vcpu->run->s.regs.acrs); restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); - if (vcpu->kvm->arch.use_vectors) + if (test_kvm_facility(vcpu->kvm, 129)) restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); else restore_fp_regs(vcpu->arch.host_fpregs.fprs); @@ -1316,7 +1319,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= 1; if (sclp_has_sigpif()) vcpu->arch.sie_block->eca |= 0x10000000U; - if (vcpu->kvm->arch.use_vectors) { + if (test_kvm_facility(vcpu->kvm, 129)) { vcpu->arch.sie_block->eca |= 0x00020000; vcpu->arch.sie_block->ecd |= 0x20000000; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 5d54191..c5aefef 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -149,6 +149,17 @@ static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) __test_facility(nr, kvm->arch.model.fac->list); } +static inline int set_kvm_facility(u64 *fac_list, unsigned long nr) +{ + unsigned char *ptr; + + if (nr >= MAX_FACILITY_BIT) + return -EINVAL; + ptr = (unsigned char *) fac_list + (nr >> 3); + *ptr |= (0x80UL >> (nr & 7)); + return 0; +} + /* are cpu states controlled by user space */ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) { -- cgit v1.1 From e32edf4fd0fa4897e12ca66118ab67bf257e16e4 Mon Sep 17 00:00:00 2001 From: Nikolay Nikolaev Date: Thu, 26 Mar 2015 14:39:28 +0000 Subject: KVM: Redesign kvm_io_bus_ API to pass VCPU structure to the callbacks. This is needed in e.g. ARM vGIC emulation, where the MMIO handling depends on the VCPU that does the access. 
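Only the s390 part of the tree-wide change is visible here; the substance of the commit is that the I/O bus entry points, and in turn the device callbacks, receive the accessing vcpu. Roughly, with the ops simplified:

    /* before */
    int (*write)(struct kvm_io_device *dev, gpa_t addr, int len,
                 const void *val);

    /* after: the accessing vcpu is threaded through */
    int (*write)(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                 gpa_t addr, int len, const void *val);

An emulated device such as the ARM vGIC can then key its MMIO handling off the vcpu that performed the access.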
Signed-off-by: Nikolay Nikolaev Signed-off-by: Andre Przywara Acked-by: Paolo Bonzini Acked-by: Christoffer Dall Reviewed-by: Marc Zyngier Signed-off-by: Marc Zyngier --- arch/s390/kvm/diag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 9254aff..329ec75 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) * - gpr 3 contains the virtqueue index (passed as datamatch) * - gpr 4 contains the index on the bus (optionally) */ - ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, + ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS, vcpu->run->s.regs.gprs[2] & 0xffffffff, 8, &vcpu->run->s.regs.gprs[3], vcpu->run->s.regs.gprs[4]); -- cgit v1.1 From 2ba459685204af53b034d269d5cdb3059d4b471e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 25 Mar 2015 13:12:32 +0100 Subject: KVM: s390: store the breaking-event address on pgm interrupts If the PER-3 facility is installed, the breaking-event address is to be stored in the low core. There is no facility bit for PER-3 in stfl(e) and Linux always uses the value at address 272 no matter if PER-3 is available or not. We can't hide its existence from the guest. All program interrupts injected via the SIE automatically store this information if the PER-3 facility is available in the hypervisor. Also the itdb contains the address automatically. As there is no switch to turn this mechanism off, let's simply make it consistent and also store the breaking event address in case of manual program interrupt injection. Reviewed-by: Jens Freimann Signed-off-by: David Hildenbrand Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/interrupt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2afec60..2361b8e 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -585,6 +585,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) kvm_s390_rewind_psw(vcpu, ilc); rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); + rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, + (u64 *) __LC_LAST_BREAK); rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_INT_CODE); rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, -- cgit v1.1 From a3ed8dae6e3db479ca275883ba7fe994170b0ae6 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Wed, 18 Mar 2015 13:54:31 +0100 Subject: KVM: s390: enable more features that need no hypervisor changes After some review about what these facilities do, the following facilities will work under KVM and can, therefore, be reported to the guest if the cpu model and the host cpu provide this bit. There are plans underway to make the whole bit thing more readable, but its not yet finished. So here are some last bit changes and we enhance the KVM mask with: 9 The sense-running-status facility is installed in the z/Architecture architectural mode. ---> handled by SIE or KVM 10 The conditional-SSKE facility is installed in the z/Architecture architectural mode. ---> handled by SIE. KVM will retry SIE 13 The IPTE-range facility is installed in the z/Architecture architectural mode. ---> handled by SIE. KVM will retry SIE 36 The enhanced-monitor facility is installed in the z/Architecture architectural mode. 
---> handled by SIE 47 The CMPSC-enhancement facility is installed in the z/Architecture architectural mode. ---> handled by SIE 48 The decimal-floating-point zoned-conversion facility is installed in the z/Architecture architectural mode. ---> handled by SIE 49 The execution-hint, load-and-trap, miscellaneous- instruction-extensions and processor-assist ---> handled by SIE 51 The local-TLB-clearing facility is installed in the z/Architecture architectural mode. ---> handled by SIE 52 The interlocked-access facility 2 is installed. ---> handled by SIE 53 The load/store-on-condition facility 2 and load-and- zero-rightmost-byte facility are installed in the z/Architecture architectural mode. ---> handled by SIE 57 The message-security-assist-extension-5 facility is installed in the z/Architecture architectural mode. ---> handled by SIE 66 The reset-reference-bits-multiple facility is installed in the z/Architecture architectural mode. ---> handled by SIE. KVM will retry SIE 80 The decimal-floating-point packed-conversion facility is installed in the z/Architecture architectural mode. ---> handled by SIE Signed-off-by: Christian Borntraeger Tested-by: Michael Mueller Acked-by: Cornelia Huck --- arch/s390/kvm/kvm-s390.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 9072127..a130885 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -105,8 +105,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { /* upper facilities limit for kvm */ unsigned long kvm_s390_fac_list_mask[] = { - 0xff82fffbf4fc2000UL, - 0x005c000000000000UL, + 0xffe6fffbfcfdfc40UL, + 0x205c800000000000UL, }; unsigned long kvm_s390_fac_list_mask_size(void) -- cgit v1.1 From 94aa033efcac47b09db22cb561e135baf37b7887 Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Mon, 16 Mar 2015 12:17:13 +0100 Subject: KVM: s390: fix get_all_floating_irqs This fixes a bug introduced with commit c05c4186bbe4 ("KVM: s390: add floating irq controller"). get_all_floating_irqs() does copy_to_user() while holding a spin lock. Let's fix this by filling a temporary buffer first and copy it to userspace after giving up the lock. Cc: # 3.18+: 69a8d4562638 KVM: s390: no need to hold... 
Reviewed-by: David Hildenbrand Signed-off-by: Jens Freimann Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/interrupt.c | 58 ++++++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 26 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2361b8e..5ebd500 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1477,61 +1478,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) spin_unlock(&fi->lock); } -static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, - u8 *addr) +static void inti_to_irq(struct kvm_s390_interrupt_info *inti, + struct kvm_s390_irq *irq) { - struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; - struct kvm_s390_irq irq = {0}; - - irq.type = inti->type; + irq->type = inti->type; switch (inti->type) { case KVM_S390_INT_PFAULT_INIT: case KVM_S390_INT_PFAULT_DONE: case KVM_S390_INT_VIRTIO: case KVM_S390_INT_SERVICE: - irq.u.ext = inti->ext; + irq->u.ext = inti->ext; break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - irq.u.io = inti->io; + irq->u.io = inti->io; break; case KVM_S390_MCHK: - irq.u.mchk = inti->mchk; + irq->u.mchk = inti->mchk; break; - default: - return -EINVAL; } - - if (copy_to_user(uptr, &irq, sizeof(irq))) - return -EFAULT; - - return 0; } -static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) +static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) { struct kvm_s390_interrupt_info *inti; struct kvm_s390_float_interrupt *fi; + struct kvm_s390_irq *buf; + int max_irqs; int ret = 0; int n = 0; + if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) + return -EINVAL; + + /* + * We are already using -ENOMEM to signal + * userspace it may retry with a bigger buffer, + * so we need to use something else for this case + */ + buf = vzalloc(len); + if (!buf) + return -ENOBUFS; + + max_irqs = len / sizeof(struct kvm_s390_irq); + fi = &kvm->arch.float_int; spin_lock(&fi->lock); - list_for_each_entry(inti, &fi->list, list) { - if (len < sizeof(struct kvm_s390_irq)) { + if (n == max_irqs) { /* signal userspace to try again */ ret = -ENOMEM; break; } - ret = copy_irq_to_user(inti, buf); - if (ret) - break; - buf += sizeof(struct kvm_s390_irq); - len -= sizeof(struct kvm_s390_irq); + inti_to_irq(inti, &buf[n]); n++; } - spin_unlock(&fi->lock); + if (!ret && n > 0) { + if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) + ret = -EFAULT; + } + vfree(buf); return ret < 0 ? ret : n; } @@ -1542,7 +1548,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) switch (attr->group) { case KVM_DEV_FLIC_GET_ALL_IRQS: - r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, + r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, attr->attr); break; default: -- cgit v1.1 From 6d3da241416e6088f83a7ff1f37fb6bb518d9bc8 Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Wed, 3 Jul 2013 15:18:35 +0200 Subject: KVM: s390: deliver floating interrupts in order of priority This patch makes interrupt handling compliant to the z/Architecture Principles of Operation with regard to interrupt priorities. Add a bitmap for pending floating interrupts. Each bit relates to a interrupt type and its list. A turned on bit indicates that a list contains items (interrupts) which need to be delivered. 
When delivering interrupts on a cpu we can merge the existing bitmap for cpu-local interrupts and floating interrupts and have a single mechanism for delivery. Currently we have one list for all kinds of floating interrupts and a corresponding spin lock. This patch adds a separate list per interrupt type. An exception to this are service signal and machine check interrupts, as there can be only one pending interrupt at a time. Signed-off-by: Jens Freimann Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/include/asm/kvm_host.h | 30 +- arch/s390/kvm/interrupt.c | 832 ++++++++++++++++++++++----------------- arch/s390/kvm/kvm-s390.c | 4 +- arch/s390/kvm/kvm-s390.h | 2 +- arch/s390/kvm/priv.c | 9 +- 5 files changed, 510 insertions(+), 367 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b8d1e97..d01fc58 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -344,6 +344,11 @@ enum irq_types { IRQ_PEND_COUNT }; +/* We have 2M for virtio device descriptor pages. Smallest amount of + * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381 + */ +#define KVM_S390_MAX_VIRTIO_IRQS 87381 + /* * Repressible (non-floating) machine check interrupts * subclass bits in MCIC @@ -421,13 +426,32 @@ struct kvm_s390_local_interrupt { unsigned long pending_irqs; }; +#define FIRQ_LIST_IO_ISC_0 0 +#define FIRQ_LIST_IO_ISC_1 1 +#define FIRQ_LIST_IO_ISC_2 2 +#define FIRQ_LIST_IO_ISC_3 3 +#define FIRQ_LIST_IO_ISC_4 4 +#define FIRQ_LIST_IO_ISC_5 5 +#define FIRQ_LIST_IO_ISC_6 6 +#define FIRQ_LIST_IO_ISC_7 7 +#define FIRQ_LIST_PFAULT 8 +#define FIRQ_LIST_VIRTIO 9 +#define FIRQ_LIST_COUNT 10 +#define FIRQ_CNTR_IO 0 +#define FIRQ_CNTR_SERVICE 1 +#define FIRQ_CNTR_VIRTIO 2 +#define FIRQ_CNTR_PFAULT 3 +#define FIRQ_MAX_COUNT 4 + struct kvm_s390_float_interrupt { + unsigned long pending_irqs; spinlock_t lock; - struct list_head list; - atomic_t active; + struct list_head lists[FIRQ_LIST_COUNT]; + int counters[FIRQ_MAX_COUNT]; + struct kvm_s390_mchk_info mchk; + struct kvm_s390_ext_info srv_signal; int next_rr_cpu; unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; - unsigned int irq_count; }; struct kvm_hw_wp_info_arch { diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 5ebd500..2872fdb 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "kvm-s390.h" #include "gaccess.h" #include "trace-s390.h" @@ -34,11 +35,6 @@ #define PFAULT_DONE 0x0680 #define VIRTIO_PARAM 0x0d00 -static int is_ioint(u64 type) -{ - return ((type & 0xfffe0000u) != 0xfffe0000u); -} - int psw_extint_disabled(struct kvm_vcpu *vcpu) { return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); @@ -74,70 +70,25 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) return 1; } -static u64 int_word_to_isc_bits(u32 int_word) +static inline int is_ioirq(unsigned long irq_type) { - u8 isc = (int_word & 0x38000000) >> 27; + return ((irq_type >= IRQ_PEND_IO_ISC_0) && + (irq_type <= IRQ_PEND_IO_ISC_7)); +} +static uint64_t isc_to_isc_bits(int isc) +{ return (0x80 >> isc) << 24; } -static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static inline u8 int_word_to_isc(u32 int_word) { - switch (inti->type) { - case KVM_S390_INT_EXTERNAL_CALL: - if (psw_extint_disabled(vcpu)) - return 0; - if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) - return 1; - return 0; 
- case KVM_S390_INT_EMERGENCY: - if (psw_extint_disabled(vcpu)) - return 0; - if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) - return 1; - return 0; - case KVM_S390_INT_CLOCK_COMP: - return ckc_interrupts_enabled(vcpu); - case KVM_S390_INT_CPU_TIMER: - if (psw_extint_disabled(vcpu)) - return 0; - if (vcpu->arch.sie_block->gcr[0] & 0x400ul) - return 1; - return 0; - case KVM_S390_INT_SERVICE: - case KVM_S390_INT_PFAULT_INIT: - case KVM_S390_INT_PFAULT_DONE: - case KVM_S390_INT_VIRTIO: - if (psw_extint_disabled(vcpu)) - return 0; - if (vcpu->arch.sie_block->gcr[0] & 0x200ul) - return 1; - return 0; - case KVM_S390_PROGRAM_INT: - case KVM_S390_SIGP_STOP: - case KVM_S390_SIGP_SET_PREFIX: - case KVM_S390_RESTART: - return 1; - case KVM_S390_MCHK: - if (psw_mchk_disabled(vcpu)) - return 0; - if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) - return 1; - return 0; - case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - if (psw_ioint_disabled(vcpu)) - return 0; - if (vcpu->arch.sie_block->gcr[6] & - int_word_to_isc_bits(inti->io.io_int_word)) - return 1; - return 0; - default: - printk(KERN_WARNING "illegal interrupt type %llx\n", - inti->type); - BUG(); - } - return 0; + return (int_word & 0x38000000) >> 27; +} + +static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.float_int.pending_irqs; } static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) @@ -145,12 +96,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) return vcpu->arch.local_int.pending_irqs; } -static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) +static unsigned long disable_iscs(struct kvm_vcpu *vcpu, + unsigned long active_mask) { - unsigned long active_mask = pending_local_irqs(vcpu); + int i; + + for (i = 0; i <= MAX_ISC; i++) + if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) + active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i)); + + return active_mask; +} + +static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) +{ + unsigned long active_mask; + + active_mask = pending_local_irqs(vcpu); + active_mask |= pending_floating_irqs(vcpu); if (psw_extint_disabled(vcpu)) active_mask &= ~IRQ_PEND_EXT_MASK; + if (psw_ioint_disabled(vcpu)) + active_mask &= ~IRQ_PEND_IO_MASK; + else + active_mask = disable_iscs(vcpu, active_mask); if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) @@ -159,8 +129,13 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); + if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) + __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); if (psw_mchk_disabled(vcpu)) active_mask &= ~IRQ_PEND_MCHK_MASK; + if (!(vcpu->arch.sie_block->gcr[14] & + vcpu->kvm->arch.float_int.mchk.cr14)) + __clear_bit(IRQ_PEND_MCHK_REP, &active_mask); /* * STOP irqs will never be actively delivered. 
They are triggered via @@ -202,6 +177,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); } +static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) +{ + if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK)) + return; + else if (psw_ioint_disabled(vcpu)) + __set_cpuflag(vcpu, CPUSTAT_IO_INT); + else + vcpu->arch.sie_block->lctl |= LCTL_CR6; +} + static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) { if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) @@ -228,43 +213,15 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) __set_cpuflag(vcpu, CPUSTAT_STOP_INT); } -/* Set interception request for non-deliverable local interrupts */ -static void set_intercept_indicators_local(struct kvm_vcpu *vcpu) +/* Set interception request for non-deliverable interrupts */ +static void set_intercept_indicators(struct kvm_vcpu *vcpu) { + set_intercept_indicators_io(vcpu); set_intercept_indicators_ext(vcpu); set_intercept_indicators_mchk(vcpu); set_intercept_indicators_stop(vcpu); } -static void __set_intercept_indicator(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) -{ - switch (inti->type) { - case KVM_S390_INT_SERVICE: - case KVM_S390_INT_PFAULT_DONE: - case KVM_S390_INT_VIRTIO: - if (psw_extint_disabled(vcpu)) - __set_cpuflag(vcpu, CPUSTAT_EXT_INT); - else - vcpu->arch.sie_block->lctl |= LCTL_CR0; - break; - case KVM_S390_MCHK: - if (psw_mchk_disabled(vcpu)) - vcpu->arch.sie_block->ictl |= ICTL_LPSW; - else - vcpu->arch.sie_block->lctl |= LCTL_CR14; - break; - case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - if (psw_ioint_disabled(vcpu)) - __set_cpuflag(vcpu, CPUSTAT_IO_INT); - else - vcpu->arch.sie_block->lctl |= LCTL_CR6; - break; - default: - BUG(); - } -} - static u16 get_ilc(struct kvm_vcpu *vcpu) { switch (vcpu->arch.sie_block->icptcode) { @@ -350,42 +307,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) { + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_mchk_info mchk; + struct kvm_s390_mchk_info mchk = {}; unsigned long adtl_status_addr; - int rc; + int deliver = 0; + int rc = 0; + spin_lock(&fi->lock); spin_lock(&li->lock); - mchk = li->irq.mchk; + if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) || + test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) { + /* + * If there was an exigent machine check pending, then any + * repressible machine checks that might have been pending + * are indicated along with it, so always clear bits for + * repressible and exigent interrupts + */ + mchk = li->irq.mchk; + clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); + clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); + memset(&li->irq.mchk, 0, sizeof(mchk)); + deliver = 1; + } /* - * If there was an exigent machine check pending, then any repressible - * machine checks that might have been pending are indicated along - * with it, so always clear both bits + * We indicate floating repressible conditions along with + * other pending conditions. Channel Report Pending and Channel + * Subsystem damage are the only two and and are indicated by + * bits in mcic and masked in cr14. 
*/ - clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); - clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); - memset(&li->irq.mchk, 0, sizeof(mchk)); + if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { + mchk.mcic |= fi->mchk.mcic; + mchk.cr14 |= fi->mchk.cr14; + memset(&fi->mchk, 0, sizeof(mchk)); + deliver = 1; + } spin_unlock(&li->lock); + spin_unlock(&fi->lock); - VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", - mchk.mcic); - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, - mchk.cr14, mchk.mcic); - - rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); - rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, - &adtl_status_addr, sizeof(unsigned long)); - rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr); - rc |= put_guest_lc(vcpu, mchk.mcic, - (u64 __user *) __LC_MCCK_CODE); - rc |= put_guest_lc(vcpu, mchk.failing_storage_address, - (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); - rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, - &mchk.fixed_logout, sizeof(mchk.fixed_logout)); - rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + if (deliver) { + VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", + mchk.mcic); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_MCHK, + mchk.cr14, mchk.mcic); + + rc = kvm_s390_vcpu_store_status(vcpu, + KVM_S390_STORE_STATUS_PREFIXED); + rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, + &adtl_status_addr, + sizeof(unsigned long)); + rc |= kvm_s390_vcpu_store_adtl_status(vcpu, + adtl_status_addr); + rc |= put_guest_lc(vcpu, mchk.mcic, + (u64 __user *) __LC_MCCK_CODE); + rc |= put_guest_lc(vcpu, mchk.failing_storage_address, + (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); + rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, + &mchk.fixed_logout, + sizeof(mchk.fixed_logout)); + rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + } return rc ? -EFAULT : 0; } @@ -597,16 +584,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) return rc ? 
-EFAULT : 0; } -static int __must_check __deliver_service(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_service(struct kvm_vcpu *vcpu) { - int rc; + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; + struct kvm_s390_ext_info ext; + int rc = 0; + + spin_lock(&fi->lock); + if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { + spin_unlock(&fi->lock); + return 0; + } + ext = fi->srv_signal; + memset(&fi->srv_signal, 0, sizeof(ext)); + clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); + spin_unlock(&fi->lock); VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", - inti->ext.ext_params); + ext.ext_params); vcpu->stat.deliver_service_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->ext.ext_params, 0); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, + ext.ext_params, 0); rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); @@ -614,106 +612,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params, + rc |= put_guest_lc(vcpu, ext.ext_params, (u32 *)__LC_EXT_PARAMS); + return rc ? -EFAULT : 0; } -static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) { - int rc; + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; + struct kvm_s390_interrupt_info *inti; + int rc = 0; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, - KVM_S390_INT_PFAULT_DONE, 0, - inti->ext.ext_params2); + spin_lock(&fi->lock); + inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], + struct kvm_s390_interrupt_info, + list); + if (inti) { + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_INT_PFAULT_DONE, 0, + inti->ext.ext_params2); + list_del(&inti->list); + fi->counters[FIRQ_CNTR_PFAULT] -= 1; + } + if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) + clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); + spin_unlock(&fi->lock); - rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params2, - (u64 *)__LC_EXT_PARAMS2); + if (inti) { + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, + (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, PFAULT_DONE, + (u16 *)__LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + rc |= put_guest_lc(vcpu, inti->ext.ext_params2, + (u64 *)__LC_EXT_PARAMS2); + kfree(inti); + } return rc ? 
-EFAULT : 0; } -static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) +static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) { - int rc; + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; + struct kvm_s390_interrupt_info *inti; + int rc = 0; - VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", - inti->ext.ext_params, inti->ext.ext_params2); - vcpu->stat.deliver_virtio_interrupt++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - inti->ext.ext_params, - inti->ext.ext_params2); + spin_lock(&fi->lock); + inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], + struct kvm_s390_interrupt_info, + list); + if (inti) { + VCPU_EVENT(vcpu, 4, + "interrupt: virtio parm:%x,parm64:%llx", + inti->ext.ext_params, inti->ext.ext_params2); + vcpu->stat.deliver_virtio_interrupt++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + inti->type, + inti->ext.ext_params, + inti->ext.ext_params2); + list_del(&inti->list); + fi->counters[FIRQ_CNTR_VIRTIO] -= 1; + } + if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) + clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); + spin_unlock(&fi->lock); - rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); - rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= put_guest_lc(vcpu, inti->ext.ext_params, - (u32 *)__LC_EXT_PARAMS); - rc |= put_guest_lc(vcpu, inti->ext.ext_params2, - (u64 *)__LC_EXT_PARAMS2); + if (inti) { + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, + (u16 *)__LC_EXT_INT_CODE); + rc |= put_guest_lc(vcpu, VIRTIO_PARAM, + (u16 *)__LC_EXT_CPU_ADDR); + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + rc |= put_guest_lc(vcpu, inti->ext.ext_params, + (u32 *)__LC_EXT_PARAMS); + rc |= put_guest_lc(vcpu, inti->ext.ext_params2, + (u64 *)__LC_EXT_PARAMS2); + kfree(inti); + } return rc ? -EFAULT : 0; } static int __must_check __deliver_io(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) + unsigned long irq_type) { - int rc; + struct list_head *isc_list; + struct kvm_s390_float_interrupt *fi; + struct kvm_s390_interrupt_info *inti = NULL; + int rc = 0; - VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); - vcpu->stat.deliver_io_int++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, - ((__u32)inti->io.subchannel_id << 16) | - inti->io.subchannel_nr, - ((__u64)inti->io.io_int_parm << 32) | - inti->io.io_int_word); - - rc = put_guest_lc(vcpu, inti->io.subchannel_id, - (u16 *)__LC_SUBCHANNEL_ID); - rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, - (u16 *)__LC_SUBCHANNEL_NR); - rc |= put_guest_lc(vcpu, inti->io.io_int_parm, - (u32 *)__LC_IO_INT_PARM); - rc |= put_guest_lc(vcpu, inti->io.io_int_word, - (u32 *)__LC_IO_INT_WORD); - rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - return rc ? 
-EFAULT : 0; -} + fi = &vcpu->kvm->arch.float_int; -static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) -{ - struct kvm_s390_mchk_info *mchk = &inti->mchk; - int rc; + spin_lock(&fi->lock); + isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0]; + inti = list_first_entry_or_null(isc_list, + struct kvm_s390_interrupt_info, + list); + if (inti) { + VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); + vcpu->stat.deliver_io_int++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + inti->type, + ((__u32)inti->io.subchannel_id << 16) | + inti->io.subchannel_nr, + ((__u64)inti->io.io_int_parm << 32) | + inti->io.io_int_word); + list_del(&inti->list); + fi->counters[FIRQ_CNTR_IO] -= 1; + } + if (list_empty(isc_list)) + clear_bit(irq_type, &fi->pending_irqs); + spin_unlock(&fi->lock); + + if (inti) { + rc = put_guest_lc(vcpu, inti->io.subchannel_id, + (u16 *)__LC_SUBCHANNEL_ID); + rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, + (u16 *)__LC_SUBCHANNEL_NR); + rc |= put_guest_lc(vcpu, inti->io.io_int_parm, + (u32 *)__LC_IO_INT_PARM); + rc |= put_guest_lc(vcpu, inti->io.io_int_word, + (u32 *)__LC_IO_INT_WORD); + rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, + &vcpu->arch.sie_block->gpsw, + sizeof(psw_t)); + kfree(inti); + } - VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", - mchk->mcic); - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, - mchk->cr14, mchk->mcic); - - rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); - rc |= put_guest_lc(vcpu, mchk->mcic, - (u64 __user *) __LC_MCCK_CODE); - rc |= put_guest_lc(vcpu, mchk->failing_storage_address, - (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); - rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, - &mchk->fixed_logout, sizeof(mchk->fixed_logout)); - rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); - rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); return rc ? 
-EFAULT : 0; } @@ -721,6 +759,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); static const deliver_irq_t deliver_irq_funcs[] = { [IRQ_PEND_MCHK_EX] = __deliver_machine_check, + [IRQ_PEND_MCHK_REP] = __deliver_machine_check, [IRQ_PEND_PROG] = __deliver_prog, [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, @@ -729,36 +768,11 @@ static const deliver_irq_t deliver_irq_funcs[] = { [IRQ_PEND_RESTART] = __deliver_restart, [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, + [IRQ_PEND_EXT_SERVICE] = __deliver_service, + [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, + [IRQ_PEND_VIRTIO] = __deliver_virtio, }; -static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt_info *inti) -{ - int rc; - - switch (inti->type) { - case KVM_S390_INT_SERVICE: - rc = __deliver_service(vcpu, inti); - break; - case KVM_S390_INT_PFAULT_DONE: - rc = __deliver_pfault_done(vcpu, inti); - break; - case KVM_S390_INT_VIRTIO: - rc = __deliver_virtio(vcpu, inti); - break; - case KVM_S390_MCHK: - rc = __deliver_mchk_floating(vcpu, inti); - break; - case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - rc = __deliver_io(vcpu, inti); - break; - default: - BUG(); - } - - return rc; -} - /* Check whether an external call is pending (deliverable or not) */ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) { @@ -774,21 +788,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) { - struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; - struct kvm_s390_interrupt_info *inti; int rc; - rc = !!deliverable_local_irqs(vcpu); - - if ((!rc) && atomic_read(&fi->active)) { - spin_lock(&fi->lock); - list_for_each_entry(inti, &fi->list, list) - if (__interrupt_is_deliverable(vcpu, inti)) { - rc = 1; - break; - } - spin_unlock(&fi->lock); - } + rc = !!deliverable_irqs(vcpu); if (!rc && kvm_cpu_has_pending_timer(vcpu)) rc = 1; @@ -907,13 +909,10 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; - struct kvm_s390_interrupt_info *n, *inti = NULL; deliver_irq_t func; - int deliver; int rc = 0; unsigned long irq_type; - unsigned long deliverable_irqs; + unsigned long irqs; __reset_intercept_indicators(vcpu); @@ -923,44 +922,27 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); do { - deliverable_irqs = deliverable_local_irqs(vcpu); + irqs = deliverable_irqs(vcpu); /* bits are in the order of interrupt priority */ - irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT); + irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); if (irq_type == IRQ_PEND_COUNT) break; - func = deliver_irq_funcs[irq_type]; - if (!func) { - WARN_ON_ONCE(func == NULL); - clear_bit(irq_type, &li->pending_irqs); - continue; + if (is_ioirq(irq_type)) { + rc = __deliver_io(vcpu, irq_type); + } else { + func = deliver_irq_funcs[irq_type]; + if (!func) { + WARN_ON_ONCE(func == NULL); + clear_bit(irq_type, &li->pending_irqs); + continue; + } + rc = func(vcpu); } - rc = func(vcpu); - } while (!rc && irq_type != IRQ_PEND_COUNT); - - set_intercept_indicators_local(vcpu); + if (rc) + break; + } while (!rc); - if (!rc && atomic_read(&fi->active)) { - 
do { - deliver = 0; - spin_lock(&fi->lock); - list_for_each_entry_safe(inti, n, &fi->list, list) { - if (__interrupt_is_deliverable(vcpu, inti)) { - list_del(&inti->list); - fi->irq_count--; - deliver = 1; - break; - } - __set_intercept_indicator(vcpu, inti); - } - if (list_empty(&fi->list)) - atomic_set(&fi->active, 0); - spin_unlock(&fi->lock); - if (deliver) { - rc = __deliver_floating_interrupt(vcpu, inti); - kfree(inti); - } - } while (!rc && deliver); - } + set_intercept_indicators(vcpu); return rc; } @@ -1195,80 +1177,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) return 0; } +static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, + int isc, u32 schid) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; + struct kvm_s390_interrupt_info *iter; + u16 id = (schid & 0xffff0000U) >> 16; + u16 nr = schid & 0x0000ffffU; + spin_lock(&fi->lock); + list_for_each_entry(iter, isc_list, list) { + if (schid && (id != iter->io.subchannel_id || + nr != iter->io.subchannel_nr)) + continue; + /* found an appropriate entry */ + list_del_init(&iter->list); + fi->counters[FIRQ_CNTR_IO] -= 1; + if (list_empty(isc_list)) + clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); + spin_unlock(&fi->lock); + return iter; + } + spin_unlock(&fi->lock); + return NULL; +} + +/* + * Dequeue and return an I/O interrupt matching any of the interruption + * subclasses as designated by the isc mask in cr6 and the schid (if != 0). + */ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, - u64 cr6, u64 schid) + u64 isc_mask, u32 schid) +{ + struct kvm_s390_interrupt_info *inti = NULL; + int isc; + + for (isc = 0; isc <= MAX_ISC && !inti; isc++) { + if (isc_mask & isc_to_isc_bits(isc)) + inti = get_io_int(kvm, isc, schid); + } + return inti; +} + +#define SCCB_MASK 0xFFFFFFF8 +#define SCCB_EVENT_PENDING 0x3 + +static int __inject_service(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + + spin_lock(&fi->lock); + fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; + /* + * Early versions of the QEMU s390 bios will inject several + * service interrupts one after another without handling a + * condition code indicating busy. + * We will silently ignore those superfluous sccb values.
+ * A future version of QEMU will take care of serialization + * of servc requests + */ + if (fi->srv_signal.ext_params & SCCB_MASK) + goto out; + fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; + set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); +out: + spin_unlock(&fi->lock); + kfree(inti); + return 0; +} + +static int __inject_virtio(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + + spin_lock(&fi->lock); + if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { + spin_unlock(&fi->lock); + return -EBUSY; + } + fi->counters[FIRQ_CNTR_VIRTIO] += 1; + list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); + set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); + spin_unlock(&fi->lock); + return 0; +} + +static int __inject_pfault_done(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + + spin_lock(&fi->lock); + if (fi->counters[FIRQ_CNTR_PFAULT] >= + (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { + spin_unlock(&fi->lock); + return -EBUSY; + } + fi->counters[FIRQ_CNTR_PFAULT] += 1; + list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); + set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); + spin_unlock(&fi->lock); + return 0; +} + +#define CR_PENDING_SUBCLASS 28 +static int __inject_float_mchk(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + + spin_lock(&fi->lock); + fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); + fi->mchk.mcic |= inti->mchk.mcic; + set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); + spin_unlock(&fi->lock); + kfree(inti); + return 0; +} + +static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { struct kvm_s390_float_interrupt *fi; - struct kvm_s390_interrupt_info *inti, *iter; + struct list_head *list; + int isc; - if ((!schid && !cr6) || (schid && cr6)) - return NULL; fi = &kvm->arch.float_int; spin_lock(&fi->lock); - inti = NULL; - list_for_each_entry(iter, &fi->list, list) { - if (!is_ioint(iter->type)) - continue; - if (cr6 && - ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) - continue; - if (schid) { - if (((schid & 0x00000000ffff0000) >> 16) != - iter->io.subchannel_id) - continue; - if ((schid & 0x000000000000ffff) != - iter->io.subchannel_nr) - continue; - } - inti = iter; - break; - } - if (inti) { - list_del_init(&inti->list); - fi->irq_count--; + if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { + spin_unlock(&fi->lock); + return -EBUSY; } - if (list_empty(&fi->list)) - atomic_set(&fi->active, 0); + fi->counters[FIRQ_CNTR_IO] += 1; + + isc = int_word_to_isc(inti->io.io_int_word); + list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; + list_add_tail(&inti->list, list); + set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); spin_unlock(&fi->lock); - return inti; + return 0; } static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { struct kvm_s390_local_interrupt *li; struct kvm_s390_float_interrupt *fi; - struct kvm_s390_interrupt_info *iter; struct kvm_vcpu *dst_vcpu = NULL; int sigcpu; - int rc = 0; + u64 type = READ_ONCE(inti->type); + int rc; fi = &kvm->arch.float_int; - spin_lock(&fi->lock); - if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { + + switch (type) { + case KVM_S390_MCHK: + rc = __inject_float_mchk(kvm, inti); + break; + case KVM_S390_INT_VIRTIO: + rc = __inject_virtio(kvm, inti); + break; + case KVM_S390_INT_SERVICE: + rc = 
__inject_service(kvm, inti); + break; + case KVM_S390_INT_PFAULT_DONE: + rc = __inject_pfault_done(kvm, inti); + break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + rc = __inject_io(kvm, inti); + break; + default: rc = -EINVAL; - goto unlock_fi; } - fi->irq_count++; - if (!is_ioint(inti->type)) { - list_add_tail(&inti->list, &fi->list); - } else { - u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); + if (rc) + return rc; - /* Keep I/O interrupts sorted in isc order. */ - list_for_each_entry(iter, &fi->list, list) { - if (!is_ioint(iter->type)) - continue; - if (int_word_to_isc_bits(iter->io.io_int_word) - <= isc_bits) - continue; - break; - } - list_add_tail(&inti->list, &iter->list); - } - atomic_set(&fi->active, 1); - if (atomic_read(&kvm->online_vcpus) == 0) - goto unlock_fi; sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); if (sigcpu == KVM_MAX_VCPUS) { do { @@ -1280,7 +1364,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) dst_vcpu = kvm_get_vcpu(kvm, sigcpu); li = &dst_vcpu->arch.local_int; spin_lock(&li->lock); - switch (inti->type) { + switch (type) { case KVM_S390_MCHK: atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); break; @@ -1293,9 +1377,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) } spin_unlock(&li->lock); kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); -unlock_fi: - spin_unlock(&fi->lock); - return rc; + return 0; + } int kvm_s390_inject_vm(struct kvm *kvm, @@ -1462,20 +1545,14 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) return rc; } -void kvm_s390_clear_float_irqs(struct kvm *kvm) +static inline void clear_irq_list(struct list_head *_list) { - struct kvm_s390_float_interrupt *fi; - struct kvm_s390_interrupt_info *n, *inti = NULL; + struct kvm_s390_interrupt_info *inti, *n; - fi = &kvm->arch.float_int; - spin_lock(&fi->lock); - list_for_each_entry_safe(inti, n, &fi->list, list) { + list_for_each_entry_safe(inti, n, _list, list) { list_del(&inti->list); kfree(inti); } - fi->irq_count = 0; - atomic_set(&fi->active, 0); - spin_unlock(&fi->lock); } static void inti_to_irq(struct kvm_s390_interrupt_info *inti, @@ -1486,26 +1563,37 @@ static void inti_to_irq(struct kvm_s390_interrupt_info *inti, case KVM_S390_INT_PFAULT_INIT: case KVM_S390_INT_PFAULT_DONE: case KVM_S390_INT_VIRTIO: - case KVM_S390_INT_SERVICE: irq->u.ext = inti->ext; break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: irq->u.io = inti->io; break; - case KVM_S390_MCHK: - irq->u.mchk = inti->mchk; - break; } } +void kvm_s390_clear_float_irqs(struct kvm *kvm) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + int i; + + spin_lock(&fi->lock); + for (i = 0; i < FIRQ_LIST_COUNT; i++) + clear_irq_list(&fi->lists[i]); + for (i = 0; i < FIRQ_MAX_COUNT; i++) + fi->counters[i] = 0; + spin_unlock(&fi->lock); +}; + static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) { struct kvm_s390_interrupt_info *inti; struct kvm_s390_float_interrupt *fi; struct kvm_s390_irq *buf; + struct kvm_s390_irq *irq; int max_irqs; int ret = 0; int n = 0; + int i; if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) return -EINVAL; @@ -1523,15 +1611,41 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) fi = &kvm->arch.float_int; spin_lock(&fi->lock); - list_for_each_entry(inti, &fi->list, list) { + for (i = 0; i < FIRQ_LIST_COUNT; i++) { + list_for_each_entry(inti, &fi->lists[i], list) { + if (n == max_irqs) { + /* signal userspace to try again */ + ret = 
-ENOMEM; + goto out; + } + inti_to_irq(inti, &buf[n]); + n++; + } + } + if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { if (n == max_irqs) { /* signal userspace to try again */ ret = -ENOMEM; - break; + goto out; } - inti_to_irq(inti, &buf[n]); + irq = (struct kvm_s390_irq *) &buf[n]; + irq->type = KVM_S390_INT_SERVICE; + irq->u.ext = fi->srv_signal; n++; } + if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { + if (n == max_irqs) { + /* signal userspace to try again */ + ret = -ENOMEM; + goto out; + } + irq = (struct kvm_s390_irq *) &buf[n]; + irq->type = KVM_S390_MCHK; + irq->u.mchk = fi->mchk; + n++; +} + +out: spin_unlock(&fi->lock); if (!ret && n > 0) { if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index a130885..dbc9ca3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "kvm-s390.h" #include "gaccess.h" @@ -1069,7 +1070,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) goto out_err; spin_lock_init(&kvm->arch.float_int.lock); - INIT_LIST_HEAD(&kvm->arch.float_int.list); + for (i = 0; i < FIRQ_LIST_COUNT; i++) + INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); init_waitqueue_head(&kvm->arch.ipte_wq); mutex_init(&kvm->arch.ipte_mutex); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c5aefef..343644a 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -178,7 +178,7 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq); int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, - u64 cr6, u64 schid); + u64 isc_mask, u32 schid); int kvm_s390_reinject_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *inti); int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 5e4658d..d22d8ee 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -294,10 +294,13 @@ reinject_interrupt: static int handle_tsch(struct kvm_vcpu *vcpu) { - struct kvm_s390_interrupt_info *inti; + struct kvm_s390_interrupt_info *inti = NULL; + const u64 isc_mask = 0xffUL << 24; /* all iscs set */ - inti = kvm_s390_get_io_int(vcpu->kvm, 0, - vcpu->run->s.regs.gprs[1]); + /* a valid schid has at least one bit set */ + if (vcpu->run->s.regs.gprs[1]) + inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, + vcpu->run->s.regs.gprs[1]); /* * Prepare exit to userspace. -- cgit v1.1 From b4aec92567f3146167cbc262c686ff73730aa4ca Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 1 Dec 2014 15:55:42 +0100 Subject: KVM: s390: cpu timer irq priority We now have a mechanism for delivering interrupts according to their priority. Let's inject them using our new infrastructure (instead of letting only hardware handle them), so we can be sure that the irq priorities are satisfied. For s390, both the cpu timer and the clock comparator have to be checked by the common code helper kvm_cpu_has_pending_timer(), although the cpu timer is only stepped while the guest is being executed.
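As background for the helper added below: the s390 CPU timer is a signed 64-bit value that is decremented while the guest runs, so once it counts down past zero its sign bit becomes set, which is what the "cputm >> 63" test picks up. A stand-alone sketch of that arithmetic (illustrative plain C, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t cputm = 5;     /* timer value shortly before expiry */

            cputm -= 10;            /* the timer keeps decrementing past zero */
            /* the sign bit now indicates the pending timer condition */
            printf("expired: %d\n", (int)(cputm >> 63));    /* prints 1 */
            return 0;
    }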
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/interrupt.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2872fdb..8a0786c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -70,6 +70,26 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) return 1; } +static int ckc_irq_pending(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.sie_block->ckc < + get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) + return 0; + return ckc_interrupts_enabled(vcpu); +} + +static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) +{ + return !psw_extint_disabled(vcpu) && + (vcpu->arch.sie_block->gcr[0] & 0x400ul); +} + +static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.sie_block->cputm >> 63) && + cpu_timer_interrupts_enabled(vcpu); +} + static inline int is_ioirq(unsigned long irq_type) { return ((irq_type >= IRQ_PEND_IO_ISC_0) && @@ -809,12 +829,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - if (!(vcpu->arch.sie_block->ckc < - get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) - return 0; - if (!ckc_interrupts_enabled(vcpu)) - return 0; - return 1; + return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); } int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) @@ -918,9 +933,14 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) /* pending ckc conditions might have been invalidated */ clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); - if (kvm_cpu_has_pending_timer(vcpu)) + if (ckc_irq_pending(vcpu)) set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); + /* pending cpu timer conditions might have been invalidated */ + clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); + if (cpu_timer_irq_pending(vcpu)) + set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); + do { irqs = deliverable_irqs(vcpu); /* bits are in the order of interrupt priority */ -- cgit v1.1 From 47b43c52ee4b0425449d1b2b1eedca7f6b7a578a Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Tue, 11 Nov 2014 20:57:06 +0100 Subject: KVM: s390: add ioctl to inject local interrupts We have introduced struct kvm_s390_irq a while ago, which allows injecting all kinds of interrupts as defined in the Principles of Operation.
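For orientation, a hypothetical user-space sketch of driving the ioctl this patch adds (vcpu_fd is an assumed, already-open vcpu file descriptor; KVM_S390_IRQ and KVM_S390_INT_EMERGENCY come from the linux/kvm.h of a kernel carrying this series):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* inject an emergency signal, naming CPU address 1 as the source */
    static int inject_emergency(int vcpu_fd)
    {
            struct kvm_s390_irq irq;

            memset(&irq, 0, sizeof(irq));
            irq.type = KVM_S390_INT_EMERGENCY;
            irq.u.emerg.code = 1;   /* address of the signalling cpu */

            return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
    }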
Add an ioctl to inject interrupts with the extended struct kvm_s390_irq. Signed-off-by: Jens Freimann Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/kvm-s390.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index dbc9ca3..8bc25d4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -177,6 +177,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_IRQCHIP: case KVM_CAP_VM_ATTRIBUTES: case KVM_CAP_MP_STATE: + case KVM_CAP_S390_INJECT_IRQ: case KVM_CAP_S390_USER_SIGP: case KVM_CAP_S390_USER_STSI: case KVM_CAP_S390_SKEYS: @@ -2391,6 +2392,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, long r; switch (ioctl) { + case KVM_S390_IRQ: { + struct kvm_s390_irq s390irq; + + r = -EFAULT; + if (copy_from_user(&s390irq, argp, sizeof(s390irq))) + break; + r = kvm_s390_inject_vcpu(vcpu, &s390irq); + break; + } case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; struct kvm_s390_irq s390irq; -- cgit v1.1 From 79e87a103de1eda0cb4d726cd8581798e2d38f3e Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Thu, 19 Mar 2015 15:12:12 +0100 Subject: KVM: s390: refactor vcpu injection function Let's provide a version of kvm_s390_inject_vcpu() that does not acquire the local-interrupt lock and skips waking up the vcpu. To be used in a later patch for vcpu-local interrupt migration, where we are already holding the lock. Reviewed-by: David Hildenbrand Signed-off-by: Jens Freimann Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/interrupt.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 8a0786c..bc09880 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1514,12 +1514,10 @@ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) spin_unlock(&li->lock); } -int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) +static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; - spin_lock(&li->lock); switch (irq->type) { case KVM_S390_PROGRAM_INT: VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", @@ -1559,6 +1557,17 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) default: rc = -EINVAL; } + + return rc; +} + +int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + int rc; + + spin_lock(&li->lock); + rc = do_inject_vcpu(vcpu, irq); spin_unlock(&li->lock); if (!rc) kvm_s390_vcpu_wakeup(vcpu); -- cgit v1.1 From 816c7667ea97c61884e014cfeedaede5b67b0e58 Mon Sep 17 00:00:00 2001 From: Jens Freimann Date: Mon, 24 Nov 2014 17:13:46 +0100 Subject: KVM: s390: migrate vcpu interrupt state This patch adds support for migrating vcpu interrupts. Two new vcpu ioctls are added which get/set the complete status of pending interrupts in one go. The ioctls are marked as available with the new capability KVM_CAP_S390_IRQ_STATE. We cannot use a ONEREG, as the number of pending local interrupts is not constant and depends on the number of CPUs. To retrieve the interrupt state we add an ioctl KVM_S390_GET_IRQ_STATE. Its input parameter is a pointer to a struct kvm_s390_irq_state which has a buffer and length.
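A hypothetical user-space sketch of the retrieval side (vcpu_fd and the caller-supplied buffer are assumptions; as implemented below, the ioctl returns the number of bytes stored on success, or -1 with errno set on error):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int save_vcpu_irq_state(int vcpu_fd, struct kvm_s390_irq *buf,
                                   __u32 buf_len)
    {
            struct kvm_s390_irq_state irq_state = {
                    .buf = (__u64)(unsigned long)buf,
                    .len = buf_len,
            };

            return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
    }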
For all currently pending interrupts, we copy a struct kvm_s390_irq into the buffer and pass it to userspace. To restore interrupt state from a buffer provided by userspace, we add an ioctl KVM_S390_SET_IRQ_STATE. It passes a struct kvm_s390_irq_state into the kernel and injects all interrupts contained in the buffer. Signed-off-by: Jens Freimann Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/interrupt.c | 140 ++++++++++++++++++++++++++++++++++++++++++++++ arch/s390/kvm/kvm-s390.c | 36 ++++++++++++ arch/s390/kvm/kvm-s390.h | 4 ++ 3 files changed, 180 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index bc09880..9de4726 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2123,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, { return -EINVAL; } + +int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_irq *buf; + int r = 0; + int n; + + buf = vmalloc(len); + if (!buf) + return -ENOMEM; + + if (copy_from_user((void *) buf, irqstate, len)) { + r = -EFAULT; + goto out_free; + } + + /* + * Don't allow setting the interrupt state + * when there are already interrupts pending + */ + spin_lock(&li->lock); + if (li->pending_irqs) { + r = -EBUSY; + goto out_unlock; + } + + for (n = 0; n < len / sizeof(*buf); n++) { + r = do_inject_vcpu(vcpu, &buf[n]); + if (r) + break; + } + +out_unlock: + spin_unlock(&li->lock); +out_free: + vfree(buf); + + return r; +} + +static void store_local_irq(struct kvm_s390_local_interrupt *li, + struct kvm_s390_irq *irq, + unsigned long irq_type) +{ + switch (irq_type) { + case IRQ_PEND_MCHK_EX: + case IRQ_PEND_MCHK_REP: + irq->type = KVM_S390_MCHK; + irq->u.mchk = li->irq.mchk; + break; + case IRQ_PEND_PROG: + irq->type = KVM_S390_PROGRAM_INT; + irq->u.pgm = li->irq.pgm; + break; + case IRQ_PEND_PFAULT_INIT: + irq->type = KVM_S390_INT_PFAULT_INIT; + irq->u.ext = li->irq.ext; + break; + case IRQ_PEND_EXT_EXTERNAL: + irq->type = KVM_S390_INT_EXTERNAL_CALL; + irq->u.extcall = li->irq.extcall; + break; + case IRQ_PEND_EXT_CLOCK_COMP: + irq->type = KVM_S390_INT_CLOCK_COMP; + break; + case IRQ_PEND_EXT_CPU_TIMER: + irq->type = KVM_S390_INT_CPU_TIMER; + break; + case IRQ_PEND_SIGP_STOP: + irq->type = KVM_S390_SIGP_STOP; + irq->u.stop = li->irq.stop; + break; + case IRQ_PEND_RESTART: + irq->type = KVM_S390_RESTART; + break; + case IRQ_PEND_SET_PREFIX: + irq->type = KVM_S390_SIGP_SET_PREFIX; + irq->u.prefix = li->irq.prefix; + break; + } +} + +int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) +{ + uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; + unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)]; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + unsigned long pending_irqs; + struct kvm_s390_irq irq; + unsigned long irq_type; + int cpuaddr; + int n = 0; + + spin_lock(&li->lock); + pending_irqs = li->pending_irqs; + memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending, + sizeof(sigp_emerg_pending)); + spin_unlock(&li->lock); + + for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) { + memset(&irq, 0, sizeof(irq)); + if (irq_type == IRQ_PEND_EXT_EMERGENCY) + continue; + if (n + sizeof(irq) > len) + return -ENOBUFS; + store_local_irq(&vcpu->arch.local_int, &irq, irq_type); + if (copy_to_user(&buf[n], &irq, sizeof(irq))) + return -EFAULT; + n +=
sizeof(irq); + } + + if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) { + for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) { + memset(&irq, 0, sizeof(irq)); + if (n + sizeof(irq) > len) + return -ENOBUFS; + irq.type = KVM_S390_INT_EMERGENCY; + irq.u.emerg.code = cpuaddr; + if (copy_to_user(&buf[n], &irq, sizeof(irq))) + return -EFAULT; + n += sizeof(irq); + } + } + + if ((sigp_ctrl & SIGP_CTRL_C) && + (atomic_read(&vcpu->arch.sie_block->cpuflags) & + CPUSTAT_ECALL_PEND)) { + if (n + sizeof(irq) > len) + return -ENOBUFS; + memset(&irq, 0, sizeof(irq)); + irq.type = KVM_S390_INT_EXTERNAL_CALL; + irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK; + if (copy_to_user(&buf[n], &irq, sizeof(irq))) + return -EFAULT; + n += sizeof(irq); + } + + return n; +} diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8bc25d4..3040b14 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -41,6 +41,9 @@ #include "trace-s390.h" #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */ +#define LOCAL_IRQS 32 +#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \ + (KVM_MAX_VCPUS + LOCAL_IRQS)) #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU @@ -181,6 +184,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_USER_SIGP: case KVM_CAP_S390_USER_STSI: case KVM_CAP_S390_SKEYS: + case KVM_CAP_S390_IRQ_STATE: r = 1; break; case KVM_CAP_S390_MEM_OP: @@ -2500,6 +2504,38 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; break; } + case KVM_S390_SET_IRQ_STATE: { + struct kvm_s390_irq_state irq_state; + + r = -EFAULT; + if (copy_from_user(&irq_state, argp, sizeof(irq_state))) + break; + if (irq_state.len > VCPU_IRQS_MAX_BUF || + irq_state.len == 0 || + irq_state.len % sizeof(struct kvm_s390_irq) > 0) { + r = -EINVAL; + break; + } + r = kvm_s390_set_irq_state(vcpu, + (void __user *) irq_state.buf, + irq_state.len); + break; + } + case KVM_S390_GET_IRQ_STATE: { + struct kvm_s390_irq_state irq_state; + + r = -EFAULT; + if (copy_from_user(&irq_state, argp, sizeof(irq_state))) + break; + if (irq_state.len == 0) { + r = -EINVAL; + break; + } + r = kvm_s390_get_irq_state(vcpu, + (__u8 __user *) irq_state.buf, + irq_state.len); + break; + } default: r = -ENOTTY; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 343644a..ca108b9 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -272,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); extern struct kvm_device_ops kvm_flic_ops; int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); +int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, + void __user *buf, int len); +int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, + __u8 __user *buf, int len); /* implemented in guestdbg.c */ void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); -- cgit v1.1
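A matching hypothetical sketch for the restore side (same assumptions as before; "bytes" is the value returned by KVM_S390_GET_IRQ_STATE on the source vcpu, and, as the code above shows, the destination vcpu must not already have interrupts pending, otherwise the ioctl fails with EBUSY):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int restore_vcpu_irq_state(int vcpu_fd, struct kvm_s390_irq *buf,
                                      __u32 bytes)
    {
            struct kvm_s390_irq_state irq_state = {
                    .buf = (__u64)(unsigned long)buf,
                    /* must be a non-zero multiple of sizeof(struct kvm_s390_irq) */
                    .len = bytes,
            };

            return ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state);
    }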