author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-08 05:27:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-08 05:27:39 -0400
commit     e4e65676f272adb63655a2ca95207e8212d282f1 (patch)
tree       3679a3e6897d698ee949642660281e7f74e2852b /arch/s390/kvm/kvm-s390.c
parent     f89f4a06a59f30dec64b2afc4111426fc01e9e12 (diff)
parent     f439ed27f8b8b90d243ae15acb193d37f96eebe0 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"Fixes and features for 3.18.
Apart from the usual cleanups, here is the summary of new features:
- s390 moves closer towards host large page support
- PowerPC has improved support for debugging (both inside the guest
and via gdbstub) and support for e6500 processors
- ARM/ARM64 support read-only memory (which is necessary to put
firmware in emulated NOR flash)
- x86 has the usual emulator fixes and nested virtualization
improvements (including improved Windows support on Intel and
Jailhouse hypervisor support on AMD), adaptive PLE which helps
overcommitting of huge guests. Also included are some patches that
make KVM more friendly to memory hot-unplug, and fixes for rare
caching bugs.
Two patches have trivial mm/ parts that were acked by Rik and Andrew.
Note: I will soon switch to a subkey for signing purposes"
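As background for the ARM/ARM64 read-only memory item in the summary above: userspace opts into that behaviour through the generic KVM memslot API by setting KVM_MEM_READONLY, so guest writes to the region exit to userspace instead of being applied, which is what emulated NOR flash needs. A minimal illustrative sketch follows; the slot number, addresses and the map_flash_readonly() helper are made-up placeholders, not part of this merge.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Register a read-only memslot backed by anonymous memory (illustrative). */
static int map_flash_readonly(int vm_fd, uint64_t gpa, size_t size)
{
        void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (backing == MAP_FAILED)
                return -1;

        struct kvm_userspace_memory_region region = {
                .slot = 1,                      /* arbitrary free slot number */
                .flags = KVM_MEM_READONLY,      /* guest writes exit to userspace */
                .guest_phys_addr = gpa,
                .memory_size = size,
                .userspace_addr = (uint64_t)(unsigned long)backing,
        };

        /* The host mapping stays writable, so the VMM can still update it. */
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}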
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (157 commits)
kvm: do not handle APIC access page if in-kernel irqchip is not in use
KVM: s390: count vcpu wakeups in stat.halt_wakeup
KVM: s390/facilities: allow TOD-CLOCK steering facility bit
KVM: PPC: BOOK3S: HV: CMA: Reserve cma region only in hypervisor mode
arm/arm64: KVM: Report correct FSC for unsupported fault types
arm/arm64: KVM: Fix VTTBR_BADDR_MASK and pgd alloc
kvm: Fix kvm_get_page_retry_io __gup retval check
arm/arm64: KVM: Fix set_clear_sgi_pend_reg offset
kvm: x86: Unpin and remove kvm_arch->apic_access_page
kvm: vmx: Implement set_apic_access_page_addr
kvm: x86: Add request bit to reload APIC access page address
kvm: Add arch specific mmu notifier for page invalidation
kvm: Rename make_all_cpus_request() to kvm_make_all_cpus_request() and make it non-static
kvm: Fix page ageing bugs
kvm/x86/mmu: Pass gfn and level to rmapp callback.
x86: kvm: use alternatives for VMCALL vs. VMMCALL if kernel text is read-only
kvm: x86: use macros to compute bank MSRs
KVM: x86: Remove debug assertion of non-PAE reserved bits
kvm: don't take vcpu mutex for obviously invalid vcpu ioctls
kvm: Faults which trigger IO release the mmap_sem
...
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--   arch/s390/kvm/kvm-s390.c | 179
1 file changed, 101 insertions(+), 78 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 81b0e11..55aade4 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -50,6 +50,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "exit_instruction", VCPU_STAT(exit_instruction) },
         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
         { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
         { "instruction_stctl", VCPU_STAT(instruction_stctl) },
@@ -100,16 +101,12 @@ int test_vfacility(unsigned long nr)
 }
 
 /* Section: not file related */
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
         /* every s390 is virtualization enabled ;-) */
         return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
 
 int kvm_arch_hardware_setup(void)
@@ -124,17 +121,10 @@ void kvm_arch_hardware_unsetup(void)
         gmap_unregister_ipte_notifier(&gmap_notifier);
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
-{
-}
-
 int kvm_arch_init(void *opaque)
 {
-        return 0;
-}
-
-void kvm_arch_exit(void)
-{
+        /* Register floating interrupt controller interface. */
+        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 }
 
 /* Section: device related */
@@ -404,6 +394,22 @@ long kvm_arch_vm_ioctl(struct file *filp,
         return r;
 }
 
+static int kvm_s390_crypto_init(struct kvm *kvm)
+{
+        if (!test_vfacility(76))
+                return 0;
+
+        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
+                                         GFP_KERNEL | GFP_DMA);
+        if (!kvm->arch.crypto.crycb)
+                return -ENOMEM;
+
+        kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
+                                  CRYCB_FORMAT1;
+
+        return 0;
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
         int rc;
@@ -441,6 +447,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         if (!kvm->arch.dbf)
                 goto out_nodbf;
 
+        if (kvm_s390_crypto_init(kvm) < 0)
+                goto out_crypto;
+
         spin_lock_init(&kvm->arch.float_int.lock);
         INIT_LIST_HEAD(&kvm->arch.float_int.list);
         init_waitqueue_head(&kvm->arch.ipte_wq);
@@ -451,7 +460,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         if (type & KVM_VM_S390_UCONTROL) {
                 kvm->arch.gmap = NULL;
         } else {
-                kvm->arch.gmap = gmap_alloc(current->mm);
+                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                 if (!kvm->arch.gmap)
                         goto out_nogmap;
                 kvm->arch.gmap->private = kvm;
@@ -465,6 +474,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
         return 0;
 out_nogmap:
+        kfree(kvm->arch.crypto.crycb);
+out_crypto:
         debug_unregister(kvm->arch.dbf);
 out_nodbf:
         free_page((unsigned long)(kvm->arch.sca));
@@ -514,15 +525,12 @@ static void kvm_free_vcpus(struct kvm *kvm)
         mutex_unlock(&kvm->lock);
 }
 
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
         kvm_free_vcpus(kvm);
         free_page((unsigned long)(kvm->arch.sca));
         debug_unregister(kvm->arch.dbf);
+        kfree(kvm->arch.crypto.crycb);
         if (!kvm_is_ucontrol(kvm))
                 gmap_free(kvm->arch.gmap);
         kvm_s390_destroy_adapters(kvm);
@@ -535,7 +543,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
         kvm_clear_async_pf_completion_queue(vcpu);
         if (kvm_is_ucontrol(vcpu->kvm)) {
-                vcpu->arch.gmap = gmap_alloc(current->mm);
+                vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
                 if (!vcpu->arch.gmap)
                         return -ENOMEM;
                 vcpu->arch.gmap->private = vcpu->kvm;
@@ -546,15 +554,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
         vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                     KVM_SYNC_GPRS |
                                     KVM_SYNC_ACRS |
-                                    KVM_SYNC_CRS;
+                                    KVM_SYNC_CRS |
+                                    KVM_SYNC_ARCH0 |
+                                    KVM_SYNC_PFAULT;
         return 0;
 }
 
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-        /* Nothing todo */
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
@@ -607,6 +612,14 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
         return 0;
 }
 
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
+{
+        if (!test_vfacility(76))
+                return;
+
+        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
+}
+
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
 {
         free_page(vcpu->arch.sie_block->cbrlo);
@@ -653,6 +666,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
         vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
         get_cpu_id(&vcpu->arch.cpu_id);
         vcpu->arch.cpu_id.version = 0xff;
+
+        kvm_s390_vcpu_crypto_setup(vcpu);
+
         return rc;
 }
 
@@ -1049,6 +1065,11 @@ retry:
                 goto retry;
         }
 
+        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+                vcpu->arch.sie_block->ihcpu = 0xffff;
+                goto retry;
+        }
+
         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                 if (!ibs_enabled(vcpu)) {
                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
@@ -1085,18 +1106,8 @@ retry:
  */
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
 {
-        struct mm_struct *mm = current->mm;
-        hva_t hva;
-        long rc;
-
-        hva = gmap_fault(gpa, vcpu->arch.gmap);
-        if (IS_ERR_VALUE(hva))
-                return (long)hva;
-        down_read(&mm->mmap_sem);
-        rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
-        up_read(&mm->mmap_sem);
-
-        return rc < 0 ? rc : 0;
+        return gmap_fault(vcpu->arch.gmap, gpa,
+                          writable ? FAULT_FLAG_WRITE : 0);
 }
 
 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
@@ -1191,8 +1202,11 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
         if (test_cpu_flag(CIF_MCCK_PENDING))
                 s390_handle_mcck();
 
-        if (!kvm_is_ucontrol(vcpu->kvm))
-                kvm_s390_deliver_pending_interrupts(vcpu);
+        if (!kvm_is_ucontrol(vcpu->kvm)) {
+                rc = kvm_s390_deliver_pending_interrupts(vcpu);
+                if (rc)
+                        return rc;
+        }
 
         rc = kvm_s390_handle_requests(vcpu);
         if (rc)
@@ -1296,6 +1310,48 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
         return rc;
 }
 
+static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
+        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
+        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
+                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+                /* some control register changes require a tlb flush */
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+        }
+        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
+                vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
+                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
+                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
+                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
+                vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
+        }
+        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
+                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
+                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
+                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+        }
+        kvm_run->kvm_dirty_regs = 0;
+}
+
+static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
+        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
+        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
+        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
+        kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
+        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
+        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
+        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
+        kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
+        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
+        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
+        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         int rc;
@@ -1317,17 +1373,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 return -EINVAL;
         }
 
-        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
-        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
-        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
-                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
-                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
-        }
-        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
-                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
-                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
-                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
-        }
+        sync_regs(vcpu, kvm_run);
 
         might_fault();
         rc = __vcpu_run(vcpu);
@@ -1357,10 +1403,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 rc = 0;
         }
 
-        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
-        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
-        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
-        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
+        store_regs(vcpu, kvm_run);
 
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -1489,7 +1532,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
                  * Another VCPU might have used IBS while we were offline.
                  * Let's play safe and flush the VCPU at startup.
                  */
-                vcpu->arch.sie_block->ihcpu = 0xffff;
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
                 return;
         }
@@ -1644,9 +1687,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         }
 #endif
         case KVM_S390_VCPU_FAULT: {
-                r = gmap_fault(arg, vcpu->arch.gmap);
-                if (!IS_ERR_VALUE(r))
-                        r = 0;
+                r = gmap_fault(vcpu->arch.gmap, arg, 0);
                 break;
         }
         case KVM_ENABLE_CAP:
@@ -1677,21 +1718,12 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
         return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-                           struct kvm_memory_slot *dont)
-{
-}
-
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             unsigned long npages)
 {
         return 0;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
@@ -1737,15 +1769,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
         return;
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                   struct kvm_memory_slot *slot)
-{
-}
-
 static int __init kvm_s390_init(void)
 {
         int ret;
@@ -1764,7 +1787,7 @@ static int __init kvm_s390_init(void)
                 return -ENOMEM;
         }
         memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
-        vfacilities[0] &= 0xff82fff3f4fc2000UL;
+        vfacilities[0] &= 0xff82fffbf47c2000UL;
         vfacilities[1] &= 0x005c000000000000UL;
         return 0;
 }
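The new sync_regs()/store_regs() pair above moves the register exchange with userspace onto the kvm_run shared page: userspace flags the register blocks it modified in kvm_run->kvm_dirty_regs before KVM_RUN, and the kernel writes the current state back afterwards. A rough sketch of the userspace side, assuming an s390 guest and a vcpu fd whose kvm_run page is already mmap()ed; the run_with_new_crs() helper is hypothetical and not part of this merge.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Update the guest control registers via the shared run page (illustrative). */
static int run_with_new_crs(int vcpu_fd, struct kvm_run *run,
                            const uint64_t new_crs[16])
{
        int i;

        for (i = 0; i < 16; i++)
                run->s.regs.crs[i] = new_crs[i];
        /* sync_regs() copies the dirty blocks into the SIE control block and
         * requests a TLB flush for control-register changes. */
        run->kvm_dirty_regs = KVM_SYNC_CRS;

        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

        /* On return, store_regs() has refreshed run->psw_* and run->s.regs. */
        return 0;
}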