author	Andrea Arcangeli <andrea@qumranet.com>	2008-07-25 16:24:52 +0200
committer	Avi Kivity <avi@qumranet.com>	2008-07-29 12:33:53 +0300
commit	e930bffe95e1e886a1ede80726ea38df5838d067 (patch)
tree	d39227c3de8e7d4a70737c78693f6d7f458066af /include/linux
parent	604b38ac0369bd50fcbb33344aa5553c071009f7 (diff)
KVM: Synchronize guest physical memory map to host virtual memory map
Synchronize changes to host virtual addresses which are part of a KVM memory slot to the KVM shadow mmu. This allows pte operations like swapping, page migration, and madvise() to transparently work with KVM.

Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
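The two fields added to struct kvm below implement a seqlock-like protocol against the MMU notifier write side, which lives outside include/linux and is therefore not shown in this diffstat. A minimal sketch of that write side, assuming the existing kvm->mmu_lock spinlock and hypothetical handler names (example_invalidate_range_start/_end; the real handlers are hooked up through the kvm->mmu_notifier field added here):

#include <linux/kvm_host.h>
#include <linux/mmu_notifier.h>

static void example_invalidate_range_start(struct mmu_notifier *mn,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

	spin_lock(&kvm->mmu_lock);
	/*
	 * Raise the count first: any page fault that now checks
	 * mmu_notifier_retry() sees the invalidation in progress
	 * and backs off.
	 */
	kvm->mmu_notifier_count++;
	/* ... zap shadow ptes mapping hvas in [start, end) ... */
	spin_unlock(&kvm->mmu_lock);
}

static void example_invalidate_range_end(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

	spin_lock(&kvm->mmu_lock);
	/*
	 * Bump the sequence before dropping the count, so a fault
	 * that sampled mmu_notifier_seq before this invalidation
	 * sees either a non-zero count or a changed sequence,
	 * never a stale "all clear".
	 */
	kvm->mmu_notifier_seq++;
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);
}

Because both updates happen under mmu_lock, the new mmu_notifier_retry() helper in the diff below can read the two fields under the same lock without a barrier in between.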
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/kvm_host.h	24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 07d68a8..8525afc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -121,6 +121,12 @@ struct kvm {
struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
+
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+ struct mmu_notifier mmu_notifier;
+ unsigned long mmu_notifier_seq;
+ long mmu_notifier_count;
+#endif
};
/* The guest did something we don't support. */
@@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
#define kvm_trace_cleanup() ((void)0)
#endif
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+{
+ if (unlikely(vcpu->kvm->mmu_notifier_count))
+ return 1;
+ /*
+ * Both reads happen under mmu_lock and both values are
+ * modified under mmu_lock, so there is no need for an
+ * smp_rmb() in between; otherwise mmu_notifier_count
+ * would have to be read before mmu_notifier_seq. See the
+ * mmu_notifier_invalidate_range_end write side.
+ */
+ if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+#endif
+
#endif
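A page-fault path is expected to use the helper as follows: sample mmu_notifier_seq before translating the gfn, do the (possibly sleeping) translation without holding locks, then recheck under mmu_lock before installing the mapping. This is an illustrative sketch, not code from the patch; gfn_to_pfn() and kvm_release_pfn_clean() are existing KVM helpers, while example_map_guest_page() and its -EAGAIN retry convention are invented here:

#include <linux/kvm_host.h>

static int example_map_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();	/* sample seq before the gfn->pfn translation */

	/* May sleep (e.g. fault the page in); no locks held yet. */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		/*
		 * An invalidation started or completed after the
		 * sample above; pfn may already be stale, so drop
		 * it and let the caller retry the fault.
		 */
		spin_unlock(&vcpu->kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}
	/* ... safe to install the shadow pte for pfn here ... */
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}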