author    Gleb Natapov <gleb@redhat.com>  2010-07-07 20:16:45 +0300
committer Avi Kivity <avi@redhat.com>     2010-08-02 06:40:33 +0300
commit    edba23e51578f7cb6781461568489fc1825db4ac
tree      54ce6b22998c1bf7c40cfa43e3ebd8b9df271dcb /virt
parent    fa7bff8f8a7d3de61c0473d0b6dc5a0f4fdc6ac9
KVM: Return EFAULT from kvm ioctl when guest accesses bad area
Currently, if the guest accesses an address that belongs to a memory slot but is not backed by a page, or the page is read-only, KVM treats the access like MMIO. Remove that capability: it was never part of the interface and should not be relied upon.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
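To illustrate the user-visible effect, here is a minimal userspace sketch (not part of this patch; it assumes a vCPU fd from KVM_CREATE_VCPU and a memory slot whose backing mapping has no usable page, e.g. after mprotect(PROT_NONE)). Before this change such a guest access surfaced as a KVM_EXIT_MMIO exit; after it, the KVM_RUN ioctl fails with EFAULT:

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd: file descriptor returned by KVM_CREATE_VCPU (setup omitted) */
static int run_guest(int vcpu_fd)
{
	int ret = ioctl(vcpu_fd, KVM_RUN, 0);

	if (ret < 0 && errno == EFAULT) {
		/* The guest touched a memslot address with no usable
		 * backing page; with this patch the ioctl fails with
		 * EFAULT instead of faking an MMIO exit. */
		fprintf(stderr, "KVM_RUN: EFAULT on bad guest access\n");
		return -1;
	}
	return ret;
}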
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 630d122..b78b794 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -96,6 +96,9 @@ static bool largepages_enabled = true;
static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;
+static struct page *fault_page;
+static pfn_t fault_pfn;
+
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
if (pfn_valid(pfn)) {
@@ -815,13 +818,13 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
int is_error_page(struct page *page)
{
- return page == bad_page || page == hwpoison_page;
+ return page == bad_page || page == hwpoison_page || page == fault_page;
}
EXPORT_SYMBOL_GPL(is_error_page);
int is_error_pfn(pfn_t pfn)
{
- return pfn == bad_pfn || pfn == hwpoison_pfn;
+ return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);
@@ -831,6 +834,12 @@ int is_hwpoison_pfn(pfn_t pfn)
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
+int is_fault_pfn(pfn_t pfn)
+{
+ return pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_fault_pfn);
+
static inline unsigned long bad_hva(void)
{
return PAGE_OFFSET;
@@ -959,8 +968,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
if (vma == NULL || addr < vma->vm_start ||
!(vma->vm_flags & VM_PFNMAP)) {
up_read(&current->mm->mmap_sem);
- get_page(bad_page);
- return page_to_pfn(bad_page);
+ get_page(fault_page);
+ return page_to_pfn(fault_page);
}
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -2226,6 +2235,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
hwpoison_pfn = page_to_pfn(hwpoison_page);
+ fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+ if (fault_page == NULL) {
+ r = -ENOMEM;
+ goto out_free_0;
+ }
+
+ fault_pfn = page_to_pfn(fault_page);
+
if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
r = -ENOMEM;
goto out_free_0;
@@ -2298,6 +2316,8 @@ out_free_1:
out_free_0a:
free_cpumask_var(cpus_hardware_enabled);
out_free_0:
+ if (fault_page)
+ __free_page(fault_page);
if (hwpoison_page)
__free_page(hwpoison_page);
__free_page(bad_page);
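For context, a hedged sketch of how an arch-side fault path might consume the new is_fault_pfn() helper. This consumer is not part of the diff above; the function example_map_gfn() is hypothetical, while gfn_to_pfn() and kvm_release_pfn_clean() are the generic KVM helpers of this era:

#include <linux/kvm_host.h>

/* Hypothetical arch-side caller: translate the fault_page sentinel
 * returned by hva_to_pfn() into -EFAULT so the error propagates out
 * of the KVM_RUN ioctl instead of being treated as MMIO. */
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);

	if (is_fault_pfn(pfn)) {
		/* No usable backing page for this memslot address. */
		kvm_release_pfn_clean(pfn);
		return -EFAULT;
	}
	/* ... establish the guest mapping using pfn ... */
	return 0;
}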