summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorneel <neel@FreeBSD.org>2014-05-24 19:13:25 +0000
committerneel <neel@FreeBSD.org>2014-05-24 19:13:25 +0000
commit52a4f11861c5ba735a7bb75bd093905e734e16bd (patch)
treeb6c3e4d6aa87835dcea4e12f5ba6efff47d7bd7e
parent13dd914b0d639a32495b89699ea2df916c62f9cf (diff)
downloadFreeBSD-src-52a4f11861c5ba735a7bb75bd093905e734e16bd.zip
FreeBSD-src-52a4f11861c5ba735a7bb75bd093905e734e16bd.tar.gz
When injecting a page fault into the guest, also update the guest's %cr2 to indicate the faulting linear address.
If the guest PML4 entry has the PG_PS bit set, then inject a page fault into the guest with the PGEX_RSV bit set in the error_code. Get rid of redundant checks for PG_RW violations when walking the page tables.
-rw-r--r--sys/amd64/include/vmm.h3
-rw-r--r--sys/amd64/vmm/intel/vmx.c2
-rw-r--r--sys/amd64/vmm/vmm.c9
-rw-r--r--sys/amd64/vmm/vmm_instruction_emul.c33
4 files changed, 28 insertions, 19 deletions
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index c759eee..407b5c1 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -237,7 +237,7 @@ int vm_exception_pending(struct vm *vm, int vcpuid, struct vm_exception *vme);
void vm_inject_gp(struct vm *vm, int vcpuid); /* general protection fault */
void vm_inject_ud(struct vm *vm, int vcpuid); /* undefined instruction fault */
-void vm_inject_pf(struct vm *vm, int vcpuid, int error_code); /* page fault */
+void vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2);
enum vm_reg_name vm_segment_name(int seg_encoding);
@@ -284,6 +284,7 @@ enum vm_reg_name {
VM_REG_GUEST_IDTR,
VM_REG_GUEST_GDTR,
VM_REG_GUEST_EFER,
+ VM_REG_GUEST_CR2,
VM_REG_LAST
};
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 0cc1cc9..5754b22 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -2383,6 +2383,8 @@ vmxctx_regptr(struct vmxctx *vmxctx, int reg)
return (&vmxctx->guest_r14);
case VM_REG_GUEST_R15:
return (&vmxctx->guest_r15);
+ case VM_REG_GUEST_CR2:
+ return (&vmxctx->guest_cr2);
default:
break;
}
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 132af36..f5ed0fe 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1441,13 +1441,20 @@ vm_inject_fault(struct vm *vm, int vcpuid, struct vm_exception *exception)
}
void
-vm_inject_pf(struct vm *vm, int vcpuid, int error_code)
+vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2)
{
struct vm_exception pf = {
.vector = IDT_PF,
.error_code_valid = 1,
.error_code = error_code
};
+ int error;
+
+ VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
+ error_code, cr2);
+
+ error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
+ KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
vm_inject_fault(vm, vcpuid, &pf);
}
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 3cf4c6b..cef8563 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -599,7 +599,7 @@ vie_init(struct vie *vie)
}
static int
-pf_error_code(int usermode, int prot, uint64_t pte)
+pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
int error_code = 0;
@@ -609,6 +609,8 @@ pf_error_code(int usermode, int prot, uint64_t pte)
error_code |= PGEX_W;
if (usermode)
error_code |= PGEX_U;
+ if (rsvd)
+ error_code |= PGEX_RSV;
if (prot & VM_PROT_EXECUTE)
error_code |= PGEX_I;
@@ -679,14 +681,12 @@ restart:
if ((pte32 & PG_V) == 0 ||
(usermode && (pte32 & PG_U) == 0) ||
(writable && (pte32 & PG_RW) == 0)) {
- pfcode = pf_error_code(usermode, prot, pte32);
- vm_inject_pf(vm, vcpuid, pfcode);
+ pfcode = pf_error_code(usermode, prot, 0,
+ pte32);
+ vm_inject_pf(vm, vcpuid, pfcode, gla);
goto pagefault;
}
- if (writable && (pte32 & PG_RW) == 0)
- goto error;
-
/*
* Emulate the x86 MMU's management of the accessed
* and dirty flags. While the accessed flag is set
@@ -735,8 +735,8 @@ restart:
pte = ptpbase[ptpindex];
if ((pte & PG_V) == 0) {
- pfcode = pf_error_code(usermode, prot, pte);
- vm_inject_pf(vm, vcpuid, pfcode);
+ pfcode = pf_error_code(usermode, prot, 0, pte);
+ vm_inject_pf(vm, vcpuid, pfcode, gla);
goto pagefault;
}
@@ -762,14 +762,11 @@ restart:
if ((pte & PG_V) == 0 ||
(usermode && (pte & PG_U) == 0) ||
(writable && (pte & PG_RW) == 0)) {
- pfcode = pf_error_code(usermode, prot, pte);
- vm_inject_pf(vm, vcpuid, pfcode);
+ pfcode = pf_error_code(usermode, prot, 0, pte);
+ vm_inject_pf(vm, vcpuid, pfcode, gla);
goto pagefault;
}
- if (writable && (pte & PG_RW) == 0)
- goto error;
-
/* Set the accessed bit in the page table entry */
if ((pte & PG_A) == 0) {
if (atomic_cmpset_64(&ptpbase[ptpindex],
@@ -779,10 +776,12 @@ restart:
}
if (nlevels > 0 && (pte & PG_PS) != 0) {
- if (pgsize > 1 * GB)
- goto error;
- else
- break;
+ if (pgsize > 1 * GB) {
+ pfcode = pf_error_code(usermode, prot, 1, pte);
+ vm_inject_pf(vm, vcpuid, pfcode, gla);
+ goto pagefault;
+ }
+ break;
}
ptpphys = pte;
OpenPOWER on IntegriCloud