author     neel <neel@FreeBSD.org>  2012-09-29 01:15:45 +0000
committer  neel <neel@FreeBSD.org>  2012-09-29 01:15:45 +0000
commit     bc87f08e9822e6446dc91b0451317740259de95c (patch)
tree       cc4182b3e13e70bcf805289a17de0b30a4a5ec94 /sys/amd64
parent     b65259b285734eec4d40fe639b4e84a6f4bf9f02 (diff)
Get rid of assumptions in the hypervisor that the host physical memory
associated with guest physical memory is contiguous.

In this case vm_malloc() was using vm_gpa2hpa() to indirectly infer whether
or not the address range had already been allocated. Replace this with an
explicit API 'vm_gpa_available()' that returns TRUE if a page is available
for allocation in guest physical address space.
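To make the interface change concrete, here is a minimal caller sketch
(hypothetical, not part of this commit; the helper name 'alloc_guest_range'
is invented for illustration). vm_malloc() no longer hands back a host
physical address, so a caller that still needs one translates explicitly
with the existing vm_gpa2hpa(), one page at a time, since the backing host
memory may not be contiguous:

/*
 * Hypothetical caller sketch.  With the new interface vm_malloc() only
 * reports success or failure; host physical addresses are looked up
 * per page because the backing memory need not be contiguous.
 */
static int
alloc_guest_range(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int error;
	vm_paddr_t g, hpa;

	error = vm_malloc(vm, gpa, len);	/* no 'ret_hpa' out-param */
	if (error)
		return (error);

	for (g = gpa; g < gpa + len; g += PAGE_SIZE) {
		hpa = vm_gpa2hpa(vm, g, PAGE_SIZE);
		if (hpa == (vm_paddr_t)-1)	/* -1 signals "not mapped" */
			return (ENXIO);
		/* ... use 'hpa' for this one page only ... */
	}
	return (0);
}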
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/include/vmm.h |  2
-rw-r--r--  sys/amd64/vmm/vmm.c     | 60
-rw-r--r--  sys/amd64/vmm/vmm_dev.c |  2
3 files changed, 53 insertions, 11 deletions
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 0b3a29c..bb2f778 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -89,7 +89,7 @@ extern struct vmm_ops vmm_ops_amd;
 struct vm *vm_create(const char *name);
 void vm_destroy(struct vm *vm);
 const char *vm_name(struct vm *vm);
-int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa);
+int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
 int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
 int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
 vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index db2f9b8..06109b1 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -315,20 +315,63 @@ vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
 	    VM_PROT_NONE, spok));
 }
 
+/*
+ * Returns TRUE if 'gpa' is available for allocation and FALSE otherwise
+ */
+static boolean_t
+vm_gpa_available(struct vm *vm, vm_paddr_t gpa)
+{
+	int i;
+	vm_paddr_t gpabase, gpalimit;
+
+	if (gpa & PAGE_MASK)
+		panic("vm_gpa_available: gpa (0x%016lx) not page aligned", gpa);
+
+	for (i = 0; i < vm->num_mem_segs; i++) {
+		gpabase = vm->mem_segs[i].gpa;
+		gpalimit = gpabase + vm->mem_segs[i].len;
+		if (gpa >= gpabase && gpa < gpalimit)
+			return (FALSE);
+	}
+
+	return (TRUE);
+}
+
 int
-vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
+vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
 {
-	int error;
-	vm_paddr_t hpa;
+	int error, available, allocated;
+	vm_paddr_t g, hpa;
 	const boolean_t spok = TRUE;	/* superpage mappings are ok */
 
+	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
+		return (EINVAL);
+
+	available = allocated = 0;
+	g = gpa;
+	while (g < gpa + len) {
+		if (vm_gpa_available(vm, g))
+			available++;
+		else
+			allocated++;
+
+		g += PAGE_SIZE;
+	}
+
 	/*
-	 * find the hpa if already it was already vm_malloc'd.
+	 * If there are some allocated and some available pages in the address
+	 * range then it is an error.
 	 */
-	hpa = vm_gpa2hpa(vm, gpa, len);
-	if (hpa != ((vm_paddr_t)-1))
-		goto out;
+	if (allocated && available)
+		return (EINVAL);
+
+	/*
+	 * If the entire address range being requested has already been
+	 * allocated then there isn't anything more to do.
+	 */
+	if (allocated && available == 0)
+		return (0);
 
 	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
 		return (E2BIG);
@@ -350,8 +393,7 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
 	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
 	vm->mem_segs[vm->num_mem_segs].len = len;
 	vm->num_mem_segs++;
-out:
-	*ret_hpa = hpa;
+
 	return (0);
 }
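Taken together, the new checks give vm_malloc() three possible outcomes for
a page-aligned range. A hypothetical illustration (not from the commit),
assuming a freshly created VM:

	error = vm_malloc(vm, 0, 4 * PAGE_SIZE);  /* all available: allocates, returns 0 */
	error = vm_malloc(vm, 0, 4 * PAGE_SIZE);  /* all allocated: no-op, returns 0     */
	error = vm_malloc(vm, 0, 8 * PAGE_SIZE);  /* mixed allocated/available: EINVAL   */
	error = vm_malloc(vm, 1, PAGE_SIZE);      /* gpa not page aligned: EINVAL        */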
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index 686ddec..b504e6b 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -295,7 +295,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 		break;
 	case VM_MAP_MEMORY:
 		seg = (struct vm_memory_segment *)data;
-		error = vm_malloc(sc->vm, seg->gpa, seg->len, &seg->hpa);
+		error = vm_malloc(sc->vm, seg->gpa, seg->len);
 		break;
 	case VM_GET_MEMORY_SEG:
 		seg = (struct vm_memory_segment *)data;
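At the ioctl boundary the change is visible to userland only in what comes
back: the kernel no longer fills in seg.hpa. A hypothetical userland sketch
('vmfd' is assumed to be an open descriptor for the VM's /dev/vmm node):

	struct vm_memory_segment seg;
	int error;

	memset(&seg, 0, sizeof(seg));
	seg.gpa = 0;			/* guest physical base (page aligned) */
	seg.len = 64 * 1024 * 1024;	/* length (page aligned, non-zero)    */
	error = ioctl(vmfd, VM_MAP_MEMORY, &seg);
	/* seg.hpa is no longer written back by the kernel */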