summary refs log tree commit diff stats
path: root/sys/amd64
diff options
context:
space:
mode:
author	neel <neel@FreeBSD.org>	2012-10-04 02:27:14 +0000
committer	neel <neel@FreeBSD.org>	2012-10-04 02:27:14 +0000
commit	18dd2c0d511c600e708ac8f756e8e51151b43656 (patch)
tree	15ee06b08211ba25bab098239ee01704254807a8 /sys/amd64
parent	cdb0dba22bd5328fdb484a5ef8405149b9522c02 (diff)
download	FreeBSD-src-18dd2c0d511c600e708ac8f756e8e51151b43656.zip
download	FreeBSD-src-18dd2c0d511c600e708ac8f756e8e51151b43656.tar.gz
Change vm_malloc() to map pages in the guest physical address space in 4KB
chunks. This breaks the assumption that the entire memory segment is contiguously allocated in the host physical address space. This also paves the way to satisfy the 4KB page allocations by requesting free pages from the VM subsystem as opposed to hard-partitioning host memory at boot time.
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/include/vmm_dev.h	| 1
-rw-r--r--	sys/amd64/vmm/io/ppt.c	| 1
-rw-r--r--	sys/amd64/vmm/vmm.c	| 61
-rw-r--r--	sys/amd64/vmm/vmm_dev.c	| 2
-rw-r--r--	sys/amd64/vmm/vmm_mem.c	| 8
5 files changed, 53 insertions, 20 deletions
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index fc64fd8..42ad236 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -35,7 +35,6 @@ void vmmdev_cleanup(void);
#endif
struct vm_memory_segment {
- vm_paddr_t hpa; /* out */
vm_paddr_t gpa; /* in */
size_t len; /* in */
};
diff --git a/sys/amd64/vmm/io/ppt.c b/sys/amd64/vmm/io/ppt.c
index ace2877..e81fdbc 100644
--- a/sys/amd64/vmm/io/ppt.c
+++ b/sys/amd64/vmm/io/ppt.c
@@ -356,7 +356,6 @@ ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
if (error == 0) {
seg->gpa = gpa;
seg->len = len;
- seg->hpa = hpa;
}
return (error);
}
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 3dabbd6..7bd3f7f 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -275,6 +275,28 @@ vm_create(const char *name)
return (vm);
}
+static void
+vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
+{
+ size_t len;
+ vm_paddr_t hpa;
+
+ len = 0;
+ while (len < seg->len) {
+ hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
+ if (hpa == (vm_paddr_t)-1) {
+ panic("vm_free_mem_segs: cannot free hpa "
+ "associated with gpa 0x%016lx", seg->gpa + len);
+ }
+
+ vmm_mem_free(hpa, PAGE_SIZE);
+
+ len += PAGE_SIZE;
+ }
+
+ bzero(seg, sizeof(struct vm_memory_segment));
+}
+
void
vm_destroy(struct vm *vm)
{
@@ -283,7 +305,9 @@ vm_destroy(struct vm *vm)
ppt_unassign_all(vm);
for (i = 0; i < vm->num_mem_segs; i++)
- vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);
+ vm_free_mem_seg(vm, &vm->mem_segs[i]);
+
+ vm->num_mem_segs = 0;
for (i = 0; i < VM_MAXCPU; i++)
vcpu_cleanup(&vm->vcpu[i]);
@@ -345,6 +369,7 @@ int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
int error, available, allocated;
+ struct vm_memory_segment *seg;
vm_paddr_t g, hpa;
const boolean_t spok = TRUE; /* superpage mappings are ok */
@@ -380,22 +405,32 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
return (E2BIG);
- hpa = vmm_mem_alloc(len);
- if (hpa == 0)
- return (ENOMEM);
+ seg = &vm->mem_segs[vm->num_mem_segs];
- error = VMMMAP_SET(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
- VM_PROT_ALL, spok);
- if (error) {
- vmm_mem_free(hpa, len);
- return (error);
+ seg->gpa = gpa;
+ seg->len = 0;
+ while (seg->len < len) {
+ hpa = vmm_mem_alloc(PAGE_SIZE);
+ if (hpa == 0) {
+ error = ENOMEM;
+ break;
+ }
+
+ error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
+ VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
+ if (error)
+ break;
+
+ iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);
+
+ seg->len += PAGE_SIZE;
}
- iommu_create_mapping(vm->iommu, gpa, hpa, len);
+ if (seg->len != len) {
+ vm_free_mem_seg(vm, seg);
+ return (error);
+ }
- vm->mem_segs[vm->num_mem_segs].gpa = gpa;
- vm->mem_segs[vm->num_mem_segs].hpa = hpa;
- vm->mem_segs[vm->num_mem_segs].len = len;
vm->num_mem_segs++;
return (0);
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index b504e6b..91edbe8 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -299,7 +299,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
break;
case VM_GET_MEMORY_SEG:
seg = (struct vm_memory_segment *)data;
- seg->hpa = seg->len = 0;
+ seg->len = 0;
(void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
error = 0;
break;
diff --git a/sys/amd64/vmm/vmm_mem.c b/sys/amd64/vmm/vmm_mem.c
index 54f98ac..eb05b9d 100644
--- a/sys/amd64/vmm/vmm_mem.c
+++ b/sys/amd64/vmm/vmm_mem.c
@@ -318,9 +318,9 @@ vmm_mem_alloc(size_t size)
int i;
vm_paddr_t addr;
- if ((size & PDRMASK) != 0) {
+ if ((size & PAGE_MASK) != 0) {
panic("vmm_mem_alloc: size 0x%0lx must be "
- "aligned on a 0x%0x boundary\n", size, NBPDR);
+ "aligned on a 0x%0x boundary\n", size, PAGE_SIZE);
}
addr = 0;
@@ -373,9 +373,9 @@ vmm_mem_free(vm_paddr_t base, size_t length)
{
int i;
- if ((base & PDRMASK) != 0 || (length & PDRMASK) != 0) {
+ if ((base & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
panic("vmm_mem_free: base 0x%0lx and length 0x%0lx must be "
- "aligned on a 0x%0x boundary\n", base, length, NBPDR);
+ "aligned on a 0x%0x boundary\n", base, length, PAGE_SIZE);
}
mtx_lock(&vmm_mem_mtx);
OpenPOWER on IntegriCloud