author     neel <neel@FreeBSD.org>    2013-03-18 22:38:30 +0000
committer  neel <neel@FreeBSD.org>    2013-03-18 22:38:30 +0000
commit     8d05d984e8a9a632b3de9eed9ad8c765b40f88d9 (patch)
tree       a6c9b082e1d8d2eb2520edd75fe30f56de9a96a3 /lib
parent     ee34459918e9cf13e4840cef3ebcc8df7fa218bf (diff)
Simplify the assignment of memory to virtual machines by requiring a single
command line option "-m <memsize in MB>" to specify the memory size.

Prior to this change the user needed to explicitly specify the amount of
memory allocated below 4G (-m <lowmem>) and the amount above 4G (-M <highmem>).
The "-M" option is no longer supported by 'bhyveload' and 'bhyve'.

The start of the PCI hole is fixed at 3GB and cannot be directly changed
using command line options. However it is still possible to change this in
special circumstances via the 'vm_set_lowmem_limit()' API provided by
libvmmapi.

Submitted by:  Dinakar Medavaram (initial version)
Reviewed by:   grehan
Obtained from: NetApp
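
In consumer terms, a hedged sketch (not code from this commit): where a
bhyve-like process previously sized low and high memory separately, it now
hands libvmmapi a single total and lets vm_setup_memory() split it at the
lowmem limit. The helper name guest_setup() and the 2GB override below are
illustrative only.

    /* Hypothetical consumer sketch of the reworked interface. */
    #include <sys/types.h>

    #include <machine/vmm.h>

    #include <assert.h>

    #include <vmmapi.h>

    #define	GB	(1024UL * 1024 * 1024)

    static struct vmctx *
    guest_setup(const char *name, size_t memsize, int shrink_pci_hole)
    {
    	struct vmctx *ctx;
    	int error;

    	ctx = vm_open(name);		/* the VM must already exist */
    	assert(ctx != NULL);

    	/*
    	 * Special circumstances only: move the start of the PCI hole
    	 * below its 3GB default before assigning memory.
    	 */
    	if (shrink_pci_hole)
    		vm_set_lowmem_limit(ctx, 2 * GB);

    	/*
    	 * A single call sizes the guest; anything that does not fit
    	 * below the lowmem limit lands in a second segment at 4GB.
    	 */
    	error = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
    	assert(error == 0);

    	return (ctx);
    }
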
Diffstat (limited to 'lib')
-rw-r--r--  lib/libvmmapi/vmmapi.c  | 90
-rw-r--r--  lib/libvmmapi/vmmapi.h  | 26
2 files changed, 95 insertions(+), 21 deletions(-)
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
index 6a368b3..6982ba3 100644
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -48,8 +48,16 @@ __FBSDID("$FreeBSD$");
#include "vmmapi.h"
+#define GB (1024 * 1024 * 1024UL)
+
struct vmctx {
int fd;
+ uint32_t lowmem_limit;
+ enum vm_mmap_style vms;
+ size_t lowmem;
+ char *lowmem_addr;
+ size_t highmem;
+ char *highmem_addr;
char *name;
};
@@ -90,6 +98,7 @@ vm_open(const char *name)
assert(vm != NULL);
vm->fd = -1;
+ vm->lowmem_limit = 3 * GB;
vm->name = (char *)(vm + 1);
strcpy(vm->name, name);
@@ -151,8 +160,22 @@ vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len)
return (error);
}
-int
-vm_setup_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **mapaddr)
+uint32_t
+vm_get_lowmem_limit(struct vmctx *ctx)
+{
+
+ return (ctx->lowmem_limit);
+}
+
+void
+vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
+{
+
+ ctx->lowmem_limit = limit;
+}
+
+static int
+setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
int error;
struct vm_memory_segment seg;
@@ -165,20 +188,69 @@ vm_setup_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **mapaddr)
seg.gpa = gpa;
seg.len = len;
error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
- if (error == 0 && mapaddr != NULL) {
- *mapaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
+ if (error == 0 && addr != NULL) {
+ *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
ctx->fd, gpa);
}
return (error);
}
-char *
-vm_map_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
+int
+vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
+{
+ char **addr;
+ int error;
+
+ /* XXX VM_MMAP_SPARSE not implemented yet */
+ assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
+ ctx->vms = vms;
+
+ /*
+ * If 'memsize' cannot fit entirely in the 'lowmem' segment then
+ * create another 'highmem' segment above 4GB for the remainder.
+ */
+ if (memsize > ctx->lowmem_limit) {
+ ctx->lowmem = ctx->lowmem_limit;
+ ctx->highmem = memsize - ctx->lowmem;
+ } else {
+ ctx->lowmem = memsize;
+ ctx->highmem = 0;
+ }
+
+ if (ctx->lowmem > 0) {
+ addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
+ error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
+ if (error)
+ return (error);
+ }
+
+ if (ctx->highmem > 0) {
+ addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
+ error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+void *
+vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{
- /* Map 'len' bytes of memory at guest physical address 'gpa' */
- return ((char *)mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
- ctx->fd, gpa));
+ /* XXX VM_MMAP_SPARSE not implemented yet */
+ assert(ctx->vms == VM_MMAP_ALL);
+
+ if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
+ return ((void *)(ctx->lowmem_addr + gaddr));
+
+ if (gaddr >= 4*GB) {
+ gaddr -= 4*GB;
+ if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
+ return ((void *)(ctx->highmem_addr + gaddr));
+ }
+
+ return (NULL);
}
int
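
The new vm_map_gpa() above replaces vm_map_memory(): rather than creating a
fresh mapping on every call, it returns a pointer into the regions already
mapped by vm_setup_memory(), and NULL when the requested range is not backed
by guest memory (for example when it falls into the PCI hole between the
lowmem limit and 4GB). A hedged usage sketch, assuming a context set up with
VM_MMAP_ALL and a hypothetical helper name:

    #include <sys/types.h>

    #include <machine/vmm.h>

    #include <string.h>

    #include <vmmapi.h>

    /*
     * Hypothetical helper: copy 'len' bytes at guest physical address
     * 'gpa' into a host buffer via the statically mapped segments.
     */
    static int
    copy_from_guest(struct vmctx *ctx, vm_paddr_t gpa, void *buf, size_t len)
    {
    	void *hva;

    	hva = vm_map_gpa(ctx, gpa, len);
    	if (hva == NULL)
    		return (-1);	/* range straddles or misses guest RAM */

    	memcpy(buf, hva, len);
    	return (0);
    }
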
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
index e8cc8ba..f066c50 100644
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -32,24 +32,26 @@
struct vmctx;
enum x2apic_state;
+/*
+ * Different styles of mapping the memory assigned to a VM into the address
+ * space of the controlling process.
+ */
+enum vm_mmap_style {
+ VM_MMAP_NONE, /* no mapping */
+ VM_MMAP_ALL, /* fully and statically mapped */
+ VM_MMAP_SPARSE, /* mappings created on-demand */
+};
+
int vm_create(const char *name);
struct vmctx *vm_open(const char *name);
void vm_destroy(struct vmctx *ctx);
size_t vmm_get_mem_total(void);
size_t vmm_get_mem_free(void);
int vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len);
-/*
- * Create a memory segment of 'len' bytes in the guest physical address space
- * at offset 'gpa'.
- *
- * If 'mapaddr' is not NULL then this region is mmap'ed into the address
- * space of the calling process. If there is an mmap error then *mapaddr
- * will be set to MAP_FAILED.
- */
-
-int vm_setup_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len,
- char **mapaddr);
-char * vm_map_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len);
+int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
+void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
+uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
+void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
int vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
uint64_t base, uint32_t limit, uint32_t access);
int vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
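
Taken together, the header now gives a loader-style consumer enough to reason
about where guest RAM actually sits. A sketch under the same assumptions as
above (hypothetical helper name; it mirrors the lowmem/highmem split performed
inside vm_setup_memory()):

    #include <sys/types.h>

    #include <machine/vmm.h>

    #include <stdio.h>

    #include <vmmapi.h>

    /* Hypothetical helper: report the guest RAM layout implied by the
     * lowmem limit and the requested memory size. */
    static void
    print_guest_layout(struct vmctx *ctx, size_t memsize)
    {
    	size_t limit, lowmem, highmem;

    	limit = vm_get_lowmem_limit(ctx);	/* 3GB unless overridden */
    	lowmem = (memsize > limit) ? limit : memsize;
    	highmem = memsize - lowmem;

    	printf("lowmem:  [0x0, 0x%zx)\n", lowmem);
    	if (highmem != 0)
    		printf("highmem: [0x100000000, 0x%zx)\n",
    		    (size_t)4 * 1024 * 1024 * 1024 + highmem);
    }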