-rw-r--r--  cpu-all.h     3
-rw-r--r--  exec-all.h    3
-rw-r--r--  exec.c      130
-rw-r--r--  kqemu.c       9
-rw-r--r--  vl.c         56
5 files changed, 149 insertions, 52 deletions
diff --git a/cpu-all.h b/cpu-all.h
index 6094e44..aaaa70d 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -854,11 +854,10 @@ typedef unsigned long ram_addr_t;
/* memory API */
-extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
-extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
+extern ram_addr_t last_ram_offset;
/* physical memory access */
diff --git a/exec-all.h b/exec-all.h
index 0afaae8..b4664f9 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -370,6 +370,9 @@ void kqemu_record_dump(void);
extern uint32_t kqemu_comm_base;
+extern ram_addr_t kqemu_phys_ram_size;
+extern uint8_t *kqemu_phys_ram_base;
+
static inline int kqemu_is_ok(CPUState *env)
{
return(env->kqemu_enabled &&
diff --git a/exec.c b/exec.c
index eda3bf3..13e43d0 100644
--- a/exec.c
+++ b/exec.c
@@ -107,12 +107,22 @@ static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
-ram_addr_t phys_ram_size;
int phys_ram_fd;
-uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
-static ram_addr_t phys_ram_alloc_offset = 0;
+
+typedef struct RAMBlock {
+ uint8_t *host;
+ ram_addr_t offset;
+ ram_addr_t length;
+ struct RAMBlock *next;
+} RAMBlock;
+
+static RAMBlock *ram_blocks;
+/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
+   then we can no longer assume contiguous ram offsets, and external uses
+ of this variable will break. */
+ram_addr_t last_ram_offset;
#endif
CPUState *first_cpu;
@@ -411,7 +421,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
/* XXX: needs adjustments */
- code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
+ code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
}
if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
@@ -2419,22 +2429,55 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
kvm_uncoalesce_mmio_region(addr, size);
}
+#ifdef USE_KQEMU
/* XXX: better than nothing */
-ram_addr_t qemu_ram_alloc(ram_addr_t size)
+static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
ram_addr_t addr;
- if ((phys_ram_alloc_offset + size) > phys_ram_size) {
+ if ((last_ram_offset + size) > kqemu_phys_ram_size) {
fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
- (uint64_t)size, (uint64_t)phys_ram_size);
+ (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
abort();
}
- addr = phys_ram_alloc_offset;
- phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
+ addr = last_ram_offset;
+ last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
return addr;
}
+#endif
+
+ram_addr_t qemu_ram_alloc(ram_addr_t size)
+{
+ RAMBlock *new_block;
+
+#ifdef USE_KQEMU
+ if (kqemu_phys_ram_base) {
+ return kqemu_ram_alloc(size);
+ }
+#endif
+
+ size = TARGET_PAGE_ALIGN(size);
+ new_block = qemu_malloc(sizeof(*new_block));
+
+ new_block->host = qemu_vmalloc(size);
+ new_block->offset = last_ram_offset;
+ new_block->length = size;
+
+ new_block->next = ram_blocks;
+ ram_blocks = new_block;
+
+ phys_ram_dirty = qemu_realloc(phys_ram_dirty,
+ (last_ram_offset + size) >> TARGET_PAGE_BITS);
+ memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
+ 0xff, size >> TARGET_PAGE_BITS);
+
+ last_ram_offset += size;
+
+ return new_block->offset;
+}
void qemu_ram_free(ram_addr_t addr)
{
+ /* TODO: implement this. */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
@@ -2447,14 +2490,69 @@ void qemu_ram_free(ram_addr_t addr)
*/
void *qemu_get_ram_ptr(ram_addr_t addr)
{
- return phys_ram_base + addr;
+ RAMBlock *prev;
+ RAMBlock **prevp;
+ RAMBlock *block;
+
+#ifdef USE_KQEMU
+ if (kqemu_phys_ram_base) {
+ return kqemu_phys_ram_base + addr;
+ }
+#endif
+
+ prev = NULL;
+ prevp = &ram_blocks;
+ block = ram_blocks;
+ while (block && (block->offset > addr
+ || block->offset + block->length <= addr)) {
+ if (prev)
+ prevp = &prev->next;
+ prev = block;
+ block = block->next;
+ }
+ if (!block) {
+ fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+ abort();
+ }
+    /* Move this entry to the start of the list. */
+ if (prev) {
+ prev->next = block->next;
+ block->next = *prevp;
+ *prevp = block;
+ }
+ return block->host + (addr - block->offset);
}
/* Some of the softmmu routines need to translate from a host pointer
(typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
- return (uint8_t *)ptr - phys_ram_base;
+ RAMBlock *prev;
+ RAMBlock **prevp;
+ RAMBlock *block;
+ uint8_t *host = ptr;
+
+#ifdef USE_KQEMU
+ if (kqemu_phys_ram_base) {
+ return host - kqemu_phys_ram_base;
+ }
+#endif
+
+ prev = NULL;
+ prevp = &ram_blocks;
+ block = ram_blocks;
+ while (block && (block->host > host
+ || block->host + block->length <= host)) {
+ if (prev)
+ prevp = &prev->next;
+ prev = block;
+ block = block->next;
+ }
+ if (!block) {
+ fprintf(stderr, "Bad ram pointer %p\n", ptr);
+ abort();
+ }
+ return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
@@ -2895,9 +2993,13 @@ static void io_mem_init(void)
io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
watch_mem_write, NULL);
- /* alloc dirty bits array */
- phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
- memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
+#ifdef USE_KQEMU
+ if (kqemu_phys_ram_base) {
+ /* alloc dirty bits array */
+ phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
+ memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
+ }
+#endif
}
/* mem_read and mem_write are arrays of functions containing the
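For orientation, here is a minimal standalone sketch of the scheme the exec.c hunks above introduce: guest RAM is handed out as offsets in a flat space, each allocation becomes a block on a singly linked list, and offset-to-host translation walks the list with a move-to-front promotion. The names, the fixed page size, and the simplified full move-to-front are assumptions of this sketch, not the QEMU code itself.

/*
 * Hypothetical, self-contained illustration of the block-list allocator.
 * Error handling is omitted for brevity.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

typedef struct Block {
    uint8_t *host;            /* host memory backing this block */
    uint64_t offset;          /* offset in the flat guest RAM space */
    uint64_t length;          /* block size, page aligned */
    struct Block *next;
} Block;

static Block *blocks;         /* most recently used block kept at the head */
static uint64_t last_offset;  /* next free offset in the flat space */

static uint64_t ram_alloc(uint64_t size)
{
    Block *b = malloc(sizeof(*b));

    size = (size + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1);
    b->host = malloc(size);
    b->offset = last_offset;
    b->length = size;
    b->next = blocks;
    blocks = b;
    last_offset += size;
    return b->offset;
}

static uint8_t *ram_ptr(uint64_t addr)
{
    Block *prev = NULL, *b = blocks;

    /* Find the block covering this offset. */
    while (b && (addr < b->offset || addr >= b->offset + b->length)) {
        prev = b;
        b = b->next;
    }
    if (!b) {
        fprintf(stderr, "bad ram offset %" PRIx64 "\n", addr);
        abort();
    }
    /* Promote the hit to the front so the next lookup finds it first. */
    if (prev) {
        prev->next = b->next;
        b->next = blocks;
        blocks = b;
    }
    return b->host + (addr - b->offset);
}

int main(void)
{
    uint64_t a = ram_alloc(3 * PAGE_SIZE);
    uint64_t b = ram_alloc(PAGE_SIZE);

    memcpy(ram_ptr(a + PAGE_SIZE), "hello", 6);
    printf("%s (second block at offset %" PRIu64 ")\n",
           (char *)ram_ptr(a + PAGE_SIZE), b);
    return 0;
}

The move-to-front step is what keeps repeated translations into the same block, by far the common case, close to O(1) without needing any extra index structure.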
diff --git a/kqemu.c b/kqemu.c
index 96660b0..040e817 100644
--- a/kqemu.c
+++ b/kqemu.c
@@ -91,6 +91,8 @@ unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
+ram_addr_t kqemu_phys_ram_size;
+uint8_t *kqemu_phys_ram_base;
#define cpuid(index, eax, ebx, ecx, edx) \
asm volatile ("cpuid" \
@@ -214,13 +216,14 @@ int kqemu_init(CPUState *env)
sizeof(uint64_t));
if (!modified_ram_pages)
goto fail;
- modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
+ modified_ram_pages_table =
+ qemu_mallocz(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
if (!modified_ram_pages_table)
goto fail;
memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
- kinit.ram_base = phys_ram_base;
- kinit.ram_size = phys_ram_size;
+ kinit.ram_base = kqemu_phys_ram_base;
+ kinit.ram_size = kqemu_phys_ram_size;
kinit.ram_dirty = phys_ram_dirty;
kinit.pages_to_flush = pages_to_flush;
kinit.ram_pages_to_update = ram_pages_to_update;
diff --git a/vl.c b/vl.c
index cd974be..b698439 100644
--- a/vl.c
+++ b/vl.c
@@ -3094,9 +3094,9 @@ static int ram_load_v1(QEMUFile *f, void *opaque)
int ret;
ram_addr_t i;
- if (qemu_get_be32(f) != phys_ram_size)
+ if (qemu_get_be32(f) != last_ram_offset)
return -EINVAL;
- for(i = 0; i < phys_ram_size; i+= TARGET_PAGE_SIZE) {
+ for(i = 0; i < last_ram_offset; i+= TARGET_PAGE_SIZE) {
ret = ram_get_page(f, qemu_get_ram_ptr(i), TARGET_PAGE_SIZE);
if (ret)
return ret;
@@ -3182,7 +3182,7 @@ static int ram_save_block(QEMUFile *f)
ram_addr_t addr = 0;
int found = 0;
- while (addr < phys_ram_size) {
+ while (addr < last_ram_offset) {
if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
uint8_t *p;
@@ -3204,7 +3204,7 @@ static int ram_save_block(QEMUFile *f)
break;
}
addr += TARGET_PAGE_SIZE;
- current_addr = (saved_addr + addr) % phys_ram_size;
+ current_addr = (saved_addr + addr) % last_ram_offset;
}
return found;
@@ -3217,7 +3217,7 @@ static ram_addr_t ram_save_remaining(void)
ram_addr_t addr;
ram_addr_t count = 0;
- for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) {
+ for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
count++;
}
@@ -3231,7 +3231,7 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
if (stage == 1) {
/* Make sure all dirty bits are set */
- for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) {
+ for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
cpu_physical_memory_set_dirty(addr);
}
@@ -3239,7 +3239,7 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
/* Enable dirty memory tracking */
cpu_physical_memory_set_dirty_tracking(1);
- qemu_put_be64(f, phys_ram_size | RAM_SAVE_FLAG_MEM_SIZE);
+ qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE);
}
while (!qemu_file_rate_limit(f)) {
@@ -3272,7 +3272,7 @@ static int ram_load_dead(QEMUFile *f, void *opaque)
if (ram_decompress_open(s, f) < 0)
return -EINVAL;
- for(i = 0; i < phys_ram_size; i+= BDRV_HASH_BLOCK_SIZE) {
+ for(i = 0; i < last_ram_offset; i+= BDRV_HASH_BLOCK_SIZE) {
if (ram_decompress_buf(s, buf, 1) < 0) {
fprintf(stderr, "Error while reading ram block header\n");
goto error;
@@ -3303,7 +3303,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
return ram_load_v1(f, opaque);
if (version_id == 2) {
- if (qemu_get_be32(f) != phys_ram_size)
+ if (qemu_get_be32(f) != last_ram_offset)
return -EINVAL;
return ram_load_dead(f, opaque);
}
@@ -3318,7 +3318,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
addr &= TARGET_PAGE_MASK;
if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
- if (addr != phys_ram_size)
+ if (addr != last_ram_offset)
return -EINVAL;
}
@@ -5132,31 +5132,21 @@ int main(int argc, char **argv, char **envp)
exit(1);
/* init the memory */
- phys_ram_size = machine->ram_require & ~RAMSIZE_FIXED;
-
- if (machine->ram_require & RAMSIZE_FIXED) {
- if (ram_size > 0) {
- if (ram_size < phys_ram_size) {
- fprintf(stderr, "Machine `%s' requires %llu bytes of memory\n",
- machine->name, (unsigned long long) phys_ram_size);
- exit(-1);
- }
-
- phys_ram_size = ram_size;
- } else
- ram_size = phys_ram_size;
- } else {
- if (ram_size == 0)
- ram_size = DEFAULT_RAM_SIZE * 1024 * 1024;
-
- phys_ram_size += ram_size;
- }
+ if (ram_size == 0)
+ ram_size = DEFAULT_RAM_SIZE * 1024 * 1024;
- phys_ram_base = qemu_vmalloc(phys_ram_size);
- if (!phys_ram_base) {
- fprintf(stderr, "Could not allocate physical memory\n");
- exit(1);
+#ifdef USE_KQEMU
+ /* FIXME: This is a nasty hack because kqemu can't cope with dynamic
+ guest ram allocation. It needs to go away. */
+ if (kqemu_allowed) {
+ kqemu_phys_ram_size = ram_size + VGA_RAM_SIZE + 4 * 1024 * 1024;
+ kqemu_phys_ram_base = qemu_vmalloc(kqemu_phys_ram_size);
+ if (!kqemu_phys_ram_base) {
+ fprintf(stderr, "Could not allocate physical memory\n");
+ exit(1);
+ }
}
+#endif
/* init the dynamic translator */
cpu_exec_init_all(tb_size * 1024 * 1024);
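The vl.c migration hunks above now walk the flat offset space up to last_ram_offset instead of a contiguous phys_ram_size. A small standalone sketch of that byte-per-page dirty-map walk follows; the names, the 4 KiB page size, and the flag value are assumptions of the sketch, not taken from QEMU.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_BITS 12
#define PAGE_SIZE (1u << PAGE_BITS)
#define MIGRATION_DIRTY 0x04          /* one flag bit inside the dirty byte */

static uint8_t *dirty_map;            /* one byte per guest page */
static uint64_t last_offset;          /* end of the allocated offset space */

/* Count pages still flagged dirty for migration, mirroring the loop
 * shape of ram_save_remaining() in the patch above. */
static uint64_t pages_remaining(void)
{
    uint64_t addr, count = 0;

    for (addr = 0; addr < last_offset; addr += PAGE_SIZE) {
        if (dirty_map[addr >> PAGE_BITS] & MIGRATION_DIRTY)
            count++;
    }
    return count;
}

int main(void)
{
    last_offset = 8 * PAGE_SIZE;
    dirty_map = calloc(last_offset >> PAGE_BITS, 1);
    memset(dirty_map, MIGRATION_DIRTY, 3);   /* pretend 3 pages are dirty */
    printf("%llu dirty pages remaining\n",
           (unsigned long long)pages_remaining());
    free(dirty_map);
    return 0;
}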