path: root/exec.c
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c | 84
1 file changed, 45 insertions(+), 39 deletions(-)
diff --git a/exec.c b/exec.c
index 7e49e8e..9ad0a4b 100644
--- a/exec.c
+++ b/exec.c
@@ -50,6 +50,7 @@
#include "translate-all.h"
#include "exec/memory-internal.h"
+#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"
#include "qemu/range.h"
@@ -57,7 +58,7 @@
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
-static int in_migration;
+static bool in_migration;
RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
@@ -324,7 +325,7 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
hwaddr *plen, bool resolve_subpage)
{
MemoryRegionSection *section;
- Int128 diff;
+ Int128 diff, diff_page;
section = address_space_lookup_region(d, addr, resolve_subpage);
/* Compute offset within MemoryRegionSection */
@@ -333,7 +334,9 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
/* Compute offset within MemoryRegion */
*xlat = addr + section->offset_within_region;
+ diff_page = int128_make64(((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr);
diff = int128_sub(section->mr->size, int128_make64(addr));
+ diff = int128_min(diff, diff_page);
*plen = int128_get64(int128_min(diff, int128_make64(*plen)));
return section;
}
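The hunk above makes address_space_translate_internal() clamp the returned length so a single translation never crosses a target page boundary: diff_page is the number of bytes left in the page containing addr, and the smallest of diff, diff_page and *plen wins. A minimal standalone sketch of the same arithmetic, assuming a 4 KiB page and plain 64-bit math in place of Int128 (all names below are illustrative, not QEMU APIs):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Clamp len so that [addr, addr + len) stays inside one page. */
static uint64_t clamp_to_page(uint64_t addr, uint64_t len)
{
    uint64_t diff_page = ((addr & PAGE_MASK) + PAGE_SIZE) - addr;
    return len < diff_page ? len : diff_page;
}

int main(void)
{
    /* A 100-byte request starting 8 bytes before a page boundary
     * is clamped to 8 bytes. */
    printf("%llu\n", (unsigned long long)clamp_to_page(2 * PAGE_SIZE - 8, 100));
    return 0;
}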
@@ -348,7 +351,7 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr len = *plen;
for (;;) {
- section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
+ section = address_space_translate_internal(as->dispatch, addr, &addr, &len, true);
mr = section->mr;
if (!mr->iommu_ops) {
@@ -724,11 +727,14 @@ found:
return block;
}
-static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
- uintptr_t length)
+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
- RAMBlock *block;
ram_addr_t start1;
+ RAMBlock *block;
+ ram_addr_t end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
block = qemu_get_ram_block(start);
assert(block == qemu_get_ram_block(end - 1));
@@ -737,29 +743,21 @@ static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
}
/* Note: start and end must be within the same ram block. */
-void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
- int dirty_flags)
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
+ unsigned client)
{
- uintptr_t length;
-
- start &= TARGET_PAGE_MASK;
- end = TARGET_PAGE_ALIGN(end);
-
- length = end - start;
if (length == 0)
return;
- cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
+ cpu_physical_memory_clear_dirty_range(start, length, client);
if (tcg_enabled()) {
- tlb_reset_dirty_range_all(start, end, length);
+ tlb_reset_dirty_range_all(start, length);
}
}
-static int cpu_physical_memory_set_dirty_tracking(int enable)
+static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
- int ret = 0;
in_migration = enable;
- return ret;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
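With the hunk above, callers of cpu_physical_memory_reset_dirty() now pass (start, length, client), and the page rounding happens once inside tlb_reset_dirty_range_all() instead of in every caller. A throwaway sketch of that rounding, assuming a 4 KiB page size (hypothetical names, not the QEMU helpers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Round [start, start + length) out to whole pages: end up, start down. */
static void align_range(uint64_t *start, uint64_t *end, uint64_t length)
{
    *end = PAGE_ALIGN(*start + length);
    *start &= PAGE_MASK;
}

int main(void)
{
    uint64_t start = 5000, end = 0;
    align_range(&start, &end, 100);
    printf("start=%llu end=%llu\n",            /* prints start=4096 end=8192 */
           (unsigned long long)start, (unsigned long long)end);
    return 0;
}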
@@ -1074,7 +1072,7 @@ static void *file_ram_alloc(RAMBlock *block,
}
/* MAP_POPULATE silently ignores failures */
- for (i = 0; i < (memory/hpagesize)-1; i++) {
+ for (i = 0; i < (memory/hpagesize); i++) {
memset(area + (hpagesize*i), 0, 1);
}
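The loop bound fix above matters because MAP_POPULATE failures are silent: writing one byte into each hugepage is what actually forces allocation, and the old `- 1` bound skipped the last page. A standalone sketch of the pattern (illustrative names only):

#include <stddef.h>
#include <string.h>

/* Touch one byte in every page of a mapping to force it to be allocated. */
static void prefault_pages(void *area, size_t memory, size_t pagesize)
{
    size_t i;

    for (i = 0; i < memory / pagesize; i++) {
        memset((char *)area + pagesize * i, 0, 1);
    }
}

Run right after the mapping is created, this makes a shortage of backing pages visible at setup time rather than at some later guest access.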
@@ -1211,6 +1209,9 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr)
{
RAMBlock *block, *new_block;
+ ram_addr_t old_ram_size, new_ram_size;
+
+ old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
size = TARGET_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
@@ -1271,11 +1272,17 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
ram_list.version++;
qemu_mutex_unlock_ramlist();
- ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
- last_ram_offset() >> TARGET_PAGE_BITS);
- memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
- 0, size >> TARGET_PAGE_BITS);
- cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
+ new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
+
+ if (new_ram_size > old_ram_size) {
+ int i;
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ ram_list.dirty_memory[i] =
+ bitmap_zero_extend(ram_list.dirty_memory[i],
+ old_ram_size, new_ram_size);
+ }
+ }
+ cpu_physical_memory_set_dirty_range(new_block->offset, size);
qemu_ram_setup_dump(new_block->host, size);
qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
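The hunk above replaces the single realloc'd phys_dirty byte array with DIRTY_MEMORY_NUM separate bitmaps, one per client, each grown with bitmap_zero_extend() only when the total RAM size actually increases. A standalone sketch of what such a grow step does, reallocating and zeroing the new tail (illustrative code, not the QEMU bitmap API):

#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Grow a bitmap from old_nbits to new_nbits, clearing the added bits. */
static unsigned long *grow_bitmap(unsigned long *old,
                                  size_t old_nbits, size_t new_nbits)
{
    size_t old_len = BITS_TO_LONGS(old_nbits) * sizeof(unsigned long);
    size_t new_len = BITS_TO_LONGS(new_nbits) * sizeof(unsigned long);
    unsigned long *grown = realloc(old, new_len);

    if (grown && new_len > old_len) {
        memset((char *)grown + old_len, 0, new_len - old_len);
    }
    return grown;
}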
@@ -1485,11 +1492,8 @@ found:
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
uint64_t val, unsigned size)
{
- int dirty_flags;
- dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
- if (!(dirty_flags & CODE_DIRTY_FLAG)) {
+ if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
tb_invalidate_phys_page_fast(ram_addr, size);
- dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
}
switch (size) {
case 1:
@@ -1504,11 +1508,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
default:
abort();
}
- dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
+ cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
+ cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
/* we remove the notdirty callback only if the code has been
flushed */
- if (dirty_flags == 0xff) {
+ if (!cpu_physical_memory_is_clean(ram_addr)) {
CPUArchState *env = current_cpu->env_ptr;
tlb_set_dirty(env, env->mem_io_vaddr);
}
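From this hunk on, dirty state is tracked per client instead of as the old 0xff byte of flags: a guest write marks the page dirty for DIRTY_MEMORY_MIGRATION and DIRTY_MEMORY_VGA, and the `dirty_flags == 0xff` test becomes `!cpu_physical_memory_is_clean()`. A standalone sketch of that per-client bookkeeping, mirroring the old all-flags-set test (names and sizes are illustrative, not the QEMU helpers):

#include <stdbool.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define DEMO_PAGES 1024

enum { DIRTY_VGA, DIRTY_CODE, DIRTY_MIGRATION, DIRTY_NUM };

/* One bitmap per client, one bit per page. */
static unsigned long dirty_bits[DIRTY_NUM][DEMO_PAGES / BITS_PER_LONG];

static void set_dirty_flag(unsigned long page, int client)
{
    dirty_bits[client][page / BITS_PER_LONG] |= 1ul << (page % BITS_PER_LONG);
}

static bool get_dirty_flag(unsigned long page, int client)
{
    return dirty_bits[client][page / BITS_PER_LONG] &
           (1ul << (page % BITS_PER_LONG));
}

/* Clean unless every client has already been marked dirty
 * (the per-client equivalent of dirty_flags == 0xff). */
static bool is_clean(unsigned long page)
{
    return !(get_dirty_flag(page, DIRTY_VGA) &&
             get_dirty_flag(page, DIRTY_CODE) &&
             get_dirty_flag(page, DIRTY_MIGRATION));
}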
@@ -1795,12 +1799,12 @@ static void tcg_commit(MemoryListener *listener)
static void core_log_global_start(MemoryListener *listener)
{
- cpu_physical_memory_set_dirty_tracking(1);
+ cpu_physical_memory_set_dirty_tracking(true);
}
static void core_log_global_stop(MemoryListener *listener)
{
- cpu_physical_memory_set_dirty_tracking(0);
+ cpu_physical_memory_set_dirty_tracking(false);
}
static MemoryListener core_memory_listener = {
@@ -1911,11 +1915,12 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
static void invalidate_and_set_dirty(hwaddr addr,
hwaddr length)
{
- if (!cpu_physical_memory_is_dirty(addr)) {
+ if (cpu_physical_memory_is_clean(addr)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr, addr + length, 0);
/* set dirty bit */
- cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
+ cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
+ cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
}
xen_modified_memory(addr, length);
}
@@ -2526,12 +2531,13 @@ void stl_phys_notdirty(hwaddr addr, uint32_t val)
stl_p(ptr, val);
if (unlikely(in_migration)) {
- if (!cpu_physical_memory_is_dirty(addr1)) {
+ if (cpu_physical_memory_is_clean(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- cpu_physical_memory_set_dirty_flags(
- addr1, (0xff & ~CODE_DIRTY_FLAG));
+ cpu_physical_memory_set_dirty_flag(addr1,
+ DIRTY_MEMORY_MIGRATION);
+ cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
}
}
}