author     Juan Quintela <quintela@redhat.com>   2012-05-22 00:42:40 +0200
committer  Juan Quintela <quintela@redhat.com>   2012-06-29 13:27:28 +0200
commit     d24981d37e793b0a8fcde1879db19eb11fe0f975 (patch)
tree       4e9d11cda26e573692d2cbb366b6ef2f0a5b6180 /exec.c
parent     aac844ed97c39b8c0fb16d7bf9851fdedf325be3 (diff)
download   hqemu-d24981d37e793b0a8fcde1879db19eb11fe0f975.zip
           hqemu-d24981d37e793b0a8fcde1879db19eb11fe0f975.tar.gz
Only TCG needs TLB handling
Refactor the code that is only needed for TCG into a static function, and call it only when TCG is enabled. We can't refactor it into a dummy function for the KVM case, because QEMU can be compiled with both TCG and KVM at the same time.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/exec.c b/exec.c
index 8244d54..a68b65c 100644
--- a/exec.c
+++ b/exec.c
@@ -1824,11 +1824,29 @@ void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 }
 
+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
+                                      uintptr_t length)
+{
+    uintptr_t start1;
+
+    /* we modify the TLB cache so that the dirty bit will be set again
+       when accessing the range */
+    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
+    /* Check that we don't span multiple blocks - this breaks the
+       address comparisons below.  */
+    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
+            != (end - 1) - start) {
+        abort();
+    }
+    cpu_tlb_reset_dirty_all(start1, length);
+
+}
+
 /* Note: start and end must be within the same ram block.  */
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
 {
-    uintptr_t length, start1;
+    uintptr_t length;
 
     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
@@ -1838,16 +1856,9 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         return;
 
     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
 
-    /* we modify the TLB cache so that the dirty bit will be set again
-       when accessing the range */
-    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
-    /* Check that we don't span multiple blocks - this breaks the
-       address comparisons below.  */
-    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
-            != (end - 1) - start) {
-        abort();
+    if (tcg_enabled()) {
+        tlb_reset_dirty_range_all(start, end, length);
     }
-    cpu_tlb_reset_dirty_all(start1, length);
 }
 
 int cpu_physical_memory_set_dirty_tracking(int enable)
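
The commit message boils down to a runtime-dispatch pattern: updating the dirty bitmap is common to all accelerators, while resetting the software TLB only matters when TCG is driving execution, and a single QEMU binary can contain both TCG and KVM, so the decision has to be made at run time via tcg_enabled() rather than with an #ifdef or a dummy stub. The standalone sketch below illustrates that shape; it is a minimal illustration, not QEMU code, and use_tcg, reset_dirty() and the printf body are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for QEMU's tcg_enabled(): one binary can carry
 * both TCG and KVM, so the accelerator is only known at run time. */
static bool use_tcg = true;

/* TCG-only helper, mirroring the role of tlb_reset_dirty_range_all() in
 * the patch: only TCG's software TLB needs its dirty entries reset. */
static void tlb_reset_dirty_range_all(unsigned long start, unsigned long end,
                                      unsigned long length)
{
    printf("reset TLB dirty entries for [0x%lx, 0x%lx), length 0x%lx\n",
           start, end, length);
}

/* Common path: the dirty bitmap would always be updated here, but the
 * TLB work runs only when the TCG accelerator is active.  A compile-time
 * dummy would not work, because both accelerators can be built in. */
static void reset_dirty(unsigned long start, unsigned long end)
{
    unsigned long length = end - start;

    /* ... mask the dirty bits in the bitmap here ... */

    if (use_tcg) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

int main(void)
{
    reset_dirty(0x1000, 0x3000);
    return 0;
}

In the patch itself the same shape appears as the tcg_enabled() guard around tlb_reset_dirty_range_all() inside cpu_physical_memory_reset_dirty().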