author    j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>  2007-04-05 06:43:27 +0000
committer j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>  2007-04-05 06:43:27 +0000
commit    6fa4cea9e8e904f7aac0c3d4f73a883c9e1e53bd (patch)
tree      f9b4ad511745090b7f796be40c1ff8356f1eead5
parent    876d4b07832b3b4fd30510420631570b6b8c94ea (diff)
Infrastructure to support more than 2 MMU modes.
Add example for Alpha and PowerPC hypervisor mode.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2596 c046a42c-6fe2-441c-8c8c-71466251a162
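The patch only widens the TLB arrays; how a target chooses which of its NB_MMU_MODES rows to use is left to later per-target code. As a purely illustrative sketch (the helper name and the ps_cm field below are hypothetical and not part of this commit), an Alpha-style target with four running levels might pick a row like this:

    /* Hypothetical sketch: map the current Alpha privilege level
     * (0 = kernel ... 3 = user) to one of the NB_MMU_MODES rows of
     * tlb_table.  Nothing in this commit adds such a helper; the
     * names here are illustrative only. */
    typedef struct AlphaModeSketch {
        int ps_cm;                  /* hypothetical "current mode" field, 0..3 */
    } AlphaModeSketch;

    static inline int alpha_mmu_index_sketch(const AlphaModeSketch *s)
    {
        return s->ps_cm & 3;        /* one TLB row per privilege level */
    }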
-rw-r--r--  cpu-defs.h  11
-rw-r--r--  exec.c      38
2 files changed, 48 insertions, 1 deletion
diff --git a/cpu-defs.h b/cpu-defs.h
index 46cda20..202eaee 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -108,6 +108,15 @@ typedef struct CPUTLBEntry {
target_phys_addr_t addend;
} CPUTLBEntry;
+/* Alpha has 4 different running levels */
+#if defined(TARGET_ALPHA)
+#define NB_MMU_MODES 4
+#elif defined(TARGET_PPC64H) /* PowerPC 64 with hypervisor mode support */
+#define NB_MMU_MODES 3
+#else
+#define NB_MMU_MODES 2
+#endif
+
#define CPU_COMMON \
struct TranslationBlock *current_tb; /* currently executing TB */ \
/* soft mmu support */ \
@@ -119,7 +128,7 @@ typedef struct CPUTLBEntry {
target_ulong mem_write_vaddr; /* target virtual addr at which the \
memory was written */ \
/* 0 = kernel, 1 = user */ \
- CPUTLBEntry tlb_table[2][CPU_TLB_SIZE]; \
+ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
\
/* from this point: preserved by CPU reset */ \
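With tlb_table now sized by NB_MMU_MODES, every lookup site first has to pick a row. A minimal sketch of that lookup, using the same index math this patch applies in tlb_flush_page() below (the helper itself and the mmu_idx parameter are assumptions, not part of the commit):

    /* Sketch only: return the TLB entry for vaddr in the given MMU mode.
     * The index computation mirrors tlb_flush_page() in this patch; the
     * helper is not added by the commit. */
    static inline CPUTLBEntry *tlb_entry_sketch(CPUState *env, int mmu_idx,
                                                target_ulong vaddr)
    {
        int i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        return &env->tlb_table[mmu_idx][i];
    }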
diff --git a/exec.c b/exec.c
index 818fe21..c49c86f 100644
--- a/exec.c
+++ b/exec.c
@@ -1300,6 +1300,16 @@ void tlb_flush(CPUState *env, int flush_global)
env->tlb_table[1][i].addr_read = -1;
env->tlb_table[1][i].addr_write = -1;
env->tlb_table[1][i].addr_code = -1;
+#if (NB_MMU_MODES >= 3)
+ env->tlb_table[2][i].addr_read = -1;
+ env->tlb_table[2][i].addr_write = -1;
+ env->tlb_table[2][i].addr_code = -1;
+#if (NB_MMU_MODES == 4)
+ env->tlb_table[3][i].addr_read = -1;
+ env->tlb_table[3][i].addr_write = -1;
+ env->tlb_table[3][i].addr_code = -1;
+#endif
+#endif
}
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
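The hand-unrolled #if ladder keeps the flush body unchanged for the common two-mode case. For comparison, an equivalent formulation as a loop over NB_MMU_MODES would look roughly like this (a sketch, not what the patch does):

    /* Sketch: flush every mode's TLB row with a loop instead of the
     * nested #if ladder used by the patch. */
    static void tlb_flush_all_modes_sketch(CPUState *env)
    {
        int mmu_idx, i;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++) {
                env->tlb_table[mmu_idx][i].addr_read = -1;
                env->tlb_table[mmu_idx][i].addr_write = -1;
                env->tlb_table[mmu_idx][i].addr_code = -1;
            }
        }
    }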
@@ -1345,6 +1355,12 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_flush_entry(&env->tlb_table[0][i], addr);
tlb_flush_entry(&env->tlb_table[1][i], addr);
+#if (NB_MMU_MODES >= 3)
+ tlb_flush_entry(&env->tlb_table[2][i], addr);
+#if (NB_MMU_MODES == 4)
+ tlb_flush_entry(&env->tlb_table[3][i], addr);
+#endif
+#endif
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
@@ -1434,6 +1450,14 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
+#if (NB_MMU_MODES >= 3)
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
+#if (NB_MMU_MODES == 4)
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
+#endif
+#endif
}
#if !defined(CONFIG_SOFTMMU)
@@ -1486,6 +1510,14 @@ void cpu_tlb_update_dirty(CPUState *env)
tlb_update_dirty(&env->tlb_table[0][i]);
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_update_dirty(&env->tlb_table[1][i]);
+#if (NB_MMU_MODES >= 3)
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_update_dirty(&env->tlb_table[2][i]);
+#if (NB_MMU_MODES == 4)
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_update_dirty(&env->tlb_table[3][i]);
+#endif
+#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
@@ -1511,6 +1543,12 @@ static inline void tlb_set_dirty(CPUState *env,
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_set_dirty1(&env->tlb_table[0][i], addr);
tlb_set_dirty1(&env->tlb_table[1][i], addr);
+#if (NB_MMU_MODES >= 3)
+ tlb_set_dirty1(&env->tlb_table[2][i], addr);
+#if (NB_MMU_MODES == 4)
+ tlb_set_dirty1(&env->tlb_table[3][i], addr);
+#endif
+#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
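The remaining hunks (tlb_flush_page, cpu_physical_memory_reset_dirty, cpu_tlb_update_dirty, tlb_set_dirty) all repeat the same shape: apply a per-entry operation to every MMU mode's row. A hedged sketch of that common pattern for two-argument callers such as tlb_flush_entry(); this helper is not in the patch, which open-codes each site behind #if guards:

    /* Sketch: apply fn to the entry at index i in every MMU mode's row.
     * Matches the (CPUTLBEntry *, target_ulong) shape of tlb_flush_entry();
     * callers with other signatures would need their own variants. */
    static inline void tlb_for_each_mode_sketch(CPUState *env, int i,
                                                void (*fn)(CPUTLBEntry *,
                                                           target_ulong),
                                                target_ulong arg)
    {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            fn(&env->tlb_table[mmu_idx][i], arg);
        }
    }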