Diffstat (limited to 'translate-all.c')
-rw-r--r--  translate-all.c | 215
1 file changed, 134 insertions(+), 81 deletions(-)
diff --git a/translate-all.c b/translate-all.c
index 042a857..bf05326 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -63,6 +63,10 @@
#include "qemu/bitmap.h"
#include "qemu/timer.h"
+#include "hqemu.h"
+
+size_t get_cpu_size(void) { return sizeof(CPUArchState); }
+
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
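The get_cpu_size() helper added above presumably exists because hqemu's LLVM runtime is built without the per-target headers that define CPUArchState, so it has to query the state size at run time. A hypothetical caller (alloc_cpu_copy is not part of this patch):

#include <stdlib.h>
#include <string.h>

extern size_t get_cpu_size(void);

/* hypothetical: clone a CPU state block without knowing its layout */
static void *alloc_cpu_copy(const void *env)
{
    size_t n = get_cpu_size();
    void *copy = malloc(n);
    if (copy) {
        memcpy(copy, env, n);
    }
    return copy;
}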
@@ -124,7 +128,8 @@ intptr_t qemu_host_page_mask;
static void *l1_map[V_L1_SIZE];
/* code generation context */
-TCGContext tcg_ctx;
+TCGContext tcg_ctx_global;
+__thread TCGContext tcg_ctx;
/* translation block context */
#ifdef CONFIG_USER_ONLY
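This hunk is the core of the patch: tcg_ctx becomes thread-local so several translator threads can run concurrently, while tcg_ctx_global keeps the shared state. A minimal sketch of the pattern, assuming each worker copies the global context and shares TB bookkeeping through the new tb_ctx pointer (the `.` to `->` changes throughout the rest of the diff follow from tb_ctx becoming a pointer):

#include <string.h>

typedef struct TBContext TBContext;   /* shared TB bookkeeping */

typedef struct TCGContext {
    TBContext *tb_ctx;    /* one instance, shared by every thread */
    void *code_gen_ptr;   /* per-thread code emission state */
} TCGContext;

TCGContext tcg_ctx_global;     /* owned by the initial thread */
__thread TCGContext tcg_ctx;   /* private working copy per thread */

/* hypothetical worker-thread setup: start from the global snapshot */
static void tcg_ctx_init_thread(void)
{
    memcpy(&tcg_ctx, &tcg_ctx_global, sizeof(TCGContext));
    /* tb_ctx is copied as a pointer, so the TB table stays shared */
}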
@@ -135,7 +140,7 @@ void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
assert(!have_tb_lock);
- qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+ qemu_mutex_lock(&tcg_ctx.tb_ctx->tb_lock);
have_tb_lock++;
#endif
}
@@ -145,7 +150,7 @@ void tb_unlock(void)
#ifdef CONFIG_USER_ONLY
assert(have_tb_lock);
have_tb_lock--;
- qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+ qemu_mutex_unlock(&tcg_ctx.tb_ctx->tb_lock);
#endif
}
@@ -153,7 +158,7 @@ void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
if (have_tb_lock) {
- qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+ qemu_mutex_unlock(&tcg_ctx.tb_ctx->tb_lock);
have_tb_lock = 0;
}
#endif
@@ -161,11 +166,12 @@ void tb_lock_reset(void)
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2);
-static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
+static TranslationBlock *tb_find_pc(CPUState *cpu, uintptr_t tc_ptr);
void cpu_gen_init(void)
{
tcg_context_init(&tcg_ctx);
+ tcg_ctx.tb_ctx = g_malloc0(sizeof(TBContext));
}
/* Encode VAL as a signed leb128 sequence at P.
@@ -190,7 +196,7 @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val)
/* Decode a signed leb128 sequence at *PP; increment *PP past the
decoded value. Return the decoded value. */
-static target_long decode_sleb128(uint8_t **pp)
+target_long decode_sleb128(uint8_t **pp)
{
uint8_t *p = *pp;
target_long val = 0;
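decode_sleb128() loses its static qualifier above, presumably so the LLVM side can parse the same per-instruction metadata stream. For reference, signed LEB128 packs seven data bits per byte with the high bit as a continuation flag; a standalone sketch using int64_t in place of target_long:

#include <stdint.h>

static uint8_t *sleb128_encode(uint8_t *p, int64_t val)
{
    int more;
    do {
        uint8_t byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;          /* continuation bit */
        }
        *p++ = byte;
    } while (more);
    return p;
}

static int64_t sleb128_decode(uint8_t **pp)
{
    uint8_t *p = *pp, byte;
    int64_t val = 0;
    int shift = 0;
    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -((int64_t)1 << shift);   /* sign-extend */
    }
    *pp = p;
    return val;
}
/* e.g. -2 encodes to the single byte 0x7e; 128 encodes to 0x80 0x01 */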
@@ -268,6 +274,11 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
int64_t ti = profile_getclock();
#endif
+#if defined(CONFIG_LLVM)
+ if (llvm_locate_trace(searched_pc))
+ return llvm_restore_state(cpu, tb, searched_pc);
+#endif
+
if (searched_pc < host_pc) {
return -1;
}
@@ -297,8 +308,8 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
restore_state_to_opc(env, tb, data);
#ifdef CONFIG_PROFILER
- tcg_ctx.restore_time += profile_getclock() - ti;
- tcg_ctx.restore_count++;
+ tcg_ctx_global.restore_time += profile_getclock() - ti;
+ tcg_ctx_global.restore_count++;
#endif
return 0;
}
@@ -307,7 +318,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
TranslationBlock *tb;
- tb = tb_find_pc(retaddr);
+ tb = tb_find_pc(cpu, retaddr);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
@@ -485,7 +496,13 @@ static inline PageDesc *page_find(tb_page_addr_t index)
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif
+/* Note: the size of the code buffer is doubled. We steal half of it
+ * to act as the trace code cache. */
+#if defined(CONFIG_LLVM)
+#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024 * 2)
+#else
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
+#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
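Under CONFIG_LLVM the default buffer is therefore 64 MiB rather than 32 MiB, with half reserved for trace code. A sketch of what that split could look like; llvm_alloc_cache's real behavior is not shown in this diff, so the names below are assumptions:

#include <stddef.h>
#include <stdint.h>

static void *code_gen_buffer;       /* TCG half */
static size_t code_gen_buffer_size;
static void *trace_cache;           /* hypothetical LLVM half */
static size_t trace_cache_size;

/* assumed split: lower half for TCG blocks, upper half for traces */
static void split_code_buffer(void *buf, size_t total)
{
    code_gen_buffer = buf;
    code_gen_buffer_size = total / 2;
    trace_cache = (uint8_t *)buf + total / 2;
    trace_cache_size = total - total / 2;
}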
@@ -503,6 +520,9 @@ static inline size_t size_code_gen_buffer(size_t tb_size)
static buffer, we could size this on RESERVED_VA, on the text
segment size of the executable, or continue to use the default. */
tb_size = (unsigned long)(ram_size / 4);
+#if defined(CONFIG_LLVM)
+ tb_size = (unsigned long)(ram_size / 2);
+#endif
#endif
}
if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
@@ -730,15 +750,18 @@ static inline void code_gen_alloc(size_t tb_size)
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
exit(1);
}
+#if defined(CONFIG_LLVM)
+ llvm_alloc_cache();
+#endif
/* Estimate a good size for the number of TBs we can support. We
still haven't deducted the prologue from the buffer size here,
but that's minimal and won't affect the estimate much. */
tcg_ctx.code_gen_max_blocks
= tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
- tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
+ tcg_ctx.tb_ctx->tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
- qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
+ qemu_mutex_init(&tcg_ctx.tb_ctx->tb_lock);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
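The block-count estimate in code_gen_alloc() divides the buffer size by an average block size. Back-of-envelope, with illustrative numbers only (CODE_GEN_AVG_BLOCK_SIZE's actual value depends on the build):

enum {
    EXAMPLE_BUFFER_SIZE = 32 * 1024 * 1024,   /* TCG half: 32 MiB  */
    EXAMPLE_AVG_TB_SIZE = 128,                /* host bytes per TB */
    EXAMPLE_MAX_BLOCKS  = EXAMPLE_BUFFER_SIZE / EXAMPLE_AVG_TB_SIZE
};                                            /* = 262144 TB slots */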
@@ -765,26 +788,35 @@ bool tcg_enabled(void)
too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
+ TCGContext *s = &tcg_ctx_global;
TranslationBlock *tb;
- if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
+ if (s->tb_ctx->nb_tbs >= s->code_gen_max_blocks) {
return NULL;
}
- tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
+#if defined(CONFIG_LLVM)
+ if (llvm_check_cache() == 1)
+ return NULL;
+#endif
+
+ tb = &s->tb_ctx->tbs[s->tb_ctx->nb_tbs++];
tb->pc = pc;
tb->cflags = 0;
+
+ optimization_init_tb(tb, s->tb_ctx->nb_tbs - 1);
return tb;
}
void tb_free(TranslationBlock *tb)
{
+ TCGContext *s = &tcg_ctx_global;
/* In practice this is mostly used for single use temporary TB
Ignore the hard cases and just back up if this TB happens to
be the last one generated. */
- if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
- tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
- tcg_ctx.code_gen_ptr = tb->tc_ptr;
- tcg_ctx.tb_ctx.nb_tbs--;
+ if (s->tb_ctx->nb_tbs > 0 &&
+ tb == &s->tb_ctx->tbs[s->tb_ctx->nb_tbs - 1]) {
+ s->code_gen_ptr = tb->tc_ptr;
+ s->tb_ctx->nb_tbs--;
}
}
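tb_alloc and tb_free together implement a bump allocator over the tbs[] array: allocation hands out the next slot, and only the most recently allocated TB can be given back (anything older is reclaimed wholesale by tb_flush). A standalone sketch of that discipline:

#define MAX_TBS 1024

typedef struct TB { void *tc_ptr; } TB;

static TB tbs[MAX_TBS];
static int nb_tbs;
static void *code_gen_ptr;

static TB *tb_alloc_sketch(void)
{
    if (nb_tbs >= MAX_TBS) {
        return NULL;                 /* caller must flush and retry */
    }
    return &tbs[nb_tbs++];
}

static void tb_free_sketch(TB *tb)
{
    /* only the last-allocated TB can really be reclaimed */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;   /* roll the code pointer back too */
        nb_tbs--;
    }
}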
@@ -832,42 +864,49 @@ static void page_flush_tb(void)
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
+ TCGContext *s = &tcg_ctx_global;
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
- (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
- tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
- ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
- tcg_ctx.tb_ctx.nb_tbs : 0);
+ (unsigned long)(s->code_gen_ptr - s->code_gen_buffer),
+ s->tb_ctx->nb_tbs, s->tb_ctx->nb_tbs > 0 ?
+ ((unsigned long)(s->code_gen_ptr - s->code_gen_buffer)) /
+ s->tb_ctx->nb_tbs : 0);
#endif
- if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
- > tcg_ctx.code_gen_buffer_size) {
+ if ((unsigned long)(s->code_gen_ptr - s->code_gen_buffer)
+ > s->code_gen_buffer_size) {
cpu_abort(cpu, "Internal error: code buffer overflow\n");
}
- tcg_ctx.tb_ctx.nb_tbs = 0;
+#if defined(CONFIG_LLVM)
+ llvm_tb_flush();
+#endif
+
+ s->tb_ctx->nb_tbs = 0;
CPU_FOREACH(cpu) {
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+ optimization_reset(cpu->env_ptr, 1);
}
- memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
+ memset(s->tb_ctx->tb_phys_hash, 0, sizeof(s->tb_ctx->tb_phys_hash));
page_flush_tb();
- tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
+ s->code_gen_ptr = s->code_gen_buffer;
/* XXX: flush processor icache at this point if cache flush is
expensive */
- tcg_ctx.tb_ctx.tb_flush_count++;
+ s->tb_ctx->tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(target_ulong address)
{
+ TCGContext *s = &tcg_ctx_global;
TranslationBlock *tb;
int i;
address &= TARGET_PAGE_MASK;
for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
- for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
+ for (tb = s->tb_ctx->tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
address >= tb->pc + tb->size)) {
printf("ERROR invalidate: address=" TARGET_FMT_lx
@@ -881,11 +920,12 @@ static void tb_invalidate_check(target_ulong address)
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
+ TCGContext *s = &tcg_ctx_global;
TranslationBlock *tb;
int i, flags1, flags2;
for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
- for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
+ for (tb = s->tb_ctx->tb_phys_hash[i]; tb != NULL;
tb = tb->phys_hash_next) {
flags1 = page_get_flags(tb->pc);
flags2 = page_get_flags(tb->pc + tb->size - 1);
@@ -911,6 +951,10 @@ static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
}
ptb = &tb1->phys_hash_next;
}
+#if defined(CONFIG_LLVM)
+ tb->mode = BLOCK_INVALID;
+ llvm_tb_remove(tb);
+#endif
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
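tb_hash_remove (partially visible above) walks a singly linked chain through a pointer-to-pointer, which removes a node without special-casing the list head. The generic pattern:

typedef struct TB {
    struct TB *phys_hash_next;
} TB;

/* unlink tb from a chain headed at *ptb; no head special case needed */
static void hash_remove(TB **ptb, TB *tb)
{
    for (; *ptb != NULL; ptb = &(*ptb)->phys_hash_next) {
        if (*ptb == tb) {
            *ptb = tb->phys_hash_next;
            return;
        }
    }
}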
@@ -969,16 +1013,15 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
+ TCGContext *s = &tcg_ctx_global;
CPUState *cpu;
PageDesc *p;
unsigned int h, n1;
- tb_page_addr_t phys_pc;
TranslationBlock *tb1, *tb2;
/* remove the TB from the hash list */
- phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_phys_hash_func(phys_pc);
- tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
+ h = tb_phys_hash_func(tb->pc);
+ tb_hash_remove(&s->tb_ctx->tb_phys_hash[h], tb);
/* remove the TB from the page list */
if (tb->page_addr[0] != page_addr) {
@@ -992,7 +1035,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
invalidate_page_bitmap(p);
}
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
+ s->tb_ctx->tb_invalidated_flag = 1;
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
@@ -1000,6 +1043,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
if (cpu->tb_jmp_cache[h] == tb) {
cpu->tb_jmp_cache[h] = NULL;
}
+ optimization_remove_entry(cpu->env_ptr, tb);
}
/* suppress this TB from the two jump lists */
@@ -1021,7 +1065,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
}
tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
- tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
+ s->tb_ctx->tb_phys_invalidate_count++;
}
static void build_page_bitmap(PageDesc *p)
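Note the behavioral change above: the physical hash is now keyed on the guest pc (tb->pc) instead of phys_pc, and the tb_link_page hunk further down makes the matching change on the insert side. A sketch of that head-insert, with a simplified stand-in hash:

#define PHYS_HASH_SIZE (1 << 15)

typedef struct TB {
    unsigned long pc;
    struct TB *phys_hash_next;
} TB;

static TB *tb_phys_hash[PHYS_HASH_SIZE];

static unsigned int hash_func(unsigned long pc)
{
    return pc & (PHYS_HASH_SIZE - 1);   /* simplified stand-in hash */
}

static void hash_insert(TB *tb)
{
    TB **ptb = &tb_phys_hash[hash_func(tb->pc)];
    tb->phys_hash_next = *ptb;          /* push at head of the chain */
    *ptb = tb;
}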
@@ -1058,6 +1102,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
int flags, int cflags)
{
+ TCGContext *s = &tcg_ctx_global;
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
tb_page_addr_t phys_pc, phys_page2;
@@ -1082,22 +1127,22 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb = tb_alloc(pc);
assert(tb != NULL);
/* Don't forget to invalidate previous TB info. */
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
+ s->tb_ctx->tb_invalidated_flag = 1;
}
- gen_code_buf = tcg_ctx.code_gen_ptr;
- tb->tc_ptr = gen_code_buf;
+ gen_code_buf = s->code_gen_ptr;
+ tb->tc_ptr = tb->opt_ptr = gen_code_buf;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
#ifdef CONFIG_PROFILER
- tcg_ctx.tb_count1++; /* includes aborted translations because of
+ s->tb_count1++; /* includes aborted translations because of
exceptions */
ti = profile_getclock();
#endif
- tcg_func_start(&tcg_ctx);
+ tcg_func_start(&tcg_ctx, tb);
gen_intermediate_code(env, tb);
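tb->opt_ptr is initialized to the same address as tc_ptr here; presumably the LLVM runtime later repoints it at an optimized trace while tc_ptr keeps naming the original TCG code. A hedged sketch of such a patch-in step (the helper name is an assumption, not part of this patch):

typedef struct TranslationBlock {
    void *tc_ptr;    /* original TCG-generated code */
    void *opt_ptr;   /* current dispatch target; may be a trace */
} TranslationBlock;

/* hypothetical: once a hot trace is compiled, redirect dispatch to it */
static void tb_install_trace(TranslationBlock *tb, void *trace_code)
{
    tb->opt_ptr = trace_code;   /* tc_ptr still names the slow copy */
}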
@@ -1116,9 +1161,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#endif
#ifdef CONFIG_PROFILER
- tcg_ctx.tb_count++;
- tcg_ctx.interm_time += profile_getclock() - ti;
- tcg_ctx.code_time -= profile_getclock();
+ s->tb_count++;
+ s->interm_time += profile_getclock() - ti;
+ s->code_time -= profile_getclock();
#endif
/* ??? Overflow could be handled better here. In particular, we
@@ -1136,10 +1181,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
#ifdef CONFIG_PROFILER
- tcg_ctx.code_time += profile_getclock();
- tcg_ctx.code_in_len += tb->size;
- tcg_ctx.code_out_len += gen_code_size;
- tcg_ctx.search_out_len += search_size;
+ s->code_time += profile_getclock();
+ s->code_in_len += tb->size;
+ s->code_out_len += gen_code_size;
+ s->search_out_len += search_size;
#endif
#ifdef DEBUG_DISAS
@@ -1151,7 +1196,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
#endif
- tcg_ctx.code_gen_ptr = (void *)
+ s->code_gen_ptr = (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
@@ -1247,7 +1292,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
current_tb = NULL;
if (cpu->mem_io_pc) {
/* now we have a real cpu fault */
- current_tb = tb_find_pc(cpu->mem_io_pc);
+ current_tb = tb_find_pc(cpu, cpu->mem_io_pc);
}
}
if (current_tb == tb &&
@@ -1365,7 +1410,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
- current_tb = tb_find_pc(pc);
+ current_tb = tb_find_pc(cpu, pc);
}
if (cpu != NULL) {
env = cpu->env_ptr;
@@ -1475,12 +1520,13 @@ static inline void tb_alloc_page(TranslationBlock *tb,
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2)
{
+ TCGContext *s = &tcg_ctx_global;
unsigned int h;
TranslationBlock **ptb;
/* add in the physical hash table */
- h = tb_phys_hash_func(phys_pc);
- ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
+ h = tb_phys_hash_func(tb->pc);
+ ptb = &s->tb_ctx->tb_phys_hash[h];
tb->phys_hash_next = *ptb;
*ptb = tb;
@@ -1511,25 +1557,31 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
-static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
+static TranslationBlock *tb_find_pc(CPUState *cpu, uintptr_t tc_ptr)
{
+ TCGContext *s = &tcg_ctx_global;
int m_min, m_max, m;
uintptr_t v;
TranslationBlock *tb;
- if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
+ if (s->tb_ctx->nb_tbs <= 0) {
return NULL;
}
- if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
- tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
+#if defined(CONFIG_LLVM)
+ tb = llvm_find_pc(cpu, tc_ptr);
+ if (tb)
+ return tb;
+#endif
+ if (tc_ptr < (uintptr_t)s->code_gen_buffer ||
+ tc_ptr >= (uintptr_t)s->code_gen_ptr) {
return NULL;
}
/* binary search (cf Knuth) */
m_min = 0;
- m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
+ m_max = s->tb_ctx->nb_tbs - 1;
while (m_min <= m_max) {
m = (m_min + m_max) >> 1;
- tb = &tcg_ctx.tb_ctx.tbs[m];
+ tb = &s->tb_ctx->tbs[m];
v = (uintptr_t)tb->tc_ptr;
if (v == tc_ptr) {
return tb;
@@ -1539,7 +1591,7 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
m_min = m + 1;
}
}
- return &tcg_ctx.tb_ctx.tbs[m_max];
+ return &s->tb_ctx->tbs[m_max];
}
#if !defined(CONFIG_USER_ONLY)
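tb_find_pc relies on tbs[] being sorted by tc_ptr, which holds because code is emitted in allocation order; when there is no exact hit, tbs[m_max] is the block whose code region contains tc_ptr. A standalone sketch of the search:

#include <stdint.h>

typedef struct TB { void *tc_ptr; } TB;

static TB *find_tb_by_host_pc(TB *tbs, int nb_tbs, uintptr_t tc_ptr)
{
    int m_min = 0, m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        int m = (m_min + m_max) >> 1;
        uintptr_t v = (uintptr_t)tbs[m].tc_ptr;
        if (v == tc_ptr) {
            return &tbs[m];
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact hit: tc_ptr lies inside tbs[m_max]'s code region */
    return m_max >= 0 ? &tbs[m_max] : NULL;
}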
@@ -1567,7 +1619,7 @@ void tb_check_watchpoint(CPUState *cpu)
{
TranslationBlock *tb;
- tb = tb_find_pc(cpu->mem_io_pc);
+ tb = tb_find_pc(cpu, cpu->mem_io_pc);
if (tb) {
/* We can use retranslation to find the PC. */
cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
@@ -1599,7 +1651,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
target_ulong pc, cs_base;
uint64_t flags;
- tb = tb_find_pc(retaddr);
+ tb = tb_find_pc(cpu, retaddr);
if (!tb) {
cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
(void *)retaddr);
@@ -1675,6 +1727,7 @@ void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
+ TCGContext *s = &tcg_ctx_global;
int i, target_code_size, max_target_code_size;
int direct_jmp_count, direct_jmp2_count, cross_page;
TranslationBlock *tb;
@@ -1684,8 +1737,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
cross_page = 0;
direct_jmp_count = 0;
direct_jmp2_count = 0;
- for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
- tb = &tcg_ctx.tb_ctx.tbs[i];
+ for (i = 0; i < s->tb_ctx->nb_tbs; i++) {
+ tb = &s->tb_ctx->tbs[i];
target_code_size += tb->size;
if (tb->size > max_target_code_size) {
max_target_code_size = tb->size;
@@ -1703,35 +1756,35 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %td/%zd\n",
- tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
- tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
+ s->code_gen_ptr - s->code_gen_buffer,
+ s->code_gen_highwater - s->code_gen_buffer);
cpu_fprintf(f, "TB count %d/%d\n",
- tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
+ s->tb_ctx->nb_tbs, s->code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
- tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
- tcg_ctx.tb_ctx.nb_tbs : 0,
+ s->tb_ctx->nb_tbs ? target_code_size /
+ s->tb_ctx->nb_tbs : 0,
max_target_code_size);
cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
- tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
- tcg_ctx.code_gen_buffer) /
- tcg_ctx.tb_ctx.nb_tbs : 0,
- target_code_size ? (double) (tcg_ctx.code_gen_ptr -
- tcg_ctx.code_gen_buffer) /
+ s->tb_ctx->nb_tbs ? (s->code_gen_ptr -
+ s->code_gen_buffer) /
+ s->tb_ctx->nb_tbs : 0,
+ target_code_size ? (double) (s->code_gen_ptr -
+ s->code_gen_buffer) /
target_code_size : 0);
cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
- tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
- tcg_ctx.tb_ctx.nb_tbs : 0);
+ s->tb_ctx->nb_tbs ? (cross_page * 100) /
+ s->tb_ctx->nb_tbs : 0);
cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
direct_jmp_count,
- tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
- tcg_ctx.tb_ctx.nb_tbs : 0,
+ s->tb_ctx->nb_tbs ? (direct_jmp_count * 100) /
+ s->tb_ctx->nb_tbs : 0,
direct_jmp2_count,
- tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
- tcg_ctx.tb_ctx.nb_tbs : 0);
+ s->tb_ctx->nb_tbs ? (direct_jmp2_count * 100) /
+ s->tb_ctx->nb_tbs : 0);
cpu_fprintf(f, "\nStatistics:\n");
- cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
+ cpu_fprintf(f, "TB flush count %d\n", s->tb_ctx->tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n",
- tcg_ctx.tb_ctx.tb_phys_invalidate_count);
+ s->tb_ctx->tb_phys_invalidate_count);
cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
tcg_dump_info(f, cpu_fprintf);
}