path: root/translate-all.c
author    Richard Henderson <rth@twiddle.net>  2015-09-22 13:01:15 -0700
committer Richard Henderson <rth@twiddle.net>  2015-10-07 20:36:53 +1100
commit    b125f9dc7bd68cd4c57189db4da83b0620b28a72 (patch)
tree      8d2b7a06e64121738343a3abebb5070b64f4ff21 /translate-all.c
parent    f293709c6af7a65a9bcec09cdba7a60183657a3e (diff)
tcg: Check for overflow via highwater mark
We currently pre-compute a worst-case code size for any TB, which works out to be 122kB. Since the average TB size is near 1kB, this wastes quite a lot of storage. Instead, check for overflow in between generating code for each opcode. The overhead of the check isn't measurable and wastage is minimized.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
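For illustration, a minimal self-contained sketch of the pattern the message describes. Everything here (emit_op, HIGHWATER_SLACK, the buffer size) is a hypothetical stand-in, not QEMU code: the buffer reserves a small slack region at its end, and as long as no single opcode can emit more than that slack, one pointer comparison after each opcode is enough to catch overflow.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BUF_SIZE        (64 * 1024)
#define HIGHWATER_SLACK 1024    /* must exceed any single opcode's worst case */

static uint8_t buf[BUF_SIZE];

/* Emit one (fake) opcode encoding and advance the output pointer. */
static uint8_t *emit_op(uint8_t *p, const uint8_t *enc, size_t len)
{
    memcpy(p, enc, len);
    return p + len;
}

/* Generate code for n opcodes; return the generated size, or -1 on
   (pending) overflow so the caller can flush the buffer and retry. */
static ptrdiff_t gen_code(const uint8_t *enc, const size_t *len, int n)
{
    uint8_t *highwater = buf + BUF_SIZE - HIGHWATER_SLACK;
    uint8_t *p = buf;
    int i;

    for (i = 0; i < n; i++) {
        p = emit_op(p, enc, len[i]);
        enc += len[i];
        /* One cheap test per opcode replaces a 122kB worst-case
           reservation per TB. */
        if (p > highwater) {
            return -1;
        }
    }
    return p - buf;
}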
Diffstat (limited to 'translate-all.c')
-rw-r--r--  translate-all.c  31
1 file changed, 26 insertions(+), 5 deletions(-)
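The rows that the first hunk below learns to bounds-check are streams of SLEB128-encoded deltas. For reference, a sketch of signed LEB128 encoding in the shape of translate-all.c's encode_sleb128, with int64_t standing in for target_long. A 64-bit value encodes to at most 10 bytes, so each row stays small and bounded, which is why one highwater test per row suffices:

#include <stdint.h>

/* Signed LEB128: 7 payload bits per byte, high bit set on every byte
   except the last.  Encoding stops once the remaining value is pure
   sign extension of the byte just written. */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more;

    do {
        uint8_t byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}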
diff --git a/translate-all.c b/translate-all.c
index b43bd03..333eba4 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -223,6 +223,7 @@ static target_long decode_sleb128(uint8_t **pp)
 static int encode_search(TranslationBlock *tb, uint8_t *block)
 {
+    uint8_t *highwater = tcg_ctx.code_gen_highwater;
     uint8_t *p = block;
     int i, j, n;
@@ -241,6 +242,14 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
         }
         prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
         p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one row beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           encoding a row without having to check during encoding.  */
+        if (unlikely(p > highwater)) {
+            return -1;
+        }
     }

     return p - block;
@@ -756,9 +765,7 @@ static TranslationBlock *tb_alloc(target_ulong pc)
 {
     TranslationBlock *tb;

-    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
-        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
-         tcg_ctx.code_gen_buffer_max_size) {
+    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
         return NULL;
     }
     tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
@@ -1063,12 +1070,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     if (use_icount) {
         cflags |= CF_USE_ICOUNT;
     }
+
     tb = tb_alloc(pc);
-    if (!tb) {
+    if (unlikely(!tb)) {
+ buffer_overflow:
         /* flush must be done */
         tb_flush(cpu);
         /* cannot fail at this point */
         tb = tb_alloc(pc);
+        assert(tb != NULL);
         /* Don't forget to invalidate previous TB info. */
         tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
     }
@@ -1109,8 +1119,19 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_ctx.code_time -= profile_getclock();
 #endif

+    /* ??? Overflow could be handled better here.  In particular, we
+       don't need to re-do gen_intermediate_code, nor should we re-do
+       the tcg optimization currently hidden inside tcg_gen_code.  All
+       that should be required is to flush the TBs, allocate a new TB,
+       re-initialize it per above, and re-do the actual code generation.  */
     gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
+    if (unlikely(gen_code_size < 0)) {
+        goto buffer_overflow;
+    }
     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
+    if (unlikely(search_size < 0)) {
+        goto buffer_overflow;
+    }

 #ifdef CONFIG_PROFILER
     tcg_ctx.code_time += profile_getclock();
@@ -1681,7 +1702,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     cpu_fprintf(f, "Translation buffer state:\n");
     cpu_fprintf(f, "gen code size %td/%zd\n",
                 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
-                tcg_ctx.code_gen_buffer_max_size);
+                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
     cpu_fprintf(f, "TB count %d/%d\n",
                 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
     cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
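The initialization of code_gen_highwater itself is not among these translate-all.c hunks; it lives with the rest of the code-buffer setup. A sketch of how such a mark can be derived from the buffer bounds, where the 1024-byte slack is an assumption chosen to exceed the worst-case output of any single opcode or search row:

#include <stddef.h>
#include <stdint.h>

/* Sketch only: derive the highwater mark from the buffer bounds.  The
   slack constant is an assumption, not taken from this diff. */
static uint8_t *compute_highwater(uint8_t *buffer, size_t size)
{
    return buffer + size - 1024;
}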