author    bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-05-21 10:12:54 +0000
committer bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-05-21 10:12:54 +0000
commit    0211e5aff995ee55722148923a7fc317796e4114
tree      0957c189e7296c7b40fbc3a472fa16ae3d54d5bd
parent    30898801ad8c70708ead392b243ca8bcd28ca722
converted MUL/IMUL to TCG
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4508 c046a42c-6fe2-441c-8c8c-71466251a162
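
The patch replaces the old gen_op_mul*/gen_op_imul* micro-ops with inline TCG: operands are extended to a wider type, multiplied once, and the product is split between the result registers and the cpu_cc_dst/cpu_cc_src flag inputs, from which CF and OF are later derived. A minimal C model of the 8-bit MUL case follows; mulb_model is an illustrative name, not part of the patch. CF and OF are set exactly when the high byte of the product, kept in cc_src, is nonzero.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the flag convention the generated ops establish
 * for 8-bit MUL: AX = AL * src, and CF = OF = (AH != 0). The cc_src
 * value mirrors the cpu_cc_src write in the OT_BYTE hunk below. */
static int mulb_model(uint8_t al, uint8_t src, uint16_t *ax)
{
    uint32_t res = (uint32_t)al * src;     /* zero-extended multiply */
    *ax = (uint16_t)res;                   /* low half back into AX */
    uint32_t cc_src = res & 0xff00;        /* high byte of the product */
    return cc_src != 0;                    /* CF = OF */
}

int main(void)
{
    uint16_t ax;
    int cf = mulb_model(0x80, 0x02, &ax);  /* 128 * 2 = 256 */
    printf("AX=%04x CF=OF=%d\n", ax, cf);  /* prints AX=0100 CF=OF=1 */
    return 0;
}
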
Diffstat (limited to 'target-i386/translate.c')
-rw-r--r--  target-i386/translate.c | 140
1 file changed, 129 insertions(+), 11 deletions(-)
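
On a 32-bit host there is no 32x32->64 multiply at target_ulong width, so the OT_LONG cases below take an #else path that widens both operands into i64 temporaries, multiplies once, then splits the product into EAX and EDX. A plain-C sketch of that sequence, under the assumption that mull_model is an illustrative stand-in for the generated ops:

#include <stdint.h>
#include <stdio.h>

/* Plain-C analogue of the 32-bit-host MUL path: widen both operands to
 * 64 bits (extu_i32_i64), multiply once (mul_i64), then split the
 * product into EAX (trunc_i64_i32) and EDX (shri_i64 by 32 + trunc). */
static void mull_model(uint32_t eax_in, uint32_t src,
                       uint32_t *eax, uint32_t *edx)
{
    uint64_t t0 = (uint64_t)eax_in * src;
    *eax = (uint32_t)t0;                     /* low 32 bits */
    *edx = (uint32_t)(t0 >> 32);             /* high 32 bits */
}

int main(void)
{
    uint32_t lo, hi;
    mull_model(0x80000000u, 4, &lo, &hi);    /* 2^31 * 4 = 2^33 */
    printf("EDX:EAX = %08x:%08x\n", hi, lo); /* 00000002:00000000 */
    return 0;
}
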
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 0755987..c73fac9 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -3799,21 +3799,64 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 4: /* mul */
switch(ot) {
case OT_BYTE:
- gen_op_mulb_AL_T0();
+ gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
+ tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
s->cc_op = CC_OP_MULB;
break;
case OT_WORD:
- gen_op_mulw_AX_T0();
+ gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
+ tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
+ gen_op_mov_reg_T0(OT_WORD, R_EDX);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
s->cc_op = CC_OP_MULW;
break;
default:
case OT_LONG:
- gen_op_mull_EAX_T0();
+#ifdef TARGET_X86_64
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+#else
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_extu_i32_i64(t0, cpu_T[0]);
+ tcg_gen_extu_i32_i64(t1, cpu_T[1]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+ }
+#endif
s->cc_op = CC_OP_MULL;
break;
#ifdef TARGET_X86_64
case OT_QUAD:
- gen_op_mulq_EAX_T0();
+ tcg_gen_helper_0_1(helper_mulq_EAX_T0, cpu_T[0]);
s->cc_op = CC_OP_MULQ;
break;
#endif
@@ -3822,21 +3865,68 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 5: /* imul */
switch(ot) {
case OT_BYTE:
- gen_op_imulb_AL_T0();
+ gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
s->cc_op = CC_OP_MULB;
break;
case OT_WORD:
- gen_op_imulw_AX_T0();
+ gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
+ gen_op_mov_reg_T0(OT_WORD, R_EDX);
s->cc_op = CC_OP_MULW;
break;
default:
case OT_LONG:
- gen_op_imull_EAX_T0();
+#ifdef TARGET_X86_64
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+#else
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_ext_i32_i64(t0, cpu_T[0]);
+ tcg_gen_ext_i32_i64(t1, cpu_T[1]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ }
+#endif
s->cc_op = CC_OP_MULL;
break;
#ifdef TARGET_X86_64
case OT_QUAD:
- gen_op_imulq_EAX_T0();
+ tcg_gen_helper_0_1(helper_imulq_EAX_T0, cpu_T[0]);
s->cc_op = CC_OP_MULQ;
break;
#endif
@@ -4104,13 +4194,41 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#ifdef TARGET_X86_64
if (ot == OT_QUAD) {
- gen_op_imulq_T0_T1();
+ tcg_gen_helper_1_2(helper_imulq_T0_T1, cpu_T[0], cpu_T[0], cpu_T[1]);
} else
#endif
if (ot == OT_LONG) {
- gen_op_imull_T0_T1();
+#ifdef TARGET_X86_64
+ tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+#else
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ext_i32_i64(t0, cpu_T[0]);
+ tcg_gen_ext_i32_i64(t1, cpu_T[1]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[1], t0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
+ }
+#endif
} else {
- gen_op_imulw_T0_T1();
+ tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
}
gen_op_mov_reg_T0(ot, reg);
s->cc_op = CC_OP_MULB + ot;
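
For IMUL the overflow test generated above is uniform across widths, which is what lets CC_OP_MULB + ot select the flag computation per operand size: sign-extend the truncated low half of the product and subtract it from the full product; cc_src is nonzero exactly when the result did not fit the destination width, and CF = OF follow from that. A hypothetical 16-bit model (imulw_overflows is an illustrative name, not in the patch):

#include <stdint.h>
#include <stdio.h>

/* Model of the signed-overflow test: the full product overflows the
 * destination width iff it differs from the sign-extension of its
 * truncated low half, i.e. iff cc_src = res - sext(res_low) != 0. */
static int imulw_overflows(int16_t a, int16_t b)
{
    int32_t res = (int32_t)a * b;       /* sign-extended multiply */
    int32_t low = (int16_t)res;         /* sext of the low 16 bits */
    return (res - low) != 0;            /* CF = OF */
}

int main(void)
{
    printf("%d\n", imulw_overflows(200, 200));  /* 40000 > 32767 -> 1 */
    printf("%d\n", imulw_overflows(-100, 100)); /* -10000 fits   -> 0 */
    return 0;
}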