From 2f93c23fe71420e5095f2fae1877fe747ad9f876 Mon Sep 17 00:00:00 2001
From: Aurelien Jarno
Date: Tue, 11 Sep 2012 08:47:12 +0000
Subject: target-ppc: use the softfloat float32_muladd function

Use the new softfloat float32_muladd() function to implement the vmaddfp
and vnmsubfp instructions. As a bonus we can get rid of the call to the
HANDLE_NAN3 macro, as the NaN handling is directly done at the softfloat
level.

Signed-off-by: Aurelien Jarno
Signed-off-by: Alexander Graf
---
 target-ppc/int_helper.c | 57 ++++++++++++-------------------------------------
 1 file changed, 14 insertions(+), 43 deletions(-)

diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index 6141243..6f9beff 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -418,6 +418,20 @@ VARITHFP(minfp, float32_min)
 VARITHFP(maxfp, float32_max)
 #undef VARITHFP
 
+#define VARITHFPFMA(suffix, type)                                       \
+    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+                          ppc_avr_t *b, ppc_avr_t *c)                   \
+    {                                                                   \
+        int i;                                                          \
+        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
+            r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
+                                     type, &env->vec_status);           \
+        }                                                               \
+    }
+VARITHFPFMA(maddfp, 0);
+VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
+#undef VARITHFPFMA
+
 #define VARITHSAT_CASE(type, op, cvt, element)                          \
     {                                                                   \
         type result = (type)a->element[i] op (type)b->element[i];       \
@@ -649,27 +663,6 @@ VCT(uxs, cvtsduw, u32)
 VCT(sxs, cvtsdsw, s32)
 #undef VCT
 
-void helper_vmaddfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
-                    ppc_avr_t *c)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
-            /* Need to do the computation in higher precision and round
-             * once at the end. */
-            float64 af, bf, cf, t;
-
-            af = float32_to_float64(a->f[i], &env->vec_status);
-            bf = float32_to_float64(b->f[i], &env->vec_status);
-            cf = float32_to_float64(c->f[i], &env->vec_status);
-            t = float64_mul(af, cf, &env->vec_status);
-            t = float64_add(t, bf, &env->vec_status);
-            r->f[i] = float64_to_float32(t, &env->vec_status);
-        }
-    }
-}
-
 void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                       ppc_avr_t *b, ppc_avr_t *c)
 {
@@ -909,28 +902,6 @@ VMUL(uh, u16, u32)
 #undef VMUL_DO
 #undef VMUL
 
-void helper_vnmsubfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
-                     ppc_avr_t *b, ppc_avr_t *c)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
-            /* Need to do the computation is higher precision and round
-             * once at the end. */
-            float64 af, bf, cf, t;
-
-            af = float32_to_float64(a->f[i], &env->vec_status);
-            bf = float32_to_float64(b->f[i], &env->vec_status);
-            cf = float32_to_float64(c->f[i], &env->vec_status);
-            t = float64_mul(af, cf, &env->vec_status);
-            t = float64_sub(t, bf, &env->vec_status);
-            t = float64_chs(t);
-            r->f[i] = float64_to_float32(t, &env->vec_status);
-        }
-    }
-}
-
 void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                   ppc_avr_t *c)
 {
--
cgit v1.1
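
Note (not part of the patch): the sketch below only restates what the two
VARITHFPFMA instantiations ask float32_muladd() to compute per element. It
uses the host fmaf() from <math.h> instead of QEMU's softfloat, so rounding
modes, exception flags carried in env->vec_status, and softfloat NaN
propagation are deliberately ignored; the function names are made up for
illustration.

    #include <math.h>

    /* vmaddfp element: flags = 0, i.e. a single fused (a * c) + b. */
    static float vmaddfp_elem(float a, float b, float c)
    {
        return fmaf(a, c, b);
    }

    /* vnmsubfp element: float_muladd_negate_c negates the addend (b here)
     * and float_muladd_negate_result negates the final result, giving
     * -((a * c) - b) with a single rounding. */
    static float vnmsubfp_elem(float a, float b, float c)
    {
        return -fmaf(a, c, -b);
    }

The removed open-coded helpers approximated the same fused operation by
widening to float64, multiplying and adding/subtracting there, and rounding
once on conversion back to float32; float32_muladd() expresses that directly
and takes care of the NaN cases that previously needed HANDLE_NAN3.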