author     bde <bde@FreeBSD.org>  2008-02-25 13:33:20 +0000
committer  bde <bde@FreeBSD.org>  2008-02-25 13:33:20 +0000
commit     83268c5f08f2704f42d759a606f22e68c5c972ae (patch)
tree       1e217d6cd9b666a2937b2671e5facc73681f694b /lib/msun/src/e_rem_pio2f.c
parent     bcb6adff032f0c60cc6f679a7c23e322e4b28ef6 (diff)
Change __ieee754_rem_pio2f() to return double instead of float so that
this function and its callers cosf(), sinf() and tanf() don't waste time
converting values from doubles to floats and back for |x| > 9pi/4.  All
these functions were optimized a few years ago to mostly use doubles
internally and across the __kernel*() interfaces, but not across the
__ieee754_rem_pio2f() interface.

This saves about 40 cycles in cosf(), sinf() and tanf() for |x| > 9pi/4
on amd64 (A64), and about 20 cycles on i386 (A64) (except for cosf() and
sinf() in the upper range).  40 cycles is about 35% for 9pi/4 < |x| <=
2**19pi/2 and about 5% for |x| > 2**19pi/2.  The saving is much larger
on amd64 than on i386 since the conversions are not easy to optimize,
except on i386 where some of them are automatic and others are optimized
invalidly.  amd64 is still about 10% slower in cosf() and tanf() in the
lower range due to conversion overhead.

This also gives a tiny speedup for |x| <= 9pi/4 on amd64 (by simplifying
the code).  It also avoids compiler bugs and/or additional slowness in
the conversions on (not yet supported) machines where double_t != double.
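For illustration, here is a minimal sketch of the conversion overhead the
change removes.  The kernel signature matches msun's (__kernel_sindf()
takes a double and returns a float), but the two caller bodies and the
_old-suffixed prototype are hypothetical, not the msun source; the real
callers also dispatch on the returned octant n, which the sketch ignores.

float __kernel_sindf(double x);		/* msun kernel: double in, float out */

int __ieee754_rem_pio2f_old(float x, float *y);	/* old: result as y[0]+y[1] */
int __ieee754_rem_pio2f(float x, double *y);	/* new: result as *y */

static float
sin_reduced_old(float x)
{
	float y[2];

	(void)__ieee754_rem_pio2f_old(x, y);	/* double result rounded to floats */
	/* ... only to be widened straight back to double for the kernel: */
	return (__kernel_sindf((double)y[0] + y[1]));
}

static float
sin_reduced_new(float x)
{
	double y;

	(void)__ieee754_rem_pio2f(x, &y);	/* result stays in double */
	return (__kernel_sindf(y));
}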
Diffstat (limited to 'lib/msun/src/e_rem_pio2f.c')
-rw-r--r--  lib/msun/src/e_rem_pio2f.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/lib/msun/src/e_rem_pio2f.c b/lib/msun/src/e_rem_pio2f.c
index 2cc4669..4c6ffc8 100644
--- a/lib/msun/src/e_rem_pio2f.c
+++ b/lib/msun/src/e_rem_pio2f.c
@@ -19,8 +19,8 @@ __FBSDID("$FreeBSD$");
 
 /* __ieee754_rem_pio2f(x,y)
  *
- * return the remainder of x rem pi/2 in y[0]+y[1]
- * use double precision internally
+ * return the remainder of x rem pi/2 in *y
+ * use double precision for everything except passing x
  * use __kernel_rem_pio2() for large x
  */
 
@@ -42,10 +42,10 @@
 pio2_1  = 1.57079632673412561417e+00, /* 0x3FF921FB, 0x54400000 */
 pio2_1t = 6.07710050650619224932e-11; /* 0x3DD0B461, 0x1A626331 */
 
 int
-__ieee754_rem_pio2f(float x, float *y)
+__ieee754_rem_pio2f(float x, double *y)
 {
 	double w,r,fn;
-	double tx[1],ty[1];
+	double tx[1];
 	float z;
 	int32_t e0,n,ix,hx;
@@ -63,23 +63,20 @@ __ieee754_rem_pio2f(float x, float *y)
 #endif
 	    r  = x-fn*pio2_1;
 	    w  = fn*pio2_1t;
-	    y[0] = r-w;
-	    y[1] = (r-y[0])-w;
+	    *y = r-w;
 	    return n;
 	}
     /*
      * all other (large) arguments
      */
 	if(ix>=0x7f800000) {		/* x is inf or NaN */
-	    y[0]=y[1]=x-x; return 0;
+	    *y=x-x; return 0;
 	}
     /* set z = scalbn(|x|,ilogb(|x|)-23) */
 	e0 = (ix>>23)-150;		/* e0 = ilogb(|x|)-23; */
 	SET_FLOAT_WORD(z, ix - ((int32_t)(e0<<23)));
 	tx[0] = z;
-	n  =  __kernel_rem_pio2(tx,ty,e0,1,0);
-	y[0] = ty[0];
-	y[1] = ty[0] - y[0];
-	if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+	n  =  __kernel_rem_pio2(tx,y,e0,1,0);
+	if(hx<0) {*y = -*y; return -n;}
 	return n;
 }
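No accuracy is lost by collapsing the float pair y[0]+y[1] into a single
double: a double's 53-bit significand comfortably holds the extra
precision the hi+lo float pair used to carry.  A caller consumes the new
interface as in the sketch below, which follows the n&3 quadrant dispatch
that msun's sinf() uses for |x| > 9pi/4; it is illustrative, not a
verbatim copy of s_sinf.c.

#include "math.h"
#include "math_private.h"	/* __ieee754_rem_pio2f() and the kernels */

/* Illustrative large-argument path modeled on msun's sinf(). */
static float
sinf_large(float x)
{
	double y;
	int n = __ieee754_rem_pio2f(x, &y);

	switch (n & 3) {
	case 0:	 return ( __kernel_sindf(y));	/* sin(y)  */
	case 1:	 return ( __kernel_cosdf(y));	/* cos(y)  */
	case 2:	 return (-__kernel_sindf(y));	/* -sin(y) */
	default: return (-__kernel_cosdf(y));	/* -cos(y) */
	}
}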