summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--lib/libc/arm/Symbol.map2
-rw-r--r--lib/libc/arm/aeabi/Makefile.inc4
-rw-r--r--lib/libc/arm/aeabi/aeabi_double.c99
-rw-r--r--lib/libc/arm/aeabi/aeabi_float.c99
-rw-r--r--lib/libc/arm/aeabi/aeabi_vfp.h129
-rw-r--r--lib/libc/arm/aeabi/aeabi_vfp_double.S170
-rw-r--r--lib/libc/arm/aeabi/aeabi_vfp_float.S160
-rw-r--r--lib/msun/arm/Makefile.inc8
-rw-r--r--lib/msun/arm/Symbol.map3
-rw-r--r--lib/msun/arm/fenv-mangle.h53
-rw-r--r--lib/msun/arm/fenv-softfp.c32
-rw-r--r--lib/msun/arm/fenv-vfp.c33
-rw-r--r--lib/msun/arm/fenv.c261
-rw-r--r--lib/msun/arm/fenv.h18
14 files changed, 937 insertions, 134 deletions
diff --git a/lib/libc/arm/Symbol.map b/lib/libc/arm/Symbol.map
index 1c83b0a..a58d443 100644
--- a/lib/libc/arm/Symbol.map
+++ b/lib/libc/arm/Symbol.map
@@ -76,4 +76,6 @@ FBSDprivate_1.0 {
__fixunsdfsi;
__extendsfdf2;
__truncdfsf2;
+
+ _libc_arm_fpu_present;
};
diff --git a/lib/libc/arm/aeabi/Makefile.inc b/lib/libc/arm/aeabi/Makefile.inc
index 379eb23..a42831d 100644
--- a/lib/libc/arm/aeabi/Makefile.inc
+++ b/lib/libc/arm/aeabi/Makefile.inc
@@ -6,6 +6,10 @@ SRCS+= aeabi_atexit.c \
aeabi_double.c \
aeabi_float.c \
aeabi_unwind_cpp.c
+.if ${MACHINE_ARCH:Marmv6*}
+SRCS+= aeabi_vfp_double.S \
+ aeabi_vfp_float.S
+.endif
# Add the aeabi_mem* functions. While they live in compiler-rt they call into
# libc. This causes issues when other parts of libc call these functions.
diff --git a/lib/libc/arm/aeabi/aeabi_double.c b/lib/libc/arm/aeabi/aeabi_double.c
index 5f9065c..274279d 100644
--- a/lib/libc/arm/aeabi/aeabi_double.c
+++ b/lib/libc/arm/aeabi/aeabi_double.c
@@ -32,70 +32,45 @@ __FBSDID("$FreeBSD$");
#include "milieu.h"
#include "softfloat.h"
-flag __unorddf2(float64, float64);
-
-int __aeabi_dcmpeq(float64 a, float64 b)
-{
- return float64_eq(a, b);
-}
-
-int __aeabi_dcmplt(float64 a, float64 b)
-{
- return float64_lt(a, b);
-}
-
-int __aeabi_dcmple(float64 a, float64 b)
-{
- return float64_le(a, b);
-}
-
-int __aeabi_dcmpge(float64 a, float64 b)
-{
- return float64_le(b, a);
-}
-
-int __aeabi_dcmpgt(float64 a, float64 b)
-{
- return float64_lt(b, a);
-}
-
-int __aeabi_dcmpun(float64 a, float64 b)
-{
- return __unorddf2(a, b);
-}
+#include "aeabi_vfp.h"
-int __aeabi_d2iz(float64 a)
-{
- return float64_to_int32_round_to_zero(a);
-}
+extern int _libc_arm_fpu_present;
-float32 __aeabi_d2f(float64 a)
-{
- return float64_to_float32(a);
-}
-
-float64 __aeabi_i2d(int a)
-{
- return int32_to_float64(a);
-}
-
-float64 __aeabi_dadd(float64 a, float64 b)
-{
- return float64_add(a, b);
-}
-
-float64 __aeabi_ddiv(float64 a, float64 b)
-{
- return float64_div(a, b);
-}
+flag __unorddf2(float64, float64);
-float64 __aeabi_dmul(float64 a, float64 b)
-{
- return float64_mul(a, b);
-}
+/* These are written in asm and are only called from this file */
+int __aeabi_dcmpeq_vfp(float64, float64);
+int __aeabi_dcmplt_vfp(float64, float64);
+int __aeabi_dcmple_vfp(float64, float64);
+int __aeabi_dcmpgt_vfp(float64, float64);
+int __aeabi_dcmpge_vfp(float64, float64);
+int __aeabi_dcmpun_vfp(float64, float64);
+int __aeabi_d2iz_vfp(float64);
+float32 __aeabi_d2f_vfp(float64);
+float64 __aeabi_i2d_vfp(int);
+float64 __aeabi_dadd_vfp(float64, float64);
+float64 __aeabi_ddiv_vfp(float64, float64);
+float64 __aeabi_dmul_vfp(float64, float64);
+float64 __aeabi_dsub_vfp(float64, float64);
-float64 __aeabi_dsub(float64 a, float64 b)
-{
- return float64_sub(a, b);
-}
+/*
+ * Depending on the target these will:
+ * On armv6 with a vfp call the above function, or
+ * Call the softfloat function in the 3rd argument.
+ */
+int AEABI_FUNC2(dcmpeq, float64, float64_eq)
+int AEABI_FUNC2(dcmplt, float64, float64_lt)
+int AEABI_FUNC2(dcmple, float64, float64_le)
+int AEABI_FUNC2_REV(dcmpge, float64, float64_le)
+int AEABI_FUNC2_REV(dcmpgt, float64, float64_lt)
+int AEABI_FUNC2(dcmpun, float64, __unorddf2)
+
+int AEABI_FUNC(d2iz, float64, float64_to_int32_round_to_zero)
+float32 AEABI_FUNC(d2f, float64, float64_to_float32)
+float64 AEABI_FUNC(i2d, int, int32_to_float64)
+
+float64 AEABI_FUNC2(dadd, float64, float64_add)
+float64 AEABI_FUNC2(ddiv, float64, float64_div)
+float64 AEABI_FUNC2(dmul, float64, float64_mul)
+float64 AEABI_FUNC2(dsub, float64, float64_sub)
diff --git a/lib/libc/arm/aeabi/aeabi_float.c b/lib/libc/arm/aeabi/aeabi_float.c
index 97751ad..be7a6d6 100644
--- a/lib/libc/arm/aeabi/aeabi_float.c
+++ b/lib/libc/arm/aeabi/aeabi_float.c
@@ -32,70 +32,45 @@ __FBSDID("$FreeBSD$");
#include "milieu.h"
#include "softfloat.h"
-flag __unordsf2(float32, float32);
-
-int __aeabi_fcmpeq(float32 a, float32 b)
-{
- return float32_eq(a, b);
-}
-
-int __aeabi_fcmplt(float32 a, float32 b)
-{
- return float32_lt(a, b);
-}
-
-int __aeabi_fcmple(float32 a, float32 b)
-{
- return float32_le(a, b);
-}
-
-int __aeabi_fcmpge(float32 a, float32 b)
-{
- return float32_le(b, a);
-}
-
-int __aeabi_fcmpgt(float32 a, float32 b)
-{
- return float32_lt(b, a);
-}
-
-int __aeabi_fcmpun(float32 a, float32 b)
-{
- return __unordsf2(a, b);
-}
+#include "aeabi_vfp.h"
-int __aeabi_f2iz(float32 a)
-{
- return float32_to_int32_round_to_zero(a);
-}
+extern int _libc_arm_fpu_present;
-float32 __aeabi_f2d(float32 a)
-{
- return float32_to_float64(a);
-}
-
-float32 __aeabi_i2f(int a)
-{
- return int32_to_float32(a);
-}
-
-float32 __aeabi_fadd(float32 a, float32 b)
-{
- return float32_add(a, b);
-}
-
-float32 __aeabi_fdiv(float32 a, float32 b)
-{
- return float32_div(a, b);
-}
+flag __unordsf2(float32, float32);
-float32 __aeabi_fmul(float32 a, float32 b)
-{
- return float32_mul(a, b);
-}
+/* These are written in asm and are only called from this file */
+int __aeabi_fcmpeq_vfp(float32, float32);
+int __aeabi_fcmplt_vfp(float32, float32);
+int __aeabi_fcmple_vfp(float32, float32);
+int __aeabi_fcmpgt_vfp(float32, float32);
+int __aeabi_fcmpge_vfp(float32, float32);
+int __aeabi_fcmpun_vfp(float32, float32);
+int __aeabi_f2iz_vfp(float32);
+float64 __aeabi_f2d_vfp(float32);
+float32 __aeabi_i2f_vfp(int);
+float32 __aeabi_fadd_vfp(float32, float32);
+float32 __aeabi_fdiv_vfp(float32, float32);
+float32 __aeabi_fmul_vfp(float32, float32);
+float32 __aeabi_fsub_vfp(float32, float32);
-float32 __aeabi_fsub(float32 a, float32 b)
-{
- return float32_sub(a, b);
-}
+/*
+ * Depending on the target these will:
+ * On armv6 with a vfp call the above function, or
+ * Call the softfloat function in the 3rd argument.
+ */
+int AEABI_FUNC2(fcmpeq, float32, float32_eq)
+int AEABI_FUNC2(fcmplt, float32, float32_lt)
+int AEABI_FUNC2(fcmple, float32, float32_le)
+int AEABI_FUNC2_REV(fcmpge, float32, float32_le)
+int AEABI_FUNC2_REV(fcmpgt, float32, float32_lt)
+int AEABI_FUNC2(fcmpun, float32, __unordsf2)
+
+int AEABI_FUNC(f2iz, float32, float32_to_int32_round_to_zero)
+float64 AEABI_FUNC(f2d, float32, float32_to_float64)
+float32 AEABI_FUNC(i2f, int, int32_to_float32)
+
+float32 AEABI_FUNC2(fadd, float32, float32_add)
+float32 AEABI_FUNC2(fdiv, float32, float32_div)
+float32 AEABI_FUNC2(fmul, float32, float32_mul)
+float32 AEABI_FUNC2(fsub, float32, float32_sub)
diff --git a/lib/libc/arm/aeabi/aeabi_vfp.h b/lib/libc/arm/aeabi/aeabi_vfp.h
new file mode 100644
index 0000000..76c2ff0
--- /dev/null
+++ b/lib/libc/arm/aeabi/aeabi_vfp.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef AEABI_VFP_H
+#define AEABI_VFP_H
+
+/*
+ * ASM helper macros. These allow the functions to be changed depending on
+ * the endian-ness we are building for.
+ */
+
+/* Allow the name of the function to be changed depending on the ABI */
+#ifndef __ARM_PCS_VFP
+#define AEABI_ENTRY(x) ENTRY(__aeabi_ ## x ## _vfp)
+#define AEABI_END(x) END(__aeabi_ ## x ## _vfp)
+#else
+#define AEABI_ENTRY(x) ENTRY(__aeabi_ ## x)
+#define AEABI_END(x) END(__aeabi_ ## x)
+#endif
+
+/*
+ * These should be used when a function either takes, or returns a floating
+ * point falue. They will load the data from an ARM to a VFP register(s),
+ * or from a VFP to an ARM register
+ */
+#ifdef __ARMEB__
+#define LOAD_DREG(vreg, reg0, reg1) vmov vreg, reg1, reg0
+#define UNLOAD_DREG(reg0, reg1, vreg) vmov reg1, reg0, vreg
+#else
+#define LOAD_DREG(vreg, reg0, reg1) vmov vreg, reg0, reg1
+#define UNLOAD_DREG(reg0, reg1, vreg) vmov reg0, reg1, vreg
+#endif
+
+#define LOAD_SREGS(vreg0, vreg1, reg0, reg1) vmov vreg0, vreg1, reg0, reg1
+#define LOAD_SREG(vreg, reg) vmov vreg, reg
+#define UNLOAD_SREG(reg, vreg) vmov reg, vreg
+
+/*
+ * C Helper macros
+ */
+
+#if defined(__FreeBSD_ARCH_armv6__) || (defined(__ARM_ARCH) && __ARM_ARCH >= 6)
+/*
+ * Generate a function that will either call into the VFP implementation,
+ * or the soft float version for a given __aeabi_* helper. The function
+ * will take a single argument of the type given by in_type.
+ */
+#define AEABI_FUNC(name, in_type, soft_func) \
+__aeabi_ ## name(in_type a) \
+{ \
+ if (_libc_arm_fpu_present) \
+ return __aeabi_ ## name ## _vfp(a); \
+ else \
+ return soft_func (a); \
+}
+
+/* As above, but takes two arguments of the same type */
+#define AEABI_FUNC2(name, in_type, soft_func) \
+__aeabi_ ## name(in_type a, in_type b) \
+{ \
+ if (_libc_arm_fpu_present) \
+ return __aeabi_ ## name ## _vfp(a, b); \
+ else \
+ return soft_func (a, b); \
+}
+
+/* As above, but with the soft float arguments reversed */
+#define AEABI_FUNC2_REV(name, in_type, soft_func) \
+__aeabi_ ## name(in_type a, in_type b) \
+{ \
+ if (_libc_arm_fpu_present) \
+ return __aeabi_ ## name ## _vfp(a, b); \
+ else \
+ return soft_func (b, a); \
+}
+#else
+/*
+ * Helper macros for when we are only able to use the softfloat
+ * version of these functions, i.e. on arm before armv6.
+ */
+#define AEABI_FUNC(name, in_type, soft_func) \
+__aeabi_ ## name(in_type a) \
+{ \
+ return soft_func (a); \
+}
+
+/* As above, but takes two arguments of the same type */
+#define AEABI_FUNC2(name, in_type, soft_func) \
+__aeabi_ ## name(in_type a, in_type b) \
+{ \
+ return soft_func (a, b); \
+}
+
+/* As above, but with the soft float arguments reversed */
+#define AEABI_FUNC2_REV(name, in_type, soft_func) \
+__aeabi_ ## name(in_type a, in_type b) \
+{ \
+ return soft_func (b, a); \
+}
+#endif
+
+#endif
+
diff --git a/lib/libc/arm/aeabi/aeabi_vfp_double.S b/lib/libc/arm/aeabi/aeabi_vfp_double.S
new file mode 100644
index 0000000..842412b
--- /dev/null
+++ b/lib/libc/arm/aeabi/aeabi_vfp_double.S
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include "aeabi_vfp.h"
+
+.fpu vfp
+.syntax unified
+
+/* int __aeabi_dcmpeq(double, double) */
+AEABI_ENTRY(dcmpeq)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movne r0, #0
+ moveq r0, #1
+ RET
+AEABI_END(dcmpeq)
+
+/* int __aeabi_dcmplt(double, double) */
+AEABI_ENTRY(dcmplt)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movcs r0, #0
+ movlt r0, #1
+ RET
+AEABI_END(dcmplt)
+
+/* int __aeabi_dcmple(double, double) */
+AEABI_ENTRY(dcmple)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movhi r0, #0
+ movls r0, #1
+ RET
+AEABI_END(dcmple)
+
+/* int __aeabi_dcmpge(double, double) */
+AEABI_ENTRY(dcmpge)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movlt r0, #0
+ movge r0, #1
+ RET
+AEABI_END(dcmpge)
+
+/* int __aeabi_dcmpgt(double, double) */
+AEABI_ENTRY(dcmpgt)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movle r0, #0
+ movgt r0, #1
+ RET
+AEABI_END(dcmpgt)
+
+/* int __aeabi_dcmpun(double, double) */
+AEABI_ENTRY(dcmpun)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movvc r0, #0
+ movvs r0, #1
+ RET
+AEABI_END(dcmpun)
+
+/* int __aeabi_d2iz(double) */
+AEABI_ENTRY(d2iz)
+ LOAD_DREG(d0, r0, r1)
+#if 0
+ /*
+ * This should be the correct instruction, but binutils incorrectly
+	 * encodes it as the version that uses FPSCR to determine the rounding.
+ * When binutils is fixed we can use this again.
+ */
+ vcvt.s32.f64 s0, d0
+#else
+ ftosizd s0, d0
+#endif
+ vmov r0, s0
+ RET
+AEABI_END(d2iz)
+
+/* float __aeabi_d2f(double) */
+AEABI_ENTRY(d2f)
+ LOAD_DREG(d0, r0, r1)
+ vcvt.f32.f64 s0, d0
+ UNLOAD_SREG(r0, s0)
+ RET
+AEABI_END(d2f)
+
+/* double __aeabi_i2d(int) */
+AEABI_ENTRY(i2d)
+ vmov s0, r0
+ vcvt.f64.s32 d0, s0
+ UNLOAD_DREG(r0, r1, d0)
+ RET
+AEABI_END(i2d)
+
+/* double __aeabi_dadd(double, double) */
+AEABI_ENTRY(dadd)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vadd.f64 d0, d0, d1
+ UNLOAD_DREG(r0, r1, d0)
+ RET
+AEABI_END(dadd)
+
+/* double __aeabi_ddiv(double, double) */
+AEABI_ENTRY(ddiv)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vdiv.f64 d0, d0, d1
+ UNLOAD_DREG(r0, r1, d0)
+ RET
+AEABI_END(ddiv)
+
+/* double __aeabi_dmul(double, double) */
+AEABI_ENTRY(dmul)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vmul.f64 d0, d0, d1
+ UNLOAD_DREG(r0, r1, d0)
+ RET
+AEABI_END(dmul)
+
+/* double __aeabi_dsub(double, double) */
+AEABI_ENTRY(dsub)
+ LOAD_DREG(d0, r0, r1)
+ LOAD_DREG(d1, r2, r3)
+ vsub.f64 d0, d0, d1
+ UNLOAD_DREG(r0, r1, d0)
+ RET
+AEABI_END(dsub)
+
diff --git a/lib/libc/arm/aeabi/aeabi_vfp_float.S b/lib/libc/arm/aeabi/aeabi_vfp_float.S
new file mode 100644
index 0000000..d81b2b2
--- /dev/null
+++ b/lib/libc/arm/aeabi/aeabi_vfp_float.S
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include "aeabi_vfp.h"
+
+.fpu vfp
+.syntax unified
+
+/* int __aeabi_fcmpeq(float, float) */
+AEABI_ENTRY(fcmpeq)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movne r0, #0
+ moveq r0, #1
+ RET
+AEABI_END(fcmpeq)
+
+/* int __aeabi_fcmplt(float, float) */
+AEABI_ENTRY(fcmplt)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movcs r0, #0
+ movlt r0, #1
+ RET
+AEABI_END(fcmplt)
+
+/* int __aeabi_fcmple(float, float) */
+AEABI_ENTRY(fcmple)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movhi r0, #0
+ movls r0, #1
+ RET
+AEABI_END(fcmple)
+
+/* int __aeabi_fcmpge(float, float) */
+AEABI_ENTRY(fcmpge)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movlt r0, #0
+ movge r0, #1
+ RET
+AEABI_END(fcmpge)
+
+/* int __aeabi_fcmpgt(float, float) */
+AEABI_ENTRY(fcmpgt)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movle r0, #0
+ movgt r0, #1
+ RET
+AEABI_END(fcmpgt)
+
+/* int __aeabi_fcmpun(float, float) */
+AEABI_ENTRY(fcmpun)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movvc r0, #0
+ movvs r0, #1
+ RET
+AEABI_END(fcmpun)
+
+/* int __aeabi_f2iz(float) */
+AEABI_ENTRY(f2iz)
+ LOAD_SREG(s0, r0)
+#if 0
+ /*
+ * This should be the correct instruction, but binutils incorrectly
+	 * encodes it as the version that uses FPSCR to determine the rounding.
+ * When binutils is fixed we can use this again.
+ */
+ vcvt.s32.f32 s0, s0
+#else
+ ftosizs s0, s0
+#endif
+ vmov r0, s0
+ RET
+AEABI_END(f2iz)
+
+/* double __aeabi_f2d(float) */
+AEABI_ENTRY(f2d)
+ LOAD_SREG(s0, r0)
+ vcvt.f64.f32 d0, s0
+ UNLOAD_DREG(r0, r1, d0)
+ RET
+AEABI_END(f2d)
+
+/* float __aeabi_i2f(int) */
+AEABI_ENTRY(i2f)
+ vmov s0, r0
+ vcvt.f32.s32 s0, s0
+ UNLOAD_SREG(r0, s0)
+ RET
+AEABI_END(i2f)
+
+/* float __aeabi_fadd(float, float) */
+AEABI_ENTRY(fadd)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vadd.f32 s0, s0, s1
+ UNLOAD_SREG(r0, s0)
+ RET
+AEABI_END(fadd)
+
+/* float __aeabi_fmul(float, float) */
+AEABI_ENTRY(fdiv)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vdiv.f32 s0, s0, s1
+ UNLOAD_SREG(r0, s0)
+ RET
+AEABI_END(fdiv)
+
+/* float __aeabi_fdiv(float, float) */
+AEABI_ENTRY(fmul)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vmul.f32 s0, s0, s1
+ UNLOAD_SREG(r0, s0)
+ RET
+AEABI_END(fmul)
+
+/* float __aeabi_fsub(float, float) */
+AEABI_ENTRY(fsub)
+ LOAD_SREGS(s0, s1, r0, r1)
+ vsub.f32 s0, s0, s1
+ UNLOAD_SREG(r0, s0)
+ RET
+AEABI_END(fsub)
+
diff --git a/lib/msun/arm/Makefile.inc b/lib/msun/arm/Makefile.inc
index 7d9e10b..09f08d8 100644
--- a/lib/msun/arm/Makefile.inc
+++ b/lib/msun/arm/Makefile.inc
@@ -2,3 +2,11 @@
LDBL_PREC = 53
SYM_MAPS += ${.CURDIR}/arm/Symbol.map
+
+.if ${TARGET_ARCH} == "armv6"
+ARCH_SRCS = fenv-softfp.c fenv-vfp.c
+.endif
+
+CFLAGS.fenv-vfp.c= -mfloat-abi=softfp
+CFLAGS+= ${CFLAGS.${.IMPSRC:T}}
+
diff --git a/lib/msun/arm/Symbol.map b/lib/msun/arm/Symbol.map
index c43d8cf..081294c 100644
--- a/lib/msun/arm/Symbol.map
+++ b/lib/msun/arm/Symbol.map
@@ -15,4 +15,7 @@ FBSD_1.3 {
fegetenv;
feholdexcept;
feupdateenv;
+ feenableexcept;
+ fedisableexcept;
+ fegetexcept;
};
diff --git a/lib/msun/arm/fenv-mangle.h b/lib/msun/arm/fenv-mangle.h
new file mode 100644
index 0000000..476f7b2
--- /dev/null
+++ b/lib/msun/arm/fenv-mangle.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifdef _FENV_MANGLE_H_
+#error Only include fenv-mangle.h once
+#endif
+
+#define _FENV_MANGLE_H_
+
+#ifndef FENV_MANGLE
+#error FENV_MANGLE is undefined
+#endif
+
+#define feclearexcept FENV_MANGLE(feclearexcept)
+#define fegetexceptflag FENV_MANGLE(fegetexceptflag)
+#define fesetexceptflag FENV_MANGLE(fesetexceptflag)
+#define feraiseexcept FENV_MANGLE(feraiseexcept)
+#define fetestexcept FENV_MANGLE(fetestexcept)
+#define fegetround FENV_MANGLE(fegetround)
+#define fesetround FENV_MANGLE(fesetround)
+#define fegetenv FENV_MANGLE(fegetenv)
+#define feholdexcept FENV_MANGLE(feholdexcept)
+#define fesetenv FENV_MANGLE(fesetenv)
+#define feupdateenv FENV_MANGLE(feupdateenv)
+#define feenableexcept FENV_MANGLE(feenableexcept)
+#define fedisableexcept FENV_MANGLE(fedisableexcept)
+#define fegetexcept FENV_MANGLE(fegetexcept)
+
diff --git a/lib/msun/arm/fenv-softfp.c b/lib/msun/arm/fenv-softfp.c
new file mode 100644
index 0000000..c32c1c2
--- /dev/null
+++ b/lib/msun/arm/fenv-softfp.c
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define FENV_MANGLE(x) __softfp_ ##x
+#include "fenv-mangle.h"
+#include "fenv.c"
+
diff --git a/lib/msun/arm/fenv-vfp.c b/lib/msun/arm/fenv-vfp.c
new file mode 100644
index 0000000..fd615f3
--- /dev/null
+++ b/lib/msun/arm/fenv-vfp.c
@@ -0,0 +1,33 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define FENV_MANGLE(x) __vfp_ ##x
+#include "fenv-mangle.h"
+#define __ARM_PCS_VFP
+#include "fenv.c"
+
diff --git a/lib/msun/arm/fenv.c b/lib/msun/arm/fenv.c
index c94f9b4..2dd1933 100644
--- a/lib/msun/arm/fenv.c
+++ b/lib/msun/arm/fenv.c
@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>
+ * Copyright (c) 2013 Andrew Turner <andrew@FreeBSD.ORG>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -29,11 +30,34 @@
#define __fenv_static
#include "fenv.h"
+#if defined(__FreeBSD_ARCH_armv6__) || (defined(__ARM_ARCH) && __ARM_ARCH >= 6)
+#define FENV_ARMv6
+#endif
+
+/* When SOFTFP_ABI is defined we are using the softfp ABI. */
+#if defined(__VFP_FP__) && !defined(__ARM_PCS_VFP)
+#define SOFTFP_ABI
+#endif
+
+
+#ifndef FENV_MANGLE
+/*
+ * Hopefully the system ID byte is immutable, so it's valid to use
+ * this as a default environment.
+ */
+const fenv_t __fe_dfl_env = 0;
+#endif
+
+
+/* If this is a non-mangled softfp version special processing is required */
+#if defined(FENV_MANGLE) || !defined(SOFTFP_ABI) || !defined(FENV_ARMv6)
+
/*
* The following macros map between the softfloat emulator's flags and
* the hardware's FPSR. The hardware this file was written for doesn't
* have rounding control bits, so we stick those in the system ID byte.
*/
+#ifndef __ARM_PCS_VFP
#define __set_env(env, flags, mask, rnd) env = ((flags) \
| (mask)<<_FPUSW_SHIFT \
| (rnd) << 24)
@@ -42,17 +66,12 @@
& FE_ALL_EXCEPT)
#define __env_round(env) (((env) >> 24) & _ROUND_MASK)
#include "fenv-softfloat.h"
+#endif
#ifdef __GNUC_GNU_INLINE__
#error "This file must be compiled with C99 'inline' semantics"
#endif
-/*
- * Hopefully the system ID byte is immutable, so it's valid to use
- * this as a default environment.
- */
-const fenv_t __fe_dfl_env = 0;
-
extern inline int feclearexcept(int __excepts);
extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts);
extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts);
@@ -64,3 +83,233 @@ extern inline int fegetenv(fenv_t *__envp);
extern inline int feholdexcept(fenv_t *__envp);
extern inline int fesetenv(const fenv_t *__envp);
extern inline int feupdateenv(const fenv_t *__envp);
+extern inline int feenableexcept(int __mask);
+extern inline int fedisableexcept(int __mask);
+extern inline int fegetexcept(void);
+
+#else /* !FENV_MANGLE && SOFTFP_ABI */
+/* Set by libc when the VFP unit is enabled */
+extern int _libc_arm_fpu_present;
+
+int __softfp_feclearexcept(int __excepts);
+int __softfp_fegetexceptflag(fexcept_t *__flagp, int __excepts);
+int __softfp_fesetexceptflag(const fexcept_t *__flagp, int __excepts);
+int __softfp_feraiseexcept(int __excepts);
+int __softfp_fetestexcept(int __excepts);
+int __softfp_fegetround(void);
+int __softfp_fesetround(int __round);
+int __softfp_fegetenv(fenv_t *__envp);
+int __softfp_feholdexcept(fenv_t *__envp);
+int __softfp_fesetenv(const fenv_t *__envp);
+int __softfp_feupdateenv(const fenv_t *__envp);
+int __softfp_feenableexcept(int __mask);
+int __softfp_fedisableexcept(int __mask);
+int __softfp_fegetexcept(void);
+
+int __vfp_feclearexcept(int __excepts);
+int __vfp_fegetexceptflag(fexcept_t *__flagp, int __excepts);
+int __vfp_fesetexceptflag(const fexcept_t *__flagp, int __excepts);
+int __vfp_feraiseexcept(int __excepts);
+int __vfp_fetestexcept(int __excepts);
+int __vfp_fegetround(void);
+int __vfp_fesetround(int __round);
+int __vfp_fegetenv(fenv_t *__envp);
+int __vfp_feholdexcept(fenv_t *__envp);
+int __vfp_fesetenv(const fenv_t *__envp);
+int __vfp_feupdateenv(const fenv_t *__envp);
+int __vfp_feenableexcept(int __mask);
+int __vfp_fedisableexcept(int __mask);
+int __vfp_fegetexcept(void);
+
+static int
+__softfp_round_to_vfp(int round)
+{
+
+ switch (round) {
+ case FE_TONEAREST:
+ default:
+ return VFP_FE_TONEAREST;
+ case FE_TOWARDZERO:
+ return VFP_FE_TOWARDZERO;
+ case FE_UPWARD:
+ return VFP_FE_UPWARD;
+ case FE_DOWNWARD:
+ return VFP_FE_DOWNWARD;
+ }
+}
+
+static int
+__softfp_round_from_vfp(int round)
+{
+
+ switch (round) {
+ case VFP_FE_TONEAREST:
+ default:
+ return FE_TONEAREST;
+ case VFP_FE_TOWARDZERO:
+ return FE_TOWARDZERO;
+ case VFP_FE_UPWARD:
+ return FE_UPWARD;
+ case VFP_FE_DOWNWARD:
+ return FE_DOWNWARD;
+ }
+}
+
+int feclearexcept(int __excepts)
+{
+
+ if (_libc_arm_fpu_present)
+ __vfp_feclearexcept(__excepts);
+ __softfp_feclearexcept(__excepts);
+
+ return (0);
+}
+
+int fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+ fexcept_t __vfp_flagp;
+
+ __vfp_flagp = 0;
+ if (_libc_arm_fpu_present)
+ __vfp_fegetexceptflag(&__vfp_flagp, __excepts);
+ __softfp_fegetexceptflag(__flagp, __excepts);
+
+ *__flagp |= __vfp_flagp;
+
+ return (0);
+}
+
+int fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+
+ if (_libc_arm_fpu_present)
+ __vfp_fesetexceptflag(__flagp, __excepts);
+ __softfp_fesetexceptflag(__flagp, __excepts);
+
+ return (0);
+}
+
+int feraiseexcept(int __excepts)
+{
+
+ if (_libc_arm_fpu_present)
+ __vfp_feraiseexcept(__excepts);
+ __softfp_feraiseexcept(__excepts);
+
+ return (0);
+}
+
+int fetestexcept(int __excepts)
+{
+ int __got_excepts;
+
+ __got_excepts = 0;
+ if (_libc_arm_fpu_present)
+ __got_excepts = __vfp_fetestexcept(__excepts);
+ __got_excepts |= __softfp_fetestexcept(__excepts);
+
+ return (__got_excepts);
+}
+
+int fegetround(void)
+{
+
+ if (_libc_arm_fpu_present)
+ return __softfp_round_from_vfp(__vfp_fegetround());
+ return __softfp_fegetround();
+}
+
+int fesetround(int __round)
+{
+
+ if (_libc_arm_fpu_present)
+ __vfp_fesetround(__softfp_round_to_vfp(__round));
+ __softfp_fesetround(__round);
+
+ return (0);
+}
+
+int fegetenv(fenv_t *__envp)
+{
+ fenv_t __vfp_envp;
+
+ __vfp_envp = 0;
+ if (_libc_arm_fpu_present)
+ __vfp_fegetenv(&__vfp_envp);
+ __softfp_fegetenv(__envp);
+ *__envp |= __vfp_envp;
+
+ return (0);
+}
+
+int feholdexcept(fenv_t *__envp)
+{
+ fenv_t __vfp_envp;
+
+ __vfp_envp = 0;
+ if (_libc_arm_fpu_present)
+ __vfp_feholdexcept(&__vfp_envp);
+ __softfp_feholdexcept(__envp);
+ *__envp |= __vfp_envp;
+
+ return (0);
+}
+
+int fesetenv(const fenv_t *__envp)
+{
+
+ if (_libc_arm_fpu_present)
+ __vfp_fesetenv(__envp);
+ __softfp_fesetenv(__envp);
+
+ return (0);
+}
+
+int feupdateenv(const fenv_t *__envp)
+{
+
+ if (_libc_arm_fpu_present)
+ __vfp_feupdateenv(__envp);
+ __softfp_feupdateenv(__envp);
+
+ return (0);
+}
+
+int feenableexcept(int __mask)
+{
+ int __unmasked;
+
+ __unmasked = 0;
+ if (_libc_arm_fpu_present)
+ __unmasked = __vfp_feenableexcept(__mask);
+ __unmasked |= __softfp_feenableexcept(__mask);
+
+ return (__unmasked);
+}
+
+int fedisableexcept(int __mask)
+{
+ int __unmasked;
+
+ __unmasked = 0;
+ if (_libc_arm_fpu_present)
+ __unmasked = __vfp_fedisableexcept(__mask);
+ __unmasked |= __softfp_fedisableexcept(__mask);
+
+ return (__unmasked);
+}
+
+int fegetexcept(void)
+{
+ int __unmasked;
+
+ __unmasked = 0;
+ if (_libc_arm_fpu_present)
+ __unmasked = __vfp_fegetexcept();
+ __unmasked |= __softfp_fegetexcept();
+
+ return (__unmasked);
+}
+
+#endif
+
diff --git a/lib/msun/arm/fenv.h b/lib/msun/arm/fenv.h
index 0605819..280532f 100644
--- a/lib/msun/arm/fenv.h
+++ b/lib/msun/arm/fenv.h
@@ -54,11 +54,16 @@ typedef __uint32_t fexcept_t;
#endif
/* Rounding modes */
+#define VFP_FE_TONEAREST 0x00000000
+#define VFP_FE_UPWARD 0x00400000
+#define VFP_FE_DOWNWARD 0x00800000
+#define VFP_FE_TOWARDZERO 0x00c00000
+
#ifdef __ARM_PCS_VFP
-#define FE_TONEAREST 0x00000000
-#define FE_UPWARD 0x00400000
-#define FE_DOWNWARD 0x00800000
-#define FE_TOWARDZERO 0x00c00000
+#define FE_TONEAREST VFP_FE_TONEAREST
+#define FE_UPWARD VFP_FE_UPWARD
+#define FE_DOWNWARD VFP_FE_DOWNWARD
+#define FE_TOWARDZERO VFP_FE_TOWARDZERO
#else
#define FE_TONEAREST 0x0000
#define FE_TOWARDZERO 0x0001
@@ -92,6 +97,11 @@ int fegetenv(fenv_t *__envp);
int feholdexcept(fenv_t *__envp);
int fesetenv(const fenv_t *__envp);
int feupdateenv(const fenv_t *__envp);
+#if __BSD_VISIBLE
+int feenableexcept(int __mask);
+int fedisableexcept(int __mask);
+int fegetexcept(void);
+#endif
#else /* __ARM_PCS_VFP */
OpenPOWER on IntegriCloud