author     andrew <andrew@FreeBSD.org>    2015-05-12 10:03:14 +0000
committer  andrew <andrew@FreeBSD.org>    2015-05-12 10:03:14 +0000
commit     07deb5ca155fbe831ace29174b2256c2d771d789 (patch)
tree       8c78ecaeb4a389d3842d051785d416b6a727ead2 /lib
parent     c809908d6a30b706fbf371b3e6b5dff3fc17df18 (diff)
Teach bits of libc about Thumb. This adds the if-then (IT) instructions
needed to handle ARM conditional execution under Thumb-2.

While here, fix a bug found by this in the hard-float code: cc is the
opposite of cs. The former is used for 'less than' in floating-point code
and is executed when the C (carry) bit is clear; the latter is used for
'greater than, equal, or unordered' and is executed when the C bit is set.
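
For context, Thumb-2 requires an IT (if-then) instruction immediately before
conditionally executed instructions; the 'ite' form covers a then/else pair
whose condition codes must be the stated condition and its inverse. The
fragment below is a hypothetical stand-alone sketch of the pattern the
'less than' AEABI helpers follow after this change, including the corrected
cs/cc pair; it mirrors aeabi_vfp_double.S but is not copied verbatim from
the tree.

    @ Sketch only: unified syntax, assembled with -mthumb.
    vcmp.f64  d0, d1            @ compare two doubles, flags go to FPSCR
    vmrs      APSR_nzcv, fpscr  @ copy N/Z/C/V from FPSCR to the core flags
    ite       cs                @ if-then-else block predicated on carry
    movcs     r0, #0            @ C set: greater than, equal, or unordered -> 0
    movcc     r0, #1            @ C clear: strictly less than -> 1
    bx        lr                @ return the result in r0

With unified syntax the assembler also accepts it/ite in ARM state, where it
only acts as a consistency check and emits no code, which is why the new
lines can be added unconditionally rather than behind an #ifdef.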
Diffstat (limited to 'lib')
-rw-r--r--  lib/libc/arm/SYS.h                     |  2
-rw-r--r--  lib/libc/arm/aeabi/aeabi_vfp_double.S  |  8
-rw-r--r--  lib/libc/arm/aeabi/aeabi_vfp_float.S   |  8
-rw-r--r--  lib/libc/arm/gen/_setjmp.S             | 17
-rw-r--r--  lib/libc/arm/gen/setjmp.S              | 17
5 files changed, 48 insertions(+), 4 deletions(-)
diff --git a/lib/libc/arm/SYS.h b/lib/libc/arm/SYS.h
index ed1a045..3254c45 100644
--- a/lib/libc/arm/SYS.h
+++ b/lib/libc/arm/SYS.h
@@ -62,6 +62,7 @@
#define _SYSCALL(x) \
_SYSCALL_NOERROR(x); \
+ it cs; \
bcs PIC_SYM(CERROR, PLT)
#define SYSCALL(x) \
@@ -72,6 +73,7 @@
.weak _C_LABEL(__CONCAT(_,x)); \
.set _C_LABEL(__CONCAT(_,x)),_C_LABEL(__CONCAT(__sys_,x)); \
SYSTRAP(x); \
+ it cs; \
bcs PIC_SYM(CERROR, PLT); \
RET
diff --git a/lib/libc/arm/aeabi/aeabi_vfp_double.S b/lib/libc/arm/aeabi/aeabi_vfp_double.S
index 62100d2..aae49f8 100644
--- a/lib/libc/arm/aeabi/aeabi_vfp_double.S
+++ b/lib/libc/arm/aeabi/aeabi_vfp_double.S
@@ -66,6 +66,7 @@ AEABI_ENTRY(dcmpeq)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
+ ite ne
movne r0, #0
moveq r0, #1
RET
@@ -77,8 +78,9 @@ AEABI_ENTRY(dcmplt)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
+ ite cs
movcs r0, #0
- movlt r0, #1
+ movcc r0, #1
RET
AEABI_END(dcmplt)
@@ -88,6 +90,7 @@ AEABI_ENTRY(dcmple)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
+ ite hi
movhi r0, #0
movls r0, #1
RET
@@ -99,6 +102,7 @@ AEABI_ENTRY(dcmpge)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
+ ite lt
movlt r0, #0
movge r0, #1
RET
@@ -110,6 +114,7 @@ AEABI_ENTRY(dcmpgt)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
+ ite le
movle r0, #0
movgt r0, #1
RET
@@ -121,6 +126,7 @@ AEABI_ENTRY(dcmpun)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
+ ite vc
movvc r0, #0
movvs r0, #1
RET
diff --git a/lib/libc/arm/aeabi/aeabi_vfp_float.S b/lib/libc/arm/aeabi/aeabi_vfp_float.S
index c9a9a7e..7de8daf 100644
--- a/lib/libc/arm/aeabi/aeabi_vfp_float.S
+++ b/lib/libc/arm/aeabi/aeabi_vfp_float.S
@@ -62,6 +62,7 @@ AEABI_ENTRY(fcmpeq)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
+ ite ne
movne r0, #0
moveq r0, #1
RET
@@ -72,8 +73,9 @@ AEABI_ENTRY(fcmplt)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
+ ite cs
movcs r0, #0
- movlt r0, #1
+ movcc r0, #1
RET
AEABI_END(fcmplt)
@@ -82,6 +84,7 @@ AEABI_ENTRY(fcmple)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
+ ite hi
movhi r0, #0
movls r0, #1
RET
@@ -92,6 +95,7 @@ AEABI_ENTRY(fcmpge)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
+ ite lt
movlt r0, #0
movge r0, #1
RET
@@ -102,6 +106,7 @@ AEABI_ENTRY(fcmpgt)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
+ ite le
movle r0, #0
movgt r0, #1
RET
@@ -112,6 +117,7 @@ AEABI_ENTRY(fcmpun)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
+ ite vc
movvc r0, #0
movvs r0, #1
RET
diff --git a/lib/libc/arm/gen/_setjmp.S b/lib/libc/arm/gen/_setjmp.S
index 387f8a9..3de9d99 100644
--- a/lib/libc/arm/gen/_setjmp.S
+++ b/lib/libc/arm/gen/_setjmp.S
@@ -85,7 +85,13 @@ ENTRY(_setjmp)
add r0, r0, #(_JB_REG_R4 * 4)
/* Store integer registers */
+#ifndef __thumb__
stmia r0, {r4-r14}
+#else
+ stmia r0, {r4-r12}
+ str r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+ str r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
mov r0, #0x00000000
RET
@@ -120,15 +126,24 @@ ENTRY(_longjmp)
add r0, r0, #(_JB_REG_R4 * 4)
/* Restore integer registers */
+#ifndef __thumb__
ldmia r0, {r4-r14}
+#else
+ ldmia r0, {r4-r12}
+ ldr r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+ ldr r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
/* Validate sp and r14 */
teq sp, #0
+ it ne
teqne r14, #0
+ it eq
beq botch
/* Set return value */
movs r0, r1
+ it eq
moveq r0, #0x00000001
RET
@@ -137,7 +152,7 @@ botch:
#if !defined(_STANDALONE)
bl PIC_SYM(_C_LABEL(longjmperror), PLT)
bl PIC_SYM(_C_LABEL(abort), PLT)
- b . - 8 /* Cannot get here */
+1: b 1b /* Cannot get here */
#else
b .
#endif
diff --git a/lib/libc/arm/gen/setjmp.S b/lib/libc/arm/gen/setjmp.S
index ad4ba38..6269563 100644
--- a/lib/libc/arm/gen/setjmp.S
+++ b/lib/libc/arm/gen/setjmp.S
@@ -90,7 +90,13 @@ ENTRY(setjmp)
/* Store integer registers */
add r0, r0, #(_JB_REG_R4 * 4)
+#ifndef __thumb__
stmia r0, {r4-r14}
+#else
+ stmia r0, {r4-r12}
+ str r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+ str r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
mov r0, #0x00000000
RET
@@ -133,15 +139,24 @@ ENTRY(__longjmp)
add r0, r0, #(_JB_REG_R4 * 4)
/* Restore integer registers */
+#ifndef __thumb__
ldmia r0, {r4-r14}
+#else
+ ldmia r0, {r4-r12}
+ ldr r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+ ldr r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
/* Validate sp and r14 */
teq sp, #0
+ it ne
teqne r14, #0
+ it eq
beq .Lbotch
/* Set return value */
movs r0, r1
+ it eq
moveq r0, #0x00000001
RET
@@ -149,5 +164,5 @@ ENTRY(__longjmp)
.Lbotch:
bl PIC_SYM(_C_LABEL(longjmperror), PLT)
bl PIC_SYM(_C_LABEL(abort), PLT)
- b . - 8 /* Cannot get here */
+1: b 1b /* Cannot get here */
END(__longjmp)