path: root/src/macros.h
author     Anthony Blake <anthonix@me.com>    2012-08-09 13:36:45 +1200
committer  Anthony Blake <anthonix@me.com>    2012-08-09 13:36:45 +1200
commit     829c8fbb2fe38c0f5c45d6f081c53dde73dc605d (patch)
tree       50a3148093431a325aaf876e177524b1883223de /src/macros.h
parent     9993b4b77bd21971c2e1a43dbb45567b692698c7 (diff)
download   ffts-829c8fbb2fe38c0f5c45d6f081c53dde73dc605d.zip
           ffts-829c8fbb2fe38c0f5c45d6f081c53dde73dc605d.tar.gz
Pre- dynamic code generation
Diffstat (limited to 'src/macros.h')
-rw-r--r--   src/macros.h   528
1 file changed, 393 insertions, 135 deletions
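
The substance of this change is that the direct SSE intrinsics (_mm_load_ps, _mm_add_ps, the various shuffles) are hidden behind generic single-precision vector macros (V, VLD, VST, VADD, VSUB, VMUL, VXOR, VSWAPPAIRS, VUNPACKLO, VUNPACKHI, VBLEND), so the same butterfly code compiles against either sse_float.h or neon_float.h depending on HAVE_NEON. The macro definitions themselves are not part of this diff; the sketch below only illustrates plausible SSE-side definitions, inferred from the one-for-one replacements visible in the hunks (e.g. VADD for _mm_add_ps, VSWAPPAIRS for the (2,3,0,1) shuffle).

/* Illustrative sketch only -- the real definitions live in sse_float.h /
 * neon_float.h, which are outside this diff. */
#include <xmmintrin.h>
typedef __m128 V;
#define VLD(p)          _mm_load_ps(p)
#define VST(p, a)       _mm_store_ps(p, a)
#define VADD(a, b)      _mm_add_ps(a, b)
#define VSUB(a, b)      _mm_sub_ps(a, b)
#define VMUL(a, b)      _mm_mul_ps(a, b)
#define VXOR(a, b)      _mm_xor_ps(a, b)
#define VSWAPPAIRS(a)   _mm_shuffle_ps(a, a, _MM_SHUFFLE(2,3,0,1)) /* (a0,a1,a2,a3) -> (a1,a0,a3,a2) */
#define VUNPACKLO(a, b) _mm_shuffle_ps(a, b, _MM_SHUFFLE(1,0,1,0)) /* low complex element of a and of b */
#define VUNPACKHI(a, b) _mm_shuffle_ps(a, b, _MM_SHUFFLE(3,2,3,2)) /* high complex element of a and of b */
#define VBLEND(a, b)    _mm_shuffle_ps(a, b, _MM_SHUFFLE(3,2,1,0)) /* low half of a, high half of b */
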
diff --git a/src/macros.h b/src/macros.h
index b2f44e6..039ee40 100644
--- a/src/macros.h
+++ b/src/macros.h
@@ -1,125 +1,121 @@
#ifndef __MACROS_H__
#define __MACROS_H__
+#include "../config.h"
+
+#ifdef HAVE_NEON
+ #include "neon_float.h"
+#else
+ #include "sse_float.h"
+#endif
+
+
#include "cp_sse.h"
#define __INLINE static inline __attribute__((always_inline))
-#define VLIT4 _mm_set_ps
-
-__m128 MULI_SIGN;
+cdata_t SCALAR_MULI_SIGN;
+V MULI_SIGN;
+V LEAFLUT[12];
-__INLINE __m128 IMULI(__m128 a) {
- __m128 temp = _mm_xor_ps(a, MULI_SIGN);//_mm_set_ps(-0.0f, 0.0f, -0.0f, 0.0f));
- return _mm_shuffle_ps(temp, temp, _MM_SHUFFLE(2,3,0,1));
+__INLINE V IMULI(V a) {
+ return VSWAPPAIRS(VXOR(a, MULI_SIGN));
}
__INLINE void
-S_4(__m128 r0, __m128 r1, __m128 r2, __m128 r3, data_t * restrict o0, data_t * restrict o1, data_t * restrict o2, data_t * restrict o3) {
- __m128 t0, t1, t2, t3;
- _mm_store_ps(o0, r0);
- _mm_store_ps(o1, r1);
- _mm_store_ps(o2, r2);
- _mm_store_ps(o3, r3);
+S_4(V r0, V r1, V r2, V r3, data_t * restrict o0, data_t * restrict o1, data_t * restrict o2, data_t * restrict o3) {
+ V t0, t1, t2, t3;
+ VST(o0, r0); VST(o1, r1); VST(o2, r2); VST(o3, r3);
}
-__INLINE void S_2(__m128 r0, __m128 r1, data_t * restrict o0, data_t * restrict o1) {
- _mm_store_ps(o0, r0);
- _mm_store_ps(o1, r1);
+__INLINE void S_2(V r0, V r1, data_t * restrict o0, data_t * restrict o1) {
+ VST(o0, r0); VST(o1, r1);
}
-__INLINE void L_S2(const data_t * restrict i0, const data_t * restrict i1, __m128 * restrict r0, __m128 * restrict r1) {
- __m128 t0, t1;
- t0 = _mm_load_ps(i0);
- t1 = _mm_load_ps(i1);
- *r0 = _mm_add_ps(t0, t1);
- *r1 = _mm_sub_ps(t0, t1);
+__INLINE void L_S2(const data_t * restrict i0, const data_t * restrict i1, V * restrict r0, V * restrict r1) {
+ V t0, t1;
+ t0 = VLD(i0); t1 = VLD(i1);
+ *r0 = VADD(t0, t1);
+ *r1 = VSUB(t0, t1);
}
__INLINE void
L_2(const data_t * restrict i0, const data_t * restrict i1, const data_t * restrict i2, const data_t * restrict i3,
- __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 t0, t1, t2, t3;
- t0 = _mm_load_ps(i0);
- t1 = _mm_load_ps(i1);
- t2 = _mm_load_ps(i2);
- t3 = _mm_load_ps(i3);
- *r0 = _mm_add_ps (t0, t1);
- *r1 = _mm_sub_ps (t0, t1);
- *r2 = _mm_add_ps (t2, t3);
- *r3 = _mm_sub_ps (t2, t3);
+ V *r0, V *r1, V *r2, V *r3) {
+ V t0, t1, t2, t3;
+ t0 = VLD(i0);
+ t1 = VLD(i1);
+ t2 = VLD(i2);
+ t3 = VLD(i3);
+ *r0 = VADD (t0, t1);
+ *r1 = VSUB (t0, t1);
+ *r2 = VADD (t2, t3);
+ *r3 = VSUB (t2, t3);
}
__INLINE void
L_4(const data_t * restrict i0, const data_t * restrict i1, const data_t * restrict i2, const data_t * restrict i3,
- __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = _mm_load_ps(i0);
- t1 = _mm_load_ps(i1);
- t2 = _mm_load_ps(i2);
- t3 = _mm_load_ps(i3);
- t4 = _mm_add_ps (t0, t1);
- t5 = _mm_sub_ps (t0, t1);
- t6 = _mm_add_ps (t2, t3);
- t7 = IMULI(_mm_sub_ps (t2, t3));
- *r0 = _mm_add_ps (t4, t6);
- *r2 = _mm_sub_ps (t4, t6);
- *r1 = _mm_sub_ps (t5, t7);
- *r3 = _mm_add_ps (t5, t7);
+ V *r0, V *r1, V *r2, V *r3) {
+ V t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3);
+ t4 = VADD (t0, t1);
+ t5 = VSUB (t0, t1);
+ t6 = VADD (t2, t3);
+ t7 = IMULI(VSUB (t2, t3));
+ *r0 = VADD (t4, t6);
+ *r2 = VSUB (t4, t6);
+ *r1 = VSUB (t5, t7);
+ *r3 = VADD (t5, t7);
}
__INLINE void
-K_0(__m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 uk, uk2, zk, zk_d;
- uk = *r0;
- uk2 = *r1;
- zk = _mm_add_ps(*r2, *r3);
- zk_d = IMULI(_mm_sub_ps(*r2, *r3));
- *r0 = _mm_add_ps(uk, zk);
- *r2 = _mm_sub_ps(uk, zk);
- *r1 = _mm_sub_ps(uk2, zk_d);
- *r3 = _mm_add_ps(uk2, zk_d);
+K_0(V *r0, V *r1, V *r2, V *r3) {
+ V uk, uk2, zk, zk_d;
+ uk = *r0; uk2 = *r1;
+ zk = VADD(*r2, *r3);
+ zk_d = IMULI(VSUB(*r2, *r3));
+ *r0 = VADD(uk, zk);
+ *r2 = VSUB(uk, zk);
+ *r1 = VSUB(uk2, zk_d);
+ *r3 = VADD(uk2, zk_d);
}
-__INLINE __m128 IMUL(__m128 d, __m128 re, __m128 im) {
- re = _mm_mul_ps(re, d);
- im = _mm_mul_ps(im, _mm_shuffle_ps(d, d, _MM_SHUFFLE(2,3,0,1)));
- return _mm_sub_ps(re, im);
+__INLINE V IMUL(V d, V re, V im) {
+ re = VMUL(re, d);
+ im = VMUL(im, VSWAPPAIRS(d));
+ return VSUB(re, im);
}
-__INLINE __m128 IMULJ(__m128 d, __m128 re, __m128 im) {
- re = _mm_mul_ps(re, d);
- im = _mm_mul_ps(im, _mm_shuffle_ps(d, d, _MM_SHUFFLE(2,3,0,1)));
- return _mm_add_ps(re, im);
+__INLINE V IMULJ(V d, V re, V im) {
+ re = VMUL(re, d);
+ im = VMUL(im, VSWAPPAIRS(d));
+ return VADD(re, im);
}
__INLINE void
-K_N(__m128 re, __m128 im, __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 uk, uk2, zk_p, zk_n, zk, zk_d;
-
- uk = *r0;
- uk2 = *r1;
+K_N(V re, V im, V *r0, V *r1, V *r2, V *r3) {
+ V uk, uk2, zk_p, zk_n, zk, zk_d;
+ uk = *r0; uk2 = *r1;
zk_p = IMUL(*r2, re, im);
zk_n = IMULJ(*r3, re, im);
+
+ zk = VADD(zk_p, zk_n);
+ zk_d = IMULI(VSUB(zk_p, zk_n));
- zk = _mm_add_ps(zk_p, zk_n);
- zk_d = IMULI(_mm_sub_ps(zk_p, zk_n));
-
- *r2 = _mm_sub_ps(uk, zk);
- *r0 = _mm_add_ps(uk, zk);
- *r3 = _mm_add_ps(uk2, zk_d);
- *r1 = _mm_sub_ps(uk2, zk_d);
+ *r2 = VSUB(uk, zk);
+ *r0 = VADD(uk, zk);
+ *r3 = VADD(uk2, zk_d);
+ *r1 = VSUB(uk2, zk_d);
}
-__INLINE void TX2(__m128 *a, __m128 *b) {
- __m128 TX2_t0 = _mm_shuffle_ps(*a, *b, _MM_SHUFFLE(1,0,1,0));
- __m128 TX2_t1 = _mm_shuffle_ps(*a, *b, _MM_SHUFFLE(3,2,3,2));
+__INLINE void TX2(V *a, V *b) {
+ V TX2_t0 = VUNPACKLO(*a, *b);
+ V TX2_t1 = VUNPACKHI(*a, *b);
*a = TX2_t0; *b = TX2_t1;
}
-__m128 __attribute__((aligned(32))) LEAFLUT[12];
__INLINE void
LEAF_EE(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
- __m128 r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;
+ V r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;
data_t *out0 = out + (*out_offsets)[0];
data_t *out1 = out + (*out_offsets)[1];
@@ -147,7 +143,7 @@ LEAF_EE(size_t ** restrict is, const data_t * restrict in, size_t ** restrict ou
__INLINE void
LEAF_OO(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
- __m128 r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;
+ V r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;
data_t *out0 = out + (*out_offsets)[0];
data_t *out1 = out + (*out_offsets)[1];
@@ -170,32 +166,123 @@ LEAF_OO(size_t ** restrict is, const data_t * restrict in, size_t ** restrict ou
*is += 16;
}
+#ifdef __ARM_NEON__
+__INLINE void
+S_4_1(V r0, V r1, V r2, V r3, data_t * restrict o0, data_t * restrict o1, data_t * restrict o2, data_t * restrict o3) {
+ register V p0 __asm__ ("q0") = r0; register V p1 __asm__ ("q1") = r1; register V p2 __asm__ ("q2") = r2; register V p3 __asm__ ("q3") = r3;
+ __asm__ __volatile__ ("vst4.32 {%q1,%q2}, [%0, :128]!\n\t"
+ "vst4.32 {%q3,%q4}, [%0, :128]!\n\t"
+ :
+ : "r" (o0), "w" (p0), "w" (p1), "w" (p2), "w" (p3)
+ : "memory");
+}
+__INLINE void
+S_4_2(V r0, V r1, V r2, V r3, data_t * restrict o0, data_t * restrict o1, data_t * restrict o2, data_t * restrict o3) {
+ register V p0 __asm__ ("q4") = r0; register V p1 __asm__ ("q5") = r1; register V p2 __asm__ ("q6") = r2; register V p3 __asm__ ("q7") = r3;
+ __asm__ __volatile__ ("vst4.32 {%q1,%q2}, [%0, :128]!\n\t"
+ "vst4.32 {%q3,%q4}, [%0, :128]!\n\t"
+ :
+ : "r" (o0), "w" (p0), "w" (p1), "w" (p2), "w" (p3)
+ : "memory");
+}
+__INLINE void
+LEAF_EE8(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
+ V r0,r1,r2,r3,r4,r5,r6,r7;
+ data_t *out0 = out + (*out_offsets)[0];
+ data_t *out1 = out + (*out_offsets)[1];
+ *out_offsets += 2;
+
+ L_4(in+(*is)[0],in+(*is)[1],in+(*is)[2],in+(*is)[3],&r0,&r1,&r2,&r3);
+ L_2(in+(*is)[4],in+(*is)[5],in+(*is)[6],in+(*is)[7],&r4,&r5,&r6,&r7);
+ K_0(&r0,&r2,&r4,&r6);
+ K_N(LEAFLUT[0], LEAFLUT[1],&r1,&r3,&r5,&r7);
+
+ register V p0 __asm__ ("q0") = r0;
+ register V p1 __asm__ ("q1") = r2;
+ register V p2 __asm__ ("q2") = r4;
+ register V p3 __asm__ ("q3") = r6;
+ register V p4 __asm__ ("q4") = r1;
+ register V p5 __asm__ ("q5") = r3;
+ register V p6 __asm__ ("q6") = r5;
+ register V p7 __asm__ ("q7") = r7;
+
+ __asm__ __volatile__ ("vswp %f1,%e6\n\t"
+ "vswp %f2,%e7\n\t"
+ "vswp %f3,%e8\n\t"
+ "vswp %f4,%e9\n\t"
+ "vst4.32 {%q1,%q2}, [%0, :128]!\n\t"
+ "vst4.32 {%q3,%q4}, [%0, :128]!\n\t"
+ "vst4.32 {%q6,%q7}, [%5, :128]!\n\t"
+ "vst4.32 {%q8,%q9}, [%5, :128]!\n\t"
+ :
+ : "r" (out0), "w" (p0), "w" (p1), "w" (p2), "w" (p3),
+ "r" (out1), "w" (p4), "w" (p5), "w" (p6), "w" (p7)
+ : "memory");
+//TX2(&r0,&r1); TX2(&r2,&r3); TX2(&r4,&r5); TX2(&r6,&r7);
+//S_4_1(r0,r2,r4,r6,out0+0,out0+4,out0+8,out0+12);
+//S_4_2(r1,r3,r5,r7,out1+0,out1+4,out1+8,out1+12);
+ *is += 8;
+}
+__INLINE void
+LEAF_OO8(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
+ V r0,r1,r2,r3,r4,r5,r6,r7;
+ data_t *out0 = out + (*out_offsets)[0];
+ data_t *out1 = out + (*out_offsets)[1];
+ *out_offsets += 2;
+ L_4(in+(*is)[0],in+(*is)[1],in+(*is)[2],in+(*is)[3],&r0,&r1,&r2,&r3);
+ L_4(in+(*is)[4],in+(*is)[5],in+(*is)[6],in+(*is)[7],&r4,&r5,&r6,&r7);
+ register V p0 __asm__ ("q0") = r0;
+ register V p1 __asm__ ("q1") = r2;
+ register V p2 __asm__ ("q2") = r4;
+ register V p3 __asm__ ("q3") = r6;
+ register V p4 __asm__ ("q4") = r1;
+ register V p5 __asm__ ("q5") = r3;
+ register V p6 __asm__ ("q6") = r5;
+ register V p7 __asm__ ("q7") = r7;
+ __asm__ __volatile__ ("vswp %f1,%e6\n\t"
+ "vswp %f2,%e7\n\t"
+ "vswp %f3,%e8\n\t"
+ "vswp %f4,%e9\n\t"
+ "vst4.32 {%q1,%q2}, [%0, :128]!\n\t"
+ "vst4.32 {%q3,%q4}, [%0, :128]!\n\t"
+ "vst4.32 {%q6,%q7}, [%5, :128]!\n\t"
+ "vst4.32 {%q8,%q9}, [%5, :128]!\n\t"
+ :
+ : "r" (out0), "w" (p0), "w" (p1), "w" (p2), "w" (p3),
+ "r" (out1), "w" (p4), "w" (p5), "w" (p6), "w" (p7)
+ : "memory");
+//TX2(&r0,&r1); TX2(&r2,&r3); TX2(&r4,&r5); TX2(&r6,&r7);
+//S_4_1(r0,r2,r4,r6,out0+0,out0+4,out0+8,out0+12);
+//S_4_2(r1,r3,r5,r7,out1+0,out1+4,out1+8,out1+12);
+ *is += 8;
+}
+#endif
__INLINE void
L_4_4(const data_t * restrict i0, const data_t * restrict i1, const data_t * restrict i2, const data_t * restrict i3,
- __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = _mm_load_ps(i0); t1 = _mm_load_ps(i1); t2 = _mm_load_ps(i2); t3 = _mm_load_ps(i3);
- t4 = _mm_add_ps(t0, t1);
- t5 = _mm_sub_ps(t0, t1);
- t6 = _mm_add_ps(t2, t3);
- t7 = IMULI(_mm_sub_ps(t2, t3));
- t0 = _mm_add_ps(t4, t6);
- t2 = _mm_sub_ps(t4, t6);
- t1 = _mm_sub_ps(t5, t7);
- t3 = _mm_add_ps(t5, t7);
+ V *r0, V *r1, V *r2, V *r3) {
+ V t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3);
+ t4 = VADD(t0, t1);
+ t5 = VSUB(t0, t1);
+ t6 = VADD(t2, t3);
+ t7 = IMULI(VSUB(t2, t3));
+ t0 = VADD(t4, t6);
+ t2 = VSUB(t4, t6);
+ t1 = VSUB(t5, t7);
+ t3 = VADD(t5, t7);
TX2(&t0,&t1);
TX2(&t2,&t3);
*r0 = t0; *r2 = t1; *r1 = t2; *r3 = t3; }
__INLINE void
L_2_2(const data_t * restrict i0, const data_t * restrict i1, const data_t * restrict i2, const data_t * restrict i3,
- __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = _mm_load_ps(i0); t1 = _mm_load_ps(i1); t2 = _mm_load_ps(i2); t3 = _mm_load_ps(i3); t4 = _mm_add_ps(t0, t1);
- t5 = _mm_sub_ps(t0, t1);
- t6 = _mm_add_ps(t2, t3);
- t7 = _mm_sub_ps(t2, t3);
+ V *r0, V *r1, V *r2, V *r3) {
+ V t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3); t4 = VADD(t0, t1);
+ t5 = VSUB(t0, t1);
+ t6 = VADD(t2, t3);
+ t7 = VSUB(t2, t3);
TX2(&t4,&t5);
TX2(&t6,&t7);
*r0 = t4; *r2 = t5; *r1 = t6; *r3 = t7;
@@ -203,52 +290,49 @@ L_2_2(const data_t * restrict i0, const data_t * restrict i1, const data_t * res
__INLINE void
L_2_4(const data_t * restrict i0, const data_t * restrict i1, const data_t * restrict i2, const data_t * restrict i3,
- __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = _mm_load_ps(i0); t1 = _mm_load_ps(i1); t2 = _mm_load_ps(i2); t3 = _mm_load_ps(i3);
- t4 = _mm_add_ps(t0, t1);
- t5 = _mm_sub_ps(t0, t1);
- t6 = _mm_add_ps(t2, t3);
- t7 = _mm_sub_ps(t2, t3);
- *r0 = _mm_shuffle_ps(t4, t5, _MM_SHUFFLE(1,0,1,0));
- *r1 = _mm_shuffle_ps(t6, t7, _MM_SHUFFLE(1,0,1,0));
+ V *r0, V *r1, V *r2, V *r3) {
+ V t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3);
+ t4 = VADD(t0, t1);
+ t5 = VSUB(t0, t1);
+ t6 = VADD(t2, t3);
+ t7 = VSUB(t2, t3);
+ *r0 = VUNPACKLO(t4, t5);
+ *r1 = VUNPACKLO(t6, t7);
t5 = IMULI(t5);
- t0 = _mm_add_ps(t6, t4);
- t2 = _mm_sub_ps(t6, t4);
- t1 = _mm_sub_ps(t7, t5);
- t3 = _mm_add_ps(t7, t5);
- *r3 = _mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,2,3,2));
- *r2 = _mm_shuffle_ps(t2, t3, _MM_SHUFFLE(3,2,3,2));
+ t0 = VADD(t6, t4);
+ t2 = VSUB(t6, t4);
+ t1 = VSUB(t7, t5);
+ t3 = VADD(t7, t5);
+ *r3 = VUNPACKHI(t0, t1);
+ *r2 = VUNPACKHI(t2, t3);
}
__INLINE void
L_4_2(const data_t * restrict i0, const data_t * restrict i1, const data_t * restrict i2, const data_t * restrict i3,
- __m128 *r0, __m128 *r1, __m128 *r2, __m128 *r3) {
- __m128 t0, t1, t2, t3, t4, t5, t6, t7;
- t0 = _mm_load_ps(i0);
- t1 = _mm_load_ps(i1);
- t6 = _mm_load_ps(i2);
- t7 = _mm_load_ps(i3);
- t2 = _mm_shuffle_ps(t6, t7, _MM_SHUFFLE(3,2,1,0));
- t3 = _mm_shuffle_ps(t7, t6, _MM_SHUFFLE(3,2,1,0));
- t4 = _mm_add_ps(t0, t1);
- t5 = _mm_sub_ps(t0, t1);
- t6 = _mm_add_ps(t2, t3);
- t7 = _mm_sub_ps(t2, t3);
- *r2 = _mm_shuffle_ps(t4, t5, _MM_SHUFFLE(3,2,3,2));
- *r3 = _mm_shuffle_ps(t6, t7, _MM_SHUFFLE(3,2,3,2));
+ V *r0, V *r1, V *r2, V *r3) {
+ V t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = VLD(i0); t1 = VLD(i1); t6 = VLD(i2); t7 = VLD(i3);
+ t2 = VBLEND(t6, t7);
+ t3 = VBLEND(t7, t6);
+ t4 = VADD(t0, t1);
+ t5 = VSUB(t0, t1);
+ t6 = VADD(t2, t3);
+ t7 = VSUB(t2, t3);
+ *r2 = VUNPACKHI(t4, t5);
+ *r3 = VUNPACKHI(t6, t7);
t7 = IMULI(t7);
- t0 = _mm_add_ps(t4, t6);
- t2 = _mm_sub_ps(t4, t6);
- t1 = _mm_sub_ps(t5, t7);
- t3 = _mm_add_ps(t5, t7);
- *r0 = _mm_shuffle_ps(t0, t1, _MM_SHUFFLE(1,0,1,0));
- *r1 = _mm_shuffle_ps(t2, t3, _MM_SHUFFLE(1,0,1,0));
+ t0 = VADD(t4, t6);
+ t2 = VSUB(t4, t6);
+ t1 = VSUB(t5, t7);
+ t3 = VADD(t5, t7);
+ *r0 = VUNPACKLO(t0, t1);
+ *r1 = VUNPACKLO(t2, t3);
}
__INLINE void
LEAF_OE(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
- __m128 r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15,r16_17,r18_19,r20_21,r22_23,r24_25,r26_27,r28_29,r30_31;
+ V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15,r16_17,r18_19,r20_21,r22_23,r24_25,r26_27,r28_29,r30_31;
data_t *out0 = out + (*out_offsets)[0];
data_t *out1 = out + (*out_offsets)[1];
@@ -273,7 +357,7 @@ LEAF_OE(size_t ** restrict is, const data_t * restrict in, size_t ** restrict ou
__INLINE void
LEAF_EO(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
- __m128 r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15,r16_17,r18_19,r20_21,r22_23,r24_25,r26_27,r28_29,r30_31;
+ V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15,r16_17,r18_19,r20_21,r22_23,r24_25,r26_27,r28_29,r30_31;
data_t *out0 = out + (*out_offsets)[0];
data_t *out1 = out + (*out_offsets)[1];
@@ -295,6 +379,180 @@ LEAF_EO(size_t ** restrict is, const data_t * restrict in, size_t ** restrict ou
*is += 16;
}
+#ifdef __ARM_NEON__
+__INLINE void
+LEAF_OE8(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
+ V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15;
+ data_t *out0 = out + (*out_offsets)[0];
+ data_t *out1 = out + (*out_offsets)[1];
+ *out_offsets += 2;
+
+ L_4_2(in+(*is)[0],in+(*is)[1],in+(*is)[2],in+(*is)[3],&r0_1,&r2_3,&r12_13,&r14_15);
+ L_4_4(in+(*is)[4],in+(*is)[5],in+(*is)[6],in+(*is)[7],&r4_5,&r6_7,&r8_9,&r10_11);
+ S_4_1(r0_1,r2_3,r4_5,r6_7,out0+0,out0+4,out0+8,out0+12);
+ K_N(LEAFLUT[6],LEAFLUT[7],&r8_9,&r10_11,&r12_13,&r14_15);
+ S_4_2(r8_9,r10_11,r12_13,r14_15,out1+0,out1+4,out1+8,out1+12);
+ *is += 8;
+}
+__INLINE void
+LEAF_EO8(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) {
+ V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15;
+ data_t *out0 = out + (*out_offsets)[0];
+ data_t *out1 = out + (*out_offsets)[1];
+ *out_offsets += 2;
+
+ L_4_4(in+(*is)[0],in+(*is)[1],in+(*is)[2],in+(*is)[3],&r0_1,&r2_3,&r8_9,&r10_11);
+ L_2_4(in+(*is)[4],in+(*is)[5],in+(*is)[6],in+(*is)[7],&r4_5,&r6_7,&r14_15,&r12_13);
+ S_4_1(r8_9,r10_11,r12_13,r14_15,out1+0,out1+4,out1+8,out1+12);
+ K_N(LEAFLUT[6],LEAFLUT[7],&r0_1,&r2_3,&r4_5,&r6_7);
+ S_4_2(r0_1,r2_3,r4_5,r6_7,out0+0,out0+4,out0+8,out0+12);
+
+ *is += 8;
+}
+#endif
+__INLINE void
+firstpass_32(const data_t * restrict in, data_t * restrict out, ffts_plan_t * restrict p) {
+ V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15,r16_17,r18_19,r20_21,r22_23,r24_25,r26_27,r28_29,r30_31;
+ float *LUT8 = p->ws[0];
+ float *LUT16 = p->ws[1];
+ float *LUT32 = p->ws[2];
+
+ L_4_4(in+0,in+32,in+16,in+48,&r0_1,&r2_3,&r16_17,&r18_19);
+ L_2_2(in+8,in+40,in+56,in+24,&r4_5,&r6_7,&r20_21,&r22_23);
+ K_N(VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7);
+ L_4_2(in+4,in+36,in+20,in+52,&r8_9,&r10_11,&r28_29,&r30_31);
+ L_4_4(in+60,in+28,in+12,in+44,&r12_13,&r14_15,&r24_25,&r26_27);
+ K_N(VLD(LUT16),VLD(LUT16+4),&r0_1,&r4_5,&r8_9,&r12_13);
+ K_N(VLD(LUT16+8),VLD(LUT16+12),&r2_3,&r6_7,&r10_11,&r14_15);
+ K_N(VLD(LUT8),VLD(LUT8+4),&r16_17,&r18_19,&r20_21,&r22_23);
+ K_N(VLD(LUT8),VLD(LUT8+4),&r24_25,&r26_27,&r28_29,&r30_31);
+ K_N(VLD(LUT32),VLD(LUT32+4),&r0_1,&r8_9,&r16_17,&r24_25);
+ S_4(r0_1,r8_9,r16_17,r24_25,out+0,out+16,out+32,out+48);
+ K_N(VLD(LUT32+8),VLD(LUT32+12),&r2_3,&r10_11,&r18_19,&r26_27);
+ S_4(r2_3,r10_11,r18_19,r26_27,out+4,out+20,out+36,out+52);
+ K_N(VLD(LUT32+16),VLD(LUT32+20),&r4_5,&r12_13,&r20_21,&r28_29);
+ S_4(r4_5,r12_13,r20_21,r28_29,out+8,out+24,out+40,out+56);
+ K_N(VLD(LUT32+24),VLD(LUT32+28),&r6_7,&r14_15,&r22_23,&r30_31);
+ S_4(r6_7,r14_15,r22_23,r30_31,out+12,out+28,out+44,out+60);
+
+}
+
+__INLINE void
+firstpass_16(const data_t * restrict in, data_t * restrict out, ffts_plan_t * restrict p) {
+ V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15;
+ float *LUT8 = p->ws[0];
+ float *LUT16 = p->ws[1];
+
+ L_4_4(in+0,in+16,in+8,in+24,&r0_1,&r2_3,&r8_9,&r10_11);
+ L_2_4(in+4,in+20,in+28,in+12,&r4_5,&r6_7,&r14_15,&r12_13);
+ K_N(VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7);
+ K_N(VLD(LUT16),VLD(LUT16+4),&r0_1,&r4_5,&r8_9,&r12_13);
+ S_4(r0_1,r4_5,r8_9,r12_13,out+0,out+8,out+16,out+24);
+ K_N(VLD(LUT16+8),VLD(LUT16+12),&r2_3,&r6_7,&r10_11,&r14_15);
+ S_4(r2_3,r6_7,r10_11,r14_15,out+4,out+12,out+20,out+28);
+}
+__INLINE void
+firstpass_8(const data_t * restrict in, data_t * restrict out, ffts_plan_t * restrict p) {
+ V r0_1,r2_3,r4_5,r6_7;
+ float *LUT8 = p->ws[0];
+ L_4_2(in+0,in+8,in+4,in+12,&r0_1,&r2_3,&r4_5,&r6_7);
+ K_N(VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7);
+ S_4(r0_1,r2_3,r4_5,r6_7,out+0,out+4,out+8,out+12);
+}
+__INLINE void
+firstpass_4_f(const data_t * restrict in, data_t * restrict out, ffts_plan_t * restrict p) {
+ cdata_t *i = (cdata_t *)in, *o = (cdata_t *)out;
+ cdata_t t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = i[0]; t1 = i[2]; t2 = i[1]; t3 = i[3];
+ t4 = t0 + t1;
+ t5 = t0 - t1;
+ t6 = t2 + t3;
+ t7 = (t2 - t3);
+ t7 = (creal(t7))*I - (cimag(t7));
+ o[0] = t4 + t6;
+ o[2] = t4 - t6;
+ o[1] = t5 - t7;
+ o[3] = t5 + t7;
+}
+__INLINE void
+firstpass_4_b(const data_t * restrict in, data_t * restrict out, ffts_plan_t * restrict p) {
+ cdata_t *i = (cdata_t *)in, *o = (cdata_t *)out;
+ cdata_t t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = i[0]; t1 = i[2]; t2 = i[1]; t3 = i[3];
+ t4 = t0 + t1;
+ t5 = t0 - t1;
+ t6 = t2 + t3;
+ t7 = (t2 - t3);
+ t7 = -(creal(t7))*I + (cimag(t7));
+ o[0] = t4 + t6;
+ o[2] = t4 - t6;
+ o[1] = t5 - t7;
+ o[3] = t5 + t7;
+}
+__INLINE void
+firstpass_2(const data_t * restrict in, data_t * restrict out, ffts_plan_t * restrict p) {
+ cdata_t t0, t1, r0,r1;
+ t0 = ((cdata_t *)in)[0]; t1 = ((cdata_t *)in)[1];
+ r0 = t0 + t1; r1 = t0 - t1;
+ ((cdata_t *)out)[0] = r0;
+ ((cdata_t *)out)[1] = r1;
+}
+
+__INLINE void X_8(data_t * restrict data0, size_t N, const data_t * restrict LUT) {
+ data_t *data2 = data0 + 2*N/4;
+ data_t *data4 = data0 + 4*N/4;
+ data_t *data6 = data0 + 6*N/4;
+ data_t *data1 = data0 + 1*N/4;
+ data_t *data3 = data0 + 3*N/4;
+ data_t *data5 = data0 + 5*N/4;
+ data_t *data7 = data0 + 7*N/4;
+ size_t k, n4 = N/4;
+
+ for(k=N/8/2;k>0;--k) {
+ V r0, r1, r2, r3, r4, r5, r6, r7;
+ r0 = VLD(data0);
+ r1 = VLD(data1);
+ r2 = VLD(data2);
+ r3 = VLD(data3);
+ K_N(VLD(LUT), VLD(LUT+4), &r0, &r1, &r2, &r3);
+ r4 = VLD(data4);
+ r6 = VLD(data6);
+ K_N(VLD(LUT+8), VLD(LUT+12), &r0, &r2, &r4, &r6);
+ r5 = VLD(data5);
+ r7 = VLD(data7);
+ K_N(VLD(LUT+16), VLD(LUT+20), &r1, &r3, &r5, &r7);
+ LUT += 24;
+ VST(data0, r0); data0 += 4;
+ VST(data1, r1); data1 += 4;
+ VST(data2, r2); data2 += 4;
+ VST(data3, r3); data3 += 4;
+ VST(data4, r4); data4 += 4;
+ VST(data5, r5); data5 += 4;
+ VST(data6, r6); data6 += 4;
+ VST(data7, r7); data7 += 4;
+ }
+}
+
+__INLINE void X_4(data_t * restrict data, size_t N, const data_t * restrict LUT) {
+
+ size_t i;
+ for(i=0;i<N/4/2;i++) {
+ V uk = VLD(data);
+ V uk2 = VLD(data + 2*N/4);
+ V zk_p = VLD(data + 4*N/4);
+ V zk_n = VLD(data + 6*N/4);
+
+ K_N(VLD(LUT), VLD(LUT+4), &uk, &uk2, &zk_p, &zk_n);
+
+ VST(data, uk);
+ VST(data + 2*N/4, uk2);
+ VST(data + 4*N/4, zk_p);
+ VST(data + 6*N/4, zk_n);
+
+ LUT += 8;
+ data += 4;
+ }
+}
#endif
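
For reference, the arithmetic that IMULI and K_0 perform on each packed complex value can be written out in scalar complex form as below. This is an illustrative rendering only, not code from the repository; it assumes the interleaved (re, im) layout implied by IMULI, whose sign-xor followed by a pair swap maps (re, im) to (-im, re), i.e. a multiplication by i.

/* Illustrative scalar equivalent of IMULI and K_0 (not part of the
 * repository): one complex element per variable instead of a SIMD
 * vector holding two interleaved complex values. */
#include <complex.h>

static inline float complex imuli(float complex a) {
    /* negate the imaginary part, then swap: (re, im) -> (-im, re) */
    return I * a;
}

static inline void k_0(float complex *r0, float complex *r1,
                       float complex *r2, float complex *r3) {
    float complex uk = *r0, uk2 = *r1;
    float complex zk   = *r2 + *r3;
    float complex zk_d = imuli(*r2 - *r3);
    *r0 = uk  + zk;
    *r2 = uk  - zk;
    *r1 = uk2 - zk_d;
    *r3 = uk2 + zk_d;
}
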