Diffstat (limited to 'contrib/llvm/tools/clang/lib/Headers/smmintrin.h')
 contrib/llvm/tools/clang/lib/Headers/smmintrin.h | 127
 1 file changed, 71 insertions(+), 56 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
index 2b8b321..2fab50e 100644
--- a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -57,23 +57,34 @@
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
-#define _mm_round_ps(X, Y) __builtin_ia32_roundps((X), (Y))
-#define _mm_round_ss(X, Y, M) __builtin_ia32_roundss((X), (Y), (M))
-#define _mm_round_pd(X, M) __builtin_ia32_roundpd((X), (M))
-#define _mm_round_sd(X, Y, M) __builtin_ia32_roundsd((X), (Y), (M))
+#define _mm_round_ps(X, M) __extension__ ({ \
+ __m128 __X = (X); \
+ (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ __m128d __X = (X); \
+ (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
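
Usage sketch (illustrative, not part of the patch; assumes an SSE4.1 build such as -msse4.1): the rewritten macros route each vector argument through a typed temporary and add the (__v4sf)/(__v2df) casts the builtins expect, while the rounding mode stays an integer constant expression because it becomes the instruction's immediate.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 v = _mm_set_ps(3.7f, -1.2f, 0.5f, 2.5f);
        __m128 r = _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
        float o[4];
        _mm_storeu_ps(o, r);
        printf("%g %g %g %g\n", o[0], o[1], o[2], o[3]); /* rounds half to even */
        return 0;
    }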
/* SSE4 Packed Blending Intrinsics. */
-static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
-{
- return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
-}
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ __m128d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })
-static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
-{
- return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
-}
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ __m128 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
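
Usage sketch (illustrative, assumes -msse4.1): turning _mm_blend_ps/_mm_blend_pd into macros keeps the blend mask a compile-time immediate; mask bit i selects lane i from the second operand.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);    /* lanes 0..3: 1 2 3 4 */
        __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);
        __m128 r = _mm_blend_ps(a, b, 0x5);               /* lanes 0 and 2 from b */
        float o[4];
        _mm_storeu_ps(o, r);
        printf("%g %g %g %g\n", o[0], o[1], o[2], o[3]);  /* 10 2 30 4 */
        return 0;
    }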
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
@@ -96,11 +107,10 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
(__v16qi)__M);
}
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
-{
- return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
-}
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -116,8 +126,15 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
}
/* SSE4 Floating Point Dot Product Instructions. */
-#define _mm_dp_ps(X, Y, M) __builtin_ia32_dpps ((X), (Y), (M))
-#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd ((X), (Y), (M))
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
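
Sketch of the _mm_dp_ps mask (illustrative, assumes -msse4.1): bits 7:4 choose which input lanes enter the dot product and bits 3:0 choose which result lanes receive the sum.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 x = _mm_set_ps(0.0f, 3.0f, 2.0f, 1.0f);    /* lanes 0..3: 1 2 3 0 */
        __m128 y = _mm_set_ps(0.0f, 6.0f, 5.0f, 4.0f);    /* lanes 0..3: 4 5 6 0 */
        __m128 d = _mm_dp_ps(x, y, 0x7F);  /* lanes 0..2: 1*4 + 2*5 + 3*6 = 32, broadcast to all lanes */
        float o[4];
        _mm_storeu_ps(o, d);
        printf("%g %g %g %g\n", o[0], o[1], o[2], o[3]);  /* 32 32 32 32 */
        return 0;
    }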
/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -198,14 +215,14 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#endif /* __x86_64__ */
@@ -213,12 +230,12 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
* as a zero extended value, so it is unsigned.
*/
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- (unsigned char)__a[N];}))
+ (unsigned char)__a[(N)];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- (unsigned)__a[N];}))
+ (unsigned)__a[(N)];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[N];}))
+ __a[(N)];}))
#endif /* __x86_64 */
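
Usage sketch for the insert/extract macros (illustrative, assumes -msse4.1); per the comment above, _mm_extract_epi8 zero-extends the selected byte.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v = _mm_set1_epi8((char)0xF0);
        v = _mm_insert_epi8(v, 0x7F, 5);       /* overwrite byte 5 */
        printf("byte5=0x%X byte0=0x%X\n",
               _mm_extract_epi8(v, 5),         /* 0x7F */
               _mm_extract_epi8(v, 0));        /* 0xF0, zero-extended */
        return 0;
    }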
/* SSE4 128-bit Packed Integer Comparisons. */
@@ -242,13 +259,13 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((V), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
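
The one-line change above is a genuine bug fix: the old expansion tested V against itself and ignored the mask. Sketch of the corrected behavior (illustrative, assumes -msse4.1): the result is nonzero iff (M & V) has no bits set.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v    = _mm_set_epi32(0, 0, 0, 0x00FF);
        __m128i mask = _mm_set_epi32(-1, -1, -1, 0xFF00);
        /* v is not all zero, but no set bit of v overlaps mask, so this prints 1 */
        printf("%d\n", _mm_test_all_zeros(mask, v));
        return 0;
    }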
/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pcmpeqq((__v2di)__V1, (__v2di)__V2);
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
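
Sketch (illustrative, assumes -msse4.1): the element-wise == on __v2di yields all-ones in equal lanes and zero otherwise, the same result pcmpeqq produced, while letting clang see the comparison directly.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i a = _mm_set_epi32(0, 7, 0, 9);   /* 64-bit lanes: 9, 7 */
        __m128i b = _mm_set_epi32(0, 7, 0, 8);   /* 64-bit lanes: 8, 7 */
        long long o[2];
        _mm_storeu_si128((__m128i *)o, _mm_cmpeq_epi64(a, b));
        printf("%lld %lld\n", o[0], o[1]);       /* 0 -1 */
        return 0;
    }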
/* SSE4 Packed Integer Sign-Extension. */
@@ -333,7 +350,16 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
-#define _mm_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw128((X), (Y), (M))
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ __m128i __X = (X); \
+ __m128i __Y = (Y); \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
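
Usage sketch for the newly added _mm_minpos_epu16 (illustrative, assumes -msse4.1): the result carries the minimum unsigned 16-bit element in lane 0 and its lowest index in lane 1.

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v = _mm_setr_epi16(9, 4, 7, 3, 8, 3, 6, 5);
        __m128i r = _mm_minpos_epu16(v);
        printf("min=%d index=%d\n",
               _mm_extract_epi16(r, 0),   /* 3 */
               _mm_extract_epi16(r, 1));  /* 3 (first occurrence) */
        return 0;
    }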
/* These definitions are normally in nmmintrin.h, but gcc puts them in here
so we'll do the same. */
@@ -371,20 +397,20 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
#define _mm_cmpestrm(A, LA, B, LB, M) \
__builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
-#define _mm_cmpestri(X, LX, Y, LY, M) \
+#define _mm_cmpestri(A, LA, B, LB, M) \
__builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
-#define _mm_cmpistra(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistria128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrc(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistric128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistro(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistrio128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrs(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistris128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrz(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistriz128((A), (LA), (B), (LB), (M))
+#define _mm_cmpistra(A, B, M) \
+ __builtin_ia32_pcmpistria128((A), (B), (M))
+#define _mm_cmpistrc(A, B, M) \
+ __builtin_ia32_pcmpistric128((A), (B), (M))
+#define _mm_cmpistro(A, B, M) \
+ __builtin_ia32_pcmpistrio128((A), (B), (M))
+#define _mm_cmpistrs(A, B, M) \
+ __builtin_ia32_pcmpistris128((A), (B), (M))
+#define _mm_cmpistrz(A, B, M) \
+ __builtin_ia32_pcmpistriz128((A), (B), (M))
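
The implicit-length forms above now take only (A, B, M): the pcmpistri family derives each string's length from its first NUL byte, so the LA/LB parameters were spurious. Illustrative sketch (assumes -msse4.2); _mm_cmpistrc reports via CF whether any byte of B is in the set A.

    #include <nmmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i set   = _mm_loadu_si128((const __m128i *)"aeiou\0\0\0\0\0\0\0\0\0\0");
        __m128i chunk = _mm_loadu_si128((const __m128i *)"hello, world!!!!");
        printf("%d\n", _mm_cmpistrc(set, chunk,
                                    _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY)); /* 1 */
        return 0;
    }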
#define _mm_cmpestra(A, LA, B, LB, M) \
__builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
@@ -401,7 +427,7 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
- return __builtin_ia32_pcmpgtq((__v2di)__V1, (__v2di)__V2);
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
/* SSE4.2 Accumulate CRC32. */
@@ -431,20 +457,9 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
}
#endif /* __x86_64__ */
-/* SSE4.2 Population Count. */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u32(unsigned int __A)
-{
- return __builtin_popcount(__A);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u64(unsigned long long __A)
-{
- return __builtin_popcountll(__A);
-}
-#endif /* __x86_64__ */
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
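
After this change the population-count intrinsics live in popcntintrin.h and are pulled in only when the target actually enables POPCNT. Minimal sketch (illustrative, assumes a build with -mpopcnt or -msse4.2):

    #include <nmmintrin.h>
    #include <stdio.h>

    int main(void) {
        printf("%d\n", _mm_popcnt_u32(0xF0F0u));   /* 8 set bits */
        return 0;
    }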
#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */