path: root/contrib/llvm/tools/clang/lib/Headers
Diffstat (limited to 'contrib/llvm/tools/clang/lib/Headers')
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx2intrin.h     961
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avxintrin.h      331
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h      75
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/bmiintrin.h      115
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/cpuid.h           33
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/emmintrin.h       56
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/float.h           11
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/fma4intrin.h     231
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/immintrin.h       18
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h     55
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mm3dnow.h          2
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/module.map       108
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/popcntintrin.h    45
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/smmintrin.h      127
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tgmath.h          23
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tmmintrin.h       11
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/unwind.h         124
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/wmmintrin.h        2
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/x86intrin.h       26
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xmmintrin.h       21
20 files changed, 2161 insertions, 214 deletions
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
new file mode 100644
index 0000000..d165f1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
@@ -0,0 +1,961 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi8(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi16(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsw256((__v16hi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi32(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsd256((__v8si)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a + (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a + (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a + (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi64(__m256i a, __m256i b)
+{
+ return a + b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddusw256((__v16hi)a, (__v16hi)b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ __m256i __a = (a); \
+ __m256i __b = (b); \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_and_si256(__m256i a, __m256i b)
+{
+ return a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_si256(__m256i a, __m256i b)
+{
+ return ~a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_ia32_pblendw256((__v16hi)__V1, (__v16hi)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a == (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a == (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a == (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)(a == b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a > (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a > (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a > (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)(a > b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadds_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsubs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maddubs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_madd_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_epi8(__m256i a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhrs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a * (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi32 (__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a * (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epu32(__m256i a, __m256i b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_or_si256(__m256i a, __m256i b)
+{
+ return a | b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sad_epu8(__m256i a, __m256i b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_shuffle_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)a, (__v32qi)b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) & 0x03) >> 0), \
+ 12 + (((imm) & 0x0c) >> 2), \
+ 12 + (((imm) & 0x30) >> 4), \
+ 12 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ (imm) & 0x3,((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7, \
+ 8 + (((imm) & 0x03) >> 0), \
+ 8 + (((imm) & 0x0c) >> 2), \
+ 8 + (((imm) & 0x30) >> 4), \
+ 8 + (((imm) & 0xc0) >> 6), \
+ 12, 13, 14, 15); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_slli_si256(a, count) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi64(__m256i a, int count)
+{
+ return __builtin_ia32_psllqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi64(__m256i a, __m128i count)
+{
+ return __builtin_ia32_psllq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)a, (__v4si)count);
+}
+
+#define _mm256_srli_si256(a, count) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi64(__m256i a, int count)
+{
+ return __builtin_ia32_psrlqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi64(__m256i a, __m128i count)
+{
+ return __builtin_ia32_psrlq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a - (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a - (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a - (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi64(__m256i a, __m256i b)
+{
+ return a - b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubusw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector(a, b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector(a, b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_si256(__m256i a, __m256i b)
+{
+ return a ^ b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_load_si256(__m256i *__V)
+{
+ return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__V);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastsi128_si256(__m128i const *a)
+{
+ return (__m256i)__builtin_ia32_vbroadcastsi256(a);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i)__builtin_ia32_pblendd128((__v4si)__V1, (__v4si)__V2, (M)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_ia32_pblendd256((__v8si)__V1, (__v8si)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastq256(__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X);
+}
+
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastq128(__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+ __m256d __V = (V); \
+ (__m256d)__builtin_ia32_permdf256((__v4df)__V, (M)); })
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)a, (__v8sf)b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+ __m256i __V = (V); \
+ (__m256i)__builtin_ia32_permdi256(__V, (M)); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ __builtin_shufflevector(__V1, __V2, \
+ ((M) & 0x3) * 2, \
+ ((M) & 0x3) * 2 + 1, \
+ (((M) & 0x30) >> 4) * 2, \
+ (((M) & 0x30) >> 4) * 2 + 1); })
+
+#define _mm256_extracti128_si256(A, O) __extension__ ({ \
+ __m256i __A = (A); \
+ (__m128i)__builtin_ia32_extract128i256(__A, (O)); })
+
+#define _mm256_inserti128_si256(V1, V2, O) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m256i)__builtin_ia32_insert128i256(__V1, __V2, (O)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstoreq(( __v2di *)__X, __M, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+}
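
For illustration, a minimal sketch of how the new avx2intrin.h definitions are reached from user code: per the #error guard above, the header is only used through <immintrin.h>, and the intrinsics require AVX2 to be enabled (e.g. clang -O2 -mavx2). The array names and values below are illustrative, not taken from the diff.

#include <immintrin.h>
#include <stdio.h>

/* Add two arrays of 8 x 32-bit ints at a time using _mm256_add_epi32
   from avx2intrin.h (reached via immintrin.h). Build with -mavx2. */
int main(void)
{
    int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int b[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    int r[8];

    __m256i va = _mm256_loadu_si256((__m256i const *)a);
    __m256i vb = _mm256_loadu_si256((__m256i const *)b);
    __m256i vr = _mm256_add_epi32(va, vb);  /* wraps (__v8si)a + (__v8si)b above */

    _mm256_storeu_si256((__m256i *)r, vr);
    for (int i = 0; i < 8; i++)
        printf("%d ", r[i]);
    printf("\n");
    return 0;
}
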
diff --git a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
index 0a0d2e4..7a0ec3f 100644
--- a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
@@ -145,17 +145,13 @@ _mm256_rcp_ps(__m256 a)
return (__m256)__builtin_ia32_rcpps256((__v8sf)a);
}
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_round_pd(__m256d v, const int m)
-{
- return (__m256d)__builtin_ia32_roundpd256((__v4df)v, m);
-}
+#define _mm256_round_pd(V, M) __extension__ ({ \
+ __m256d __V = (V); \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_round_ps(__m256 v, const int m)
-{
- return (__m256)__builtin_ia32_roundps256((__v8sf)v, m);
-}
+#define _mm256_round_ps(V, M) __extension__ ({ \
+ __m256 __V = (V); \
+ (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })
#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
@@ -262,60 +258,79 @@ _mm256_permutevar_ps(__m256 a, __m256i c)
(__v8si)c);
}
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_permute_pd(__m128d a, const int c)
-{
- return (__m128d)__builtin_ia32_vpermilpd((__v2df)a, c);
-}
-
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_permute_pd(__m256d a, const int c)
-{
- return (__m256d)__builtin_ia32_vpermilpd256((__v4df)a, c);
-}
-
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_permute_ps(__m128 a, const int c)
-{
- return (__m128)__builtin_ia32_vpermilps((__v4sf)a, c);
-}
-
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_permute_ps(__m256 a, const int c)
-{
- return (__m256)__builtin_ia32_vpermilps256((__v8sf)a, c);
-}
-
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_pd(__m256d a, __m256d b, const int c)
-{
- return (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)a, (__v4df)b, c);
-}
-
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_ps(__m256 a, __m256 b, const int c)
-{
- return (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)a, (__v8sf)b, c);
-}
-
-static __inline __m256i __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_si256(__m256i a, __m256i b, const int c)
-{
- return (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)a, (__v8si)b, c);
-}
+#define _mm_permute_pd(A, C) __extension__ ({ \
+ __m128d __A = (A); \
+ (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1); })
+
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+ __m256d __A = (A); \
+ (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1, \
+ 2 + (((C) & 0x4) >> 2), \
+ 2 + (((C) & 0x8) >> 3)); })
+
+#define _mm_permute_ps(A, C) __extension__ ({ \
+ __m128 __A = (A); \
+ (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
+
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+ __m256 __A = (A); \
+ (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
+ 4 + (((C) & 0x03) >> 0), \
+ 4 + (((C) & 0x0c) >> 2), \
+ 4 + (((C) & 0x30) >> 4), \
+ 4 + (((C) & 0xc0) >> 6)); })
+
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m256d __V2 = (V2); \
+ (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \
+ ((M) & 0x3) * 2, \
+ ((M) & 0x3) * 2 + 1, \
+ (((M) & 0x30) >> 4) * 2, \
+ (((M) & 0x30) >> 4) * 2 + 1); })
+
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \
+ ((M) & 0x3) * 4, \
+ ((M) & 0x3) * 4 + 1, \
+ ((M) & 0x3) * 4 + 2, \
+ ((M) & 0x3) * 4 + 3, \
+ (((M) & 0x30) >> 4) * 4, \
+ (((M) & 0x30) >> 4) * 4 + 1, \
+ (((M) & 0x30) >> 4) * 4 + 2, \
+ (((M) & 0x30) >> 4) * 4 + 3); })
+
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_shufflevector((__v8si)__V1, (__v8si)__V2, \
+ ((M) & 0x3) * 4, \
+ ((M) & 0x3) * 4 + 1, \
+ ((M) & 0x3) * 4 + 2, \
+ ((M) & 0x3) * 4 + 3, \
+ (((M) & 0x30) >> 4) * 4, \
+ (((M) & 0x30) >> 4) * 4 + 1, \
+ (((M) & 0x30) >> 4) * 4 + 2, \
+ (((M) & 0x30) >> 4) * 4 + 3); })
/* Vector Blend */
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_blend_pd(__m256d a, __m256d b, const int c)
-{
- return (__m256d)__builtin_ia32_blendpd256((__v4df)a, (__v4df)b, c);
-}
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m256d __V2 = (V2); \
+ (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_blend_ps(__m256 a, __m256 b, const int c)
-{
- return (__m256)__builtin_ia32_blendps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_pd(__m256d a, __m256d b, __m256d c)
@@ -330,26 +345,29 @@ _mm256_blendv_ps(__m256 a, __m256 b, __m256 c)
}
/* Vector Dot Product */
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_dp_ps(__m256 a, __m256 b, const int c)
-{
- return (__m256)__builtin_ia32_dpps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
/* Vector shuffle */
-#define _mm256_shuffle_ps(a, b, mask) \
- (__builtin_shufflevector((__v8sf)(a), (__v8sf)(b), \
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+ __m256 __a = (a); \
+ __m256 __b = (b); \
+ (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
(mask) & 0x3, ((mask) & 0xc) >> 2, \
(((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \
((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \
- (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12))
+ (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })
-#define _mm256_shuffle_pd(a, b, mask) \
- (__builtin_shufflevector((__v4df)(a), (__v4df)(b), \
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+ __m256d __a = (a); \
+ __m256d __b = (b); \
+ (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
(mask) & 0x1, \
(((mask) & 0x2) >> 1) + 4, \
(((mask) & 0x4) >> 2) + 2, \
- (((mask) & 0x8) >> 3) + 6))
+ (((mask) & 0x8) >> 3) + 6); })
/* Compare */
#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
@@ -385,42 +403,48 @@ _mm256_dp_ps(__m256 a, __m256 b, const int c)
#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
-#define _mm_cmp_pd(a, b, c) \
- (__m128d)__builtin_ia32_cmppd((__v2df)(a), (__v2df)(b), (c))
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })
-#define _mm_cmp_ps(a, b, c) \
- (__m128)__builtin_ia32_cmpps((__v4sf)(a), (__v4sf)(b), (c))
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })
-#define _mm256_cmp_pd(a, b, c) \
- (__m256d)__builtin_ia32_cmppd256((__v4df)(a), (__v4df)(b), (c))
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+ __m256d __a = (a); \
+ __m256d __b = (b); \
+ (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })
-#define _mm256_cmp_ps(a, b, c) \
- (__m256)__builtin_ia32_cmpps256((__v8sf)(a), (__v8sf)(b), (c))
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+ __m256 __a = (a); \
+ __m256 __b = (b); \
+ (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })
-#define _mm_cmp_sd(a, b, c) \
- (__m128d)__builtin_ia32_cmpsd((__v2df)(a), (__v2df)(b), (c))
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })
-#define _mm_cmp_ss(a, b, c) \
- (__m128)__builtin_ia32_cmpss((__v4sf)(a), (__v4sf)(b), (c))
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
/* Vector extract */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_pd(__m256d a, const int o)
-{
- return (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)a, o);
-}
+#define _mm256_extractf128_pd(A, O) __extension__ ({ \
+ __m256d __A = (A); \
+ (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_ps(__m256 a, const int o)
-{
- return (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)a, o);
-}
+#define _mm256_extractf128_ps(A, O) __extension__ ({ \
+ __m256 __A = (A); \
+ (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_si256(__m256i a, const int o)
-{
- return (__m128i)__builtin_ia32_vextractf128_si256((__v8si)a, o);
-}
+#define _mm256_extractf128_si256(A, O) __extension__ ({ \
+ __m256i __A = (A); \
+ (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi32(__m256i a, int const imm)
@@ -453,23 +477,20 @@ _mm256_extract_epi64(__m256i a, const int imm)
#endif
/* Vector insert */
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_pd(__m256d a, __m128d b, const int o)
-{
- return (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)a, (__v2df)b, o);
-}
+#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_ps(__m256 a, __m128 b, const int o)
-{
- return (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)a, (__v4sf)b, o);
-}
+#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
-static __inline __m256i __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_si256(__m256i a, __m128i b, const int o)
-{
- return (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)a, (__v4si)b, o);
-}
+#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi32(__m256i a, int b, int const imm)
@@ -762,13 +783,19 @@ _mm256_load_ps(float const *p)
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_pd(double const *p)
{
- return (__m256d)__builtin_ia32_loadupd256(p);
+ struct __loadu_pd {
+ __m256d v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_pd*)p)->v;
}
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_ps(float const *p)
{
- return (__m256)__builtin_ia32_loadups256(p);
+ struct __loadu_ps {
+ __m256 v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_ps*)p)->v;
}
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
@@ -780,7 +807,10 @@ _mm256_load_si256(__m256i const *p)
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_si256(__m256i const *p)
{
- return (__m256i)__builtin_ia32_loaddqu256((char const *)p);
+ struct __loadu_si256 {
+ __m256i v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_si256*)p)->v;
}
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
@@ -1136,3 +1166,70 @@ _mm256_castsi128_si256(__m128i in)
__m128i zero = _mm_setzero_si128();
return __builtin_shufflevector(in, zero, 0, 1, 2, 2);
}
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128(float const *addr_hi, float const *addr_lo)
+{
+ struct __loadu_ps {
+ __m128 v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256 v256 = _mm256_castps128_ps256(((struct __loadu_ps*)addr_lo)->v);
+ return _mm256_insertf128_ps(v256, ((struct __loadu_ps*)addr_hi)->v, 1);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128d(double const *addr_hi, double const *addr_lo)
+{
+ struct __loadu_pd {
+ __m128d v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256d v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)addr_lo)->v);
+ return _mm256_insertf128_pd(v256, ((struct __loadu_pd*)addr_hi)->v, 1);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128i(__m128i const *addr_hi, __m128i const *addr_lo)
+{
+ struct __loadu_si128 {
+ __m128i v;
+ } __attribute__((packed, may_alias));
+ __m256i v256 = _mm256_castsi128_si256(((struct __loadu_si128*)addr_lo)->v);
+ return _mm256_insertf128_si256(v256, ((struct __loadu_si128*)addr_hi)->v, 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128(float *addr_hi, float *addr_lo, __m256 a)
+{
+ __m128 v128;
+
+ v128 = _mm256_castps256_ps128(a);
+ __builtin_ia32_storeups(addr_lo, v128);
+ v128 = _mm256_extractf128_ps(a, 1);
+ __builtin_ia32_storeups(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128d(double *addr_hi, double *addr_lo, __m256d a)
+{
+ __m128d v128;
+
+ v128 = _mm256_castpd256_pd128(a);
+ __builtin_ia32_storeupd(addr_lo, v128);
+ v128 = _mm256_extractf128_pd(a, 1);
+ __builtin_ia32_storeupd(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128i(__m128i *addr_hi, __m128i *addr_lo, __m256i a)
+{
+ __m128i v128;
+
+ v128 = _mm256_castsi256_si128(a);
+ __builtin_ia32_storedqu((char *)addr_lo, (__v16qi)v128);
+ v128 = _mm256_extractf128_si256(a, 1);
+ __builtin_ia32_storedqu((char *)addr_hi, (__v16qi)v128);
+}
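
A short sketch of the split unaligned load/store helpers added to avxintrin.h above; note the argument order is (addr_hi, addr_lo). The function and parameter names here are illustrative, and AVX must be enabled (e.g. clang -mavx).

#include <immintrin.h>

/* Gather two unrelated 4-float blocks into one __m256 and write them
   back out, using the _mm256_loadu2_m128/_mm256_storeu2_m128 pair
   defined above. No alignment requirements on any of the pointers. */
void copy_two_blocks(float *dst_hi, float *dst_lo,
                     const float *src_hi, const float *src_lo)
{
    /* low 128 bits come from src_lo, high 128 bits from src_hi */
    __m256 v = _mm256_loadu2_m128(src_hi, src_lo);
    _mm256_storeu2_m128(dst_hi, dst_lo, v);
}
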
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
new file mode 100644
index 0000000..c60b0c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
@@ -0,0 +1,75 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2__
+# error "BMI2 instruction set not enabled"
+#endif /* __BMI2__ */
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+#endif /* !__x86_64__ */
+
+#endif /* __BMI2INTRIN_H */
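
An illustrative use of the _pext_u32/_pdep_u32 wrappers defined above; per the guard, the header is reached through <x86intrin.h> and needs BMI2 enabled (e.g. clang -mbmi2). The even-bit mask and function name are just example choices.

#include <x86intrin.h>

/* Pack the even-numbered bits of x into the low bits of the result
   (_pext_u32), then scatter them back to their original positions
   (_pdep_u32). The round trip yields x & 0x55555555. */
unsigned int round_trip_even_bits(unsigned int x)
{
    const unsigned int even_mask = 0x55555555u;
    unsigned int packed = _pext_u32(x, even_mask);  /* extract bits selected by mask */
    return _pdep_u32(packed, even_mask);            /* deposit them back under mask */
}
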
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
new file mode 100644
index 0000000..2f7db73
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
@@ -0,0 +1,115 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI__
+# error "BMI instruction set not enabled"
+#endif /* __BMI__ */
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__tzcnt16(unsigned short __X)
+{
+ return __builtin_ctzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+ return ~__X & __Y;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsi_u32(unsigned int __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u32(unsigned int __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsr_u32(unsigned int __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__tzcnt32(unsigned int __X)
+{
+ return __builtin_ctz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+ return ~__X & __Y;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsi_u64(unsigned long long __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u64(unsigned long long __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsr_u64(unsigned long long __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__tzcnt64(unsigned long long __X)
+{
+ return __builtin_ctzll(__X);
+}
+#endif
+
+#endif /* __BMIINTRIN_H */
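
A small sketch combining the BMI helpers above to walk the set bits of a word: __tzcnt32 gives the index of the lowest set bit and __blsr_u32 clears it. Include <x86intrin.h> and build with -mbmi; the function name is illustrative.

#include <x86intrin.h>
#include <stdio.h>

/* Print the index of every set bit in x, lowest first. Requires -mbmi. */
void print_set_bits(unsigned int x)
{
    while (x != 0) {
        printf("bit %u\n", __tzcnt32(x));  /* index of lowest set bit */
        x = __blsr_u32(x);                 /* clear that bit and continue */
    }
}
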
diff --git a/contrib/llvm/tools/clang/lib/Headers/cpuid.h b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
new file mode 100644
index 0000000..05c293f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
@@ -0,0 +1,33 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+static inline int __get_cpuid (unsigned int level, unsigned int *eax,
+ unsigned int *ebx, unsigned int *ecx,
+ unsigned int *edx) {
+ asm("cpuid" : "=a"(*eax), "=b" (*ebx), "=c"(*ecx), "=d"(*edx) : "0"(level));
+ return 1;
+}
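
The __get_cpuid helper above simply forwards the requested leaf to the CPUID instruction. A hedged sketch of checking a leaf-1 feature bit before using the vector intrinsics (AVX is ECX bit 28 of leaf 1); unlike GCC's version, this minimal helper does not verify the maximum supported leaf.

#include <cpuid.h>

/* Returns nonzero if the CPU advertises AVX (CPUID leaf 1, ECX bit 28),
   using the __get_cpuid() defined above. */
int cpu_has_avx(void)
{
    unsigned int eax, ebx, ecx, edx;
    __get_cpuid(1, &eax, &ebx, &ecx, &edx);
    return (ecx >> 28) & 1u;
}
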
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
index 903cfde..e10b77d 100644
--- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -821,8 +821,9 @@ _mm_xor_si128(__m128i a, __m128i b)
return a ^ b;
}
-#define _mm_slli_si128(VEC, IMM) \
- ((__m128i)__builtin_ia32_pslldqi128((__m128i)(VEC), (IMM)*8))
+#define _mm_slli_si128(a, count) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi16(__m128i a, int count)
@@ -885,8 +886,9 @@ _mm_sra_epi32(__m128i a, __m128i count)
}
-#define _mm_srli_si128(VEC, IMM) \
- ((__m128i)__builtin_ia32_psrldqi128((__m128i)(VEC), (IMM)*8))
+#define _mm_srli_si128(a, count) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi16(__m128i a, int count)
@@ -945,7 +947,10 @@ _mm_cmpeq_epi32(__m128i a, __m128i b)
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi8(__m128i a, __m128i b)
{
- return (__m128i)((__v16qi)a > (__v16qi)b);
+ /* This function always performs a signed comparison, but __v16qi is a char
+ which may be signed or unsigned. */
+ typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ return (__m128i)((__v16qs)a > (__v16qs)b);
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -1259,23 +1264,27 @@ _mm_movemask_epi8(__m128i a)
return __builtin_ia32_pmovmskb128((__v16qi)a);
}
-#define _mm_shuffle_epi32(a, imm) \
- ((__m128i)__builtin_shufflevector((__v4si)(a), (__v4si) _mm_set1_epi32(0), \
- (imm) & 0x3, ((imm) & 0xc) >> 2, \
- ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6))
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7); })
-#define _mm_shufflelo_epi16(a, imm) \
- ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) _mm_set1_epi16(0), \
- (imm) & 0x3, ((imm) & 0xc) >> 2, \
- ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
- 4, 5, 6, 7))
-#define _mm_shufflehi_epi16(a, imm) \
- ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) _mm_set1_epi16(0), 0, 1, 2, 3, \
- 4 + (((imm) & 0x03) >> 0), \
- 4 + (((imm) & 0x0c) >> 2), \
- 4 + (((imm) & 0x30) >> 4), \
- 4 + (((imm) & 0xc0) >> 6)))
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi8(__m128i a, __m128i b)
@@ -1361,9 +1370,10 @@ _mm_movemask_pd(__m128d a)
return __builtin_ia32_movmskpd(a);
}
-#define _mm_shuffle_pd(a, b, i) \
- (__builtin_shufflevector((__m128d)(a), (__m128d)(b), (i) & 1, \
- (((i) & 2) >> 1) + 2))
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ __builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); })
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castpd_ps(__m128d in)
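Note: the emmintrin.h changes above rewrite the immediate-operand macros as GNU statement expressions so each vector argument is evaluated exactly once, through a typed local, before it reaches the builtin. A generic sketch of that pattern (SQUARE and next_val are invented names, not part of the header):

    /* GNU C statement-expression macro: the argument is evaluated once. */
    #define SQUARE(x) __extension__ ({ \
        __typeof__(x) __x = (x);       /* single evaluation of the argument */ \
        __x * __x; })

    static int counter;
    static int next_val(void) { return ++counter; }

    int demo(void) {
        /* With a naive ((x)*(x)) macro, next_val() would run twice here. */
        return SQUARE(next_val());
    }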
diff --git a/contrib/llvm/tools/clang/lib/Headers/float.h b/contrib/llvm/tools/clang/lib/Headers/float.h
index b7cb73a..65b517d 100644
--- a/contrib/llvm/tools/clang/lib/Headers/float.h
+++ b/contrib/llvm/tools/clang/lib/Headers/float.h
@@ -64,6 +64,11 @@
# undef FLT_MIN
# undef DBL_MIN
# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# endif
#endif
/* Characteristics of floating point types, C99 5.2.4.2.2 */
@@ -110,4 +115,10 @@
#define DBL_MIN __DBL_MIN__
#define LDBL_MIN __LDBL_MIN__
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
+
#endif /* __FLOAT_H */
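Note: FLT_TRUE_MIN and friends name the smallest positive subnormal value, as opposed to FLT_MIN, the smallest normalized one. A small check, assuming a C11 (or non-strict-ANSI) compiler so the new macros are visible:

    #include <float.h>
    #include <stdio.h>

    int main(void) {
        /* On IEEE 754 targets, typically 1.17549e-38 vs 1.4013e-45. */
        printf("FLT_MIN      = %g\n", FLT_MIN);
    #ifdef FLT_TRUE_MIN
        printf("FLT_TRUE_MIN = %g\n", FLT_TRUE_MIN);
    #endif
        return 0;
    }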
diff --git a/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
new file mode 100644
index 0000000..c30920d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
@@ -0,0 +1,231 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#ifndef __FMA4__
+# error "FMA4 instruction set is not enabled"
+#else
+
+#include <pmmintrin.h>
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA4__ */
+
+#endif /* __FMA4INTRIN_H */
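Note: each FMA4 intrinsic fuses the multiply and the add/subtract into a single rounding step. Illustration only — it assumes a compiler invoked with -mfma4 and an FMA4-capable (AMD Bulldozer-era) CPU to actually execute:

    #include <x86intrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 a = _mm_set1_ps(2.0f);
        __m128 b = _mm_set1_ps(3.0f);
        __m128 c = _mm_set1_ps(1.0f);
        __m128 r = _mm_macc_ps(a, b, c);   /* each lane: 2*3 + 1 = 7, one rounding */
        float out[4];
        _mm_storeu_ps(out, r);
        printf("%f\n", out[0]);
        return 0;
    }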
diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
index a19deaa..1605525 100644
--- a/contrib/llvm/tools/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
@@ -48,7 +48,7 @@
#include <smmintrin.h>
#endif
-#if defined (__AES__) || defined (__PCLMUL__)
+#if defined (__AES__)
#include <wmmintrin.h>
#endif
@@ -56,4 +56,20 @@
#include <avxintrin.h>
#endif
+#ifdef __AVX2__
+#include <avx2intrin.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
#endif /* __IMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
new file mode 100644
index 0000000..62ab5ca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
@@ -0,0 +1,55 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNT__
+# error "LZCNT instruction is not enabled"
+#endif /* __LZCNT__ */
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__lzcnt16(unsigned short __X)
+{
+ return __builtin_clzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__lzcnt32(unsigned int __X)
+{
+ return __builtin_clz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__lzcnt64(unsigned long long __X)
+{
+ return __builtin_clzll(__X);
+}
+#endif
+
+#endif /* __LZCNTINTRIN_H */
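Note: a usage sketch for the 32-bit count, assuming -mlzcnt; __builtin_clz is formally undefined for a zero argument, so zero inputs are avoided here:

    #include <x86intrin.h>
    #include <stdio.h>

    int main(void) {
        unsigned int x = 0x00010000u;                               /* bit 16 set */
        printf("leading zeros of %#x = %u\n", x, __lzcnt32(x));     /* expect 15 */
        return 0;
    }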
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
index 2f456ad..d5236f8 100644
--- a/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
+++ b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
@@ -105,7 +105,7 @@ _m_pfrsqrt(__m64 __m) {
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfrsqrtit1((__v2sf)__m1, (__v2sf)__m2);
+ return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
diff --git a/contrib/llvm/tools/clang/lib/Headers/module.map b/contrib/llvm/tools/clang/lib/Headers/module.map
new file mode 100644
index 0000000..418ba50
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/module.map
@@ -0,0 +1,108 @@
+module _Builtin_intrinsics [system] {
+ explicit module altivec {
+ requires altivec
+ header "altivec.h"
+ }
+
+ explicit module intel {
+ requires x86
+ export *
+
+ header "immintrin.h"
+ header "x86intrin.h"
+
+ explicit module mm_malloc {
+ header "mm_malloc.h"
+ export * // note: for <stdlib.h> dependency
+ }
+
+ explicit module cpuid {
+ header "cpuid.h"
+ }
+
+ explicit module mmx {
+ requires mmx
+ header "mmintrin.h"
+ }
+
+ explicit module sse {
+ requires sse
+ export mmx
+ export * // note: for hackish <emmintrin.h> dependency
+ header "xmmintrin.h"
+ }
+
+ explicit module sse2 {
+ requires sse2
+ export sse
+ header "emmintrin.h"
+ }
+
+ explicit module sse3 {
+ requires sse3
+ export sse2
+ header "pmmintrin.h"
+ }
+
+ explicit module ssse3 {
+ requires ssse3
+ export sse3
+ header "tmmintrin.h"
+ }
+
+ explicit module sse4_1 {
+ requires sse41
+ export ssse3
+ header "smmintrin.h"
+ }
+
+ explicit module sse4_2 {
+ requires sse42
+ export sse4_1
+ header "nmmintrin.h"
+ }
+
+ explicit module avx {
+ requires avx
+ export sse4_2
+ header "avxintrin.h"
+ }
+
+ explicit module avx2 {
+ requires avx2
+ export avx
+ header "avx2intrin.h"
+ }
+
+ explicit module bmi {
+ requires bmi
+ header "bmiintrin.h"
+ }
+
+ explicit module bmi2 {
+ requires bmi2
+ header "bmi2intrin.h"
+ }
+
+ explicit module fma4 {
+ requires fma4
+ export sse3
+ header "fma4intrin.h"
+ }
+
+ explicit module lzcnt {
+ requires lzcnt
+ header "lzcntintrin.h"
+ }
+
+ explicit module popcnt {
+ requires popcnt
+ header "popcntintrin.h"
+ }
+
+ explicit module mm3dnow {
+ requires mm3dnow
+ header "mm3dnow.h"
+ }
+ }
+}
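Note: with modules enabled (e.g. clang -fmodules), an ordinary include of one of these headers is satisfied through the module map above rather than by textual inclusion. A hedged sketch of a client translation unit:

    /* Built with "clang -fmodules", this include maps onto the sse2 module
       above (which re-exports sse and mmx); in Objective-C the explicit
       spelling would be "@import _Builtin_intrinsics.intel.sse2;". */
    #include <emmintrin.h>

    __m128i zero128(void) { return _mm_setzero_si128(); }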
diff --git a/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
new file mode 100644
index 0000000..d439daa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
@@ -0,0 +1,45 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __POPCNT__
+#error "POPCNT instruction set not enabled"
+#endif
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u32(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u64(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#endif /* _POPCNTINTRIN_H */
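Note: a usage sketch, assuming the compiler is invoked with -mpopcnt (or a -march value that implies it) so the header is reachable via <x86intrin.h>:

    #include <x86intrin.h>
    #include <stdio.h>

    int main(void) {
        printf("%d\n", _mm_popcnt_u32(0xF0F0u));   /* 8 set bits */
        return 0;
    }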
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
index 2b8b321..2fab50e 100644
--- a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -57,23 +57,34 @@
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
-#define _mm_round_ps(X, Y) __builtin_ia32_roundps((X), (Y))
-#define _mm_round_ss(X, Y, M) __builtin_ia32_roundss((X), (Y), (M))
-#define _mm_round_pd(X, M) __builtin_ia32_roundpd((X), (M))
-#define _mm_round_sd(X, Y, M) __builtin_ia32_roundsd((X), (Y), (M))
+#define _mm_round_ps(X, M) __extension__ ({ \
+ __m128 __X = (X); \
+ (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ __m128d __X = (X); \
+ (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
/* SSE4 Packed Blending Intrinsics. */
-static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
-{
- return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
-}
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ __m128d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })
-static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
-{
- return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
-}
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ __m128 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
@@ -96,11 +107,10 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
(__v16qi)__M);
}
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
-{
- return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
-}
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -116,8 +126,15 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
}
/* SSE4 Floating Point Dot Product Instructions. */
-#define _mm_dp_ps(X, Y, M) __builtin_ia32_dpps ((X), (Y), (M))
-#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd ((X), (Y), (M))
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -198,14 +215,14 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#endif /* __x86_64__ */
@@ -213,12 +230,12 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
* as a zero extended value, so it is unsigned.
*/
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- (unsigned char)__a[N];}))
+ (unsigned char)__a[(N)];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- (unsigned)__a[N];}))
+ (unsigned)__a[(N)];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[N];}))
+ __a[(N)];}))
#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
@@ -242,13 +259,13 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((V), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pcmpeqq((__v2di)__V1, (__v2di)__V2);
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
/* SSE4 Packed Integer Sign-Extension. */
@@ -333,7 +350,16 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
-#define _mm_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw128((X), (Y), (M))
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ __m128i __X = (X); \
+ __m128i __Y = (Y); \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
/* These definitions are normally in nmmintrin.h, but gcc puts them in here
so we'll do the same. */
@@ -371,20 +397,20 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
#define _mm_cmpestrm(A, LA, B, LB, M) \
__builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
-#define _mm_cmpestri(X, LX, Y, LY, M) \
+#define _mm_cmpestri(A, LA, B, LB, M) \
__builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
-#define _mm_cmpistra(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistria128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrc(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistric128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistro(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistrio128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrs(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistris128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrz(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistriz128((A), (LA), (B), (LB), (M))
+#define _mm_cmpistra(A, B, M) \
+ __builtin_ia32_pcmpistria128((A), (B), (M))
+#define _mm_cmpistrc(A, B, M) \
+ __builtin_ia32_pcmpistric128((A), (B), (M))
+#define _mm_cmpistro(A, B, M) \
+ __builtin_ia32_pcmpistrio128((A), (B), (M))
+#define _mm_cmpistrs(A, B, M) \
+ __builtin_ia32_pcmpistris128((A), (B), (M))
+#define _mm_cmpistrz(A, B, M) \
+ __builtin_ia32_pcmpistriz128((A), (B), (M))
#define _mm_cmpestra(A, LA, B, LB, M) \
__builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
@@ -401,7 +427,7 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
- return __builtin_ia32_pcmpgtq((__v2di)__V1, (__v2di)__V2);
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
/* SSE4.2 Accumulate CRC32. */
@@ -431,20 +457,9 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
}
#endif /* __x86_64__ */
-/* SSE4.2 Population Count. */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u32(unsigned int __A)
-{
- return __builtin_popcount(__A);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u64(unsigned long long __A)
-{
- return __builtin_popcountll(__A);
-}
-#endif /* __x86_64__ */
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */
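Note: the blend, round, dot-product and mpsadbw wrappers above become macros so the immediate operand reaches the builtin as a compile-time constant, while the statement expression still evaluates each vector argument only once. A usage sketch, assuming -msse4.1 (expected output: 10 2 30 4):

    #include <smmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);    /* lanes 0..3 = 1,2,3,4 */
        __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);
        __m128 r = _mm_blend_ps(a, b, 0x5);               /* mask 0101: lanes 0 and 2 from b */
        float out[4];
        _mm_storeu_ps(out, r);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
    }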
diff --git a/contrib/llvm/tools/clang/lib/Headers/tgmath.h b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
index 1b0b9d2..4fa1cf7 100644
--- a/contrib/llvm/tools/clang/lib/Headers/tgmath.h
+++ b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
@@ -540,15 +540,15 @@ static long double
_TG_ATTRS
__tg_fabs(long double __x) {return fabsl(__x);}
-static float _Complex
+static float
_TG_ATTRS
__tg_fabs(float _Complex __x) {return cabsf(__x);}
-static double _Complex
+static double
_TG_ATTRS
__tg_fabs(double _Complex __x) {return cabs(__x);}
-static long double _Complex
+static long double
_TG_ATTRS
__tg_fabs(long double _Complex __x) {return cabsl(__x);}
@@ -976,6 +976,23 @@ static long double
#undef log2
#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+// logb
+
+static float
+ _TG_ATTRS
+ __tg_logb(float __x) {return logbf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_logb(double __x) {return logb(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
// lrint
static long
diff --git a/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
index 07fea1c..a62c6cc 100644
--- a/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
@@ -66,8 +66,15 @@ _mm_abs_epi32(__m128i a)
return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
}
-#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+ __m128i __a = (a); \
+ __m128i __b = (b); \
+ (__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); })
+
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+ __m64 __a = (a); \
+ __m64 __b = (b); \
+ (__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi16(__m128i a, __m128i b)
diff --git a/contrib/llvm/tools/clang/lib/Headers/unwind.h b/contrib/llvm/tools/clang/lib/Headers/unwind.h
new file mode 100644
index 0000000..a065920
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/unwind.h
@@ -0,0 +1,124 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#if __has_include_next(<unwind.h>)
+/* Darwin and libunwind provide an unwind.h. If that's available, use
+ * it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include.*/
+# ifndef _GNU_SOURCE
+# define _SHOULD_UNDEFINE_GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own). We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+# include_next <unwind.h>
+# else
+# pragma GCC visibility push(default)
+# include_next <unwind.h>
+# pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+# undef _GNU_SOURCE
+# undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+ symbols it declares, but this matches gcc's behavior and some programs
+ depend on it */
+#pragma GCC visibility push(default)
+
+struct _Unwind_Context;
+typedef enum {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8
+} _Unwind_Reason_Code;
+
+
+#ifdef __arm__
+
+typedef enum {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *context,
+ _Unwind_VRS_RegClass regclass,
+ uint32_t regno,
+ _Unwind_VRS_DataRepresentation representation,
+ void *valuep);
+
+#else
+
+uintptr_t _Unwind_GetIP(struct _Unwind_Context* context);
+
+#endif
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context*, void*);
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void*);
+
+#pragma GCC visibility pop
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
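Note: a hedged sketch of a caller of the API declared above — walk the current stack and print each frame's instruction pointer. It assumes the non-ARM branch (so _Unwind_GetIP is visible) and linking against the unwinder (libgcc_s or libunwind); print_backtrace and trace_one are invented names:

    #include <stdio.h>
    #include <unwind.h>

    static _Unwind_Reason_Code trace_one(struct _Unwind_Context *ctx, void *arg) {
        int *depth = arg;
        printf("#%d  ip=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
        return _URC_NO_REASON;   /* keep unwinding */
    }

    void print_backtrace(void) {
        int depth = 0;
        _Unwind_Backtrace(trace_one, &depth);
    }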
diff --git a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
index 6b2e468..8f58850 100644
--- a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
@@ -28,7 +28,7 @@
# error "AES instructions not enabled"
#else
-#include <smmintrin.h>
+#include <xmmintrin.h>
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesenc_si128(__m128i __V, __m128i __R)
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
index e5e7a6a..f5e4d88 100644
--- a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -26,6 +26,30 @@
#include <immintrin.h>
-// FIXME: SSE4A, 3dNOW, FMA4, XOP, LWP, ABM, POPCNT
+#ifdef __3dNOW__
+#include <mm3dnow.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#ifdef __FMA4__
+#include <fma4intrin.h>
+#endif
+
+// FIXME: SSE4A, XOP, LWP, ABM
#endif /* __X86INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
index a0bc0bb..e616157 100644
--- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -664,7 +664,7 @@ _mm_storer_ps(float *p, __m128 a)
/* FIXME: We have to #define this because "sel" must be a constant integer, and
Sema doesn't do any form of constant propagation yet. */
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, sel))
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pi(__m64 *p, __m64 a)
@@ -735,8 +735,9 @@ _mm_mulhi_pu16(__m64 a, __m64 b)
return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
}
-#define _mm_shuffle_pi16(a, n) \
- ((__m64)__builtin_ia32_pshufw(a, n))
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+ __m64 __a = (a); \
+ (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); })
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmove_si64(__m64 d, __m64 n, char *p)
@@ -774,11 +775,13 @@ _mm_setcsr(unsigned int i)
__builtin_ia32_ldmxcsr(i);
}
-#define _mm_shuffle_ps(a, b, mask) \
- (__builtin_shufflevector((__v4sf)(a), (__v4sf)(b), \
- (mask) & 0x3, ((mask) & 0xc) >> 2, \
- (((mask) & 0x30) >> 4) + 4, \
- (((mask) & 0xc0) >> 6) + 4))
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \
+ (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 4, \
+ (((mask) & 0xc0) >> 6) + 4); })
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_ps(__m128 a, __m128 b)
@@ -935,7 +938,7 @@ _mm_movemask_ps(__m128 a)
#define _MM_FLUSH_ZERO_MASK (0x8000)
#define _MM_FLUSH_ZERO_ON (0x8000)
-#define _MM_FLUSH_ZERO_OFF (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
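Note: the _MM_FLUSH_ZERO_OFF fix above matters because the old value equalled _MM_FLUSH_ZERO_ON, so flush-to-zero could never be turned back off. A small sketch, assuming the usual _MM_SET_FLUSH_ZERO_MODE/_MM_GET_FLUSH_ZERO_MODE helpers from this header:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
        _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
        printf("FTZ on:  %#x\n", _MM_GET_FLUSH_ZERO_MODE());
        _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF);    /* works only with OFF == 0 */
        printf("FTZ off: %#x\n", _MM_GET_FLUSH_ZERO_MODE());
        return 0;
    }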