author     dim <dim@FreeBSD.org>    2015-06-21 14:00:56 +0000
committer  dim <dim@FreeBSD.org>    2015-06-21 14:00:56 +0000
commit     9dd834653b811ad20382e98a87dff824980c9916 (patch)
tree       a764184c2fc9486979b074250b013a0937ee64e5 /test/CodeGen/arm_neon_intrinsics.c
parent     bb9760db9b86e93a638ed430d0a14785f7ff9064 (diff)
download   FreeBSD-src-9dd834653b811ad20382e98a87dff824980c9916.zip
           FreeBSD-src-9dd834653b811ad20382e98a87dff824980c9916.tar.gz
Vendor import of clang trunk r240225:
https://llvm.org/svn/llvm-project/cfe/trunk@240225
Diffstat (limited to 'test/CodeGen/arm_neon_intrinsics.c')
-rw-r--r--   test/CodeGen/arm_neon_intrinsics.c   24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/arm_neon_intrinsics.c b/test/CodeGen/arm_neon_intrinsics.c
index 756e3b4..d92c32c 100644
--- a/test/CodeGen/arm_neon_intrinsics.c
+++ b/test/CodeGen/arm_neon_intrinsics.c
@@ -2399,6 +2399,12 @@ float32_t test_vget_lane_f32(float32x2_t a) {
   return vget_lane_f32(a, 1);
 }
 
+// CHECK-LABEL: test_vget_lane_f16
+// CHECK: vmov
+float32_t test_vget_lane_f16(float16x4_t a) {
+  return vget_lane_f16(a, 1);
+}
+
 // CHECK-LABEL: test_vgetq_lane_u8
 // CHECK: vmov
 uint8_t test_vgetq_lane_u8(uint8x16_t a) {
@@ -2453,6 +2459,12 @@ float32_t test_vgetq_lane_f32(float32x4_t a) {
   return vgetq_lane_f32(a, 3);
 }
 
+// CHECK-LABEL: test_vgetq_lane_f16
+// CHECK: vmov
+float32_t test_vgetq_lane_f16(float16x8_t a) {
+  return vgetq_lane_f16(a, 3);
+}
+
 // CHECK-LABEL: test_vget_lane_s64
 // The optimizer is able to remove all moves now.
 int64_t test_vget_lane_s64(int64x1_t a) {
@@ -9157,6 +9169,12 @@ float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) {
   return vset_lane_f32(a, b, 1);
 }
 
+// CHECK-LABEL: test_vset_lane_f16
+// CHECK: mov
+float16x4_t test_vset_lane_f16(float16_t *a, float16x4_t b) {
+  return vset_lane_f16(*a, b, 1);
+}
+
 // CHECK-LABEL: test_vsetq_lane_u8
 // CHECK: vmov
 uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) {
@@ -9211,6 +9229,12 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) {
   return vsetq_lane_f32(a, b, 3);
 }
 
+// CHECK-LABEL: test_vsetq_lane_f16
+// CHECK: vmov
+float16x8_t test_vsetq_lane_f16(float16_t *a, float16x8_t b) {
+  return vsetq_lane_f16(*a, b, 3);
+}
+
 // CHECK-LABEL: test_vset_lane_s64
 // The optimizer is able to get rid of all moves now.
 int64x1_t test_vset_lane_s64(int64_t a, int64x1_t b) {
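For context, the four added tests cover the float16 lane accessors from <arm_neon.h> (vget_lane_f16, vgetq_lane_f16, vset_lane_f16, vsetq_lane_f16). Below is a hypothetical usage sketch, not part of the imported test file; it mirrors the call patterns used in the tests and assumes an ARM target with NEON and __fp16 storage support:

// Hypothetical usage sketch of the intrinsics exercised by the new tests.
// Assumes an ARM NEON target with half-precision (__fp16) storage support.
#include <arm_neon.h>

// Read lane 1 of a 64-bit float16 vector; the result widens to float,
// matching the pattern in test_vget_lane_f16.
float32_t read_f16_lane1(float16x4_t v) {
  return vget_lane_f16(v, 1);
}

// Insert the scalar at *p into lane 3 of a 128-bit float16 vector,
// matching the pattern in test_vsetq_lane_f16.
float16x8_t write_f16_lane3(const float16_t *p, float16x8_t v) {
  return vsetq_lane_f16(*p, v, 3);
}

Note that the vset/vsetq tests pass the half-precision scalar through a float16_t pointer rather than by value, likely to sidestep restrictions on __fp16 function arguments on some targets; the sketch above follows the same convention.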