path: root/test/Instrumentation
Diffstat (limited to 'test/Instrumentation')
-rw-r--r--  test/Instrumentation/AddressSanitizer/basic.ll      73
-rw-r--r--  test/Instrumentation/AddressSanitizer/test64.ll     22
-rw-r--r--  test/Instrumentation/BoundsChecking/lit.local.cfg    1
-rw-r--r--  test/Instrumentation/BoundsChecking/many-trap.ll    16
-rw-r--r--  test/Instrumentation/BoundsChecking/phi.ll          52
-rw-r--r--  test/Instrumentation/BoundsChecking/simple.ll       128
-rw-r--r--  test/Instrumentation/ThreadSanitizer/atomic.ll      323
7 files changed, 615 insertions, 0 deletions
diff --git a/test/Instrumentation/AddressSanitizer/basic.ll b/test/Instrumentation/AddressSanitizer/basic.ll
new file mode 100644
index 0000000..294ca8a
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/basic.ll
@@ -0,0 +1,73 @@
+; Test basic address sanitizer instrumentation.
+;
+; RUN: opt < %s -asan -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
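+; A rough sketch of the check being verified below, inferred from the CHECK
+; lines rather than from the pass itself:
+;   Shadow = (Addr >> 3) | Offset           ; one shadow byte covers 8 bytes
+;   if (*Shadow != 0)                       ; 0 means the whole 8 bytes are addressable
+;     if ((Addr & 7) + (4 - 1) >= *Shadow)  ; refine for a partially addressable granule
+;       __asan_report_load4(Addr)           ; crash block, placed at the end of the function
+;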
+define i32 @test_load(i32* %a) address_safety {
+; CHECK: @test_load
+; CHECK-NOT: load
+; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
+; CHECK: lshr i64 %[[LOAD_ADDR]], 3
+; CHECK: or i64
+; CHECK: %[[LOAD_SHADOW_PTR:[^ ]*]] = inttoptr
+; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8* %[[LOAD_SHADOW_PTR]]
+; CHECK: icmp ne i8
+; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
+;
+; First instrumentation block refines the shadow test.
+; CHECK: and i64 %[[LOAD_ADDR]], 7
+; CHECK: add i64 %{{.*}}, 3
+; CHECK: trunc i64 %{{.*}} to i8
+; CHECK: icmp sge i8 %{{.*}}, %[[LOAD_SHADOW]]
+; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
+;
+; The actual load comes next because ASan adds the crash block
+; to the end of the function.
+; CHECK: %tmp1 = load i32* %a
+; CHECK: ret i32 %tmp1
+
+; The crash block reports the error.
+; CHECK: call void @__asan_report_load4(i64 %[[LOAD_ADDR]])
+; CHECK: unreachable
+;
+
+
+entry:
+ %tmp1 = load i32* %a
+ ret i32 %tmp1
+}
+
+define void @test_store(i32* %a) address_safety {
+; CHECK: @test_store
+; CHECK-NOT: store
+; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
+; CHECK: lshr i64 %[[STORE_ADDR]], 3
+; CHECK: or i64
+; CHECK: %[[STORE_SHADOW_PTR:[^ ]*]] = inttoptr
+; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8* %[[STORE_SHADOW_PTR]]
+; CHECK: icmp ne i8
+; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
+;
+; First instrumentation block refines the shadow test.
+; CHECK: and i64 %[[STORE_ADDR]], 7
+; CHECK: add i64 %{{.*}}, 3
+; CHECK: trunc i64 %{{.*}} to i8
+; CHECK: icmp sge i8 %{{.*}}, %[[STORE_SHADOW]]
+; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
+;
+; The actual store comes next because ASan adds the crash block
+; to the end of the function.
+; CHECK: store i32 42, i32* %a
+; CHECK: ret void
+;
+; The crash block reports the error.
+; CHECK: call void @__asan_report_store4(i64 %[[STORE_ADDR]])
+; CHECK: unreachable
+;
+
+entry:
+ store i32 42, i32* %a
+ ret void
+}
diff --git a/test/Instrumentation/AddressSanitizer/test64.ll b/test/Instrumentation/AddressSanitizer/test64.ll
index fc27de9..d544d77 100644
--- a/test/Instrumentation/AddressSanitizer/test64.ll
+++ b/test/Instrumentation/AddressSanitizer/test64.ll
@@ -12,3 +12,25 @@ entry:
; Check for ASAN's Offset for 64-bit (2^44)
; CHECK-NEXT: 17592186044416
; CHECK: ret
+
+define void @example_atomicrmw(i64* %ptr) nounwind uwtable address_safety {
+entry:
+ %0 = atomicrmw add i64* %ptr, i64 1 seq_cst
+ ret void
+}
+
+; CHECK: @example_atomicrmw
+; CHECK: lshr {{.*}} 3
+; CHECK: atomicrmw
+; CHECK: ret
+
+define void @example_cmpxchg(i64* %ptr, i64 %compare_to, i64 %new_value) nounwind uwtable address_safety {
+entry:
+ %0 = cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst
+ ret void
+}
+
+; CHECK: @example_cmpxchg
+; CHECK: lshr {{.*}} 3
+; CHECK: cmpxchg
+; CHECK: ret
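+
+; Note: the checks above only verify that the shadow computation (lshr ... 3)
+; is emitted before the atomic operation, i.e. that atomicrmw and cmpxchg are
+; instrumented at all; they do not pin down the rest of the check sequence.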
diff --git a/test/Instrumentation/BoundsChecking/lit.local.cfg b/test/Instrumentation/BoundsChecking/lit.local.cfg
new file mode 100644
index 0000000..19eebc0
--- /dev/null
+++ b/test/Instrumentation/BoundsChecking/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll', '.c', '.cpp']
diff --git a/test/Instrumentation/BoundsChecking/many-trap.ll b/test/Instrumentation/BoundsChecking/many-trap.ll
new file mode 100644
index 0000000..0bbb959
--- /dev/null
+++ b/test/Instrumentation/BoundsChecking/many-trap.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -bounds-checking -S | FileCheck %s
+; RUN: opt < %s -bounds-checking -bounds-checking-single-trap -S | FileCheck -check-prefix=SINGLE %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
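+; By default every unsafe access gets its own trap call; with
+; -bounds-checking-single-trap the pass appears to reuse one trap block per
+; function, which is what the SINGLE prefix checks for below.
+;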
+; CHECK: @f1
+define void @f1(i64 %x) nounwind {
+ %1 = alloca i128, i64 %x
+ %2 = load i128* %1, align 4
+ %3 = load i128* %1, align 4
+ ret void
+; CHECK: call void @llvm.trap()
+; CHECK: call void @llvm.trap()
+; CHECK-NOT: call void @llvm.trap()
+; SINGLE: call void @llvm.trap()
+; SINGLE-NOT: call void @llvm.trap()
+}
diff --git a/test/Instrumentation/BoundsChecking/phi.ll b/test/Instrumentation/BoundsChecking/phi.ll
new file mode 100644
index 0000000..86b5922
--- /dev/null
+++ b/test/Instrumentation/BoundsChecking/phi.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -bounds-checking -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+@global = private unnamed_addr constant [10 x i8] c"ola\00mundo\00", align 1
+
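+; When the accessed pointer comes from a phi, the pass evaluates its size and
+; offset through the phi as well: the size of @global is the constant 10, so
+; only an offset phi is added, and the emitted guard (roughly, per the CHECK
+; lines in @f2) is: trap if Size < Offset or Size - Offset < NeededSize.
+;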
+; CHECK: f1
+; no checks are possible here
+; CHECK-NOT: trap
+define void @f1(i8* nocapture %c) {
+entry:
+ %0 = load i8* %c, align 1
+ %tobool1 = icmp eq i8 %0, 0
+ br i1 %tobool1, label %while.end, label %while.body
+
+while.body:
+ %c.addr.02 = phi i8* [ %incdec.ptr, %while.body ], [ %c, %entry ]
+ %incdec.ptr = getelementptr inbounds i8* %c.addr.02, i64 -1
+ store i8 100, i8* %c.addr.02, align 1
+ %1 = load i8* %incdec.ptr, align 1
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+
+; CHECK: f2
+define void @f2() {
+while.body.i.preheader:
+ %addr = getelementptr inbounds [10 x i8]* @global, i64 0, i64 9
+ br label %while.body.i
+
+while.body.i:
+; CHECK: phi
+; CHECK-NEXT: phi
+; CHECK-NOT: phi
+ %c.addr.02.i = phi i8* [ %incdec.ptr.i, %while.body.i ], [ %addr, %while.body.i.preheader ]
+ %incdec.ptr.i = getelementptr inbounds i8* %c.addr.02.i, i64 -1
+; CHECK: sub i64 10, %0
+; CHECK-NEXT: icmp ult i64 10, %0
+; CHECK-NEXT: icmp ult i64 {{.*}}, 1
+; CHECK-NEXT: or i1
+; CHECK-NEXT: br {{.*}}, label %trap
+ store i8 100, i8* %c.addr.02.i, align 1
+ %0 = load i8* %incdec.ptr.i, align 1
+ %tobool.i = icmp eq i8 %0, 0
+ br i1 %tobool.i, label %fn.exit, label %while.body.i
+
+fn.exit:
+ ret void
+}
diff --git a/test/Instrumentation/BoundsChecking/simple.ll b/test/Instrumentation/BoundsChecking/simple.ll
new file mode 100644
index 0000000..16870c7
--- /dev/null
+++ b/test/Instrumentation/BoundsChecking/simple.ll
@@ -0,0 +1,128 @@
+; RUN: opt < %s -bounds-checking -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+@.str = private constant [8 x i8] c"abcdefg\00" ; <[8 x i8]*>
+
+declare noalias i8* @malloc(i64) nounwind
+declare noalias i8* @calloc(i64, i64) nounwind
+declare noalias i8* @realloc(i8* nocapture, i64) nounwind
+
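+; The pass derives an object size and an access offset for each load/store
+; (from malloc/calloc/realloc, allocas and globals) and, per the CHECK lines
+; below, guards the access with roughly:
+;   trap if Size < Offset or Size - Offset < AccessSize
+; Checks it can prove safe (or cannot evaluate at all) are simply not emitted.
+;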
+; CHECK: @f1
+define void @f1() nounwind {
+ %1 = tail call i8* @malloc(i64 32)
+ %2 = bitcast i8* %1 to i32*
+ %idx = getelementptr inbounds i32* %2, i64 2
+; CHECK-NOT: trap
+ store i32 3, i32* %idx, align 4
+ ret void
+}
+
+; CHECK: @f2
+define void @f2() nounwind {
+ %1 = tail call i8* @malloc(i64 32)
+ %2 = bitcast i8* %1 to i32*
+ %idx = getelementptr inbounds i32* %2, i64 8
+; CHECK: trap
+ store i32 3, i32* %idx, align 4
+ ret void
+}
+
+; CHECK: @f3
+define void @f3(i64 %x) nounwind {
+ %1 = tail call i8* @calloc(i64 4, i64 %x)
+ %2 = bitcast i8* %1 to i32*
+ %idx = getelementptr inbounds i32* %2, i64 8
+; CHECK: mul i64 4, %
+; CHECK: sub i64 {{.*}}, 32
+; CHECK-NEXT: icmp ult i64 {{.*}}, 32
+; CHECK-NEXT: icmp ult i64 {{.*}}, 4
+; CHECK-NEXT: or i1
+; CHECK: trap
+ store i32 3, i32* %idx, align 4
+ ret void
+}
+
+; CHECK: @f4
+define void @f4(i64 %x) nounwind {
+ %1 = tail call i8* @realloc(i8* null, i64 %x) nounwind
+ %2 = bitcast i8* %1 to i32*
+ %idx = getelementptr inbounds i32* %2, i64 8
+; CHECK: trap
+ %3 = load i32* %idx, align 4
+ ret void
+}
+
+; CHECK: @f5
+define void @f5(i64 %x) nounwind {
+ %idx = getelementptr inbounds [8 x i8]* @.str, i64 0, i64 %x
+; CHECK: trap
+ %1 = load i8* %idx, align 4
+ ret void
+}
+
+; CHECK: @f6
+define void @f6(i64 %x) nounwind {
+ %1 = alloca i128
+; CHECK-NOT: trap
+ %2 = load i128* %1, align 4
+ ret void
+}
+
+; CHECK: @f7
+define void @f7(i64 %x) nounwind {
+ %1 = alloca i128, i64 %x
+; CHECK: mul i64 16,
+; CHECK: trap
+ %2 = load i128* %1, align 4
+ ret void
+}
+
+; CHECK: @f8
+define void @f8() nounwind {
+ %1 = alloca i128
+ %2 = alloca i128
+ %3 = select i1 undef, i128* %1, i128* %2
+; CHECK-NOT: trap
+ %4 = load i128* %3, align 4
+ ret void
+}
+
+; CHECK: @f9
+define void @f9(i128* %arg) nounwind {
+ %1 = alloca i128
+ %2 = select i1 undef, i128* %arg, i128* %1
+; CHECK-NOT: trap
+ %3 = load i128* %2, align 4
+ ret void
+}
+
+; CHECK: @f10
+define void @f10(i64 %x, i64 %y) nounwind {
+ %1 = alloca i128, i64 %x
+ %2 = alloca i128, i64 %y
+ %3 = select i1 undef, i128* %1, i128* %2
+; CHECK: select
+; CHECK: select
+; CHECK: trap
+ %4 = load i128* %3, align 4
+ ret void
+}
+
+; CHECK: @f11
+define void @f11(i128* byval %x) nounwind {
+ %1 = bitcast i128* %x to i8*
+ %2 = getelementptr inbounds i8* %1, i64 16
+; CHECK: br label
+ %3 = load i8* %2, align 4
+ ret void
+}
+
+; CHECK: @f12
+define i64 @f12(i64 %x, i64 %y) nounwind {
+ %1 = tail call i8* @calloc(i64 1, i64 %x)
+; CHECK: mul i64 %y, 8
+ %2 = bitcast i8* %1 to i64*
+ %3 = getelementptr inbounds i64* %2, i64 %y
+ %4 = load i64* %3, align 8
+ ret i64 %4
+}
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
new file mode 100644
index 0000000..02bf215
--- /dev/null
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -0,0 +1,323 @@
+; RUN: opt < %s -tsan -S | FileCheck %s
+; Check that atomic memory operations are converted to calls into the ThreadSanitizer runtime.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
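+; Each atomic load/store is rewritten into a call to __tsan_atomicN_load or
+; __tsan_atomicN_store, with the memory order passed as the last argument.
+; From the CHECK lines the encoding appears to be: unordered/monotonic = 1,
+; acquire = 4, release = 8, seq_cst = 32 (one bit per order); this is
+; inferred from the test, not from the runtime's documented interface.
+;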
+define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i8* %a unordered, align 1
+ ret i8 %0
+}
+; CHECK: atomic8_load_unordered
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)
+
+define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i8* %a monotonic, align 1
+ ret i8 %0
+}
+; CHECK: atomic8_load_monotonic
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)
+
+define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i8* %a acquire, align 1
+ ret i8 %0
+}
+; CHECK: atomic8_load_acquire
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 4)
+
+define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i8* %a seq_cst, align 1
+ ret i8 %0
+}
+; CHECK: atomic8_load_seq_cst
+; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 32)
+
+define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
+entry:
+ store atomic i8 0, i8* %a unordered, align 1
+ ret void
+}
+; CHECK: atomic8_store_unordered
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)
+
+define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
+entry:
+ store atomic i8 0, i8* %a monotonic, align 1
+ ret void
+}
+; CHECK: atomic8_store_monotonic
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)
+
+define void @atomic8_store_release(i8* %a) nounwind uwtable {
+entry:
+ store atomic i8 0, i8* %a release, align 1
+ ret void
+}
+; CHECK: atomic8_store_release
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 8)
+
+define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
+entry:
+ store atomic i8 0, i8* %a seq_cst, align 1
+ ret void
+}
+; CHECK: atomic8_store_seq_cst
+; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 32)
+
+define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i16* %a unordered, align 2
+ ret i16 %0
+}
+; CHECK: atomic16_load_unordered
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)
+
+define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i16* %a monotonic, align 2
+ ret i16 %0
+}
+; CHECK: atomic16_load_monotonic
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)
+
+define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i16* %a acquire, align 2
+ ret i16 %0
+}
+; CHECK: atomic16_load_acquire
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 4)
+
+define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i16* %a seq_cst, align 2
+ ret i16 %0
+}
+; CHECK: atomic16_load_seq_cst
+; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 32)
+
+define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
+entry:
+ store atomic i16 0, i16* %a unordered, align 2
+ ret void
+}
+; CHECK: atomic16_store_unordered
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)
+
+define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
+entry:
+ store atomic i16 0, i16* %a monotonic, align 2
+ ret void
+}
+; CHECK: atomic16_store_monotonic
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)
+
+define void @atomic16_store_release(i16* %a) nounwind uwtable {
+entry:
+ store atomic i16 0, i16* %a release, align 2
+ ret void
+}
+; CHECK: atomic16_store_release
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 8)
+
+define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
+entry:
+ store atomic i16 0, i16* %a seq_cst, align 2
+ ret void
+}
+; CHECK: atomic16_store_seq_cst
+; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 32)
+
+define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i32* %a unordered, align 4
+ ret i32 %0
+}
+; CHECK: atomic32_load_unordered
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)
+
+define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i32* %a monotonic, align 4
+ ret i32 %0
+}
+; CHECK: atomic32_load_monotonic
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)
+
+define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i32* %a acquire, align 4
+ ret i32 %0
+}
+; CHECK: atomic32_load_acquire
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 4)
+
+define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i32* %a seq_cst, align 4
+ ret i32 %0
+}
+; CHECK: atomic32_load_seq_cst
+; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 32)
+
+define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
+entry:
+ store atomic i32 0, i32* %a unordered, align 4
+ ret void
+}
+; CHECK: atomic32_store_unordered
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)
+
+define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
+entry:
+ store atomic i32 0, i32* %a monotonic, align 4
+ ret void
+}
+; CHECK: atomic32_store_monotonic
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)
+
+define void @atomic32_store_release(i32* %a) nounwind uwtable {
+entry:
+ store atomic i32 0, i32* %a release, align 4
+ ret void
+}
+; CHECK: atomic32_store_release
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 8)
+
+define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
+entry:
+ store atomic i32 0, i32* %a seq_cst, align 4
+ ret void
+}
+; CHECK: atomic32_store_seq_cst
+; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 32)
+
+define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i64* %a unordered, align 8
+ ret i64 %0
+}
+; CHECK: atomic64_load_unordered
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)
+
+define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i64* %a monotonic, align 8
+ ret i64 %0
+}
+; CHECK: atomic64_load_monotonic
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)
+
+define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i64* %a acquire, align 8
+ ret i64 %0
+}
+; CHECK: atomic64_load_acquire
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 4)
+
+define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i64* %a seq_cst, align 8
+ ret i64 %0
+}
+; CHECK: atomic64_load_seq_cst
+; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 32)
+
+define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
+entry:
+ store atomic i64 0, i64* %a unordered, align 8
+ ret void
+}
+; CHECK: atomic64_store_unordered
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)
+
+define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
+entry:
+ store atomic i64 0, i64* %a monotonic, align 8
+ ret void
+}
+; CHECK: atomic64_store_monotonic
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)
+
+define void @atomic64_store_release(i64* %a) nounwind uwtable {
+entry:
+ store atomic i64 0, i64* %a release, align 8
+ ret void
+}
+; CHECK: atomic64_store_release
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 8)
+
+define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
+entry:
+ store atomic i64 0, i64* %a seq_cst, align 8
+ ret void
+}
+; CHECK: atomic64_store_seq_cst
+; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 32)
+
+define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i128* %a unordered, align 16
+ ret i128 %0
+}
+; CHECK: atomic128_load_unordered
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)
+
+define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i128* %a monotonic, align 16
+ ret i128 %0
+}
+; CHECK: atomic128_load_monotonic
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)
+
+define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i128* %a acquire, align 16
+ ret i128 %0
+}
+; CHECK: atomic128_load_acquire
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 4)
+
+define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
+entry:
+ %0 = load atomic i128* %a seq_cst, align 16
+ ret i128 %0
+}
+; CHECK: atomic128_load_seq_cst
+; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 32)
+
+define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
+entry:
+ store atomic i128 0, i128* %a unordered, align 16
+ ret void
+}
+; CHECK: atomic128_store_unordered
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)
+
+define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
+entry:
+ store atomic i128 0, i128* %a monotonic, align 16
+ ret void
+}
+; CHECK: atomic128_store_monotonic
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)
+
+define void @atomic128_store_release(i128* %a) nounwind uwtable {
+entry:
+ store atomic i128 0, i128* %a release, align 16
+ ret void
+}
+; CHECK: atomic128_store_release
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 8)
+
+define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
+entry:
+ store atomic i128 0, i128* %a seq_cst, align 16
+ ret void
+}
+; CHECK: atomic128_store_seq_cst
+; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 32)