Diffstat (limited to 'test/Instrumentation/ThreadSanitizer')
 test/Instrumentation/ThreadSanitizer/atomic.ll           | 250
 test/Instrumentation/ThreadSanitizer/read_from_global.ll |   2
 test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll      |  26
 test/Instrumentation/ThreadSanitizer/tsan_basic.ll       |  33
 test/Instrumentation/ThreadSanitizer/vptr_read.ll        |  13
 5 files changed, 298 insertions(+), 26 deletions(-)
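
Background for the atomic.ll diff below: it adds coverage for "atomicrmw nand" (instrumented as calls to the __tsan_atomicN_fetch_nand runtime functions) and updates every compare_exchange CHECK line for a new second ordering argument. A minimal sketch of the nand lowering, assuming the memory-order encoding implied by the CHECK lines (monotonic=0, acquire=2, release=3, acq_rel=4, seq_cst=5); only the runtime call itself is confirmed by the tests:

define void @example(i8* %a) nounwind uwtable {
entry:
  atomicrmw nand i8* %a, i8 0 seq_cst   ; -tsan replaces this RMW...
  ret void
}
; ...with a call to the matching runtime function, encoding the order as i32 5:
;   call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 5)
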
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
index 107dbdc..70b6cbb 100644
--- a/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -114,6 +114,14 @@ entry:
; CHECK: atomic8_xor_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 0)
+define void @atomic8_nand_monotonic(i8* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i8* %a, i8 0 monotonic
+ ret void
+}
+; CHECK: atomic8_nand_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 0)
+
define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
entry:
atomicrmw xchg i8* %a, i8 0 acquire
@@ -162,6 +170,14 @@ entry:
; CHECK: atomic8_xor_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 2)
+define void @atomic8_nand_acquire(i8* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i8* %a, i8 0 acquire
+ ret void
+}
+; CHECK: atomic8_nand_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 2)
+
define void @atomic8_xchg_release(i8* %a) nounwind uwtable {
entry:
atomicrmw xchg i8* %a, i8 0 release
@@ -210,6 +226,14 @@ entry:
; CHECK: atomic8_xor_release
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 3)
+define void @atomic8_nand_release(i8* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i8* %a, i8 0 release
+ ret void
+}
+; CHECK: atomic8_nand_release
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 3)
+
define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable {
entry:
atomicrmw xchg i8* %a, i8 0 acq_rel
@@ -258,6 +282,14 @@ entry:
; CHECK: atomic8_xor_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 4)
+define void @atomic8_nand_acq_rel(i8* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i8* %a, i8 0 acq_rel
+ ret void
+}
+; CHECK: atomic8_nand_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 4)
+
define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable {
entry:
atomicrmw xchg i8* %a, i8 0 seq_cst
@@ -306,13 +338,21 @@ entry:
; CHECK: atomic8_xor_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 5)
+define void @atomic8_nand_seq_cst(i8* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i8* %a, i8 0 seq_cst
+ ret void
+}
+; CHECK: atomic8_nand_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 5)
+
define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
entry:
cmpxchg i8* %a, i8 0, i8 1 monotonic
ret void
}
; CHECK: atomic8_cas_monotonic
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0)
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 0)
define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
entry:
@@ -320,7 +360,7 @@ entry:
ret void
}
; CHECK: atomic8_cas_acquire
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2)
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 2)
define void @atomic8_cas_release(i8* %a) nounwind uwtable {
entry:
@@ -328,7 +368,7 @@ entry:
ret void
}
; CHECK: atomic8_cas_release
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3)
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 0)
define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
entry:
@@ -336,7 +376,7 @@ entry:
ret void
}
; CHECK: atomic8_cas_acq_rel
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4)
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 2)
define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
entry:
@@ -344,7 +384,7 @@ entry:
ret void
}
; CHECK: atomic8_cas_seq_cst
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5)
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 5)
define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
entry:
@@ -458,6 +498,14 @@ entry:
; CHECK: atomic16_xor_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 0)
+define void @atomic16_nand_monotonic(i16* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i16* %a, i16 0 monotonic
+ ret void
+}
+; CHECK: atomic16_nand_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 0)
+
define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable {
entry:
atomicrmw xchg i16* %a, i16 0 acquire
@@ -506,6 +554,14 @@ entry:
; CHECK: atomic16_xor_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 2)
+define void @atomic16_nand_acquire(i16* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i16* %a, i16 0 acquire
+ ret void
+}
+; CHECK: atomic16_nand_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 2)
+
define void @atomic16_xchg_release(i16* %a) nounwind uwtable {
entry:
atomicrmw xchg i16* %a, i16 0 release
@@ -554,6 +610,14 @@ entry:
; CHECK: atomic16_xor_release
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 3)
+define void @atomic16_nand_release(i16* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i16* %a, i16 0 release
+ ret void
+}
+; CHECK: atomic16_nand_release
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 3)
+
define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable {
entry:
atomicrmw xchg i16* %a, i16 0 acq_rel
@@ -602,6 +666,14 @@ entry:
; CHECK: atomic16_xor_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 4)
+define void @atomic16_nand_acq_rel(i16* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i16* %a, i16 0 acq_rel
+ ret void
+}
+; CHECK: atomic16_nand_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 4)
+
define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable {
entry:
atomicrmw xchg i16* %a, i16 0 seq_cst
@@ -650,13 +722,21 @@ entry:
; CHECK: atomic16_xor_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 5)
+define void @atomic16_nand_seq_cst(i16* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i16* %a, i16 0 seq_cst
+ ret void
+}
+; CHECK: atomic16_nand_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 5)
+
define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
entry:
cmpxchg i16* %a, i16 0, i16 1 monotonic
ret void
}
; CHECK: atomic16_cas_monotonic
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0)
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 0)
define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
entry:
@@ -664,7 +744,7 @@ entry:
ret void
}
; CHECK: atomic16_cas_acquire
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2)
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 2)
define void @atomic16_cas_release(i16* %a) nounwind uwtable {
entry:
@@ -672,7 +752,7 @@ entry:
ret void
}
; CHECK: atomic16_cas_release
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3)
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 0)
define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
entry:
@@ -680,7 +760,7 @@ entry:
ret void
}
; CHECK: atomic16_cas_acq_rel
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4)
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 2)
define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
entry:
@@ -688,7 +768,7 @@ entry:
ret void
}
; CHECK: atomic16_cas_seq_cst
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5)
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 5)
define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
entry:
@@ -802,6 +882,14 @@ entry:
; CHECK: atomic32_xor_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 0)
+define void @atomic32_nand_monotonic(i32* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i32* %a, i32 0 monotonic
+ ret void
+}
+; CHECK: atomic32_nand_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 0)
+
define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable {
entry:
atomicrmw xchg i32* %a, i32 0 acquire
@@ -850,6 +938,14 @@ entry:
; CHECK: atomic32_xor_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 2)
+define void @atomic32_nand_acquire(i32* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i32* %a, i32 0 acquire
+ ret void
+}
+; CHECK: atomic32_nand_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 2)
+
define void @atomic32_xchg_release(i32* %a) nounwind uwtable {
entry:
atomicrmw xchg i32* %a, i32 0 release
@@ -898,6 +994,14 @@ entry:
; CHECK: atomic32_xor_release
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 3)
+define void @atomic32_nand_release(i32* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i32* %a, i32 0 release
+ ret void
+}
+; CHECK: atomic32_nand_release
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 3)
+
define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable {
entry:
atomicrmw xchg i32* %a, i32 0 acq_rel
@@ -946,6 +1050,14 @@ entry:
; CHECK: atomic32_xor_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 4)
+define void @atomic32_nand_acq_rel(i32* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i32* %a, i32 0 acq_rel
+ ret void
+}
+; CHECK: atomic32_nand_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 4)
+
define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable {
entry:
atomicrmw xchg i32* %a, i32 0 seq_cst
@@ -994,13 +1106,21 @@ entry:
; CHECK: atomic32_xor_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 5)
+define void @atomic32_nand_seq_cst(i32* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i32* %a, i32 0 seq_cst
+ ret void
+}
+; CHECK: atomic32_nand_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 5)
+
define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
entry:
cmpxchg i32* %a, i32 0, i32 1 monotonic
ret void
}
; CHECK: atomic32_cas_monotonic
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0)
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 0)
define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
entry:
@@ -1008,7 +1128,7 @@ entry:
ret void
}
; CHECK: atomic32_cas_acquire
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2)
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 2)
define void @atomic32_cas_release(i32* %a) nounwind uwtable {
entry:
@@ -1016,7 +1136,7 @@ entry:
ret void
}
; CHECK: atomic32_cas_release
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3)
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 0)
define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
entry:
@@ -1024,7 +1144,7 @@ entry:
ret void
}
; CHECK: atomic32_cas_acq_rel
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4)
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 2)
define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
entry:
@@ -1032,7 +1152,7 @@ entry:
ret void
}
; CHECK: atomic32_cas_seq_cst
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5)
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 5)
define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
entry:
@@ -1146,6 +1266,14 @@ entry:
; CHECK: atomic64_xor_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 0)
+define void @atomic64_nand_monotonic(i64* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i64* %a, i64 0 monotonic
+ ret void
+}
+; CHECK: atomic64_nand_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 0)
+
define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable {
entry:
atomicrmw xchg i64* %a, i64 0 acquire
@@ -1194,6 +1322,14 @@ entry:
; CHECK: atomic64_xor_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 2)
+define void @atomic64_nand_acquire(i64* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i64* %a, i64 0 acquire
+ ret void
+}
+; CHECK: atomic64_nand_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 2)
+
define void @atomic64_xchg_release(i64* %a) nounwind uwtable {
entry:
atomicrmw xchg i64* %a, i64 0 release
@@ -1242,6 +1378,14 @@ entry:
; CHECK: atomic64_xor_release
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 3)
+define void @atomic64_nand_release(i64* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i64* %a, i64 0 release
+ ret void
+}
+; CHECK: atomic64_nand_release
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 3)
+
define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable {
entry:
atomicrmw xchg i64* %a, i64 0 acq_rel
@@ -1290,6 +1434,14 @@ entry:
; CHECK: atomic64_xor_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 4)
+define void @atomic64_nand_acq_rel(i64* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i64* %a, i64 0 acq_rel
+ ret void
+}
+; CHECK: atomic64_nand_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 4)
+
define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable {
entry:
atomicrmw xchg i64* %a, i64 0 seq_cst
@@ -1338,13 +1490,21 @@ entry:
; CHECK: atomic64_xor_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 5)
+define void @atomic64_nand_seq_cst(i64* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i64* %a, i64 0 seq_cst
+ ret void
+}
+; CHECK: atomic64_nand_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 5)
+
define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
entry:
cmpxchg i64* %a, i64 0, i64 1 monotonic
ret void
}
; CHECK: atomic64_cas_monotonic
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0)
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 0)
define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
entry:
@@ -1352,7 +1512,7 @@ entry:
ret void
}
; CHECK: atomic64_cas_acquire
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2)
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 2)
define void @atomic64_cas_release(i64* %a) nounwind uwtable {
entry:
@@ -1360,7 +1520,7 @@ entry:
ret void
}
; CHECK: atomic64_cas_release
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3)
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 0)
define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
entry:
@@ -1368,7 +1528,7 @@ entry:
ret void
}
; CHECK: atomic64_cas_acq_rel
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4)
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 2)
define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
entry:
@@ -1376,7 +1536,7 @@ entry:
ret void
}
; CHECK: atomic64_cas_seq_cst
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5)
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5)
define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
@@ -1490,6 +1650,14 @@ entry:
; CHECK: atomic128_xor_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 0)
+define void @atomic128_nand_monotonic(i128* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i128* %a, i128 0 monotonic
+ ret void
+}
+; CHECK: atomic128_nand_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 0)
+
define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable {
entry:
atomicrmw xchg i128* %a, i128 0 acquire
@@ -1538,6 +1706,14 @@ entry:
; CHECK: atomic128_xor_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 2)
+define void @atomic128_nand_acquire(i128* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i128* %a, i128 0 acquire
+ ret void
+}
+; CHECK: atomic128_nand_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 2)
+
define void @atomic128_xchg_release(i128* %a) nounwind uwtable {
entry:
atomicrmw xchg i128* %a, i128 0 release
@@ -1586,6 +1762,14 @@ entry:
; CHECK: atomic128_xor_release
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 3)
+define void @atomic128_nand_release(i128* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i128* %a, i128 0 release
+ ret void
+}
+; CHECK: atomic128_nand_release
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 3)
+
define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable {
entry:
atomicrmw xchg i128* %a, i128 0 acq_rel
@@ -1634,6 +1818,14 @@ entry:
; CHECK: atomic128_xor_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 4)
+define void @atomic128_nand_acq_rel(i128* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i128* %a, i128 0 acq_rel
+ ret void
+}
+; CHECK: atomic128_nand_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 4)
+
define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable {
entry:
atomicrmw xchg i128* %a, i128 0 seq_cst
@@ -1682,13 +1874,21 @@ entry:
; CHECK: atomic128_xor_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 5)
+define void @atomic128_nand_seq_cst(i128* %a) nounwind uwtable {
+entry:
+ atomicrmw nand i128* %a, i128 0 seq_cst
+ ret void
+}
+; CHECK: atomic128_nand_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 5)
+
define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
entry:
cmpxchg i128* %a, i128 0, i128 1 monotonic
ret void
}
; CHECK: atomic128_cas_monotonic
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 0)
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 0, i32 0)
define void @atomic128_cas_acquire(i128* %a) nounwind uwtable {
entry:
@@ -1696,7 +1896,7 @@ entry:
ret void
}
; CHECK: atomic128_cas_acquire
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 2)
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 2, i32 2)
define void @atomic128_cas_release(i128* %a) nounwind uwtable {
entry:
@@ -1704,7 +1904,7 @@ entry:
ret void
}
; CHECK: atomic128_cas_release
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 3)
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 3, i32 0)
define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable {
entry:
@@ -1712,7 +1912,7 @@ entry:
ret void
}
; CHECK: atomic128_cas_acq_rel
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 4)
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 4, i32 2)
define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable {
entry:
@@ -1720,7 +1920,7 @@ entry:
ret void
}
; CHECK: atomic128_cas_seq_cst
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 5)
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 5, i32 5)
define void @atomic_signal_fence_acquire() nounwind uwtable {
entry:
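
Note on the compare_exchange updates above: __tsan_atomicN_compare_exchange_val now receives two ordering arguments, success and failure. Judging from the new CHECK lines, the failure order is the success order with any release component dropped: monotonic -> (0, 0), acquire -> (2, 2), release -> (3, 0), acq_rel -> (4, 2), seq_cst -> (5, 5). A sketch under that assumption:

; cmpxchg i8* %a, i8 0, i8 1 acq_rel
; is instrumented as (success order acq_rel = 4, derived failure order acquire = 2):
;   call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 2)
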
diff --git a/test/Instrumentation/ThreadSanitizer/read_from_global.ll b/test/Instrumentation/ThreadSanitizer/read_from_global.ll
index a08453a..7b6b94e 100644
--- a/test/Instrumentation/ThreadSanitizer/read_from_global.ll
+++ b/test/Instrumentation/ThreadSanitizer/read_from_global.ll
@@ -48,7 +48,7 @@ entry:
}
; CHECK: define void @call_virtual_func
-; CHECK: __tsan_read
+; CHECK: __tsan_vptr_read
; CHECK: = load
; CHECK-NOT: __tsan_read
; CHECK: = load
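
The read_from_global.ll change above reflects that the load of an object's vtable pointer before a virtual call is now reported through __tsan_vptr_read, while the follow-up load of the function pointer out of the (constant) vtable stays uninstrumented, per the CHECK-NOT. A rough sketch of the pattern; the type and value names are illustrative, not taken from the test:

; %vtable = load void (%struct.A*)** %obj       ; instrumented: __tsan_vptr_read
; %fn     = load void (%struct.A*)*  %vtable    ; load from constant vtable: skipped
; call void %fn(%struct.A* %obj)
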
diff --git a/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll b/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
new file mode 100644
index 0000000..a83a274
--- /dev/null
+++ b/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -basicaa -gvn -tsan -S | FileCheck %s
+; TSan conflicts with load widening. Make sure load widening is disabled when -tsan is in use.
+
+; 32-bit little endian target.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+
+%struct_of_8_bytes_4_aligned = type { i32, i8, i8, i8, i8}
+
+@f = global %struct_of_8_bytes_4_aligned zeroinitializer, align 4
+
+; We access bytes 4 and 6 of @f; it is not OK to widen these into a single i32 load when sanitize_thread is set.
+
+define i32 @test_widening_bad(i8* %P) nounwind ssp noredzone sanitize_thread {
+entry:
+ %tmp = load i8* getelementptr inbounds (%struct_of_8_bytes_4_aligned* @f, i64 0, i32 1), align 4
+ %conv = zext i8 %tmp to i32
+ %tmp1 = load i8* getelementptr inbounds (%struct_of_8_bytes_4_aligned* @f, i64 0, i32 3), align 1
+ %conv2 = zext i8 %tmp1 to i32
+ %add = add nsw i32 %conv, %conv2
+ ret i32 %add
+; CHECK: @test_widening_bad
+; CHECK: call void @__tsan_read1
+; CHECK: call void @__tsan_read1
+; CHECK-NOT: call void @__tsan_read4
+; CHECK: ret i32
+}
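
For context on why this test exists: without the fix, GVN would widen the two one-byte loads into a single four-byte load of the word at offset 4 of @f, also reading bytes the program never touches; under TSan that widened access can race with legitimate writes to the neighboring bytes and report false positives. The forbidden widened form would look roughly like this (a sketch, not actual pass output):

;   %wide = load i32* bitcast (i8* getelementptr inbounds (%struct_of_8_bytes_4_aligned* @f, i64 0, i32 1) to i32*), align 4
;   ...shift/mask to extract bytes 4 and 6...
; which -tsan would have to instrument as the @__tsan_read4 call the CHECK-NOT rules out.
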
diff --git a/test/Instrumentation/ThreadSanitizer/tsan_basic.ll b/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
index 33c703b..0ecff40 100644
--- a/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
+++ b/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
@@ -20,3 +20,36 @@ entry:
; CHECK: ret i32
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+
+; Check that tsan converts mem intrinsics back to function calls.
+
+define void @MemCpyTest(i8* nocapture %x, i8* nocapture %y) {
+entry:
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
+ ret void
+; CHECK: define void @MemCpyTest
+; CHECK: call i8* @memcpy
+; CHECK: ret void
+}
+
+define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) {
+entry:
+ tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
+ ret void
+; CHECK: define void @MemMoveTest
+; CHECK: call i8* @memmove
+; CHECK: ret void
+}
+
+define void @MemSetTest(i8* nocapture %x) {
+entry:
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
+ ret void
+; CHECK: define void @MemSetTest
+; CHECK: call i8* @memset
+; CHECK: ret void
+}
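
The point of these three tests: TSan rewrites the memcpy/memmove/memset intrinsics into calls to the corresponding libc functions so that its runtime interceptors observe the accesses. A sketch of the expected lowering for @MemCpyTest; the declaration is inferred from the "call i8* @memcpy" CHECK line and is an assumption:

declare i8* @memcpy(i8*, i8*, i64)
; body of @MemCpyTest after -tsan (sketch):
;   %call = call i8* @memcpy(i8* %x, i8* %y, i64 16)
;   ret void
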
diff --git a/test/Instrumentation/ThreadSanitizer/vptr_read.ll b/test/Instrumentation/ThreadSanitizer/vptr_read.ll
new file mode 100644
index 0000000..404ca3f
--- /dev/null
+++ b/test/Instrumentation/ThreadSanitizer/vptr_read.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -tsan -S | FileCheck %s
+; Check that vptr reads are treated in a special way.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+define i8 @Foo(i8* %a) nounwind uwtable {
+entry:
+; CHECK: call void @__tsan_vptr_read
+ %0 = load i8* %a, align 8, !tbaa !0
+ ret i8 %0
+}
+!0 = metadata !{metadata !"vtable pointer", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
+
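
How the pass recognizes the vptr load above: the load carries !tbaa metadata rooted in the "vtable pointer" tag defined by !0, which the instrumentation treats specially. For contrast, the same load without that tag would get an ordinary size-based callback (a sketch; the exact callback name follows the __tsan_readN pattern seen in the other tests and is an assumption here):

;   %0 = load i8* %a, align 8        ; no "vtable pointer" TBAA tag
; would instead be instrumented as something like:
;   call void @__tsan_read1(i8* %a)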