author     andrew <andrew@FreeBSD.org>    2012-07-30 10:58:13 +0000
committer  andrew <andrew@FreeBSD.org>    2012-07-30 10:58:13 +0000
commit     cfeab007a554034f0b3ab4a677cf9dd2696c12f9 (patch)
tree       40cc44a3d02ed86de24f2117a55680e4f0eb01a0 /lib
parent     07af089f1449ec5506ca7ede5b593e11a0f48603 (diff)
download   FreeBSD-src-cfeab007a554034f0b3ab4a677cf9dd2696c12f9.zip
           FreeBSD-src-cfeab007a554034f0b3ab4a677cf9dd2696c12f9.tar.gz
Import compiler-rt r160957.
Diffstat (limited to 'lib')
-rw-r--r--lib/CMakeLists.txt264
-rw-r--r--lib/Makefile.mk10
-rw-r--r--lib/absvti2.c4
-rw-r--r--lib/adddf3.c4
-rw-r--r--lib/addsf3.c4
-rw-r--r--lib/addvti3.c4
-rw-r--r--lib/arm/CMakeLists.txt0
-rw-r--r--lib/arm/aeabi_idivmod.S27
-rw-r--r--lib/arm/aeabi_ldivmod.S30
-rw-r--r--lib/arm/aeabi_memcmp.S19
-rw-r--r--lib/arm/aeabi_memcpy.S19
-rw-r--r--lib/arm/aeabi_memmove.S19
-rw-r--r--lib/arm/aeabi_memset.S32
-rw-r--r--lib/arm/aeabi_uidivmod.S28
-rw-r--r--lib/arm/aeabi_uldivmod.S30
-rw-r--r--lib/asan/CMakeLists.txt82
-rw-r--r--lib/asan/Makefile.mk4
-rw-r--r--lib/asan/Makefile.old117
-rw-r--r--lib/asan/README.txt1
-rw-r--r--lib/asan/asan_allocator.cc566
-rw-r--r--lib/asan/asan_allocator.h74
-rw-r--r--lib/asan/asan_flags.h97
-rw-r--r--lib/asan/asan_globals.cc64
-rw-r--r--lib/asan/asan_interceptors.cc670
-rw-r--r--lib/asan/asan_interceptors.h124
-rw-r--r--lib/asan/asan_interface.h96
-rw-r--r--lib/asan/asan_internal.h210
-rw-r--r--lib/asan/asan_linux.cc144
-rw-r--r--lib/asan/asan_lock.h76
-rw-r--r--lib/asan/asan_mac.cc404
-rw-r--r--lib/asan/asan_mac.h92
-rw-r--r--lib/asan/asan_malloc_linux.cc84
-rw-r--r--lib/asan/asan_malloc_mac.cc202
-rw-r--r--lib/asan/asan_malloc_win.cc141
-rw-r--r--lib/asan/asan_mapping.h62
-rw-r--r--lib/asan/asan_new_delete.cc56
-rw-r--r--lib/asan/asan_poisoning.cc78
-rw-r--r--lib/asan/asan_posix.cc126
-rw-r--r--lib/asan/asan_printf.cc150
-rw-r--r--lib/asan/asan_rtl.cc921
-rw-r--r--lib/asan/asan_stack.cc258
-rw-r--r--lib/asan/asan_stack.h96
-rw-r--r--lib/asan/asan_stats.cc44
-rw-r--r--lib/asan/asan_stats.h42
-rw-r--r--lib/asan/asan_thread.cc198
-rw-r--r--lib/asan/asan_thread.h52
-rw-r--r--lib/asan/asan_thread_registry.cc157
-rw-r--r--lib/asan/asan_thread_registry.h35
-rw-r--r--lib/asan/asan_win.cc181
-rw-r--r--lib/asan/output_tests/clone_test.cc34
-rw-r--r--lib/asan/output_tests/deep_tail_call.cc15
-rw-r--r--lib/asan/output_tests/default_options.cc12
-rw-r--r--lib/asan/output_tests/dlclose-test-so.cc (renamed from lib/asan/tests/dlclose-test-so.cc)2
-rw-r--r--lib/asan/output_tests/dlclose-test.cc (renamed from lib/asan/tests/dlclose-test.cc)3
-rw-r--r--lib/asan/output_tests/global-overflow.cc (renamed from lib/asan/tests/global-overflow.cc)4
-rw-r--r--lib/asan/output_tests/heap-overflow.cc22
-rw-r--r--lib/asan/output_tests/interception_failure_test-linux.cc17
-rw-r--r--lib/asan/output_tests/interception_malloc_test-linux.cc19
-rw-r--r--lib/asan/output_tests/interception_test-linux.cc18
-rw-r--r--lib/asan/output_tests/large_func_test.cc48
-rw-r--r--lib/asan/output_tests/memcmp_test.cc10
-rw-r--r--lib/asan/output_tests/null_deref.cc17
-rw-r--r--lib/asan/output_tests/shared-lib-test-so.cc (renamed from lib/asan/tests/shared-lib-test-so.cc)2
-rw-r--r--lib/asan/output_tests/shared-lib-test.cc (renamed from lib/asan/tests/shared-lib-test.cc)9
-rw-r--r--lib/asan/output_tests/stack-overflow.cc11
-rw-r--r--lib/asan/output_tests/stack-use-after-return.cc.disabled (renamed from lib/asan/tests/stack-use-after-return.cc)3
-rw-r--r--lib/asan/output_tests/strncpy-overflow.cc24
-rwxr-xr-xlib/asan/output_tests/test_output.sh79
-rw-r--r--lib/asan/output_tests/use-after-free.c (renamed from lib/asan/tests/use-after-free.c)3
-rw-r--r--lib/asan/output_tests/use-after-free.cc31
-rwxr-xr-xlib/asan/scripts/asan_symbolize.py88
-rw-r--r--lib/asan/sysinfo/LICENSE.TXT29
-rw-r--r--lib/asan/sysinfo/basictypes.h321
-rw-r--r--lib/asan/sysinfo/sysinfo.cc617
-rw-r--r--lib/asan/sysinfo/sysinfo.h234
-rw-r--r--lib/asan/tests/CMakeLists.txt118
-rw-r--r--lib/asan/tests/asan_benchmarks_test.cc2
-rw-r--r--lib/asan/tests/asan_break_optimization.cc3
-rw-r--r--lib/asan/tests/asan_globals_test.cc2
-rw-r--r--lib/asan/tests/asan_interface_test.cc334
-rw-r--r--lib/asan/tests/asan_mac_test.h5
-rw-r--r--lib/asan/tests/asan_mac_test.mm46
-rw-r--r--lib/asan/tests/asan_noinst_test.cc410
-rw-r--r--lib/asan/tests/asan_racy_double_free_test.cc32
-rw-r--r--lib/asan/tests/asan_test.cc642
-rw-r--r--lib/asan/tests/asan_test_config.h4
-rw-r--r--lib/asan/tests/asan_test_utils.h28
-rw-r--r--lib/asan/tests/dlclose-test.tmpl1
-rw-r--r--lib/asan/tests/global-overflow.tmpl3
-rw-r--r--lib/asan/tests/heap-overflow.cc9
-rw-r--r--lib/asan/tests/heap-overflow.tmpl6
-rw-r--r--lib/asan/tests/heap-overflow.tmpl.Darwin8
-rw-r--r--lib/asan/tests/large_func_test.cc33
-rw-r--r--lib/asan/tests/large_func_test.tmpl8
-rwxr-xr-xlib/asan/tests/match_output.py35
-rw-r--r--lib/asan/tests/null_deref.cc7
-rw-r--r--lib/asan/tests/null_deref.tmpl4
-rw-r--r--lib/asan/tests/shared-lib-test.tmpl7
-rw-r--r--lib/asan/tests/stack-overflow.cc7
-rw-r--r--lib/asan/tests/stack-overflow.tmpl3
-rw-r--r--lib/asan/tests/stack-use-after-return.disabled3
-rw-r--r--lib/asan/tests/strncpy-overflow.cc9
-rw-r--r--lib/asan/tests/strncpy-overflow.tmpl7
-rwxr-xr-xlib/asan/tests/test_output.sh47
-rw-r--r--lib/asan/tests/use-after-free.cc6
-rw-r--r--lib/asan/tests/use-after-free.tmpl10
-rw-r--r--lib/ashldi3.c2
-rw-r--r--lib/ashlti3.c4
-rw-r--r--lib/ashrdi3.c2
-rw-r--r--lib/ashrti3.c4
-rw-r--r--lib/assembly.h3
-rw-r--r--lib/atomic.c315
-rw-r--r--lib/clzti2.c4
-rw-r--r--lib/cmpti2.c4
-rw-r--r--lib/ctzti2.c4
-rw-r--r--lib/divdf3.c2
-rw-r--r--lib/divmoddi4.c2
-rw-r--r--lib/divsf3.c2
-rw-r--r--lib/divsi3.c10
-rw-r--r--lib/divti3.c4
-rw-r--r--lib/extendsfdf2.c2
-rw-r--r--lib/ffsti2.c4
-rw-r--r--lib/fixdfdi.c2
-rw-r--r--lib/fixdfsi.c2
-rw-r--r--lib/fixdfti.c4
-rw-r--r--lib/fixsfdi.c2
-rw-r--r--lib/fixsfsi.c2
-rw-r--r--lib/fixsfti.c4
-rw-r--r--lib/fixunsdfdi.c2
-rw-r--r--lib/fixunsdfsi.c2
-rw-r--r--lib/fixunsdfti.c4
-rw-r--r--lib/fixunssfdi.c2
-rw-r--r--lib/fixunssfsi.c2
-rw-r--r--lib/fixunssfti.c4
-rw-r--r--lib/fixunsxfti.c4
-rw-r--r--lib/fixxfti.c4
-rw-r--r--lib/floatdidf.c2
-rw-r--r--lib/floatdisf.c2
-rw-r--r--lib/floatsidf.c2
-rw-r--r--lib/floatsisf.c2
-rw-r--r--lib/floattidf.c4
-rw-r--r--lib/floattisf.c4
-rw-r--r--lib/floattixf.c4
-rw-r--r--lib/floatundidf.c2
-rw-r--r--lib/floatundisf.c2
-rw-r--r--lib/floatunsidf.c2
-rw-r--r--lib/floatunsisf.c2
-rw-r--r--lib/floatuntidf.c4
-rw-r--r--lib/floatuntisf.c4
-rw-r--r--lib/floatuntixf.c4
-rw-r--r--lib/fp_lib.h2
-rw-r--r--lib/i386/CMakeLists.txt3
-rw-r--r--lib/int_endianness.h9
-rw-r--r--lib/int_util.c13
-rw-r--r--lib/int_util.h7
-rw-r--r--lib/interception/CMakeLists.txt37
-rw-r--r--lib/interception/Makefile.mk23
-rw-r--r--lib/interception/interception.h168
-rw-r--r--lib/interception/interception_linux.cc29
-rw-r--r--lib/interception/interception_linux.h35
-rw-r--r--lib/interception/interception_mac.cc33
-rw-r--r--lib/interception/interception_mac.h47
-rw-r--r--lib/interception/interception_win.cc149
-rw-r--r--lib/interception/interception_win.h42
-rw-r--r--lib/interception/mach_override/LICENSE.TXT (renamed from lib/asan/mach_override/LICENSE.TXT)0
-rw-r--r--lib/interception/mach_override/Makefile.mk (renamed from lib/asan/mach_override/Makefile.mk)8
-rw-r--r--lib/interception/mach_override/README.txt (renamed from lib/asan/mach_override/README.txt)0
-rw-r--r--lib/interception/mach_override/mach_override.c (renamed from lib/asan/mach_override/mach_override.c)118
-rw-r--r--lib/interception/mach_override/mach_override.h (renamed from lib/asan/mach_override/mach_override.h)13
-rw-r--r--lib/lshrdi3.c2
-rw-r--r--lib/lshrti3.c4
-rw-r--r--lib/modti3.c4
-rw-r--r--lib/muldf3.c4
-rw-r--r--lib/muldi3.c2
-rw-r--r--lib/muloti4.c4
-rw-r--r--lib/mulsf3.c4
-rw-r--r--lib/multi3.c4
-rw-r--r--lib/mulvti3.c4
-rw-r--r--lib/negdf2.c2
-rw-r--r--lib/negsf2.c2
-rw-r--r--lib/negti2.c4
-rw-r--r--lib/negvti2.c4
-rw-r--r--lib/parityti2.c4
-rw-r--r--lib/popcountti2.c4
-rw-r--r--lib/powitf2.c4
-rw-r--r--lib/ppc/CMakeLists.txt12
-rw-r--r--lib/profile/GCDAProfiling.c93
-rw-r--r--lib/sanitizer_common/CMakeLists.txt35
-rw-r--r--lib/sanitizer_common/Makefile.mk (renamed from lib/asan/sysinfo/Makefile.mk)10
-rw-r--r--lib/sanitizer_common/sanitizer_allocator.cc59
-rw-r--r--lib/sanitizer_common/sanitizer_allocator64.h488
-rw-r--r--lib/sanitizer_common/sanitizer_atomic.h65
-rw-r--r--lib/sanitizer_common/sanitizer_atomic_clang.h122
-rw-r--r--lib/sanitizer_common/sanitizer_atomic_msvc.h112
-rw-r--r--lib/sanitizer_common/sanitizer_common.cc100
-rw-r--r--lib/sanitizer_common/sanitizer_common.h123
-rw-r--r--lib/sanitizer_common/sanitizer_flags.cc82
-rw-r--r--lib/sanitizer_common/sanitizer_flags.h27
-rw-r--r--lib/sanitizer_common/sanitizer_interface_defs.h56
-rw-r--r--lib/sanitizer_common/sanitizer_internal_defs.h163
-rw-r--r--lib/sanitizer_common/sanitizer_libc.cc182
-rw-r--r--lib/sanitizer_common/sanitizer_libc.h69
-rw-r--r--lib/sanitizer_common/sanitizer_linux.cc348
-rw-r--r--lib/sanitizer_common/sanitizer_list.h120
-rw-r--r--lib/sanitizer_common/sanitizer_mac.cc243
-rw-r--r--lib/sanitizer_common/sanitizer_mutex.h100
-rw-r--r--lib/sanitizer_common/sanitizer_placement_new.h33
-rw-r--r--lib/sanitizer_common/sanitizer_posix.cc164
-rw-r--r--lib/sanitizer_common/sanitizer_printf.cc185
-rw-r--r--lib/sanitizer_common/sanitizer_procmaps.h82
-rw-r--r--lib/sanitizer_common/sanitizer_symbolizer.cc144
-rw-r--r--lib/sanitizer_common/sanitizer_symbolizer.h100
-rw-r--r--lib/sanitizer_common/sanitizer_win.cc200
-rw-r--r--lib/sanitizer_common/tests/sanitizer_allocator64_test.cc257
-rw-r--r--lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc99
-rw-r--r--lib/sanitizer_common/tests/sanitizer_allocator_test.cc56
-rw-r--r--lib/sanitizer_common/tests/sanitizer_common_test.cc66
-rw-r--r--lib/sanitizer_common/tests/sanitizer_flags_test.cc72
-rw-r--r--lib/sanitizer_common/tests/sanitizer_list_test.cc157
-rw-r--r--lib/subdf3.c2
-rw-r--r--lib/subsf3.c2
-rw-r--r--lib/subvti3.c4
-rw-r--r--lib/truncdfsf2.c2
-rw-r--r--lib/tsan/CMakeLists.txt8
-rw-r--r--lib/tsan/Makefile.mk18
-rw-r--r--lib/tsan/Makefile.old106
-rwxr-xr-xlib/tsan/analyze_libtsan.sh43
-rw-r--r--lib/tsan/benchmarks/mini_bench_local.cc49
-rw-r--r--lib/tsan/benchmarks/mini_bench_shared.cc51
-rw-r--r--lib/tsan/benchmarks/start_many_threads.cc52
-rw-r--r--lib/tsan/benchmarks/vts_many_threads_bench.cc120
-rwxr-xr-xlib/tsan/check_analyze.sh43
-rwxr-xr-xlib/tsan/go/buildgo.sh78
-rw-r--r--lib/tsan/go/test.c51
-rw-r--r--lib/tsan/go/tsan_go.cc185
-rw-r--r--lib/tsan/output_tests/free_race.c43
-rw-r--r--lib/tsan/output_tests/free_race2.c26
-rw-r--r--lib/tsan/output_tests/heap_race.cc19
-rw-r--r--lib/tsan/output_tests/memcpy_race.cc40
-rw-r--r--lib/tsan/output_tests/mop_with_offset.cc36
-rw-r--r--lib/tsan/output_tests/mop_with_offset2.cc36
-rw-r--r--lib/tsan/output_tests/race_on_barrier.c31
-rw-r--r--lib/tsan/output_tests/race_on_barrier2.c30
-rw-r--r--lib/tsan/output_tests/race_on_mutex.c41
-rw-r--r--lib/tsan/output_tests/race_with_finished_thread.cc43
-rw-r--r--lib/tsan/output_tests/simple_race.c25
-rw-r--r--lib/tsan/output_tests/simple_race.cc24
-rw-r--r--lib/tsan/output_tests/simple_stack.c65
-rw-r--r--lib/tsan/output_tests/simple_stack2.cc46
-rw-r--r--lib/tsan/output_tests/static_init1.cc25
-rw-r--r--lib/tsan/output_tests/static_init2.cc31
-rw-r--r--lib/tsan/output_tests/static_init3.cc46
-rw-r--r--lib/tsan/output_tests/static_init4.cc35
-rw-r--r--lib/tsan/output_tests/static_init5.cc40
-rw-r--r--lib/tsan/output_tests/suppress_same_address.cc27
-rw-r--r--lib/tsan/output_tests/suppress_same_stacks.cc27
-rwxr-xr-xlib/tsan/output_tests/test_output.sh49
-rw-r--r--lib/tsan/output_tests/thread_leak.c15
-rw-r--r--lib/tsan/output_tests/thread_leak2.c15
-rw-r--r--lib/tsan/output_tests/thread_leak3.c14
-rw-r--r--lib/tsan/output_tests/tiny_race.c14
-rw-r--r--lib/tsan/output_tests/virtual_inheritance_compile_bug.cc13
-rw-r--r--lib/tsan/output_tests/vptr_benign_race.cc50
-rw-r--r--lib/tsan/output_tests/vptr_harmful_race.cc48
-rw-r--r--lib/tsan/rtl/Makefile.mk23
-rw-r--r--lib/tsan/rtl/Makefile.old59
-rw-r--r--lib/tsan/rtl/tsan_clock.cc118
-rw-r--r--lib/tsan/rtl/tsan_clock.h82
-rw-r--r--lib/tsan/rtl/tsan_defs.h139
-rw-r--r--lib/tsan/rtl/tsan_flags.cc79
-rw-r--r--lib/tsan/rtl/tsan_flags.h71
-rw-r--r--lib/tsan/rtl/tsan_interceptors.cc1596
-rw-r--r--lib/tsan/rtl/tsan_interface.cc42
-rw-r--r--lib/tsan/rtl/tsan_interface.h51
-rw-r--r--lib/tsan/rtl/tsan_interface_ann.cc352
-rw-r--r--lib/tsan/rtl/tsan_interface_ann.h31
-rw-r--r--lib/tsan/rtl/tsan_interface_atomic.cc321
-rw-r--r--lib/tsan/rtl/tsan_interface_atomic.h121
-rw-r--r--lib/tsan/rtl/tsan_interface_inl.h65
-rw-r--r--lib/tsan/rtl/tsan_md5.cc245
-rw-r--r--lib/tsan/rtl/tsan_mman.cc123
-rw-r--r--lib/tsan/rtl/tsan_mman.h114
-rw-r--r--lib/tsan/rtl/tsan_mutex.cc259
-rw-r--r--lib/tsan/rtl/tsan_mutex.h78
-rw-r--r--lib/tsan/rtl/tsan_platform.h101
-rw-r--r--lib/tsan/rtl/tsan_platform_linux.cc238
-rw-r--r--lib/tsan/rtl/tsan_platform_mac.cc112
-rw-r--r--lib/tsan/rtl/tsan_printf.cc39
-rw-r--r--lib/tsan/rtl/tsan_report.cc167
-rw-r--r--lib/tsan/rtl/tsan_report.h102
-rw-r--r--lib/tsan/rtl/tsan_rtl.cc534
-rw-r--r--lib/tsan/rtl/tsan_rtl.h491
-rw-r--r--lib/tsan/rtl/tsan_rtl_amd64.S71
-rw-r--r--lib/tsan/rtl/tsan_rtl_mutex.cc220
-rw-r--r--lib/tsan/rtl/tsan_rtl_report.cc372
-rw-r--r--lib/tsan/rtl/tsan_rtl_thread.cc394
-rw-r--r--lib/tsan/rtl/tsan_stat.cc249
-rw-r--r--lib/tsan/rtl/tsan_stat.h254
-rw-r--r--lib/tsan/rtl/tsan_suppressions.cc163
-rw-r--r--lib/tsan/rtl/tsan_suppressions.h43
-rw-r--r--lib/tsan/rtl/tsan_symbolize.cc78
-rw-r--r--lib/tsan/rtl/tsan_symbolize.h31
-rw-r--r--lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc193
-rw-r--r--lib/tsan/rtl/tsan_sync.cc219
-rw-r--r--lib/tsan/rtl/tsan_sync.h106
-rw-r--r--lib/tsan/rtl/tsan_trace.h71
-rw-r--r--lib/tsan/rtl/tsan_update_shadow_word_inl.h79
-rw-r--r--lib/tsan/rtl/tsan_vector.h110
-rw-r--r--lib/tsan/rtl_tests/tsan_bench.cc105
-rw-r--r--lib/tsan/rtl_tests/tsan_mop.cc233
-rw-r--r--lib/tsan/rtl_tests/tsan_mutex.cc221
-rw-r--r--lib/tsan/rtl_tests/tsan_posix.cc146
-rw-r--r--lib/tsan/rtl_tests/tsan_string.cc82
-rw-r--r--lib/tsan/rtl_tests/tsan_test.cc44
-rw-r--r--lib/tsan/rtl_tests/tsan_test_util.h122
-rw-r--r--lib/tsan/rtl_tests/tsan_test_util_linux.cc465
-rw-r--r--lib/tsan/rtl_tests/tsan_thread.cc59
-rw-r--r--lib/tsan/unit_tests/tsan_clock_test.cc123
-rw-r--r--lib/tsan/unit_tests/tsan_flags_test.cc38
-rw-r--r--lib/tsan/unit_tests/tsan_mman_test.cc109
-rw-r--r--lib/tsan/unit_tests/tsan_mutex_test.cc126
-rw-r--r--lib/tsan/unit_tests/tsan_platform_test.cc88
-rw-r--r--lib/tsan/unit_tests/tsan_printf_test.cc106
-rw-r--r--lib/tsan/unit_tests/tsan_shadow_test.cc47
-rw-r--r--lib/tsan/unit_tests/tsan_suppressions_test.cc128
-rw-r--r--lib/tsan/unit_tests/tsan_sync_test.cc65
-rw-r--r--lib/tsan/unit_tests/tsan_vector_test.cc45
-rw-r--r--lib/ucmpti2.c4
-rw-r--r--lib/udivmoddi4.c2
-rw-r--r--lib/udivmodti4.c4
-rw-r--r--lib/udivsi3.c3
-rw-r--r--lib/udivti3.c4
-rw-r--r--lib/umodti3.c4
-rw-r--r--lib/x86_64/CMakeLists.txt5
334 files changed, 22851 insertions, 4874 deletions
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index e29474a..6701965 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -1,81 +1,197 @@
-#
-# Create a library called "CompilerRT" which includes the source files.
+# Compute the Clang version from the LLVM version.
+# FIXME: We should be able to reuse CLANG_VERSION variable calculated
+# in Clang cmake files, instead of copying the rules here.
+string(REGEX MATCH "[0-9]+\\.[0-9]+(\\.[0-9]+)?" CLANG_VERSION
+ ${PACKAGE_VERSION})
-#INCLUDE_DIRECTORIES(
-# ${CMAKE_CURRENT_BINARY_DIR}
-#)
+# Call add_clang_runtime_static_library(<target_library>) to make
+# sure that static <target_library> is built in the directory
+# where Clang driver expects to find it.
+if (APPLE)
+ set(CLANG_RUNTIME_LIB_DIR
+ ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/lib/darwin)
+elseif (UNIX)
+ # Assume Linux.
+ set(CLANG_RUNTIME_LIB_DIR
+ ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/lib/linux)
+endif()
+function(add_clang_runtime_static_library target_name)
+ set_target_properties(${target_name} PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY ${CLANG_RUNTIME_LIB_DIR})
+endfunction()
-# Generic functions needed for each architecture
-# libcompiler_rt.Generic.a libcompiler_rt.Optimized.a
+# First, add the subdirectories which contain feature-based runtime libraries
+# and several convenience helper libraries.
+add_subdirectory(asan)
+add_subdirectory(interception)
+add_subdirectory(sanitizer_common)
-# Generic
-SET( Generic_SRCS
- absvdi2.c absvsi2.c addvdi3.c addvsi3.c ashldi3.c ashrdi3.c
- clzdi2.c clzsi2.c cmpdi2.c ctzdi2.c ctzsi2.c
- divdc3.c divdi3.c divsc3.c ffsdi2.c
- fixdfdi.c fixsfdi.c fixunsdfdi.c fixunsdfsi.c fixunssfdi.c
- fixunssfsi.c floatdidf.c floatdisf.c floatundidf.c floatundisf.c
- gcc_personality_v0.c lshrdi3.c moddi3.c muldc3.c muldi3.c
- mulsc3.c mulvdi3.c mulvsi3.c negdi2.c negvdi2.c negvsi2.c
- paritydi2.c paritysi2.c popcountdi2.c popcountsi2.c powidf2.c
- powisf2.c subvdi3.c subvsi3.c ucmpdi2.c udivdi3.c
- udivmoddi4.c umoddi3.c apple_versioning.c eprintf.c
- )
+# FIXME: Add support for the profile library.
-# Optimized functions for each architecture
-# Commenting out for the min until the basics are working first.
-# ADD_SUBDIRECTORY( ppc )
-# ADD_SUBDIRECTORY( x86_64 )
-# ADD_SUBDIRECTORY( i386 )
-# ADD_SUBDIRECTORY( arm )
+# The top-level lib directory contains a large amount of C code which provides
+# generic implementations of the core runtime library along with optimized
+# architecture-specific code in various subdirectories.
-# List of functions needed for each architecture.
-SET( i386_Functions
- divxc3.c fixunsxfdi.c fixunsxfsi.c fixxfdi.c floatdixf.c
- floatundixf.c mulxc3.c powixf2.c clear_cache.c enable_execute_stack.c
- )
-
-SET( x86_64_Functions
- absvti2.c addvti3.c ashlti3.c ashrti3.c clzti2.c cmpti2.c
- ctzti2.c divti3.c divxc3.c ffsti2.c fixdfti.c fixsfti.c
- fixunsdfti.c fixunssfti.c fixunsxfdi.c fixunsxfsi.c
- fixunsxfti.c fixxfdi.c fixxfti.c floatdixf.c floattidf.c
- floattisf.c floattixf.c floatundixf.c floatuntidf.c
- floatuntisf.c floatuntixf.c lshrti3.c modti3.c multi3.c
- mulvti3.c mulxc3.c negti2.c negvti2.c parityti2.c
- popcountti2.c powixf2.c subvti3.c ucmpti2.c udivmodti4.c
- udivti3.c umodti3.c clear_cache.c enable_execute_stack.c
- )
-
-SET( PPC_Functions
- divtc3.c fixtfdi.c fixunstfdi.c floatditf.c floatunditf.c
- gcc_qadd.c gcc_qdiv.c gcc_qmul.c gcc_qsub.c multc3.c
- powitf2.c restFP.c saveFP.c trampoline_setup.c
- clear_cache.c enable_execute_stack.c
- )
-
-SET( ARM_Functions
- adddf3vfp.c addsf3vfp.c bswapdi2.c bswapsi2.c divdf3vfp.c
- divsf3vfp.c eqdf2vfp.c eqsf2vfp.c extendsfdf2vfp.c
- fixdfsivfp.c fixsfsivfp.c fixunsdfsivfp.c fixunssfsivfp.c
- floatsidfvfp.c floatsisfvfp.c floatunssidfvfp.c floatunssisfvfp.c
- gedf2vfp.c gesf2vfp.c gtdf2vfp.c gtsf2vfp.c
- ledf2vfp.c lesf2vfp.c ltdf2vfp.c ltsf2vfp.c
- muldf3vfp.c mulsf3vfp.c
- nedf2vfp.c negdf2vfp.c negsf2vfp.c nesf2vfp.c
- subdf3vfp.c subsf3vfp.c truncdfsf2vfp.c unorddf2vfp.c unordsf2vfp.c
- modsi3.c umodsi3.c udivsi3.c divsi3.c switch.c
- )
-
-#FOREACH( LOOP_VAR ${Achitectures} )
-# See ARCHIVE_OUTPUT_DIRECTORY docs.
-#${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/${LOOP_VAR}
-#ENDFOREACH
-
-ADD_LIBRARY( ${PROJECT_NAME}-Generic STATIC ${Generic_SRCS} )
-#ADD_LIBRARY( ${PROJECT_NAME}-i386 STATIC ${i386_Functions} )
-
-# [[debug|optimized|general]
-#TARGET_LINK_LIBRARIES( ${PROJECT_NAME} ${PROJECT_NAME}-Common optimized ${PROJECT_NAME}-i386 )
+set(GENERIC_SOURCES
+ absvdi2.c
+ absvsi2.c
+ absvti2.c
+ adddf3.c
+ addsf3.c
+ addvdi3.c
+ addvsi3.c
+ addvti3.c
+ apple_versioning.c
+ ashldi3.c
+ ashlti3.c
+ ashrdi3.c
+ ashrti3.c
+ clear_cache.c
+ clzdi2.c
+ clzsi2.c
+ clzti2.c
+ cmpdi2.c
+ cmpti2.c
+ comparedf2.c
+ comparesf2.c
+ ctzdi2.c
+ ctzsi2.c
+ ctzti2.c
+ divdc3.c
+ divdf3.c
+ divdi3.c
+ divmoddi4.c
+ divmodsi4.c
+ divsc3.c
+ divsf3.c
+ divsi3.c
+ divti3.c
+ divxc3.c
+ enable_execute_stack.c
+ eprintf.c
+ extendsfdf2.c
+ ffsdi2.c
+ ffsti2.c
+ fixdfdi.c
+ fixdfsi.c
+ fixdfti.c
+ fixsfdi.c
+ fixsfsi.c
+ fixsfti.c
+ fixunsdfdi.c
+ fixunsdfsi.c
+ fixunsdfti.c
+ fixunssfdi.c
+ fixunssfsi.c
+ fixunssfti.c
+ fixunsxfdi.c
+ fixunsxfsi.c
+ fixunsxfti.c
+ fixxfdi.c
+ fixxfti.c
+ floatdidf.c
+ floatdisf.c
+ floatdixf.c
+ floatsidf.c
+ floatsisf.c
+ floattidf.c
+ floattisf.c
+ floattixf.c
+ floatundidf.c
+ floatundisf.c
+ floatundixf.c
+ floatunsidf.c
+ floatunsisf.c
+ floatuntidf.c
+ floatuntisf.c
+ floatuntixf.c
+ gcc_personality_v0.c
+ int_util.c
+ lshrdi3.c
+ lshrti3.c
+ moddi3.c
+ modsi3.c
+ modti3.c
+ muldc3.c
+ muldf3.c
+ muldi3.c
+ mulodi4.c
+ mulosi4.c
+ muloti4.c
+ mulsc3.c
+ mulsf3.c
+ multi3.c
+ mulvdi3.c
+ mulvsi3.c
+ mulvti3.c
+ mulxc3.c
+ negdf2.c
+ negdi2.c
+ negsf2.c
+ negti2.c
+ negvdi2.c
+ negvsi2.c
+ negvti2.c
+ paritydi2.c
+ paritysi2.c
+ parityti2.c
+ popcountdi2.c
+ popcountsi2.c
+ popcountti2.c
+ powidf2.c
+ powisf2.c
+ powitf2.c
+ powixf2.c
+ subdf3.c
+ subsf3.c
+ subvdi3.c
+ subvsi3.c
+ subvti3.c
+ trampoline_setup.c
+ truncdfsf2.c
+ ucmpdi2.c
+ ucmpti2.c
+ udivdi3.c
+ udivmoddi4.c
+ udivmodsi4.c
+ udivmodti4.c
+ udivsi3.c
+ udivti3.c
+ umoddi3.c
+ umodsi3.c
+ umodti3.c
+ )
+if(CAN_TARGET_X86_64)
+ add_library(clang_rt.x86_64 STATIC
+ x86_64/floatdidf.c
+ x86_64/floatdisf.c
+ x86_64/floatdixf.c
+ x86_64/floatundidf.S
+ x86_64/floatundisf.S
+ x86_64/floatundixf.S
+ ${GENERIC_SOURCES}
+ )
+ set_target_properties(clang_rt.x86_64 PROPERTIES COMPILE_FLAGS "-std=c99 ${TARGET_X86_64_CFLAGS}")
+endif()
+if(CAN_TARGET_I386)
+ add_library(clang_rt.i386 STATIC
+ i386/ashldi3.S
+ i386/ashrdi3.S
+ i386/divdi3.S
+ i386/floatdidf.S
+ i386/floatdisf.S
+ i386/floatdixf.S
+ i386/floatundidf.S
+ i386/floatundisf.S
+ i386/floatundixf.S
+ i386/lshrdi3.S
+ i386/moddi3.S
+ i386/muldi3.S
+ i386/udivdi3.S
+ i386/umoddi3.S
+ ${GENERIC_SOURCES}
+ )
+ set_target_properties(clang_rt.i386 PROPERTIES COMPILE_FLAGS "-std=c99 ${TARGET_I386_CFLAGS}")
+endif()
diff --git a/lib/Makefile.mk b/lib/Makefile.mk
index 8394af3..791921a 100644
--- a/lib/Makefile.mk
+++ b/lib/Makefile.mk
@@ -15,10 +15,18 @@ SubDirs += i386 ppc x86_64 arm
# Add other submodules.
SubDirs += asan
+SubDirs += interception
SubDirs += profile
+SubDirs += sanitizer_common
+SubDirs += tsan
+
+# FIXME: We don't currently support building an atomic library, and as it must
+# be a separate library from the runtime library, we need to remove its source
+# code from the source files list.
+ExcludedSources := atomic.c
# Define the variables for this specific directory.
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
+Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(filter-out $(ExcludedSources),$(notdir $(file))))
ObjNames := $(Sources:%.c=%.o)
Implementation := Generic
diff --git a/lib/absvti2.c b/lib/absvti2.c
index 8f2bddc..c1c7277 100644
--- a/lib/absvti2.c
+++ b/lib/absvti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: absolute value */
/* Effects: aborts if abs(x) < 0 */
diff --git a/lib/adddf3.c b/lib/adddf3.c
index 7eb40a1..a55e82d 100644
--- a/lib/adddf3.c
+++ b/lib/adddf3.c
@@ -15,7 +15,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(dadd, adddf3);
+ARM_EABI_FNALIAS(dadd, adddf3)
COMPILER_RT_ABI fp_t
__adddf3(fp_t a, fp_t b) {
@@ -85,7 +85,7 @@ __adddf3(fp_t a, fp_t b) {
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
- const int align = aExponent - bExponent;
+ const unsigned int align = aExponent - bExponent;
if (align) {
if (align < typeWidth) {
const bool sticky = bSignificand << (typeWidth - align);
diff --git a/lib/addsf3.c b/lib/addsf3.c
index e57270a..0268324 100644
--- a/lib/addsf3.c
+++ b/lib/addsf3.c
@@ -15,7 +15,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(fadd, addsf3);
+ARM_EABI_FNALIAS(fadd, addsf3)
fp_t __addsf3(fp_t a, fp_t b) {
@@ -84,7 +84,7 @@ fp_t __addsf3(fp_t a, fp_t b) {
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
- const int align = aExponent - bExponent;
+ const unsigned int align = aExponent - bExponent;
if (align) {
if (align < typeWidth) {
const bool sticky = bSignificand << (typeWidth - align);
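
As an aside, a minimal standalone C sketch of the sticky-shift pattern the __adddf3/__addsf3 hunks above rely on (the function name and the fixed 64-bit width are illustrative, not part of the patch): bits shifted out of the smaller significand are collapsed into bit 0 so the later rounding step can still tell that something nonzero was discarded, and the exponent difference 'align' is never negative, which is why the patch changes its type to unsigned.

    #include <stdint.h>
    #include <stdbool.h>

    enum { TYPE_WIDTH = 64 };  /* typeWidth for the double-precision case */

    static uint64_t sticky_shift(uint64_t significand, unsigned align) {
        if (align == 0)
            return significand;
        if (align >= TYPE_WIDTH)                 /* everything is shifted out */
            return significand != 0;
        bool sticky = (significand << (TYPE_WIDTH - align)) != 0;
        return (significand >> align) | sticky;  /* keep a trace of lost bits */
    }
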
diff --git a/lib/addvti3.c b/lib/addvti3.c
index 9105c17..2efcf3b 100644
--- a/lib/addvti3.c
+++ b/lib/addvti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: a + b */
/* Effects: aborts if a + b overflows */
diff --git a/lib/arm/CMakeLists.txt b/lib/arm/CMakeLists.txt
deleted file mode 100644
index e69de29..0000000
--- a/lib/arm/CMakeLists.txt
+++ /dev/null
diff --git a/lib/arm/aeabi_idivmod.S b/lib/arm/aeabi_idivmod.S
new file mode 100644
index 0000000..0237f22
--- /dev/null
+++ b/lib/arm/aeabi_idivmod.S
@@ -0,0 +1,27 @@
+//===-- aeabi_idivmod.S - EABI idivmod implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { int quot, int rem} __aeabi_idivmod(int numerator, int denominator) {
+// int rem, quot;
+// quot = __divmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+ bl SYMBOL_NAME(__divmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
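
For reference, a minimal C sketch of the contract the comment block above describes (the sketch name is illustrative, and the '/' merely stands in for whatever division routine the library uses; divmodsi4.c is among the generic sources added by this import): the helper returns the quotient and writes the remainder through its third argument, and __aeabi_idivmod then returns the pair in r0/r1, which is what the stack slot plus the final 'ldr r1, [sp]' implement.

    #include <stdint.h>

    static int32_t divmodsi4_sketch(int32_t numerator, int32_t denominator,
                                    int32_t *remainder) {
        int32_t quot = numerator / denominator;      /* quotient in r0 */
        *remainder = numerator - quot * denominator; /* remainder via pointer */
        return quot;
    }
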
diff --git a/lib/arm/aeabi_ldivmod.S b/lib/arm/aeabi_ldivmod.S
new file mode 100644
index 0000000..197c459
--- /dev/null
+++ b/lib/arm/aeabi_ldivmod.S
@@ -0,0 +1,30 @@
+//===-- aeabi_ldivmod.S - EABI ldivmod implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { int64_t quot, int64_t rem}
+// __aeabi_ldivmod(int64_t numerator, int64_t denominator) {
+// int64_t rem, quot;
+// quot = __divmoddi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
+ push {r11, lr}
+ sub sp, sp, #16
+ add r12, sp, #8
+ str r12, [sp]
+ bl SYMBOL_NAME(__divmoddi4)
+ ldr r2, [sp, #8]
+ ldr r3, [sp, #12]
+ add sp, sp, #16
+ pop {r11, pc}
diff --git a/lib/arm/aeabi_memcmp.S b/lib/arm/aeabi_memcmp.S
new file mode 100644
index 0000000..ca29c10
--- /dev/null
+++ b/lib/arm/aeabi_memcmp.S
@@ -0,0 +1,19 @@
+//===-- aeabi_memcmp.S - EABI memcmp implementation -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memcmp(void *dest, void *src, size_t n) { memcmp(dest, src, n); }
+
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memcmp)
+ b memcmp
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp4, __aeabi_memcmp)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp8, __aeabi_memcmp)
diff --git a/lib/arm/aeabi_memcpy.S b/lib/arm/aeabi_memcpy.S
new file mode 100644
index 0000000..8b9c7fd
--- /dev/null
+++ b/lib/arm/aeabi_memcpy.S
@@ -0,0 +1,19 @@
+//===-- aeabi_memcpy.S - EABI memcpy implementation -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memcpy(void *dest, void *src, size_t n) { memcpy(dest, src, n); }
+
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memcpy)
+ b memcpy
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy4, __aeabi_memcpy)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy8, __aeabi_memcpy)
diff --git a/lib/arm/aeabi_memmove.S b/lib/arm/aeabi_memmove.S
new file mode 100644
index 0000000..c94ed2b
--- /dev/null
+++ b/lib/arm/aeabi_memmove.S
@@ -0,0 +1,19 @@
+//===-- aeabi_memmove.S - EABI memmove implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memmove(void *dest, void *src, size_t n) { memmove(dest, src, n); }
+
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memmove)
+ b memmove
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove4, __aeabi_memmove)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove8, __aeabi_memmove)
diff --git a/lib/arm/aeabi_memset.S b/lib/arm/aeabi_memset.S
new file mode 100644
index 0000000..30ab4ba
--- /dev/null
+++ b/lib/arm/aeabi_memset.S
@@ -0,0 +1,32 @@
+//===-- aeabi_memset.S - EABI memset implementation -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memset(void *dest, size_t n, int c) { memset(dest, c, n); }
+// void __aeabi_memclr(void *dest, size_t n) { __aeabi_memset(dest, n, 0); }
+
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memset)
+ mov r3, r1
+ mov r1, r2
+ mov r2, r3
+ b memset
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset4, __aeabi_memset)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
+
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
+ mov r2, r1
+ mov r1, #0
+ b memset
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr4, __aeabi_memclr)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr8, __aeabi_memclr)
+
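
A C-level sketch of the register shuffle in __aeabi_memset/__aeabi_memclr above (the function names here are illustrative only): the AEABI entry points take (dest, n, c) and (dest, n), while ISO memset takes (dest, c, n), so the wrappers reorder the arguments (and supply 0 for memclr) before tail-calling memset.

    #include <string.h>

    static void aeabi_memset_sketch(void *dest, size_t n, int c) {
        memset(dest, c, n);   /* swap the last two arguments */
    }

    static void aeabi_memclr_sketch(void *dest, size_t n) {
        memset(dest, 0, n);   /* memclr is memset with c == 0 */
    }
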
diff --git a/lib/arm/aeabi_uidivmod.S b/lib/arm/aeabi_uidivmod.S
new file mode 100644
index 0000000..f7e1d2e
--- /dev/null
+++ b/lib/arm/aeabi_uidivmod.S
@@ -0,0 +1,28 @@
+//===-- aeabi_uidivmod.S - EABI uidivmod implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { unsigned quot, unsigned rem}
+// __aeabi_uidivmod(unsigned numerator, unsigned denominator) {
+// unsigned rem, quot;
+// quot = __udivmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+ bl SYMBOL_NAME(__udivmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
diff --git a/lib/arm/aeabi_uldivmod.S b/lib/arm/aeabi_uldivmod.S
new file mode 100644
index 0000000..724049d
--- /dev/null
+++ b/lib/arm/aeabi_uldivmod.S
@@ -0,0 +1,30 @@
+//===-- aeabi_uldivmod.S - EABI uldivmod implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { uint64_t quot, uint64_t rem}
+// __aeabi_uldivmod(uint64_t numerator, uint64_t denominator) {
+// uint64_t rem, quot;
+// quot = __udivmoddi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+ .syntax unified
+ .align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+ push {r11, lr}
+ sub sp, sp, #16
+ add r12, sp, #8
+ str r12, [sp]
+ bl SYMBOL_NAME(__udivmoddi4)
+ ldr r2, [sp, #8]
+ ldr r3, [sp, #12]
+ add sp, sp, #16
+ pop {r11, pc}
\ No newline at end of file
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
new file mode 100644
index 0000000..ce985f5
--- /dev/null
+++ b/lib/asan/CMakeLists.txt
@@ -0,0 +1,82 @@
+# Build for the AddressSanitizer runtime support library.
+
+set(ASAN_SOURCES
+ asan_allocator.cc
+ asan_globals.cc
+ asan_interceptors.cc
+ asan_linux.cc
+ asan_mac.cc
+ asan_malloc_linux.cc
+ asan_malloc_mac.cc
+ asan_malloc_win.cc
+ asan_new_delete.cc
+ asan_poisoning.cc
+ asan_posix.cc
+ asan_printf.cc
+ asan_rtl.cc
+ asan_stack.cc
+ asan_stats.cc
+ asan_thread.cc
+ asan_thread_registry.cc
+ asan_win.cc
+ )
+
+include_directories(..)
+
+set(ASAN_CFLAGS
+ -fPIC
+ -fno-exceptions
+ -funwind-tables
+ -fvisibility=hidden
+ -fno-builtin
+ -fomit-frame-pointer
+ -O3
+ )
+if (SUPPORTS_NO_VARIADIC_MACROS_FLAG)
+ list(APPEND ASAN_CFLAGS -Wno-variadic-macros)
+endif ()
+
+if (APPLE)
+ list(APPEND ASAN_CFLAGS -mmacosx-version-min=10.5)
+endif()
+
+set(ASAN_COMMON_DEFINITIONS
+ ASAN_HAS_EXCEPTIONS=1
+ ASAN_NEEDS_SEGV=1
+ )
+
+# FIXME: We need to build universal binaries on OS X instead of
+# two arch-specific binaries.
+
+if(CAN_TARGET_X86_64)
+ add_library(clang_rt.asan-x86_64 STATIC
+ ${ASAN_SOURCES}
+ $<TARGET_OBJECTS:RTInterception.x86_64>
+ $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>
+ )
+ set_target_compile_flags(clang_rt.asan-x86_64
+ ${ASAN_CFLAGS}
+ ${TARGET_X86_64_CFLAGS}
+ )
+ set_property(TARGET clang_rt.asan-x86_64 APPEND PROPERTY COMPILE_DEFINITIONS
+ ${ASAN_COMMON_DEFINITIONS})
+ add_clang_runtime_static_library(clang_rt.asan-x86_64)
+endif()
+if(CAN_TARGET_I386)
+ add_library(clang_rt.asan-i386 STATIC
+ ${ASAN_SOURCES}
+ $<TARGET_OBJECTS:RTInterception.i386>
+ $<TARGET_OBJECTS:RTSanitizerCommon.i386>
+ )
+ set_target_compile_flags(clang_rt.asan-i386
+ ${ASAN_CFLAGS}
+ ${TARGET_I386_CFLAGS}
+ )
+ set_property(TARGET clang_rt.asan-i386 APPEND PROPERTY COMPILE_DEFINITIONS
+ ${ASAN_COMMON_DEFINITIONS})
+ add_clang_runtime_static_library(clang_rt.asan-i386)
+endif()
+
+if(LLVM_INCLUDE_TESTS)
+ add_subdirectory(tests)
+endif()
diff --git a/lib/asan/Makefile.mk b/lib/asan/Makefile.mk
index 4d9e58d..9d1a2e8 100644
--- a/lib/asan/Makefile.mk
+++ b/lib/asan/Makefile.mk
@@ -8,7 +8,7 @@
#===------------------------------------------------------------------------===#
ModuleName := asan
-SubDirs := mach_override sysinfo
+SubDirs :=
Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
ObjNames := $(Sources:%.cc=%.o)
@@ -17,6 +17,8 @@ Implementation := Generic
# FIXME: use automatic dependencies?
Dependencies := $(wildcard $(Dir)/*.h)
+Dependencies += $(wildcard $(Dir)/interception/*.h)
+Dependencies += $(wildcard $(Dir)/interception/mach_override/*.h)
# Define a convenience variable for all the asan functions.
AsanFunctions := $(Sources:%.cc=%)
diff --git a/lib/asan/Makefile.old b/lib/asan/Makefile.old
index a96ff42..4ab80e2 100644
--- a/lib/asan/Makefile.old
+++ b/lib/asan/Makefile.old
@@ -58,9 +58,11 @@ ifeq ($(ARCH), arm)
endif
CLANG_FLAGS=
+CLANG_VERSION=3.2
CLANG_BUILD=$(ROOT)/../../../../build/Release+Asserts
CLANG_CC=$(CLANG_BUILD)/bin/clang $(CLANG_FLAGS)
CLANG_CXX=$(CLANG_BUILD)/bin/clang++ $(CLANG_FLAGS)
+FILE_CHECK=$(CLANG_BUILD)/bin/FileCheck
CC=$(CLANG_CC)
CXX=$(CLANG_CXX)
@@ -77,7 +79,6 @@ ARCH=x86_64
ASAN_STACK=1
ASAN_GLOBALS=1
-ASAN_USE_CALL=1
ASAN_SCALE=0 # default will be used
ASAN_OFFSET=-1 #default will be used
ASAN_UAR=0
@@ -120,8 +121,8 @@ endif
# This will build libasan on linux for both x86_64 and i386 in the
# desired location. The Mac library is already build by the clang's make.
-# $(CLANG_BUILD)/lib/clang/3.1/lib/$(OS)/libclang_rt.asan-$(ARCH).a
-LIBASAN_INST_DIR=$(CLANG_BUILD)/lib/clang/3.1/lib/$(OS)
+# $(CLANG_BUILD)/lib/clang/$(CLANG_VERSION)/lib/$(OS)/libclang_rt.asan-$(ARCH).a
+LIBASAN_INST_DIR=$(CLANG_BUILD)/lib/clang/$(CLANG_VERSION)/lib/$(OS)
LIBASAN_A=$(LIBASAN_INST_DIR)/libclang_rt.asan-$(ARCH).a
BLACKLIST=
@@ -140,7 +141,6 @@ CLANG_ASAN_CXX=$(CLANG_CXX) \
$(BLACKLIST) \
-mllvm -asan-stack=$(ASAN_STACK) \
-mllvm -asan-globals=$(ASAN_GLOBALS) \
- -mllvm -asan-use-call=$(ASAN_USE_CALL) \
-mllvm -asan-mapping-scale=$(ASAN_SCALE) \
-mllvm -asan-mapping-offset-log=$(ASAN_OFFSET) \
-mllvm -asan-use-after-return=$(ASAN_UAR) \
@@ -169,37 +169,25 @@ ifeq ($(ASAN_COMPILER), gcc)
ASAN_LD_TAIL=$(LIBASAN_A)
endif
-RTL_HDR=asan_allocator.h \
- asan_internal.h \
- asan_interceptors.h \
- asan_interface.h \
- asan_lock.h \
- asan_mac.h \
- asan_mapping.h \
- asan_stack.h \
- asan_stats.h \
- asan_thread.h \
- asan_thread_registry.h \
- mach_override/mach_override.h \
- sysinfo/basictypes.h \
- sysinfo/sysinfo.h
-
-LIBASAN_OBJ=$(BIN)/asan_rtl$(SUFF).o \
- $(BIN)/asan_allocator$(SUFF).o \
- $(BIN)/asan_globals$(SUFF).o \
- $(BIN)/asan_interceptors$(SUFF).o \
- $(BIN)/asan_linux$(SUFF).o \
- $(BIN)/asan_mac$(SUFF).o \
- $(BIN)/asan_malloc_linux$(SUFF).o \
- $(BIN)/asan_malloc_mac$(SUFF).o \
- $(BIN)/asan_poisoning$(SUFF).o \
- $(BIN)/asan_printf$(SUFF).o \
- $(BIN)/asan_stack$(SUFF).o \
- $(BIN)/asan_stats$(SUFF).o \
- $(BIN)/asan_thread$(SUFF).o \
- $(BIN)/asan_thread_registry$(SUFF).o \
- $(BIN)/mach_override/mach_override$(SUFF).o \
- $(BIN)/sysinfo/sysinfo$(SUFF).o
+INTERCEPTION=../interception
+MACH_OVERRIDE=$(INTERCEPTION)/mach_override
+COMMON=../sanitizer_common
+
+RTL_HDR=$(wildcard *.h) \
+ $(wildcard $(INTERCEPTION)/*.h) \
+ $(wildcard $(MACH_OVERRIDE)/*.h) \
+ $(wildcard $(COMMON)/*.h)
+
+LIBTSAN_SRC=$(wildcard *.cc)
+INTERCEPTION_SRC=$(wildcard $(INTERCEPTION)/*.cc)
+MACH_OVERRIDE_SRC=$(wildcard $(MACH_OVERRIDE)/*.c)
+COMMON_SRC=$(wildcard $(COMMON)/*.cc)
+
+
+LIBASAN_OBJ=$(patsubst %.cc,$(BIN)/%$(SUFF).o,$(LIBTSAN_SRC)) \
+ $(patsubst $(INTERCEPTION)/%.cc,$(BIN)/%$(SUFF).o,$(INTERCEPTION_SRC)) \
+ $(patsubst $(COMMON)/%.cc,$(BIN)/%$(SUFF).o,$(COMMON_SRC)) \
+ $(patsubst $(MACH_OVERRIDE)/%.c,$(BIN)/%$(SUFF).o,$(MACH_OVERRIDE_SRC))
GTEST_ROOT=third_party/googletest
GTEST_INCLUDE=-I$(GTEST_ROOT)/include
@@ -209,29 +197,28 @@ GTEST_LIB=$(GTEST_MAKE_DIR)/gtest-all.o
all: b64 b32
test: t64 t32 output_tests lint
+ @echo "ALL TESTS PASSED"
output_tests: b32 b64
- cd tests && ./test_output.sh $(CLANG_CXX) $(CLANG_CC)
+ cd output_tests && ./test_output.sh $(CLANG_CXX) $(CLANG_CC) $(FILE_CHECK)
t64: b64
$(BIN)/asan_test64
t32: b32
$(BIN)/asan_test32
-b64: | $(BIN)
+b64: | mk_bin_dir
$(MAKE) -f $(MAKEFILE) ARCH=x86_64 asan_test asan_benchmarks
-b32: | $(BIN)
+b32: | mk_bin_dir
$(MAKE) -f $(MAKEFILE) ARCH=i386 asan_test asan_benchmarks
lib64:
- $(MAKE) $(MAKEFILE) ARCH=x86_64 lib
+ $(MAKE) -f $(MAKEFILE) ARCH=x86_64 lib
lib32:
- $(MAKE) $(MAKEFILE) ARCH=i386 lib
+ $(MAKE) -f $(MAKEFILE) ARCH=i386 lib
-$(BIN):
+mk_bin_dir:
mkdir -p $(BIN)
- mkdir -p $(BIN)/sysinfo
- mkdir -p $(BIN)/mach_override
clang:
cd ../ && llvm/rebuild_clang_and_asan.sh > /dev/null
@@ -250,29 +237,37 @@ install_clang: | $(INSTALL_DIR)
# cp -v $(CLANG_BUILD)/lib/libasan*.a $(INSTALL_DIR)/lib
$(BIN)/asan_noinst_test$(SUFF).o: tests/asan_noinst_test.cc $(RTL_HDR) $(MAKEFILE)
- $(CLEANROOM_CXX) $(PIE) $(CFLAGS) $(GTEST_INCLUDE) -I. -g -c $< -O2 -o $@
+ $(CLEANROOM_CXX) $(PIE) $(CFLAGS) $(GTEST_INCLUDE) -I. -I.. -g -c $< -O2 -o $@
$(BIN)/asan_break_optimization$(SUFF).o: tests/asan_break_optimization.cc $(MAKEFILE)
$(CLEANROOM_CXX) $(PIE) $(CFLAGS) -c $< -O0 -o $@
$(BIN)/%_test$(SUFF).o: tests/%_test.cc $(RTL_HDR) $(MAKEFILE)
- $(ASAN_CXX) $(GTEST_INCLUDE) -I. -g -c $< -O2 -o $@ $(PIE) $(CFLAGS)
+ $(ASAN_CXX) $(GTEST_INCLUDE) -I. -I.. -g -c $< -O2 -o $@ $(PIE) $(CFLAGS)
$(BIN)/%_test$(SUFF).o: tests/%_test.mm $(RTL_HDR) $(MAKEFILE)
- $(ASAN_CXX) $(GTEST_INCLUDE) -I. -g -c $< -O2 -o $@ -ObjC $(PIE) $(CFLAGS)
+ $(ASAN_CXX) $(GTEST_INCLUDE) -I. -I.. -g -c $< -O2 -o $@ -ObjC $(PIE) $(CFLAGS)
+
+RTL_COMMON_FLAGS=$(PIE) $(CFLAGS) -fPIC -c -O2 -fno-exceptions -funwind-tables \
+ -Ithird_party -I.. $(ASAN_FLAGS)
+
+$(BIN)/%$(SUFF).o: $(INTERCEPTION)/%.cc $(RTL_HDR) $(MAKEFILE)
+ $(CXX) $(RTL_COMMON_FLAGS) -o $@ -g $<
+
+$(BIN)/%$(SUFF).o: $(COMMON)/%.cc $(RTL_HDR) $(MAKEFILE)
+ $(CXX) $(RTL_COMMON_FLAGS) -o $@ -g $<
+
+$(BIN)/%$(SUFF).o: $(MACH_OVERRIDE)/%.c $(RTL_HDR) $(MAKEFILE)
+ $(CC) $(RTL_COMMON_FLAGS) -o $@ -g $<
$(BIN)/%$(SUFF).o: %.cc $(RTL_HDR) $(MAKEFILE)
- $(CXX) $(PIE) $(CFLAGS) -fPIC -c -O2 -fno-exceptions -funwind-tables \
- -o $@ -g $< -Ithird_party \
- -DASAN_USE_SYSINFO=1 \
+ $(CXX) $(RTL_COMMON_FLAGS) -o $@ -g $< \
-DASAN_NEEDS_SEGV=$(ASAN_NEEDS_SEGV) \
-DASAN_HAS_EXCEPTIONS=$(ASAN_HAS_EXCEPTIONS) \
- -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=$(ASAN_FLEXIBLE_MAPPING_AND_OFFSET) \
- $(ASAN_FLAGS)
+ -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=$(ASAN_FLEXIBLE_MAPPING_AND_OFFSET)
$(BIN)/%$(SUFF).o: %.c $(RTL_HDR) $(MAKEFILE)
$(CC) $(PIE) $(CFLAGS) -fPIC -c -O2 -o $@ -g $< -Ithird_party \
- -DASAN_USE_SYSINFO=1 \
$(ASAN_FLAGS)
ifeq ($(OS),darwin)
@@ -283,17 +278,16 @@ endif
lib: $(LIBASAN_A)
-$(LIBASAN_A): $(BIN) $(LIBASAN_OBJ) $(MAKEFILE)
+$(LIBASAN_A): mk_bin_dir $(LIBASAN_OBJ) $(MAKEFILE)
mkdir -p $(LIBASAN_INST_DIR)
ar ru $@ $(LIBASAN_OBJ)
$(CXX) -shared $(CFLAGS) $(LIBASAN_OBJ) $(LD_FLAGS) -o $(BIN)/libasan$(SUFF).so
TEST_OBJECTS_COMMON=\
- $(BIN)/asan_test$(SUFF).o \
$(BIN)/asan_globals_test$(SUFF).o \
$(BIN)/asan_break_optimization$(SUFF).o \
$(BIN)/asan_noinst_test$(SUFF).o \
- $(BIN)/asan_interface_test$(SUFF).o
+ $(BIN)/asan_test$(SUFF).o
BENCHMARK_OBJECTS=\
$(BIN)/asan_benchmarks_test$(SUFF).o \
@@ -323,11 +317,11 @@ $(GTEST_LIB):
mkdir -p $(GTEST_MAKE_DIR) && \
cd $(GTEST_MAKE_DIR) && \
$(MAKE) -f ../make/Makefile CXXFLAGS="$(PIE) $(CFLAGS) -g -w" \
- CXX="$(CLANG_ASAN_CXX)"
+ CXX="$(CLANG_CXX)"
-RTL_LINT_FITLER=-readability/casting,-readability/check,-build/include,-build/header_guard,-build/class,-legal/copyright
+RTL_LINT_FILTER=-readability/casting,-readability/check,-build/include,-build/header_guard,-build/class,-legal/copyright,-build/namespaces
# TODO(kcc): remove these filters one by one
-TEST_LINT_FITLER=-readability/casting,-build/include,-legal/copyright,-whitespace/newline,-runtime/sizeof,-runtime/int,-runtime/printf
+TEST_LINT_FILTER=-readability/casting,-build/include,-legal/copyright,-whitespace/newline,-runtime/sizeof,-runtime/int,-runtime/printf
LLVM_LINT_FILTER=-,+whitespace
@@ -335,8 +329,10 @@ ADDRESS_SANITIZER_CPP=../../../../lib/Transforms/Instrumentation/AddressSanitize
lint:
third_party/cpplint/cpplint.py --filter=$(LLVM_LINT_FILTER) $(ADDRESS_SANITIZER_CPP)
- third_party/cpplint/cpplint.py --filter=$(RTL_LINT_FITLER) asan_*.cc asan_*.h
- third_party/cpplint/cpplint.py --filter=$(TEST_LINT_FITLER) tests/*.cc
+ third_party/cpplint/cpplint.py --filter=$(RTL_LINT_FILTER) asan_*.cc asan_*.h
+ third_party/cpplint/cpplint.py --filter=$(RTL_LINT_FILTER) $(INTERCEPTION)/interception*.h $(INTERCEPTION)/interception*.cc
+ third_party/cpplint/cpplint.py --filter=$(RTL_LINT_FILTER) $(COMMON)/sanitizer_*.h $(COMMON)/sanitizer_*.cc
+ third_party/cpplint/cpplint.py --filter=$(TEST_LINT_FILTER) tests/*.cc output_tests/*.cc
get_third_party:
rm -rf third_party
@@ -348,5 +344,6 @@ get_third_party:
clean:
rm -f *.o *.ll *.S *.a *.log asan_test64* asan_test32* a.out perf.data log
+ rm -f $(LIBASAN_INST_DIR)/libclang_rt.asan-*.a
rm -rf $(BIN)
rm -rf $(GTEST_ROOT)/make-*
diff --git a/lib/asan/README.txt b/lib/asan/README.txt
index 00ae3c4..5e66004 100644
--- a/lib/asan/README.txt
+++ b/lib/asan/README.txt
@@ -10,7 +10,6 @@ Makefile.mk : Currently a stub for a proper makefile. not usable.
Makefile.old : Old out-of-tree makefile, the only usable one so far.
asan_*.{cc,h} : Sources of the asan run-time lirbary.
mach_override/* : Utility to override functions on Darwin (MIT License).
-sysinfo/* : Portable utility to iterate over /proc/maps (BSD License).
scripts/* : Helper scripts.
Temporary build instructions (verified on linux):
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index f86dc0b..352cce00 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -1,4 +1,4 @@
-//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
+//===-- asan_allocator.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -34,50 +34,68 @@
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_atomic.h"
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
+#if defined(_WIN32) && !defined(__clang__)
+#include <intrin.h>
+#endif
namespace __asan {
-#define REDZONE FLAG_redzone
-static const size_t kMinAllocSize = REDZONE * 2;
-static const size_t kMinMmapSize = 4UL << 20; // 4M
-static const uint64_t kMaxAvailableRam = 128ULL << 30; // 128G
-static const size_t kMaxThreadLocalQuarantine = 1 << 20; // 1M
-static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;
+#define REDZONE ((uptr)(flags()->redzone))
+static const uptr kMinAllocSize = REDZONE * 2;
+static const u64 kMaxAvailableRam = 128ULL << 30; // 128G
+static const uptr kMaxThreadLocalQuarantine = 1 << 20; // 1M
+
+static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
+static const uptr kMaxSizeForThreadLocalFreeList =
+ (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;
// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
-static const size_t kMallocSizeClassStepLog = 26;
-static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
+static const uptr kMallocSizeClassStepLog = 26;
+static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
-#if __WORDSIZE == 32
-static const size_t kMaxAllowedMallocSize = 3UL << 30; // 3G
-#else
-static const size_t kMaxAllowedMallocSize = 8UL << 30; // 8G
-#endif
+static const uptr kMaxAllowedMallocSize =
+ (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
-static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
+static inline bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
-static inline size_t Log2(size_t x) {
+static inline uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
+#if !defined(_WIN32) || defined(__clang__)
return __builtin_ctzl(x);
+#elif defined(_WIN64)
+ unsigned long ret; // NOLINT
+ _BitScanForward64(&ret, x);
+ return ret;
+#else
+ unsigned long ret; // NOLINT
+ _BitScanForward(&ret, x);
+ return ret;
+#endif
}
-static inline size_t RoundUpToPowerOfTwo(size_t size) {
+static inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
if (IsPowerOfTwo(size)) return size;
- size_t up = __WORDSIZE - __builtin_clzl(size);
- CHECK(size < (1ULL << up));
- CHECK(size > (1ULL << (up - 1)));
- return 1UL << up;
+
+ unsigned long up; // NOLINT
+#if !defined(_WIN32) || defined(__clang__)
+ up = __WORDSIZE - 1 - __builtin_clzl(size);
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, size);
+#else
+ _BitScanReverse(&up, size);
+#endif
+ CHECK(size < (1ULL << (up + 1)));
+ CHECK(size > (1ULL << up));
+ return 1UL << (up + 1);
}
-static inline size_t SizeClassToSize(uint8_t size_class) {
+static inline uptr SizeClassToSize(u8 size_class) {
CHECK(size_class < kNumberOfSizeClasses);
if (size_class <= kMallocSizeClassStepLog) {
return 1UL << size_class;
@@ -86,10 +104,10 @@ static inline size_t SizeClassToSize(uint8_t size_class) {
}
}
-static inline uint8_t SizeToSizeClass(size_t size) {
- uint8_t res = 0;
+static inline u8 SizeToSizeClass(uptr size) {
+ u8 res = 0;
if (size <= kMallocSizeClassStep) {
- size_t rounded = RoundUpToPowerOfTwo(size);
+ uptr rounded = RoundUpToPowerOfTwo(size);
res = Log2(rounded);
} else {
res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
@@ -102,7 +120,7 @@ static inline uint8_t SizeToSizeClass(size_t size) {
// Given REDZONE bytes, we need to mark first size bytes
// as addressable and the rest REDZONE-size bytes as unaddressable.
-static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
+static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
CHECK(size <= REDZONE);
CHECK(IsAligned(mem, REDZONE));
CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
@@ -112,11 +130,11 @@ static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
kAsanHeapRightRedzoneMagic);
}
-static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
+static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
CHECK(IsAligned(size, kPageSize));
- uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
- PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
- if (FLAG_debug) {
+ u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
+ PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
+ if (flags()->debug) {
Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
}
return res;
@@ -128,103 +146,114 @@ static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
-// the beginning of a AsanChunk (in which case 'next' contains the address
-// of the AsanChunk).
+// the beginning of a AsanChunk (in which the actual chunk resides at
+// this - this->used_size).
//
// The magic numbers for the enum values are taken randomly.
enum {
- CHUNK_AVAILABLE = 0x573B,
- CHUNK_ALLOCATED = 0x3204,
- CHUNK_QUARANTINE = 0x1978,
- CHUNK_MEMALIGN = 0xDC68,
+ CHUNK_AVAILABLE = 0x57,
+ CHUNK_ALLOCATED = 0x32,
+ CHUNK_QUARANTINE = 0x19,
+ CHUNK_MEMALIGN = 0xDC
};
struct ChunkBase {
- uint16_t chunk_state;
- uint8_t size_class;
- uint32_t offset; // User-visible memory starts at this+offset (beg()).
- int32_t alloc_tid;
- int32_t free_tid;
- size_t used_size; // Size requested by the user.
+ // First 8 bytes.
+ uptr chunk_state : 8;
+ uptr alloc_tid : 24;
+ uptr size_class : 8;
+ uptr free_tid : 24;
+
+ // Second 8 bytes.
+ uptr alignment_log : 8;
+ uptr used_size : FIRST_32_SECOND_64(32, 56); // Size requested by the user.
+
+ // This field may overlap with the user area and thus should not
+ // be used while the chunk is in CHUNK_ALLOCATED state.
AsanChunk *next;
- uintptr_t beg() { return (uintptr_t)this + offset; }
- size_t Size() { return SizeClassToSize(size_class); }
- uint8_t SizeClass() { return size_class; }
+ // Typically the beginning of the user-accessible memory is 'this'+REDZONE
+ // and is also aligned by REDZONE. However, if the memory is allocated
+ // by memalign, the alignment might be higher and the user-accessible memory
+ // starts at the first properly aligned address after 'this'.
+ uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
+ uptr Size() { return SizeClassToSize(size_class); }
+ u8 SizeClass() { return size_class; }
};
struct AsanChunk: public ChunkBase {
- uint32_t *compressed_alloc_stack() {
- CHECK(REDZONE >= sizeof(ChunkBase));
- return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
+ u32 *compressed_alloc_stack() {
+ return (u32*)((uptr)this + sizeof(ChunkBase));
}
- uint32_t *compressed_free_stack() {
- CHECK(REDZONE >= sizeof(ChunkBase));
- return (uint32_t*)((uintptr_t)this + REDZONE);
+ u32 *compressed_free_stack() {
+ return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
}
// The left redzone after the ChunkBase is given to the alloc stack trace.
- size_t compressed_alloc_stack_size() {
- return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
+ uptr compressed_alloc_stack_size() {
+ if (REDZONE < sizeof(ChunkBase)) return 0;
+ return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
}
- size_t compressed_free_stack_size() {
- return (REDZONE) / sizeof(uint32_t);
+ uptr compressed_free_stack_size() {
+ if (REDZONE < sizeof(ChunkBase)) return 0;
+ return (REDZONE) / sizeof(u32);
}
- bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
- if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
- *offset = addr - beg();
+ bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
+ if (addr >= Beg() && (addr + access_size) <= (Beg() + used_size)) {
+ *offset = addr - Beg();
return true;
}
return false;
}
- bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
- if (addr < beg()) {
- *offset = beg() - addr;
+ bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
+ if (addr < Beg()) {
+ *offset = Beg() - addr;
return true;
}
return false;
}
- bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
- if (addr + access_size >= beg() + used_size) {
- if (addr <= beg() + used_size)
+ bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
+ if (addr + access_size >= Beg() + used_size) {
+ if (addr <= Beg() + used_size)
*offset = 0;
else
- *offset = addr - (beg() + used_size);
+ *offset = addr - (Beg() + used_size);
return true;
}
return false;
}
- void DescribeAddress(uintptr_t addr, size_t access_size) {
- size_t offset;
- Printf("%p is located ", addr);
+ void DescribeAddress(uptr addr, uptr access_size) {
+ uptr offset;
+ AsanPrintf("%p is located ", (void*)addr);
if (AddrIsInside(addr, access_size, &offset)) {
- Printf("%ld bytes inside of", offset);
+ AsanPrintf("%zu bytes inside of", offset);
} else if (AddrIsAtLeft(addr, access_size, &offset)) {
- Printf("%ld bytes to the left of", offset);
+ AsanPrintf("%zu bytes to the left of", offset);
} else if (AddrIsAtRight(addr, access_size, &offset)) {
- Printf("%ld bytes to the right of", offset);
+ AsanPrintf("%zu bytes to the right of", offset);
} else {
- Printf(" somewhere around (this is AddressSanitizer bug!)");
+ AsanPrintf(" somewhere around (this is AddressSanitizer bug!)");
}
- Printf(" %lu-byte region [%p,%p)\n",
- used_size, beg(), beg() + used_size);
+ AsanPrintf(" %zu-byte region [%p,%p)\n",
+ used_size, (void*)Beg(), (void*)(Beg() + used_size));
}
};
-static AsanChunk *PtrToChunk(uintptr_t ptr) {
+static AsanChunk *PtrToChunk(uptr ptr) {
AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
if (m->chunk_state == CHUNK_MEMALIGN) {
- m = m->next;
+ m = (AsanChunk*)((uptr)m - m->used_size);
}
return m;
}
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
+ CHECK(q->size() > 0);
if (last_) {
CHECK(first_);
CHECK(!last_->next);
@@ -234,13 +263,16 @@ void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
CHECK(!first_);
last_ = q->last_;
first_ = q->first_;
+ CHECK(first_);
}
+ CHECK(last_);
+ CHECK(!last_->next);
size_ += q->size();
q->clear();
}
void AsanChunkFifoList::Push(AsanChunk *n) {
- CHECK(n->next == NULL);
+ CHECK(n->next == 0);
if (last_) {
CHECK(first_);
CHECK(!last_->next);
@@ -260,8 +292,8 @@ AsanChunk *AsanChunkFifoList::Pop() {
CHECK(first_);
AsanChunk *res = first_;
first_ = first_->next;
- if (first_ == NULL)
- last_ = NULL;
+ if (first_ == 0)
+ last_ = 0;
CHECK(size_ >= res->Size());
size_ -= res->Size();
if (last_) {
@@ -272,11 +304,11 @@ AsanChunk *AsanChunkFifoList::Pop() {
// All pages we ever allocated.
struct PageGroup {
- uintptr_t beg;
- uintptr_t end;
- size_t size_of_chunk;
- uintptr_t last_chunk;
- bool InRange(uintptr_t addr) {
+ uptr beg;
+ uptr end;
+ uptr size_of_chunk;
+ uptr last_chunk;
+ bool InRange(uptr addr) {
return addr >= beg && addr < end;
}
};
@@ -286,12 +318,12 @@ class MallocInfo {
explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
- AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
- AsanChunk *m = NULL;
+ AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
+ AsanChunk *m = 0;
AsanChunk **fl = &free_lists_[size_class];
{
ScopedLock lock(&mu_);
- for (size_t i = 0; i < n_chunks; i++) {
+ for (uptr i = 0; i < n_chunks; i++) {
if (!(*fl)) {
*fl = GetNewChunks(size_class);
}
@@ -307,17 +339,17 @@ class MallocInfo {
void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
bool eat_free_lists) {
- CHECK(FLAG_quarantine_size > 0);
+ CHECK(flags()->quarantine_size > 0);
ScopedLock lock(&mu_);
AsanChunkFifoList *q = &x->quarantine_;
if (q->size() > 0) {
quarantine_.PushList(q);
- while (quarantine_.size() > FLAG_quarantine_size) {
+ while (quarantine_.size() > (uptr)flags()->quarantine_size) {
QuarantinePop();
}
}
if (eat_free_lists) {
- for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
+ for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
size_class++) {
AsanChunk *m = x->free_lists_[size_class];
while (m) {
@@ -336,15 +368,13 @@ class MallocInfo {
quarantine_.Push(chunk);
}
- AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
+ AsanChunk *FindMallocedOrFreed(uptr addr, uptr access_size) {
ScopedLock lock(&mu_);
return FindChunkByAddr(addr);
}
- // TODO(glider): AllocationSize() may become very slow if the size of
- // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
- // but a better solution is to speed up the search somehow.
- size_t AllocationSize(uintptr_t ptr) {
+ uptr AllocationSize(uptr ptr) {
+ if (!ptr) return 0;
ScopedLock lock(&mu_);
// first, check if this is our memory
@@ -368,40 +398,60 @@ class MallocInfo {
void PrintStatus() {
ScopedLock lock(&mu_);
- size_t malloced = 0;
+ uptr malloced = 0;
- Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
+ Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
quarantine_.size() >> 20, malloced >> 20);
- for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
+ for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
AsanChunk *i = free_lists_[j];
if (!i) continue;
- size_t t = 0;
+ uptr t = 0;
for (; i; i = i->next) {
t += i->Size();
}
- Printf("%ld:%ld ", j, t >> 20);
+ Printf("%zu:%zu ", j, t >> 20);
}
Printf("\n");
}
- PageGroup *FindPageGroup(uintptr_t addr) {
+ PageGroup *FindPageGroup(uptr addr) {
ScopedLock lock(&mu_);
return FindPageGroupUnlocked(addr);
}
private:
- PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
- for (int i = 0; i < n_page_groups_; i++) {
- PageGroup *g = page_groups_[i];
- if (g->InRange(addr)) {
- return g;
+ PageGroup *FindPageGroupUnlocked(uptr addr) {
+ int n = atomic_load(&n_page_groups_, memory_order_relaxed);
+ // If the page groups are not sorted yet, sort them.
+ if (n_sorted_page_groups_ < n) {
+ SortArray((uptr*)page_groups_, n);
+ n_sorted_page_groups_ = n;
+ }
+ // Binary search over the page groups.
+ int beg = 0, end = n;
+ while (beg < end) {
+ int med = (beg + end) / 2;
+ uptr g = (uptr)page_groups_[med];
+ if (addr > g) {
+ // 'g' points to the end of the group, so 'addr'
+ // may not belong to page_groups_[med] or any previous group.
+ beg = med + 1;
+ } else {
+ // 'addr' may belong to page_groups_[med] or a previous group.
+ end = med;
}
}
- return NULL;
+ if (beg >= n)
+ return 0;
+ PageGroup *g = page_groups_[beg];
+ CHECK(g);
+ if (g->InRange(addr))
+ return g;
+ return 0;
}
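
The loop above is the standard lower-bound binary search: the array of group pointers is kept sorted (SortArray above), each PageGroup object is placed at the tail of its own mapping so its address stands in for the group's end (as the in-line comments note), and the loop narrows [beg, end) to the first group that could still contain addr before doing a single InRange() check. A minimal, self-contained sketch of the same lower-bound pattern (illustrative only, not part of the runtime):

  #include <stdint.h>

  // Returns the index of the first element of 'sorted' that is >= addr,
  // or n if there is none -- the same invariant the loop above maintains.
  static int LowerBound(const uintptr_t *sorted, int n, uintptr_t addr) {
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      if (addr > sorted[med])
        beg = med + 1;  // sorted[med] and everything before it is too small
      else
        end = med;      // sorted[med] may still be the answer
    }
    return beg;
  }
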
// We have an address between two chunks, and we want to report just one.
- AsanChunk *ChooseChunk(uintptr_t addr,
+ AsanChunk *ChooseChunk(uptr addr,
AsanChunk *left_chunk, AsanChunk *right_chunk) {
// Prefer an allocated chunk or a chunk from quarantine.
if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
@@ -411,7 +461,7 @@ class MallocInfo {
left_chunk->chunk_state != CHUNK_AVAILABLE)
return left_chunk;
// Choose based on offset.
- size_t l_offset = 0, r_offset = 0;
+ uptr l_offset = 0, r_offset = 0;
CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
if (l_offset < r_offset)
@@ -419,33 +469,33 @@ class MallocInfo {
return right_chunk;
}
- AsanChunk *FindChunkByAddr(uintptr_t addr) {
+ AsanChunk *FindChunkByAddr(uptr addr) {
PageGroup *g = FindPageGroupUnlocked(addr);
if (!g) return 0;
CHECK(g->size_of_chunk);
- uintptr_t offset_from_beg = addr - g->beg;
- uintptr_t this_chunk_addr = g->beg +
+ uptr offset_from_beg = addr - g->beg;
+ uptr this_chunk_addr = g->beg +
(offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
CHECK(g->InRange(this_chunk_addr));
AsanChunk *m = (AsanChunk*)this_chunk_addr;
CHECK(m->chunk_state == CHUNK_ALLOCATED ||
m->chunk_state == CHUNK_AVAILABLE ||
m->chunk_state == CHUNK_QUARANTINE);
- size_t offset = 0;
+ uptr offset = 0;
if (m->AddrIsInside(addr, 1, &offset))
return m;
if (m->AddrIsAtRight(addr, 1, &offset)) {
if (this_chunk_addr == g->last_chunk) // rightmost chunk
return m;
- uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
+ uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
CHECK(g->InRange(right_chunk_addr));
return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
} else {
CHECK(m->AddrIsAtLeft(addr, 1, &offset));
if (this_chunk_addr == g->beg) // leftmost chunk
return m;
- uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
+ uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
CHECK(g->InRange(left_chunk_addr));
return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
}
@@ -459,10 +509,11 @@ class MallocInfo {
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
+ PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
CHECK(m->alloc_tid >= 0);
CHECK(m->free_tid >= 0);
- size_t size_class = m->SizeClass();
+ uptr size_class = m->SizeClass();
m->next = free_lists_[size_class];
free_lists_[size_class] = m;
@@ -475,12 +526,12 @@ class MallocInfo {
}
// Get a list of newly allocated chunks.
- AsanChunk *GetNewChunks(uint8_t size_class) {
- size_t size = SizeClassToSize(size_class);
+ AsanChunk *GetNewChunks(u8 size_class) {
+ uptr size = SizeClassToSize(size_class);
CHECK(IsPowerOfTwo(kMinMmapSize));
CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
- size_t mmap_size = Max(size, kMinMmapSize);
- size_t n_chunks = mmap_size / size;
+ uptr mmap_size = Max(size, kMinMmapSize);
+ uptr n_chunks = mmap_size / size;
CHECK(n_chunks * size == mmap_size);
if (size < kPageSize) {
// Size is small, just poison the last chunk.
@@ -490,7 +541,7 @@ class MallocInfo {
mmap_size += kPageSize;
}
CHECK(n_chunks > 0);
- uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);
+ u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
@@ -498,8 +549,8 @@ class MallocInfo {
thread_stats.mmaped += mmap_size;
thread_stats.mmaped_by_size[size_class] += n_chunks;
- AsanChunk *res = NULL;
- for (size_t i = 0; i < n_chunks; i++) {
+ AsanChunk *res = 0;
+ for (uptr i = 0; i < n_chunks; i++) {
AsanChunk *m = (AsanChunk*)(mem + i * size);
m->chunk_state = CHUNK_AVAILABLE;
m->size_class = size_class;
@@ -508,13 +559,13 @@ class MallocInfo {
}
PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
// This memory is already poisoned, no need to poison it again.
- pg->beg = (uintptr_t)mem;
+ pg->beg = (uptr)mem;
pg->end = pg->beg + mmap_size;
pg->size_of_chunk = size;
- pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
- int page_group_idx = AtomicInc(&n_page_groups_) - 1;
- CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
- page_groups_[page_group_idx] = pg;
+ pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
+ int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
+ CHECK(idx < (int)ASAN_ARRAY_SIZE(page_groups_));
+ page_groups_[idx] = pg;
return res;
}
@@ -523,7 +574,8 @@ class MallocInfo {
AsanLock mu_;
PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
- int n_page_groups_; // atomic
+ atomic_uint32_t n_page_groups_;
+ int n_sorted_page_groups_;
};
static MallocInfo malloc_info(LINKER_INITIALIZED);
@@ -532,7 +584,7 @@ void AsanThreadLocalMallocStorage::CommitBack() {
malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
-static void Describe(uintptr_t addr, size_t access_size) {
+static void Describe(uptr addr, uptr access_size) {
AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
if (!m) return;
m->DescribeAddress(addr, access_size);
@@ -544,55 +596,56 @@ static void Describe(uintptr_t addr, size_t access_size) {
m->compressed_alloc_stack_size());
AsanThread *t = asanThreadRegistry().GetCurrent();
CHECK(t);
- if (m->free_tid >= 0) {
+ if (m->free_tid != kInvalidTid) {
AsanThreadSummary *free_thread =
asanThreadRegistry().FindByTid(m->free_tid);
- Printf("freed by thread T%d here:\n", free_thread->tid());
+ AsanPrintf("freed by thread T%d here:\n", free_thread->tid());
AsanStackTrace free_stack;
AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
m->compressed_free_stack_size());
free_stack.PrintStack();
- Printf("previously allocated by thread T%d here:\n",
- alloc_thread->tid());
+ AsanPrintf("previously allocated by thread T%d here:\n",
+ alloc_thread->tid());
alloc_stack.PrintStack();
t->summary()->Announce();
free_thread->Announce();
alloc_thread->Announce();
} else {
- Printf("allocated by thread T%d here:\n", alloc_thread->tid());
+ AsanPrintf("allocated by thread T%d here:\n", alloc_thread->tid());
alloc_stack.PrintStack();
t->summary()->Announce();
alloc_thread->Announce();
}
}
-static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
+static u8 *Allocate(uptr alignment, uptr size, AsanStackTrace *stack) {
__asan_init();
CHECK(stack);
if (size == 0) {
size = 1; // TODO(kcc): do something smarter
}
CHECK(IsPowerOfTwo(alignment));
- size_t rounded_size = RoundUpTo(size, REDZONE);
- size_t needed_size = rounded_size + REDZONE;
+ uptr rounded_size = RoundUpTo(size, REDZONE);
+ uptr needed_size = rounded_size + REDZONE;
if (alignment > REDZONE) {
needed_size += alignment;
}
CHECK(IsAligned(needed_size, REDZONE));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
- Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
+ Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
+ (void*)size);
return 0;
}
- uint8_t size_class = SizeToSizeClass(needed_size);
- size_t size_to_allocate = SizeClassToSize(size_class);
+ u8 size_class = SizeToSizeClass(needed_size);
+ uptr size_to_allocate = SizeClassToSize(size_class);
CHECK(size_to_allocate >= kMinAllocSize);
CHECK(size_to_allocate >= needed_size);
CHECK(IsAligned(size_to_allocate, REDZONE));
- if (FLAG_v >= 2) {
- Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
+ if (flags()->verbosity >= 3) {
+ Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
alignment, size, size_class, size_to_allocate);
}
@@ -604,7 +657,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
thread_stats.malloced_redzones += size_to_allocate - size;
thread_stats.malloced_by_size[size_class]++;
- AsanChunk *m = NULL;
+ AsanChunk *m = 0;
if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
// get directly from global storage.
m = malloc_info.AllocateChunks(size_class, 1);
@@ -613,7 +666,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
// get from the thread-local storage.
AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
if (!*fl) {
- size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
+ uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
*fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
thread_stats.malloc_small_slow++;
}
@@ -623,24 +676,27 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
CHECK(m);
CHECK(m->chunk_state == CHUNK_AVAILABLE);
m->chunk_state = CHUNK_ALLOCATED;
- m->next = NULL;
+ m->next = 0;
CHECK(m->Size() == size_to_allocate);
- uintptr_t addr = (uintptr_t)m + REDZONE;
- CHECK(addr == (uintptr_t)m->compressed_free_stack());
+ uptr addr = (uptr)m + REDZONE;
+ CHECK(addr <= (uptr)m->compressed_free_stack());
if (alignment > REDZONE && (addr & (alignment - 1))) {
addr = RoundUpTo(addr, alignment);
CHECK((addr & (alignment - 1)) == 0);
AsanChunk *p = (AsanChunk*)(addr - REDZONE);
p->chunk_state = CHUNK_MEMALIGN;
- p->next = m;
+ p->used_size = (uptr)p - (uptr)m;
+ m->alignment_log = Log2(alignment);
+ CHECK(m->Beg() == addr);
+ } else {
+ m->alignment_log = Log2(REDZONE);
}
CHECK(m == PtrToChunk(addr));
m->used_size = size;
- m->offset = addr - (uintptr_t)m;
- CHECK(m->beg() == addr);
+ CHECK(m->Beg() == addr);
m->alloc_tid = t ? t->tid() : 0;
- m->free_tid = AsanThread::kInvalidTid;
+ m->free_tid = kInvalidTid;
AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
m->compressed_alloc_stack_size());
PoisonShadow(addr, rounded_size, 0);
@@ -648,42 +704,49 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
size & (REDZONE - 1));
}
- if (size <= FLAG_max_malloc_fill_size) {
- real_memset((void*)addr, 0, rounded_size);
+ if (size <= (uptr)(flags()->max_malloc_fill_size)) {
+ REAL(memset)((void*)addr, 0, rounded_size);
}
- return (uint8_t*)addr;
+ return (u8*)addr;
}
-static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
+static void Deallocate(u8 *ptr, AsanStackTrace *stack) {
if (!ptr) return;
CHECK(stack);
- if (FLAG_debug) {
- CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
+ if (flags()->debug) {
+ CHECK(malloc_info.FindPageGroup((uptr)ptr));
}
// Printf("Deallocate %p\n", ptr);
- AsanChunk *m = PtrToChunk((uintptr_t)ptr);
- if (m->chunk_state == CHUNK_QUARANTINE) {
- Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
+ AsanChunk *m = PtrToChunk((uptr)ptr);
+
+ // Flip the chunk_state atomically to avoid race on double-free.
+ u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
+ memory_order_acq_rel);
+
+ if (old_chunk_state == CHUNK_QUARANTINE) {
+ AsanReport("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
stack->PrintStack();
- m->DescribeAddress((uintptr_t)ptr, 1);
+ Describe((uptr)ptr, 1);
ShowStatsAndAbort();
- } else if (m->chunk_state != CHUNK_ALLOCATED) {
- Report("ERROR: AddressSanitizer attempting free on address which was not"
- " malloc()-ed: %p\n", ptr);
+ } else if (old_chunk_state != CHUNK_ALLOCATED) {
+ AsanReport("ERROR: AddressSanitizer attempting free on address "
+ "which was not malloc()-ed: %p\n", ptr);
stack->PrintStack();
ShowStatsAndAbort();
}
- CHECK(m->chunk_state == CHUNK_ALLOCATED);
- CHECK(m->free_tid == AsanThread::kInvalidTid);
+ CHECK(old_chunk_state == CHUNK_ALLOCATED);
+ // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
+ CHECK(REDZONE <= 16 || !m->next);
+ CHECK(m->free_tid == kInvalidTid);
CHECK(m->alloc_tid >= 0);
AsanThread *t = asanThreadRegistry().GetCurrent();
m->free_tid = t ? t->tid() : 0;
AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
m->compressed_free_stack_size());
- size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
- PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);
+ uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
+ PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
@@ -691,22 +754,21 @@ static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
thread_stats.freed += m->used_size;
thread_stats.freed_by_size[m->SizeClass()]++;
- m->chunk_state = CHUNK_QUARANTINE;
+ CHECK(m->chunk_state == CHUNK_QUARANTINE);
+
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
- CHECK(!m->next);
ms->quarantine_.Push(m);
if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
malloc_info.SwallowThreadLocalMallocStorage(ms, false);
}
} else {
- CHECK(!m->next);
malloc_info.BypassThreadLocalQuarantine(m);
}
}
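
The atomic exchange on m->chunk_state above is what makes the double-free check race-free: whichever thread flips the state to CHUNK_QUARANTINE first wins and carries on with the free, while any later caller observes the already-quarantined value and reports. A minimal sketch of the same pattern with standard C++11 atomics (illustrative only; the runtime uses its own atomic_uint8_t/atomic_exchange wrappers, and the state values below are made up):

  #include <atomic>
  #include <cstdio>
  #include <cstdlib>

  enum : unsigned char { kAllocated = 2, kQuarantine = 3 };  // illustrative values

  void Release(std::atomic<unsigned char> *state) {
    // Exactly one caller can move kAllocated -> kQuarantine; every later
    // caller sees kQuarantine and reports a double free.
    unsigned char old = state->exchange(kQuarantine, std::memory_order_acq_rel);
    if (old == kQuarantine) {
      std::fprintf(stderr, "double free detected\n");
      std::abort();
    }
  }
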
-static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
+static u8 *Reallocate(u8 *old_ptr, uptr new_size,
AsanStackTrace *stack) {
CHECK(old_ptr && new_size);
@@ -715,13 +777,14 @@ static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
thread_stats.reallocs++;
thread_stats.realloced += new_size;
- AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
+ AsanChunk *m = PtrToChunk((uptr)old_ptr);
CHECK(m->chunk_state == CHUNK_ALLOCATED);
- size_t old_size = m->used_size;
- size_t memcpy_size = Min(new_size, old_size);
- uint8_t *new_ptr = Allocate(0, new_size, stack);
+ uptr old_size = m->used_size;
+ uptr memcpy_size = Min(new_size, old_size);
+ u8 *new_ptr = Allocate(0, new_size, stack);
if (new_ptr) {
- real_memcpy(new_ptr, old_ptr, memcpy_size);
+ CHECK(REAL(memcpy) != 0);
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, stack);
}
return new_ptr;
@@ -738,9 +801,9 @@ static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
// program must provide implementation of this hook.
// If macro is undefined, the hook is no-op.
#ifdef ASAN_NEW_HOOK
-extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
+extern "C" void ASAN_NEW_HOOK(void *ptr, uptr size);
#else
-static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
+static inline void ASAN_NEW_HOOK(void *ptr, uptr size) { }
#endif
#ifdef ASAN_DELETE_HOOK
@@ -751,7 +814,7 @@ static inline void ASAN_DELETE_HOOK(void *ptr) { }
namespace __asan {
-void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
+void *asan_memalign(uptr alignment, uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(alignment, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
@@ -759,43 +822,43 @@ void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
void asan_free(void *ptr, AsanStackTrace *stack) {
ASAN_DELETE_HOOK(ptr);
- Deallocate((uint8_t*)ptr, stack);
+ Deallocate((u8*)ptr, stack);
}
-void *asan_malloc(size_t size, AsanStackTrace *stack) {
+void *asan_malloc(uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(0, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
}
-void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
+void *asan_calloc(uptr nmemb, uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(0, nmemb * size, stack);
if (ptr)
- real_memset(ptr, 0, nmemb * size);
+ REAL(memset)(ptr, 0, nmemb * size);
ASAN_NEW_HOOK(ptr, nmemb * size);
return ptr;
}
-void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
- if (p == NULL) {
+void *asan_realloc(void *p, uptr size, AsanStackTrace *stack) {
+ if (p == 0) {
void *ptr = (void*)Allocate(0, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
} else if (size == 0) {
ASAN_DELETE_HOOK(p);
- Deallocate((uint8_t*)p, stack);
- return NULL;
+ Deallocate((u8*)p, stack);
+ return 0;
}
- return Reallocate((uint8_t*)p, size, stack);
+ return Reallocate((u8*)p, size, stack);
}
-void *asan_valloc(size_t size, AsanStackTrace *stack) {
+void *asan_valloc(uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(kPageSize, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
}
-void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
+void *asan_pvalloc(uptr size, AsanStackTrace *stack) {
size = RoundUpTo(size, kPageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
@@ -806,61 +869,76 @@ void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
return ptr;
}
-int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
AsanStackTrace *stack) {
void *ptr = Allocate(alignment, size, stack);
- CHECK(IsAligned((uintptr_t)ptr, alignment));
+ CHECK(IsAligned((uptr)ptr, alignment));
ASAN_NEW_HOOK(ptr, size);
*memptr = ptr;
return 0;
}
-size_t __asan_mz_size(const void *ptr) {
- return malloc_info.AllocationSize((uintptr_t)ptr);
+uptr asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
+ CHECK(stack);
+ if (ptr == 0) return 0;
+ uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
+ if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+ AsanReport("ERROR: AddressSanitizer attempting to call "
+ "malloc_usable_size() for pointer which is "
+ "not owned: %p\n", ptr);
+ stack->PrintStack();
+ Describe((uptr)ptr, 1);
+ ShowStatsAndAbort();
+ }
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ return malloc_info.AllocationSize((uptr)ptr);
}
-void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
+void DescribeHeapAddress(uptr addr, uptr access_size) {
Describe(addr, access_size);
}
-void __asan_mz_force_lock() {
+void asan_mz_force_lock() {
malloc_info.ForceLock();
}
-void __asan_mz_force_unlock() {
+void asan_mz_force_unlock() {
malloc_info.ForceUnlock();
}
// ---------------------- Fake stack-------------------- {{{1
FakeStack::FakeStack() {
- CHECK(real_memset);
- real_memset(this, 0, sizeof(*this));
+ CHECK(REAL(memset) != 0);
+ REAL(memset)(this, 0, sizeof(*this));
}
-bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
- uintptr_t mem = allocated_size_classes_[size_class];
- uintptr_t size = ClassMmapSize(size_class);
+bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
+ uptr mem = allocated_size_classes_[size_class];
+ uptr size = ClassMmapSize(size_class);
bool res = mem && addr >= mem && addr < mem + size;
return res;
}
-uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
- for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
+uptr FakeStack::AddrIsInFakeStack(uptr addr) {
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
}
return 0;
}
// We may want to compute this during compilation.
-inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
- size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
- size_t log = Log2(rounded_size);
+inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
+ uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
+ uptr log = Log2(rounded_size);
CHECK(alloc_size <= (1UL << log));
if (!(alloc_size > (1UL << (log-1)))) {
- Printf("alloc_size %ld log %ld\n", alloc_size, log);
+ Printf("alloc_size %zu log %zu\n", alloc_size, log);
}
CHECK(alloc_size > (1UL << (log-1)));
- size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
+ uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
CHECK(res < kNumberOfSizeClasses);
CHECK(ClassSize(res) >= rounded_size);
return res;
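
A worked example, assuming kMinStackFrameSizeLog == 9 as declared in asan_allocator.h: a 700-byte frame rounds up to 1024, Log2(1024) is 10, so the size class is 10 - 9 = 1 and ClassSize(1) == 1 << (1 + 9) == 1024, which indeed covers the request.
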
@@ -892,36 +970,36 @@ FakeFrame *FakeFrameFifo::FifoPop() {
return res;
}
-void FakeStack::Init(size_t stack_size) {
+void FakeStack::Init(uptr stack_size) {
stack_size_ = stack_size;
alive_ = true;
}
void FakeStack::Cleanup() {
alive_ = false;
- for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
- uintptr_t mem = allocated_size_classes_[i];
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+ uptr mem = allocated_size_classes_[i];
if (mem) {
PoisonShadow(mem, ClassMmapSize(i), 0);
allocated_size_classes_[i] = 0;
- AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
+ UnmapOrDie((void*)mem, ClassMmapSize(i));
}
}
}
-size_t FakeStack::ClassMmapSize(size_t size_class) {
+uptr FakeStack::ClassMmapSize(uptr size_class) {
return RoundUpToPowerOfTwo(stack_size_);
}
-void FakeStack::AllocateOneSizeClass(size_t size_class) {
+void FakeStack::AllocateOneSizeClass(uptr size_class) {
CHECK(ClassMmapSize(size_class) >= kPageSize);
- uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
+ uptr new_mem = (uptr)MmapOrDie(
ClassMmapSize(size_class), __FUNCTION__);
- // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
+ // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
// asanThreadRegistry().GetCurrent()->tid(),
// size_class, new_mem, new_mem + ClassMmapSize(size_class),
// ClassMmapSize(size_class));
- size_t i;
+ uptr i;
for (i = 0; i < ClassMmapSize(size_class);
i += ClassSize(size_class)) {
size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
@@ -930,10 +1008,10 @@ void FakeStack::AllocateOneSizeClass(size_t size_class) {
allocated_size_classes_[size_class] = new_mem;
}
-uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
+uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
if (!alive_) return real_stack;
CHECK(size <= kMaxStackMallocSize && size > 1);
- size_t size_class = ComputeSizeClass(size);
+ uptr size_class = ComputeSizeClass(size);
if (!allocated_size_classes_[size_class]) {
AllocateOneSizeClass(size_class);
}
@@ -947,23 +1025,23 @@ uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
DeallocateFrame(top);
}
call_stack_.LifoPush(fake_frame);
- uintptr_t ptr = (uintptr_t)fake_frame;
+ uptr ptr = (uptr)fake_frame;
PoisonShadow(ptr, size, 0);
return ptr;
}
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
CHECK(alive_);
- size_t size = fake_frame->size_minus_one + 1;
- size_t size_class = ComputeSizeClass(size);
+ uptr size = fake_frame->size_minus_one + 1;
+ uptr size_class = ComputeSizeClass(size);
CHECK(allocated_size_classes_[size_class]);
- uintptr_t ptr = (uintptr_t)fake_frame;
+ uptr ptr = (uptr)fake_frame;
CHECK(AddrIsInSizeClass(ptr, size_class));
CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
size_classes_[size_class].FifoPush(fake_frame);
}
-void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
+void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
FakeFrame *fake_frame = (FakeFrame*)ptr;
CHECK(fake_frame->magic = kRetiredStackFrameMagic);
CHECK(fake_frame->descr != 0);
@@ -976,20 +1054,20 @@ void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-size_t __asan_stack_malloc(size_t size, size_t real_stack) {
- if (!FLAG_use_fake_stack) return real_stack;
+uptr __asan_stack_malloc(uptr size, uptr real_stack) {
+ if (!flags()->use_fake_stack) return real_stack;
AsanThread *t = asanThreadRegistry().GetCurrent();
if (!t) {
// TSD is gone, use the real stack.
return real_stack;
}
- size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
- // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
+ uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
+ // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
return ptr;
}
-void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
- if (!FLAG_use_fake_stack) return;
+void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
+ if (!flags()->use_fake_stack) return;
if (ptr != real_stack) {
FakeStack::OnFree(ptr, size, real_stack);
}
@@ -997,23 +1075,25 @@ void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
-size_t __asan_get_estimated_allocated_size(size_t size) {
+uptr __asan_get_estimated_allocated_size(uptr size) {
if (size == 0) return 1;
return Min(size, kMaxAllowedMallocSize);
}
bool __asan_get_ownership(const void *p) {
- return (p == NULL) ||
- (malloc_info.AllocationSize((uintptr_t)p) > 0);
+ return malloc_info.AllocationSize((uptr)p) > 0;
}
-size_t __asan_get_allocated_size(const void *p) {
- if (p == NULL) return 0;
- size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
+uptr __asan_get_allocated_size(const void *p) {
+ if (p == 0) return 0;
+ uptr allocated_size = malloc_info.AllocationSize((uptr)p);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
- Printf("__asan_get_allocated_size failed, ptr=%p is not owned\n", p);
+ AsanReport("ERROR: AddressSanitizer attempting to call "
+ "__asan_get_allocated_size() for pointer which is "
+ "not owned: %p\n", p);
PRINT_CURRENT_STACK();
+ Describe((uptr)p, 1);
ShowStatsAndAbort();
}
return allocated_size;
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 9b691e0..2aed598 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -20,7 +20,7 @@
namespace __asan {
-static const size_t kNumberOfSizeClasses = 255;
+static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
class AsanChunkFifoList {
@@ -30,23 +30,23 @@ class AsanChunkFifoList {
void Push(AsanChunk *n);
void PushList(AsanChunkFifoList *q);
AsanChunk *Pop();
- size_t size() { return size_; }
+ uptr size() { return size_; }
void clear() {
- first_ = last_ = NULL;
+ first_ = last_ = 0;
size_ = 0;
}
private:
AsanChunk *first_;
AsanChunk *last_;
- size_t size_;
+ uptr size_;
};
struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
: quarantine_(x) { }
AsanThreadLocalMallocStorage() {
- CHECK(real_memset);
- real_memset(this, 0, sizeof(AsanThreadLocalMallocStorage));
+ CHECK(REAL(memset));
+ REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}
AsanChunkFifoList quarantine_;
@@ -57,11 +57,11 @@ struct AsanThreadLocalMallocStorage {
// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
- uintptr_t magic; // Modified by the instrumented code.
- uintptr_t descr; // Modified by the instrumented code.
+ uptr magic; // Modified by the instrumented code.
+ uptr descr; // Modified by the instrumented code.
FakeFrame *next;
- uint64_t real_stack : 48;
- uint64_t size_minus_one : 16;
+ u64 real_stack : 48;
+ u64 size_minus_one : 16;
};
struct FakeFrameFifo {
@@ -100,58 +100,60 @@ class FakeStack {
public:
FakeStack();
explicit FakeStack(LinkerInitialized) {}
- void Init(size_t stack_size);
+ void Init(uptr stack_size);
void StopUsingFakeStack() { alive_ = false; }
void Cleanup();
- uintptr_t AllocateStack(size_t size, size_t real_stack);
- static void OnFree(size_t ptr, size_t size, size_t real_stack);
+ uptr AllocateStack(uptr size, uptr real_stack);
+ static void OnFree(uptr ptr, uptr size, uptr real_stack);
// Return the bottom of the mapped region.
- uintptr_t AddrIsInFakeStack(uintptr_t addr);
+ uptr AddrIsInFakeStack(uptr addr);
+ bool StackSize() { return stack_size_; }
private:
- static const size_t kMinStackFrameSizeLog = 9; // Min frame is 512B.
- static const size_t kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
- static const size_t kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
- static const size_t kNumberOfSizeClasses =
+ static const uptr kMinStackFrameSizeLog = 9; // Min frame is 512B.
+ static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
+ static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
+ static const uptr kNumberOfSizeClasses =
kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
- bool AddrIsInSizeClass(uintptr_t addr, size_t size_class);
+ bool AddrIsInSizeClass(uptr addr, uptr size_class);
// Each size class should be large enough to hold all frames.
- size_t ClassMmapSize(size_t size_class);
+ uptr ClassMmapSize(uptr size_class);
- size_t ClassSize(size_t size_class) {
+ uptr ClassSize(uptr size_class) {
return 1UL << (size_class + kMinStackFrameSizeLog);
}
void DeallocateFrame(FakeFrame *fake_frame);
- size_t ComputeSizeClass(size_t alloc_size);
- void AllocateOneSizeClass(size_t size_class);
+ uptr ComputeSizeClass(uptr alloc_size);
+ void AllocateOneSizeClass(uptr size_class);
- size_t stack_size_;
+ uptr stack_size_;
bool alive_;
- uintptr_t allocated_size_classes_[kNumberOfSizeClasses];
+ uptr allocated_size_classes_[kNumberOfSizeClasses];
FakeFrameFifo size_classes_[kNumberOfSizeClasses];
FakeFrameLifo call_stack_;
};
-void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack);
+void *asan_memalign(uptr alignment, uptr size, AsanStackTrace *stack);
void asan_free(void *ptr, AsanStackTrace *stack);
-void *asan_malloc(size_t size, AsanStackTrace *stack);
-void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack);
-void *asan_realloc(void *p, size_t size, AsanStackTrace *stack);
-void *asan_valloc(size_t size, AsanStackTrace *stack);
-void *asan_pvalloc(size_t size, AsanStackTrace *stack);
+void *asan_malloc(uptr size, AsanStackTrace *stack);
+void *asan_calloc(uptr nmemb, uptr size, AsanStackTrace *stack);
+void *asan_realloc(void *p, uptr size, AsanStackTrace *stack);
+void *asan_valloc(uptr size, AsanStackTrace *stack);
+void *asan_pvalloc(uptr size, AsanStackTrace *stack);
-int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
AsanStackTrace *stack);
+uptr asan_malloc_usable_size(void *ptr, AsanStackTrace *stack);
-size_t __asan_mz_size(const void *ptr);
-void __asan_mz_force_lock();
-void __asan_mz_force_unlock();
-void DescribeHeapAddress(uintptr_t addr, size_t access_size);
+uptr asan_mz_size(const void *ptr);
+void asan_mz_force_lock();
+void asan_mz_force_unlock();
+void DescribeHeapAddress(uptr addr, uptr access_size);
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
diff --git a/lib/asan/asan_flags.h b/lib/asan/asan_flags.h
new file mode 100644
index 0000000..ca9cf84
--- /dev/null
+++ b/lib/asan/asan_flags.h
@@ -0,0 +1,97 @@
+//===-- asan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_FLAGS_H
+#define ASAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_interface_defs.h"
+
+// ASan flag values can be defined in three ways:
+// 1) initialized with default values at startup.
+// 2) overridden from a string returned by the user-specified function
+//    __asan_default_options().
+// 3) overridden from the env variable ASAN_OPTIONS.
+
+extern "C" {
+// Can be overridden by the user.
+const char *__asan_default_options() SANITIZER_WEAK_ATTRIBUTE;
+} // extern "C"
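
For illustration (a sketch, not part of this header), a program can bake in defaults via way 2 and still override them at run time via way 3. The flag names follow the Flags struct declared below; the values are made up:

  // User code: compiled-in defaults returned through the weak hook above.
  extern "C" const char *__asan_default_options() {
    return "verbosity=1:redzone=64";
  }

At run time the same flags are typically supplied through the environment, e.g. ASAN_OPTIONS=quarantine_size=16777216 ./a.out.
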
+
+namespace __asan {
+
+struct Flags {
+ // Size (in bytes) of quarantine used to detect use-after-free errors.
+ // Lower value may reduce memory usage but increase the chance of
+ // false negatives.
+ int quarantine_size;
+ // If set, uses in-process symbolizer from common sanitizer runtime.
+ bool symbolize;
+ // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
+ int verbosity;
+ // Size (in bytes) of redzones around heap objects.
+ // Requirement: redzone >= 32, is a power of two.
+ int redzone;
+ // If set, prints some debugging information and does additional checks.
+ bool debug;
+ // Controls the way to handle globals (0 - don't detect buffer overflow
+ // on globals, 1 - detect buffer overflow, 2 - print data about registered
+ // globals).
+ int report_globals;
+ // Max number of stack frames kept for each allocation.
+ int malloc_context_size;
+ // If set, uses custom wrappers and replacements for libc string functions
+ // to find more errors.
+ bool replace_str;
+  // If set, uses custom wrappers for memset/memcpy/memmove intrinsics.
+ bool replace_intrin;
+ // Used on Mac only. See comments in asan_mac.cc and asan_malloc_mac.cc.
+ bool replace_cfallocator;
+ // Used on Mac only.
+ bool mac_ignore_invalid_free;
+ // ASan allocator flag. See asan_allocator.cc.
+ bool use_fake_stack;
+ // ASan allocator flag. Sets the maximal size of allocation request
+ // that would return memory filled with zero bytes.
+ int max_malloc_fill_size;
+ // Override exit status if something was reported.
+ int exitcode;
+ // If set, user may manually mark memory regions as poisoned or unpoisoned.
+ bool allow_user_poisoning;
+ // Number of seconds to sleep between printing an error report and
+  // terminating the application. Useful for debugging (when one needs
+ // to attach gdb, for example).
+ int sleep_before_dying;
+ // If set, registers ASan custom segv handler.
+ bool handle_segv;
+ // If set, uses alternate stack for signal handling.
+ bool use_sigaltstack;
+ // Allow the users to work around the bug in Nvidia drivers prior to 295.*.
+ bool check_malloc_usable_size;
+ // If set, explicitly unmaps (huge) shadow at exit.
+ bool unmap_shadow_on_exit;
+ // If set, calls abort() instead of _exit() after printing an error report.
+ bool abort_on_error;
+ // If set, prints ASan exit stats even after program terminates successfully.
+ bool atexit;
+ // By default, disable core dumper on 64-bit - it makes little sense
+ // to dump 16T+ core.
+ bool disable_core;
+};
+
+Flags *flags();
+void InitializeFlags(Flags *f, const char *env);
+
+} // namespace __asan
+
+#endif // ASAN_FLAGS_H
diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc
index f53bf38..f8c4040 100644
--- a/lib/asan/asan_globals.cc
+++ b/lib/asan/asan_globals.cc
@@ -1,4 +1,4 @@
-//===-- asan_globals.cc -----------------------------------------*- C++ -*-===//
+//===-- asan_globals.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -36,16 +36,16 @@ static ListOfGlobals *list_of_globals;
static LowLevelAllocator allocator_for_globals(LINKER_INITIALIZED);
void PoisonRedZones(const Global &g) {
- size_t shadow_rz_size = kGlobalAndStackRedzone >> SHADOW_SCALE;
+ uptr shadow_rz_size = kGlobalAndStackRedzone >> SHADOW_SCALE;
CHECK(shadow_rz_size == 1 || shadow_rz_size == 2 || shadow_rz_size == 4);
// full right redzone
- size_t g_aligned_size = kGlobalAndStackRedzone *
+ uptr g_aligned_size = kGlobalAndStackRedzone *
((g.size + kGlobalAndStackRedzone - 1) / kGlobalAndStackRedzone);
PoisonShadow(g.beg + g_aligned_size,
kGlobalAndStackRedzone, kAsanGlobalRedzoneMagic);
if ((g.size % kGlobalAndStackRedzone) != 0) {
// partial right redzone
- uint64_t g_aligned_down_size = kGlobalAndStackRedzone *
+ u64 g_aligned_down_size = kGlobalAndStackRedzone *
(g.size / kGlobalAndStackRedzone);
CHECK(g_aligned_down_size == g_aligned_size - kGlobalAndStackRedzone);
PoisonShadowPartialRightRedzone(g.beg + g_aligned_down_size,
@@ -55,47 +55,47 @@ void PoisonRedZones(const Global &g) {
}
}
-static size_t GetAlignedSize(size_t size) {
+static uptr GetAlignedSize(uptr size) {
return ((size + kGlobalAndStackRedzone - 1) / kGlobalAndStackRedzone)
* kGlobalAndStackRedzone;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
void PrintIfASCII(const Global &g) {
- for (size_t p = g.beg; p < g.beg + g.size - 1; p++) {
+ for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
if (!isascii(*(char*)p)) return;
}
if (*(char*)(g.beg + g.size - 1) != 0) return;
- Printf(" '%s' is ascii string '%s'\n", g.name, g.beg);
+ AsanPrintf(" '%s' is ascii string '%s'\n", g.name, (char*)g.beg);
}
-bool DescribeAddrIfMyRedZone(const Global &g, uintptr_t addr) {
+bool DescribeAddrIfMyRedZone(const Global &g, uptr addr) {
if (addr < g.beg - kGlobalAndStackRedzone) return false;
if (addr >= g.beg + g.size_with_redzone) return false;
- Printf("%p is located ", addr);
+ AsanPrintf("%p is located ", (void*)addr);
if (addr < g.beg) {
- Printf("%d bytes to the left", g.beg - addr);
+ AsanPrintf("%zd bytes to the left", g.beg - addr);
} else if (addr >= g.beg + g.size) {
- Printf("%d bytes to the right", addr - (g.beg + g.size));
+ AsanPrintf("%zd bytes to the right", addr - (g.beg + g.size));
} else {
- Printf("%d bytes inside", addr - g.beg); // Can it happen?
+ AsanPrintf("%zd bytes inside", addr - g.beg); // Can it happen?
}
- Printf(" of global variable '%s' (0x%lx) of size %ld\n",
- g.name, g.beg, g.size);
+ AsanPrintf(" of global variable '%s' (0x%zx) of size %zu\n",
+ g.name, g.beg, g.size);
PrintIfASCII(g);
return true;
}
-bool DescribeAddrIfGlobal(uintptr_t addr) {
- if (!FLAG_report_globals) return false;
+bool DescribeAddrIfGlobal(uptr addr) {
+ if (!flags()->report_globals) return false;
ScopedLock lock(&mu_for_globals);
bool res = false;
for (ListOfGlobals *l = list_of_globals; l; l = l->next) {
const Global &g = *l->g;
- if (FLAG_report_globals >= 2)
- Printf("Search Global: beg=%p size=%ld name=%s\n",
- g.beg, g.size, g.name);
+ if (flags()->report_globals >= 2)
+ AsanPrintf("Search Global: beg=%p size=%zu name=%s\n",
+ (void*)g.beg, g.size, (char*)g.name);
res |= DescribeAddrIfMyRedZone(g, addr);
}
return res;
@@ -106,7 +106,7 @@ bool DescribeAddrIfGlobal(uintptr_t addr) {
// so we store the globals in a map.
static void RegisterGlobal(const Global *g) {
CHECK(asan_inited);
- CHECK(FLAG_report_globals);
+ CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
@@ -116,14 +116,14 @@ static void RegisterGlobal(const Global *g) {
l->g = g;
l->next = list_of_globals;
list_of_globals = l;
- if (FLAG_report_globals >= 2)
- Report("Added Global: beg=%p size=%ld name=%s\n",
- g->beg, g->size, g->name);
+ if (flags()->report_globals >= 2)
+ Report("Added Global: beg=%p size=%zu name=%s\n",
+ (void*)g->beg, g->size, g->name);
}
static void UnregisterGlobal(const Global *g) {
CHECK(asan_inited);
- CHECK(FLAG_report_globals);
+ CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
@@ -139,9 +139,9 @@ static void UnregisterGlobal(const Global *g) {
using namespace __asan; // NOLINT
// Register one global with a default redzone.
-void __asan_register_global(uintptr_t addr, size_t size,
+void __asan_register_global(uptr addr, uptr size,
const char *name) {
- if (!FLAG_report_globals) return;
+ if (!flags()->report_globals) return;
ScopedLock lock(&mu_for_globals);
Global *g = (Global *)allocator_for_globals.Allocate(sizeof(Global));
g->beg = addr;
@@ -152,20 +152,20 @@ void __asan_register_global(uintptr_t addr, size_t size,
}
// Register an array of globals.
-void __asan_register_globals(__asan_global *globals, size_t n) {
- if (!FLAG_report_globals) return;
+void __asan_register_globals(__asan_global *globals, uptr n) {
+ if (!flags()->report_globals) return;
ScopedLock lock(&mu_for_globals);
- for (size_t i = 0; i < n; i++) {
+ for (uptr i = 0; i < n; i++) {
RegisterGlobal(&globals[i]);
}
}
// Unregister an array of globals.
// We must do it when a shared object gets dlclosed.
-void __asan_unregister_globals(__asan_global *globals, size_t n) {
- if (!FLAG_report_globals) return;
+void __asan_unregister_globals(__asan_global *globals, uptr n) {
+ if (!flags()->report_globals) return;
ScopedLock lock(&mu_for_globals);
- for (size_t i = 0; i < n; i++) {
+ for (uptr i = 0; i < n; i++) {
UnregisterGlobal(&globals[i]);
}
}
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index 53ef91a..2ce5826 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -1,4 +1,4 @@
-//===-- asan_interceptors.cc ------------------------------------*- C++ -*-===//
+//===-- asan_interceptors.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,7 +9,7 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// Intercept various libc functions to catch buggy memory accesses there.
+// Intercept various libc functions.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
@@ -19,40 +19,107 @@
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "asan_thread_registry.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+// Use macros to describe whether a specific function should be
+// intercepted on a given platform.
+#if !defined(_WIN32)
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
+#else
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
+#endif
-#include <ctype.h>
-#include <dlfcn.h>
-#include <string.h>
-#include <strings.h>
+#if !defined(__APPLE__)
+# define ASAN_INTERCEPT_STRNLEN 1
+#else
+# define ASAN_INTERCEPT_STRNLEN 0
+#endif
-namespace __asan {
+#if defined(ANDROID) || defined(_WIN32)
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
+#else
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
+#endif
+
+// Use extern declarations of intercepted functions on Mac and Windows
+// to avoid including system headers.
+#if defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL))
+extern "C" {
+// signal.h
+# if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+struct sigaction;
+int sigaction(int sig, const struct sigaction *act,
+ struct sigaction *oldact);
+void *signal(int signum, void *handler);
+# endif
+
+// setjmp.h
+void longjmp(void* env, int value);
+# if !defined(_WIN32)
+void _longjmp(void *env, int value);
+# endif
+
+// string.h / strings.h
+int memcmp(const void *a1, const void *a2, uptr size);
+void* memmove(void *to, const void *from, uptr size);
+void* memcpy(void *to, const void *from, uptr size);
+void* memset(void *block, int c, uptr size);
+char* strchr(const char *str, int c);
+# if defined(__APPLE__)
+char* index(const char *string, int c);
+# endif
+char* strcat(char *to, const char* from); // NOLINT
+char *strncat(char *to, const char* from, uptr size);
+char* strcpy(char *to, const char* from); // NOLINT
+char* strncpy(char *to, const char* from, uptr size);
+int strcmp(const char *s1, const char* s2);
+int strncmp(const char *s1, const char* s2, uptr size);
+# if !defined(_WIN32)
+int strcasecmp(const char *s1, const char *s2);
+int strncasecmp(const char *s1, const char *s2, uptr n);
+char* strdup(const char *s);
+# endif
+uptr strlen(const char *s);
+# if ASAN_INTERCEPT_STRNLEN
+uptr strnlen(const char *s, uptr maxlen);
+# endif
+
+// stdlib.h
+int atoi(const char *nptr);
+long atol(const char *nptr); // NOLINT
+long strtol(const char *nptr, char **endptr, int base); // NOLINT
+# if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+long long atoll(const char *nptr); // NOLINT
+long long strtoll(const char *nptr, char **endptr, int base); // NOLINT
+# endif
+
+// Windows threads.
+# if defined(_WIN32)
+__declspec(dllimport)
+void* __stdcall CreateThread(void *sec, uptr st, void* start,
+ void *arg, DWORD fl, DWORD *id);
+# endif
+
+// Posix threads.
+# if !defined(_WIN32)
+int pthread_create(void *thread, void *attr, void *(*start_routine)(void*),
+ void *arg);
+# endif
+} // extern "C"
+#endif
-index_f real_index;
-memcmp_f real_memcmp;
-memcpy_f real_memcpy;
-memmove_f real_memmove;
-memset_f real_memset;
-strcasecmp_f real_strcasecmp;
-strcat_f real_strcat;
-strchr_f real_strchr;
-strcmp_f real_strcmp;
-strcpy_f real_strcpy;
-strdup_f real_strdup;
-strlen_f real_strlen;
-strncasecmp_f real_strncasecmp;
-strncmp_f real_strncmp;
-strncpy_f real_strncpy;
-strnlen_f real_strnlen;
+namespace __asan {
// Instruments read/write access to a single byte in memory.
// On error calls __asan_report_error, which aborts the program.
-__attribute__((noinline))
-static void AccessAddress(uintptr_t address, bool isWrite) {
- if (__asan_address_is_poisoned((void*)address)) {
- GET_BP_PC_SP;
- __asan_report_error(pc, bp, sp, address, isWrite, /* access_size */ 1);
- }
-}
+#define ACCESS_ADDRESS(address, isWrite) do { \
+ if (!AddrIsInMem(address) || AddressIsPoisoned(address)) { \
+ GET_CURRENT_PC_BP_SP; \
+ __asan_report_error(pc, bp, sp, address, isWrite, /* access_size */ 1); \
+ } \
+} while (0)
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
@@ -64,9 +131,9 @@ static void AccessAddress(uintptr_t address, bool isWrite) {
// checking the first and the last byte of a range.
#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
if (size > 0) { \
- uintptr_t ptr = (uintptr_t)(offset); \
- AccessAddress(ptr, isWrite); \
- AccessAddress(ptr + (size) - 1, isWrite); \
+ uptr ptr = (uptr)(offset); \
+ ACCESS_ADDRESS(ptr, isWrite); \
+ ACCESS_ADDRESS(ptr + (size) - 1, isWrite); \
} \
} while (0)
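
As a concrete example, ACCESS_MEMORY_RANGE(p, 100, true) expands to just two single-byte checks, of p[0] and p[99]; a poisoned byte strictly inside a range whose two end bytes are both addressable is deliberately not caught by this fast path, which is the trade-off of checking only the first and the last byte of a range.
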
@@ -81,17 +148,17 @@ static void AccessAddress(uintptr_t address, bool isWrite) {
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case.
// Macro is used to avoid creation of new frames.
-static inline bool RangesOverlap(const char *offset1, size_t length1,
- const char *offset2, size_t length2) {
+static inline bool RangesOverlap(const char *offset1, uptr length1,
+ const char *offset2, uptr length2) {
return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
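
For instance, with this predicate RangesOverlap(p, 10, p + 5, 10) is true, while RangesOverlap(p, 5, p + 5, 10) is false: the intervals are half-open, so ranges that merely touch at an endpoint are not treated as overlapping.
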
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
const char *offset1 = (const char*)_offset1; \
const char *offset2 = (const char*)_offset2; \
if (RangesOverlap(offset1, length1, offset2, length2)) { \
- Report("ERROR: AddressSanitizer %s-param-overlap: " \
- "memory ranges [%p,%p) and [%p, %p) overlap\n", \
- name, offset1, offset1 + length1, offset2, offset2 + length2); \
+ AsanReport("ERROR: AddressSanitizer %s-param-overlap: " \
+ "memory ranges [%p,%p) and [%p, %p) overlap\n", \
+ name, offset1, offset1 + length1, offset2, offset2 + length2); \
PRINT_CURRENT_STACK(); \
ShowStatsAndAbort(); \
} \
@@ -104,87 +171,139 @@ static inline bool RangesOverlap(const char *offset1, size_t length1,
} \
} while (0)
-size_t internal_strlen(const char *s) {
- size_t i = 0;
- while (s[i]) i++;
- return i;
+static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
+#if ASAN_INTERCEPT_STRNLEN
+ if (REAL(strnlen) != 0) {
+ return REAL(strnlen)(s, maxlen);
+ }
+#endif
+ return internal_strnlen(s, maxlen);
}
-size_t internal_strnlen(const char *s, size_t maxlen) {
- if (real_strnlen != NULL) {
- return real_strnlen(s, maxlen);
- }
- size_t i = 0;
- while (i < maxlen && s[i]) i++;
- return i;
+} // namespace __asan
+
+// ---------------------- Wrappers ---------------- {{{1
+using namespace __asan; // NOLINT
+
+static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+ AsanThread *t = (AsanThread*)arg;
+ asanThreadRegistry().SetCurrent(t);
+ return t->ThreadStart();
}
-void* internal_memchr(const void* s, int c, size_t n) {
- const char* t = (char*)s;
- for (size_t i = 0; i < n; ++i, ++t)
- if (*t == c)
- return (void*)t;
- return NULL;
+#ifndef _WIN32
+INTERCEPTOR(int, pthread_create, void *thread,
+ void *attr, void *(*start_routine)(void*), void *arg) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
+ u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
+ AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
+ asanThreadRegistry().RegisterThread(t);
+ return REAL(pthread_create)(thread, attr, asan_thread_start, t);
}
+#endif // !_WIN32
-int internal_memcmp(const void* s1, const void* s2, size_t n) {
- const char* t1 = (char*)s1;
- const char* t2 = (char*)s2;
- for (size_t i = 0; i < n; ++i, ++t1, ++t2)
- if (*t1 != *t2)
- return *t1 < *t2 ? -1 : 1;
+#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+INTERCEPTOR(void*, signal, int signum, void *handler) {
+ if (!AsanInterceptsSignal(signum)) {
+ return REAL(signal)(signum, handler);
+ }
return 0;
}
-void InitializeAsanInterceptors() {
-#ifndef __APPLE__
- INTERCEPT_FUNCTION(index);
-#else
- OVERRIDE_FUNCTION(index, WRAP(strchr));
+INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
+ struct sigaction *oldact) {
+ if (!AsanInterceptsSignal(signum)) {
+ return REAL(sigaction)(signum, act, oldact);
+ }
+ return 0;
+}
+#elif ASAN_POSIX
+// We need to have defined REAL(sigaction) on posix systems.
+DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
+ struct sigaction *oldact);
+#endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+
+INTERCEPTOR(void, longjmp, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(longjmp)(env, val);
+}
+
+#if !defined(_WIN32)
+INTERCEPTOR(void, _longjmp, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(_longjmp)(env, val);
+}
+
+INTERCEPTOR(void, siglongjmp, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(siglongjmp)(env, val);
+}
#endif
- INTERCEPT_FUNCTION(memcmp);
- INTERCEPT_FUNCTION(memcpy);
- INTERCEPT_FUNCTION(memmove);
- INTERCEPT_FUNCTION(memset);
- INTERCEPT_FUNCTION(strcasecmp);
- INTERCEPT_FUNCTION(strcat); // NOLINT
- INTERCEPT_FUNCTION(strchr);
- INTERCEPT_FUNCTION(strcmp);
- INTERCEPT_FUNCTION(strcpy); // NOLINT
- INTERCEPT_FUNCTION(strdup);
- INTERCEPT_FUNCTION(strlen);
- INTERCEPT_FUNCTION(strncasecmp);
- INTERCEPT_FUNCTION(strncmp);
- INTERCEPT_FUNCTION(strncpy);
-#ifndef __APPLE__
- INTERCEPT_FUNCTION(strnlen);
+
+#if ASAN_HAS_EXCEPTIONS == 1
+#ifdef __APPLE__
+extern "C" void __cxa_throw(void *a, void *b, void *c);
+#endif // __APPLE__
+
+INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+ CHECK(REAL(__cxa_throw));
+ __asan_handle_no_return();
+ REAL(__cxa_throw)(a, b, c);
+}
#endif
- if (FLAG_v > 0) {
- Printf("AddressSanitizer: libc interceptors initialized\n");
- }
+
+// intercept mlock and friends.
+// Since asan maps 16T of RAM, mlock is completely unfriendly to asan.
+// All functions return 0 (success).
+static void MlockIsUnsupported() {
+ static bool printed = 0;
+ if (printed) return;
+ printed = true;
+ Printf("INFO: AddressSanitizer ignores mlock/mlockall/munlock/munlockall\n");
}
-} // namespace __asan
+extern "C" {
+INTERCEPTOR_ATTRIBUTE
+int mlock(const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
-// ---------------------- Wrappers ---------------- {{{1
-using namespace __asan; // NOLINT
+INTERCEPTOR_ATTRIBUTE
+int munlock(const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR_ATTRIBUTE
+int mlockall(int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR_ATTRIBUTE
+int munlockall(void) {
+ MlockIsUnsupported();
+ return 0;
+}
+} // extern "C"
static inline int CharCmp(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
static inline int CharCaseCmp(unsigned char c1, unsigned char c2) {
- int c1_low = tolower(c1);
- int c2_low = tolower(c2);
+ int c1_low = ToLower(c1);
+ int c2_low = ToLower(c2);
return c1_low - c2_low;
}
-int WRAP(memcmp)(const void *a1, const void *a2, size_t size) {
+INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
ENSURE_ASAN_INITED();
unsigned char c1 = 0, c2 = 0;
const unsigned char *s1 = (const unsigned char*)a1;
const unsigned char *s2 = (const unsigned char*)a2;
- size_t i;
+ uptr i;
for (i = 0; i < size; i++) {
c1 = s1[i];
c2 = s2[i];
@@ -195,65 +314,70 @@ int WRAP(memcmp)(const void *a1, const void *a2, size_t size) {
return CharCmp(c1, c2);
}
-void *WRAP(memcpy)(void *to, const void *from, size_t size) {
+INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
// memcpy is called during __asan_init() from the internals
// of printf(...).
if (asan_init_is_running) {
- return real_memcpy(to, from, size);
+ return REAL(memcpy)(to, from, size);
}
ENSURE_ASAN_INITED();
- if (FLAG_replace_intrin) {
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
+ if (flags()->replace_intrin) {
+ if (to != from) {
+ // We do not treat memcpy with to==from as a bug.
+ // See http://llvm.org/bugs/show_bug.cgi?id=11763.
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
+ }
ASAN_WRITE_RANGE(from, size);
ASAN_READ_RANGE(to, size);
}
- return real_memcpy(to, from, size);
+ return REAL(memcpy)(to, from, size);
}
-void *WRAP(memmove)(void *to, const void *from, size_t size) {
+INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
+ if (asan_init_is_running) {
+ return REAL(memmove)(to, from, size);
+ }
ENSURE_ASAN_INITED();
- if (FLAG_replace_intrin) {
+ if (flags()->replace_intrin) {
ASAN_WRITE_RANGE(from, size);
ASAN_READ_RANGE(to, size);
}
- return real_memmove(to, from, size);
+ return REAL(memmove)(to, from, size);
}
-void *WRAP(memset)(void *block, int c, size_t size) {
- // memset is called inside INTERCEPT_FUNCTION on Mac.
+INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
+ // memset is called inside Printf.
if (asan_init_is_running) {
- return real_memset(block, c, size);
+ return REAL(memset)(block, c, size);
}
ENSURE_ASAN_INITED();
- if (FLAG_replace_intrin) {
+ if (flags()->replace_intrin) {
ASAN_WRITE_RANGE(block, size);
}
- return real_memset(block, c, size);
+ return REAL(memset)(block, c, size);
}
-// Note that on Linux index and strchr are definined differently depending on
-// the compiler (gcc vs clang).
-// see __CORRECT_ISO_CPP_STRING_H_PROTO in /usr/include/string.h
-
-#ifndef __APPLE__
-char *WRAP(index)(const char *str, int c)
- __attribute__((alias(WRAPPER_NAME(strchr))));
-#endif
-
-char *WRAP(strchr)(const char *str, int c) {
+INTERCEPTOR(char*, strchr, const char *str, int c) {
ENSURE_ASAN_INITED();
- char *result = real_strchr(str, c);
- if (FLAG_replace_str) {
- size_t bytes_read = (result ? result - str : real_strlen(str)) + 1;
+ char *result = REAL(strchr)(str, c);
+ if (flags()->replace_str) {
+ uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1;
ASAN_READ_RANGE(str, bytes_read);
}
return result;
}
-int WRAP(strcasecmp)(const char *s1, const char *s2) {
+#ifdef __linux__
+INTERCEPTOR(char*, index, const char *string, int c)
+ ALIAS(WRAPPER_NAME(strchr));
+#else
+DEFINE_REAL(char*, index, const char *string, int c)
+#endif
+
+INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
ENSURE_ASAN_INITED();
unsigned char c1, c2;
- size_t i;
+ uptr i;
for (i = 0; ; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
@@ -264,29 +388,44 @@ int WRAP(strcasecmp)(const char *s1, const char *s2) {
return CharCaseCmp(c1, c2);
}
-char *WRAP(strcat)(char *to, const char *from) { // NOLINT
+INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
ENSURE_ASAN_INITED();
- if (FLAG_replace_str) {
- size_t from_length = real_strlen(from);
+ if (flags()->replace_str) {
+ uptr from_length = REAL(strlen)(from);
ASAN_READ_RANGE(from, from_length + 1);
if (from_length > 0) {
- size_t to_length = real_strlen(to);
+ uptr to_length = REAL(strlen)(to);
ASAN_READ_RANGE(to, to_length);
ASAN_WRITE_RANGE(to + to_length, from_length + 1);
CHECK_RANGES_OVERLAP("strcat", to, to_length + 1, from, from_length + 1);
}
}
- return real_strcat(to, from);
+ return REAL(strcat)(to, from); // NOLINT
}
-int WRAP(strcmp)(const char *s1, const char *s2) {
- // strcmp is called from malloc_default_purgeable_zone()
- // in __asan::ReplaceSystemAlloc() on Mac.
- if (asan_init_is_running) {
- return real_strcmp(s1, s2);
+INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_str && size > 0) {
+ uptr from_length = MaybeRealStrnlen(from, size);
+ ASAN_READ_RANGE(from, Min(size, from_length + 1));
+ uptr to_length = REAL(strlen)(to);
+ ASAN_READ_RANGE(to, to_length);
+ ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+ if (from_length > 0) {
+ CHECK_RANGES_OVERLAP("strncat", to, to_length + 1,
+ from, Min(size, from_length + 1));
+ }
}
+ return REAL(strncat)(to, from, size);
+}
+
+INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
+ if (!asan_inited) {
+ return internal_strcmp(s1, s2);
+ }
+ ENSURE_ASAN_INITED();
unsigned char c1, c2;
- size_t i;
+ uptr i;
for (i = 0; ; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
@@ -297,67 +436,68 @@ int WRAP(strcmp)(const char *s1, const char *s2) {
return CharCmp(c1, c2);
}
-char *WRAP(strcpy)(char *to, const char *from) { // NOLINT
+INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
- return real_strcpy(to, from);
+ return REAL(strcpy)(to, from); // NOLINT
}
ENSURE_ASAN_INITED();
- if (FLAG_replace_str) {
- size_t from_size = real_strlen(from) + 1;
+ if (flags()->replace_str) {
+ uptr from_size = REAL(strlen)(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
ASAN_READ_RANGE(from, from_size);
ASAN_WRITE_RANGE(to, from_size);
}
- return real_strcpy(to, from);
+ return REAL(strcpy)(to, from); // NOLINT
}
-char *WRAP(strdup)(const char *s) {
+INTERCEPTOR(char*, strdup, const char *s) {
ENSURE_ASAN_INITED();
- if (FLAG_replace_str) {
- size_t length = real_strlen(s);
+ if (flags()->replace_str) {
+ uptr length = REAL(strlen)(s);
ASAN_READ_RANGE(s, length + 1);
}
- return real_strdup(s);
+ return REAL(strdup)(s);
}
-size_t WRAP(strlen)(const char *s) {
+INTERCEPTOR(uptr, strlen, const char *s) {
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
- return real_strlen(s);
+ return REAL(strlen)(s);
}
ENSURE_ASAN_INITED();
- size_t length = real_strlen(s);
- if (FLAG_replace_str) {
+ uptr length = REAL(strlen)(s);
+ if (flags()->replace_str) {
ASAN_READ_RANGE(s, length + 1);
}
return length;
}
-int WRAP(strncasecmp)(const char *s1, const char *s2, size_t size) {
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, uptr n) {
ENSURE_ASAN_INITED();
unsigned char c1 = 0, c2 = 0;
- size_t i;
- for (i = 0; i < size; i++) {
+ uptr i;
+ for (i = 0; i < n; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
- ASAN_READ_RANGE(s1, Min(i + 1, size));
- ASAN_READ_RANGE(s2, Min(i + 1, size));
+ ASAN_READ_RANGE(s1, Min(i + 1, n));
+ ASAN_READ_RANGE(s2, Min(i + 1, n));
return CharCaseCmp(c1, c2);
}
-int WRAP(strncmp)(const char *s1, const char *s2, size_t size) {
+INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
// strncmp is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
- return real_strncmp(s1, s2, size);
+ return REAL(strncmp)(s1, s2, size);
}
+ ENSURE_ASAN_INITED();
unsigned char c1 = 0, c2 = 0;
- size_t i;
+ uptr i;
for (i = 0; i < size; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
@@ -368,24 +508,234 @@ int WRAP(strncmp)(const char *s1, const char *s2, size_t size) {
return CharCmp(c1, c2);
}
-char *WRAP(strncpy)(char *to, const char *from, size_t size) {
+INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
ENSURE_ASAN_INITED();
- if (FLAG_replace_str) {
- size_t from_size = Min(size, internal_strnlen(from, size) + 1);
+ if (flags()->replace_str) {
+ uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
ASAN_READ_RANGE(from, from_size);
ASAN_WRITE_RANGE(to, size);
}
- return real_strncpy(to, from, size);
+ return REAL(strncpy)(to, from, size);
}
-#ifndef __APPLE__
-size_t WRAP(strnlen)(const char *s, size_t maxlen) {
+#if ASAN_INTERCEPT_STRNLEN
+INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
ENSURE_ASAN_INITED();
- size_t length = real_strnlen(s, maxlen);
- if (FLAG_replace_str) {
+ uptr length = REAL(strnlen)(s, maxlen);
+ if (flags()->replace_str) {
ASAN_READ_RANGE(s, Min(length + 1, maxlen));
}
return length;
}
+#endif // ASAN_INTERCEPT_STRNLEN
+
+static inline bool IsValidStrtolBase(int base) {
+ return (base == 0) || (2 <= base && base <= 36);
+}
+
+static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
+ CHECK(endptr != 0);
+ if (nptr == *endptr) {
+ // No digits were found by the strtol call, so we need to find the last
+ // symbol accessed by strtol on our own.
+ // We get this symbol by skipping leading blanks and an optional +/- sign.
+ while (IsSpace(*nptr)) nptr++;
+ if (*nptr == '+' || *nptr == '-') nptr++;
+ *endptr = (char*)nptr;
+ }
+ CHECK(*endptr >= nptr);
+}
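A small worked example (illustrative only) of why the endptr fix-up is needed: when the input holds no digits, libc leaves *endptr == nptr even though it has already inspected the leading blanks and the sign, and those bytes still have to be covered by ASAN_READ_RANGE:

    const char *s = "   +";  // blanks and a sign, but no digits
    char *end;
    strtol(s, &end, 10);     // libc reports no conversion: end == s
    // FixRealStrtolEndptr(s, &end) advances end past the four inspected
    // bytes, so the interceptor marks them all as read.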
+
+INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
+ char **endptr, int base) {
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(strtol)(nptr, endptr, base);
+ }
+ char *real_endptr;
+ long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT
+ if (endptr != 0) {
+ *endptr = real_endptr;
+ }
+ if (IsValidStrtolBase(base)) {
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ }
+ return result;
+}
+
+INTERCEPTOR(int, atoi, const char *nptr) {
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(atoi)(nptr);
+ }
+ char *real_endptr;
+ // "man atoi" says that the behavior of atoi(nptr) is the same as
+ // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the
+ // parsed integer can't be stored in a *long* (even when long is
+ // wider than int). So we just imitate this behavior.
+ int result = REAL(strtol)(nptr, &real_endptr, 10);
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(atol)(nptr);
+ }
+ char *real_endptr;
+ long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT
+ char **endptr, int base) {
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(strtoll)(nptr, endptr, base);
+ }
+ char *real_endptr;
+ long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT
+ if (endptr != 0) {
+ *endptr = real_endptr;
+ }
+ // If base has an unsupported value, strtoll can fail with EINVAL
+ // without reading any characters. So do additional checks only
+ // if base is valid.
+ if (IsValidStrtolBase(base)) {
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ }
+ return result;
+}
+
+INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(atoll)(nptr);
+ }
+ char *real_endptr;
+ long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+
+#define ASAN_INTERCEPT_FUNC(name) do { \
+ if (!INTERCEPT_FUNCTION(name) && flags()->verbosity > 0) \
+ Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
+ } while (0)
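For readability, here is roughly what a call such as ASAN_INTERCEPT_FUNC(strlen) expands to, given the macro just defined:

    do {
      if (!INTERCEPT_FUNCTION(strlen) && flags()->verbosity > 0)
        Report("AddressSanitizer: failed to intercept 'strlen'\n");
    } while (0);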
+
+#if defined(_WIN32)
+INTERCEPTOR_WINAPI(DWORD, CreateThread,
+ void* security, uptr stack_size,
+ DWORD (__stdcall *start_routine)(void*), void* arg,
+ DWORD flags, void* tid) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
+ u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
+ AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
+ asanThreadRegistry().RegisterThread(t);
+ return REAL(CreateThread)(security, stack_size,
+ asan_thread_start, t, flags, tid);
+}
+
+namespace __asan {
+void InitializeWindowsInterceptors() {
+ ASAN_INTERCEPT_FUNC(CreateThread);
+}
+
+} // namespace __asan
+#endif
+
+// ---------------------- InitializeAsanInterceptors ---------------- {{{1
+namespace __asan {
+void InitializeAsanInterceptors() {
+ static bool was_called_once;
+ CHECK(was_called_once == false);
+ was_called_once = true;
+ // Intercept mem* functions.
+ ASAN_INTERCEPT_FUNC(memcmp);
+ ASAN_INTERCEPT_FUNC(memmove);
+ ASAN_INTERCEPT_FUNC(memset);
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ ASAN_INTERCEPT_FUNC(memcpy);
+ } else {
+ REAL(memcpy) = REAL(memmove);
+ }
+
+ // Intercept str* functions.
+ ASAN_INTERCEPT_FUNC(strcat); // NOLINT
+ ASAN_INTERCEPT_FUNC(strchr);
+ ASAN_INTERCEPT_FUNC(strcmp);
+ ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
+ ASAN_INTERCEPT_FUNC(strlen);
+ ASAN_INTERCEPT_FUNC(strncat);
+ ASAN_INTERCEPT_FUNC(strncmp);
+ ASAN_INTERCEPT_FUNC(strncpy);
+#if !defined(_WIN32)
+ ASAN_INTERCEPT_FUNC(strcasecmp);
+ ASAN_INTERCEPT_FUNC(strdup);
+ ASAN_INTERCEPT_FUNC(strncasecmp);
+# ifndef __APPLE__
+ ASAN_INTERCEPT_FUNC(index);
+# else
+ CHECK(OVERRIDE_FUNCTION(index, WRAP(strchr)));
+# endif
+#endif
+#if ASAN_INTERCEPT_STRNLEN
+ ASAN_INTERCEPT_FUNC(strnlen);
#endif
+
+ ASAN_INTERCEPT_FUNC(atoi);
+ ASAN_INTERCEPT_FUNC(atol);
+ ASAN_INTERCEPT_FUNC(strtol);
+#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+ ASAN_INTERCEPT_FUNC(atoll);
+ ASAN_INTERCEPT_FUNC(strtoll);
+#endif
+
+ // Intercept signal- and jump-related functions.
+ ASAN_INTERCEPT_FUNC(longjmp);
+#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+ ASAN_INTERCEPT_FUNC(sigaction);
+ ASAN_INTERCEPT_FUNC(signal);
+#endif
+
+#if !defined(_WIN32)
+ ASAN_INTERCEPT_FUNC(_longjmp);
+ INTERCEPT_FUNCTION(__cxa_throw);
+# if !defined(__APPLE__)
+ // On Darwin siglongjmp tailcalls longjmp, so we don't want to intercept it
+ // there.
+ ASAN_INTERCEPT_FUNC(siglongjmp);
+# endif
+#endif
+
+ // Intercept threading-related functions
+#if !defined(_WIN32)
+ ASAN_INTERCEPT_FUNC(pthread_create);
+#endif
+
+ // Some Windows-specific interceptors.
+#if defined(_WIN32)
+ InitializeWindowsInterceptors();
+#endif
+
+ // Some Mac-specific interceptors.
+#if defined(__APPLE__)
+ InitializeMacInterceptors();
+#endif
+
+ if (flags()->verbosity > 0) {
+ Report("AddressSanitizer: libc interceptors initialized\n");
+ }
+}
+
+} // namespace __asan
diff --git a/lib/asan/asan_interceptors.h b/lib/asan/asan_interceptors.h
index 07b9420..3281692 100644
--- a/lib/asan/asan_interceptors.h
+++ b/lib/asan/asan_interceptors.h
@@ -15,119 +15,25 @@
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
-
-// To replace weak system functions on Linux we just need to declare functions
-// with same names in our library and then obtain the real function pointers
-// using dlsym(). This is not so on Mac OS, where the two-level namespace makes
-// our replacement functions invisible to other libraries. This may be overcomed
-// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
-// libraries in Chromium were noticed when doing so.
-// Instead we use mach_override, a handy framework for patching functions at
-// runtime. To avoid possible name clashes, our replacement functions have
-// the "wrap_" prefix on Mac.
-//
-// After interception, the calls to system functions will be substituted by
-// calls to our interceptors. We store pointers to system function f()
-// in __asan::real_f().
-//
-// TODO(glider): mach_override_ptr() tends to spend too much time
-// in allocateBranchIsland(). This should be ok for real-word
-// application, but slows down our tests which fork too many children.
-#ifdef __APPLE__
-#include "mach_override/mach_override.h"
-#define WRAP(x) wrap_##x
-#define WRAPPER_NAME(x) "wrap_"#x
-
-#define OVERRIDE_FUNCTION(oldfunc, newfunc) \
- CHECK(0 == __asan_mach_override_ptr((void*)(oldfunc), \
- (void*)(newfunc), \
- (void**)&real_##oldfunc)); \
- CHECK(real_##oldfunc != NULL);
-
-#define OVERRIDE_FUNCTION_IF_EXISTS(oldfunc, newfunc) \
- do { __asan_mach_override_ptr((void*)(oldfunc), \
- (void*)(newfunc), \
- (void**)&real_##oldfunc); } while (0)
-
-#define INTERCEPT_FUNCTION(func) \
- OVERRIDE_FUNCTION(func, WRAP(func))
-
-#define INTERCEPT_FUNCTION_IF_EXISTS(func) \
- OVERRIDE_FUNCTION_IF_EXISTS(func, WRAP(func))
-
-#else // __linux__
-#define WRAP(x) x
-#define WRAPPER_NAME(x) #x
-
-#define INTERCEPT_FUNCTION(func) \
- CHECK((real_##func = (func##_f)dlsym(RTLD_NEXT, #func)));
-
-#define INTERCEPT_FUNCTION_IF_EXISTS(func) \
- do { real_##func = (func##_f)dlsym(RTLD_NEXT, #func); } while (0)
-#endif
-
-#ifdef __APPLE__
-int WRAP(memcmp)(const void *a1, const void *a2, size_t size);
-void *WRAP(memcpy)(void *to, const void *from, size_t size);
-void *WRAP(memmove)(void *to, const void *from, size_t size);
-void *WRAP(memset)(void *block, int c, size_t size);
-int WRAP(strcasecmp)(const char *s1, const char *s2);
-char *WRAP(strcat)(char *to, const char *from); // NOLINT
-char *WRAP(strchr)(const char *string, int c);
-int WRAP(strcmp)(const char *s1, const char *s2);
-char *WRAP(strcpy)(char *to, const char *from); // NOLINT
-char *WRAP(strdup)(const char *s);
-size_t WRAP(strlen)(const char *s);
-int WRAP(strncasecmp)(const char *s1, const char *s2, size_t n);
-int WRAP(strncmp)(const char *s1, const char *s2, size_t size);
-char *WRAP(strncpy)(char *to, const char *from, size_t size);
-#endif
+#include "interception/interception.h"
+
+DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
+DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
+DECLARE_REAL(void*, memset, void *block, int c, uptr size)
+DECLARE_REAL(char*, strchr, const char *str, int c)
+DECLARE_REAL(uptr, strlen, const char *s)
+DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
+DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
+struct sigaction;
+DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
+ struct sigaction *oldact)
namespace __asan {
-typedef void* (*index_f)(const char *string, int c);
-typedef int (*memcmp_f)(const void *a1, const void *a2, size_t size);
-typedef void* (*memcpy_f)(void *to, const void *from, size_t size);
-typedef void* (*memmove_f)(void *to, const void *from, size_t size);
-typedef void* (*memset_f)(void *block, int c, size_t size);
-typedef int (*strcasecmp_f)(const char *s1, const char *s2);
-typedef char* (*strcat_f)(char *to, const char *from);
-typedef char* (*strchr_f)(const char *str, int c);
-typedef int (*strcmp_f)(const char *s1, const char *s2);
-typedef char* (*strcpy_f)(char *to, const char *from);
-typedef char* (*strdup_f)(const char *s);
-typedef size_t (*strlen_f)(const char *s);
-typedef int (*strncasecmp_f)(const char *s1, const char *s2, size_t n);
-typedef int (*strncmp_f)(const char *s1, const char *s2, size_t size);
-typedef char* (*strncpy_f)(char *to, const char *from, size_t size);
-typedef size_t (*strnlen_f)(const char *s, size_t maxlen);
-
-// __asan::real_X() holds pointer to library implementation of X().
-extern index_f real_index;
-extern memcmp_f real_memcmp;
-extern memcpy_f real_memcpy;
-extern memmove_f real_memmove;
-extern memset_f real_memset;
-extern strcasecmp_f real_strcasecmp;
-extern strcat_f real_strcat;
-extern strchr_f real_strchr;
-extern strcmp_f real_strcmp;
-extern strcpy_f real_strcpy;
-extern strdup_f real_strdup;
-extern strlen_f real_strlen;
-extern strncasecmp_f real_strncasecmp;
-extern strncmp_f real_strncmp;
-extern strncpy_f real_strncpy;
-extern strnlen_f real_strnlen;
-
-// __asan::internal_X() is the implementation of X() for use in RTL.
-size_t internal_strlen(const char *s);
-size_t internal_strnlen(const char *s, size_t maxlen);
-void* internal_memchr(const void* s, int c, size_t n);
-int internal_memcmp(const void* s1, const void* s2, size_t n);
-
-// Initializes pointers to str*/mem* functions.
void InitializeAsanInterceptors();
+#if defined(__APPLE__)
+void InitializeMacInterceptors();
+#endif // __APPLE__
} // namespace __asan
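To summarize the new scheme in one hedged sketch (foo is a made-up function; the macros come from interception/interception.h as used above): an interceptor validates the memory it is about to touch and then forwards to the original implementation through REAL():

    INTERCEPTOR(int, foo, char *p, uptr n) {
      ENSURE_ASAN_INITED();
      ASAN_READ_RANGE(p, n);   // report if any byte of [p, p+n) is poisoned
      return REAL(foo)(p, n);  // call the real libc implementation
    }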
diff --git a/lib/asan/asan_interface.h b/lib/asan/asan_interface.h
index 7506586..c625a62 100644
--- a/lib/asan/asan_interface.h
+++ b/lib/asan/asan_interface.h
@@ -15,46 +15,46 @@
#ifndef ASAN_INTERFACE_H
#define ASAN_INTERFACE_H
-#include <stdint.h> // for __WORDSIZE
-#include <stdlib.h> // for size_t
-
+#include "sanitizer_common/sanitizer_interface_defs.h"
+// ----------- ATTENTION -------------
// This header should NOT include any other headers from ASan runtime.
// All functions in this header are extern "C" and start with __asan_.
+using __sanitizer::uptr;
+
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
- void __asan_init()
- __attribute__((visibility("default")));
+ void __asan_init() SANITIZER_INTERFACE_ATTRIBUTE;
// This function should be called by the instrumented code.
// 'addr' is the address of a global variable called 'name' of 'size' bytes.
- void __asan_register_global(uintptr_t addr, size_t size, const char *name)
- __attribute__((visibility("default")));
+ void __asan_register_global(uptr addr, uptr size, const char *name)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// This structure describes an instrumented global variable.
struct __asan_global {
- size_t beg; // The address of the global.
- size_t size; // The original size of the global.
- size_t size_with_redzone; // The size with the redzone.
+ uptr beg; // The address of the global.
+ uptr size; // The original size of the global.
+ uptr size_with_redzone; // The size with the redzone.
const char *name; // Name as a C string.
};
// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
- void __asan_register_globals(__asan_global *globals, size_t n)
- __attribute__((visibility("default")));
- void __asan_unregister_globals(__asan_global *globals, size_t n)
- __attribute__((visibility("default")));
+ void __asan_register_globals(__asan_global *globals, uptr n)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+ void __asan_unregister_globals(__asan_global *globals, uptr n)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// These two functions are used by the instrumented code in the
// use-after-return mode. __asan_stack_malloc allocates size bytes of
// fake stack and __asan_stack_free poisons it. real_stack is a pointer to
// the real stack region.
- size_t __asan_stack_malloc(size_t size, size_t real_stack)
- __attribute__((visibility("default")));
- void __asan_stack_free(size_t ptr, size_t size, size_t real_stack)
- __attribute__((visibility("default")));
+ uptr __asan_stack_malloc(uptr size, uptr real_stack)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+ void __asan_stack_free(uptr ptr, uptr size, uptr real_stack)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Marks memory region [addr, addr+size) as unaddressable.
// This memory must be previously allocated by the user program. Accessing
@@ -64,7 +64,8 @@ extern "C" {
// to ASan alignment restrictions.
// Method is NOT thread-safe in the sense that no two threads can
// (un)poison memory in the same memory region simultaneously.
- void __asan_poison_memory_region(void const volatile *addr, size_t size);
+ void __asan_poison_memory_region(void const volatile *addr, uptr size)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Marks memory region [addr, addr+size) as addressable.
// This memory must be previously allocated by the user program. Accessing
// addresses in this region is allowed until this region is poisoned again.
@@ -72,10 +73,15 @@ extern "C" {
// ASan alignment restrictions.
// Method is NOT thread-safe in the sense that no two threads can
// (un)poison memory in the same memory region simultaneously.
- void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+ void __asan_unpoison_memory_region(void const volatile *addr, uptr size)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // Performs cleanup before a NoReturn function. Must be called before things
+ // like _exit and execl to avoid false positives on stack.
+ void __asan_handle_no_return() SANITIZER_INTERFACE_ATTRIBUTE;
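A hedged example of the intended call site (the exact instrumentation is up to the compiler or the user, not this header): code that is about to abandon the current stack without returning calls this first, so stale stack shadow does not cause false positives later:

    void bail_out(void) {
      __asan_handle_no_return();  // unpoison the current thread's stack shadow
      _exit(1);
    }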
// User code should use macro instead of functions.
-#if defined(__has_feature) && __has_feature(address_sanitizer)
+#if __has_feature(address_sanitizer)
#define ASAN_POISON_MEMORY_REGION(addr, size) \
__asan_poison_memory_region((addr), (size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
@@ -89,47 +95,65 @@ extern "C" {
// Returns true iff addr is poisoned (i.e. 1-byte read/write access to this
// address will result in error report from AddressSanitizer).
- bool __asan_address_is_poisoned(void const volatile *addr);
+ bool __asan_address_is_poisoned(void const volatile *addr)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// This is an internal function that is called to report an error.
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
- void __asan_report_error(uintptr_t pc, uintptr_t bp, uintptr_t sp,
- uintptr_t addr, bool is_write, size_t access_size)
- __attribute__((visibility("default")));
+ void __asan_report_error(uptr pc, uptr bp, uptr sp,
+ uptr addr, bool is_write, uptr access_size)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Sets the exit code to use when reporting an error.
// Returns the old value.
- int __asan_set_error_exit_code(int exit_code);
+ int __asan_set_error_exit_code(int exit_code)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // Sets the callback to be called right before death on error.
+ // Passing 0 will unset the callback.
+ void __asan_set_death_callback(void (*callback)(void))
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+ void __asan_set_error_report_callback(void (*callback)(const char*))
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Returns the estimated number of bytes that will be reserved by allocator
// for request of "size" bytes. If ASan allocator can't allocate that much
// memory, returns the maximal possible allocation size, otherwise returns
// "size".
- size_t __asan_get_estimated_allocated_size(size_t size);
- // Returns true if p is NULL or if p was returned by the ASan allocator and
+ uptr __asan_get_estimated_allocated_size(uptr size)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+ // Returns true if p was returned by the ASan allocator and
// is not yet freed.
- bool __asan_get_ownership(const void *p);
+ bool __asan_get_ownership(const void *p)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Returns the number of bytes reserved for the pointer p.
- // Requires (get_ownership(p) == true).
- size_t __asan_get_allocated_size(const void *p);
+ // Requires (get_ownership(p) == true) or (p == 0).
+ uptr __asan_get_allocated_size(const void *p)
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Number of bytes, allocated and not yet freed by the application.
- size_t __asan_get_current_allocated_bytes();
+ uptr __asan_get_current_allocated_bytes()
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Number of bytes, mmaped by asan allocator to fulfill allocation requests.
// Generally, for request of X bytes, allocator can reserve and add to free
// lists a large number of chunks of size X to use them for future requests.
// All these chunks count toward the heap size. Currently, allocator never
// releases memory to OS (instead, it just puts freed chunks to free lists).
- size_t __asan_get_heap_size();
+ uptr __asan_get_heap_size()
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Number of bytes, mmaped by asan allocator, which can be used to fulfill
// allocation requests. When a user program frees memory chunk, it can first
// fall into quarantine and will count toward __asan_get_free_bytes() later.
- size_t __asan_get_free_bytes();
+ uptr __asan_get_free_bytes()
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Number of bytes in unmapped pages, that are released to OS. Currently,
// always returns 0.
- size_t __asan_get_unmapped_bytes();
+ uptr __asan_get_unmapped_bytes()
+ SANITIZER_INTERFACE_ATTRIBUTE;
// Prints accumulated stats to stderr. Used for debugging.
- void __asan_print_accumulated_stats();
+ void __asan_print_accumulated_stats()
+ SANITIZER_INTERFACE_ATTRIBUTE;
} // namespace
#endif // ASAN_INTERFACE_H
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index d0790a7..8c1f320 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -14,40 +14,49 @@
#ifndef ASAN_INTERNAL_H
#define ASAN_INTERNAL_H
-#if !defined(__linux__) && !defined(__APPLE__)
+#include "asan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported by AddressSanitizer"
#endif
-#include <stdint.h> // for __WORDSIZE
-#include <stdlib.h> // for size_t
-#include <unistd.h> // for _exit
+#if defined(_WIN32)
+extern "C" void* _ReturnAddress(void);
+# pragma intrinsic(_ReturnAddress)
+#endif // defined(_WIN32)
+
+#define ASAN_DEFAULT_FAILURE_EXITCODE 1
-// If __WORDSIZE was undefined by the platform, define it in terms of the
-// compiler built-in __LP64__.
-#ifndef __WORDSIZE
-#if __LP64__
-#define __WORDSIZE 64
+#if defined(__linux__)
+# define ASAN_LINUX 1
#else
-#define __WORDSIZE 32
+# define ASAN_LINUX 0
#endif
+
+#if defined(__APPLE__)
+# define ASAN_MAC 1
+#else
+# define ASAN_MAC 0
#endif
-#ifdef ANDROID
-#include <sys/atomics.h>
+#if defined(_WIN32)
+# define ASAN_WINDOWS 1
+#else
+# define ASAN_WINDOWS 0
#endif
-#if defined(__has_feature) && __has_feature(address_sanitizer)
+#define ASAN_POSIX (ASAN_LINUX || ASAN_MAC)
+
+#if __has_feature(address_sanitizer)
# error "The AddressSanitizer run-time should not be"
" instrumented by AddressSanitizer"
#endif
// Build-time configuration options.
-// If set, sysinfo/sysinfo.h will be used to iterate over /proc/maps.
-#ifndef ASAN_USE_SYSINFO
-# define ASAN_USE_SYSINFO 1
-#endif
-
// If set, asan will install its own SEGV signal handler.
#ifndef ASAN_NEEDS_SEGV
# define ASAN_NEEDS_SEGV 1
@@ -64,6 +73,12 @@
# define ASAN_FLEXIBLE_MAPPING_AND_OFFSET 0
#endif
+// If set, values like allocator chunk size, as well as defaults for some flags
+// will be changed towards less memory overhead.
+#ifndef ASAN_LOW_MEMORY
+# define ASAN_LOW_MEMORY 0
+#endif
+
// All internal functions in asan reside inside the __asan namespace
// to avoid namespace collisions with the user programs.
// Separate namespace also makes it simpler to distinguish the asan run-time
@@ -74,109 +89,81 @@ class AsanThread;
struct AsanStackTrace;
// asan_rtl.cc
-void CheckFailed(const char *cond, const char *file, int line);
-void ShowStatsAndAbort();
+void NORETURN ShowStatsAndAbort();
// asan_globals.cc
-bool DescribeAddrIfGlobal(uintptr_t addr);
+bool DescribeAddrIfGlobal(uptr addr);
+void ReplaceOperatorsNewAndDelete();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
-void OutOfMemoryMessageAndDie(const char *mem_type, size_t size);
-
-// asan_linux.cc / asan_mac.cc
+// asan_linux.cc / asan_mac.cc / asan_win.cc
void *AsanDoesNotSupportStaticLinkage();
-int AsanOpenReadonly(const char* filename);
-void *AsanMmapFixedNoReserve(uintptr_t fixed_addr, size_t size);
-void *AsanMmapFixedReserve(uintptr_t fixed_addr, size_t size);
-void *AsanMprotect(uintptr_t fixed_addr, size_t size);
-void *AsanMmapSomewhereOrDie(size_t size, const char *where);
-void AsanUnmapOrDie(void *ptr, size_t size);
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
-ssize_t AsanRead(int fd, void *buf, size_t count);
-ssize_t AsanWrite(int fd, const void *buf, size_t count);
-int AsanClose(int fd);
+bool AsanInterceptsSignal(int signum);
+void SetAlternateSignalStack();
+void UnsetAlternateSignalStack();
+void InstallSignalHandlers();
+void AsanPlatformThreadInit();
-// asan_printf.cc
-void RawWrite(const char *buffer);
-int SNPrint(char *buffer, size_t length, const char *format, ...);
-void Printf(const char *format, ...);
-void Report(const char *format, ...);
+// Wrapper for TLS/TSD.
+void AsanTSDInit(void (*destructor)(void *tsd));
+void *AsanTSDGet();
+void AsanTSDSet(void *tsd);
-// Don't use std::min and std::max, to minimize dependency on libstdc++.
-template<class T> T Min(T a, T b) { return a < b ? a : b; }
-template<class T> T Max(T a, T b) { return a > b ? a : b; }
+void AppendToErrorMessageBuffer(const char *buffer);
+// asan_printf.cc
+void AsanPrintf(const char *format, ...);
+void AsanReport(const char *format, ...);
// asan_poisoning.cc
// Poisons the shadow memory for "size" bytes starting from "addr".
-void PoisonShadow(uintptr_t addr, size_t size, uint8_t value);
+void PoisonShadow(uptr addr, uptr size, u8 value);
// Poisons the shadow memory for "redzone_size" bytes starting from
// "addr + size".
-void PoisonShadowPartialRightRedzone(uintptr_t addr,
- uintptr_t size,
- uintptr_t redzone_size,
- uint8_t value);
-
-extern size_t FLAG_quarantine_size;
-extern int FLAG_demangle;
-extern bool FLAG_symbolize;
-extern int FLAG_v;
-extern size_t FLAG_redzone;
-extern int FLAG_debug;
-extern bool FLAG_poison_shadow;
-extern int FLAG_report_globals;
-extern size_t FLAG_malloc_context_size;
-extern bool FLAG_replace_str;
-extern bool FLAG_replace_intrin;
-extern bool FLAG_replace_cfallocator;
-extern bool FLAG_fast_unwind;
-extern bool FLAG_use_fake_stack;
-extern size_t FLAG_max_malloc_fill_size;
-extern int FLAG_exitcode;
-extern bool FLAG_allow_user_poisoning;
+void PoisonShadowPartialRightRedzone(uptr addr,
+ uptr size,
+ uptr redzone_size,
+ u8 value);
+
+// Platform-specific options.
+#ifdef __APPLE__
+bool PlatformHasDifferentMemcpyAndMemmove();
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
+ (PlatformHasDifferentMemcpyAndMemmove())
+#else
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // __APPLE__
extern int asan_inited;
// Used to avoid infinite recursion in __asan_init().
extern bool asan_init_is_running;
+extern void (*death_callback)(void);
enum LinkerInitialized { LINKER_INITIALIZED = 0 };
-#ifndef ASAN_DIE
-#define ASAN_DIE _exit(FLAG_exitcode)
-#endif // ASAN_DIE
-
-#define CHECK(cond) do { if (!(cond)) { \
- CheckFailed(#cond, __FILE__, __LINE__); \
-}}while(0)
-
-#define RAW_CHECK_MSG(expr, msg) do { \
- if (!(expr)) { \
- RawWrite(msg); \
- ASAN_DIE; \
- } \
-} while (0)
-
-#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
-
-#define UNIMPLEMENTED() CHECK("unimplemented" && 0)
-
#define ASAN_ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
-const size_t kWordSize = __WORDSIZE / 8;
-const size_t kWordSizeInBits = 8 * kWordSize;
-const size_t kPageSizeBits = 12;
-const size_t kPageSize = 1UL << kPageSizeBits;
-
-#define GET_CALLER_PC() (uintptr_t)__builtin_return_address(0)
-#define GET_CURRENT_FRAME() (uintptr_t)__builtin_frame_address(0)
+#if !defined(_WIN32) || defined(__clang__)
+# define GET_CALLER_PC() (uptr)__builtin_return_address(0)
+# define GET_CURRENT_FRAME() (uptr)__builtin_frame_address(0)
+#else
+# define GET_CALLER_PC() (uptr)_ReturnAddress()
+// CaptureStackBackTrace doesn't need to know BP on Windows.
+// FIXME: This macro is still used when printing error reports though it's not
+// clear if the BP value is needed in the ASan reports on Windows.
+# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
+#endif
-#define GET_BP_PC_SP \
- uintptr_t bp = GET_CURRENT_FRAME(); \
- uintptr_t pc = GET_CALLER_PC(); \
- uintptr_t local_stack; \
- uintptr_t sp = (uintptr_t)&local_stack;
+#ifdef _WIN32
+# ifndef ASAN_USE_EXTERNAL_SYMBOLIZER
+# define ASAN_USE_EXTERNAL_SYMBOLIZER __asan_WinSymbolize
+bool __asan_WinSymbolize(const void *addr, char *out_buffer, int buffer_size);
+# endif
+#endif // _WIN32
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
@@ -191,18 +178,8 @@ const int kAsanUserPoisonedMemoryMagic = 0xf7;
const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe;
-static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
-static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
-
-// --------------------------- Bit twiddling ------- {{{1
-inline bool IsPowerOfTwo(size_t x) {
- return (x & (x - 1)) == 0;
-}
-
-inline size_t RoundUpTo(size_t size, size_t boundary) {
- CHECK(IsPowerOfTwo(boundary));
- return (size + boundary - 1) & ~(boundary - 1);
-}
+static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
+static const uptr kRetiredStackFrameMagic = 0x45E0360E;
// -------------------------- LowLevelAllocator ----- {{{1
// A simple low-level memory allocator for internal use.
@@ -211,29 +188,12 @@ class LowLevelAllocator {
explicit LowLevelAllocator(LinkerInitialized) {}
// 'size' must be a power of two.
// Requires an external lock.
- void *Allocate(size_t size);
+ void *Allocate(uptr size);
private:
char *allocated_end_;
char *allocated_current_;
};
-// -------------------------- Atomic ---------------- {{{1
-static inline int AtomicInc(int *a) {
-#ifdef ANDROID
- return __atomic_inc(a) + 1;
-#else
- return __sync_add_and_fetch(a, 1);
-#endif
-}
-
-static inline int AtomicDec(int *a) {
-#ifdef ANDROID
- return __atomic_dec(a) - 1;
-#else
- return __sync_add_and_fetch(a, -1);
-#endif
-}
-
} // namespace __asan
#endif // ASAN_INTERNAL_H
diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc
index 26a08ae..9a3d6bd 100644
--- a/lib/asan/asan_linux.cc
+++ b/lib/asan/asan_linux.cc
@@ -13,87 +13,131 @@
//===----------------------------------------------------------------------===//
#ifdef __linux__
+#include "asan_interceptors.h"
#include "asan_internal.h"
+#include "asan_lock.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include <sys/time.h>
+#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <fcntl.h>
+#include <pthread.h>
+#include <stdio.h>
#include <unistd.h>
+#include <unwind.h>
-extern char _DYNAMIC[];
+#ifndef ANDROID
+// FIXME: where to get ucontext on Android?
+#include <sys/ucontext.h>
+#endif
+
+extern "C" void* _DYNAMIC;
namespace __asan {
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
- return &_DYNAMIC;
-}
-
-static void *asan_mmap(void *addr, size_t length, int prot, int flags,
- int fd, uint64_t offset) {
-# if __WORDSIZE == 64
- return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
-# else
- return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
-# endif
+ return &_DYNAMIC; // defined in link.h
}
-void *AsanMmapSomewhereOrDie(size_t size, const char *mem_type) {
- size = RoundUpTo(size, kPageSize);
- void *res = asan_mmap(0, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- if (res == (void*)-1) {
- OutOfMemoryMessageAndDie(mem_type, size);
- }
- return res;
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#ifdef ANDROID
+ *pc = *sp = *bp = 0;
+#elif defined(__arm__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.arm_pc;
+ *bp = ucontext->uc_mcontext.arm_fp;
+ *sp = ucontext->uc_mcontext.arm_sp;
+# elif defined(__x86_64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_RIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_RBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_RSP];
+# elif defined(__i386__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_EIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_EBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_ESP];
+#else
+# error "Unsupported arch"
+#endif
}
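For context, a hypothetical caller (the real handler lives elsewhere in the runtime): a SIGSEGV handler installed with SA_SIGINFO receives the ucontext as its third argument and recovers the fault location like this:

    static void SegvHandler(int, siginfo_t *info, void *context) {
      uptr pc, sp, bp;
      GetPcSpBp(context, &pc, &sp, &bp);
      uptr addr = (uptr)info->si_addr;  // faulting address
      // ... report the wild access at 'addr' using pc/bp/sp ...
    }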
-void *AsanMmapFixedNoReserve(uintptr_t fixed_addr, size_t size) {
- return asan_mmap((void*)fixed_addr, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- 0, 0);
+bool AsanInterceptsSignal(int signum) {
+ return signum == SIGSEGV && flags()->handle_segv;
}
-void *AsanMmapFixedReserve(uintptr_t fixed_addr, size_t size) {
- return asan_mmap((void*)fixed_addr, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- 0, 0);
+void AsanPlatformThreadInit() {
+ // Nothing here for now.
}
-void *AsanMprotect(uintptr_t fixed_addr, size_t size) {
- return asan_mmap((void*)fixed_addr, size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- 0, 0);
+AsanLock::AsanLock(LinkerInitialized) {
+ // We assume that pthread_mutex_t initialized to all zeroes is a valid
+ // unlocked mutex. We can not use PTHREAD_MUTEX_INITIALIZER as it triggers
+ // a gcc warning:
+ // extended initializer lists only available with -std=c++0x or -std=gnu++0x
}
-void AsanUnmapOrDie(void *addr, size_t size) {
- if (!addr || !size) return;
- int res = syscall(__NR_munmap, addr, size);
- if (res != 0) {
- Report("Failed to unmap\n");
- ASAN_DIE;
- }
+void AsanLock::Lock() {
+ CHECK(sizeof(pthread_mutex_t) <= sizeof(opaque_storage_));
+ pthread_mutex_lock((pthread_mutex_t*)&opaque_storage_);
+ CHECK(!owner_);
+ owner_ = (uptr)pthread_self();
}
-ssize_t AsanWrite(int fd, const void *buf, size_t count) {
- return (ssize_t)syscall(__NR_write, fd, buf, count);
+void AsanLock::Unlock() {
+ CHECK(owner_ == (uptr)pthread_self());
+ owner_ = 0;
+ pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
}
-int AsanOpenReadonly(const char* filename) {
- return open(filename, O_RDONLY);
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
}
-ssize_t AsanRead(int fd, void *buf, size_t count) {
- return (ssize_t)syscall(__NR_read, fd, buf, count);
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx,
+ void *param) {
+ AsanStackTrace *b = (AsanStackTrace*)param;
+ CHECK(b->size < b->max_size);
+ uptr pc = Unwind_GetIP(ctx);
+ b->trace[b->size++] = pc;
+ if (b->size == b->max_size) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
}
-int AsanClose(int fd) {
- return close(fd);
+void AsanStackTrace::GetStackTrace(uptr max_s, uptr pc, uptr bp) {
+ size = 0;
+ trace[0] = pc;
+ if ((max_s) > 1) {
+ max_size = max_s;
+#ifdef __arm__
+ _Unwind_Backtrace(Unwind_Trace, this);
+#else
+ FastUnwindStack(pc, bp);
+#endif
+ }
}
} // namespace __asan
diff --git a/lib/asan/asan_lock.h b/lib/asan/asan_lock.h
index 030fae6..edee49a 100644
--- a/lib/asan/asan_lock.h
+++ b/lib/asan/asan_lock.h
@@ -14,86 +14,28 @@
#ifndef ASAN_LOCK_H
#define ASAN_LOCK_H
+#include "sanitizer_common/sanitizer_mutex.h"
#include "asan_internal.h"
// The locks in ASan are global objects and they are never destroyed to avoid
// at-exit races (that is, a lock is being used by other threads while the main
// thread is doing atexit destructors).
+// We define the class using opaque storage to avoid including system headers.
-#ifdef __APPLE__
-#include <pthread.h>
-
-#include <libkern/OSAtomic.h>
namespace __asan {
-class AsanLock {
- public:
- explicit AsanLock(LinkerInitialized) :
- mu_(OS_SPINLOCK_INIT),
- owner_(0),
- is_locked_(false) {}
-
- void Lock() {
- CHECK(owner_ != pthread_self());
- OSSpinLockLock(&mu_);
- is_locked_ = true;
- owner_ = pthread_self();
- }
- void Unlock() {
- owner_ = 0;
- is_locked_ = false;
- OSSpinLockUnlock(&mu_);
- }
-
- bool IsLocked() {
- // This is not atomic, e.g. one thread may get different values if another
- // one is about to release the lock.
- return is_locked_;
- }
- private:
- OSSpinLock mu_;
- volatile pthread_t owner_; // for debugging purposes
- bool is_locked_; // for silly malloc_introspection_t interface
-};
-} // namespace __asan
-#else // assume linux
-#include <pthread.h>
-namespace __asan {
class AsanLock {
public:
- explicit AsanLock(LinkerInitialized) {
- // We assume that pthread_mutex_t initialized to all zeroes is a valid
- // unlocked mutex. We can not use PTHREAD_MUTEX_INITIALIZER as it triggers
- // a gcc warning:
- // extended initializer lists only available with -std=c++0x or -std=gnu++0x
- }
- void Lock() {
- pthread_mutex_lock(&mu_);
- // pthread_spin_lock(&mu_);
- }
- void Unlock() {
- pthread_mutex_unlock(&mu_);
- // pthread_spin_unlock(&mu_);
- }
+ explicit AsanLock(LinkerInitialized);
+ void Lock();
+ void Unlock();
+ bool IsLocked() { return owner_ != 0; }
private:
- pthread_mutex_t mu_;
- // pthread_spinlock_t mu_;
+ uptr opaque_storage_[10];
+ uptr owner_; // for debugging and for malloc_introspection_t interface
};
-} // namespace __asan
-#endif
-namespace __asan {
-class ScopedLock {
- public:
- explicit ScopedLock(AsanLock *mu) : mu_(mu) {
- mu_->Lock();
- }
- ~ScopedLock() {
- mu_->Unlock();
- }
- private:
- AsanLock *mu_;
-};
+typedef GenericScopedLock<AsanLock> ScopedLock;
} // namespace __asan
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index b202e63..a3d39e7 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -14,93 +14,165 @@
#ifdef __APPLE__
-#include "asan_mac.h"
-
+#include "asan_interceptors.h"
#include "asan_internal.h"
+#include "asan_mac.h"
+#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include <crt_externs.h> // for _NSGetEnviron
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/ucontext.h>
#include <fcntl.h>
+#include <pthread.h>
+#include <stdlib.h> // for free()
#include <unistd.h>
-
-#include <new>
+#include <libkern/OSAtomic.h>
+#include <CoreFoundation/CFString.h>
namespace __asan {
-extern dispatch_async_f_f real_dispatch_async_f;
-extern dispatch_sync_f_f real_dispatch_sync_f;
-extern dispatch_after_f_f real_dispatch_after_f;
-extern dispatch_barrier_async_f_f real_dispatch_barrier_async_f;
-extern dispatch_group_async_f_f real_dispatch_group_async_f;
-extern pthread_workqueue_additem_np_f real_pthread_workqueue_additem_np;
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if __WORDSIZE == 64
+ *pc = ucontext->uc_mcontext->__ss.__rip;
+ *bp = ucontext->uc_mcontext->__ss.__rbp;
+ *sp = ucontext->uc_mcontext->__ss.__rsp;
+# else
+ *pc = ucontext->uc_mcontext->__ss.__eip;
+ *bp = ucontext->uc_mcontext->__ss.__ebp;
+ *sp = ucontext->uc_mcontext->__ss.__esp;
+# endif // __WORDSIZE
+}
-// No-op. Mac does not support static linkage anyway.
-void *AsanDoesNotSupportStaticLinkage() {
- return NULL;
+int GetMacosVersion() {
+ int mib[2] = { CTL_KERN, KERN_OSRELEASE };
+ char version[100];
+ uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
+ for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
+ // Get the version length.
+ CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1);
+ CHECK(len < maxlen);
+ CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1);
+ switch (version[0]) {
+ case '9': return MACOS_VERSION_LEOPARD;
+ case '1': {
+ switch (version[1]) {
+ case '0': return MACOS_VERSION_SNOW_LEOPARD;
+ case '1': return MACOS_VERSION_LION;
+ default: return MACOS_VERSION_UNKNOWN;
+ }
+ }
+ default: return MACOS_VERSION_UNKNOWN;
+ }
}
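For reference (an assumption based on Darwin kernel version numbering, not stated in the patch): kern.osrelease begins with "9." on Mac OS X 10.5 (Leopard), "10." on 10.6 (Snow Leopard) and "11." on 10.7 (Lion), which is what the switch above keys on. A trivial usage sketch:

    if (GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD) {
      // Per PlatformHasDifferentMemcpyAndMemmove() below, 10.6 is the only
      // release treated as having distinct memcpy() and memmove() symbols.
    }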
-static void *asan_mmap(void *addr, size_t length, int prot, int flags,
- int fd, uint64_t offset) {
- return mmap(addr, length, prot, flags, fd, offset);
+bool PlatformHasDifferentMemcpyAndMemmove() {
+ // On OS X 10.7 memcpy() and memmove() are both resolved
+ // into memmove$VARIANT$sse42.
+ // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
+ // TODO(glider): need to check dynamically that memcpy() and memmove() are
+ // actually the same function.
+ return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
}
-ssize_t AsanWrite(int fd, const void *buf, size_t count) {
- return write(fd, buf, count);
+// No-op. Mac does not support static linkage anyway.
+void *AsanDoesNotSupportStaticLinkage() {
+ return 0;
}
-void *AsanMmapSomewhereOrDie(size_t size, const char *mem_type) {
- size = RoundUpTo(size, kPageSize);
- void *res = asan_mmap(0, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- if (res == (void*)-1) {
- OutOfMemoryMessageAndDie(mem_type, size);
- }
- return res;
+bool AsanInterceptsSignal(int signum) {
+ return (signum == SIGSEGV || signum == SIGBUS) && flags()->handle_segv;
}
-void *AsanMmapFixedNoReserve(uintptr_t fixed_addr, size_t size) {
- return asan_mmap((void*)fixed_addr, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- 0, 0);
+void AsanPlatformThreadInit() {
+ ReplaceCFAllocator();
}
-void *AsanMmapFixedReserve(uintptr_t fixed_addr, size_t size) {
- return asan_mmap((void*)fixed_addr, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- 0, 0);
+AsanLock::AsanLock(LinkerInitialized) {
+ // We assume that OS_SPINLOCK_INIT is zero
}
-void *AsanMprotect(uintptr_t fixed_addr, size_t size) {
- return asan_mmap((void*)fixed_addr, size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- 0, 0);
+void AsanLock::Lock() {
+ CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+ CHECK(OS_SPINLOCK_INIT == 0);
+ CHECK(owner_ != (uptr)pthread_self());
+ OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+ CHECK(!owner_);
+ owner_ = (uptr)pthread_self();
}
-void AsanUnmapOrDie(void *addr, size_t size) {
- if (!addr || !size) return;
- int res = munmap(addr, size);
- if (res != 0) {
- Report("Failed to unmap\n");
- ASAN_DIE;
- }
+void AsanLock::Unlock() {
+ CHECK(owner_ == (uptr)pthread_self());
+ owner_ = 0;
+ OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
-int AsanOpenReadonly(const char* filename) {
- return open(filename, O_RDONLY);
+void AsanStackTrace::GetStackTrace(uptr max_s, uptr pc, uptr bp) {
+ size = 0;
+ trace[0] = pc;
+ if ((max_s) > 1) {
+ max_size = max_s;
+ FastUnwindStack(pc, bp);
+ }
}
-ssize_t AsanRead(int fd, void *buf, size_t count) {
- return read(fd, buf, count);
+// The range of pages to be used for escape islands.
+// TODO(glider): instead of mapping a fixed range we must find a range of
+// unmapped pages in vmmap and take them.
+// These constants were chosen empirically and may not work if the shadow
+// memory layout changes. Unfortunately they do necessarily depend on
+// kHighMemBeg or kHighMemEnd.
+static void *island_allocator_pos = 0;
+
+#if __WORDSIZE == 32
+# define kIslandEnd (0xffdf0000 - kPageSize)
+# define kIslandBeg (kIslandEnd - 256 * kPageSize)
+#else
+# define kIslandEnd (0x7fffffdf0000 - kPageSize)
+# define kIslandBeg (kIslandEnd - 256 * kPageSize)
+#endif
+
+extern "C"
+mach_error_t __interception_allocate_island(void **ptr,
+ uptr unused_size,
+ void *unused_hint) {
+ if (!island_allocator_pos) {
+ island_allocator_pos =
+ internal_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ -1, 0);
+ if (island_allocator_pos != (void*)kIslandBeg) {
+ return KERN_NO_SPACE;
+ }
+ if (flags()->verbosity) {
+ Report("Mapped pages %p--%p for branch islands.\n",
+ (void*)kIslandBeg, (void*)kIslandEnd);
+ }
+ // Should not be very performance-critical.
+ internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
+ };
+ *ptr = island_allocator_pos;
+ island_allocator_pos = (char*)island_allocator_pos + kPageSize;
+ if (flags()->verbosity) {
+ Report("Branch island allocated at %p\n", *ptr);
+ }
+ return err_none;
}
-int AsanClose(int fd) {
- return close(fd);
+extern "C"
+mach_error_t __interception_deallocate_island(void *ptr) {
+ // Do nothing.
+ // TODO(glider): allow to free and reuse the island memory.
+ return err_none;
}
// Support for the following functions from libdispatch on Mac OS:
@@ -132,34 +204,56 @@ int AsanClose(int fd) {
// The implementation details are at
// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+typedef void* pthread_workqueue_t;
+typedef void* pthread_workitem_handle_t;
+
+typedef void* dispatch_group_t;
+typedef void* dispatch_queue_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void* (*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} asan_block_context_t;
+
+// We use extern declarations of libdispatch functions here instead
+// of including <dispatch/dispatch.h>. This header is not present on
+// Mac OS X Leopard and earlier, and although we don't expect ASan to
+// work on legacy systems, it's bad to break the build of
+// LLVM compiler-rt there.
+extern "C" {
+void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func);
+void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func);
+void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func);
+void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func);
+void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
+ void *ctxt, dispatch_function_t func);
+int pthread_workqueue_additem_np(pthread_workqueue_t workq,
+ void *(*workitem_func)(void *), void * workitem_arg,
+ pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
+} // extern "C"
+
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *context = (asan_block_context_t*)block;
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
}
AsanThread *t = asanThreadRegistry().GetCurrent();
- if (t) {
- // We've already executed a job on this worker thread. Let's reuse the
- // AsanThread object.
- if (t != asanThreadRegistry().GetMain()) {
- // Flush the statistics and update the current thread's tid.
- asanThreadRegistry().UnregisterThread(t);
- asanThreadRegistry().RegisterThread(t, context->parent_tid, &stack);
- }
- // Otherwise the worker is being executed on the main thread -- we are
- // draining the dispatch queue.
- // TODO(glider): any checks for that?
- } else {
- // It's incorrect to assert that the current thread is not dying: at least
- // the callbacks from dispatch_sync() are sometimes called after the TSD is
- // destroyed.
- t = (AsanThread*)asan_malloc(sizeof(AsanThread), &stack);
- new(t) AsanThread(context->parent_tid,
- /*start_routine*/NULL, /*arg*/NULL, &stack);
+ if (!t) {
+ t = AsanThread::Create(context->parent_tid, 0, 0, &stack);
+ asanThreadRegistry().RegisterThread(t);
t->Init();
asanThreadRegistry().SetCurrent(t);
}
@@ -181,94 +275,75 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
asan_ctxt->block = ctxt;
asan_ctxt->func = func;
- AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
- if (FLAG_debug) {
- // Sometimes at Chromium teardown this assertion is violated:
- // -- a task is created via dispatch_async() on the "CFMachPort"
- // thread while doing _dispatch_queue_drain();
- // -- a task is created via dispatch_async_f() on the
- // "com.apple.root.default-overcommit-priority" thread while doing
- // _dispatch_dispose().
- // TODO(glider): find out what's going on.
- CHECK(curr_thread || asanThreadRegistry().IsCurrentThreadDying());
- }
- asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
+ asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
return asan_ctxt;
}
// TODO(glider): can we reduce code duplication by introducing a macro?
-extern "C"
-int WRAP(dispatch_async_f)(dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("dispatch_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
}
- return real_dispatch_async_f(dq, (void*)asan_ctxt,
- asan_dispatch_call_block_and_release);
+ return REAL(dispatch_async_f)(dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
}
-extern "C"
-int WRAP(dispatch_sync_f)(dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+INTERCEPTOR(void, dispatch_sync_f, dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("dispatch_sync_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
}
- return real_dispatch_sync_f(dq, (void*)asan_ctxt,
- asan_dispatch_call_block_and_release);
+ return REAL(dispatch_sync_f)(dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
}
-extern "C"
-int WRAP(dispatch_after_f)(dispatch_time_t when,
- dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
- return real_dispatch_after_f(when, dq, (void*)asan_ctxt,
- asan_dispatch_call_block_and_release);
+ return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
}
-extern "C"
-void WRAP(dispatch_barrier_async_f)(dispatch_queue_t dq,
- void *ctxt, dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+INTERCEPTOR(void, dispatch_barrier_async_f, dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("dispatch_barrier_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
}
- real_dispatch_barrier_async_f(dq, (void*)asan_ctxt,
- asan_dispatch_call_block_and_release);
+ REAL(dispatch_barrier_async_f)(dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
}
-extern "C"
-void WRAP(dispatch_group_async_f)(dispatch_group_t group,
- dispatch_queue_t dq,
- void *ctxt, dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
}
- real_dispatch_group_async_f(group, dq, (void*)asan_ctxt,
- asan_dispatch_call_block_and_release);
+ REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
}
// The following stuff has been extremely helpful while looking for the
@@ -279,33 +354,90 @@ void WRAP(dispatch_group_async_f)(dispatch_group_t group,
// libdispatch API.
extern "C"
void *wrap_workitem_func(void *arg) {
- if (FLAG_v >= 2) {
+ if (flags()->verbosity >= 2) {
Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
}
asan_block_context_t *ctxt = (asan_block_context_t*)arg;
worker_t fn = (worker_t)(ctxt->func);
void *result = fn(ctxt->block);
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_free(arg, &stack);
return result;
}
-extern "C"
-int WRAP(pthread_workqueue_additem_np)(pthread_workqueue_t workq,
+INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
void *(*workitem_func)(void *), void * workitem_arg,
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
+ GET_STACK_TRACE_HERE(kStackTraceMax);
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
asan_ctxt->block = workitem_arg;
asan_ctxt->func = (dispatch_function_t)workitem_func;
- asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
- if (FLAG_v >= 2) {
+ asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
+ if (flags()->verbosity >= 2) {
Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
- return real_pthread_workqueue_additem_np(workq, wrap_workitem_func, asan_ctxt,
- itemhandlep, gencountp);
+ return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
+ asan_ctxt, itemhandlep,
+ gencountp);
}
+// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
+int __CFStrIsConstant(CFStringRef str) {
+ CFRuntimeBase *base = (CFRuntimeBase*)str;
+#if __LP64__
+ return base->_rc == 0;
+#else
+ return (base->_cfinfo[CF_RC_BITS]) == 0;
+#endif
+}
+
+INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
+ CFStringRef str) {
+ if (__CFStrIsConstant(str)) {
+ return str;
+ } else {
+ return REAL(CFStringCreateCopy)(alloc, str);
+ }
+}
+
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+
+extern "C"
+void __CFInitialize();
+DECLARE_REAL_AND_INTERCEPTOR(void, __CFInitialize)
+
+namespace __asan {
+
+void InitializeMacInterceptors() {
+ CHECK(INTERCEPT_FUNCTION(dispatch_async_f));
+ CHECK(INTERCEPT_FUNCTION(dispatch_sync_f));
+ CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
+ CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
+ CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
+ // We don't need to intercept pthread_workqueue_additem_np() to support the
+ // libdispatch API, but it helps us to debug the unsupported functions. Let's
+ // intercept it only during verbose runs.
+ if (flags()->verbosity >= 2) {
+ CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
+ }
+ // Normally CFStringCreateCopy should not copy constant CF strings.
+ // Replacing the default CFAllocator causes constant strings to be copied
+ // rather than just returned, which leads to bugs in big applications like
+ // Chromium and WebKit, see
+ // http://code.google.com/p/address-sanitizer/issues/detail?id=10
+ // Until this problem is fixed we need to check that the string is
+ // non-constant before calling CFStringCreateCopy.
+ CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
+ // Some of the library functions call free() directly, so we have to
+ // intercept it.
+ CHECK(INTERCEPT_FUNCTION(free));
+ if (flags()->replace_cfallocator) {
+ CHECK(INTERCEPT_FUNCTION(__CFInitialize));
+ }
+}
+
+} // namespace __asan
+
#endif // __APPLE__
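For reference, a minimal sketch of the pattern behind the INTERCEPTOR/REAL/INTERCEPT_FUNCTION macros used in the hunks above (a simplified illustration only; the actual expansion lives in interception/interception.h and is platform-specific):

#include <dispatch/dispatch.h>  // for dispatch_queue_t / dispatch_function_t

// Simplified illustration, not the literal macro expansion.
typedef void (*dispatch_async_f_type)(dispatch_queue_t dq, void *ctxt,
                                      dispatch_function_t func);
static dispatch_async_f_type real_dispatch_async_f;  // what REAL(...) names

// Roughly what INTERCEPTOR(void, dispatch_async_f, ...) produces:
extern "C" void __interceptor_dispatch_async_f(dispatch_queue_t dq, void *ctxt,
                                               dispatch_function_t func) {
  // ...wrap ctxt/func in an asan_block_context_t as above, then forward:
  real_dispatch_async_f(dq, ctxt, func);
}

// INTERCEPT_FUNCTION(dispatch_async_f) then resolves the original entry point
// (e.g. via dlsym(RTLD_NEXT, "dispatch_async_f")) into real_dispatch_async_f.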
diff --git a/lib/asan/asan_mac.h b/lib/asan/asan_mac.h
index 32739e7..6c65765 100644
--- a/lib/asan/asan_mac.h
+++ b/lib/asan/asan_mac.h
@@ -9,79 +9,45 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// ASan-private header for asan_mac.cc
+// Mac-specific ASan definitions.
//===----------------------------------------------------------------------===//
-#ifdef __APPLE__
-
#ifndef ASAN_MAC_H
#define ASAN_MAC_H
-#include "asan_interceptors.h"
-
-// TODO(glider): need to check if the OS X version is 10.6 or greater.
-#include <dispatch/dispatch.h>
-#include <setjmp.h>
+// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
+// and subject to change in future CoreFoundation versions. Apple does not
+// guarantee any binary compatibility from release to release.
-typedef void* pthread_workqueue_t;
-typedef void* pthread_workitem_handle_t;
+// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
+#if defined(__BIG_ENDIAN__)
+#define CF_RC_BITS 0
+#endif
-typedef void (*dispatch_function_t)(void *block);
-typedef void* (*worker_t)(void *block);
-typedef int (*dispatch_async_f_f)(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func);
-typedef int (*dispatch_sync_f_f)(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func);
-typedef int (*dispatch_after_f_f)(dispatch_time_t when,
- dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func);
-typedef void (*dispatch_barrier_async_f_f)(dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func);
-typedef void (*dispatch_group_async_f_f)(dispatch_group_t group,
- dispatch_queue_t dq,
- void *ctxt, dispatch_function_t func);
-typedef int (*pthread_workqueue_additem_np_f)(pthread_workqueue_t workq,
- void *(*workitem_func)(void *), void * workitem_arg,
- pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
+#if defined(__LITTLE_ENDIAN__)
+#define CF_RC_BITS 3
+#endif
+// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
+typedef struct __CFRuntimeBase {
+ uptr _cfisa;
+ u8 _cfinfo[4];
+#if __LP64__
+ u32 _rc;
+#endif
+} CFRuntimeBase;
-// A wrapper for the ObjC blocks used to support libdispatch.
-typedef struct {
- void *block;
- dispatch_function_t func;
- int parent_tid;
-} asan_block_context_t;
+enum {
+ MACOS_VERSION_UNKNOWN = 0,
+ MACOS_VERSION_LEOPARD,
+ MACOS_VERSION_SNOW_LEOPARD,
+ MACOS_VERSION_LION
+};
+namespace __asan {
-extern "C" {
-// dispatch_barrier_async_f() is not declared in <dispatch/dispatch.h>.
-void dispatch_barrier_async_f(dispatch_queue_t dq,
- void *ctxt, dispatch_function_t func);
-// Neither is pthread_workqueue_additem_np().
-int pthread_workqueue_additem_np(pthread_workqueue_t workq,
- void *(*workitem_func)(void *), void * workitem_arg,
- pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
+int GetMacosVersion();
+void ReplaceCFAllocator();
-int WRAP(dispatch_async_f)(dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func);
-int WRAP(dispatch_sync_f)(dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func);
-int WRAP(dispatch_after_f)(dispatch_time_t when,
- dispatch_queue_t dq,
- void *ctxt,
- dispatch_function_t func);
-void WRAP(dispatch_barrier_async_f)(dispatch_queue_t dq,
- void *ctxt, dispatch_function_t func);
-void WRAP(dispatch_group_async_f)(dispatch_group_t group,
- dispatch_queue_t dq,
- void *ctxt, dispatch_function_t func);
-int WRAP(pthread_workqueue_additem_np)(pthread_workqueue_t workq,
- void *(*workitem_func)(void *), void * workitem_arg,
- pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
-}
+} // namespace __asan
#endif // ASAN_MAC_H
-
-#endif // __APPLE__
diff --git a/lib/asan/asan_malloc_linux.cc b/lib/asan/asan_malloc_linux.cc
index 9dbc7a1..1046f4c 100644
--- a/lib/asan/asan_malloc_linux.cc
+++ b/lib/asan/asan_malloc_linux.cc
@@ -1,4 +1,4 @@
-//===-- asan_malloc_linux.cc ------------------------------------*- C++ -*-===//
+//===-- asan_malloc_linux.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,20 +20,16 @@
#include "asan_internal.h"
#include "asan_stack.h"
-#include <malloc.h>
-
-#define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
-
#ifdef ANDROID
struct MallocDebug {
- void* (*malloc)(size_t bytes);
+ void* (*malloc)(uptr bytes);
void (*free)(void* mem);
- void* (*calloc)(size_t n_elements, size_t elem_size);
- void* (*realloc)(void* oldMem, size_t bytes);
- void* (*memalign)(size_t alignment, size_t bytes);
+ void* (*calloc)(uptr n_elements, uptr elem_size);
+ void* (*realloc)(void* oldMem, uptr bytes);
+ void* (*memalign)(uptr alignment, uptr bytes);
};
-const MallocDebug asan_malloc_dispatch __attribute__((aligned(32))) = {
+const MallocDebug asan_malloc_dispatch ALIGNED(32) = {
malloc, free, calloc, realloc, memalign
};
@@ -56,33 +52,28 @@ void ReplaceSystemMalloc() {
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
-extern "C" {
-INTERCEPTOR_ATTRIBUTE
-void free(void *ptr) {
+INTERCEPTOR(void, free, void *ptr) {
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
asan_free(ptr, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void cfree(void *ptr) {
+INTERCEPTOR(void, cfree, void *ptr) {
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
asan_free(ptr, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void *malloc(size_t size) {
+INTERCEPTOR(void*, malloc, uptr size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_malloc(size, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void *calloc(size_t nmemb, size_t size) {
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (!asan_inited) {
- // Hack: dlsym calls calloc before real_calloc is retrieved from dlsym.
- const size_t kCallocPoolSize = 1024;
- static uintptr_t calloc_memory_for_dlsym[kCallocPoolSize];
- static size_t allocated;
- size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ const uptr kCallocPoolSize = 1024;
+ static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+ static uptr allocated;
+ uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
@@ -92,51 +83,56 @@ void *calloc(size_t nmemb, size_t size) {
return asan_calloc(nmemb, size, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void *realloc(void *ptr, size_t size) {
+INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_realloc(ptr, size, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void *memalign(size_t boundary, size_t size) {
+INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_memalign(boundary, size, &stack);
}
-void* __libc_memalign(size_t align, size_t s)
- __attribute__((alias("memalign")));
+INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
+ ALIAS("memalign");
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ GET_STACK_TRACE_HERE_FOR_MALLOC;
+ return asan_malloc_usable_size(ptr, &stack);
+}
+
+// We avoid including malloc.h for portability reasons.
+// man mallinfo says the fields are "long", but the implementation uses int.
+// It doesn't matter much -- we just need to make sure that the libc's mallinfo
+// is not called.
+struct fake_mallinfo {
+ int x[10];
+};
-INTERCEPTOR_ATTRIBUTE
-struct mallinfo mallinfo() {
- struct mallinfo res;
- real_memset(&res, 0, sizeof(res));
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+ struct fake_mallinfo res;
+ REAL(memset)(&res, 0, sizeof(res));
return res;
}
-INTERCEPTOR_ATTRIBUTE
-int mallopt(int cmd, int value) {
+INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
-INTERCEPTOR_ATTRIBUTE
-int posix_memalign(void **memptr, size_t alignment, size_t size) {
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
- // Printf("posix_memalign: %lx %ld\n", alignment, size);
+ // Printf("posix_memalign: %zx %zu\n", alignment, size);
return asan_posix_memalign(memptr, alignment, size, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void *valloc(size_t size) {
+INTERCEPTOR(void*, valloc, uptr size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_valloc(size, &stack);
}
-INTERCEPTOR_ATTRIBUTE
-void *pvalloc(size_t size) {
+INTERCEPTOR(void*, pvalloc, uptr size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_pvalloc(size, &stack);
}
-} // extern "C"
#endif // __linux__
diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc
index 8a6f1bc..1a6c840 100644
--- a/lib/asan/asan_malloc_mac.cc
+++ b/lib/asan/asan_malloc_mac.cc
@@ -1,4 +1,4 @@
-//===-- asan_rtl.cc ---------------------------------------------*- C++ -*-===//
+//===-- asan_malloc_mac.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,12 +16,14 @@
#include <AvailabilityMacros.h>
#include <CoreFoundation/CFBase.h>
+#include <dlfcn.h>
#include <malloc/malloc.h>
#include <setjmp.h>
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
+#include "asan_mac.h"
#include "asan_stack.h"
// Similar code is used in Google Perftools,
@@ -30,12 +32,26 @@
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
+// TODO(glider): do we need both zones?
+static malloc_zone_t *system_malloc_zone = 0;
+static malloc_zone_t *system_purgeable_zone = 0;
+static malloc_zone_t asan_zone;
+CFAllocatorRef cf_asan = 0;
+
// The free() implementation provided by OS X calls malloc_zone_from_ptr()
-// to find the owner of |ptr|. If the result is NULL, an invalid free() is
+// to find the owner of |ptr|. If the result is 0, an invalid free() is
// reported. Our implementation falls back to asan_free() in this case
// in order to print an ASan-style report.
-extern "C"
-void free(void *ptr) {
+//
+// For the objects created by _CFRuntimeCreateInstance a CFAllocatorRef is
+// placed at the beginning of the allocated chunk and the pointer returned by
+// our allocator is off by sizeof(CFAllocatorRef). This pointer can be then
+// passed directly to free(), which will lead to errors.
+// To overcome this we're checking whether |ptr-sizeof(CFAllocatorRef)|
+// contains a pointer to our CFAllocator (assuming no other allocator is used).
+// See http://code.google.com/p/address-sanitizer/issues/detail?id=70 for more
+// info.
+INTERCEPTOR(void, free, void *ptr) {
malloc_zone_t *zone = malloc_zone_from_ptr(ptr);
if (zone) {
#if defined(MAC_OS_X_VERSION_10_6) && \
@@ -49,27 +65,48 @@ void free(void *ptr) {
malloc_zone_free(zone, ptr);
#endif
} else {
+ if (flags()->replace_cfallocator) {
+ // Make sure we're not hitting the previous page. This may be incorrect
+      // if ASan's malloc returns an address ending with 0xFF8, which will then
+      // be padded to a page boundary with a CFAllocatorRef.
+ uptr arith_ptr = (uptr)ptr;
+ if ((arith_ptr & 0xFFF) > sizeof(CFAllocatorRef)) {
+ CFAllocatorRef *saved =
+ (CFAllocatorRef*)(arith_ptr - sizeof(CFAllocatorRef));
+ if ((*saved == cf_asan) && asan_mz_size(saved)) ptr = (void*)saved;
+ }
+ }
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
asan_free(ptr, &stack);
}
}
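A sketch of the chunk layout the check above assumes for objects created through _CFRuntimeCreateInstance (illustrative only):

//   base = start of the ASan allocation      base + sizeof(CFAllocatorRef)
//   +--------------------------------+---------------------------------+
//   | CFAllocatorRef (== cf_asan)    | object payload handed to CF     |
//   +--------------------------------+---------------------------------+
//
// free(payload) therefore looks one CFAllocatorRef back; if that slot holds
// cf_asan and asan_mz_size() recognizes the adjusted pointer, the adjusted
// pointer is freed instead of the payload pointer.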
-// TODO(glider): do we need both zones?
-static malloc_zone_t *system_malloc_zone = NULL;
-static malloc_zone_t *system_purgeable_zone = NULL;
+namespace __asan {
+ void ReplaceCFAllocator();
+}
+
+// We can't always replace the default CFAllocator with cf_asan right in
+// ReplaceSystemMalloc(), because it is sometimes called before
+// __CFInitialize(), when the default allocator is invalid and replacing it may
+// crash the program. Instead we wait for the allocator to initialize and jump
+// in just after __CFInitialize(). Nobody is going to allocate memory using
+// CFAllocators before that, so we won't miss anything.
+//
+// See http://code.google.com/p/address-sanitizer/issues/detail?id=87
+// and http://opensource.apple.com/source/CF/CF-550.43/CFRuntime.c
+INTERCEPTOR(void, __CFInitialize) {
+ CHECK(flags()->replace_cfallocator);
+ CHECK(asan_inited);
+ REAL(__CFInitialize)();
+ if (!cf_asan) ReplaceCFAllocator();
+}
-// We need to provide wrappers around all the libc functions.
namespace {
+
// TODO(glider): the mz_* functions should be united with the Linux wrappers,
// as they are basically copied from there.
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
- // Fast path: check whether this pointer belongs to the original malloc zone.
- // We cannot just call malloc_zone_from_ptr(), because it in turn
- // calls our mz_size().
- if (system_malloc_zone) {
- if ((system_malloc_zone->size)(system_malloc_zone, ptr)) return 0;
- }
- return __asan_mz_size(ptr);
+ return asan_mz_size(ptr);
}
void *mz_malloc(malloc_zone_t *zone, size_t size) {
@@ -92,9 +129,9 @@ void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) {
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (!asan_inited) {
- // Hack: dlsym calls calloc before real_calloc is retrieved from dlsym.
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
- static uintptr_t calloc_memory_for_dlsym[kCallocPoolSize];
+ static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static size_t allocated;
size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
@@ -119,35 +156,27 @@ void print_zone_for_ptr(void *ptr) {
malloc_zone_t *orig_zone = malloc_zone_from_ptr(ptr);
if (orig_zone) {
if (orig_zone->zone_name) {
- Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
- ptr, orig_zone, orig_zone->zone_name);
+ AsanPrintf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
+ ptr, orig_zone, orig_zone->zone_name);
} else {
- Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
- ptr, orig_zone);
+ AsanPrintf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
+ ptr, orig_zone);
}
} else {
- Printf("malloc_zone_from_ptr(%p) = NULL\n", ptr);
+ AsanPrintf("malloc_zone_from_ptr(%p) = 0\n", ptr);
}
}
-// TODO(glider): the allocation callbacks need to be refactored.
-void mz_free(malloc_zone_t *zone, void *ptr) {
+void ALWAYS_INLINE free_common(void *context, void *ptr) {
if (!ptr) return;
- malloc_zone_t *orig_zone = malloc_zone_from_ptr(ptr);
- // For some reason Chromium calls mz_free() for pointers that belong to
- // DefaultPurgeableMallocZone instead of asan_zone. We might want to
- // fix this someday.
- if (orig_zone == system_purgeable_zone) {
- system_purgeable_zone->free(system_purgeable_zone, ptr);
- return;
- }
- if (__asan_mz_size(ptr)) {
+ if (!flags()->mac_ignore_invalid_free || asan_mz_size(ptr)) {
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
asan_free(ptr, &stack);
} else {
// Let us just leak this memory for now.
- Printf("mz_free(%p) -- attempting to free unallocated memory.\n"
- "AddressSanitizer is ignoring this error on Mac OS now.\n", ptr);
+ AsanPrintf("free_common(%p) -- attempting to free unallocated memory.\n"
+ "AddressSanitizer is ignoring this error on Mac OS now.\n",
+ ptr);
print_zone_for_ptr(ptr);
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
stack.PrintStack();
@@ -155,28 +184,13 @@ void mz_free(malloc_zone_t *zone, void *ptr) {
}
}
+// TODO(glider): the allocation callbacks need to be refactored.
+void mz_free(malloc_zone_t *zone, void *ptr) {
+ free_common(zone, ptr);
+}
+
void cf_free(void *ptr, void *info) {
- if (!ptr) return;
- malloc_zone_t *orig_zone = malloc_zone_from_ptr(ptr);
- // For some reason Chromium calls mz_free() for pointers that belong to
- // DefaultPurgeableMallocZone instead of asan_zone. We might want to
- // fix this someday.
- if (orig_zone == system_purgeable_zone) {
- system_purgeable_zone->free(system_purgeable_zone, ptr);
- return;
- }
- if (__asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- asan_free(ptr, &stack);
- } else {
- // Let us just leak this memory for now.
- Printf("cf_free(%p) -- attempting to free unallocated memory.\n"
- "AddressSanitizer is ignoring this error on Mac OS now.\n", ptr);
- print_zone_for_ptr(ptr);
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- stack.PrintStack();
- return;
- }
+ free_common(info, ptr);
}
void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
@@ -184,20 +198,21 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_malloc(size, &stack);
} else {
- if (__asan_mz_size(ptr)) {
+ if (asan_mz_size(ptr)) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_realloc(ptr, size, &stack);
} else {
// We can't recover from reallocating an unknown address, because
// this would require reading at most |size| bytes from
      // potentially inaccessible memory.
- Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
- "This is an unrecoverable problem, exiting now.\n", ptr);
+ AsanPrintf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
+ "This is an unrecoverable problem, exiting now.\n",
+ ptr);
print_zone_for_ptr(ptr);
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
stack.PrintStack();
ShowStatsAndAbort();
- return NULL; // unreachable
+ return 0; // unreachable
}
}
}
@@ -207,27 +222,28 @@ void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_malloc(size, &stack);
} else {
- if (__asan_mz_size(ptr)) {
+ if (asan_mz_size(ptr)) {
GET_STACK_TRACE_HERE_FOR_MALLOC;
return asan_realloc(ptr, size, &stack);
} else {
// We can't recover from reallocating an unknown address, because
// this would require reading at most |size| bytes from
      // potentially inaccessible memory.
- Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n"
- "This is an unrecoverable problem, exiting now.\n", ptr);
+ AsanPrintf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n"
+ "This is an unrecoverable problem, exiting now.\n",
+ ptr);
print_zone_for_ptr(ptr);
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
stack.PrintStack();
ShowStatsAndAbort();
- return NULL; // unreachable
+ return 0; // unreachable
}
}
}
void mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed!
- Printf("mz_destroy() called -- ignoring\n");
+ AsanPrintf("mz_destroy() called -- ignoring\n");
}
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
@@ -279,11 +295,11 @@ void mi_log(malloc_zone_t *zone, void *address) {
}
void mi_force_lock(malloc_zone_t *zone) {
- __asan_mz_force_lock();
+ asan_mz_force_lock();
}
void mi_force_unlock(malloc_zone_t *zone) {
- __asan_mz_force_unlock();
+ asan_mz_force_unlock();
}
// This function is currently unused, and we build with -Werror.
@@ -298,19 +314,38 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
}
#endif
+#if defined(MAC_OS_X_VERSION_10_6) && \
+ MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
// UNIMPLEMENTED();
return false;
}
+#endif
} // unnamed namespace
-extern bool kCFUseCollectableAllocator; // is GC on?
+extern int __CFRuntimeClassTableSize;
namespace __asan {
+void ReplaceCFAllocator() {
+ static CFAllocatorContext asan_context = {
+ /*version*/ 0, /*info*/ &asan_zone,
+ /*retain*/ 0, /*release*/ 0,
+ /*copyDescription*/0,
+ /*allocate*/ &cf_malloc,
+ /*reallocate*/ &cf_realloc,
+ /*deallocate*/ &cf_free,
+ /*preferredSize*/ 0 };
+ if (!cf_asan)
+ cf_asan = CFAllocatorCreate(kCFAllocatorUseContext, &asan_context);
+ if (CFAllocatorGetDefault() != cf_asan)
+ CFAllocatorSetDefault(cf_asan);
+}
+
void ReplaceSystemMalloc() {
static malloc_introspection_t asan_introspection;
- __asan::real_memset(&asan_introspection, 0, sizeof(asan_introspection));
+ // Ok to use internal_memset, these places are not performance-critical.
+ internal_memset(&asan_introspection, 0, sizeof(asan_introspection));
asan_introspection.enumerator = &mi_enumerator;
asan_introspection.good_size = &mi_good_size;
@@ -320,8 +355,7 @@ void ReplaceSystemMalloc() {
asan_introspection.force_lock = &mi_force_lock;
asan_introspection.force_unlock = &mi_force_unlock;
- static malloc_zone_t asan_zone;
- __asan::real_memset(&asan_zone, 0, sizeof(malloc_zone_t));
+ internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
// Start with a version 4 zone which is used for OS X 10.4 and 10.5.
asan_zone.version = 4;
@@ -333,8 +367,8 @@ void ReplaceSystemMalloc() {
asan_zone.free = &mz_free;
asan_zone.realloc = &mz_realloc;
asan_zone.destroy = &mz_destroy;
- asan_zone.batch_malloc = NULL;
- asan_zone.batch_free = NULL;
+ asan_zone.batch_malloc = 0;
+ asan_zone.batch_free = 0;
asan_zone.introspect = &asan_introspection;
// from AvailabilityMacros.h
@@ -371,18 +405,16 @@ void ReplaceSystemMalloc() {
// Make sure the default allocator was replaced.
CHECK(malloc_default_zone() == &asan_zone);
- if (FLAG_replace_cfallocator) {
- static CFAllocatorContext asan_context =
- { /*version*/ 0, /*info*/ &asan_zone,
- /*retain*/ NULL, /*release*/ NULL,
- /*copyDescription*/NULL,
- /*allocate*/ &cf_malloc,
- /*reallocate*/ &cf_realloc,
- /*deallocate*/ &cf_free,
- /*preferredSize*/ NULL };
- CFAllocatorRef cf_asan =
- CFAllocatorCreate(kCFAllocatorUseContext, &asan_context);
- CFAllocatorSetDefault(cf_asan);
+ if (flags()->replace_cfallocator) {
+ // If __CFInitialize() hasn't been called yet, cf_asan will be created and
+ // installed as the default allocator after __CFInitialize() finishes (see
+ // the interceptor for __CFInitialize() above). Otherwise install cf_asan
+ // right now. On both Snow Leopard and Lion __CFInitialize() calls
+ // __CFAllocatorInitialize(), which initializes the _base._cfisa field of
+ // the default allocators we check here.
+ if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) {
+ ReplaceCFAllocator();
+ }
}
}
} // namespace __asan
diff --git a/lib/asan/asan_malloc_win.cc b/lib/asan/asan_malloc_win.cc
new file mode 100644
index 0000000..6c00e77
--- /dev/null
+++ b/lib/asan/asan_malloc_win.cc
@@ -0,0 +1,141 @@
+//===-- asan_malloc_win.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific malloc interception.
+//===----------------------------------------------------------------------===//
+#ifdef _WIN32
+
+#include "asan_allocator.h"
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_stack.h"
+
+#include "interception/interception.h"
+
+// ---------------------- Replacement functions ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// FIXME: Simply defining functions with the same signature in *.obj
+// files overrides the standard functions in *.lib
+// This works well for simple helloworld-like tests but might need to be
+// revisited in the future.
+
+extern "C" {
+void free(void *ptr) {
+ GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ return asan_free(ptr, &stack);
+}
+
+void _free_dbg(void* ptr, int) {
+ free(ptr);
+}
+
+void cfree(void *ptr) {
+ CHECK(!"cfree() should not be used on Windows?");
+}
+
+void *malloc(size_t size) {
+ GET_STACK_TRACE_HERE_FOR_MALLOC;
+ return asan_malloc(size, &stack);
+}
+
+void* _malloc_dbg(size_t size, int , const char*, int) {
+ return malloc(size);
+}
+
+void *calloc(size_t nmemb, size_t size) {
+ GET_STACK_TRACE_HERE_FOR_MALLOC;
+ return asan_calloc(nmemb, size, &stack);
+}
+
+void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
+ return calloc(n, size);
+}
+
+void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
+ return calloc(nmemb, size);
+}
+
+void *realloc(void *ptr, size_t size) {
+ GET_STACK_TRACE_HERE_FOR_MALLOC;
+ return asan_realloc(ptr, size, &stack);
+}
+
+void *_realloc_dbg(void *ptr, size_t size, int) {
+ CHECK(!"_realloc_dbg should not exist!");
+ return 0;
+}
+
+void* _recalloc(void* p, size_t n, size_t elem_size) {
+ if (!p)
+ return calloc(n, elem_size);
+ const size_t size = n * elem_size;
+ if (elem_size != 0 && size / elem_size != n)
+ return 0;
+ return realloc(p, size);
+}
+
+size_t _msize(void *ptr) {
+ GET_STACK_TRACE_HERE_FOR_MALLOC;
+ return asan_malloc_usable_size(ptr, &stack);
+}
+
+int _CrtDbgReport(int, const char*, int,
+ const char*, const char*, ...) {
+ ShowStatsAndAbort();
+}
+
+int _CrtDbgReportW(int reportType, const wchar_t*, int,
+ const wchar_t*, const wchar_t*, ...) {
+ ShowStatsAndAbort();
+}
+
+int _CrtSetReportMode(int, int) {
+ return 0;
+}
+} // extern "C"
+
+using __interception::GetRealFunctionAddress;
+
+// We don't want to include "windows.h" in this file to avoid extra attributes
+// set on malloc/free etc (e.g. dllimport), so declare a few things manually:
+extern "C" int __stdcall VirtualProtect(void* addr, size_t size,
+ DWORD prot, DWORD *old_prot);
+const int PAGE_EXECUTE_READWRITE = 0x40;
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+#if defined(_DLL)
+# ifdef _WIN64
+# error ReplaceSystemMalloc was not tested on x64
+# endif
+ char *crt_malloc;
+ if (GetRealFunctionAddress("malloc", (void**)&crt_malloc)) {
+ // Replace malloc in the CRT dll with a jump to our malloc.
+ DWORD old_prot, unused;
+ CHECK(VirtualProtect(crt_malloc, 16, PAGE_EXECUTE_READWRITE, &old_prot));
+ REAL(memset)(crt_malloc, 0xCC /* int 3 */, 16); // just in case.
+
+ ptrdiff_t jmp_offset = (char*)malloc - (char*)crt_malloc - 5;
+ crt_malloc[0] = 0xE9; // jmp, should be followed by an offset.
+ REAL(memcpy)(crt_malloc + 1, &jmp_offset, sizeof(jmp_offset));
+
+ CHECK(VirtualProtect(crt_malloc, 16, old_prot, &unused));
+
+ // FYI: FlushInstructionCache is needed on Itanium etc but not on x86/x64.
+ }
+
+ // FIXME: investigate whether anything else is needed.
+#endif
+}
+} // namespace __asan
+
+#endif // _WIN32
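The CRT patch above writes a classic 5-byte near jump. A worked example of the encoding, using made-up addresses:

// Suppose the CRT's malloc lives at 0x10001000 and ASan's malloc at 0x10005000.
// A near jmp is E9 <rel32>, where rel32 is measured from the end of the
// 5-byte instruction -- hence the "- 5" in jmp_offset above:
//   rel32 = 0x10005000 - 0x10001000 - 5 = 0x3FFB
// Bytes written at 0x10001000: E9 FB 3F 00 00   (jmp 0x10005000)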
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index 63aba10..8e0c6ec 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -20,26 +20,32 @@
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern __attribute__((visibility("default"))) uintptr_t __asan_mapping_scale;
-extern __attribute__((visibility("default"))) uintptr_t __asan_mapping_offset;
-#define SHADOW_SCALE (__asan_mapping_scale)
-#define SHADOW_OFFSET (__asan_mapping_offset)
+extern __attribute__((visibility("default"))) uptr __asan_mapping_scale;
+extern __attribute__((visibility("default"))) uptr __asan_mapping_offset;
+# define SHADOW_SCALE (__asan_mapping_scale)
+# define SHADOW_OFFSET (__asan_mapping_offset)
#else
-#define SHADOW_SCALE (3)
-#if __WORDSIZE == 32
-#define SHADOW_OFFSET (1 << 29)
-#else
-#define SHADOW_OFFSET (1ULL << 44)
-#endif
+# ifdef ANDROID
+# define SHADOW_SCALE (3)
+# define SHADOW_OFFSET (0)
+# else
+# define SHADOW_SCALE (3)
+# if __WORDSIZE == 32
+# define SHADOW_OFFSET (1 << 29)
+# else
+# define SHADOW_OFFSET (1ULL << 44)
+# endif
+# endif
#endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) | (SHADOW_OFFSET))
+#define SHADOW_TO_MEM(shadow) (((shadow) - SHADOW_OFFSET) << SHADOW_SCALE)
#if __WORDSIZE == 64
- static const size_t kHighMemEnd = 0x00007fffffffffffUL;
+ static const uptr kHighMemEnd = 0x00007fffffffffffUL;
#else // __WORDSIZE == 32
- static const size_t kHighMemEnd = 0xffffffff;
+ static const uptr kHighMemEnd = 0xffffffff;
#endif // __WORDSIZE
@@ -62,39 +68,55 @@ extern __attribute__((visibility("default"))) uintptr_t __asan_mapping_offset;
namespace __asan {
-static inline bool AddrIsInLowMem(uintptr_t a) {
+static inline bool AddrIsInLowMem(uptr a) {
return a < kLowMemEnd;
}
-static inline bool AddrIsInLowShadow(uintptr_t a) {
+static inline bool AddrIsInLowShadow(uptr a) {
return a >= kLowShadowBeg && a <= kLowShadowEnd;
}
-static inline bool AddrIsInHighMem(uintptr_t a) {
+static inline bool AddrIsInHighMem(uptr a) {
return a >= kHighMemBeg && a <= kHighMemEnd;
}
-static inline bool AddrIsInMem(uintptr_t a) {
+static inline bool AddrIsInMem(uptr a) {
return AddrIsInLowMem(a) || AddrIsInHighMem(a);
}
-static inline uintptr_t MemToShadow(uintptr_t p) {
+static inline uptr MemToShadow(uptr p) {
CHECK(AddrIsInMem(p));
return MEM_TO_SHADOW(p);
}
-static inline bool AddrIsInHighShadow(uintptr_t a) {
+static inline bool AddrIsInHighShadow(uptr a) {
return a >= kHighShadowBeg && a <= kHighMemEnd;
}
-static inline bool AddrIsInShadow(uintptr_t a) {
+static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInHighShadow(a);
}
-static inline bool AddrIsAlignedByGranularity(uintptr_t a) {
+static inline bool AddrIsInShadowGap(uptr a) {
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+static inline bool AddrIsAlignedByGranularity(uptr a) {
return (a & (SHADOW_GRANULARITY - 1)) == 0;
}
+static inline bool AddressIsPoisoned(uptr a) {
+ const uptr kAccessSize = 1;
+ u8 *shadow_address = (u8*)MemToShadow(a);
+ s8 shadow_value = *shadow_address;
+ if (shadow_value) {
+ u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1))
+ + kAccessSize - 1;
+ return (last_accessed_byte >= shadow_value);
+ }
+ return false;
+}
+
} // namespace __asan
#endif // ASAN_MAPPING_H
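A worked example of the default 64-bit mapping and the shadow-byte encoding consumed by AddressIsPoisoned() above (the values follow from SHADOW_SCALE = 3 and SHADOW_OFFSET = 1ULL << 44):

// MEM_TO_SHADOW(0x7fff8000) = (0x7fff8000 >> 3) | (1ULL << 44)
//                           = 0x0ffff000 | 0x100000000000
//                           = 0x10000ffff000
// Each shadow byte describes SHADOW_GRANULARITY = 8 application bytes:
//   0            all 8 bytes are addressable
//   k in 1..7    only the first k bytes are addressable
//   negative     the whole granule is poisoned (redzone, freed memory, ...)
// For a 1-byte access at address a this reduces to:
//   s8 shadow = *(s8*)MEM_TO_SHADOW(a);
//   bool poisoned = shadow != 0 && (s8)(a & 7) >= shadow;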
diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc
new file mode 100644
index 0000000..4a72758
--- /dev/null
+++ b/lib/asan/asan_new_delete.cc
@@ -0,0 +1,56 @@
+//===-- asan_new_delete.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_stack.h"
+
+#include <stddef.h>
+#include <new>
+
+namespace __asan {
+// This function is a no-op. We need it to make sure that the object file
+// with our replacements will actually be loaded from the static ASan
+// run-time library at link time.
+void ReplaceOperatorsNewAndDelete() { }
+}
+
+using namespace __asan; // NOLINT
+
+#define OPERATOR_NEW_BODY \
+ GET_STACK_TRACE_HERE_FOR_MALLOC;\
+ return asan_memalign(0, size, &stack);
+
+#ifdef ANDROID
+void *operator new(size_t size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
+#else
+void *operator new(size_t size) throw(std::bad_alloc) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) throw(std::bad_alloc) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&) throw()
+{ OPERATOR_NEW_BODY; }
+void *operator new[](size_t size, std::nothrow_t const&) throw()
+{ OPERATOR_NEW_BODY; }
+#endif
+
+#define OPERATOR_DELETE_BODY \
+ GET_STACK_TRACE_HERE_FOR_FREE(ptr);\
+ asan_free(ptr, &stack);
+
+void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr, std::nothrow_t const&) throw()
+{ OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr, std::nothrow_t const&) throw()
+{ OPERATOR_DELETE_BODY; }
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index daa1ad6..3b9d9f6 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -1,4 +1,4 @@
-//===-- asan_poisoning.cc ---------------------------------------*- C++ -*-===//
+//===-- asan_poisoning.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,21 +19,22 @@
namespace __asan {
-void PoisonShadow(uintptr_t addr, size_t size, uint8_t value) {
+void PoisonShadow(uptr addr, uptr size, u8 value) {
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
- uintptr_t shadow_beg = MemToShadow(addr);
- uintptr_t shadow_end = MemToShadow(addr + size);
- real_memset((void*)shadow_beg, value, shadow_end - shadow_beg);
+ uptr shadow_beg = MemToShadow(addr);
+ uptr shadow_end = MemToShadow(addr + size);
+ CHECK(REAL(memset) != 0);
+ REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
}
-void PoisonShadowPartialRightRedzone(uintptr_t addr,
- uintptr_t size,
- uintptr_t redzone_size,
- uint8_t value) {
+void PoisonShadowPartialRightRedzone(uptr addr,
+ uptr size,
+ uptr redzone_size,
+ u8 value) {
CHECK(AddrIsAlignedByGranularity(addr));
- uint8_t *shadow = (uint8_t*)MemToShadow(addr);
- for (uintptr_t i = 0; i < redzone_size;
+ u8 *shadow = (u8*)MemToShadow(addr);
+ for (uptr i = 0; i < redzone_size;
i += SHADOW_GRANULARITY, shadow++) {
if (i + SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
@@ -47,12 +48,12 @@ void PoisonShadowPartialRightRedzone(uintptr_t addr,
struct ShadowSegmentEndpoint {
- uint8_t *chunk;
- int8_t offset; // in [0, SHADOW_GRANULARITY)
- int8_t value; // = *chunk;
+ u8 *chunk;
+ s8 offset; // in [0, SHADOW_GRANULARITY)
+ s8 value; // = *chunk;
- explicit ShadowSegmentEndpoint(uintptr_t address) {
- chunk = (uint8_t*)MemToShadow(address);
+ explicit ShadowSegmentEndpoint(uptr address) {
+ chunk = (u8*)MemToShadow(address);
offset = address & (SHADOW_GRANULARITY - 1);
value = *chunk;
}
@@ -73,18 +74,19 @@ using namespace __asan; // NOLINT
// at least [left, AlignDown(right)).
// * if user asks to unpoison region [left, right), the program unpoisons
// at most [AlignDown(left), right).
-void __asan_poison_memory_region(void const volatile *addr, size_t size) {
- if (!FLAG_allow_user_poisoning || size == 0) return;
- uintptr_t beg_addr = (uintptr_t)addr;
- uintptr_t end_addr = beg_addr + size;
- if (FLAG_v >= 1) {
- Printf("Trying to poison memory region [%p, %p)\n", beg_addr, end_addr);
+void __asan_poison_memory_region(void const volatile *addr, uptr size) {
+ if (!flags()->allow_user_poisoning || size == 0) return;
+ uptr beg_addr = (uptr)addr;
+ uptr end_addr = beg_addr + size;
+ if (flags()->verbosity >= 1) {
+ Printf("Trying to poison memory region [%p, %p)\n",
+ (void*)beg_addr, (void*)end_addr);
}
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
- int8_t value = beg.value;
+ s8 value = beg.value;
CHECK(value == end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
@@ -107,25 +109,26 @@ void __asan_poison_memory_region(void const volatile *addr, size_t size) {
}
beg.chunk++;
}
- real_memset(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
+ REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
// Poison if byte in end.offset is unaddressable.
if (end.value > 0 && end.value <= end.offset) {
*end.chunk = kAsanUserPoisonedMemoryMagic;
}
}
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size) {
- if (!FLAG_allow_user_poisoning || size == 0) return;
- uintptr_t beg_addr = (uintptr_t)addr;
- uintptr_t end_addr = beg_addr + size;
- if (FLAG_v >= 1) {
- Printf("Trying to unpoison memory region [%p, %p)\n", beg_addr, end_addr);
+void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
+ if (!flags()->allow_user_poisoning || size == 0) return;
+ uptr beg_addr = (uptr)addr;
+ uptr end_addr = beg_addr + size;
+ if (flags()->verbosity >= 1) {
+ Printf("Trying to unpoison memory region [%p, %p)\n",
+ (void*)beg_addr, (void*)end_addr);
}
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
- int8_t value = beg.value;
+ s8 value = beg.value;
CHECK(value == end.value);
    // We unpoison memory bytes up to end.offset if they are not
// unpoisoned already.
@@ -139,21 +142,12 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size) {
*beg.chunk = 0;
beg.chunk++;
}
- real_memset(beg.chunk, 0, end.chunk - beg.chunk);
+ REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
if (end.offset > 0 && end.value != 0) {
*end.chunk = Max(end.value, end.offset);
}
}
bool __asan_address_is_poisoned(void const volatile *addr) {
- const size_t kAccessSize = 1;
- uintptr_t address = (uintptr_t)addr;
- uint8_t *shadow_address = (uint8_t*)MemToShadow(address);
- int8_t shadow_value = *shadow_address;
- if (shadow_value) {
- uint8_t last_accessed_byte = (address & (SHADOW_GRANULARITY - 1))
- + kAccessSize - 1;
- return (last_accessed_byte >= shadow_value);
- }
- return false;
+ return __asan::AddressIsPoisoned((uptr)addr);
}
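A minimal usage sketch of the manual poisoning interface implemented above (declarations come from asan_interface.h; the granularity rounding described in the "[left, AlignDown(right))" comment still applies):

// Somewhere in instrumented user code, with declarations as in asan_interface.h:
//   void __asan_poison_memory_region(void const volatile *addr, uptr size);
//   void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
//   bool __asan_address_is_poisoned(void const volatile *addr);

char buf[64];
__asan_poison_memory_region(buf + 32, 32);       // forbid the second half
bool p = __asan_address_is_poisoned(buf + 40);   // true, up to 8-byte rounding
__asan_unpoison_memory_region(buf + 32, 32);     // make it usable again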
diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc
new file mode 100644
index 0000000..061bb19
--- /dev/null
+++ b/lib/asan/asan_posix.cc
@@ -0,0 +1,126 @@
+//===-- asan_posix.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Posix-specific details.
+//===----------------------------------------------------------------------===//
+#if defined(__linux__) || defined(__APPLE__)
+
+#include "asan_internal.h"
+#include "asan_interceptors.h"
+#include "asan_mapping.h"
+#include "asan_stack.h"
+#include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+
+namespace __asan {
+
+static void MaybeInstallSigaction(int signum,
+ void (*handler)(int, siginfo_t *, void *)) {
+ if (!AsanInterceptsSignal(signum))
+ return;
+ struct sigaction sigact;
+ REAL(memset)(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = handler;
+ sigact.sa_flags = SA_SIGINFO;
+ if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
+ CHECK(0 == REAL(sigaction)(signum, &sigact, 0));
+ if (flags()->verbosity >= 1) {
+ Report("Installed the sigaction for signal %d\n", signum);
+ }
+}
+
+static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
+ uptr addr = (uptr)siginfo->si_addr;
+ // Write the first message using the bullet-proof write.
+ if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
+ uptr pc, sp, bp;
+ GetPcSpBp(context, &pc, &sp, &bp);
+ AsanReport("ERROR: AddressSanitizer crashed on unknown address %p"
+ " (pc %p sp %p bp %p T%d)\n",
+ (void*)addr, (void*)pc, (void*)sp, (void*)bp,
+ asanThreadRegistry().GetCurrentTidOrInvalid());
+ AsanPrintf("AddressSanitizer can not provide additional info. ABORTING\n");
+ GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+ stack.PrintStack();
+ ShowStatsAndAbort();
+}
+
+void SetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ CHECK(0 == sigaltstack(0, &oldstack));
+ // If the alternate stack is already in place, do nothing.
+ if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
+ // TODO(glider): the mapped stack should have the MAP_STACK flag in the
+ // future. It is not required by man 2 sigaltstack now (they're using
+ // malloc()).
+ void* base = MmapOrDie(kAltStackSize, __FUNCTION__);
+ altstack.ss_sp = base;
+ altstack.ss_flags = 0;
+ altstack.ss_size = kAltStackSize;
+ CHECK(0 == sigaltstack(&altstack, 0));
+ if (flags()->verbosity > 0) {
+ Report("Alternative stack for T%d set: [%p,%p)\n",
+ asanThreadRegistry().GetCurrentTidOrInvalid(),
+ altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
+ }
+}
+
+void UnsetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ altstack.ss_sp = 0;
+ altstack.ss_flags = SS_DISABLE;
+ altstack.ss_size = 0;
+ CHECK(0 == sigaltstack(&altstack, &oldstack));
+ UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
+}
+
+void InstallSignalHandlers() {
+ // Set the alternate signal stack for the main thread.
+ // This will cause SetAlternateSignalStack to be called twice, but the stack
+  // will actually be set only once.
+ if (flags()->use_sigaltstack) SetAlternateSignalStack();
+ MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
+ MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
+}
+
+// ---------------------- TSD ---------------- {{{1
+
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK(0 == pthread_key_create(&tsd_key, destructor));
+}
+
+void *AsanTSDGet() {
+ CHECK(tsd_key_inited);
+ return pthread_getspecific(tsd_key);
+}
+
+void AsanTSDSet(void *tsd) {
+ CHECK(tsd_key_inited);
+ pthread_setspecific(tsd_key, tsd);
+}
+
+} // namespace __asan
+
+#endif // __linux__ || __APPLE__
diff --git a/lib/asan/asan_printf.cc b/lib/asan/asan_printf.cc
index a3d06ff..e1304f0 100644
--- a/lib/asan/asan_printf.cc
+++ b/lib/asan/asan_printf.cc
@@ -1,4 +1,4 @@
-//===-- asan_printf.cc ------------------------------------------*- C++ -*-===//
+//===-- asan_printf.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,131 +16,19 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_common.h"
#include <stdarg.h>
+#include <stdio.h>
-namespace __asan {
-
-void RawWrite(const char *buffer) {
- static const char *kRawWriteError = "RawWrite can't output requested buffer!";
- ssize_t length = (ssize_t)internal_strlen(buffer);
- if (length != AsanWrite(2, buffer, length)) {
- AsanWrite(2, kRawWriteError, internal_strlen(kRawWriteError));
- ASAN_DIE;
- }
-}
-
-static inline int AppendChar(char **buff, const char *buff_end, char c) {
- if (*buff < buff_end) {
- **buff = c;
- (*buff)++;
- }
- return 1;
-}
-
-// Appends number in a given base to buffer. If its length is less than
-// "minimal_num_length", it is padded with leading zeroes.
-static int AppendUnsigned(char **buff, const char *buff_end, uint64_t num,
- uint8_t base, uint8_t minimal_num_length) {
- size_t const kMaxLen = 30;
- RAW_CHECK(base == 10 || base == 16);
- RAW_CHECK(minimal_num_length < kMaxLen);
- size_t num_buffer[kMaxLen];
- size_t pos = 0;
- do {
- RAW_CHECK_MSG(pos < kMaxLen, "appendNumber buffer overflow");
- num_buffer[pos++] = num % base;
- num /= base;
- } while (num > 0);
- while (pos < minimal_num_length) num_buffer[pos++] = 0;
- int result = 0;
- while (pos-- > 0) {
- size_t digit = num_buffer[pos];
- result += AppendChar(buff, buff_end, (digit < 10) ? '0' + digit
- : 'a' + digit - 10);
- }
- return result;
-}
-
-static inline int AppendSignedDecimal(char **buff, const char *buff_end,
- int64_t num) {
- int result = 0;
- if (num < 0) {
- result += AppendChar(buff, buff_end, '-');
- num = -num;
- }
- result += AppendUnsigned(buff, buff_end, (uint64_t)num, 10, 0);
- return result;
-}
-
-static inline int AppendString(char **buff, const char *buff_end,
- const char *s) {
- // Avoid library functions like stpcpy here.
- RAW_CHECK(s);
- int result = 0;
- for (; *s; s++) {
- result += AppendChar(buff, buff_end, *s);
- }
- return result;
-}
-
-static inline int AppendPointer(char **buff, const char *buff_end,
- uint64_t ptr_value) {
- int result = 0;
- result += AppendString(buff, buff_end, "0x");
- result += AppendUnsigned(buff, buff_end, ptr_value, 16,
- (__WORDSIZE == 64) ? 12 : 8);
- return result;
-}
+namespace __sanitizer {
+int VSNPrintf(char *buff, int buff_length, const char *format, va_list args);
+} // namespace __sanitizer
-static int VSNPrintf(char *buff, int buff_length,
- const char *format, va_list args) {
- static const char *kPrintfFormatsHelp = "Supported Printf formats: "
- "%%[l]{d,u,x}; %%p; %%s";
- RAW_CHECK(format);
- RAW_CHECK(buff_length > 0);
- const char *buff_end = &buff[buff_length - 1];
- const char *cur = format;
- int result = 0;
- for (; *cur; cur++) {
- if (*cur == '%') {
- cur++;
- bool have_l = (*cur == 'l');
- cur += have_l;
- int64_t dval;
- uint64_t uval, xval;
- switch (*cur) {
- case 'd': dval = have_l ? va_arg(args, intptr_t)
- : va_arg(args, int);
- result += AppendSignedDecimal(&buff, buff_end, dval);
- break;
- case 'u': uval = have_l ? va_arg(args, uintptr_t)
- : va_arg(args, unsigned int);
- result += AppendUnsigned(&buff, buff_end, uval, 10, 0);
- break;
- case 'x': xval = have_l ? va_arg(args, uintptr_t)
- : va_arg(args, unsigned int);
- result += AppendUnsigned(&buff, buff_end, xval, 16, 0);
- break;
- case 'p': RAW_CHECK_MSG(!have_l, kPrintfFormatsHelp);
- result += AppendPointer(&buff, buff_end,
- va_arg(args, uintptr_t));
- break;
- case 's': RAW_CHECK_MSG(!have_l, kPrintfFormatsHelp);
- result += AppendString(&buff, buff_end, va_arg(args, char*));
- break;
- default: RAW_CHECK_MSG(false, kPrintfFormatsHelp);
- }
- } else {
- result += AppendChar(&buff, buff_end, *cur);
- }
- }
- RAW_CHECK(buff <= buff_end);
- AppendChar(&buff, buff_end + 1, '\0');
- return result;
-}
+namespace __asan {
-void Printf(const char *format, ...) {
+void AsanPrintf(const char *format, ...) {
const int kLen = 1024 * 4;
char buffer[kLen];
va_list args;
@@ -149,25 +37,14 @@ void Printf(const char *format, ...) {
va_end(args);
RAW_CHECK_MSG(needed_length < kLen, "Buffer in Printf is too short!\n");
RawWrite(buffer);
+ AppendToErrorMessageBuffer(buffer);
}
-// Writes at most "length" symbols to "buffer" (including trailing '\0').
-// Returns the number of symbols that should have been written to buffer
-// (not including trailing '\0'). Thus, the string is truncated
-// iff return value is not less than "length".
-int SNPrintf(char *buffer, size_t length, const char *format, ...) {
- va_list args;
- va_start(args, format);
- int needed_length = VSNPrintf(buffer, length, format, args);
- va_end(args);
- return needed_length;
-}
-
-// Like Printf, but prints the current PID before the output string.
-void Report(const char *format, ...) {
+// Like AsanPrintf, but prints the current PID before the output string.
+void AsanReport(const char *format, ...) {
const int kLen = 1024 * 4;
char buffer[kLen];
- int needed_length = SNPrintf(buffer, kLen, "==%d== ", getpid());
+ int needed_length = internal_snprintf(buffer, kLen, "==%d== ", GetPid());
RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
va_list args;
va_start(args, format);
@@ -176,6 +53,7 @@ void Report(const char *format, ...) {
va_end(args);
RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
RawWrite(buffer);
+ AppendToErrorMessageBuffer(buffer);
}
} // namespace __asan
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index c876f6d..34324fa 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -1,4 +1,4 @@
-//===-- asan_rtl.cc ---------------------------------------------*- C++ -*-===//
+//===-- asan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,254 +16,276 @@
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
-#include "asan_mac.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+namespace __sanitizer {
+using namespace __asan;
+
+void Die() {
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
+ // Don't die twice - run a busy loop.
+ while (1) { }
+ }
+ if (flags()->sleep_before_dying) {
+ Report("Sleeping for %zd second(s)\n", flags()->sleep_before_dying);
+ SleepForSeconds(flags()->sleep_before_dying);
+ }
+ if (flags()->unmap_shadow_on_exit)
+ UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
+ if (death_callback)
+ death_callback();
+ if (flags()->abort_on_error)
+ Abort();
+ Exit(flags()->exitcode);
+}
+
+void CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) {
+ AsanReport("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n",
+ file, line, cond, (uptr)v1, (uptr)v2);
+ PRINT_CURRENT_STACK();
+ ShowStatsAndAbort();
+}
-#include <new>
-#include <dlfcn.h>
-#include <execinfo.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#ifndef ANDROID
-#include <sys/ucontext.h>
-#endif
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <unistd.h>
-// must not include <setjmp.h> on Linux
+} // namespace __sanitizer
namespace __asan {
// -------------------------- Flags ------------------------- {{{1
-static const size_t kMallocContextSize = 30;
-static int FLAG_atexit;
-bool FLAG_fast_unwind = true;
-
-size_t FLAG_redzone; // power of two, >= 32
-size_t FLAG_quarantine_size;
-int FLAG_demangle;
-bool FLAG_symbolize;
-int FLAG_v;
-int FLAG_debug;
-bool FLAG_poison_shadow;
-int FLAG_report_globals;
-size_t FLAG_malloc_context_size = kMallocContextSize;
-uintptr_t FLAG_large_malloc;
-bool FLAG_lazy_shadow;
-bool FLAG_handle_segv;
-bool FLAG_handle_sigill;
-bool FLAG_replace_str;
-bool FLAG_replace_intrin;
-bool FLAG_replace_cfallocator; // Used on Mac only.
-size_t FLAG_max_malloc_fill_size = 0;
-bool FLAG_use_fake_stack;
-int FLAG_exitcode = EXIT_FAILURE;
-bool FLAG_allow_user_poisoning;
+static const int kMallocContextSize = 30;
+
+static Flags asan_flags;
+
+Flags *flags() {
+ return &asan_flags;
+}
+
+static void ParseFlagsFromString(Flags *f, const char *str) {
+ ParseFlag(str, &f->quarantine_size, "quarantine_size");
+ ParseFlag(str, &f->symbolize, "symbolize");
+ ParseFlag(str, &f->verbosity, "verbosity");
+ ParseFlag(str, &f->redzone, "redzone");
+ CHECK(f->redzone >= 16);
+ CHECK(IsPowerOfTwo(f->redzone));
+
+ ParseFlag(str, &f->debug, "debug");
+ ParseFlag(str, &f->report_globals, "report_globals");
+ ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
+ CHECK(f->malloc_context_size <= kMallocContextSize);
+
+ ParseFlag(str, &f->replace_str, "replace_str");
+ ParseFlag(str, &f->replace_intrin, "replace_intrin");
+ ParseFlag(str, &f->replace_cfallocator, "replace_cfallocator");
+ ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free");
+ ParseFlag(str, &f->use_fake_stack, "use_fake_stack");
+ ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size");
+ ParseFlag(str, &f->exitcode, "exitcode");
+ ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning");
+ ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying");
+ ParseFlag(str, &f->handle_segv, "handle_segv");
+ ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
+ ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size");
+ ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit");
+ ParseFlag(str, &f->abort_on_error, "abort_on_error");
+ ParseFlag(str, &f->atexit, "atexit");
+ ParseFlag(str, &f->disable_core, "disable_core");
+}
+
+extern "C" {
+const char* WEAK __asan_default_options() { return ""; }
+} // extern "C"
+
+void InitializeFlags(Flags *f, const char *env) {
+ internal_memset(f, 0, sizeof(*f));
+
+ f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 24 : 1UL << 28;
+ f->symbolize = false;
+ f->verbosity = 0;
+ f->redzone = (ASAN_LOW_MEMORY) ? 64 : 128;
+ f->debug = false;
+ f->report_globals = 1;
+ f->malloc_context_size = kMallocContextSize;
+ f->replace_str = true;
+ f->replace_intrin = true;
+ f->replace_cfallocator = true;
+ f->mac_ignore_invalid_free = false;
+ f->use_fake_stack = true;
+ f->max_malloc_fill_size = 0;
+ f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
+ f->allow_user_poisoning = true;
+ f->sleep_before_dying = 0;
+ f->handle_segv = ASAN_NEEDS_SEGV;
+ f->use_sigaltstack = false;
+ f->check_malloc_usable_size = true;
+ f->unmap_shadow_on_exit = false;
+ f->abort_on_error = false;
+ f->atexit = false;
+ f->disable_core = (__WORDSIZE == 64);
+
+ // Override from user-specified string.
+ ParseFlagsFromString(f, __asan_default_options());
+ if (flags()->verbosity) {
+ Report("Using the defaults from __asan_default_options: %s\n",
+ __asan_default_options());
+ }
+
+ // Override from command line.
+ ParseFlagsFromString(f, env);
+}
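A small usage sketch of the two override layers parsed above: a weak __asan_default_options() compiled into the binary, then the runtime string (typically taken from the ASAN_OPTIONS environment variable) applied on top:

// In the instrumented application: compile-time defaults via the weak hook.
extern "C" const char *__asan_default_options() {
  return "verbosity=1:redzone=64";
}

// At run time the environment string still wins, e.g.:
//   ASAN_OPTIONS=quarantine_size=16777216:abort_on_error=1 ./a.out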
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
-
-// -------------------------- Interceptors ---------------- {{{1
-typedef int (*sigaction_f)(int signum, const struct sigaction *act,
- struct sigaction *oldact);
-typedef sig_t (*signal_f)(int signum, sig_t handler);
-typedef void (*longjmp_f)(void *env, int val);
-typedef longjmp_f _longjmp_f;
-typedef longjmp_f siglongjmp_f;
-typedef void (*__cxa_throw_f)(void *, void *, void *);
-typedef int (*pthread_create_f)(pthread_t *thread, const pthread_attr_t *attr,
- void *(*start_routine) (void *), void *arg);
-#ifdef __APPLE__
-dispatch_async_f_f real_dispatch_async_f;
-dispatch_sync_f_f real_dispatch_sync_f;
-dispatch_after_f_f real_dispatch_after_f;
-dispatch_barrier_async_f_f real_dispatch_barrier_async_f;
-dispatch_group_async_f_f real_dispatch_group_async_f;
-pthread_workqueue_additem_np_f real_pthread_workqueue_additem_np;
-#endif
-
-sigaction_f real_sigaction;
-signal_f real_signal;
-longjmp_f real_longjmp;
-_longjmp_f real__longjmp;
-siglongjmp_f real_siglongjmp;
-__cxa_throw_f real___cxa_throw;
-pthread_create_f real_pthread_create;
+void (*death_callback)(void);
+static void (*error_report_callback)(const char*);
+char *error_message_buffer = 0;
+uptr error_message_buffer_pos = 0;
+uptr error_message_buffer_size = 0;
// -------------------------- Misc ---------------- {{{1
void ShowStatsAndAbort() {
__asan_print_accumulated_stats();
- ASAN_DIE;
+ Die();
}
-static void PrintBytes(const char *before, uintptr_t *a) {
- uint8_t *bytes = (uint8_t*)a;
- size_t byte_num = (__WORDSIZE) / 8;
- Printf("%s%p:", before, (uintptr_t)a);
- for (size_t i = 0; i < byte_num; i++) {
- Printf(" %lx%lx", bytes[i] >> 4, bytes[i] & 15);
+static void PrintBytes(const char *before, uptr *a) {
+ u8 *bytes = (u8*)a;
+ uptr byte_num = (__WORDSIZE) / 8;
+ AsanPrintf("%s%p:", before, (void*)a);
+ for (uptr i = 0; i < byte_num; i++) {
+ AsanPrintf(" %x%x", bytes[i] >> 4, bytes[i] & 15);
}
- Printf("\n");
-}
-
-// Opens the file 'file_name" and reads up to 'max_len' bytes.
-// The resulting buffer is mmaped and stored in '*buff'.
-// Returns the number of read bytes or -1 if file can not be opened.
-static ssize_t ReadFileToBuffer(const char *file_name, char **buff,
- size_t max_len) {
- const size_t kMinFileLen = kPageSize;
- ssize_t read_len = -1;
- *buff = 0;
- size_t maped_size = 0;
- // The files we usually open are not seekable, so try different buffer sizes.
- for (size_t size = kMinFileLen; size <= max_len; size *= 2) {
- int fd = AsanOpenReadonly(file_name);
- if (fd < 0) return -1;
- AsanUnmapOrDie(*buff, maped_size);
- maped_size = size;
- *buff = (char*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
- read_len = AsanRead(fd, *buff, size);
- AsanClose(fd);
- if (read_len < size) // We've read the whole file.
- break;
+ AsanPrintf("\n");
+}
+
+void AppendToErrorMessageBuffer(const char *buffer) {
+ if (error_message_buffer) {
+ uptr length = internal_strlen(buffer);
+ CHECK_GE(error_message_buffer_size, error_message_buffer_pos);
+ uptr remaining = error_message_buffer_size - error_message_buffer_pos;
+ internal_strncpy(error_message_buffer + error_message_buffer_pos,
+ buffer, remaining);
+ error_message_buffer[error_message_buffer_size - 1] = '\0';
+ // FIXME: reallocate the buffer instead of truncating the message.
+ error_message_buffer_pos += remaining > length ? length : remaining;
}
- return read_len;
-}
-
-// Like getenv, but reads env directly from /proc and does not use libc.
-// This function should be called first inside __asan_init.
-static const char* GetEnvFromProcSelfEnviron(const char* name) {
- static char *environ;
- static ssize_t len;
- static bool inited;
- if (!inited) {
- inited = true;
- len = ReadFileToBuffer("/proc/self/environ", &environ, 1 << 20);
- }
- if (!environ || len <= 0) return NULL;
- size_t namelen = internal_strlen(name);
- const char *p = environ;
- while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
- // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
- const char* endp =
- (char*)internal_memchr(p, '\0', len - (p - environ));
- if (endp == NULL) // this entry isn't NUL terminated
- return NULL;
- else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
- return p + namelen + 1; // point after =
- p = endp + 1;
- }
- return NULL; // Not found.
-}
-
-// ---------------------- Thread ------------------------- {{{1
-static void *asan_thread_start(void *arg) {
- AsanThread *t= (AsanThread*)arg;
- asanThreadRegistry().SetCurrent(t);
- return t->ThreadStart();
}
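AppendToErrorMessageBuffer copies only as much of each report as still fits, keeps the buffer NUL-terminated, and silently truncates the rest (hence the FIXME about reallocating). A self-contained sketch of the same bounded-append pattern; BoundedAppend and the 16-byte buffer are hypothetical:

#include <cstring>
#include <cstdio>

// Bounded append: copy at most the remaining space, keep the buffer
// NUL-terminated, and drop any excess input instead of overflowing.
static void BoundedAppend(char *buf, size_t cap, size_t *pos, const char *msg) {
  size_t len = strlen(msg);
  size_t remaining = cap - *pos;
  strncpy(buf + *pos, msg, remaining);
  buf[cap - 1] = '\0';
  *pos += len < remaining ? len : remaining;
}

int main() {
  char buf[16];
  size_t pos = 0;
  memset(buf, 0, sizeof(buf));
  BoundedAppend(buf, sizeof(buf), &pos, "ERROR: ");
  BoundedAppend(buf, sizeof(buf), &pos, "heap-buffer-overflow");  // truncated
  printf("%s\n", buf);  // prints "ERROR: heap-buf"
  return 0;
}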
// ---------------------- mmap -------------------- {{{1
-void OutOfMemoryMessageAndDie(const char *mem_type, size_t size) {
- Report("ERROR: AddressSanitizer failed to allocate "
- "0x%lx (%ld) bytes of %s\n",
- size, size, mem_type);
- PRINT_CURRENT_STACK();
- ShowStatsAndAbort();
-}
-
// Reserve memory range [beg, end].
-static void ReserveShadowMemoryRange(uintptr_t beg, uintptr_t end) {
+static void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK((beg % kPageSize) == 0);
CHECK(((end + 1) % kPageSize) == 0);
- size_t size = end - beg + 1;
- void *res = AsanMmapFixedNoReserve(beg, size);
+ uptr size = end - beg + 1;
+ void *res = MmapFixedNoReserve(beg, size);
CHECK(res == (void*)beg && "ReserveShadowMemoryRange failed");
}
// ---------------------- LowLevelAllocator ------------- {{{1
-void *LowLevelAllocator::Allocate(size_t size) {
+void *LowLevelAllocator::Allocate(uptr size) {
CHECK((size & (size - 1)) == 0 && "size must be a power of two");
- if (allocated_end_ - allocated_current_ < size) {
- size_t size_to_allocate = Max(size, kPageSize);
+ if (allocated_end_ - allocated_current_ < (sptr)size) {
+ uptr size_to_allocate = Max(size, kPageSize);
allocated_current_ =
- (char*)AsanMmapSomewhereOrDie(size_to_allocate, __FUNCTION__);
+ (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
allocated_end_ = allocated_current_ + size_to_allocate;
- PoisonShadow((uintptr_t)allocated_current_, size_to_allocate,
+ PoisonShadow((uptr)allocated_current_, size_to_allocate,
kAsanInternalHeapMagic);
}
- CHECK(allocated_end_ - allocated_current_ >= size);
+ CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
void *res = allocated_current_;
allocated_current_ += size;
return res;
}
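LowLevelAllocator::Allocate is a bump allocator: it hands out power-of-two chunks from the current block and grabs a fresh page-sized (or larger) block when space runs out; nothing is ever freed. A stripped-down sketch of the same idea, with malloc standing in for MmapOrDie and no shadow poisoning:

#include <cstdlib>
#include <cassert>

static const size_t kBlockSize = 4096;   // stand-in for kPageSize

// Bump allocator sketch: carve power-of-two allocations out of a block and
// refill with a new block on demand (no freeing).
struct BumpAllocator {
  char *cur;
  char *end;
  BumpAllocator() : cur(0), end(0) {}

  void *Allocate(size_t size) {
    assert((size & (size - 1)) == 0 && "size must be a power of two");
    if ((size_t)(end - cur) < size) {
      size_t block = size > kBlockSize ? size : kBlockSize;
      cur = (char*)malloc(block);        // the runtime uses MmapOrDie here
      end = cur + block;
    }
    void *res = cur;
    cur += size;
    return res;
  }
};

int main() {
  BumpAllocator a;
  char *p = (char*)a.Allocate(64);
  char *q = (char*)a.Allocate(64);
  assert(q - p == 64);                   // both came from the same block
  return 0;
}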
// ---------------------- DescribeAddress -------------------- {{{1
-static bool DescribeStackAddress(uintptr_t addr, uintptr_t access_size) {
+static bool DescribeStackAddress(uptr addr, uptr access_size) {
AsanThread *t = asanThreadRegistry().FindThreadByStackAddress(addr);
if (!t) return false;
- const intptr_t kBufSize = 4095;
+ const sptr kBufSize = 4095;
char buf[kBufSize];
- uintptr_t offset = 0;
+ uptr offset = 0;
const char *frame_descr = t->GetFrameNameByAddr(addr, &offset);
// This string is created by the compiler and has the following form:
// "FunctioName n alloc_1 alloc_2 ... alloc_n"
// where alloc_i looks like "offset size len ObjectName ".
CHECK(frame_descr);
// Report the function name and the offset.
- const char *name_end = real_strchr(frame_descr, ' ');
+ const char *name_end = internal_strchr(frame_descr, ' ');
CHECK(name_end);
buf[0] = 0;
- strncat(buf, frame_descr,
- Min(kBufSize, static_cast<intptr_t>(name_end - frame_descr)));
- Printf("Address %p is located at offset %ld "
- "in frame <%s> of T%d's stack:\n",
- addr, offset, buf, t->tid());
+ internal_strncat(buf, frame_descr,
+ Min(kBufSize,
+ static_cast<sptr>(name_end - frame_descr)));
+ AsanPrintf("Address %p is located at offset %zu "
+ "in frame <%s> of T%d's stack:\n",
+ (void*)addr, offset, buf, t->tid());
// Report the number of stack objects.
char *p;
- size_t n_objects = strtol(name_end, &p, 10);
+ uptr n_objects = internal_simple_strtoll(name_end, &p, 10);
CHECK(n_objects > 0);
- Printf(" This frame has %ld object(s):\n", n_objects);
+ AsanPrintf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
- for (size_t i = 0; i < n_objects; i++) {
- size_t beg, size;
- intptr_t len;
- beg = strtol(p, &p, 10);
- size = strtol(p, &p, 10);
- len = strtol(p, &p, 10);
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr beg, size;
+ sptr len;
+ beg = internal_simple_strtoll(p, &p, 10);
+ size = internal_simple_strtoll(p, &p, 10);
+ len = internal_simple_strtoll(p, &p, 10);
if (beg <= 0 || size <= 0 || len < 0 || *p != ' ') {
- Printf("AddressSanitizer can't parse the stack frame descriptor: |%s|\n",
- frame_descr);
+ AsanPrintf("AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n", frame_descr);
break;
}
p++;
buf[0] = 0;
- strncat(buf, p, Min(kBufSize, len));
+ internal_strncat(buf, p, Min(kBufSize, len));
p += len;
- Printf(" [%ld, %ld) '%s'\n", beg, beg + size, buf);
+ AsanPrintf(" [%zu, %zu) '%s'\n", beg, beg + size, buf);
}
- Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism\n"
- " (longjmp and C++ exceptions *are* supported)\n");
+ AsanPrintf("HINT: this may be a false positive if your program uses "
+ "some custom stack unwind mechanism\n"
+ " (longjmp and C++ exceptions *are* supported)\n");
t->summary()->Announce();
return true;
}
-__attribute__((noinline))
-static void DescribeAddress(uintptr_t addr, uintptr_t access_size) {
+static bool DescribeAddrIfShadow(uptr addr) {
+ if (AddrIsInMem(addr))
+ return false;
+ static const char kAddrInShadowReport[] =
+ "Address %p is located in the %s.\n";
+ if (AddrIsInShadowGap(addr)) {
+ AsanPrintf(kAddrInShadowReport, addr, "shadow gap area");
+ return true;
+ }
+ if (AddrIsInHighShadow(addr)) {
+ AsanPrintf(kAddrInShadowReport, addr, "high shadow area");
+ return true;
+ }
+ if (AddrIsInLowShadow(addr)) {
+ AsanPrintf(kAddrInShadowReport, addr, "low shadow area");
+ return true;
+ }
+
+ CHECK(0); // Unreachable.
+ return false;
+}
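DescribeAddrIfShadow only runs for addresses outside application memory and simply names which shadow region contains them. A standalone sketch of that range classification; the layout constants below are illustrative stand-ins for the real values in asan_mapping.h:

#include <cstdint>
#include <cstdio>

// Illustrative 64-bit-style layout; the real constants live in asan_mapping.h.
static const uint64_t kLowShadowBeg  = 0x00007fff8000ULL;
static const uint64_t kLowShadowEnd  = 0x00008fff6fffULL;
static const uint64_t kShadowGapBeg  = 0x00008fff7000ULL;
static const uint64_t kShadowGapEnd  = 0x02008fff6fffULL;
static const uint64_t kHighShadowBeg = 0x02008fff7000ULL;
static const uint64_t kHighShadowEnd = 0x10007fff7fffULL;

static const char *ClassifyNonMemAddr(uint64_t a) {
  if (a >= kShadowGapBeg && a <= kShadowGapEnd)   return "shadow gap area";
  if (a >= kHighShadowBeg && a <= kHighShadowEnd) return "high shadow area";
  if (a >= kLowShadowBeg && a <= kLowShadowEnd)   return "low shadow area";
  return "application memory";
}

int main() {
  printf("%s\n", ClassifyNonMemAddr(0x000090000000ULL));  // shadow gap area
  return 0;
}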
+
+static NOINLINE void DescribeAddress(uptr addr, uptr access_size) {
+ // Check if this is shadow or shadow gap.
+ if (DescribeAddrIfShadow(addr))
+ return;
+
+ CHECK(AddrIsInMem(addr));
+
// Check if this is a global.
if (DescribeAddrIfGlobal(addr))
return;
@@ -276,92 +298,12 @@ static void DescribeAddress(uintptr_t addr, uintptr_t access_size) {
}
// -------------------------- Run-time entry ------------------- {{{1
-void GetPcSpBpAx(void *context,
- uintptr_t *pc, uintptr_t *sp, uintptr_t *bp, uintptr_t *ax) {
-#ifndef ANDROID
- ucontext_t *ucontext = (ucontext_t*)context;
-#endif
-#ifdef __APPLE__
-# if __WORDSIZE == 64
- *pc = ucontext->uc_mcontext->__ss.__rip;
- *bp = ucontext->uc_mcontext->__ss.__rbp;
- *sp = ucontext->uc_mcontext->__ss.__rsp;
- *ax = ucontext->uc_mcontext->__ss.__rax;
-# else
- *pc = ucontext->uc_mcontext->__ss.__eip;
- *bp = ucontext->uc_mcontext->__ss.__ebp;
- *sp = ucontext->uc_mcontext->__ss.__esp;
- *ax = ucontext->uc_mcontext->__ss.__eax;
-# endif // __WORDSIZE
-#else // assume linux
-# if defined (ANDROID)
- *pc = *sp = *bp = *ax = 0;
-# elif defined(__arm__)
- *pc = ucontext->uc_mcontext.arm_pc;
- *bp = ucontext->uc_mcontext.arm_fp;
- *sp = ucontext->uc_mcontext.arm_sp;
- *ax = ucontext->uc_mcontext.arm_r0;
-# elif __WORDSIZE == 64
- *pc = ucontext->uc_mcontext.gregs[REG_RIP];
- *bp = ucontext->uc_mcontext.gregs[REG_RBP];
- *sp = ucontext->uc_mcontext.gregs[REG_RSP];
- *ax = ucontext->uc_mcontext.gregs[REG_RAX];
-# else
- *pc = ucontext->uc_mcontext.gregs[REG_EIP];
- *bp = ucontext->uc_mcontext.gregs[REG_EBP];
- *sp = ucontext->uc_mcontext.gregs[REG_ESP];
- *ax = ucontext->uc_mcontext.gregs[REG_EAX];
-# endif // __WORDSIZE
-#endif
-}
-
-static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
- uintptr_t addr = (uintptr_t)siginfo->si_addr;
- if (AddrIsInShadow(addr) && FLAG_lazy_shadow) {
- // We traped on access to a shadow address. Just map a large chunk around
- // this address.
- const uintptr_t chunk_size = kPageSize << 10; // 4M
- uintptr_t chunk = addr & ~(chunk_size - 1);
- AsanMmapFixedReserve(chunk, chunk_size);
- return;
- }
- // Write the first message using the bullet-proof write.
- if (13 != AsanWrite(2, "ASAN:SIGSEGV\n", 13)) ASAN_DIE;
- uintptr_t pc, sp, bp, ax;
- GetPcSpBpAx(context, &pc, &sp, &bp, &ax);
- Report("ERROR: AddressSanitizer crashed on unknown address %p"
- " (pc %p sp %p bp %p ax %p T%d)\n",
- addr, pc, sp, bp, ax,
- asanThreadRegistry().GetCurrentTidOrMinusOne());
- Printf("AddressSanitizer can not provide additional info. ABORTING\n");
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, false, pc, bp);
- stack.PrintStack();
- ShowStatsAndAbort();
-}
-
-static void ASAN_OnSIGILL(int, siginfo_t *siginfo, void *context) {
- // Write the first message using the bullet-proof write.
- if (12 != AsanWrite(2, "ASAN:SIGILL\n", 12)) ASAN_DIE;
- uintptr_t pc, sp, bp, ax;
- GetPcSpBpAx(context, &pc, &sp, &bp, &ax);
-
- uintptr_t addr = ax;
-
- uint8_t *insn = (uint8_t*)pc;
- CHECK(insn[0] == 0x0f && insn[1] == 0x0b); // ud2
- unsigned access_size_and_type = insn[2] - 0x50;
- CHECK(access_size_and_type < 16);
- bool is_write = access_size_and_type & 8;
- int access_size = 1 << (access_size_and_type & 7);
- __asan_report_error(pc, bp, sp, addr, is_write, access_size);
-}
-
// exported functions
#define ASAN_REPORT_ERROR(type, is_write, size) \
-extern "C" void __asan_report_ ## type ## size(uintptr_t addr) \
- __attribute__((visibility("default"))) __attribute__((noinline)); \
-extern "C" void __asan_report_ ## type ## size(uintptr_t addr) { \
- GET_BP_PC_SP; \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size(uptr addr); \
+void __asan_report_ ## type ## size(uptr addr) { \
+ GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \
}
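Each ASAN_REPORT_ERROR expansion defines one exported entry point (for example __asan_report_load4) that records the caller's pc/bp and an approximate sp, then forwards everything to the common reporting routine. A standalone sketch of what one such entry point boils down to; report_error and report_load4 are stubs, not the real interface:

#include <cstdint>
#include <cstdio>

typedef uintptr_t uptr;

// Stub standing in for __asan_report_error; it only prints its arguments.
static void report_error(uptr pc, uptr bp, uptr sp, uptr addr,
                         bool is_write, uptr size) {
  printf("%s of size %zu at %p (pc %p)\n",
         is_write ? "WRITE" : "READ", (size_t)size, (void*)addr, (void*)pc);
  (void)bp; (void)sp;
}

// Roughly what the macro generates for ASAN_REPORT_ERROR(load, false, 4).
extern "C" void report_load4(uptr addr) {
  uptr bp = (uptr)__builtin_frame_address(0);   // GET_CURRENT_FRAME()
  uptr pc = (uptr)__builtin_return_address(0);  // GET_CALLER_PC()
  uptr local_stack;
  uptr sp = (uptr)&local_stack;                 // approximate stack pointer
  report_error(pc, bp, sp, addr, /*is_write*/false, /*size*/4);
}

int main() {
  report_load4((uptr)main);   // fake "bad" address, just to exercise the path
  return 0;
}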
@@ -381,210 +323,85 @@ ASAN_REPORT_ERROR(store, true, 16)
// dynamic libraries access the symbol even if it is not used by the executable
// itself. This should help if the build system is removing dead code at link
// time.
-static void force_interface_symbols() {
+static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
if (fake_condition) {
- __asan_report_load1(NULL);
- __asan_report_load2(NULL);
- __asan_report_load4(NULL);
- __asan_report_load8(NULL);
- __asan_report_load16(NULL);
- __asan_report_store1(NULL);
- __asan_report_store2(NULL);
- __asan_report_store4(NULL);
- __asan_report_store8(NULL);
- __asan_report_store16(NULL);
- __asan_register_global(0, 0, NULL);
- __asan_register_globals(NULL, 0);
- __asan_unregister_globals(NULL, 0);
+ __asan_report_load1(0);
+ __asan_report_load2(0);
+ __asan_report_load4(0);
+ __asan_report_load8(0);
+ __asan_report_load16(0);
+ __asan_report_store1(0);
+ __asan_report_store2(0);
+ __asan_report_store4(0);
+ __asan_report_store8(0);
+ __asan_report_store16(0);
+ __asan_register_global(0, 0, 0);
+ __asan_register_globals(0, 0);
+ __asan_unregister_globals(0, 0);
+ __asan_set_death_callback(0);
+ __asan_set_error_report_callback(0);
+ __asan_handle_no_return();
}
}
// -------------------------- Init ------------------- {{{1
-static int64_t IntFlagValue(const char *flags, const char *flag,
- int64_t default_val) {
- if (!flags) return default_val;
- const char *str = strstr(flags, flag);
- if (!str) return default_val;
- return atoll(str + internal_strlen(flag));
-}
-
static void asan_atexit() {
- Printf("AddressSanitizer exit stats:\n");
+ AsanPrintf("AddressSanitizer exit stats:\n");
__asan_print_accumulated_stats();
}
-void CheckFailed(const char *cond, const char *file, int line) {
- Report("CHECK failed: %s at %s:%d, pthread_self=%p\n",
- cond, file, line, pthread_self());
- PRINT_CURRENT_STACK();
- ShowStatsAndAbort();
-}
-
} // namespace __asan
-// -------------------------- Interceptors ------------------- {{{1
+// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-#define OPERATOR_NEW_BODY \
- GET_STACK_TRACE_HERE_FOR_MALLOC;\
- return asan_memalign(0, size, &stack);
-
-#ifdef ANDROID
-void *operator new(size_t size) { OPERATOR_NEW_BODY; }
-void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
-#else
-void *operator new(size_t size) throw(std::bad_alloc) { OPERATOR_NEW_BODY; }
-void *operator new[](size_t size) throw(std::bad_alloc) { OPERATOR_NEW_BODY; }
-void *operator new(size_t size, std::nothrow_t const&) throw()
-{ OPERATOR_NEW_BODY; }
-void *operator new[](size_t size, std::nothrow_t const&) throw()
-{ OPERATOR_NEW_BODY; }
-#endif
-
-#define OPERATOR_DELETE_BODY \
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);\
- asan_free(ptr, &stack);
-
-void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
-void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
-void operator delete(void *ptr, std::nothrow_t const&) throw()
-{ OPERATOR_DELETE_BODY; }
-void operator delete[](void *ptr, std::nothrow_t const&) throw()
-{ OPERATOR_DELETE_BODY;}
-
-extern "C"
-#ifndef __APPLE__
-__attribute__((visibility("default")))
-#endif
-int WRAP(pthread_create)(pthread_t *thread, const pthread_attr_t *attr,
- void *(*start_routine) (void *), void *arg) {
- GET_STACK_TRACE_HERE(kStackTraceMax, /*fast_unwind*/false);
- AsanThread *t = (AsanThread*)asan_malloc(sizeof(AsanThread), &stack);
- AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
- CHECK(curr_thread || asanThreadRegistry().IsCurrentThreadDying());
- new(t) AsanThread(asanThreadRegistry().GetCurrentTidOrMinusOne(),
- start_routine, arg, &stack);
- return real_pthread_create(thread, attr, asan_thread_start, t);
-}
-
-static bool MySignal(int signum) {
- if (FLAG_handle_sigill && signum == SIGILL) return true;
- if (FLAG_handle_segv && signum == SIGSEGV) return true;
-#ifdef __APPLE__
- if (FLAG_handle_segv && signum == SIGBUS) return true;
-#endif
- return false;
-}
-
-static void MaybeInstallSigaction(int signum,
- void (*handler)(int, siginfo_t *, void *)) {
- if (!MySignal(signum))
- return;
- struct sigaction sigact;
- real_memset(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = handler;
- sigact.sa_flags = SA_SIGINFO;
- CHECK(0 == real_sigaction(signum, &sigact, 0));
-}
-
-extern "C"
-sig_t WRAP(signal)(int signum, sig_t handler) {
- if (!MySignal(signum)) {
- return real_signal(signum, handler);
- }
- return NULL;
-}
-
-extern "C"
-int WRAP(sigaction)(int signum, const struct sigaction *act,
- struct sigaction *oldact) {
- if (!MySignal(signum)) {
- return real_sigaction(signum, act, oldact);
- }
- return 0;
+int __asan_set_error_exit_code(int exit_code) {
+ int old = flags()->exitcode;
+ flags()->exitcode = exit_code;
+ return old;
}
-
-static void UnpoisonStackFromHereToTop() {
+void NOINLINE __asan_handle_no_return() {
int local_stack;
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
CHECK(curr_thread);
- uintptr_t top = curr_thread->stack_top();
- uintptr_t bottom = ((uintptr_t)&local_stack - kPageSize) & ~(kPageSize-1);
+ uptr top = curr_thread->stack_top();
+ uptr bottom = ((uptr)&local_stack - kPageSize) & ~(kPageSize-1);
PoisonShadow(bottom, top - bottom, 0);
}
-extern "C" void WRAP(longjmp)(void *env, int val) {
- UnpoisonStackFromHereToTop();
- real_longjmp(env, val);
+void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
+ death_callback = callback;
}
-extern "C" void WRAP(_longjmp)(void *env, int val) {
- UnpoisonStackFromHereToTop();
- real__longjmp(env, val);
-}
-
-extern "C" void WRAP(siglongjmp)(void *env, int val) {
- UnpoisonStackFromHereToTop();
- real_siglongjmp(env, val);
-}
-
-extern "C" void __cxa_throw(void *a, void *b, void *c);
-
-#if ASAN_HAS_EXCEPTIONS == 1
-extern "C" void WRAP(__cxa_throw)(void *a, void *b, void *c) {
- CHECK(&real___cxa_throw);
- UnpoisonStackFromHereToTop();
- real___cxa_throw(a, b, c);
-}
-#endif
-
-extern "C" {
-// intercept mlock and friends.
-// Since asan maps 16T of RAM, mlock is completely unfriendly to asan.
-// All functions return 0 (success).
-static void MlockIsUnsupported() {
- static bool printed = 0;
- if (printed) return;
- printed = true;
- Printf("INFO: AddressSanitizer ignores mlock/mlockall/munlock/munlockall\n");
-}
-int mlock(const void *addr, size_t len) {
- MlockIsUnsupported();
- return 0;
-}
-int munlock(const void *addr, size_t len) {
- MlockIsUnsupported();
- return 0;
-}
-int mlockall(int flags) {
- MlockIsUnsupported();
- return 0;
-}
-int munlockall(void) {
- MlockIsUnsupported();
- return 0;
-}
-} // extern "C"
-
-// ---------------------- Interface ---------------- {{{1
-int __asan_set_error_exit_code(int exit_code) {
- int old = FLAG_exitcode;
- FLAG_exitcode = exit_code;
- return old;
+void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
+ error_report_callback = callback;
+ if (callback) {
+ error_message_buffer_size = 1 << 16;
+ error_message_buffer =
+ (char*)MmapOrDie(error_message_buffer_size, __FUNCTION__);
+ error_message_buffer_pos = 0;
+ }
}
-void __asan_report_error(uintptr_t pc, uintptr_t bp, uintptr_t sp,
- uintptr_t addr, bool is_write, size_t access_size) {
- // Do not print more than one report, otherwise they will mix up.
- static int num_calls = 0;
- if (AtomicInc(&num_calls) > 1) return;
+void __asan_report_error(uptr pc, uptr bp, uptr sp,
+ uptr addr, bool is_write, uptr access_size) {
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
+ // Do not print more than one report, otherwise they will mix up.
+ // We can not return here because the function is marked as never-return.
+ AsanPrintf("AddressSanitizer: while reporting a bug found another one."
+ "Ignoring.\n");
+ SleepForSeconds(5);
+ Die();
+ }
- Printf("=================================================================\n");
+ AsanPrintf("===================================================="
+ "=============\n");
const char *bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
- uint8_t *shadow_addr = (uint8_t*)MemToShadow(addr);
+ u8 *shadow_addr = (u8*)MemToShadow(addr);
// If we are accessing 16 bytes, look at the second shadow byte.
if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY)
shadow_addr++;
@@ -620,7 +437,7 @@ void __asan_report_error(uintptr_t pc, uintptr_t bp, uintptr_t sp,
}
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
- int curr_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
+ u32 curr_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
if (curr_thread) {
// We started reporting an error message. Stop using the fake stack
@@ -628,47 +445,49 @@ void __asan_report_error(uintptr_t pc, uintptr_t bp, uintptr_t sp,
curr_thread->fake_stack().StopUsingFakeStack();
}
- Report("ERROR: AddressSanitizer %s on address "
- "%p at pc 0x%lx bp 0x%lx sp 0x%lx\n",
- bug_descr, addr, pc, bp, sp);
+ AsanReport("ERROR: AddressSanitizer %s on address "
+ "%p at pc 0x%zx bp 0x%zx sp 0x%zx\n",
+ bug_descr, (void*)addr, pc, bp, sp);
- Printf("%s of size %d at %p thread T%d\n",
- access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
- access_size, addr, curr_tid);
+ AsanPrintf("%s of size %zu at %p thread T%d\n",
+ access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
+ access_size, (void*)addr, curr_tid);
- if (FLAG_debug) {
- PrintBytes("PC: ", (uintptr_t*)pc);
+ if (flags()->debug) {
+ PrintBytes("PC: ", (uptr*)pc);
}
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax,
- false, // FLAG_fast_unwind,
- pc, bp);
+ GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
stack.PrintStack();
- CHECK(AddrIsInMem(addr));
-
DescribeAddress(addr, access_size);
- uintptr_t shadow_addr = MemToShadow(addr);
- Report("ABORTING\n");
- __asan_print_accumulated_stats();
- Printf("Shadow byte and word:\n");
- Printf(" %p: %x\n", shadow_addr, *(unsigned char*)shadow_addr);
- uintptr_t aligned_shadow = shadow_addr & ~(kWordSize - 1);
- PrintBytes(" ", (uintptr_t*)(aligned_shadow));
- Printf("More shadow bytes:\n");
- PrintBytes(" ", (uintptr_t*)(aligned_shadow-4*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow-3*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow-2*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow-1*kWordSize));
- PrintBytes("=>", (uintptr_t*)(aligned_shadow+0*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow+1*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow+2*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow+3*kWordSize));
- PrintBytes(" ", (uintptr_t*)(aligned_shadow+4*kWordSize));
- ASAN_DIE;
+ if (AddrIsInMem(addr)) {
+ uptr shadow_addr = MemToShadow(addr);
+ AsanReport("ABORTING\n");
+ __asan_print_accumulated_stats();
+ AsanPrintf("Shadow byte and word:\n");
+ AsanPrintf(" %p: %x\n", (void*)shadow_addr, *(unsigned char*)shadow_addr);
+ uptr aligned_shadow = shadow_addr & ~(kWordSize - 1);
+ PrintBytes(" ", (uptr*)(aligned_shadow));
+ AsanPrintf("More shadow bytes:\n");
+ PrintBytes(" ", (uptr*)(aligned_shadow-4*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow-3*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow-2*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow-1*kWordSize));
+ PrintBytes("=>", (uptr*)(aligned_shadow+0*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow+1*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow+2*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow+3*kWordSize));
+ PrintBytes(" ", (uptr*)(aligned_shadow+4*kWordSize));
+ }
+ if (error_report_callback) {
+ error_report_callback(error_message_buffer);
+ }
+ Die();
}
+
void __asan_init() {
if (asan_inited) return;
asan_init_is_running = true;
@@ -676,125 +495,75 @@ void __asan_init() {
// Make sure we are not statically linked.
AsanDoesNotSupportStaticLinkage();
- // flags
- const char *options = GetEnvFromProcSelfEnviron("ASAN_OPTIONS");
- FLAG_malloc_context_size =
- IntFlagValue(options, "malloc_context_size=", kMallocContextSize);
- CHECK(FLAG_malloc_context_size <= kMallocContextSize);
-
- FLAG_max_malloc_fill_size =
- IntFlagValue(options, "max_malloc_fill_size=", 0);
-
- FLAG_v = IntFlagValue(options, "verbosity=", 0);
-
- FLAG_redzone = IntFlagValue(options, "redzone=", 128);
- CHECK(FLAG_redzone >= 32);
- CHECK((FLAG_redzone & (FLAG_redzone - 1)) == 0);
-
- FLAG_atexit = IntFlagValue(options, "atexit=", 0);
- FLAG_poison_shadow = IntFlagValue(options, "poison_shadow=", 1);
- FLAG_report_globals = IntFlagValue(options, "report_globals=", 1);
- FLAG_lazy_shadow = IntFlagValue(options, "lazy_shadow=", 0);
- FLAG_handle_segv = IntFlagValue(options, "handle_segv=", ASAN_NEEDS_SEGV);
- FLAG_handle_sigill = IntFlagValue(options, "handle_sigill=", 0);
- FLAG_symbolize = IntFlagValue(options, "symbolize=", 1);
- FLAG_demangle = IntFlagValue(options, "demangle=", 1);
- FLAG_debug = IntFlagValue(options, "debug=", 0);
- FLAG_replace_cfallocator = IntFlagValue(options, "replace_cfallocator=", 1);
- FLAG_fast_unwind = IntFlagValue(options, "fast_unwind=", 1);
- FLAG_replace_str = IntFlagValue(options, "replace_str=", 1);
- FLAG_replace_intrin = IntFlagValue(options, "replace_intrin=", 1);
- FLAG_use_fake_stack = IntFlagValue(options, "use_fake_stack=", 1);
- FLAG_exitcode = IntFlagValue(options, "exitcode=", EXIT_FAILURE);
- FLAG_allow_user_poisoning = IntFlagValue(options,
- "allow_user_poisoning=", 1);
-
- if (FLAG_atexit) {
- atexit(asan_atexit);
+ // Initialize flags.
+ const char *options = GetEnv("ASAN_OPTIONS");
+ InitializeFlags(flags(), options);
+
+ if (flags()->verbosity && options) {
+ Report("Parsed ASAN_OPTIONS: %s\n", options);
}
- FLAG_quarantine_size =
- IntFlagValue(options, "quarantine_size=", 1UL << 28);
+ if (flags()->atexit) {
+ Atexit(asan_atexit);
+ }
// interceptors
InitializeAsanInterceptors();
ReplaceSystemMalloc();
+ ReplaceOperatorsNewAndDelete();
- INTERCEPT_FUNCTION(sigaction);
- INTERCEPT_FUNCTION(signal);
- INTERCEPT_FUNCTION(longjmp);
- INTERCEPT_FUNCTION(_longjmp);
- INTERCEPT_FUNCTION_IF_EXISTS(__cxa_throw);
- INTERCEPT_FUNCTION(pthread_create);
-#ifdef __APPLE__
- INTERCEPT_FUNCTION(dispatch_async_f);
- INTERCEPT_FUNCTION(dispatch_sync_f);
- INTERCEPT_FUNCTION(dispatch_after_f);
- INTERCEPT_FUNCTION(dispatch_barrier_async_f);
- INTERCEPT_FUNCTION(dispatch_group_async_f);
- // We don't need to intercept pthread_workqueue_additem_np() to support the
- // libdispatch API, but it helps us to debug the unsupported functions. Let's
- // intercept it only during verbose runs.
- if (FLAG_v >= 2) {
- INTERCEPT_FUNCTION(pthread_workqueue_additem_np);
- }
-#else
- // On Darwin siglongjmp tailcalls longjmp, so we don't want to intercept it
- // there.
- INTERCEPT_FUNCTION(siglongjmp);
-#endif
-
- MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
- MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
- MaybeInstallSigaction(SIGILL, ASAN_OnSIGILL);
-
- if (FLAG_v) {
- Printf("|| `[%p, %p]` || HighMem ||\n", kHighMemBeg, kHighMemEnd);
+ if (flags()->verbosity) {
+ Printf("|| `[%p, %p]` || HighMem ||\n",
+ (void*)kHighMemBeg, (void*)kHighMemEnd);
Printf("|| `[%p, %p]` || HighShadow ||\n",
- kHighShadowBeg, kHighShadowEnd);
+ (void*)kHighShadowBeg, (void*)kHighShadowEnd);
Printf("|| `[%p, %p]` || ShadowGap ||\n",
- kShadowGapBeg, kShadowGapEnd);
+ (void*)kShadowGapBeg, (void*)kShadowGapEnd);
Printf("|| `[%p, %p]` || LowShadow ||\n",
- kLowShadowBeg, kLowShadowEnd);
- Printf("|| `[%p, %p]` || LowMem ||\n", kLowMemBeg, kLowMemEnd);
+ (void*)kLowShadowBeg, (void*)kLowShadowEnd);
+ Printf("|| `[%p, %p]` || LowMem ||\n",
+ (void*)kLowMemBeg, (void*)kLowMemEnd);
Printf("MemToShadow(shadow): %p %p %p %p\n",
- MEM_TO_SHADOW(kLowShadowBeg),
- MEM_TO_SHADOW(kLowShadowEnd),
- MEM_TO_SHADOW(kHighShadowBeg),
- MEM_TO_SHADOW(kHighShadowEnd));
- Printf("red_zone=%ld\n", FLAG_redzone);
- Printf("malloc_context_size=%ld\n", (int)FLAG_malloc_context_size);
- Printf("fast_unwind=%d\n", (int)FLAG_fast_unwind);
-
- Printf("SHADOW_SCALE: %lx\n", SHADOW_SCALE);
- Printf("SHADOW_GRANULARITY: %lx\n", SHADOW_GRANULARITY);
- Printf("SHADOW_OFFSET: %lx\n", SHADOW_OFFSET);
+ (void*)MEM_TO_SHADOW(kLowShadowBeg),
+ (void*)MEM_TO_SHADOW(kLowShadowEnd),
+ (void*)MEM_TO_SHADOW(kHighShadowBeg),
+ (void*)MEM_TO_SHADOW(kHighShadowEnd));
+ Printf("red_zone=%zu\n", (uptr)flags()->redzone);
+ Printf("malloc_context_size=%zu\n", (uptr)flags()->malloc_context_size);
+
+ Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE);
+ Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY);
+ Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET);
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}
- if (__WORDSIZE == 64) {
- // Disable core dumper -- it makes little sense to dump 16T+ core.
- struct rlimit nocore;
- nocore.rlim_cur = 0;
- nocore.rlim_max = 0;
- setrlimit(RLIMIT_CORE, &nocore);
+ if (flags()->disable_core) {
+ DisableCoreDumper();
}
- {
- if (!FLAG_lazy_shadow) {
- if (kLowShadowBeg != kLowShadowEnd) {
- // mmap the low shadow plus one page.
- ReserveShadowMemoryRange(kLowShadowBeg - kPageSize, kLowShadowEnd);
- }
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
+ uptr shadow_start = kLowShadowBeg;
+ if (kLowShadowBeg > 0) shadow_start -= kMmapGranularity;
+ uptr shadow_end = kHighShadowEnd;
+ if (MemoryRangeIsAvailable(shadow_start, shadow_end)) {
+ if (kLowShadowBeg != kLowShadowEnd) {
+ // mmap the low shadow plus at least one page.
+ ReserveShadowMemoryRange(kLowShadowBeg - kMmapGranularity, kLowShadowEnd);
}
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gap
- void *prot = AsanMprotect(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ void *prot = Mprotect(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK(prot == (void*)kShadowGapBeg);
+ } else {
+ Report("Shadow memory range interleaves with an existing memory mapping. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ DumpProcessMap();
+ Die();
}
+ InstallSignalHandlers();
+
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;
@@ -804,7 +573,21 @@ void __asan_init() {
asanThreadRegistry().GetMain()->ThreadStart();
force_interface_symbols(); // no-op.
- if (FLAG_v) {
+ if (flags()->verbosity) {
Report("AddressSanitizer Init done\n");
}
}
+
+#if defined(ASAN_USE_PREINIT_ARRAY)
+ // On Linux, we force __asan_init to be called before anyone else
+ // by placing it into .preinit_array section.
+ // FIXME: do we have anything like this on Mac?
+ __attribute__((section(".preinit_array")))
+ typeof(__asan_init) *__asan_preinit =__asan_init;
+#elif defined(_WIN32) && defined(_DLL)
+ // On Windows, when using dynamic CRT (/MD), we can put a pointer
+ // to __asan_init into the global list of C initializers.
+ // See crt0dat.c in the CRT sources for the details.
+ #pragma section(".CRT$XIB", long, read) // NOLINT
+ __declspec(allocate(".CRT$XIB")) void (*__asan_preinit)() = __asan_init;
+#endif
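Both branches above exist to run __asan_init before any user code: on ELF targets a function pointer dropped into .preinit_array is invoked ahead of static constructors. A minimal demo of the same mechanism (GCC/Clang on an ELF system; early_init is a made-up function):

#include <unistd.h>
#include <cstdio>

// early_init runs before static constructors and before main; write() is
// used because not all of libc is guaranteed to be ready this early.
static void early_init() {
  const char msg[] = "early_init ran\n";
  write(2, msg, sizeof(msg) - 1);
}

__attribute__((section(".preinit_array"), used))
static void (*early_init_ptr)(void) = early_init;

int main() {
  printf("main ran\n");
  return 0;
}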
diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc
index 8163983..d6103c2 100644
--- a/lib/asan/asan_stack.cc
+++ b/lib/asan/asan_stack.cc
@@ -1,4 +1,4 @@
-//===-- asan_stack.cc -------------------------------------------*- C++ -*-===//
+//===-- asan_stack.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,12 +16,8 @@
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
-
-#include <string.h>
-
-#if ASAN_USE_SYSINFO == 1
-#include "sysinfo/sysinfo.h"
-#endif
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
#ifdef ASAN_USE_EXTERNAL_SYMBOLIZER
extern bool
@@ -30,183 +26,139 @@ ASAN_USE_EXTERNAL_SYMBOLIZER(const void *pc, char *out, int out_size);
namespace __asan {
-// ----------------------- ProcSelfMaps ----------------------------- {{{1
-#if ASAN_USE_SYSINFO == 1
-class ProcSelfMaps {
- public:
- void Init() {
- ScopedLock lock(&mu_);
- if (map_size_ != 0) return; // already inited
- if (FLAG_v >= 2) {
- Printf("ProcSelfMaps::Init()\n");
- }
- ProcMapsIterator it(0, &proc_self_maps_); // 0 means "current pid"
-
- uint64 start, end, offset;
- int64 inode;
- char *flags, *filename;
- CHECK(map_size_ == 0);
- while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
- CHECK(map_size_ < kMaxProcSelfMapsSize);
- Mapping &mapping = memory_map[map_size_];
- mapping.beg = start;
- mapping.end = end;
- mapping.offset = offset;
- real_strncpy(mapping.name,
- filename, ASAN_ARRAY_SIZE(mapping.name));
- mapping.name[ASAN_ARRAY_SIZE(mapping.name) - 1] = 0;
- if (FLAG_v >= 2) {
- Printf("[%ld] [%p,%p] off %p %s\n", map_size_,
- mapping.beg, mapping.end, mapping.offset, mapping.name);
- }
- map_size_++;
- }
- }
-
- void Print() {
- Printf("%s\n", proc_self_maps_);
- }
-
- void PrintPc(uintptr_t pc, int idx) {
- for (size_t i = 0; i < map_size_; i++) {
- Mapping &m = memory_map[i];
- if (pc >= m.beg && pc < m.end) {
- uintptr_t offset = pc - m.beg;
- if (i == 0) offset = pc;
- Printf(" #%d 0x%lx (%s+0x%lx)\n", idx, pc, m.name, offset);
- return;
- }
- }
- Printf(" #%d 0x%lx\n", idx, pc);
- }
-
- private:
- void copy_until_new_line(const char *str, char *dest, size_t max_size) {
- size_t i = 0;
- for (; str[i] && str[i] != '\n' && i < max_size - 1; i++) {
- dest[i] = str[i];
- }
- dest[i] = 0;
- }
-
-
- struct Mapping {
- uintptr_t beg, end, offset;
- char name[1000];
- };
- static const size_t kMaxNumMapEntries = 4096;
- static const size_t kMaxProcSelfMapsSize = 1 << 20;
- ProcMapsIterator::Buffer proc_self_maps_;
- size_t map_size_;
- Mapping memory_map[kMaxNumMapEntries];
-
- static AsanLock mu_;
-};
-
-static ProcSelfMaps proc_self_maps;
-AsanLock ProcSelfMaps::mu_(LINKER_INITIALIZED);
-
-
-void AsanStackTrace::PrintStack(uintptr_t *addr, size_t size) {
- proc_self_maps.Init();
- for (size_t i = 0; i < size && addr[i]; i++) {
- uintptr_t pc = addr[i];
- // int line;
- proc_self_maps.PrintPc(pc, i);
- // Printf(" #%ld 0x%lx %s\n", i, pc, rtn.c_str());
- }
+// ----------------------- AsanStackTrace ----------------------------- {{{1
+// PCs in stack traces are actually the return addresses, that is,
+// addresses of the next instructions after the call. That's why we
+// decrement them.
+static uptr patch_pc(uptr pc) {
+#ifdef __arm__
+ // Cancel Thumb bit.
+ pc = pc & (~1);
+#endif
+ return pc - 1;
}
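patch_pc compensates for the fact that the collected pcs are return addresses: they point just past each call, so without the -1 adjustment the symbolizer can blame the following source line (on ARM the Thumb bit must be cleared first). A self-contained illustration with a made-up address:

#include <cstdint>
#include <cstdio>

// Same adjustment as above: step a return address back one byte so it lands
// inside the call instruction it came from.
static uintptr_t patch_pc(uintptr_t pc) {
#ifdef __arm__
  pc &= ~uintptr_t(1);   // drop the Thumb mode bit before adjusting
#endif
  return pc - 1;
}

int main() {
  // Pretend the call instruction occupies [0x401000, 0x401005) and the
  // unwinder handed us the return address 0x401005.
  uintptr_t ret_addr = 0x401005;
  printf("symbolize 0x%zx instead of 0x%zx\n",
         (size_t)patch_pc(ret_addr), (size_t)ret_addr);
  return 0;
}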
-#elif defined(ASAN_USE_EXTERNAL_SYMBOLIZER)
-void AsanStackTrace::PrintStack(uintptr_t *addr, size_t size) {
- for (size_t i = 0; i < size && addr[i]; i++) {
- uintptr_t pc = addr[i];
+
+#if defined(ASAN_USE_EXTERNAL_SYMBOLIZER)
+void AsanStackTrace::PrintStack(uptr *addr, uptr size) {
+ for (uptr i = 0; i < size && addr[i]; i++) {
+ uptr pc = addr[i];
+ if (i < size - 1 && addr[i + 1])
+ pc = patch_pc(pc);
char buff[4096];
ASAN_USE_EXTERNAL_SYMBOLIZER((void*)pc, buff, sizeof(buff));
- Printf(" #%ld 0x%lx %s\n", i, pc, buff);
+ AsanPrintf(" #%zu 0x%zx %s\n", i, pc, buff);
}
}
-#else // ASAN_USE_SYSINFO
-void AsanStackTrace::PrintStack(uintptr_t *addr, size_t size) {
- for (size_t i = 0; i < size && addr[i]; i++) {
- uintptr_t pc = addr[i];
- Printf(" #%ld 0x%lx\n", i, pc);
+#else // ASAN_USE_EXTERNAL_SYMBOLIZER
+void AsanStackTrace::PrintStack(uptr *addr, uptr size) {
+ ProcessMaps proc_maps;
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && addr[i]; i++) {
+ uptr pc = addr[i];
+ if (i < size - 1 && addr[i + 1])
+ pc = patch_pc(pc);
+ AddressInfo addr_frames[64];
+ uptr addr_frames_num = 0;
+ if (flags()->symbolize) {
+ addr_frames_num = SymbolizeCode(pc, addr_frames,
+ ASAN_ARRAY_SIZE(addr_frames));
+ }
+ if (addr_frames_num > 0) {
+ for (uptr j = 0; j < addr_frames_num; j++) {
+ AddressInfo &info = addr_frames[j];
+ AsanPrintf(" #%zu 0x%zx", frame_num, pc);
+ if (info.function) {
+ AsanPrintf(" in %s", info.function);
+ }
+ if (info.file) {
+ AsanPrintf(" %s:%d:%d", info.file, info.line, info.column);
+ } else if (info.module) {
+ AsanPrintf(" (%s+0x%zx)", info.module, info.module_offset);
+ }
+ AsanPrintf("\n");
+ info.Clear();
+ frame_num++;
+ }
+ } else {
+ uptr offset;
+ char filename[4096];
+ if (proc_maps.GetObjectNameAndOffset(pc, &offset,
+ filename, sizeof(filename))) {
+ AsanPrintf(" #%zu 0x%zx (%s+0x%zx)\n", frame_num, pc, filename,
+ offset);
+ } else {
+ AsanPrintf(" #%zu 0x%zx\n", frame_num, pc);
+ }
+ frame_num++;
+ }
}
}
-#endif // ASAN_USE_SYSINFO
+#endif // ASAN_USE_EXTERNAL_SYMBOLIZER
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_OK
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-// ----------------------- AsanStackTrace ----------------------------- {{{1
-uintptr_t AsanStackTrace::GetCurrentPc() {
+uptr AsanStackTrace::GetCurrentPc() {
return GET_CALLER_PC();
}
-void AsanStackTrace::FastUnwindStack(uintptr_t pc, uintptr_t bp) {
+void AsanStackTrace::FastUnwindStack(uptr pc, uptr bp) {
CHECK(size == 0 && trace[0] == pc);
size = 1;
if (!asan_inited) return;
AsanThread *t = asanThreadRegistry().GetCurrent();
if (!t) return;
- uintptr_t *frame = (uintptr_t*)bp;
- uintptr_t *prev_frame = frame;
- uintptr_t *top = (uintptr_t*)t->stack_top();
- uintptr_t *bottom = (uintptr_t*)t->stack_bottom();
+ uptr *frame = (uptr*)bp;
+ uptr *prev_frame = frame;
+ uptr *top = (uptr*)t->stack_top();
+ uptr *bottom = (uptr*)t->stack_bottom();
while (frame >= prev_frame &&
- frame < top &&
+ frame < top - 2 &&
frame > bottom &&
size < max_size) {
- uintptr_t pc1 = frame[1];
+ uptr pc1 = frame[1];
if (pc1 != pc) {
trace[size++] = pc1;
}
prev_frame = frame;
- frame = (uintptr_t*)frame[0];
+ frame = (uptr*)frame[0];
}
}
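FastUnwindStack walks saved frame pointers: frame[0] holds the previous frame pointer, frame[1] the return address, and the loop stops once the chain leaves the thread's [bottom, top) stack range or stops growing toward higher addresses. A standalone sketch of that walk; it assumes the whole call chain was built with -fno-omit-frame-pointer and uses a crude distance bound instead of the thread registry's stack limits:

#include <cstdint>
#include <cstdio>

// Frame-pointer walk: on common ABIs each frame stores {previous fp, return
// address}. Illustrative only; the real unwinder bounds the walk by the
// current thread's stack_top() and stack_bottom().
static int CaptureBacktrace(uintptr_t *trace, int max_frames) {
  uintptr_t *frame = (uintptr_t *)__builtin_frame_address(0);
  int n = 0;
  while (frame && n < max_frames) {
    trace[n++] = frame[1];                      // saved return address
    uintptr_t prev = frame[0];                  // saved frame pointer
    if (prev <= (uintptr_t)frame || prev - (uintptr_t)frame > (1 << 20))
      break;                                    // stacks grow down; be sane
    frame = (uintptr_t *)prev;
  }
  return n;
}

int main() {
  uintptr_t pcs[16];
  int n = CaptureBacktrace(pcs, 16);
  for (int i = 0; i < n; i++)
    printf("  #%d 0x%zx\n", i, (size_t)pcs[i]);
  return 0;
}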
// On 32-bits we don't compress stack traces.
 // On 64-bits we compress stack traces: if a given pc differs slightly from
// the previous one, we record a 31-bit offset instead of the full pc.
-size_t AsanStackTrace::CompressStack(AsanStackTrace *stack,
- uint32_t *compressed, size_t size) {
+uptr AsanStackTrace::CompressStack(AsanStackTrace *stack,
+ u32 *compressed, uptr size) {
#if __WORDSIZE == 32
// Don't compress, just copy.
- size_t res = 0;
- for (size_t i = 0; i < stack->size && i < size; i++) {
+ uptr res = 0;
+ for (uptr i = 0; i < stack->size && i < size; i++) {
compressed[i] = stack->trace[i];
res++;
}
if (stack->size < size)
compressed[stack->size] = 0;
#else // 64 bits, compress.
- uintptr_t prev_pc = 0;
- const uintptr_t kMaxOffset = (1ULL << 30) - 1;
- uintptr_t c_index = 0;
- size_t res = 0;
- for (size_t i = 0, n = stack->size; i < n; i++) {
- uintptr_t pc = stack->trace[i];
+ uptr prev_pc = 0;
+ const uptr kMaxOffset = (1ULL << 30) - 1;
+ uptr c_index = 0;
+ uptr res = 0;
+ for (uptr i = 0, n = stack->size; i < n; i++) {
+ uptr pc = stack->trace[i];
if (!pc) break;
- if ((int64_t)pc < 0) break;
- // Printf("C pc[%ld] %lx\n", i, pc);
+ if ((s64)pc < 0) break;
+ // Printf("C pc[%zu] %zx\n", i, pc);
if (prev_pc - pc < kMaxOffset || pc - prev_pc < kMaxOffset) {
- uintptr_t offset = (int64_t)(pc - prev_pc);
+ uptr offset = (s64)(pc - prev_pc);
offset |= (1U << 31);
if (c_index >= size) break;
- // Printf("C co[%ld] offset %lx\n", i, offset);
+ // Printf("C co[%zu] offset %zx\n", i, offset);
compressed[c_index++] = offset;
} else {
- uintptr_t hi = pc >> 32;
- uintptr_t lo = (pc << 32) >> 32;
+ uptr hi = pc >> 32;
+ uptr lo = (pc << 32) >> 32;
CHECK((hi & (1 << 31)) == 0);
if (c_index + 1 >= size) break;
- // Printf("C co[%ld] hi/lo: %lx %lx\n", c_index, hi, lo);
+ // Printf("C co[%zu] hi/lo: %zx %zx\n", c_index, hi, lo);
compressed[c_index++] = hi;
compressed[c_index++] = lo;
}
@@ -224,53 +176,53 @@ size_t AsanStackTrace::CompressStack(AsanStackTrace *stack,
AsanStackTrace check_stack;
UncompressStack(&check_stack, compressed, size);
if (res < check_stack.size) {
- Printf("res %ld check_stack.size %ld; c_size %ld\n", res,
+ Printf("res %zu check_stack.size %zu; c_size %zu\n", res,
check_stack.size, size);
}
// |res| may be greater than check_stack.size, because
// UncompressStack(CompressStack(stack)) eliminates the 0x0 frames.
CHECK(res >= check_stack.size);
- CHECK(0 == real_memcmp(check_stack.trace, stack->trace,
- check_stack.size * sizeof(uintptr_t)));
+ CHECK(0 == REAL(memcmp)(check_stack.trace, stack->trace,
+ check_stack.size * sizeof(uptr)));
#endif
return res;
}
void AsanStackTrace::UncompressStack(AsanStackTrace *stack,
- uint32_t *compressed, size_t size) {
+ u32 *compressed, uptr size) {
#if __WORDSIZE == 32
// Don't uncompress, just copy.
stack->size = 0;
- for (size_t i = 0; i < size && i < kStackTraceMax; i++) {
+ for (uptr i = 0; i < size && i < kStackTraceMax; i++) {
if (!compressed[i]) break;
stack->size++;
stack->trace[i] = compressed[i];
}
#else // 64 bits, uncompress
- uintptr_t prev_pc = 0;
+ uptr prev_pc = 0;
stack->size = 0;
- for (size_t i = 0; i < size && stack->size < kStackTraceMax; i++) {
- uint32_t x = compressed[i];
- uintptr_t pc = 0;
+ for (uptr i = 0; i < size && stack->size < kStackTraceMax; i++) {
+ u32 x = compressed[i];
+ uptr pc = 0;
if (x & (1U << 31)) {
- // Printf("U co[%ld] offset: %x\n", i, x);
+ // Printf("U co[%zu] offset: %x\n", i, x);
// this is an offset
- int32_t offset = x;
+ s32 offset = x;
       offset = (offset << 1) >> 1;  // remove the 31st bit and sign-extend.
pc = prev_pc + offset;
CHECK(pc);
} else {
// CHECK(i + 1 < size);
if (i + 1 >= size) break;
- uintptr_t hi = x;
- uintptr_t lo = compressed[i+1];
- // Printf("U co[%ld] hi/lo: %lx %lx\n", i, hi, lo);
+ uptr hi = x;
+ uptr lo = compressed[i+1];
+ // Printf("U co[%zu] hi/lo: %zx %zx\n", i, hi, lo);
i++;
pc = (hi << 32) | lo;
if (!pc) break;
}
- // Printf("U pc[%ld] %lx\n", stack->size, pc);
+ // Printf("U pc[%zu] %zx\n", stack->size, pc);
stack->trace[stack->size++] = pc;
prev_pc = pc;
}
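On 64-bit targets CompressStack/UncompressStack store most pcs as a 31-bit signed delta from the previous pc, tagged with the high bit; only deltas that do not fit fall back to two 32-bit words. A self-contained round-trip of just the delta encoding (the two-word fallback is omitted and the pcs are made up):

#include <cstdint>
#include <cassert>
#include <cstdio>

// Delta-only sketch of the 64-bit stack compression: each entry is the
// signed 31-bit difference from the previous pc, tagged with the high bit.
static uint32_t EncodeDelta(uint64_t prev_pc, uint64_t pc) {
  int64_t delta = (int64_t)(pc - prev_pc);
  assert(delta > -(1LL << 30) && delta < (1LL << 30) && "delta too large");
  return (uint32_t)delta | (1U << 31);
}

static uint64_t DecodeDelta(uint64_t prev_pc, uint32_t word) {
  uint32_t magnitude = word & 0x7fffffffU;                    // drop the tag
  int64_t delta = (int64_t)(magnitude ^ 0x40000000U) - 0x40000000LL;  // sign-extend 31 bits
  return prev_pc + (uint64_t)delta;
}

int main() {
  uint64_t pcs[] = {0x400123, 0x400456, 0x4003f0};
  uint64_t prev = pcs[0];
  for (int i = 1; i < 3; i++) {
    uint32_t w = EncodeDelta(prev, pcs[i]);
    uint64_t back = DecodeDelta(prev, w);
    assert(back == pcs[i]);
    printf("pc 0x%llx round-trips via delta word 0x%x\n",
           (unsigned long long)back, w);
    prev = pcs[i];
  }
  return 0;
}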
diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h
index 97aefd6..6ca9a0b 100644
--- a/lib/asan/asan_stack.h
+++ b/lib/asan/asan_stack.h
@@ -18,77 +18,87 @@
namespace __asan {
-static const size_t kStackTraceMax = 64;
+static const uptr kStackTraceMax = 64;
struct AsanStackTrace {
- size_t size;
- size_t max_size;
- uintptr_t trace[kStackTraceMax];
- static void PrintStack(uintptr_t *addr, size_t size);
+ uptr size;
+ uptr max_size;
+ uptr trace[kStackTraceMax];
+ static void PrintStack(uptr *addr, uptr size);
void PrintStack() {
PrintStack(this->trace, this->size);
}
- void CopyTo(uintptr_t *dst, size_t dst_size) {
- for (size_t i = 0; i < size && i < dst_size; i++)
+ void CopyTo(uptr *dst, uptr dst_size) {
+ for (uptr i = 0; i < size && i < dst_size; i++)
dst[i] = trace[i];
- for (size_t i = size; i < dst_size; i++)
+ for (uptr i = size; i < dst_size; i++)
dst[i] = 0;
}
- void CopyFrom(uintptr_t *src, size_t src_size) {
+ void CopyFrom(uptr *src, uptr src_size) {
size = src_size;
if (size > kStackTraceMax) size = kStackTraceMax;
- for (size_t i = 0; i < size; i++) {
+ for (uptr i = 0; i < size; i++) {
trace[i] = src[i];
}
}
- void FastUnwindStack(uintptr_t pc, uintptr_t bp);
-// static _Unwind_Reason_Code Unwind_Trace(
-// struct _Unwind_Context *ctx, void *param);
- static uintptr_t GetCurrentPc();
+ void GetStackTrace(uptr max_s, uptr pc, uptr bp);
- static size_t CompressStack(AsanStackTrace *stack,
- uint32_t *compressed, size_t size);
+ void FastUnwindStack(uptr pc, uptr bp);
+
+ static uptr GetCurrentPc();
+
+ static uptr CompressStack(AsanStackTrace *stack,
+ u32 *compressed, uptr size);
static void UncompressStack(AsanStackTrace *stack,
- uint32_t *compressed, size_t size);
- size_t full_frame_count;
+ u32 *compressed, uptr size);
};
} // namespace __asan
+// Use this macro if you want to print stack trace with the caller
+// of the current function in the top frame.
+#define GET_CALLER_PC_BP_SP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC(); \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+// Use this macro if you want to print stack trace with the current
+// function in the top frame.
+#define GET_CURRENT_PC_BP_SP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = AsanStackTrace::GetCurrentPc(); \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
// fast_unwind is currently unused.
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, fast_unwind, pc, bp) \
- AsanStackTrace stack; \
- { \
- uintptr_t saved_pc = pc; \
- uintptr_t saved_bp = bp; \
- stack.size = 0; \
- stack.full_frame_count = 0; \
- stack.trace[0] = saved_pc; \
- if ((max_s) > 1) { \
- stack.max_size = max_s; \
- stack.FastUnwindStack(saved_pc, saved_bp); \
- } \
- } \
-
-#define GET_STACK_TRACE_HERE(max_size, fast_unwind) \
- GET_STACK_TRACE_WITH_PC_AND_BP(max_size, fast_unwind, \
- AsanStackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) \
-
-#define GET_STACK_TRACE_HERE_FOR_MALLOC \
- GET_STACK_TRACE_HERE(FLAG_malloc_context_size, FLAG_fast_unwind)
-
-#define GET_STACK_TRACE_HERE_FOR_FREE(ptr) \
- GET_STACK_TRACE_HERE(FLAG_malloc_context_size, FLAG_fast_unwind)
+#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp) \
+ AsanStackTrace stack; \
+ stack.GetStackTrace(max_s, pc, bp)
+
+// NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
+// as early as possible (in functions exposed to the user), as we generally
+// don't want the stack trace to contain functions from ASan internals.
+
+#define GET_STACK_TRACE_HERE(max_size) \
+ GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
+ AsanStackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+
+#define GET_STACK_TRACE_HERE_FOR_MALLOC \
+ GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+
+#define GET_STACK_TRACE_HERE_FOR_FREE(ptr) \
+ GET_STACK_TRACE_HERE(flags()->malloc_context_size)
#define PRINT_CURRENT_STACK() \
{ \
- GET_STACK_TRACE_HERE(kStackTraceMax, false); \
+ GET_STACK_TRACE_HERE(kStackTraceMax); \
stack.PrintStack(); \
- } \
+ }
#endif // ASAN_STACK_H
diff --git a/lib/asan/asan_stats.cc b/lib/asan/asan_stats.cc
index 3e4d1b4..ef5e53a 100644
--- a/lib/asan/asan_stats.cc
+++ b/lib/asan/asan_stats.cc
@@ -1,4 +1,4 @@
-//===-- asan_stats.cc -------------------------------------------*- C++ -*-===//
+//===-- asan_stats.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,36 +21,36 @@
namespace __asan {
AsanStats::AsanStats() {
- CHECK(real_memset != NULL);
- real_memset(this, 0, sizeof(AsanStats));
+ CHECK(REAL(memset) != 0);
+ REAL(memset)(this, 0, sizeof(AsanStats));
}
static void PrintMallocStatsArray(const char *prefix,
- size_t (&array)[kNumberOfSizeClasses]) {
- Printf("%s", prefix);
- for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
+ uptr (&array)[kNumberOfSizeClasses]) {
+ AsanPrintf("%s", prefix);
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
if (!array[i]) continue;
- Printf("%ld:%ld; ", i, array[i]);
+ AsanPrintf("%zu:%zu; ", i, array[i]);
}
- Printf("\n");
+ AsanPrintf("\n");
}
void AsanStats::Print() {
- Printf("Stats: %ldM malloced (%ldM for red zones) by %ld calls\n",
- malloced>>20, malloced_redzones>>20, mallocs);
- Printf("Stats: %ldM realloced by %ld calls\n", realloced>>20, reallocs);
- Printf("Stats: %ldM freed by %ld calls\n", freed>>20, frees);
- Printf("Stats: %ldM really freed by %ld calls\n",
- really_freed>>20, real_frees);
- Printf("Stats: %ldM (%ld full pages) mmaped in %ld calls\n",
- mmaped>>20, mmaped / kPageSize, mmaps);
+ AsanPrintf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n",
+ malloced>>20, malloced_redzones>>20, mallocs);
+ AsanPrintf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs);
+ AsanPrintf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
+ AsanPrintf("Stats: %zuM really freed by %zu calls\n",
+ really_freed>>20, real_frees);
+ AsanPrintf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
+ mmaped>>20, mmaped / kPageSize, mmaps);
PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
PrintMallocStatsArray(" frees by size class: ", freed_by_size);
PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size);
- Printf("Stats: malloc large: %ld small slow: %ld\n",
- malloc_large, malloc_small_slow);
+ AsanPrintf("Stats: malloc large: %zu small slow: %zu\n",
+ malloc_large, malloc_small_slow);
}
static AsanLock print_lock(LINKER_INITIALIZED);
@@ -67,19 +67,19 @@ static void PrintAccumulatedStats() {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-size_t __asan_get_current_allocated_bytes() {
+uptr __asan_get_current_allocated_bytes() {
return asanThreadRegistry().GetCurrentAllocatedBytes();
}
-size_t __asan_get_heap_size() {
+uptr __asan_get_heap_size() {
return asanThreadRegistry().GetHeapSize();
}
-size_t __asan_get_free_bytes() {
+uptr __asan_get_free_bytes() {
return asanThreadRegistry().GetFreeBytes();
}
-size_t __asan_get_unmapped_bytes() {
+uptr __asan_get_unmapped_bytes() {
return 0;
}
diff --git a/lib/asan/asan_stats.h b/lib/asan/asan_stats.h
index d6dd084..b4c63f4 100644
--- a/lib/asan/asan_stats.h
+++ b/lib/asan/asan_stats.h
@@ -23,27 +23,27 @@ namespace __asan {
// Each AsanThread has its own AsanStats, which are sometimes flushed
// to the accumulated AsanStats.
struct AsanStats {
- // AsanStats must be a struct consisting of size_t fields only.
- // When merging two AsanStats structs, we treat them as arrays of size_t.
- size_t mallocs;
- size_t malloced;
- size_t malloced_redzones;
- size_t frees;
- size_t freed;
- size_t real_frees;
- size_t really_freed;
- size_t really_freed_redzones;
- size_t reallocs;
- size_t realloced;
- size_t mmaps;
- size_t mmaped;
- size_t mmaped_by_size[kNumberOfSizeClasses];
- size_t malloced_by_size[kNumberOfSizeClasses];
- size_t freed_by_size[kNumberOfSizeClasses];
- size_t really_freed_by_size[kNumberOfSizeClasses];
-
- size_t malloc_large;
- size_t malloc_small_slow;
+ // AsanStats must be a struct consisting of uptr fields only.
+ // When merging two AsanStats structs, we treat them as arrays of uptr.
+ uptr mallocs;
+ uptr malloced;
+ uptr malloced_redzones;
+ uptr frees;
+ uptr freed;
+ uptr real_frees;
+ uptr really_freed;
+ uptr really_freed_redzones;
+ uptr reallocs;
+ uptr realloced;
+ uptr mmaps;
+ uptr mmaped;
+ uptr mmaped_by_size[kNumberOfSizeClasses];
+ uptr malloced_by_size[kNumberOfSizeClasses];
+ uptr freed_by_size[kNumberOfSizeClasses];
+ uptr really_freed_by_size[kNumberOfSizeClasses];
+
+ uptr malloc_large;
+ uptr malloc_small_slow;
// Ctor for global AsanStats (accumulated stats and main thread stats).
explicit AsanStats(LinkerInitialized) { }
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index 329197d..05a41ea 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -1,4 +1,4 @@
-//===-- asan_thread.cc ------------------------------------------*- C++ -*-===//
+//===-- asan_thread.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,19 +13,11 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
+#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "asan_mapping.h"
-
-#if ASAN_USE_SYSINFO == 1
-#include "sysinfo/sysinfo.h"
-#endif
-
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <pthread.h>
-#include <stdlib.h>
-#include <string.h>
+#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
@@ -34,67 +26,105 @@ AsanThread::AsanThread(LinkerInitialized x)
malloc_storage_(x),
stats_(x) { }
-AsanThread::AsanThread(int parent_tid, void *(*start_routine) (void *),
- void *arg, AsanStackTrace *stack)
- : start_routine_(start_routine),
- arg_(arg) {
- asanThreadRegistry().RegisterThread(this, parent_tid, stack);
+static AsanLock mu_for_thread_summary(LINKER_INITIALIZED);
+static LowLevelAllocator allocator_for_thread_summary(LINKER_INITIALIZED);
+
+AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine,
+ void *arg, AsanStackTrace *stack) {
+ uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+ AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
+ thread->start_routine_ = start_routine;
+ thread->arg_ = arg;
+
+ const uptr kSummaryAllocSize = 1024;
+ CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize);
+ AsanThreadSummary *summary;
+ {
+ ScopedLock lock(&mu_for_thread_summary);
+ summary = (AsanThreadSummary*)
+ allocator_for_thread_summary.Allocate(kSummaryAllocSize);
+ }
+ summary->Init(parent_tid, stack);
+ summary->set_thread(thread);
+ thread->set_summary(summary);
+
+ return thread;
+}
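AsanThread::Create deliberately avoids operator new: it maps a page-rounded block with MmapOrDie and fills the fields in by hand, so thread bookkeeping never re-enters the intercepted allocator. The same allocate-then-initialize pattern in miniature, using mmap directly (POSIX; ThreadInfo is a hypothetical struct):

#include <sys/mman.h>
#include <cstddef>
#include <cassert>
#include <cstdio>

// Allocate an object from a raw anonymous mapping instead of operator new,
// then initialize its fields manually.
struct ThreadInfo {
  void *(*start_routine)(void *);
  void *arg;
};

static const size_t kPage = 4096;

int main() {
  size_t size = ((sizeof(ThreadInfo) + kPage - 1) / kPage) * kPage;  // RoundUpTo
  void *mem = mmap(0, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  ThreadInfo *t = static_cast<ThreadInfo *>(mem);
  t->start_routine = 0;
  t->arg = 0;
  printf("ThreadInfo of %zu bytes placed in a %zu-byte mapping\n",
         sizeof(ThreadInfo), size);
  munmap(mem, size);
  return 0;
}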
+
+void AsanThreadSummary::TSDDtor(void *tsd) {
+ AsanThreadSummary *summary = (AsanThreadSummary*)tsd;
+ if (flags()->verbosity >= 1) {
+ Report("T%d TSDDtor\n", summary->tid());
+ }
+ if (summary->thread()) {
+ summary->thread()->Destroy();
+ }
}
-AsanThread::~AsanThread() {
+void AsanThread::Destroy() {
+ if (flags()->verbosity >= 1) {
+ Report("T%d exited\n", tid());
+ }
+
asanThreadRegistry().UnregisterThread(this);
- fake_stack().Cleanup();
+ CHECK(summary()->thread() == 0);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStack();
-}
-
-void AsanThread::ClearShadowForThreadStack() {
- uintptr_t shadow_bot = MemToShadow(stack_bottom_);
- uintptr_t shadow_top = MemToShadow(stack_top_);
- real_memset((void*)shadow_bot, 0, shadow_top - shadow_bot);
+ fake_stack().Cleanup();
+ uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+ UnmapOrDie(this, size);
}
void AsanThread::Init() {
SetThreadStackTopAndBottom();
- fake_stack_.Init(stack_size());
- if (FLAG_v >= 1) {
- int local = 0;
- Report("T%d: stack [%p,%p) size 0x%lx; local=%p, pthread_self=%p\n",
- tid(), stack_bottom_, stack_top_,
- stack_top_ - stack_bottom_, &local, pthread_self());
- }
-
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_));
-
ClearShadowForThreadStack();
+ if (flags()->verbosity >= 1) {
+ int local = 0;
+ Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
+ tid(), (void*)stack_bottom_, (void*)stack_top_,
+ stack_top_ - stack_bottom_, &local);
+ }
+ fake_stack_.Init(stack_size());
+ AsanPlatformThreadInit();
}
-void *AsanThread::ThreadStart() {
+thread_return_t AsanThread::ThreadStart() {
Init();
+ if (flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
- // start_routine_ == NULL if we're on the main thread or on one of the
+ // start_routine_ == 0 if we're on the main thread or on one of the
// OS X libdispatch worker threads. But nobody is supposed to call
// ThreadStart() for the worker threads.
CHECK(tid() == 0);
return 0;
}
- void *res = start_routine_(arg_);
+ thread_return_t res = start_routine_(arg_);
malloc_storage().CommitBack();
+ if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
- if (FLAG_v >= 1) {
- Report("T%d exited\n", tid());
- }
+ this->Destroy();
return res;
}
-const char *AsanThread::GetFrameNameByAddr(uintptr_t addr, uintptr_t *offset) {
- uintptr_t bottom = 0;
+void AsanThread::SetThreadStackTopAndBottom() {
+ GetThreadStackTopAndBottom(tid() == 0, &stack_top_, &stack_bottom_);
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+}
+
+void AsanThread::ClearShadowForThreadStack() {
+ PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
+}
+
+const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset) {
+ uptr bottom = 0;
bool is_fake_stack = false;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
@@ -103,76 +133,30 @@ const char *AsanThread::GetFrameNameByAddr(uintptr_t addr, uintptr_t *offset) {
CHECK(bottom);
is_fake_stack = true;
}
- uintptr_t aligned_addr = addr & ~(__WORDSIZE/8 - 1); // align addr.
- uintptr_t *ptr = (uintptr_t*)aligned_addr;
- while (ptr >= (uintptr_t*)bottom) {
- if (ptr[0] == kCurrentStackFrameMagic ||
- (is_fake_stack && ptr[0] == kRetiredStackFrameMagic)) {
- *offset = addr - (uintptr_t)ptr;
- return (const char*)ptr[1];
- }
- ptr--;
+ uptr aligned_addr = addr & ~(__WORDSIZE/8 - 1); // align addr.
+ u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+
+ while (shadow_ptr >= shadow_bottom &&
+ *shadow_ptr != kAsanStackLeftRedzoneMagic) {
+ shadow_ptr--;
}
- *offset = 0;
- return "UNKNOWN";
-}
-void AsanThread::SetThreadStackTopAndBottom() {
-#ifdef __APPLE__
- size_t stacksize = pthread_get_stacksize_np(pthread_self());
- void *stackaddr = pthread_get_stackaddr_np(pthread_self());
- stack_top_ = (uintptr_t)stackaddr;
- stack_bottom_ = stack_top_ - stacksize;
- int local;
- CHECK(AddrIsInStack((uintptr_t)&local));
-#else
-#if ASAN_USE_SYSINFO == 1
- if (tid() == 0) {
- // This is the main thread. Libpthread may not be initialized yet.
- struct rlimit rl;
- CHECK(getrlimit(RLIMIT_STACK, &rl) == 0);
-
- // Find the mapping that contains a stack variable.
- ProcMapsIterator it(0);
- uint64_t start, end;
- uint64_t prev_end = 0;
- while (it.Next(&start, &end, NULL, NULL, NULL, NULL)) {
- if ((uintptr_t)&rl < end)
- break;
- prev_end = end;
- }
- CHECK((uintptr_t)&rl >= start && (uintptr_t)&rl < end);
-
- // Get stacksize from rlimit, but clip it so that it does not overlap
- // with other mappings.
- size_t stacksize = rl.rlim_cur;
- if (stacksize > end - prev_end)
- stacksize = end - prev_end;
- if (stacksize > kMaxThreadStackSize)
- stacksize = kMaxThreadStackSize;
- stack_top_ = end;
- stack_bottom_ = end - stacksize;
- CHECK(AddrIsInStack((uintptr_t)&rl));
- return;
+ while (shadow_ptr >= shadow_bottom &&
+ *shadow_ptr == kAsanStackLeftRedzoneMagic) {
+ shadow_ptr--;
}
-#endif
- pthread_attr_t attr;
- CHECK(pthread_getattr_np(pthread_self(), &attr) == 0);
- size_t stacksize = 0;
- void *stackaddr = NULL;
- pthread_attr_getstack(&attr, &stackaddr, &stacksize);
- pthread_attr_destroy(&attr);
-
- stack_top_ = (uintptr_t)stackaddr + stacksize;
- stack_bottom_ = (uintptr_t)stackaddr;
- // When running with unlimited stack size, we still want to set some limit.
- // The unlimited stack size is caused by 'ulimit -s unlimited'.
- // Also, for some reason, GNU make spawns subrocesses with unlimited stack.
- if (stacksize > kMaxThreadStackSize) {
- stack_bottom_ = stack_top_ - kMaxThreadStackSize;
+
+ if (shadow_ptr < shadow_bottom) {
+ *offset = 0;
+ return "UNKNOWN";
}
- CHECK(AddrIsInStack((uintptr_t)&attr));
-#endif
+
+ uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
+ CHECK((ptr[0] == kCurrentStackFrameMagic) ||
+ (is_fake_stack && ptr[0] == kRetiredStackFrameMagic));
+ *offset = addr - (uptr)ptr;
+ return (const char*)ptr[1];
}
} // namespace __asan
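The rewritten GetFrameNameByAddr above no longer walks the stack word by word looking for the frame magic; it maps the given stack address to shadow memory, scans the shadow down to the frame's left redzone, and reads the frame magic plus the description pointer from the two words at the base of that frame. A rough, self-contained model of the idea follows; the constants, the one-shadow-byte-per-8-byte layout and all names here are stand-ins for illustration only, not the runtime's real values.

// toy_frame_lookup.cc -- illustrative model only, not the ASan runtime.
#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint8_t  kLeftRedzoneMagic = 0xf1;        // made-up stand-in
static const uint64_t kFrameMagic       = 0x41B58AB3;  // made-up stand-in

// A fake 64-word "stack" with one shadow byte per 8-byte word.
static uint64_t stack_mem[64];
static uint8_t  shadow[64];

static size_t MemToShadow(const uint64_t *p) { return (size_t)(p - stack_mem); }

static const char *FrameNameByAddr(const uint64_t *addr) {
  size_t s = MemToShadow(addr);
  // Walk the shadow down until we hit the frame's left redzone ...
  while (s > 0 && shadow[s] != kLeftRedzoneMagic) s--;
  // ... then walk past the redzone itself.
  while (s > 0 && shadow[s] == kLeftRedzoneMagic) s--;
  if (shadow[s] == kLeftRedzoneMagic) return "UNKNOWN";  // ran off the bottom
  // The two words at the base of the frame (the region the left redzone
  // covers) hold the frame magic and a pointer to the description string.
  const uint64_t *frame = &stack_mem[s + 1];
  if (frame[0] != kFrameMagic) return "UNKNOWN";
  return (const char *)(uintptr_t)frame[1];
}

int main() {
  const char *descr = "1 32 16 9 some_function";
  memset(shadow, 0, sizeof(shadow));
  for (size_t w = 4; w <= 7; w++) shadow[w] = kLeftRedzoneMagic;  // left redzone
  stack_mem[4] = kFrameMagic;                 // frame base: magic word
  stack_mem[5] = (uint64_t)(uintptr_t)descr;  // frame base + 1: description
  printf("frame for word 15: %s\n", FrameNameByAddr(&stack_mem[15]));
  return 0;
}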
diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h
index c382c85..9a032fe 100644
--- a/lib/asan/asan_thread.h
+++ b/lib/asan/asan_thread.h
@@ -18,10 +18,11 @@
#include "asan_internal.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
-const size_t kMaxThreadStackSize = 16 * (1 << 20); // 16M
+const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
class AsanThread;
@@ -30,12 +31,12 @@ class AsanThread;
class AsanThreadSummary {
public:
explicit AsanThreadSummary(LinkerInitialized) { } // for T0.
- AsanThreadSummary(int tid, int parent_tid, AsanStackTrace *stack)
- : tid_(tid),
- parent_tid_(parent_tid),
- announced_(false) {
+ void Init(u32 parent_tid, AsanStackTrace *stack) {
+ parent_tid_ = parent_tid;
+ announced_ = false;
+ tid_ = kInvalidTid;
if (stack) {
- stack_ = *stack;
+ internal_memcpy(&stack_, stack, sizeof(*stack));
}
thread_ = 0;
}
@@ -43,16 +44,19 @@ class AsanThreadSummary {
if (tid_ == 0) return; // no need to announce the main thread.
if (!announced_) {
announced_ = true;
- Printf("Thread T%d created by T%d here:\n", tid_, parent_tid_);
+ AsanPrintf("Thread T%d created by T%d here:\n", tid_, parent_tid_);
stack_.PrintStack();
}
}
- int tid() { return tid_; }
+ u32 tid() { return tid_; }
+ void set_tid(u32 tid) { tid_ = tid; }
AsanThread *thread() { return thread_; }
void set_thread(AsanThread *thread) { thread_ = thread; }
+ static void TSDDtor(void *tsd);
+
private:
- int tid_;
- int parent_tid_;
+ u32 tid_;
+ u32 parent_tid_;
bool announced_;
AsanStackTrace stack_;
AsanThread *thread_;
@@ -62,23 +66,23 @@ class AsanThreadSummary {
class AsanThread {
public:
explicit AsanThread(LinkerInitialized); // for T0.
- AsanThread(int parent_tid, void *(*start_routine) (void *),
- void *arg, AsanStackTrace *stack);
- ~AsanThread();
+ static AsanThread *Create(u32 parent_tid, thread_callback_t start_routine,
+ void *arg, AsanStackTrace *stack);
+ void Destroy();
void Init(); // Should be called from the thread itself.
- void *ThreadStart();
+ thread_return_t ThreadStart();
- uintptr_t stack_top() { return stack_top_; }
- uintptr_t stack_bottom() { return stack_bottom_; }
- size_t stack_size() { return stack_top_ - stack_bottom_; }
- int tid() { return summary_->tid(); }
+ uptr stack_top() { return stack_top_; }
+ uptr stack_bottom() { return stack_bottom_; }
+ uptr stack_size() { return stack_top_ - stack_bottom_; }
+ u32 tid() { return summary_->tid(); }
AsanThreadSummary *summary() { return summary_; }
void set_summary(AsanThreadSummary *summary) { summary_ = summary; }
- const char *GetFrameNameByAddr(uintptr_t addr, uintptr_t *offset);
+ const char *GetFrameNameByAddr(uptr addr, uptr *offset);
- bool AddrIsInStack(uintptr_t addr) {
+ bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
@@ -86,17 +90,15 @@ class AsanThread {
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
- static const int kInvalidTid = -1;
-
private:
void SetThreadStackTopAndBottom();
void ClearShadowForThreadStack();
AsanThreadSummary *summary_;
- void *(*start_routine_) (void *param);
+ thread_callback_t start_routine_;
void *arg_;
- uintptr_t stack_top_;
- uintptr_t stack_bottom_;
+ uptr stack_top_;
+ uptr stack_bottom_;
FakeStack fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
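The header now exposes a static Create plus an explicit Destroy instead of a public constructor/destructor pair, and the Destroy implementation earlier in this patch releases the object with UnmapOrDie on a page-rounded region. Below is a hypothetical standalone sketch of that allocation pattern using plain mmap/munmap and made-up names (assuming Linux-style flags); it is not the runtime's MmapOrDie/UnmapOrDie.

// create_destroy_sketch.cc -- illustration of the Create/Destroy pattern.
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstdio>
#include <new>

class Worker {
 public:
  static Worker *Create(int id) {
    void *mem = mmap(nullptr, RoundUpToPage(sizeof(Worker)),
                     PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  // Linux-style flags
    assert(mem != MAP_FAILED);
    return new (mem) Worker(id);  // placement-construct in the raw mapping
  }
  void Destroy() {
    // Return the whole page-rounded region; no operator delete involved.
    munmap(this, RoundUpToPage(sizeof(Worker)));
  }
  int id() const { return id_; }

 private:
  explicit Worker(int id) : id_(id) {}
  static size_t RoundUpToPage(size_t n) {
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    return (n + page - 1) & ~(page - 1);
  }
  int id_;
};

int main() {
  Worker *w = Worker::Create(7);
  printf("worker %d\n", w->id());
  w->Destroy();
  return 0;
}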
diff --git a/lib/asan/asan_thread_registry.cc b/lib/asan/asan_thread_registry.cc
index 39fba44..4540d58 100644
--- a/lib/asan/asan_thread_registry.cc
+++ b/lib/asan/asan_thread_registry.cc
@@ -1,4 +1,4 @@
-//===-- asan_thread_registry.cc ---------------------------------*- C++ -*-===//
+//===-- asan_thread_registry.cc -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,8 +16,7 @@
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
-
-#include <limits.h>
+#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
@@ -27,48 +26,6 @@ AsanThreadRegistry &asanThreadRegistry() {
return asan_thread_registry;
}
-#ifdef ANDROID
-#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
-#define PTHREAD_DESTRUCTOR_ITERATIONS 4
-#endif
-#endif
-
-// Dark magic below. In order to be able to notice that we're not handling
-// some thread creation routines (e.g. on Mac OS) we want to distinguish the
-// thread that used to have a corresponding AsanThread object from the thread
-// that never had one. That's why upon AsanThread destruction we set the
-// pthread_key value to some odd number (that's not a valid pointer), instead
-// of NULL.
-// Because the TSD destructor for a non-NULL key value is called iteratively,
-// we increase the value by two, keeping it an invalid pointer.
-// Because the TSD implementations are allowed to call such a destructor
-// infinitely (see
-// http://pubs.opengroup.org/onlinepubs/009604499/functions/pthread_key_create.html
-// ), we exit the program after a certain number of iterations.
-static void DestroyAsanTsd(void *tsd) {
- intptr_t iter = (intptr_t)tsd;
- if (iter % 2 == 0) {
- // The pointer is valid.
- AsanThread *t = (AsanThread*)tsd;
- if (t != asanThreadRegistry().GetMain()) {
- delete t;
- }
- iter = 1;
- } else {
- // The pointer is invalid -- we've already destroyed the TSD before.
- // If |iter| is too big, we're in the infinite loop. This should be
- // impossible on the systems AddressSanitizer was tested on.
- CHECK(iter < 4 * PTHREAD_DESTRUCTOR_ITERATIONS);
- iter += 2;
- }
- CHECK(0 == pthread_setspecific(asanThreadRegistry().GetTlsKey(),
- (void*)iter));
- if (FLAG_v >= 2) {
- Report("DestroyAsanTsd: writing %p to the TSD slot of thread %p\n",
- (void*)iter, pthread_self());
- }
-}
-
AsanThreadRegistry::AsanThreadRegistry(LinkerInitialized x)
: main_thread_(x),
main_thread_summary_(x),
@@ -76,26 +33,25 @@ AsanThreadRegistry::AsanThreadRegistry(LinkerInitialized x)
mu_(x) { }
void AsanThreadRegistry::Init() {
- CHECK(0 == pthread_key_create(&tls_key_, DestroyAsanTsd));
- tls_key_created_ = true;
- SetCurrent(&main_thread_);
+ AsanTSDInit(AsanThreadSummary::TSDDtor);
main_thread_.set_summary(&main_thread_summary_);
main_thread_summary_.set_thread(&main_thread_);
- thread_summaries_[0] = &main_thread_summary_;
- n_threads_ = 1;
+ RegisterThread(&main_thread_);
+ SetCurrent(&main_thread_);
+ // At this point only one thread exists.
+ inited_ = true;
}
-void AsanThreadRegistry::RegisterThread(AsanThread *thread, int parent_tid,
- AsanStackTrace *stack) {
+void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
ScopedLock lock(&mu_);
- CHECK(n_threads_ > 0);
- int tid = n_threads_;
+ u32 tid = n_threads_;
n_threads_++;
CHECK(n_threads_ < kMaxNumberOfThreads);
- AsanThreadSummary *summary = new AsanThreadSummary(tid, parent_tid, stack);
- summary->set_thread(thread);
+
+ AsanThreadSummary *summary = thread->summary();
+ CHECK(summary != 0);
+ summary->set_tid(tid);
thread_summaries_[tid] = summary;
- thread->set_summary(summary);
}
void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
@@ -103,7 +59,7 @@ void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
FlushToAccumulatedStatsUnlocked(&thread->stats());
AsanThreadSummary *summary = thread->summary();
CHECK(summary);
- summary->set_thread(NULL);
+ summary->set_thread(0);
}
AsanThread *AsanThreadRegistry::GetMain() {
@@ -111,45 +67,35 @@ AsanThread *AsanThreadRegistry::GetMain() {
}
AsanThread *AsanThreadRegistry::GetCurrent() {
- CHECK(tls_key_created_);
- AsanThread *thread = (AsanThread*)pthread_getspecific(tls_key_);
- if ((!thread || (intptr_t)thread % 2) && FLAG_v >= 2) {
- Report("GetCurrent: %p for thread %p\n", thread, pthread_self());
- }
- if ((intptr_t)thread % 2) {
- // Invalid pointer -- we've deleted the AsanThread already. Return NULL as
- // if the TSD was empty.
- // TODO(glider): if the code in the client TSD destructor calls
- // pthread_create(), we'll set the parent tid of the spawned thread to NULL,
- // although the creation stack will belong to the current thread. This may
- // confuse the user, but is quite unlikely.
- return NULL;
- } else {
- // NULL or valid pointer to AsanThread.
- return thread;
+ AsanThreadSummary *summary = (AsanThreadSummary *)AsanTSDGet();
+ if (!summary) {
+#ifdef ANDROID
+ // On Android, libc constructor is called _after_ asan_init, and cleans up
+ // TSD. Try to figure out if this is still the main thread by the stack
+ // address. We are not entirely sure that we have correct main thread
+ // limits, so only do this magic on Android, and only if the found thread is
+ // the main thread.
+ AsanThread* thread = FindThreadByStackAddress((uptr)&summary);
+ if (thread && thread->tid() == 0) {
+ SetCurrent(thread);
+ return thread;
+ }
+#endif
+ return 0;
}
+ return summary->thread();
}
void AsanThreadRegistry::SetCurrent(AsanThread *t) {
- if (FLAG_v >=2) {
- Report("SetCurrent: %p for thread %p\n", t, pthread_self());
+ CHECK(t->summary());
+ if (flags()->verbosity >= 2) {
+ Report("SetCurrent: %p for thread %p\n",
+ t->summary(), (void*)GetThreadSelf());
}
// Make sure we do not reset the current AsanThread.
- intptr_t old_key = (intptr_t)pthread_getspecific(tls_key_);
- CHECK(!old_key || old_key % 2);
- CHECK(0 == pthread_setspecific(tls_key_, t));
- CHECK(pthread_getspecific(tls_key_) == t);
-}
-
-pthread_key_t AsanThreadRegistry::GetTlsKey() {
- return tls_key_;
-}
-
-// Returns true iff DestroyAsanTsd() was already called for this thread.
-bool AsanThreadRegistry::IsCurrentThreadDying() {
- CHECK(tls_key_created_);
- intptr_t thread = (intptr_t)pthread_getspecific(tls_key_);
- return (bool)(thread % 2);
+ CHECK(AsanTSDGet() == 0);
+ AsanTSDSet(t->summary());
+ CHECK(AsanTSDGet() == t->summary());
}
AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
@@ -163,19 +109,19 @@ AsanStats AsanThreadRegistry::GetAccumulatedStats() {
return accumulated_stats_;
}
-size_t AsanThreadRegistry::GetCurrentAllocatedBytes() {
+uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.malloced - accumulated_stats_.freed;
}
-size_t AsanThreadRegistry::GetHeapSize() {
+uptr AsanThreadRegistry::GetHeapSize() {
ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped;
}
-size_t AsanThreadRegistry::GetFreeBytes() {
+uptr AsanThreadRegistry::GetFreeBytes() {
ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped
@@ -185,18 +131,17 @@ size_t AsanThreadRegistry::GetFreeBytes() {
+ accumulated_stats_.really_freed_redzones;
}
-AsanThreadSummary *AsanThreadRegistry::FindByTid(int tid) {
- CHECK(tid >= 0);
+AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
CHECK(tid < n_threads_);
CHECK(thread_summaries_[tid]);
return thread_summaries_[tid];
}
-AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uintptr_t addr) {
+AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
ScopedLock lock(&mu_);
- for (int tid = 0; tid < n_threads_; tid++) {
+ for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
- if (!t) continue;
+ if (!t || !(t->fake_stack().StackSize())) continue;
if (t->fake_stack().AddrIsInFakeStack(addr) || t->AddrIsInStack(addr)) {
return t;
}
@@ -205,20 +150,20 @@ AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uintptr_t addr) {
}
void AsanThreadRegistry::UpdateAccumulatedStatsUnlocked() {
- for (int tid = 0; tid < n_threads_; tid++) {
+ for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
- if (t != NULL) {
+ if (t != 0) {
FlushToAccumulatedStatsUnlocked(&t->stats());
}
}
}
void AsanThreadRegistry::FlushToAccumulatedStatsUnlocked(AsanStats *stats) {
- // AsanStats consists of variables of type size_t only.
- size_t *dst = (size_t*)&accumulated_stats_;
- size_t *src = (size_t*)stats;
- size_t num_fields = sizeof(AsanStats) / sizeof(size_t);
- for (size_t i = 0; i < num_fields; i++) {
+ // AsanStats consists of variables of type uptr only.
+ uptr *dst = (uptr*)&accumulated_stats_;
+ uptr *src = (uptr*)stats;
+ uptr num_fields = sizeof(AsanStats) / sizeof(uptr);
+ for (uptr i = 0; i < num_fields; i++) {
dst[i] += src[i];
src[i] = 0;
}
diff --git a/lib/asan/asan_thread_registry.h b/lib/asan/asan_thread_registry.h
index b80dd4d..7037b9e 100644
--- a/lib/asan/asan_thread_registry.h
+++ b/lib/asan/asan_thread_registry.h
@@ -30,34 +30,32 @@ class AsanThreadRegistry {
public:
explicit AsanThreadRegistry(LinkerInitialized);
void Init();
- void RegisterThread(AsanThread *thread, int parent_tid,
- AsanStackTrace *stack);
+ void RegisterThread(AsanThread *thread);
void UnregisterThread(AsanThread *thread);
AsanThread *GetMain();
- // Get the current thread. May return NULL.
+ // Get the current thread. May return 0.
AsanThread *GetCurrent();
void SetCurrent(AsanThread *t);
- pthread_key_t GetTlsKey();
- bool IsCurrentThreadDying();
- int GetCurrentTidOrMinusOne() {
+ u32 GetCurrentTidOrInvalid() {
+ if (!inited_) return 0;
AsanThread *t = GetCurrent();
- return t ? t->tid() : -1;
+ return t ? t->tid() : kInvalidTid;
}
// Returns stats for GetCurrent(), or stats for
- // T0 if GetCurrent() returns NULL.
+ // T0 if GetCurrent() returns 0.
AsanStats &GetCurrentThreadStats();
// Flushes all thread-local stats to accumulated stats, and returns
// a copy of accumulated stats.
AsanStats GetAccumulatedStats();
- size_t GetCurrentAllocatedBytes();
- size_t GetHeapSize();
- size_t GetFreeBytes();
+ uptr GetCurrentAllocatedBytes();
+ uptr GetHeapSize();
+ uptr GetFreeBytes();
- AsanThreadSummary *FindByTid(int tid);
- AsanThread *FindThreadByStackAddress(uintptr_t addr);
+ AsanThreadSummary *FindByTid(u32 tid);
+ AsanThread *FindThreadByStackAddress(uptr addr);
private:
void UpdateAccumulatedStatsUnlocked();
@@ -65,19 +63,14 @@ class AsanThreadRegistry {
// and fills "stats" with zeroes.
void FlushToAccumulatedStatsUnlocked(AsanStats *stats);
- static const int kMaxNumberOfThreads = (1 << 22); // 4M
+ static const u32 kMaxNumberOfThreads = (1 << 22); // 4M
AsanThreadSummary *thread_summaries_[kMaxNumberOfThreads];
AsanThread main_thread_;
AsanThreadSummary main_thread_summary_;
AsanStats accumulated_stats_;
- int n_threads_;
+ u32 n_threads_;
AsanLock mu_;
- // For each thread tls_key_ stores the pointer to the corresponding
- // AsanThread.
- pthread_key_t tls_key_;
- // This flag is updated only once at program startup, and then read
- // by concurrent threads.
- bool tls_key_created_;
+ bool inited_;
};
// Returns a single instance of registry.
diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc
new file mode 100644
index 0000000..9e899d5
--- /dev/null
+++ b/lib/asan/asan_win.cc
@@ -0,0 +1,181 @@
+//===-- asan_win.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific details.
+//===----------------------------------------------------------------------===//
+#ifdef _WIN32
+#include <windows.h>
+
+#include <dbghelp.h>
+#include <stdlib.h>
+
+#include <new> // FIXME: temporarily needed for placement new in AsanLock.
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_lock.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+namespace __asan {
+
+// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
+static AsanLock dbghelp_lock(LINKER_INITIALIZED);
+static bool dbghelp_initialized = false;
+#pragma comment(lib, "dbghelp.lib")
+
+void AsanStackTrace::GetStackTrace(uptr max_s, uptr pc, uptr bp) {
+ max_size = max_s;
+ void *tmp[kStackTraceMax];
+
+ // FIXME: CaptureStackBackTrace might be too slow for us.
+ // FIXME: Compare with StackWalk64.
+ // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
+ uptr cs_ret = CaptureStackBackTrace(1, max_size, tmp, 0),
+ offset = 0;
+ // Skip the RTL frames by searching for the PC in the stacktrace.
+ // FIXME: this doesn't work well for the malloc/free stacks yet.
+ for (uptr i = 0; i < cs_ret; i++) {
+ if (pc != (uptr)tmp[i])
+ continue;
+ offset = i;
+ break;
+ }
+
+ size = cs_ret - offset;
+ for (uptr i = 0; i < size; i++)
+ trace[i] = (uptr)tmp[i + offset];
+}
+
+bool __asan_WinSymbolize(const void *addr, char *out_buffer, int buffer_size) {
+ ScopedLock lock(&dbghelp_lock);
+ if (!dbghelp_initialized) {
+ SymSetOptions(SYMOPT_DEFERRED_LOADS |
+ SYMOPT_UNDNAME |
+ SYMOPT_LOAD_LINES);
+ CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
+ // FIXME: We don't call SymCleanup() on exit yet - should we?
+ dbghelp_initialized = true;
+ }
+
+ // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ DWORD64 offset = 0;
+ BOOL got_objname = SymFromAddr(GetCurrentProcess(),
+ (DWORD64)addr, &offset, symbol);
+ if (!got_objname)
+ return false;
+
+ DWORD unused;
+ IMAGEHLP_LINE64 info;
+ info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(),
+ (DWORD64)addr, &unused, &info);
+ int written = 0;
+ out_buffer[0] = '\0';
+ // FIXME: it might be useful to print out 'obj' or 'obj+offset' info too.
+ if (got_fileline) {
+ written += internal_snprintf(out_buffer + written, buffer_size - written,
+ " %s %s:%d", symbol->Name,
+ info.FileName, info.LineNumber);
+ } else {
+ written += internal_snprintf(out_buffer + written, buffer_size - written,
+ " %s+0x%p", symbol->Name, offset);
+ }
+ return true;
+}
+
+// ---------------------- AsanLock ---------------- {{{1
+enum LockState {
+ LOCK_UNINITIALIZED = 0,
+ LOCK_READY = -1,
+};
+
+AsanLock::AsanLock(LinkerInitialized li) {
+ // FIXME: see comments in AsanLock::Lock() for the details.
+ CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
+
+ CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
+ InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ owner_ = LOCK_READY;
+}
+
+void AsanLock::Lock() {
+ if (owner_ == LOCK_UNINITIALIZED) {
+ // FIXME: hm, global AsanLock objects are not initialized?!?
+ // This might be a side effect of the clang+cl+link Frankenbuild...
+ new(this) AsanLock((LinkerInitialized)(LINKER_INITIALIZED + 1));
+
+ // FIXME: If it turns out the linker doesn't invoke our
+ // constructors, we should probably manually Lock/Unlock all the global
+ // locks while we're starting in one thread to avoid double-init races.
+ }
+ EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ CHECK(owner_ == LOCK_READY);
+ owner_ = GetThreadSelf();
+}
+
+void AsanLock::Unlock() {
+ CHECK(owner_ == GetThreadSelf());
+ owner_ = LOCK_READY;
+ LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+}
+
+// ---------------------- TSD ---------------- {{{1
+static bool tsd_key_inited = false;
+
+static __declspec(thread) void *fake_tsd = 0;
+
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ // FIXME: we're ignoring the destructor for now.
+ tsd_key_inited = true;
+}
+
+void *AsanTSDGet() {
+ CHECK(tsd_key_inited);
+ return fake_tsd;
+}
+
+void AsanTSDSet(void *tsd) {
+ CHECK(tsd_key_inited);
+ fake_tsd = tsd;
+}
+
+// ---------------------- Various stuff ---------------- {{{1
+void *AsanDoesNotSupportStaticLinkage() {
+#if defined(_DEBUG)
+#error Please build the runtime with a non-debug CRT: /MD or /MT
+#endif
+ return 0;
+}
+
+void SetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void UnsetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void InstallSignalHandlers() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void AsanPlatformThreadInit() {
+ // Nothing here for now.
+}
+
+} // namespace __asan
+
+#endif // _WIN32
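asan_win.cc implements the AsanTSDInit/AsanTSDGet/AsanTSDSet trio with a __declspec(thread) slot and ignores the destructor for now; on POSIX platforms the same contract maps naturally onto a pthread key, whose destructor is what lets AsanThreadSummary::TSDDtor run when a thread exits. The sketch below only illustrates that contract and is not the runtime's actual POSIX implementation.

// posix_tsd_sketch.cc -- hypothetical pthread-based counterpart, for illustration.
#include <pthread.h>
#include <cassert>
#include <cstdio>

static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void AsanTSDInit(void (*destructor)(void *tsd)) {
  assert(!tsd_key_inited);
  tsd_key_inited = true;
  // The destructor fires when a thread exits with a non-null slot value.
  int res = pthread_key_create(&tsd_key, destructor);
  assert(res == 0);
  (void)res;
}

void *AsanTSDGet() {
  assert(tsd_key_inited);
  return pthread_getspecific(tsd_key);
}

void AsanTSDSet(void *tsd) {
  assert(tsd_key_inited);
  pthread_setspecific(tsd_key, tsd);
}

static void Dtor(void *value) { printf("TSD destructor got %p\n", value); }
static void *Worker(void *) {
  AsanTSDSet((void *)0x1234);
  return nullptr;  // Dtor runs as this thread exits.
}

int main() {
  AsanTSDInit(Dtor);
  pthread_t t;
  pthread_create(&t, nullptr, Worker, nullptr);
  pthread_join(t, nullptr);
  return 0;
}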
diff --git a/lib/asan/output_tests/clone_test.cc b/lib/asan/output_tests/clone_test.cc
new file mode 100644
index 0000000..b18d255
--- /dev/null
+++ b/lib/asan/output_tests/clone_test.cc
@@ -0,0 +1,34 @@
+#ifdef __linux__
+#include <stdio.h>
+#include <sched.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+int Child(void *arg) {
+ char x[32] = {0}; // Stack gets poisoned.
+ printf("Child: %p\n", x);
+ _exit(1); // NoReturn, stack will remain unpoisoned unless we do something.
+}
+
+int main(int argc, char **argv) {
+ const int kStackSize = 1 << 20;
+ char child_stack[kStackSize + 1];
+ char *sp = child_stack + kStackSize; // Stack grows down.
+ printf("Parent: %p\n", sp);
+ pid_t clone_pid = clone(Child, sp, CLONE_FILES | CLONE_VM, NULL, 0, 0, 0);
+ waitpid(clone_pid, NULL, 0);
+ for (int i = 0; i < kStackSize; i++)
+ child_stack[i] = i;
+ int ret = child_stack[argc - 1];
+ printf("PASSED\n");
+ return ret;
+}
+#else // not __linux__
+#include <stdio.h>
+int main() {
+ printf("PASSED\n");
+ // Check-Common: PASSED
+}
+#endif
diff --git a/lib/asan/output_tests/deep_tail_call.cc b/lib/asan/output_tests/deep_tail_call.cc
new file mode 100644
index 0000000..cb69e89
--- /dev/null
+++ b/lib/asan/output_tests/deep_tail_call.cc
@@ -0,0 +1,15 @@
+// Check-Common: AddressSanitizer global-buffer-overflow
+int global[10];
+// Check-Common: {{#0.*call4}}
+void __attribute__((noinline)) call4(int i) { global[i+10]++; }
+// Check-Common: {{#1.*call3}}
+void __attribute__((noinline)) call3(int i) { call4(i); }
+// Check-Common: {{#2.*call2}}
+void __attribute__((noinline)) call2(int i) { call3(i); }
+// Check-Common: {{#3.*call1}}
+void __attribute__((noinline)) call1(int i) { call2(i); }
+// Check-Common: {{#4.*main}}
+int main(int argc, char **argv) {
+ call1(argc);
+ return global[0];
+}
diff --git a/lib/asan/output_tests/default_options.cc b/lib/asan/output_tests/default_options.cc
new file mode 100644
index 0000000..d6c7029
--- /dev/null
+++ b/lib/asan/output_tests/default_options.cc
@@ -0,0 +1,12 @@
+const char *kAsanDefaultOptions="verbosity=1 foo=bar";
+
+extern "C"
+__attribute__((no_address_safety_analysis))
+const char *__asan_default_options() {
+ return kAsanDefaultOptions;
+}
+
+int main() {
+ // Check-Common: foo=bar
+ return 0;
+}
diff --git a/lib/asan/tests/dlclose-test-so.cc b/lib/asan/output_tests/dlclose-test-so.cc
index fae2f81..73e0050 100644
--- a/lib/asan/tests/dlclose-test-so.cc
+++ b/lib/asan/output_tests/dlclose-test-so.cc
@@ -1,4 +1,4 @@
-//===-- asan_rtl.cc ------------*- C++ -*-===//
+//===----------- dlclose-test-so.cc -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/asan/tests/dlclose-test.cc b/lib/asan/output_tests/dlclose-test.cc
index 3078866..16126eb 100644
--- a/lib/asan/tests/dlclose-test.cc
+++ b/lib/asan/output_tests/dlclose-test.cc
@@ -1,4 +1,4 @@
-//===-- asan_rtl.cc ------------*- C++ -*-===//
+//===----------- dlclose-test.cc --------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -69,5 +69,6 @@ int main(int argc, char *argv[]) {
}
addr[1] = 2; // BOOM (if the bug is not fixed).
printf("PASS\n");
+ // Check-Common: PASS
return 0;
}
diff --git a/lib/asan/tests/global-overflow.cc b/lib/asan/output_tests/global-overflow.cc
index b85c4d2..a63eb73 100644
--- a/lib/asan/tests/global-overflow.cc
+++ b/lib/asan/output_tests/global-overflow.cc
@@ -7,6 +7,10 @@ int main(int argc, char **argv) {
memset(YYY, 0, 10);
memset(ZZZ, 0, 10);
int res = YYY[argc * 10]; // BOOOM
+ // Check-Common: {{READ of size 1 at 0x.* thread T0}}
+ // Check-Common: {{ #0 0x.* in main .*global-overflow.cc:9}}
+ // Check-Common: {{0x.* is located 0 bytes to the right of global variable}}
+ // Check-Common: {{.*YYY.* of size 10}}
res += XXX[argc] + ZZZ[argc];
return res;
}
diff --git a/lib/asan/output_tests/heap-overflow.cc b/lib/asan/output_tests/heap-overflow.cc
new file mode 100644
index 0000000..534fbe0
--- /dev/null
+++ b/lib/asan/output_tests/heap-overflow.cc
@@ -0,0 +1,22 @@
+#include <stdlib.h>
+#include <string.h>
+int main(int argc, char **argv) {
+ char *x = (char*)malloc(10 * sizeof(char));
+ memset(x, 0, 10);
+ int res = x[argc * 10]; // BOOOM
+ free(x);
+ return res;
+}
+
+// Check-Common: {{READ of size 1 at 0x.* thread T0}}
+// Check-Common: {{ #0 0x.* in main .*heap-overflow.cc:6}}
+// Check-Common: {{0x.* is located 0 bytes to the right of 10-byte region}}
+// Check-Common: {{allocated by thread T0 here:}}
+
+// Check-Linux: {{ #0 0x.* in .*malloc}}
+// Check-Linux: {{ #1 0x.* in main .*heap-overflow.cc:4}}
+
+// Check-Darwin: {{ #0 0x.* in .*mz_malloc.*}}
+// Check-Darwin: {{ #1 0x.* in malloc_zone_malloc.*}}
+// Check-Darwin: {{ #2 0x.* in malloc.*}}
+// Check-Darwin: {{ #3 0x.* in main heap-overflow.cc:4}}
diff --git a/lib/asan/output_tests/interception_failure_test-linux.cc b/lib/asan/output_tests/interception_failure_test-linux.cc
new file mode 100644
index 0000000..9e8b753
--- /dev/null
+++ b/lib/asan/output_tests/interception_failure_test-linux.cc
@@ -0,0 +1,17 @@
+#include <stdlib.h>
+#include <stdio.h>
+
+extern "C" long strtol(const char *nptr, char **endptr, int base) {
+ fprintf(stderr, "my_strtol_interceptor\n");
+ return 0;
+}
+
+int main() {
+ char *x = (char*)malloc(10 * sizeof(char));
+ free(x);
+ return (int)strtol(x, 0, 10);
+}
+
+// Check-Common: my_strtol_interceptor
+// CHECK-NOT: heap-use-after-free
+
diff --git a/lib/asan/output_tests/interception_malloc_test-linux.cc b/lib/asan/output_tests/interception_malloc_test-linux.cc
new file mode 100644
index 0000000..4bb3bd6
--- /dev/null
+++ b/lib/asan/output_tests/interception_malloc_test-linux.cc
@@ -0,0 +1,19 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+extern "C" void *__interceptor_malloc(size_t size);
+extern "C" void *malloc(size_t size) {
+ write(2, "malloc call\n", sizeof("malloc call\n") - 1);
+ return __interceptor_malloc(size);
+}
+
+int main() {
+ char *x = (char*)malloc(10 * sizeof(char));
+ free(x);
+ return (int)strtol(x, 0, 10);
+}
+
+// Check-Common: malloc call
+// Check-Common: heap-use-after-free
+
diff --git a/lib/asan/output_tests/interception_test-linux.cc b/lib/asan/output_tests/interception_test-linux.cc
new file mode 100644
index 0000000..0523510
--- /dev/null
+++ b/lib/asan/output_tests/interception_test-linux.cc
@@ -0,0 +1,18 @@
+#include <stdlib.h>
+#include <stdio.h>
+
+extern "C" long __interceptor_strtol(const char *nptr, char **endptr, int base);
+extern "C" long strtol(const char *nptr, char **endptr, int base) {
+ fprintf(stderr, "my_strtol_interceptor\n");
+ return __interceptor_strtol(nptr, endptr, base);
+}
+
+int main() {
+ char *x = (char*)malloc(10 * sizeof(char));
+ free(x);
+ return (int)strtol(x, 0, 10);
+}
+
+// Check-Common: my_strtol_interceptor
+// Check-Common: heap-use-after-free
+
diff --git a/lib/asan/output_tests/large_func_test.cc b/lib/asan/output_tests/large_func_test.cc
new file mode 100644
index 0000000..49751b3
--- /dev/null
+++ b/lib/asan/output_tests/large_func_test.cc
@@ -0,0 +1,48 @@
+#include <stdlib.h>
+__attribute__((noinline))
+static void LargeFunction(int *x, int zero) {
+ x[0]++;
+ x[1]++;
+ x[2]++;
+ x[3]++;
+ x[4]++;
+ x[5]++;
+ x[6]++;
+ x[7]++;
+ x[8]++;
+ x[9]++;
+
+ x[zero + 111]++; // we should report this exact line
+
+ x[10]++;
+ x[11]++;
+ x[12]++;
+ x[13]++;
+ x[14]++;
+ x[15]++;
+ x[16]++;
+ x[17]++;
+ x[18]++;
+ x[19]++;
+}
+
+int main(int argc, char **argv) {
+ int *x = new int[100];
+ LargeFunction(x, argc - 1);
+ delete x;
+}
+
+// Check-Common: {{.*ERROR: AddressSanitizer heap-buffer-overflow on address}}
+// Check-Common: {{0x.* at pc 0x.* bp 0x.* sp 0x.*}}
+// Check-Common: {{READ of size 4 at 0x.* thread T0}}
+
+// atos incorrectly extracts the symbol name for the static functions on
+// Darwin.
+// Check-Linux: {{ #0 0x.* in LargeFunction.*large_func_test.cc:15}}
+// Check-Darwin: {{ #0 0x.* in .*LargeFunction.*large_func_test.cc:15}}
+
+// Check-Common: {{ #1 0x.* in main .*large_func_test.cc:31}}
+// Check-Common: {{0x.* is located 44 bytes to the right of 400-byte region}}
+// Check-Common: {{allocated by thread T0 here:}}
+// Check-Common: {{ #0 0x.* in operator new.*}}
+// Check-Common: {{ #1 0x.* in main .*large_func_test.cc:30}}
diff --git a/lib/asan/output_tests/memcmp_test.cc b/lib/asan/output_tests/memcmp_test.cc
new file mode 100644
index 0000000..d0e5a43
--- /dev/null
+++ b/lib/asan/output_tests/memcmp_test.cc
@@ -0,0 +1,10 @@
+#include <string.h>
+int main(int argc, char **argv) {
+ char a1[] = {argc, 2, 3, 4};
+ char a2[] = {1, 2*argc, 3, 4};
+// Check-Common: AddressSanitizer stack-buffer-overflow
+// Check-Common: {{#0.*memcmp}}
+// Check-Common: {{#1.*main}}
+ int res = memcmp(a1, a2, 4 + argc); // BOOM
+ return res;
+}
diff --git a/lib/asan/output_tests/null_deref.cc b/lib/asan/output_tests/null_deref.cc
new file mode 100644
index 0000000..c152a42
--- /dev/null
+++ b/lib/asan/output_tests/null_deref.cc
@@ -0,0 +1,17 @@
+__attribute__((noinline))
+static void NullDeref(int *ptr) {
+ ptr[10]++;
+}
+int main() {
+ NullDeref((int*)0);
+}
+
+// Check-Common: {{.*ERROR: AddressSanitizer crashed on unknown address}}
+// Check-Common: {{0x0*00028 .*pc 0x.*}}
+// Check-Common: {{AddressSanitizer can not provide additional info. ABORTING}}
+
+// atos on Mac cannot extract the symbol name correctly.
+// Check-Linux: {{ #0 0x.* in NullDeref.*null_deref.cc:3}}
+// Check-Darwin: {{ #0 0x.* in .*NullDeref.*null_deref.cc:3}}
+
+// Check-Common: {{ #1 0x.* in main.*null_deref.cc:6}}
diff --git a/lib/asan/tests/shared-lib-test-so.cc b/lib/asan/output_tests/shared-lib-test-so.cc
index c3b3bc2..686a245 100644
--- a/lib/asan/tests/shared-lib-test-so.cc
+++ b/lib/asan/output_tests/shared-lib-test-so.cc
@@ -1,4 +1,4 @@
-//===-- asan_rtl.cc ------------*- C++ -*-===//
+//===----------- shared-lib-test-so.cc --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/asan/tests/shared-lib-test.cc b/lib/asan/output_tests/shared-lib-test.cc
index e492572..060fcde 100644
--- a/lib/asan/tests/shared-lib-test.cc
+++ b/lib/asan/output_tests/shared-lib-test.cc
@@ -1,4 +1,4 @@
-//===-- asan_rtl.cc ------------*- C++ -*-===//
+//===----------- shared-lib-test.cc -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -32,6 +32,11 @@ int main(int argc, char *argv[]) {
if (!inc) return 1;
printf("ok\n");
inc(1);
- inc(-1);
+ inc(-1); // BOOM
return 0;
}
+
+// Check-Common: {{.*ERROR: AddressSanitizer global-buffer-overflow}}
+// Check-Common: {{READ of size 4 at 0x.* thread T0}}
+// Check-Common: {{ #0 0x.*}}
+// Check-Common: {{ #1 0x.* in main .*shared-lib-test.cc:35}}
diff --git a/lib/asan/output_tests/stack-overflow.cc b/lib/asan/output_tests/stack-overflow.cc
new file mode 100644
index 0000000..35fa8a6
--- /dev/null
+++ b/lib/asan/output_tests/stack-overflow.cc
@@ -0,0 +1,11 @@
+#include <string.h>
+int main(int argc, char **argv) {
+ char x[10];
+ memset(x, 0, 10);
+ int res = x[argc * 10]; // BOOOM
+ return res;
+}
+
+// Check-Common: {{READ of size 1 at 0x.* thread T0}}
+// Check-Common: {{ #0 0x.* in main .*stack-overflow.cc:5}}
+// Check-Common: {{Address 0x.* is .* frame <main>}}
diff --git a/lib/asan/tests/stack-use-after-return.cc b/lib/asan/output_tests/stack-use-after-return.cc.disabled
index 9098edf..f497157 100644
--- a/lib/asan/tests/stack-use-after-return.cc
+++ b/lib/asan/output_tests/stack-use-after-return.cc.disabled
@@ -16,6 +16,9 @@ __attribute__((noinline))
void Func2(char *x) {
fprintf(stderr, "2: %p\n", x);
*x = 1;
+ // Check-Common: {{WRITE of size 1 .* thread T0}}
+ // Check-Common: {{ #0.*Func2.*stack-use-after-return.cc:18}}
+ // Check-Common: {{is located in frame <.*Func1.*> of T0's stack}}
}
int main(int argc, char **argv) {
diff --git a/lib/asan/output_tests/strncpy-overflow.cc b/lib/asan/output_tests/strncpy-overflow.cc
new file mode 100644
index 0000000..66d5810
--- /dev/null
+++ b/lib/asan/output_tests/strncpy-overflow.cc
@@ -0,0 +1,24 @@
+#include <string.h>
+#include <stdlib.h>
+int main(int argc, char **argv) {
+ char *hello = (char*)malloc(6);
+ strcpy(hello, "hello");
+ char *short_buffer = (char*)malloc(9);
+ strncpy(short_buffer, hello, 10); // BOOM
+ return short_buffer[8];
+}
+
+// Check-Common: {{WRITE of size 1 at 0x.* thread T0}}
+// Check-Linux: {{ #0 0x.* in .*strncpy}}
+// Check-Darwin: {{ #0 0x.* in wrap_strncpy}}
+// Check-Common: {{ #1 0x.* in main .*strncpy-overflow.cc:7}}
+// Check-Common: {{0x.* is located 0 bytes to the right of 9-byte region}}
+// Check-Common: {{allocated by thread T0 here:}}
+
+// Check-Linux: {{ #0 0x.* in .*malloc}}
+// Check-Linux: {{ #1 0x.* in main .*strncpy-overflow.cc:6}}
+
+// Check-Darwin: {{ #0 0x.* in .*mz_malloc.*}}
+// Check-Darwin: {{ #1 0x.* in malloc_zone_malloc.*}}
+// Check-Darwin: {{ #2 0x.* in malloc.*}}
+// Check-Darwin: {{ #3 0x.* in main .*strncpy-overflow.cc:6}}
diff --git a/lib/asan/output_tests/test_output.sh b/lib/asan/output_tests/test_output.sh
new file mode 100755
index 0000000..6510043
--- /dev/null
+++ b/lib/asan/output_tests/test_output.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+set -e # fail on any error
+
+OS=`uname`
+CXX=$1
+CC=$2
+FILE_CHECK=$3
+CXXFLAGS="-mno-omit-leaf-frame-pointer -fno-omit-frame-pointer -fno-optimize-sibling-calls -g"
+SYMBOLIZER=../scripts/asan_symbolize.py
+TMP_ASAN_REPORT=asan_report.tmp
+
+run_program() {
+ ./$1 2>&1 | $SYMBOLIZER 2> /dev/null | c++filt > $TMP_ASAN_REPORT
+}
+
+# check_program exe_file source_file check_prefix
+check_program() {
+ run_program $1
+ $FILE_CHECK $2 --check-prefix=$3 < $TMP_ASAN_REPORT
+ rm -f $TMP_ASAN_REPORT
+}
+
+C_TEST=use-after-free
+echo "Sanity checking a test in pure C"
+$CC -g -faddress-sanitizer -O2 $C_TEST.c
+check_program a.out $C_TEST.c CHECK
+rm ./a.out
+
+echo "Sanity checking a test in pure C with -pie"
+$CC -g -faddress-sanitizer -O2 $C_TEST.c -pie
+check_program a.out $C_TEST.c CHECK
+rm ./a.out
+
+echo "Testing sleep_before_dying"
+$CC -g -faddress-sanitizer -O2 $C_TEST.c
+export ASAN_OPTIONS="sleep_before_dying=1"
+check_program a.out $C_TEST.c CHECKSLEEP
+export ASAN_OPTIONS=""
+rm ./a.out
+
+# FIXME: some tests do not need to be run for all the combinations of arch
+# and optimization mode.
+for t in *.cc; do
+ for b in 32 64; do
+ for O in 0 1 2 3; do
+ c=`basename $t .cc`
+ if [[ "$c" == *"-so" ]]; then
+ continue
+ fi
+ if [[ "$c" == *"-linux" ]]; then
+ if [[ "$OS" != "Linux" ]]; then
+ continue
+ fi
+ fi
+ c_so=$c-so
+ exe=$c.$b.O$O
+ so=$c.$b.O$O-so.so
+ echo testing $exe
+ build_command="$CXX $CXXFLAGS -m$b -faddress-sanitizer -O$O $c.cc -o $exe"
+ [ "$DEBUG" == "1" ] && echo $build_command
+ $build_command
+ [ -e "$c_so.cc" ] && $CXX $CXXFLAGS -m$b -faddress-sanitizer -O$O $c_so.cc -fPIC -shared -o $so
+ run_program $exe
+ # Check common expected lines for OS.
+ $FILE_CHECK $c.cc --check-prefix="Check-Common" < $TMP_ASAN_REPORT
+ # Check OS-specific lines.
+ if [ `grep -c "Check-$OS" $c.cc` -gt 0 ]
+ then
+ $FILE_CHECK $c.cc --check-prefix="Check-$OS" < $TMP_ASAN_REPORT
+ fi
+ rm ./$exe
+ rm ./$TMP_ASAN_REPORT
+ [ -e "$so" ] && rm ./$so
+ done
+ done
+done
+
+exit 0
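test_output.sh builds every *.cc test at -O0 through -O3 in both 32- and 64-bit mode, runs it through asan_symbolize.py, and feeds the report to FileCheck with --check-prefix=Check-Common (plus Check-Linux or Check-Darwin when the source mentions them), so each test carries its own expectations as comments. A hypothetical test in the same style, not part of this patch:

// hypothetical-overflow.cc -- example of the output-test pattern only.
#include <stdlib.h>
int main(int argc, char **argv) {
  int *a = (int*)malloc(4 * sizeof(int));
  int res = a[argc + 3];  // BOOM: one element past the allocation when argc == 1
  free(a);
  return res;
}
// Check-Common: {{ERROR: AddressSanitizer heap-buffer-overflow}}
// Check-Common: {{READ of size 4 at 0x.* thread T0}}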
diff --git a/lib/asan/tests/use-after-free.c b/lib/asan/output_tests/use-after-free.c
index 60626bf..801d3f6 100644
--- a/lib/asan/tests/use-after-free.c
+++ b/lib/asan/output_tests/use-after-free.c
@@ -4,3 +4,6 @@ int main() {
free(x);
return x[5];
}
+
+// CHECK: heap-use-after-free
+// CHECKSLEEP: Sleeping for 1 second
diff --git a/lib/asan/output_tests/use-after-free.cc b/lib/asan/output_tests/use-after-free.cc
new file mode 100644
index 0000000..c3e9dbe
--- /dev/null
+++ b/lib/asan/output_tests/use-after-free.cc
@@ -0,0 +1,31 @@
+#include <stdlib.h>
+int main() {
+ char *x = (char*)malloc(10 * sizeof(char));
+ free(x);
+ return x[5];
+}
+
+// Check-Common: {{.*ERROR: AddressSanitizer heap-use-after-free on address}}
+// Check-Common: {{0x.* at pc 0x.* bp 0x.* sp 0x.*}}
+// Check-Common: {{READ of size 1 at 0x.* thread T0}}
+// Check-Common: {{ #0 0x.* in main .*use-after-free.cc:5}}
+// Check-Common: {{0x.* is located 5 bytes inside of 10-byte region .0x.*,0x.*}}
+// Check-Common: {{freed by thread T0 here:}}
+
+// Check-Linux: {{ #0 0x.* in .*free}}
+// Check-Linux: {{ #1 0x.* in main .*use-after-free.cc:4}}
+
+// Check-Darwin: {{ #0 0x.* in .*mz_free.*}}
+// We override free() on Darwin, thus no malloc_zone_free
+// Check-Darwin: {{ #1 0x.* in wrap_free}}
+// Check-Darwin: {{ #2 0x.* in main .*use-after-free.cc:4}}
+
+// Check-Common: {{previously allocated by thread T0 here:}}
+
+// Check-Linux: {{ #0 0x.* in .*malloc}}
+// Check-Linux: {{ #1 0x.* in main .*use-after-free.cc:3}}
+
+// Check-Darwin: {{ #0 0x.* in .*mz_malloc.*}}
+// Check-Darwin: {{ #1 0x.* in malloc_zone_malloc.*}}
+// Check-Darwin: {{ #2 0x.* in malloc.*}}
+// Check-Darwin: {{ #3 0x.* in main .*use-after-free.cc:3}}
diff --git a/lib/asan/scripts/asan_symbolize.py b/lib/asan/scripts/asan_symbolize.py
index 80b5927..e4897d0 100755
--- a/lib/asan/scripts/asan_symbolize.py
+++ b/lib/asan/scripts/asan_symbolize.py
@@ -14,21 +14,16 @@ import string
import subprocess
pipes = {}
+filetypes = {}
+DEBUG=False
+
+def fix_filename(file_name):
+ for path_to_cut in sys.argv[1:]:
+ file_name = re.sub(".*" + path_to_cut, "", file_name)
+ file_name = re.sub(".*asan_[a-z_]*.cc:[0-9]*", "_asan_rtl_", file_name)
+ file_name = re.sub(".*crtstuff.c:0", "???:0", file_name)
+ return file_name
-def patch_address(frameno, addr_s):
- ''' Subtracts 1 or 2 from the top frame's address.
- Top frame is normally the return address from asan_report*
- call, which is not expected to return at all. Because of that, this
- address often belongs to the next source code line, or even to a different
- function. '''
- if frameno == '0':
- addr = int(addr_s, 16)
- if os.uname()[4].startswith('arm'):
- # Cancel the Thumb bit
- addr = addr & (~1)
- addr -= 1
- return hex(addr)
- return addr_s
# TODO(glider): need some refactoring here
def symbolize_addr2line(line):
@@ -38,7 +33,6 @@ def symbolize_addr2line(line):
frameno = match.group(2)
binary = match.group(3)
addr = match.group(4)
- addr = patch_address(frameno, addr)
if not pipes.has_key(binary):
pipes[binary] = subprocess.Popen(["addr2line", "-f", "-e", binary],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@@ -50,15 +44,25 @@ def symbolize_addr2line(line):
except:
function_name = ""
file_name = ""
- for path_to_cut in sys.argv[1:]:
- file_name = re.sub(".*" + path_to_cut, "", file_name)
- file_name = re.sub(".*asan_[a-z_]*.cc:[0-9]*", "_asan_rtl_", file_name)
- file_name = re.sub(".*crtstuff.c:0", "???:0", file_name)
+ file_name = fix_filename(file_name)
print match.group(1), "in", function_name, file_name
else:
print line.rstrip()
+
+def get_macho_filetype(binary):
+ if not filetypes.has_key(binary):
+ otool_pipe = subprocess.Popen(["otool", "-Vh", binary],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ otool_line = "".join(otool_pipe.stdout.readlines())
+ for t in ["DYLIB", "EXECUTE"]:
+ if t in otool_line:
+ filetypes[binary] = t
+ otool_pipe.stdin.close()
+ return filetypes[binary]
+
+
def symbolize_atos(line):
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
match = re.match('^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)', line)
@@ -66,27 +70,49 @@ def symbolize_atos(line):
#print line
prefix = match.group(1)
frameno = match.group(2)
- addr = match.group(3)
+ orig_addr = match.group(3)
binary = match.group(4)
offset = match.group(5)
- addr = patch_address(frameno, addr)
- load_addr = int(addr, 16) - int(offset, 16)
+ addr = orig_addr
+ load_addr = hex(int(orig_addr, 16) - int(offset, 16))
+ filetype = get_macho_filetype(binary)
+
if not pipes.has_key(binary):
- #print "atos -o %s -l %s" % (binary, hex(load_addr))
- pipes[binary] = subprocess.Popen(["atos", "-o", binary],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,)
+ # Guess which arch we're running. 10 = len("0x") + 8 hex digits.
+ if len(addr) > 10:
+ arch = "x86_64"
+ else:
+ arch = "i386"
+
+ if filetype == "DYLIB":
+ load_addr = "0x0"
+ if DEBUG:
+ print "atos -o %s -arch %s -l %s" % (binary, arch, load_addr)
+ cmd = ["atos", "-o", binary, "-arch", arch, "-l", load_addr]
+ pipes[binary] = subprocess.Popen(cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
p = pipes[binary]
- # TODO(glider): how to tell if the address is absolute?
- if ".app/" in binary and not ".framework" in binary:
- print >>p.stdin, "%s" % addr
- else:
+ if filetype == "DYLIB":
print >>p.stdin, "%s" % offset
+ else:
+ print >>p.stdin, "%s" % addr
# TODO(glider): it's more efficient to make a batch atos run for each binary.
p.stdin.close()
atos_line = p.stdout.readline().rstrip()
+ # A well-formed atos response looks like this:
+ # foo(type1, type2) (in object.name) (filename.cc:80)
+ match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
+ #print "atos_line: ", atos_line
+ if match:
+ function_name = match.group(1)
+ function_name = re.sub("\(.*?\)", "", function_name)
+ file_name = fix_filename(match.group(3))
+ print "%s%s in %s %s" % (prefix, addr, function_name, file_name)
+ else:
+ print "%s%s in %s" % (prefix, addr, atos_line)
del pipes[binary]
-
- print "%s%s in %s" % (prefix, addr, atos_line)
else:
print line.rstrip()
diff --git a/lib/asan/sysinfo/LICENSE.TXT b/lib/asan/sysinfo/LICENSE.TXT
deleted file mode 100644
index b519af5..0000000
--- a/lib/asan/sysinfo/LICENSE.TXT
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (c) 2005, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/lib/asan/sysinfo/basictypes.h b/lib/asan/sysinfo/basictypes.h
deleted file mode 100644
index ac21f8c..0000000
--- a/lib/asan/sysinfo/basictypes.h
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef _BASICTYPES_H_
-#define _BASICTYPES_H_
-
-#include <inttypes.h> // uint16_t might be here; PRId64 too.
-#include <stdint.h> // to get uint16_t (ISO naming madness)
-#include <sys/types.h> // our last best hope for uint16_t
-
-// Standard typedefs
-// All Google code is compiled with -funsigned-char to make "char"
-// unsigned. Google code therefore doesn't need a "uchar" type.
-// TODO(csilvers): how do we make sure unsigned-char works on non-gcc systems?
-typedef signed char schar;
-typedef int8_t int8;
-typedef int16_t int16;
-typedef int32_t int32;
-typedef int64_t int64;
-
-// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
-// places. Use the signed types unless your variable represents a bit
-// pattern (eg a hash value) or you really need the extra bit. Do NOT
-// use 'unsigned' to express "this value should always be positive";
-// use assertions for this.
-
-typedef uint8_t uint8;
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint64_t uint64;
-
-const uint16 kuint16max = ( (uint16) 0xFFFF);
-const uint32 kuint32max = ( (uint32) 0xFFFFFFFF);
-const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );
-
-const int8 kint8max = ( ( int8) 0x7F);
-const int16 kint16max = ( ( int16) 0x7FFF);
-const int32 kint32max = ( ( int32) 0x7FFFFFFF);
-const int64 kint64max = ( ((( int64) kint32max) << 32) | kuint32max );
-
-const int8 kint8min = ( ( int8) 0x80);
-const int16 kint16min = ( ( int16) 0x8000);
-const int32 kint32min = ( ( int32) 0x80000000);
-const int64 kint64min = ( ((( int64) kint32min) << 32) | 0 );
-
-// Define the "portable" printf and scanf macros, if they're not
-// already there (via the inttypes.h we #included above, hopefully).
-// Mostly it's old systems that don't support inttypes.h, so we assume
-// they're 32 bit.
-#ifndef PRIx64
-#define PRIx64 "llx"
-#endif
-#ifndef SCNx64
-#define SCNx64 "llx"
-#endif
-#ifndef PRId64
-#define PRId64 "lld"
-#endif
-#ifndef SCNd64
-#define SCNd64 "lld"
-#endif
-#ifndef PRIu64
-#define PRIu64 "llu"
-#endif
-#ifndef PRIxPTR
-#define PRIxPTR "lx"
-#endif
-
-// Also allow for printing of a pthread_t.
-#define GPRIuPTHREAD "lu"
-#define GPRIxPTHREAD "lx"
-#if defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__APPLE__) || defined(__FreeBSD__)
-#define PRINTABLE_PTHREAD(pthreadt) reinterpret_cast<uintptr_t>(pthreadt)
-#else
-#define PRINTABLE_PTHREAD(pthreadt) pthreadt
-#endif
-
-// A macro to disallow the evil copy constructor and operator= functions
-// This should be used in the private: declarations for a class
-#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
-
-// An alternate name that leaves out the moral judgment... :-)
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) DISALLOW_EVIL_CONSTRUCTORS(TypeName)
-
-// The COMPILE_ASSERT macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-// COMPILE_ASSERT(sizeof(num_content_type_names) == sizeof(int),
-// content_type_names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-//
-// Implementation details of COMPILE_ASSERT:
-//
-// - COMPILE_ASSERT works by defining an array type that has -1
-// elements (and thus is invalid) when the expression is false.
-//
-// - The simpler definition
-//
-// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
-//
-// does not work, as gcc supports variable-length arrays whose sizes
-// are determined at run-time (this is gcc's extension and not part
-// of the C++ standard). As a result, gcc fails to reject the
-// following code with the simple definition:
-//
-// int foo;
-// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
-// // not a compile-time constant.
-//
-// - By using the type CompileAssert<(bool(expr))>, we ensures that
-// expr is a compile-time constant. (Template arguments must be
-// determined at compile-time.)
-//
-// - The outter parentheses in CompileAssert<(bool(expr))> are necessary
-// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
-//
-// CompileAssert<bool(expr)>
-//
-// instead, these compilers will refuse to compile
-//
-// COMPILE_ASSERT(5 > 0, some_message);
-//
-// (They seem to think the ">" in "5 > 0" marks the end of the
-// template argument list.)
-//
-// - The array size is (bool(expr) ? 1 : -1), instead of simply
-//
-// ((expr) ? 1 : -1).
-//
-// This is to avoid running into a bug in MS VC 7.1, which
-// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
-
-template <bool>
-struct CompileAssert {
-};
-
-#define COMPILE_ASSERT(expr, msg) \
- typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
-
-#define arraysize(a) (sizeof(a) / sizeof(*(a)))
-
-#define OFFSETOF_MEMBER(strct, field) \
- (reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) - \
- reinterpret_cast<char*>(16))
-
-#ifdef HAVE___ATTRIBUTE__
-# define ATTRIBUTE_WEAK __attribute__((weak))
-# define ATTRIBUTE_NOINLINE __attribute__((noinline))
-#else
-# define ATTRIBUTE_WEAK
-# define ATTRIBUTE_NOINLINE
-#endif
-
-// Section attributes are supported for both ELF and Mach-O, but in
-// very different ways. Here's the API we provide:
-// 1) ATTRIBUTE_SECTION: put this with the declaration of all functions
-// you want to be in the same linker section
-// 2) DEFINE_ATTRIBUTE_SECTION_VARS: must be called once per unique
-// name. You want to make sure this is executed before any
-// DECLARE_ATTRIBUTE_SECTION_VARS; the easiest way is to put them
-// in the same .cc file. Put this call at the global level.
-// 3) INIT_ATTRIBUTE_SECTION_VARS: you can scatter calls to this in
-// multiple places to help ensure execution before any
-// DECLARE_ATTRIBUTE_SECTION_VARS. You must have at least one
-// DEFINE, but you can have many INITs. Put each in its own scope.
-// 4) DECLARE_ATTRIBUTE_SECTION_VARS: must be called before using
-// ATTRIBUTE_SECTION_START or ATTRIBUTE_SECTION_STOP on a name.
-// Put this call at the global level.
-// 5) ATTRIBUTE_SECTION_START/ATTRIBUTE_SECTION_STOP: call this to say
-// where in memory a given section is. All functions declared with
-// ATTRIBUTE_SECTION are guaranteed to be between START and STOP.
-
-#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
-# define ATTRIBUTE_SECTION(name) __attribute__ ((section (#name)))
-
- // Weak section declaration to be used as a global declaration
- // for ATTRIBUTE_SECTION_START|STOP(name) to compile and link
- // even without functions with ATTRIBUTE_SECTION(name).
-# define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
- extern char __start_##name[] ATTRIBUTE_WEAK; \
- extern char __stop_##name[] ATTRIBUTE_WEAK
-# define INIT_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
-# define DEFINE_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
-
- // Return void* pointers to start/end of a section of code with functions
- // having ATTRIBUTE_SECTION(name), or 0 if no such function exists.
- // One must DECLARE_ATTRIBUTE_SECTION(name) for this to compile and link.
-# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
-# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
-# define HAVE_ATTRIBUTE_SECTION_START 1
-
-#elif defined(HAVE___ATTRIBUTE__) && defined(__MACH__)
-# define ATTRIBUTE_SECTION(name) __attribute__ ((section ("__TEXT, " #name)))
-
-#include <mach-o/getsect.h>
-#include <mach-o/dyld.h>
-class AssignAttributeStartEnd {
- public:
- AssignAttributeStartEnd(const char* name, char** pstart, char** pend) {
- // Find out what dynamic library name is defined in
- if (_dyld_present()) {
- for (int i = _dyld_image_count() - 1; i >= 0; --i) {
- const mach_header* hdr = _dyld_get_image_header(i);
-#ifdef MH_MAGIC_64
- if (hdr->magic == MH_MAGIC_64) {
- uint64_t len;
- *pstart = getsectdatafromheader_64((mach_header_64*)hdr,
- "__TEXT", name, &len);
- if (*pstart) { // NULL if not defined in this dynamic library
- *pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
- *pend = *pstart + len;
- return;
- }
- }
-#endif
- if (hdr->magic == MH_MAGIC) {
- uint32_t len;
- *pstart = getsectdatafromheader(hdr, "__TEXT", name, &len);
- if (*pstart) { // NULL if not defined in this dynamic library
- *pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
- *pend = *pstart + len;
- return;
- }
- }
- }
- }
- // If we get here, not defined in a dll at all. See if defined statically.
- unsigned long len; // don't ask me why this type isn't uint32_t too...
- *pstart = getsectdata("__TEXT", name, &len);
- *pend = *pstart + len;
- }
-};
-
-#define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
- extern char* __start_##name; \
- extern char* __stop_##name
-
-#define INIT_ATTRIBUTE_SECTION_VARS(name) \
- DECLARE_ATTRIBUTE_SECTION_VARS(name); \
- static const AssignAttributeStartEnd __assign_##name( \
- #name, &__start_##name, &__stop_##name)
-
-#define DEFINE_ATTRIBUTE_SECTION_VARS(name) \
- char* __start_##name, *__stop_##name; \
- INIT_ATTRIBUTE_SECTION_VARS(name)
-
-# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
-# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
-# define HAVE_ATTRIBUTE_SECTION_START 1
-
-#else // not HAVE___ATTRIBUTE__ && __ELF__, nor HAVE___ATTRIBUTE__ && __MACH__
-# define ATTRIBUTE_SECTION(name)
-# define DECLARE_ATTRIBUTE_SECTION_VARS(name)
-# define INIT_ATTRIBUTE_SECTION_VARS(name)
-# define DEFINE_ATTRIBUTE_SECTION_VARS(name)
-# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
-# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
-
-#endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__
-
-#if defined(HAVE___ATTRIBUTE__) && (defined(__i386__) || defined(__x86_64__))
-# define CACHELINE_SIZE 64
-# define CACHELINE_ALIGNED __attribute__((aligned(CACHELINE_SIZE)))
-#else
-# define CACHELINE_ALIGNED
-#endif // defined(HAVE___ATTRIBUTE__) && (__i386__ || __x86_64__)
-
-
-// The following enum should be used only as a constructor argument to indicate
-// that the variable has static storage class, and that the constructor should
-// do nothing to its state. It indicates to the reader that it is legal to
-// declare a static instance of the class, provided the constructor is given
-// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
-// static variable that has a constructor or a destructor because invocation
-// order is undefined. However, IF the type can be initialized by filling with
-// zeroes (which the loader does for static variables), AND the destructor also
-// does nothing to the storage, then it is safe to declare a constructor as
-// explicit MyClass(base::LinkerInitialized x) {}
-// and invoke it as
-// static MyClass my_variable_name(base::LINKER_INITIALIZED);
-namespace base {
-enum LinkerInitialized { LINKER_INITIALIZED };
-}
-
-#endif // _BASICTYPES_H_
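As an illustration of the base::LINKER_INITIALIZED pattern described above, a hedged sketch (SpinLock is a hypothetical class whose zero-filled state is a valid "unlocked" value):

    class SpinLock {
     public:
      SpinLock() : lockword_(0) {}
      // Does nothing: relies on the loader having zero-filled the storage.
      explicit SpinLock(base::LinkerInitialized /*unused*/) {}
     private:
      int lockword_;
    };

    // Safe at static storage despite having constructors, because this
    // constructor never touches the (already zeroed) state.
    static SpinLock global_lock(base::LINKER_INITIALIZED);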
diff --git a/lib/asan/sysinfo/sysinfo.cc b/lib/asan/sysinfo/sysinfo.cc
deleted file mode 100644
index ee06735..0000000
--- a/lib/asan/sysinfo/sysinfo.cc
+++ /dev/null
@@ -1,617 +0,0 @@
-// Copyright (c) 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdlib.h> // for getenv()
-#include <stdio.h> // for snprintf(), sscanf()
-#include <string.h> // for memmove(), memchr(), etc.
-#include <fcntl.h> // for open()
-#include <errno.h> // for errno
-#include <unistd.h> // for read()
-#if defined __MACH__ // Mac OS X, almost certainly
-#include <mach-o/dyld.h> // for iterating over dll's in ProcMapsIter
-#include <mach-o/loader.h> // for iterating over dll's in ProcMapsIter
-#include <sys/types.h>
-#include <sys/sysctl.h> // how we figure out numcpu's on OS X
-#elif defined __FreeBSD__
-#include <sys/sysctl.h>
-#elif defined __sun__ // Solaris
-#include <procfs.h> // for, e.g., prmap_t
-#elif defined(PLATFORM_WINDOWS)
-#include <process.h> // for getpid() (actually, _getpid())
-#include <shlwapi.h> // for SHGetValueA()
-#include <tlhelp32.h> // for Module32First()
-#endif
-#include "sysinfo.h"
-
-#ifdef PLATFORM_WINDOWS
-#ifdef MODULEENTRY32
-// In a change from the usual W-A pattern, there is no A variant of
-// MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
-// In unicode mode, tlhelp32.h #defines MODULEENTRY32 to be
-// MODULEENTRY32W. These #undefs are the only way I see to get back
-// access to the original, ascii struct (and related functions).
-#undef MODULEENTRY32
-#undef Module32First
-#undef Module32Next
-#undef PMODULEENTRY32
-#undef LPMODULEENTRY32
-#endif /* MODULEENTRY32 */
-// MinGW doesn't seem to define this, perhaps some windowsen don't either.
-#ifndef TH32CS_SNAPMODULE32
-#define TH32CS_SNAPMODULE32 0
-#endif /* TH32CS_SNAPMODULE32 */
-#endif /* PLATFORM_WINDOWS */
-
-// Re-run fn until it doesn't cause EINTR.
-#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
-
-// open/read/close can set errno, which may be illegal at this
-// time, so prefer making the syscalls directly if we can.
-#ifdef HAVE_SYS_SYSCALL_H
-# include <sys/syscall.h>
-# define safeopen(filename, mode) syscall(SYS_open, filename, mode)
-# define saferead(fd, buffer, size) syscall(SYS_read, fd, buffer, size)
-# define safeclose(fd) syscall(SYS_close, fd)
-#else
-# define safeopen(filename, mode) open(filename, mode)
-# define saferead(fd, buffer, size) read(fd, buffer, size)
-# define safeclose(fd) close(fd)
-#endif
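A hedged sketch of how these wrappers are meant to be combined; the file name is just an example:

    int fd;
    NO_INTR(fd = safeopen("/proc/self/maps", O_RDONLY));
    if (fd >= 0) {
      char buf[4096];
      ssize_t n;
      NO_INTR(n = saferead(fd, buf, sizeof(buf)));
      // ... use the first n bytes of buf ...
      safeclose(fd);
    }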
-
-
-// ----------------------------------------------------------------------
-// HasPosixThreads()
-// Return true if we're running POSIX (e.g., NPTL on Linux)
-// threads, as opposed to a non-POSIX thread library. The thing
-// that we care about is whether a thread's pid is the same as
-// the thread that spawned it. If so, this function returns
-// true.
-// ----------------------------------------------------------------------
-bool HasPosixThreads() {
-#if defined(__linux__) and !defined(ANDROID)
-#ifndef _CS_GNU_LIBPTHREAD_VERSION
-#define _CS_GNU_LIBPTHREAD_VERSION 3
-#endif
- char buf[32];
- // We assume that, if confstr() doesn't know about this name, then
- // the same glibc is providing LinuxThreads.
- if (confstr(_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof(buf)) == 0)
- return false;
- return strncmp(buf, "NPTL", 4) == 0;
-#elif defined(PLATFORM_WINDOWS) || defined(__CYGWIN__) || defined(__CYGWIN32__)
- return false;
-#else // other OS
- return true; // Assume that everything else has Posix
-#endif // else OS_LINUX
-}
-
-// ----------------------------------------------------------------------
-
-#define CHECK_LT(x, y) do { assert((x) < (y)); } while (0)
-
-#if defined __linux__ || defined __FreeBSD__ || defined __sun__ || defined __CYGWIN__ || defined __CYGWIN32__
-static void ConstructFilename(const char* spec, pid_t pid,
- char* buf, int buf_size) {
- CHECK_LT(snprintf(buf, buf_size,
- spec,
- static_cast<int>(pid ? pid : getpid())), buf_size);
-}
-#endif
-
-// A templatized helper function instantiated for Mach (OS X) only.
-// It can handle finding info for both 32 bits and 64 bits.
-// Returns true if it successfully handled the hdr, false otherwise.
-#ifdef __MACH__ // Mac OS X, almost certainly
-template<uint32_t kMagic, uint32_t kLCSegment,
- typename MachHeader, typename SegmentCommand>
-static bool NextExtMachHelper(const mach_header* hdr,
- int current_image, int current_load_cmd,
- uint64 *start, uint64 *end, char **flags,
- uint64 *offset, int64 *inode, char **filename,
- uint64 *file_mapping, uint64 *file_pages,
- uint64 *anon_mapping, uint64 *anon_pages,
- dev_t *dev) {
- static char kDefaultPerms[5] = "r-xp";
- if (hdr->magic != kMagic)
- return false;
- const char* lc = (const char *)hdr + sizeof(MachHeader);
- // TODO(csilvers): make this not-quadratic (increment and hold state)
- for (int j = 0; j < current_load_cmd; j++) // advance to *our* load_cmd
- lc += ((const load_command *)lc)->cmdsize;
- if (((const load_command *)lc)->cmd == kLCSegment) {
- const intptr_t dlloff = _dyld_get_image_vmaddr_slide(current_image);
- const SegmentCommand* sc = (const SegmentCommand *)lc;
- if (start) *start = sc->vmaddr + dlloff;
- if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
- if (flags) *flags = kDefaultPerms; // can we do better?
- if (offset) *offset = sc->fileoff;
- if (inode) *inode = 0;
- if (filename)
- *filename = const_cast<char*>(_dyld_get_image_name(current_image));
- if (file_mapping) *file_mapping = 0;
- if (file_pages) *file_pages = 0; // could we use sc->filesize?
- if (anon_mapping) *anon_mapping = 0;
- if (anon_pages) *anon_pages = 0;
- if (dev) *dev = 0;
- return true;
- }
-
- return false;
-}
-#endif
-
-ProcMapsIterator::ProcMapsIterator(pid_t pid) {
- Init(pid, NULL, false);
-}
-
-ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer) {
- Init(pid, buffer, false);
-}
-
-ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer,
- bool use_maps_backing) {
- Init(pid, buffer, use_maps_backing);
-}
-
-void ProcMapsIterator::Init(pid_t pid, Buffer *buffer,
- bool use_maps_backing) {
- pid_ = pid;
- using_maps_backing_ = use_maps_backing;
- dynamic_buffer_ = NULL;
- if (!buffer) {
- // If the user didn't pass in any buffer storage, allocate it
- // now. This is the normal case; the signal handler passes in a
- // static buffer.
- buffer = dynamic_buffer_ = new Buffer;
- } else {
- dynamic_buffer_ = NULL;
- }
-
- ibuf_ = buffer->buf_;
-
- stext_ = etext_ = nextline_ = ibuf_;
- ebuf_ = ibuf_ + Buffer::kBufSize - 1;
- nextline_ = ibuf_;
-
-#if defined(__linux__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
- if (use_maps_backing) { // don't bother with clever "self" stuff in this case
- ConstructFilename("/proc/%d/maps_backing", pid, ibuf_, Buffer::kBufSize);
- } else if (pid == 0) {
- // We have to kludge a bit to deal with the args ConstructFilename
- // expects. The 1 is never used -- it's only important that it's not 0.
- ConstructFilename("/proc/self/maps", 1, ibuf_, Buffer::kBufSize);
- } else {
- ConstructFilename("/proc/%d/maps", pid, ibuf_, Buffer::kBufSize);
- }
- // No error logging since this can be called from the crash dump
- // handler at awkward moments. Users should call Valid() before
- // using.
- NO_INTR(fd_ = open(ibuf_, O_RDONLY));
-#elif defined(__FreeBSD__)
- // We don't support maps_backing on FreeBSD
- if (pid == 0) {
- ConstructFilename("/proc/curproc/map", 1, ibuf_, Buffer::kBufSize);
- } else {
- ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize);
- }
- NO_INTR(fd_ = open(ibuf_, O_RDONLY));
-#elif defined(__sun__)
- if (pid == 0) {
- ConstructFilename("/proc/self/map", 1, ibuf_, Buffer::kBufSize);
- } else {
- ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize);
- }
- NO_INTR(fd_ = open(ibuf_, O_RDONLY));
-#elif defined(__MACH__)
- current_image_ = _dyld_image_count(); // count down from the top
- current_load_cmd_ = -1;
-#elif defined(PLATFORM_WINDOWS)
- snapshot_ = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE |
- TH32CS_SNAPMODULE32,
- GetCurrentProcessId());
- memset(&module_, 0, sizeof(module_));
-#else
- fd_ = -1; // so Valid() is always false
-#endif
-
-}
-
-ProcMapsIterator::~ProcMapsIterator() {
-#if defined(PLATFORM_WINDOWS)
- if (snapshot_ != INVALID_HANDLE_VALUE) CloseHandle(snapshot_);
-#elif defined(__MACH__)
- // no cleanup necessary!
-#else
- if (fd_ >= 0) NO_INTR(close(fd_));
-#endif
- delete dynamic_buffer_;
-}
-
-bool ProcMapsIterator::Valid() const {
-#if defined(PLATFORM_WINDOWS)
- return snapshot_ != INVALID_HANDLE_VALUE;
-#elif defined(__MACH__)
- return 1;
-#else
- return fd_ != -1;
-#endif
-}
-
-bool ProcMapsIterator::Next(uint64 *start, uint64 *end, char **flags,
- uint64 *offset, int64 *inode, char **filename) {
- return NextExt(start, end, flags, offset, inode, filename, NULL, NULL,
- NULL, NULL, NULL);
-}
-
-// This has too many arguments. It should really be building
-// a map object and returning it. The problem is that this is called
-// when the memory allocator state is undefined, hence the arguments.
-bool ProcMapsIterator::NextExt(uint64 *start, uint64 *end, char **flags,
- uint64 *offset, int64 *inode, char **filename,
- uint64 *file_mapping, uint64 *file_pages,
- uint64 *anon_mapping, uint64 *anon_pages,
- dev_t *dev) {
-
-#if defined(__linux__) || defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
- do {
- // Advance to the start of the next line
- stext_ = nextline_;
-
- // See if we have a complete line in the buffer already
- nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ - stext_));
- if (!nextline_) {
- // Shift/fill the buffer so we do have a line
- int count = etext_ - stext_;
-
- // Move the current text to the start of the buffer
- memmove(ibuf_, stext_, count);
- stext_ = ibuf_;
- etext_ = ibuf_ + count;
-
- int nread = 0; // fill up buffer with text
- while (etext_ < ebuf_) {
- NO_INTR(nread = read(fd_, etext_, ebuf_ - etext_));
- if (nread > 0)
- etext_ += nread;
- else
- break;
- }
-
- // Zero out remaining characters in buffer at EOF to avoid returning
- // garbage from subsequent calls.
- if (etext_ != ebuf_ && nread == 0) {
- memset(etext_, 0, ebuf_ - etext_);
- }
- *etext_ = '\n'; // sentinel; safe because ibuf extends 1 char beyond ebuf
- nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ + 1 - stext_));
- }
- *nextline_ = 0; // turn newline into nul
- nextline_ += ((nextline_ < etext_)? 1 : 0); // skip nul if not end of text
- // stext_ now points at a nul-terminated line
- uint64 tmpstart, tmpend, tmpoffset;
- int64 tmpinode;
- int major, minor;
- unsigned filename_offset = 0;
-#if defined(__linux__)
- // for now, assume all linuxes have the same format
- if (sscanf(stext_, "%"SCNx64"-%"SCNx64" %4s %"SCNx64" %x:%x %"SCNd64" %n",
- (unsigned long long *)(start ? start : &tmpstart),
- (unsigned long long *)(end ? end : &tmpend),
- flags_,
- (unsigned long long *)(offset ? offset : &tmpoffset),
- &major, &minor,
- (unsigned long long *)(inode ? inode : &tmpinode),
- &filename_offset) != 7) continue;
-#elif defined(__CYGWIN__) || defined(__CYGWIN32__)
- // cygwin is like linux, except the third field is the "entry point"
- // rather than the offset (see format_process_maps at
- // http://cygwin.com/cgi-bin/cvsweb.cgi/src/winsup/cygwin/fhandler_process.cc?rev=1.89&content-type=text/x-cvsweb-markup&cvsroot=src
- // Offset is always 0 on cygwin: cygwin implements an mmap
- // by loading the whole file and then calling NtMapViewOfSection.
- // Cygwin also seems to set its flags kinda randomly; use windows default.
- char tmpflags[5];
- if (offset)
- *offset = 0;
- strcpy(flags_, "r-xp");
- if (sscanf(stext_, "%llx-%llx %4s %llx %x:%x %lld %n",
- start ? start : &tmpstart,
- end ? end : &tmpend,
- tmpflags,
- &tmpoffset,
- &major, &minor,
- inode ? inode : &tmpinode, &filename_offset) != 7) continue;
-#elif defined(__FreeBSD__)
- // For the format, see http://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?rev=1.31&content-type=text/x-cvsweb-markup
- tmpstart = tmpend = tmpoffset = 0;
- tmpinode = 0;
- major = minor = 0; // can't get this info in freebsd
- if (inode)
- *inode = 0; // nor this
- if (offset)
- *offset = 0; // seems like this should be in there, but maybe not
- // start end resident privateresident obj(?) prot refcnt shadowcnt
- // flags copy_on_write needs_copy type filename:
- // 0x8048000 0x804a000 2 0 0xc104ce70 r-x 1 0 0x0 COW NC vnode /bin/cat
- if (sscanf(stext_, "0x%"SCNx64" 0x%"SCNx64" %*d %*d %*p %3s %*d %*d 0x%*x %*s %*s %*s %n",
- start ? start : &tmpstart,
- end ? end : &tmpend,
- flags_,
- &filename_offset) != 3) continue;
-#endif
-
- // Depending on the Linux kernel being used, there may or may not be a space
- // after the inode if there is no filename. sscanf will in such situations
- // nondeterministically either fill in filename_offset or not (the results
- // differ on multiple calls in the same run even with identical arguments).
- // We don't want to wander off somewhere beyond the end of the string.
- size_t stext_length = strlen(stext_);
- if (filename_offset == 0 || filename_offset > stext_length)
- filename_offset = stext_length;
-
- // We found an entry
- if (flags) *flags = flags_;
- if (filename) *filename = stext_ + filename_offset;
- if (dev) *dev = minor | (major << 8);
-
- if (using_maps_backing_) {
- // Extract and parse physical page backing info.
- char *backing_ptr = stext_ + filename_offset +
- strlen(stext_+filename_offset);
-
- // find the second '('
- int paren_count = 0;
- while (--backing_ptr > stext_) {
- if (*backing_ptr == '(') {
- ++paren_count;
- if (paren_count >= 2) {
- uint64 tmp_file_mapping;
- uint64 tmp_file_pages;
- uint64 tmp_anon_mapping;
- uint64 tmp_anon_pages;
-
- sscanf(backing_ptr+1, "F %"SCNx64" %"SCNd64") (A %"SCNx64" %"SCNd64")",
- (unsigned long long *)(file_mapping ?
- file_mapping : &tmp_file_mapping),
- (unsigned long long *)(file_pages ?
- file_pages : &tmp_file_pages),
- (unsigned long long *)(anon_mapping
- ? anon_mapping : &tmp_anon_mapping),
- (unsigned long long *)(anon_pages
- ? anon_pages : &tmp_anon_pages));
- // null-terminate the file name (there is a space
- // before the first '(').
- backing_ptr[-1] = 0;
- break;
- }
- }
- }
- }
-
- return true;
- } while (etext_ > ibuf_);
-#elif defined(__sun__)
- // This is based on MA_READ == 4, MA_WRITE == 2, MA_EXEC == 1
- static char kPerms[8][4] = { "---", "--x", "-w-", "-wx",
- "r--", "r-x", "rw-", "rwx" };
- COMPILE_ASSERT(MA_READ == 4, solaris_ma_read_must_equal_4);
- COMPILE_ASSERT(MA_WRITE == 2, solaris_ma_write_must_equal_2);
- COMPILE_ASSERT(MA_EXEC == 1, solaris_ma_exec_must_equal_1);
- Buffer object_path;
- int nread = 0; // fill up buffer with text
- NO_INTR(nread = read(fd_, ibuf_, sizeof(prmap_t)));
- if (nread == sizeof(prmap_t)) {
- long inode_from_mapname = 0;
- prmap_t* mapinfo = reinterpret_cast<prmap_t*>(ibuf_);
- // Best-effort attempt to get the inode from the filename. I think the
- // two middle ints are major and minor device numbers, but I'm not sure.
- sscanf(mapinfo->pr_mapname, "ufs.%*d.%*d.%ld", &inode_from_mapname);
-
- if (pid_ == 0) {
- CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize,
- "/proc/self/path/%s", mapinfo->pr_mapname),
- Buffer::kBufSize);
- } else {
- CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize,
- "/proc/%d/path/%s",
- static_cast<int>(pid_), mapinfo->pr_mapname),
- Buffer::kBufSize);
- }
- ssize_t len = readlink(object_path.buf_, current_filename_, PATH_MAX);
- CHECK_LT(len, PATH_MAX);
- if (len < 0)
- len = 0;
- current_filename_[len] = '\0';
-
- if (start) *start = mapinfo->pr_vaddr;
- if (end) *end = mapinfo->pr_vaddr + mapinfo->pr_size;
- if (flags) *flags = kPerms[mapinfo->pr_mflags & 7];
- if (offset) *offset = mapinfo->pr_offset;
- if (inode) *inode = inode_from_mapname;
- if (filename) *filename = current_filename_;
- if (file_mapping) *file_mapping = 0;
- if (file_pages) *file_pages = 0;
- if (anon_mapping) *anon_mapping = 0;
- if (anon_pages) *anon_pages = 0;
- if (dev) *dev = 0;
- return true;
- }
-#elif defined(__MACH__)
- // We return a separate entry for each segment in the DLL. (TODO(csilvers):
- // can we do better?) A DLL ("image") has load-commands, some of which
- // talk about segment boundaries.
- // cf image_for_address from http://svn.digium.com/view/asterisk/team/oej/minivoicemail/dlfcn.c?revision=53912
- for (; current_image_ >= 0; current_image_--) {
- const mach_header* hdr = _dyld_get_image_header(current_image_);
- if (!hdr) continue;
- if (current_load_cmd_ < 0) // set up for this image
- current_load_cmd_ = hdr->ncmds; // again, go from the top down
-
- // We start with the next load command (we've already looked at this one).
- for (current_load_cmd_--; current_load_cmd_ >= 0; current_load_cmd_--) {
-#ifdef MH_MAGIC_64
- if (NextExtMachHelper<MH_MAGIC_64, LC_SEGMENT_64,
- struct mach_header_64, struct segment_command_64>(
- hdr, current_image_, current_load_cmd_,
- start, end, flags, offset, inode, filename,
- file_mapping, file_pages, anon_mapping,
- anon_pages, dev)) {
- return true;
- }
-#endif
- if (NextExtMachHelper<MH_MAGIC, LC_SEGMENT,
- struct mach_header, struct segment_command>(
- hdr, current_image_, current_load_cmd_,
- start, end, flags, offset, inode, filename,
- file_mapping, file_pages, anon_mapping,
- anon_pages, dev)) {
- return true;
- }
- }
- // If we get here, no more load_cmd's in this image talk about
- // segments. Go on to the next image.
- }
-#elif defined(PLATFORM_WINDOWS)
- static char kDefaultPerms[5] = "r-xp";
- BOOL ok;
- if (module_.dwSize == 0) { // only possible before first call
- module_.dwSize = sizeof(module_);
- ok = Module32First(snapshot_, &module_);
- } else {
- ok = Module32Next(snapshot_, &module_);
- }
- if (ok) {
- uint64 base_addr = reinterpret_cast<DWORD_PTR>(module_.modBaseAddr);
- if (start) *start = base_addr;
- if (end) *end = base_addr + module_.modBaseSize;
- if (flags) *flags = kDefaultPerms;
- if (offset) *offset = 0;
- if (inode) *inode = 0;
- if (filename) *filename = module_.szExePath;
- if (file_mapping) *file_mapping = 0;
- if (file_pages) *file_pages = 0;
- if (anon_mapping) *anon_mapping = 0;
- if (anon_pages) *anon_pages = 0;
- if (dev) *dev = 0;
- return true;
- }
-#endif
-
- // We didn't find anything
- return false;
-}
-
-int ProcMapsIterator::FormatLine(char* buffer, int bufsize,
- uint64 start, uint64 end, const char *flags,
- uint64 offset, int64 inode,
- const char *filename, dev_t dev) {
- // We assume 'flags' looks like 'rwxp' or 'rwx'.
- char r = (flags && flags[0] == 'r') ? 'r' : '-';
- char w = (flags && flags[0] && flags[1] == 'w') ? 'w' : '-';
- char x = (flags && flags[0] && flags[1] && flags[2] == 'x') ? 'x' : '-';
- // p always seems set on linux, so we set the default to 'p', not '-'
- char p = (flags && flags[0] && flags[1] && flags[2] && flags[3] != 'p')
- ? '-' : 'p';
-
- const int rc = snprintf(buffer, bufsize,
- "%08"PRIx64"-%08"PRIx64" %c%c%c%c %08"PRIx64" %02x:%02x %-11"PRId64" %s\n",
- (unsigned long long)start, (unsigned long long)end, r,w,x,p,
- (unsigned long long)offset,
- static_cast<int>(dev/256), static_cast<int>(dev%256),
- (unsigned long long)inode, filename);
- return (rc < 0 || rc >= bufsize) ? 0 : rc;
-}
-
-// Helper to add the list of mapped shared libraries to a profile.
-// Fill formatted "/proc/self/maps" contents into buffer 'buf' of size 'size'
-// and return the actual size occupied in 'buf'. We set *wrote_all to true
-// if we successfully wrote all proc lines to buf, false otherwise.
-// We do not 0-terminate 'buf'.
-int FillProcSelfMaps(char buf[], int size, bool* wrote_all) {
- ProcMapsIterator::Buffer iterbuf;
- ProcMapsIterator it(0, &iterbuf); // 0 means "current pid"
-
- uint64 start, end, offset;
- int64 inode;
- char *flags, *filename;
- int bytes_written = 0;
- *wrote_all = true;
- while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
- const int line_length = it.FormatLine(buf + bytes_written,
- size - bytes_written,
- start, end, flags, offset,
- inode, filename, 0);
- if (line_length == 0)
- *wrote_all = false; // failed to write this line out
- else
- bytes_written += line_length;
-
- }
- return bytes_written;
-}
-
-// Dump the same data as FillProcSelfMaps reads to fd.
-// It seems easier to repeat parts of FillProcSelfMaps here than to
-// reuse it via a call.
-void DumpProcSelfMaps(RawFD fd) {
- ProcMapsIterator::Buffer iterbuf;
- ProcMapsIterator it(0, &iterbuf); // 0 means "current pid"
-
- uint64 start, end, offset;
- int64 inode;
- char *flags, *filename;
- ProcMapsIterator::Buffer linebuf;
- while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
- int written = it.FormatLine(linebuf.buf_, sizeof(linebuf.buf_),
- start, end, flags, offset, inode, filename,
- 0);
- RawWrite(fd, linebuf.buf_, written);
- }
-}
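A small usage sketch of the two helpers above; the output path and buffer size are made up:

    char buf[1 << 16];
    bool wrote_all;
    int len = FillProcSelfMaps(buf, sizeof(buf), &wrote_all);
    RawFD fd = RawOpenForWriting("/tmp/maps.txt");
    if (fd != kIllegalRawFD) {
      RawWrite(fd, buf, len);   // or call DumpProcSelfMaps(fd) directly
      RawClose(fd);
    }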
-
-// Re-run fn until it doesn't cause EINTR.
-#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
-
-RawFD RawOpenForWriting(const char* filename) {
- return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
-}
-
-void RawWrite(RawFD fd, const char* buf, size_t len) {
- while (len > 0) {
- ssize_t r;
- NO_INTR(r = write(fd, buf, len));
- if (r <= 0) break;
- buf += r;
- len -= r;
- }
-}
-
-void RawClose(RawFD fd) {
- NO_INTR(close(fd));
-}
diff --git a/lib/asan/sysinfo/sysinfo.h b/lib/asan/sysinfo/sysinfo.h
deleted file mode 100644
index 707687e..0000000
--- a/lib/asan/sysinfo/sysinfo.h
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright (c) 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// All functions here are thread-hostile due to file caching unless
-// commented otherwise.
-
-#ifndef _SYSINFO_H_
-#define _SYSINFO_H_
-
-#include <time.h>
-#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
-#include <windows.h> // for DWORD
-#include <TlHelp32.h> // for CreateToolhelp32Snapshot
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h> // for pid_t
-#endif
-#include <stddef.h> // for size_t
-#include <limits.h> // for PATH_MAX
-#include "basictypes.h"
-
-// This getenv function is safe to call before the C runtime is initialized.
-// On Windows, it utilizes GetEnvironmentVariable() and on unix it uses
-// /proc/self/environ instead of calling getenv(). It's intended to be used in
-// routines that run before main(), when the state required for getenv() may
-// not be set up yet. In particular, errno isn't set up until relatively late
-// (after the pthreads library has a chance to make it threadsafe), and
-// getenv() doesn't work until then.
-// On some platforms, this call will utilize the same, static buffer for
-// repeated GetenvBeforeMain() calls. Callers should not expect pointers from
-// this routine to be long lived.
-// Note that on unix, /proc only has the environment at the time the
-// application was started, so this routine ignores setenv() calls/etc. Also
-// note it only reads the first 16K of the environment.
-extern const char* GetenvBeforeMain(const char* name);
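For example, a sketch of caller code; copying the value out is the caller's job since the returned pointer may reference a shared static buffer:

    // Typically called from code that runs before main().
    const char* val = GetenvBeforeMain("CPUPROFILE");
    char path[PATH_MAX];
    if (val != NULL) {
      snprintf(path, sizeof(path), "%s", val);  // copy before the buffer is reused
    }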
-
-// This takes as an argument an environment-variable name (like
-// CPUPROFILE) whose value is supposed to be a file-path, and sets
-// path to that path, and returns true. Non-trivial for surprising
-// reasons, as documented in sysinfo.cc. path must have space for PATH_MAX bytes.
-extern bool GetUniquePathFromEnv(const char* env_name, char* path);
-
-extern int NumCPUs();
-
-// Processor cycles per second of each processor. Thread-safe.
-extern double CyclesPerSecond(void);
-
-
-// Return true if we're running POSIX (e.g., NPTL on Linux) threads,
-// as opposed to a non-POSIX thread library. The thing that we care
-// about is whether a thread's pid is the same as the thread that
-// spawned it. If so, this function returns true.
-// Thread-safe.
-// Note: We consider false negatives to be OK.
-bool HasPosixThreads();
-
-#ifndef SWIG // SWIG doesn't like struct Buffer and variable arguments.
-
-// A ProcMapsIterator abstracts access to /proc/maps for a given
-// process. Needs to be stack-allocatable and avoid using stdio/malloc
-// so it can be used in the google stack dumper, heap-profiler, etc.
-//
-// On Windows and Mac OS X, this iterator iterates *only* over DLLs
-// mapped into this process space. For Linux, FreeBSD, and Solaris,
-// it iterates over *all* mapped memory regions, including anonymous
-// mmaps. For other O/Ss, it is unlikely to work at all, and Valid()
-// will always return false. Also note: this routine only works on
-// FreeBSD if procfs is mounted: make sure this is in your /etc/fstab:
-// proc /proc procfs rw 0 0
-class ProcMapsIterator {
- public:
- struct Buffer {
-#ifdef __FreeBSD__
- // FreeBSD requires us to read all of the maps file at once, so
- // we have to make a buffer that's "always" big enough
- static const size_t kBufSize = 102400;
-#else // a one-line buffer is good enough
- static const size_t kBufSize = PATH_MAX + 1024;
-#endif
- char buf_[kBufSize];
- };
-
-
- // Create a new iterator for the specified pid. pid can be 0 for "self".
- explicit ProcMapsIterator(pid_t pid);
-
- // Create an iterator with specified storage (for use in signal
- // handler). "buffer" should point to a ProcMapsIterator::Buffer;
- // buffer can be NULL, in which case a buffer will be allocated.
- ProcMapsIterator(pid_t pid, Buffer *buffer);
-
- // Iterate through maps_backing instead of maps if use_maps_backing
- // is true. Otherwise the same as above. buffer can be NULL and
- // it will allocate a buffer itself.
- ProcMapsIterator(pid_t pid, Buffer *buffer,
- bool use_maps_backing);
-
- // Returns true if the iterator was successfully initialized.
- bool Valid() const;
-
- // Returns a pointer to the most recently parsed line. Only valid
- // after Next() returns true, and until the iterator is destroyed or
- // Next() is called again. This may give strange results on non-Linux
- // systems. Prefer FormatLine() if that may be a concern.
- const char *CurrentLine() const { return stext_; }
-
- // Writes the "canonical" form of the /proc/xxx/maps info for a single
- // line to the passed-in buffer. Returns the number of bytes written,
- // or 0 if it was not able to write the complete line. (To guarantee
- // success, buffer should have size at least Buffer::kBufSize.)
- // Takes as arguments values set via a call to Next(). The
- // "canonical" form of the line (taken from linux's /proc/xxx/maps):
- // <start_addr(hex)>-<end_addr(hex)> <perms(rwxp)> <offset(hex)> +
- //      <major_dev(hex)>:<minor_dev(hex)> <inode> <filename>
- //    For example:
- // 08048000-0804c000 r-xp 00000000 03:01 3793678 /bin/cat
- // If you don't have the dev_t (dev), feel free to pass in 0.
- // (Next() doesn't return a dev_t, though NextExt does.)
- //
- // Note: if filename and flags were obtained via a call to Next(),
- // then the output of this function is only valid if Next() returned
- // true, and only until the iterator is destroyed or Next() is
- // called again. (Since filename, at least, points into CurrentLine.)
- static int FormatLine(char* buffer, int bufsize,
- uint64 start, uint64 end, const char *flags,
- uint64 offset, int64 inode, const char *filename,
- dev_t dev);
-
- // Find the next entry in /proc/maps; return true if found or false
- // if at the end of the file.
- //
- // Any of the result pointers can be NULL if you're not interested
- // in those values.
- //
- // If "flags" and "filename" are passed, they end up pointing to
- // storage within the ProcMapsIterator that is valid only until the
- // iterator is destroyed or Next() is called again. The caller may
- // modify the contents of these strings (up as far as the first NUL,
- // and only until the subsequent call to Next()) if desired.
-
- // The offsets are all uint64 in order to handle the case of a
- // 32-bit process running on a 64-bit kernel
- //
- // IMPORTANT NOTE: see top-of-class notes for details about what
- // mapped regions Next() iterates over, depending on O/S.
- // TODO(csilvers): make flags and filename const.
- bool Next(uint64 *start, uint64 *end, char **flags,
- uint64 *offset, int64 *inode, char **filename);
-
- bool NextExt(uint64 *start, uint64 *end, char **flags,
- uint64 *offset, int64 *inode, char **filename,
- uint64 *file_mapping, uint64 *file_pages,
- uint64 *anon_mapping, uint64 *anon_pages,
- dev_t *dev);
-
- ~ProcMapsIterator();
-
- private:
- void Init(pid_t pid, Buffer *buffer, bool use_maps_backing);
-
- char *ibuf_; // input buffer
- char *stext_; // start of text
- char *etext_; // end of text
- char *nextline_; // start of next line
- char *ebuf_; // end of buffer (1 char for a nul)
-#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
- HANDLE snapshot_; // filehandle on dll info
- // In a change from the usual W-A pattern, there is no A variant of
- // MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
- // We want the original A variants, and this #undef is the only
- // way I see to get them. Redefining it when we're done prevents us
- // from affecting other .cc files.
-# ifdef MODULEENTRY32 // Alias of W
-# undef MODULEENTRY32
- MODULEENTRY32 module_; // info about current dll (and dll iterator)
-# define MODULEENTRY32 MODULEENTRY32W
-# else // It's the ascii, the one we want.
- MODULEENTRY32 module_; // info about current dll (and dll iterator)
-# endif
-#elif defined(__MACH__)
- int current_image_; // dll's are called "images" in macos parlance
- int current_load_cmd_; // the segment of this dll we're examining
-#elif defined(__sun__) // Solaris
- int fd_;
- char current_filename_[PATH_MAX];
-#else
- int fd_; // filehandle on /proc/*/maps
-#endif
- pid_t pid_;
- char flags_[10];
- Buffer* dynamic_buffer_; // dynamically-allocated Buffer
- bool using_maps_backing_; // true if we are looking at maps_backing instead of maps.
-};
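A brief usage sketch of this class, mirroring what FillProcSelfMaps does in sysinfo.cc:

    ProcMapsIterator::Buffer buffer;
    ProcMapsIterator it(0, &buffer);   // pid 0 means "this process"
    uint64 start, end, offset;
    int64 inode;
    char *flags, *filename;
    if (it.Valid()) {
      while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
        // One mapped region per iteration; filename may be empty for anonymous maps.
      }
    }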
-
-#endif /* #ifndef SWIG */
-
-// Helper routines
-typedef int RawFD;
-const RawFD kIllegalRawFD = -1; // what open returns if it fails
-
-RawFD RawOpenForWriting(const char* filename); // uses default permissions
-void RawWrite(RawFD fd, const char* buf, size_t len);
-void RawClose(RawFD fd);
-
-int FillProcSelfMaps(char buf[], int size, bool* wrote_all);
-void DumpProcSelfMaps(RawFD fd);
-
-#endif /* #ifndef _SYSINFO_H_ */
diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt
new file mode 100644
index 0000000..d409d50
--- /dev/null
+++ b/lib/asan/tests/CMakeLists.txt
@@ -0,0 +1,118 @@
+# Testing rules for AddressSanitizer.
+#
+# These are broken into two buckets. One set of tests directly interacts with
+# the runtime library and checks its functionality. These are the
+# no-instrumentation tests.
+#
+# Another group of tests relies upon the ability to compile the test with
+# the AddressSanitizer instrumentation pass. These tests form "integration" tests
+# and have some elements of version skew -- they test the *host* compiler's
+# instrumentation against the just-built runtime library.
+
+include(CheckCXXCompilerFlag)
+
+include_directories(..)
+include_directories(../..)
+
+set(ASAN_UNITTEST_COMMON_CFLAGS
+ -Wall
+ -Wno-format
+ -fvisibility=hidden
+)
+# Support 64-bit and 32-bit builds.
+if(LLVM_BUILD_32_BITS)
+ list(APPEND ASAN_UNITTEST_COMMON_CFLAGS -m32)
+else()
+ list(APPEND ASAN_UNITTEST_COMMON_CFLAGS -m64)
+endif()
+
+set(ASAN_GTEST_INCLUDE_CFLAGS
+ -I${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest/include
+ -I${LLVM_MAIN_SRC_DIR}/include
+ -I${LLVM_BINARY_DIR}/include
+ -D__STDC_CONSTANT_MACROS
+ -D__STDC_LIMIT_MACROS
+)
+
+set(ASAN_UNITTEST_INSTRUMENTED_CFLAGS
+ ${ASAN_UNITTEST_COMMON_CFLAGS}
+ ${ASAN_GTEST_INCLUDE_CFLAGS}
+ -faddress-sanitizer
+ -O2
+ -g
+ -mllvm "-asan-blacklist=${CMAKE_CURRENT_SOURCE_DIR}/asan_test.ignore"
+ -DASAN_HAS_BLACKLIST=1
+ -DASAN_HAS_EXCEPTIONS=1
+ -DASAN_NEEDS_SEGV=1
+ -DASAN_UAR=0
+)
+
+add_custom_target(AsanTests)
+set_target_properties(AsanTests PROPERTIES FOLDER "ASan tests")
+function(add_asan_test testname)
+ add_unittest(AsanTests ${testname} ${ARGN})
+ if(LLVM_BUILD_32_BITS)
+ target_link_libraries(${testname} clang_rt.asan-i386)
+ else()
+ target_link_libraries(${testname} clang_rt.asan-x86_64)
+ endif()
+ if (APPLE)
+ # Darwin-specific linker flags.
+ set_property(TARGET ${testname} APPEND PROPERTY
+ LINK_FLAGS "-framework Foundation")
+ elseif (UNIX)
+ # Linux-specific linker flags.
+ set_property(TARGET ${testname} APPEND PROPERTY
+ LINK_FLAGS "-lpthread -ldl -export-dynamic")
+ endif()
+ set(add_compile_flags "")
+ get_property(compile_flags TARGET ${testname} PROPERTY COMPILE_FLAGS)
+ foreach(arg ${ASAN_UNITTEST_COMMON_CFLAGS})
+ set(add_compile_flags "${add_compile_flags} ${arg}")
+ endforeach(arg ${ASAN_UNITTEST_COMMON_CFLAGS})
+ set_property(TARGET ${testname} PROPERTY COMPILE_FLAGS
+ "${compile_flags} ${add_compile_flags}")
+endfunction()
+
+set(ASAN_NOINST_TEST_SOURCES
+ asan_noinst_test.cc
+ asan_break_optimization.cc
+)
+
+set(ASAN_INST_TEST_OBJECTS)
+
+# We only support building instrumented tests when we're not cross compiling
+# and targeting a unix-like system where we can predict viable compilation and
+# linking strategies.
+if("${CMAKE_HOST_SYSTEM}" STREQUAL "${CMAKE_SYSTEM}" AND UNIX)
+
+ # This function is a custom routine to manage manually compiling source files
+ # for unit tests with the just-built Clang binary, using the ASan
+ # instrumentation, and linking them into a test executable.
+ function(add_asan_compile_command source extra_cflags)
+ set(output_obj "${source}.asan.o")
+ add_custom_command(
+ OUTPUT ${output_obj}
+ COMMAND clang
+ ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS}
+ ${extra_cflags}
+ -c -o "${output_obj}"
+ ${CMAKE_CURRENT_SOURCE_DIR}/${source}
+ MAIN_DEPENDENCY ${source}
+ DEPENDS clang clang_rt.asan-i386 clang_rt.asan-x86_64 ${ARGN}
+ )
+ endfunction()
+
+ add_asan_compile_command(asan_globals_test.cc "")
+ add_asan_compile_command(asan_test.cc "")
+ list(APPEND ASAN_INST_TEST_OBJECTS asan_globals_test.cc.asan.o
+ asan_test.cc.asan.o)
+ if (APPLE)
+ add_asan_compile_command(asan_mac_test.mm "-ObjC")
+ list(APPEND ASAN_INST_TEST_OBJECTS asan_mac_test.mm.asan.o)
+ endif()
+
+endif()
+
+add_asan_test(AsanTest ${ASAN_NOINST_TEST_SOURCES}
+ ${ASAN_INST_TEST_OBJECTS})
diff --git a/lib/asan/tests/asan_benchmarks_test.cc b/lib/asan/tests/asan_benchmarks_test.cc
index b72cc3f..a142fd2 100644
--- a/lib/asan/tests/asan_benchmarks_test.cc
+++ b/lib/asan/tests/asan_benchmarks_test.cc
@@ -1,4 +1,4 @@
-//===-- asan_benchmarks_test.cc ------------*- C++ -*-===//
+//===-- asan_benchmarks_test.cc ----------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/asan/tests/asan_break_optimization.cc b/lib/asan/tests/asan_break_optimization.cc
index acd0427..022a9f8 100644
--- a/lib/asan/tests/asan_break_optimization.cc
+++ b/lib/asan/tests/asan_break_optimization.cc
@@ -1,4 +1,4 @@
-//===-- asan_break_optimization.cc ------------*- C++ -*-===//
+//===-- asan_break_optimization.cc ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,4 +15,5 @@
// Have this function in a separate file to avoid inlining.
// (Yes, we know about cross-file inlining, but let's assume we don't use it).
extern "C" void break_optimization(void *x) {
+ (void)x;
}
diff --git a/lib/asan/tests/asan_globals_test.cc b/lib/asan/tests/asan_globals_test.cc
index 2303f8b..6467524 100644
--- a/lib/asan/tests/asan_globals_test.cc
+++ b/lib/asan/tests/asan_globals_test.cc
@@ -1,4 +1,4 @@
-//===-- asan_globals_test.cc ------------*- C++ -*-===//
+//===-- asan_globals_test.cc ----------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
deleted file mode 100644
index c26ed92..0000000
--- a/lib/asan/tests/asan_interface_test.cc
+++ /dev/null
@@ -1,334 +0,0 @@
-//===-- asan_interface_test.cc ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-//===----------------------------------------------------------------------===//
-#include <pthread.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "asan_test_config.h"
-#include "asan_test_utils.h"
-#include "asan_interface.h"
-
-TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
- EXPECT_EQ(1, __asan_get_estimated_allocated_size(0));
- const size_t sizes[] = { 1, 30, 1<<30 };
- for (size_t i = 0; i < 3; i++) {
- EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
- }
-}
-
-static const char* kGetAllocatedSizeErrorMsg =
- "__asan_get_allocated_size failed";
-
-TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
- const size_t kArraySize = 100;
- char *array = Ident((char*)malloc(kArraySize));
- int *int_ptr = Ident(new int);
-
- // Allocated memory is owned by allocator. Allocated size should be
- // equal to requested size.
- EXPECT_EQ(true, __asan_get_ownership(array));
- EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
- EXPECT_EQ(true, __asan_get_ownership(int_ptr));
- EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));
-
- // We cannot call GetAllocatedSize on memory we didn't allocate,
- // or on interior pointers (not returned by a previous malloc).
- void *wild_addr = (void*)0x1;
- EXPECT_EQ(false, __asan_get_ownership(wild_addr));
- EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
- EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
- EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
- kGetAllocatedSizeErrorMsg);
-
- // NULL is a valid argument and is owned.
- EXPECT_EQ(true, __asan_get_ownership(NULL));
- EXPECT_EQ(0, __asan_get_allocated_size(NULL));
-
- // When memory is freed, it's not owned, and call to GetAllocatedSize
- // is forbidden.
- free(array);
- EXPECT_EQ(false, __asan_get_ownership(array));
- EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
-
- delete int_ptr;
-}
-
-TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
- size_t before_malloc, after_malloc, after_free;
- char *array;
- const size_t kMallocSize = 100;
- before_malloc = __asan_get_current_allocated_bytes();
-
- array = Ident((char*)malloc(kMallocSize));
- after_malloc = __asan_get_current_allocated_bytes();
- EXPECT_EQ(before_malloc + kMallocSize, after_malloc);
-
- free(array);
- after_free = __asan_get_current_allocated_bytes();
- EXPECT_EQ(before_malloc, after_free);
-}
-
-static void DoDoubleFree() {
- int *x = Ident(new int);
- delete Ident(x);
- delete Ident(x);
-}
-
-// This test is run in a separate process, so that the large malloc'ed
-// chunk won't remain in the free lists after the test.
-// Note: use ASSERT_* instead of EXPECT_* here.
-static void RunGetHeapSizeTestAndDie() {
- size_t old_heap_size, new_heap_size, heap_growth;
- // We are unlikely to have a chunk of this size in the free list.
- static const size_t kLargeMallocSize = 1 << 29; // 512M
- old_heap_size = __asan_get_heap_size();
- fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
- free(Ident(malloc(kLargeMallocSize)));
- new_heap_size = __asan_get_heap_size();
- heap_growth = new_heap_size - old_heap_size;
- fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
- ASSERT_GE(heap_growth, kLargeMallocSize);
- ASSERT_LE(heap_growth, 2 * kLargeMallocSize);
-
- // Now the large chunk should be in the free list, and can be
- // allocated without increasing the heap size.
- old_heap_size = new_heap_size;
- free(Ident(malloc(kLargeMallocSize)));
- heap_growth = __asan_get_heap_size() - old_heap_size;
- fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
- ASSERT_LT(heap_growth, kLargeMallocSize);
-
- // Test passed. Now die with expected double-free.
- DoDoubleFree();
-}
-
-TEST(AddressSanitizerInterface, GetHeapSizeTest) {
- EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
-}
-
-// Note: use ASSERT_* instead of EXPECT_* here.
-static void DoLargeMallocForGetFreeBytesTestAndDie() {
- size_t old_free_bytes, new_free_bytes;
- static const size_t kLargeMallocSize = 1 << 29; // 512M
- // If we malloc and free a large memory chunk, it will not fall
- // into quarantine and will be available for future requests.
- old_free_bytes = __asan_get_free_bytes();
- fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
- fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
- free(Ident(malloc(kLargeMallocSize)));
- new_free_bytes = __asan_get_free_bytes();
- fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
- ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
- // Test passed.
- DoDoubleFree();
-}
-
-TEST(AddressSanitizerInterface, GetFreeBytesTest) {
- static const size_t kNumOfChunks = 100;
- static const size_t kChunkSize = 100;
- char *chunks[kNumOfChunks];
- size_t i;
- size_t old_free_bytes, new_free_bytes;
- // Allocate a small chunk. Now the allocator probably has a lot of these
- // chunks to fulfill future requests, so future requests will decrease
- // the number of free bytes.
- chunks[0] = Ident((char*)malloc(kChunkSize));
- old_free_bytes = __asan_get_free_bytes();
- for (i = 1; i < kNumOfChunks; i++) {
- chunks[i] = Ident((char*)malloc(kChunkSize));
- new_free_bytes = __asan_get_free_bytes();
- EXPECT_LT(new_free_bytes, old_free_bytes);
- old_free_bytes = new_free_bytes;
- }
- // Deleting these chunks will move them to quarantine, so the number of
- // free bytes won't increase.
- for (i = 0; i < kNumOfChunks; i++) {
- free(chunks[i]);
- EXPECT_EQ(old_free_bytes, __asan_get_free_bytes());
- }
- EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
-}
-
-static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
-static const size_t kManyThreadsIterations = 250;
-static const size_t kManyThreadsNumThreads = 200;
-
-void *ManyThreadsWithStatsWorker(void *arg) {
- for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
- for (size_t size_index = 0; size_index < 4; size_index++) {
- free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
- }
- }
- return 0;
-}
-
-TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
- size_t before_test, after_test, i;
- pthread_t threads[kManyThreadsNumThreads];
- before_test = __asan_get_current_allocated_bytes();
- for (i = 0; i < kManyThreadsNumThreads; i++) {
- pthread_create(&threads[i], 0,
- (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
- }
- for (i = 0; i < kManyThreadsNumThreads; i++) {
- pthread_join(threads[i], 0);
- }
- after_test = __asan_get_current_allocated_bytes();
- // ASan stats also reflect memory usage of internal ASan RTL structs,
- // so we can't check for equality here.
- EXPECT_LT(after_test, before_test + (1UL<<20));
-}
-
-TEST(AddressSanitizerInterface, ExitCode) {
- int original_exit_code = __asan_set_error_exit_code(7);
- EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
- EXPECT_EQ(7, __asan_set_error_exit_code(8));
- EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
- EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
- EXPECT_EXIT(DoDoubleFree(),
- ::testing::ExitedWithCode(original_exit_code), "");
-}
-
-static const char* kUseAfterPoisonErrorMessage = "use-after-poison";
-
-#define ACCESS(ptr, offset) Ident(*(ptr + offset))
-
-#define DIE_ON_ACCESS(ptr, offset) \
- EXPECT_DEATH(Ident(*(ptr + offset)), kUseAfterPoisonErrorMessage)
-
-TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
- char *array = Ident((char*)malloc(120));
- // poison array[40..80)
- ASAN_POISON_MEMORY_REGION(array + 40, 40);
- ACCESS(array, 39);
- ACCESS(array, 80);
- DIE_ON_ACCESS(array, 40);
- DIE_ON_ACCESS(array, 60);
- DIE_ON_ACCESS(array, 79);
- ASAN_UNPOISON_MEMORY_REGION(array + 40, 40);
- // access previously poisoned memory.
- ACCESS(array, 40);
- ACCESS(array, 79);
- free(array);
-}
-
-TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
- char *array = Ident((char*)malloc(120));
- // Poison [0..40) and [80..120)
- ASAN_POISON_MEMORY_REGION(array, 40);
- ASAN_POISON_MEMORY_REGION(array + 80, 40);
- DIE_ON_ACCESS(array, 20);
- ACCESS(array, 60);
- DIE_ON_ACCESS(array, 100);
- // Poison whole array - [0..120)
- ASAN_POISON_MEMORY_REGION(array, 120);
- DIE_ON_ACCESS(array, 60);
- // Unpoison [24..96)
- ASAN_UNPOISON_MEMORY_REGION(array + 24, 72);
- DIE_ON_ACCESS(array, 23);
- ACCESS(array, 24);
- ACCESS(array, 60);
- ACCESS(array, 95);
- DIE_ON_ACCESS(array, 96);
- free(array);
-}
-
-TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
- // Vector of capacity 20
- char *vec = Ident((char*)malloc(20));
- ASAN_POISON_MEMORY_REGION(vec, 20);
- for (size_t i = 0; i < 7; i++) {
- // Simulate push_back.
- ASAN_UNPOISON_MEMORY_REGION(vec + i, 1);
- ACCESS(vec, i);
- DIE_ON_ACCESS(vec, i + 1);
- }
- for (size_t i = 7; i > 0; i--) {
- // Simulate pop_back.
- ASAN_POISON_MEMORY_REGION(vec + i - 1, 1);
- DIE_ON_ACCESS(vec, i - 1);
- if (i > 1) ACCESS(vec, i - 2);
- }
- free(vec);
-}
-
-// Make sure that each aligned block of size "2^granularity" doesn't have
-// "true" value before "false" value.
-static void MakeShadowValid(bool *shadow, int length, int granularity) {
- bool can_be_poisoned = true;
- for (int i = length - 1; i >= 0; i--) {
- can_be_poisoned &= shadow[i];
- shadow[i] &= can_be_poisoned;
- if (i % (1 << granularity) == 0) {
- can_be_poisoned = true;
- }
- }
-}
-
-TEST(AddressSanitizerInterface, PoisoningStressTest) {
- const size_t kSize = 24;
- bool expected[kSize];
- char *arr = Ident((char*)malloc(kSize));
- for (size_t l1 = 0; l1 < kSize; l1++) {
- for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
- for (size_t l2 = 0; l2 < kSize; l2++) {
- for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
- // Poison [l1, l1+s1), [l2, l2+s2) and check result.
- ASAN_UNPOISON_MEMORY_REGION(arr, kSize);
- ASAN_POISON_MEMORY_REGION(arr + l1, s1);
- ASAN_POISON_MEMORY_REGION(arr + l2, s2);
- memset(expected, false, kSize);
- memset(expected + l1, true, s1);
- MakeShadowValid(expected, 24, /*granularity*/ 3);
- memset(expected + l2, true, s2);
- MakeShadowValid(expected, 24, /*granularity*/ 3);
- for (size_t i = 0; i < kSize; i++) {
- ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
- }
- // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
- ASAN_POISON_MEMORY_REGION(arr, kSize);
- ASAN_UNPOISON_MEMORY_REGION(arr + l1, s1);
- ASAN_UNPOISON_MEMORY_REGION(arr + l2, s2);
- memset(expected, true, kSize);
- memset(expected + l1, false, s1);
- MakeShadowValid(expected, 24, /*granularity*/ 3);
- memset(expected + l2, false, s2);
- MakeShadowValid(expected, 24, /*granularity*/ 3);
- for (size_t i = 0; i < kSize; i++) {
- ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
- }
- }
- }
- }
- }
-}
-
-static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
-static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";
-
-TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
- char *array = Ident((char*)malloc(120));
- ASAN_UNPOISON_MEMORY_REGION(array, 120);
- // Try to unpoison not owned memory
- EXPECT_DEATH(ASAN_UNPOISON_MEMORY_REGION(array, 121),
- kInvalidUnpoisonMessage);
- EXPECT_DEATH(ASAN_UNPOISON_MEMORY_REGION(array - 1, 120),
- kInvalidUnpoisonMessage);
-
- ASAN_POISON_MEMORY_REGION(array, 120);
- // Try to poison not owned memory.
- EXPECT_DEATH(ASAN_POISON_MEMORY_REGION(array, 121), kInvalidPoisonMessage);
- EXPECT_DEATH(ASAN_POISON_MEMORY_REGION(array - 1, 120),
- kInvalidPoisonMessage);
- free(array);
-}
diff --git a/lib/asan/tests/asan_mac_test.h b/lib/asan/tests/asan_mac_test.h
index e3ad827..441547a 100644
--- a/lib/asan/tests/asan_mac_test.h
+++ b/lib/asan/tests/asan_mac_test.h
@@ -1,5 +1,5 @@
extern "C" {
- void CFAllocatorDefaultDoubleFree();
+ void *CFAllocatorDefaultDoubleFree(void *unused);
void CFAllocatorSystemDefaultDoubleFree();
void CFAllocatorMallocDoubleFree();
void CFAllocatorMallocZoneDoubleFree();
@@ -13,4 +13,7 @@ extern "C" {
void TestGCDSourceEvent();
void TestGCDSourceCancel();
void TestGCDGroupAsync();
+ void TestOOBNSObjects();
+ void TestNSURLDeallocation();
+ void TestPassCFMemoryToAnotherThread();
}
diff --git a/lib/asan/tests/asan_mac_test.mm b/lib/asan/tests/asan_mac_test.mm
index b5dbbde..4e5873b 100644
--- a/lib/asan/tests/asan_mac_test.mm
+++ b/lib/asan/tests/asan_mac_test.mm
@@ -7,11 +7,14 @@
#import <CoreFoundation/CFBase.h>
#import <Foundation/NSObject.h>
+#import <Foundation/NSURL.h>
-void CFAllocatorDefaultDoubleFree() {
+// This is a (void*)(void*) function so it can be passed to pthread_create.
+void *CFAllocatorDefaultDoubleFree(void *unused) {
void *mem = CFAllocatorAllocate(kCFAllocatorDefault, 5, 0);
CFAllocatorDeallocate(kCFAllocatorDefault, mem);
CFAllocatorDeallocate(kCFAllocatorDefault, mem);
+ return 0;
}
void CFAllocatorSystemDefaultDoubleFree() {
@@ -32,6 +35,10 @@ void CFAllocatorMallocZoneDoubleFree() {
CFAllocatorDeallocate(kCFAllocatorMallocZone, mem);
}
+__attribute__((noinline))
+void access_memory(char *a) {
+ *a = 0;
+}
// Test the +load instrumentation.
// Because the +load methods are invoked before anything else is initialized,
@@ -51,7 +58,7 @@ char kStartupStr[] =
+(void) load {
for (int i = 0; i < strlen(kStartupStr); i++) {
- volatile char ch = kStartupStr[i]; // make sure no optimizations occur.
+ access_memory(&kStartupStr[i]); // make sure no optimizations occur.
}
// Don't print anything here not to interfere with the death tests.
}
@@ -66,7 +73,7 @@ void worker_do_alloc(int size) {
void worker_do_crash(int size) {
char * volatile mem = malloc(size);
- mem[size] = 0; // BOOM
+ access_memory(&mem[size]); // BOOM
free(mem);
}
@@ -162,7 +169,7 @@ void TestGCDSourceEvent() {
dispatch_source_set_timer(timer, milestone, DISPATCH_TIME_FOREVER, 0);
char * volatile mem = malloc(10);
dispatch_source_set_event_handler(timer, ^{
- mem[10] = 1;
+ access_memory(&mem[10]);
});
dispatch_resume(timer);
sleep(2);
@@ -186,7 +193,7 @@ void TestGCDSourceCancel() {
dispatch_source_cancel(timer);
});
dispatch_source_set_cancel_handler(timer, ^{
- mem[10] = 1;
+ access_memory(&mem[10]);
});
dispatch_resume(timer);
sleep(2);
@@ -197,7 +204,34 @@ void TestGCDGroupAsync() {
dispatch_group_t group = dispatch_group_create();
char * volatile mem = malloc(10);
dispatch_group_async(group, queue, ^{
- mem[10] = 1;
+ access_memory(&mem[10]);
});
dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
}
+
+@interface FixedArray : NSObject {
+ int items[10];
+}
+@end
+
+@implementation FixedArray
+-(int) access: (int)index {
+ return items[index];
+}
+@end
+
+void TestOOBNSObjects() {
+ id anObject = [FixedArray new];
+ [anObject access:1];
+ [anObject access:11];
+ [anObject release];
+}
+
+void TestNSURLDeallocation() {
+ NSURL *base =
+ [[NSURL alloc] initWithString:@"file://localhost/Users/glider/Library/"];
+ volatile NSURL *u =
+ [[NSURL alloc] initWithString:@"Saved Application State"
+ relativeToURL:base];
+ [u release];
+}
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index 204c0da..44d4c3c 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -1,4 +1,4 @@
-//===-- asan_noinst_test.cc ------------*- C++ -*-===//
+//===-- asan_noinst_test.cc ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,17 +21,18 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
-#include <vector>
+#include <string.h> // for memset()
#include <algorithm>
+#include <vector>
#include "gtest/gtest.h"
// Simple stand-alone pseudorandom number generator.
// Current algorithm is ANSI C linear congruential PRNG.
-static inline uint32_t my_rand(uint32_t* state) {
+static inline u32 my_rand(u32* state) {
return (*state = *state * 1103515245 + 12345) >> 16;
}
-static uint32_t global_seed = 0;
+static u32 global_seed = 0;
TEST(AddressSanitizer, InternalSimpleDeathTest) {
@@ -39,7 +40,7 @@ TEST(AddressSanitizer, InternalSimpleDeathTest) {
}
static void MallocStress(size_t n) {
- uint32_t seed = my_rand(&global_seed);
+ u32 seed = my_rand(&global_seed);
__asan::AsanStackTrace stack1;
stack1.trace[0] = 0xa123;
stack1.trace[1] = 0xa456;
@@ -92,16 +93,16 @@ TEST(AddressSanitizer, NoInstMallocTest) {
#endif
}
-static void PrintShadow(const char *tag, uintptr_t ptr, size_t size) {
+static void PrintShadow(const char *tag, uptr ptr, size_t size) {
fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
- uintptr_t prev_shadow = 0;
- for (intptr_t i = -32; i < (intptr_t)size + 32; i++) {
- uintptr_t shadow = __asan::MemToShadow(ptr + i);
- if (i == 0 || i == (intptr_t)size)
+ uptr prev_shadow = 0;
+ for (sptr i = -32; i < (sptr)size + 32; i++) {
+ uptr shadow = __asan::MemToShadow(ptr + i);
+ if (i == 0 || i == (sptr)size)
fprintf(stderr, ".");
if (shadow != prev_shadow) {
prev_shadow = shadow;
- fprintf(stderr, "%02x", (int)*(uint8_t*)shadow);
+ fprintf(stderr, "%02x", (int)*(u8*)shadow);
}
}
fprintf(stderr, "\n");
@@ -110,13 +111,13 @@ static void PrintShadow(const char *tag, uintptr_t ptr, size_t size) {
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
for (size_t size = 1; size <= 513; size++) {
char *ptr = new char[size];
- PrintShadow("m", (uintptr_t)ptr, size);
+ PrintShadow("m", (uptr)ptr, size);
delete [] ptr;
- PrintShadow("f", (uintptr_t)ptr, size);
+ PrintShadow("f", (uptr)ptr, size);
}
}
-static uintptr_t pc_array[] = {
+static uptr pc_array[] = {
#if __WORDSIZE == 64
0x7effbf756068ULL,
0x7effbf75e5abULL,
@@ -207,19 +208,20 @@ static uintptr_t pc_array[] = {
};
void CompressStackTraceTest(size_t n_iter) {
- uint32_t seed = my_rand(&global_seed);
+ u32 seed = my_rand(&global_seed);
const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
- uint32_t compressed[2 * kNumPcs];
+ u32 compressed[2 * kNumPcs];
for (size_t iter = 0; iter < n_iter; iter++) {
std::random_shuffle(pc_array, pc_array + kNumPcs);
__asan::AsanStackTrace stack0, stack1;
stack0.CopyFrom(pc_array, kNumPcs);
- stack0.size = std::max((size_t)1, (size_t)my_rand(&seed) % stack0.size);
+ stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
size_t compress_size =
std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
size_t n_frames =
__asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
+ Ident(n_frames);
assert(n_frames <= stack0.size);
__asan::AsanStackTrace::UncompressStack(&stack1, compressed, compress_size);
assert(stack1.size == n_frames);
@@ -235,7 +237,7 @@ TEST(AddressSanitizer, CompressStackTraceTest) {
void CompressStackTraceBenchmark(size_t n_iter) {
const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
- uint32_t compressed[2 * kNumPcs];
+ u32 compressed[2 * kNumPcs];
std::random_shuffle(pc_array, pc_array + kNumPcs);
__asan::AsanStackTrace stack0;
@@ -274,7 +276,8 @@ TEST(AddressSanitizer, QuarantineTest) {
}
void *ThreadedQuarantineTestWorker(void *unused) {
- uint32_t seed = my_rand(&global_seed);
+ (void)unused;
+ u32 seed = my_rand(&global_seed);
__asan::AsanStackTrace stack;
stack.trace[0] = 0x890;
stack.size = 1;
@@ -301,6 +304,7 @@ TEST(AddressSanitizer, ThreadedQuarantineTest) {
}
void *ThreadedOneSizeMallocStress(void *unused) {
+ (void)unused;
__asan::AsanStackTrace stack;
stack.trace[0] = 0x890;
stack.size = 1;
@@ -327,3 +331,371 @@ TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
pthread_join(t[i], 0);
}
}
+
+TEST(AddressSanitizer, MemsetWildAddressTest) {
+ typedef void*(*memset_p)(void*, int, size_t);
+ // Prevent inlining of memset().
+ volatile memset_p libc_memset = (memset_p)memset;
+ EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
+ "unknown-crash.*low shadow");
+ EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
+ "unknown-crash.*shadow gap");
+ EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
+ "unknown-crash.*high shadow");
+}
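As background for the shadow-region constants used above: ASan computes shadow addresses with a direct mapping, so a wild memset into the low shadow, shadow gap, or high shadow lands on memory the runtime reserved for itself. A minimal sketch of that mapping, assuming the default scale of 3 (8 application bytes per shadow byte); the offset value is hypothetical and platform-dependent:

typedef unsigned long uptr_sketch;
const uptr_sketch kShadowScaleSketch = 3;            // assumption: default scale
const uptr_sketch kShadowOffsetSketch = 0x20000000;  // hypothetical offset

// Each shadow byte describes (1 << scale) application bytes.
uptr_sketch MemToShadowSketch(uptr_sketch addr) {
  return (addr >> kShadowScaleSketch) + kShadowOffsetSketch;
}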
+
+TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
+ EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
+ const size_t sizes[] = { 1, 30, 1<<30 };
+ for (size_t i = 0; i < 3; i++) {
+ EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
+ }
+}
+
+static const char* kGetAllocatedSizeErrorMsg =
+ "attempting to call __asan_get_allocated_size()";
+
+TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
+ const size_t kArraySize = 100;
+ char *array = Ident((char*)malloc(kArraySize));
+ int *int_ptr = Ident(new int);
+
+ // Allocated memory is owned by the allocator. The allocated size should be
+ // equal to the requested size.
+ EXPECT_EQ(true, __asan_get_ownership(array));
+ EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
+ EXPECT_EQ(true, __asan_get_ownership(int_ptr));
+ EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));
+
+ // We cannot call GetAllocatedSize on memory we didn't map, or on
+ // interior pointers (not returned by a previous malloc).
+ void *wild_addr = (void*)0x1;
+ EXPECT_EQ(false, __asan_get_ownership(wild_addr));
+ EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
+ EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
+ EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
+ kGetAllocatedSizeErrorMsg);
+
+ // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
+ EXPECT_EQ(false, __asan_get_ownership(NULL));
+ EXPECT_EQ(0U, __asan_get_allocated_size(NULL));
+
+ // When memory is freed, it's not owned, and calling GetAllocatedSize
+ // is forbidden.
+ free(array);
+ EXPECT_EQ(false, __asan_get_ownership(array));
+ EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
+
+ delete int_ptr;
+}
+
+TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
+ size_t before_malloc, after_malloc, after_free;
+ char *array;
+ const size_t kMallocSize = 100;
+ before_malloc = __asan_get_current_allocated_bytes();
+
+ array = Ident((char*)malloc(kMallocSize));
+ after_malloc = __asan_get_current_allocated_bytes();
+ EXPECT_EQ(before_malloc + kMallocSize, after_malloc);
+
+ free(array);
+ after_free = __asan_get_current_allocated_bytes();
+ EXPECT_EQ(before_malloc, after_free);
+}
+
+static void DoDoubleFree() {
+ int *x = Ident(new int);
+ delete Ident(x);
+ delete Ident(x);
+}
+
+// This test is run in a separate process, so that the large malloced
+// chunk won't remain in the free lists after the test.
+// Note: use ASSERT_* instead of EXPECT_* here.
+static void RunGetHeapSizeTestAndDie() {
+ size_t old_heap_size, new_heap_size, heap_growth;
+ // We are unlikely to have a chunk of this size in the free list.
+ static const size_t kLargeMallocSize = 1 << 29; // 512M
+ old_heap_size = __asan_get_heap_size();
+ fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
+ free(Ident(malloc(kLargeMallocSize)));
+ new_heap_size = __asan_get_heap_size();
+ heap_growth = new_heap_size - old_heap_size;
+ fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
+ ASSERT_GE(heap_growth, kLargeMallocSize);
+ ASSERT_LE(heap_growth, 2 * kLargeMallocSize);
+
+ // Now the large chunk should fall into the free list, and can be
+ // allocated without increasing the heap size.
+ old_heap_size = new_heap_size;
+ free(Ident(malloc(kLargeMallocSize)));
+ heap_growth = __asan_get_heap_size() - old_heap_size;
+ fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
+ ASSERT_LT(heap_growth, kLargeMallocSize);
+
+ // Test passed. Now die with expected double-free.
+ DoDoubleFree();
+}
+
+TEST(AddressSanitizerInterface, GetHeapSizeTest) {
+ EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
+}
+
+// Note: use ASSERT_* instead of EXPECT_* here.
+static void DoLargeMallocForGetFreeBytesTestAndDie() {
+ size_t old_free_bytes, new_free_bytes;
+ static const size_t kLargeMallocSize = 1 << 29; // 512M
+ // If we malloc and free a large memory chunk, it will not fall
+ // into quarantine and will be available for future requests.
+ old_free_bytes = __asan_get_free_bytes();
+ fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
+ fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
+ free(Ident(malloc(kLargeMallocSize)));
+ new_free_bytes = __asan_get_free_bytes();
+ fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
+ ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
+ // Test passed.
+ DoDoubleFree();
+}
+
+TEST(AddressSanitizerInterface, GetFreeBytesTest) {
+ static const size_t kNumOfChunks = 100;
+ static const size_t kChunkSize = 100;
+ char *chunks[kNumOfChunks];
+ size_t i;
+ size_t old_free_bytes, new_free_bytes;
+ // Allocate a small chunk. Now the allocator probably has a lot of these
+ // chunks to fulfill future requests, so future requests will decrease
+ // the number of free bytes.
+ chunks[0] = Ident((char*)malloc(kChunkSize));
+ old_free_bytes = __asan_get_free_bytes();
+ for (i = 1; i < kNumOfChunks; i++) {
+ chunks[i] = Ident((char*)malloc(kChunkSize));
+ new_free_bytes = __asan_get_free_bytes();
+ EXPECT_LT(new_free_bytes, old_free_bytes);
+ old_free_bytes = new_free_bytes;
+ }
+ EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
+}
+
+static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
+static const size_t kManyThreadsIterations = 250;
+static const size_t kManyThreadsNumThreads = (__WORDSIZE == 32) ? 40 : 200;
+
+void *ManyThreadsWithStatsWorker(void *arg) {
+ (void)arg;
+ for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
+ for (size_t size_index = 0; size_index < 4; size_index++) {
+ free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
+ }
+ }
+ return 0;
+}
+
+TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
+ size_t before_test, after_test, i;
+ pthread_t threads[kManyThreadsNumThreads];
+ before_test = __asan_get_current_allocated_bytes();
+ for (i = 0; i < kManyThreadsNumThreads; i++) {
+ pthread_create(&threads[i], 0,
+ (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
+ }
+ for (i = 0; i < kManyThreadsNumThreads; i++) {
+ pthread_join(threads[i], 0);
+ }
+ after_test = __asan_get_current_allocated_bytes();
+ // ASan stats also reflect memory usage of internal ASan RTL structs,
+ // so we can't check for equality here.
+ EXPECT_LT(after_test, before_test + (1UL<<20));
+}
+
+TEST(AddressSanitizerInterface, ExitCode) {
+ int original_exit_code = __asan_set_error_exit_code(7);
+ EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
+ EXPECT_EQ(7, __asan_set_error_exit_code(8));
+ EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
+ EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
+ EXPECT_EXIT(DoDoubleFree(),
+ ::testing::ExitedWithCode(original_exit_code), "");
+}
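A minimal sketch of how a client might use this interface outside the tests: set a distinctive exit code at startup so a wrapper can tell ASan-detected errors apart from ordinary failures. The prototype is the one exercised above; the chosen code (86) and the surrounding program are hypothetical.

extern "C" int __asan_set_error_exit_code(int exit_code);

int main(int argc, char **argv) {
  // Returns the previous exit code, which could be restored later if needed.
  __asan_set_error_exit_code(86);
  // ... run the real work under ASan ...
  return 0;
}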
+
+static void MyDeathCallback() {
+ fprintf(stderr, "MyDeathCallback\n");
+}
+
+TEST(AddressSanitizerInterface, DeathCallbackTest) {
+ __asan_set_death_callback(MyDeathCallback);
+ EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
+ __asan_set_death_callback(NULL);
+}
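Similarly, the death callback gives an application one last chance to run cleanup before ASan aborts on an error. A minimal sketch, assuming the only cleanup needed is flushing stdio buffers:

#include <stdio.h>

extern "C" void __asan_set_death_callback(void (*callback)(void));

static void FlushBeforeAsanDies() {
  fflush(NULL);  // flush buffered diagnostics before the process dies
}

// Somewhere early in program startup:
//   __asan_set_death_callback(FlushBeforeAsanDies);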
+
+static const char* kUseAfterPoisonErrorMessage = "use-after-poison";
+
+#define GOOD_ACCESS(ptr, offset) \
+ EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))
+
+#define BAD_ACCESS(ptr, offset) \
+ EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))
+
+TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
+ char *array = Ident((char*)malloc(120));
+ // poison array[40..80)
+ __asan_poison_memory_region(array + 40, 40);
+ GOOD_ACCESS(array, 39);
+ GOOD_ACCESS(array, 80);
+ BAD_ACCESS(array, 40);
+ BAD_ACCESS(array, 60);
+ BAD_ACCESS(array, 79);
+ EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
+ kUseAfterPoisonErrorMessage);
+ __asan_unpoison_memory_region(array + 40, 40);
+ // access previously poisoned memory.
+ GOOD_ACCESS(array, 40);
+ GOOD_ACCESS(array, 79);
+ free(array);
+}
+
+TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
+ char *array = Ident((char*)malloc(120));
+ // Poison [0..40) and [80..120)
+ __asan_poison_memory_region(array, 40);
+ __asan_poison_memory_region(array + 80, 40);
+ BAD_ACCESS(array, 20);
+ GOOD_ACCESS(array, 60);
+ BAD_ACCESS(array, 100);
+ // Poison whole array - [0..120)
+ __asan_poison_memory_region(array, 120);
+ BAD_ACCESS(array, 60);
+ // Unpoison [24..96)
+ __asan_unpoison_memory_region(array + 24, 72);
+ BAD_ACCESS(array, 23);
+ GOOD_ACCESS(array, 24);
+ GOOD_ACCESS(array, 60);
+ GOOD_ACCESS(array, 95);
+ BAD_ACCESS(array, 96);
+ free(array);
+}
+
+TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
+ // Vector of capacity 20
+ char *vec = Ident((char*)malloc(20));
+ __asan_poison_memory_region(vec, 20);
+ for (size_t i = 0; i < 7; i++) {
+ // Simulate push_back.
+ __asan_unpoison_memory_region(vec + i, 1);
+ GOOD_ACCESS(vec, i);
+ BAD_ACCESS(vec, i + 1);
+ }
+ for (size_t i = 7; i > 0; i--) {
+ // Simulate pop_back.
+ __asan_poison_memory_region(vec + i - 1, 1);
+ BAD_ACCESS(vec, i - 1);
+ if (i > 1) GOOD_ACCESS(vec, i - 2);
+ }
+ free(vec);
+}
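The test above simulates how a container can keep its unused capacity poisoned. A minimal sketch of the same idea in client code, built around a hypothetical fixed-capacity buffer; the extern declarations are illustrative, and real code would include ASan's interface header instead:

#include <cstddef>

extern "C" {
void __asan_poison_memory_region(void const volatile *addr, size_t size);
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
}

class PoisonedBuffer {  // hypothetical helper, capacity checks omitted for brevity
 public:
  PoisonedBuffer(char *storage, size_t capacity)
      : data_(storage), size_(0), capacity_(capacity) {
    __asan_poison_memory_region(data_, capacity_);  // unused tail starts poisoned
  }
  void push_back(char c) {
    __asan_unpoison_memory_region(data_ + size_, 1);
    data_[size_++] = c;
  }
  void pop_back() {
    __asan_poison_memory_region(data_ + --size_, 1);
  }
 private:
  char *data_;
  size_t size_, capacity_;
};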
+
+// Make sure that each aligned block of size "2^granularity" doesn't have
+// a "true" value before a "false" value.
+static void MakeShadowValid(bool *shadow, int length, int granularity) {
+ bool can_be_poisoned = true;
+ for (int i = length - 1; i >= 0; i--) {
+ if (!shadow[i])
+ can_be_poisoned = false;
+ if (!can_be_poisoned)
+ shadow[i] = false;
+ if (i % (1 << granularity) == 0) {
+ can_be_poisoned = true;
+ }
+ }
+}
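A worked example of the rule MakeShadowValid encodes (granularity 3, i.e. 8-byte granules): poisoning can only survive as a suffix of a granule, because a granule's addressable bytes always come first.

bool shadow[8] = {false, true, true, false, false, false, false, false};
MakeShadowValid(shadow, 8, /*granularity*/ 3);
// Every entry is now false: the requested poisoned run [1..3) was not a
// suffix of its 8-byte granule, so it cannot be represented and is dropped.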
+
+TEST(AddressSanitizerInterface, PoisoningStressTest) {
+ const size_t kSize = 24;
+ bool expected[kSize];
+ char *arr = Ident((char*)malloc(kSize));
+ for (size_t l1 = 0; l1 < kSize; l1++) {
+ for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
+ for (size_t l2 = 0; l2 < kSize; l2++) {
+ for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
+ // Poison [l1, l1+s1), [l2, l2+s2) and check result.
+ __asan_unpoison_memory_region(arr, kSize);
+ __asan_poison_memory_region(arr + l1, s1);
+ __asan_poison_memory_region(arr + l2, s2);
+ memset(expected, false, kSize);
+ memset(expected + l1, true, s1);
+ MakeShadowValid(expected, kSize, /*granularity*/ 3);
+ memset(expected + l2, true, s2);
+ MakeShadowValid(expected, kSize, /*granularity*/ 3);
+ for (size_t i = 0; i < kSize; i++) {
+ ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
+ }
+ // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
+ __asan_poison_memory_region(arr, kSize);
+ __asan_unpoison_memory_region(arr + l1, s1);
+ __asan_unpoison_memory_region(arr + l2, s2);
+ memset(expected, true, kSize);
+ memset(expected + l1, false, s1);
+ MakeShadowValid(expected, kSize, /*granularity*/ 3);
+ memset(expected + l2, false, s2);
+ MakeShadowValid(expected, kSize, /*granularity*/ 3);
+ for (size_t i = 0; i < kSize; i++) {
+ ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
+ }
+ }
+ }
+ }
+ }
+}
+
+static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
+static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";
+
+TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
+ char *array = Ident((char*)malloc(120));
+ __asan_unpoison_memory_region(array, 120);
+ // Try to unpoison memory that we don't own.
+ EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
+ kInvalidUnpoisonMessage);
+ EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
+ kInvalidUnpoisonMessage);
+
+ __asan_poison_memory_region(array, 120);
+ // Try to poison memory that we don't own.
+ EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
+ EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
+ kInvalidPoisonMessage);
+ free(array);
+}
+
+static void ErrorReportCallbackOneToZ(const char *report) {
+ write(2, "ABCDEF", 6);
+}
+
+TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
+ __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
+ EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1), "ABCDEF");
+ __asan_set_error_report_callback(NULL);
+}
+
+TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
+ std::vector<char *> pointers;
+ std::vector<size_t> sizes;
+ const size_t kNumMallocs =
+ (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
+ for (size_t i = 0; i < kNumMallocs; i++) {
+ size_t size = i * 100 + 1;
+ pointers.push_back((char*)malloc(size));
+ sizes.push_back(size);
+ }
+ for (size_t i = 0; i < 4000000; i++) {
+ EXPECT_FALSE(__asan_get_ownership(&pointers));
+ EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
+ size_t idx = i % kNumMallocs;
+ EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
+ EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
+ }
+ for (size_t i = 0, n = pointers.size(); i < n; i++)
+ free(pointers[i]);
+}
diff --git a/lib/asan/tests/asan_racy_double_free_test.cc b/lib/asan/tests/asan_racy_double_free_test.cc
new file mode 100644
index 0000000..deeeb4f
--- /dev/null
+++ b/lib/asan/tests/asan_racy_double_free_test.cc
@@ -0,0 +1,32 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+const int N = 1000;
+void *x[N];
+
+void *Thread1(void *unused) {
+ for (int i = 0; i < N; i++) {
+ fprintf(stderr, "%s %d\n", __FUNCTION__, i);
+ free(x[i]);
+ }
+ return NULL;
+}
+
+void *Thread2(void *unused) {
+ for (int i = 0; i < N; i++) {
+ fprintf(stderr, "%s %d\n", __FUNCTION__, i);
+ free(x[i]);
+ }
+ return NULL;
+}
+
+int main() {
+ for (int i = 0; i < N; i++)
+ x[i] = malloc(128);
+ pthread_t t[2];
+ pthread_create(&t[0], 0, Thread1, 0);
+ pthread_create(&t[1], 0, Thread2, 0);
+ pthread_join(t[0], 0);
+ pthread_join(t[1], 0);
+}
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 0ff72d3..8e967e9 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -1,4 +1,4 @@
-//===-- asan_test.cc ------------*- C++ -*-===//
+//===-- asan_test.cc ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,7 +20,7 @@
#include <setjmp.h>
#include <assert.h>
-#if defined(__i386__) or defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__)
#include <emmintrin.h>
#endif
@@ -29,13 +29,10 @@
#ifndef __APPLE__
#include <malloc.h>
-#endif // __APPLE__
-
-#ifdef __APPLE__
-static bool APPLE = true;
#else
-static bool APPLE = false;
-#endif
+#include <AvailabilityMacros.h> // For MAC_OS_X_VERSION_*
+#include <CoreFoundation/CFString.h>
+#endif // __APPLE__
#if ASAN_HAS_EXCEPTIONS
# define ASAN_THROW(x) throw (x)
@@ -61,99 +58,14 @@ static inline uint32_t my_rand(uint32_t* state) {
static uint32_t global_seed = 0;
-class ObjdumpOfMyself {
- public:
- explicit ObjdumpOfMyself(const string &binary) {
- is_correct = true;
- string objdump_name = APPLE ? "gobjdump" : "objdump";
- string prog = objdump_name + " -d " + binary;
- // TODO(glider): popen() succeeds even if the file does not exist.
- FILE *pipe = popen(prog.c_str(), "r");
- string objdump;
- if (pipe) {
- const int kBuffSize = 4096;
- char buff[kBuffSize+1];
- int read_bytes;
- while ((read_bytes = fread(buff, 1, kBuffSize, pipe)) > 0) {
- buff[read_bytes] = 0;
- objdump.append(buff);
- }
- pclose(pipe);
- } else {
- is_correct = false;
- }
- // cut the objdump into functions
- string fn, next_fn;
- size_t next_start;
- for (size_t start = fn_start(objdump, 0, &fn);
- start != string::npos;
- start = next_start, fn = next_fn) {
- next_start = fn_start(objdump, start, &next_fn);
- // fprintf(stderr, "start: %d next_start = %d fn: %s\n",
- // (int)start, (int)next_start, fn.c_str());
- // Mac OS adds the "_" prefix to function names.
- if (fn.find(APPLE ? "_Disasm" : "Disasm") == string::npos) {
- continue;
- }
- string fn_body = objdump.substr(start, next_start - start);
- // fprintf(stderr, "%s:\n%s", fn.c_str(), fn_body.c_str());
- functions_[fn] = fn_body;
- }
- }
-
- string &GetFuncDisasm(const string &fn) {
- return functions_[fn];
- }
-
- int CountInsnInFunc(const string &fn, const vector<string> &insns) {
- // Mac OS adds the "_" prefix to function names.
- string fn_ref = APPLE ? "_" + fn : fn;
- const string &disasm = GetFuncDisasm(fn_ref);
- if (disasm.empty()) return -1;
- size_t counter = 0;
- for (size_t i = 0; i < insns.size(); i++) {
- size_t pos = 0;
- while ((pos = disasm.find(insns[i], pos)) != string::npos) {
- counter++;
- pos++;
- }
- }
- return counter;
- }
-
- bool IsCorrect() { return is_correct; }
-
- private:
- size_t fn_start(const string &objdump, size_t start_pos, string *fn) {
- size_t pos = objdump.find(">:\n", start_pos);
- if (pos == string::npos)
- return string::npos;
- size_t beg = pos;
- while (beg > 0 && objdump[beg - 1] != '<')
- beg--;
- *fn = objdump.substr(beg, pos - beg);
- return pos + 3;
- }
-
- map<string, string> functions_;
- bool is_correct;
-};
-
-static ObjdumpOfMyself *objdump_of_myself() {
- static ObjdumpOfMyself *o = new ObjdumpOfMyself(progname);
- return o;
-}
-
const size_t kLargeMalloc = 1 << 24;
-template<class T>
-__attribute__((noinline))
-void asan_write(T *a) {
+template<typename T>
+NOINLINE void asan_write(T *a) {
*a = 0;
}
-__attribute__((noinline))
-void asan_write_sized_aligned(uint8_t *p, size_t size) {
+NOINLINE void asan_write_sized_aligned(uint8_t *p, size_t size) {
EXPECT_EQ(0, ((uintptr_t)p % size));
if (size == 1) asan_write((uint8_t*)p);
else if (size == 2) asan_write((uint16_t*)p);
@@ -161,45 +73,41 @@ void asan_write_sized_aligned(uint8_t *p, size_t size) {
else if (size == 8) asan_write((uint64_t*)p);
}
-__attribute__((noinline)) void *malloc_fff(size_t size) {
+NOINLINE void *malloc_fff(size_t size) {
void *res = malloc/**/(size); break_optimization(0); return res;}
-__attribute__((noinline)) void *malloc_eee(size_t size) {
+NOINLINE void *malloc_eee(size_t size) {
void *res = malloc_fff(size); break_optimization(0); return res;}
-__attribute__((noinline)) void *malloc_ddd(size_t size) {
+NOINLINE void *malloc_ddd(size_t size) {
void *res = malloc_eee(size); break_optimization(0); return res;}
-__attribute__((noinline)) void *malloc_ccc(size_t size) {
+NOINLINE void *malloc_ccc(size_t size) {
void *res = malloc_ddd(size); break_optimization(0); return res;}
-__attribute__((noinline)) void *malloc_bbb(size_t size) {
+NOINLINE void *malloc_bbb(size_t size) {
void *res = malloc_ccc(size); break_optimization(0); return res;}
-__attribute__((noinline)) void *malloc_aaa(size_t size) {
+NOINLINE void *malloc_aaa(size_t size) {
void *res = malloc_bbb(size); break_optimization(0); return res;}
#ifndef __APPLE__
-__attribute__((noinline)) void *memalign_fff(size_t alignment, size_t size) {
+NOINLINE void *memalign_fff(size_t alignment, size_t size) {
void *res = memalign/**/(alignment, size); break_optimization(0); return res;}
-__attribute__((noinline)) void *memalign_eee(size_t alignment, size_t size) {
+NOINLINE void *memalign_eee(size_t alignment, size_t size) {
void *res = memalign_fff(alignment, size); break_optimization(0); return res;}
-__attribute__((noinline)) void *memalign_ddd(size_t alignment, size_t size) {
+NOINLINE void *memalign_ddd(size_t alignment, size_t size) {
void *res = memalign_eee(alignment, size); break_optimization(0); return res;}
-__attribute__((noinline)) void *memalign_ccc(size_t alignment, size_t size) {
+NOINLINE void *memalign_ccc(size_t alignment, size_t size) {
void *res = memalign_ddd(alignment, size); break_optimization(0); return res;}
-__attribute__((noinline)) void *memalign_bbb(size_t alignment, size_t size) {
+NOINLINE void *memalign_bbb(size_t alignment, size_t size) {
void *res = memalign_ccc(alignment, size); break_optimization(0); return res;}
-__attribute__((noinline)) void *memalign_aaa(size_t alignment, size_t size) {
+NOINLINE void *memalign_aaa(size_t alignment, size_t size) {
void *res = memalign_bbb(alignment, size); break_optimization(0); return res;}
#endif // __APPLE__
-__attribute__((noinline))
- void free_ccc(void *p) { free(p); break_optimization(0);}
-__attribute__((noinline))
- void free_bbb(void *p) { free_ccc(p); break_optimization(0);}
-__attribute__((noinline))
- void free_aaa(void *p) { free_bbb(p); break_optimization(0);}
+NOINLINE void free_ccc(void *p) { free(p); break_optimization(0);}
+NOINLINE void free_bbb(void *p) { free_ccc(p); break_optimization(0);}
+NOINLINE void free_aaa(void *p) { free_bbb(p); break_optimization(0);}
-template<class T>
-__attribute__((noinline))
-void oob_test(int size, int off) {
+template<typename T>
+NOINLINE void oob_test(int size, int off) {
char *p = (char*)malloc_aaa(size);
// fprintf(stderr, "writing %d byte(s) into [%p,%p) with offset %d\n",
// sizeof(T), p, p + size, off);
@@ -208,9 +116,8 @@ void oob_test(int size, int off) {
}
-template<class T>
-__attribute__((noinline))
-void uaf_test(int size, int off) {
+template<typename T>
+NOINLINE void uaf_test(int size, int off) {
char *p = (char *)malloc_aaa(size);
free_aaa(p);
for (int i = 1; i < 100; i++)
@@ -255,13 +162,15 @@ TEST(AddressSanitizer, VariousMallocsTest) {
*c = 0;
delete c;
-#ifndef __APPLE__
+#if !defined(__APPLE__) && !defined(ANDROID)
// fprintf(stderr, "posix_memalign\n");
int *pm;
int pm_res = posix_memalign((void**)&pm, kPageSize, kPageSize);
EXPECT_EQ(0, pm_res);
free(pm);
+#endif
+#if !defined(__APPLE__)
int *ma = (int*)memalign(kPageSize, kPageSize);
EXPECT_EQ(0, (uintptr_t)ma % kPageSize);
ma[123] = 0;
@@ -295,48 +204,6 @@ TEST(AddressSanitizer, PvallocTest) {
}
#endif // __APPLE__
-void NoOpSignalHandler(int unused) {
- fprintf(stderr, "NoOpSignalHandler (should not happen). Aborting\n");
- abort();
-}
-
-void NoOpSigaction(int, siginfo_t *siginfo, void *context) {
- fprintf(stderr, "NoOpSigaction (should not happen). Aborting\n");
- abort();
-}
-
-TEST(AddressSanitizer, SignalTest) {
- signal(SIGSEGV, NoOpSignalHandler);
- signal(SIGILL, NoOpSignalHandler);
- // If asan did not intercept sigaction NoOpSigaction will fire.
- char *x = Ident((char*)malloc(5));
- EXPECT_DEATH(x[6]++, "is located 1 bytes to the right");
- free(Ident(x));
-}
-
-TEST(AddressSanitizer, SigactionTest) {
- {
- struct sigaction sigact;
- memset(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = NoOpSigaction;;
- sigact.sa_flags = SA_SIGINFO;
- sigaction(SIGSEGV, &sigact, 0);
- }
-
- {
- struct sigaction sigact;
- memset(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = NoOpSigaction;;
- sigact.sa_flags = SA_SIGINFO;
- sigaction(SIGILL, &sigact, 0);
- }
-
- // If asan did not intercept sigaction NoOpSigaction will fire.
- char *x = Ident((char*)malloc(5));
- EXPECT_DEATH(x[6]++, "is located 1 bytes to the right");
- free(Ident(x));
-}
-
void *TSDWorker(void *test_key) {
if (test_key) {
pthread_setspecific(*(pthread_key_t*)test_key, (void*)0xfeedface);
@@ -367,7 +234,7 @@ TEST(AddressSanitizer, DISABLED_TSDTest) {
pthread_key_delete(test_key);
}
-template<class T>
+template<typename T>
void OOBTest() {
char expected_str[100];
for (int size = sizeof(T); size < 20; size += 5) {
@@ -531,7 +398,7 @@ static void MallocStress(size_t n) {
}
TEST(AddressSanitizer, MallocStressTest) {
- MallocStress(200000);
+ MallocStress((ASAN_LOW_MEMORY) ? 20000 : 200000);
}
static void TestLargeMalloc(size_t size) {
@@ -546,24 +413,29 @@ TEST(AddressSanitizer, LargeMallocTest) {
}
}
+#if ASAN_LOW_MEMORY != 1
TEST(AddressSanitizer, HugeMallocTest) {
#ifdef __APPLE__
// It was empirically found out that 1215 megabytes is the maximum amount of
- // memory available to the process under AddressSanitizer on Darwin.
+ // memory available to the process under AddressSanitizer on 32-bit Mac 10.6.
+ // 32-bit Mac 10.7 gives even less (< 1G).
// (the libSystem malloc() allows allocating up to 2300 megabytes without
// ASan).
- size_t n_megs = __WORDSIZE == 32 ? 1200 : 4100;
+ size_t n_megs = __WORDSIZE == 32 ? 500 : 4100;
#else
size_t n_megs = __WORDSIZE == 32 ? 2600 : 4100;
#endif
TestLargeMalloc(n_megs << 20);
}
+#endif
TEST(AddressSanitizer, ThreadedMallocStressTest) {
const int kNumThreads = 4;
+ const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
pthread_t t[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
- pthread_create(&t[i], 0, (void* (*)(void *x))MallocStress, (void*)100000);
+ pthread_create(&t[i], 0, (void* (*)(void *x))MallocStress,
+ (void*)kNumIterations);
}
for (int i = 0; i < kNumThreads; i++) {
pthread_join(t[i], 0);
@@ -601,6 +473,25 @@ TEST(AddressSanitizer, ReallocTest) {
}
}
+#ifndef __APPLE__
+static const char *kMallocUsableSizeErrorMsg =
+ "AddressSanitizer attempting to call malloc_usable_size()";
+
+TEST(AddressSanitizer, MallocUsableSizeTest) {
+ const size_t kArraySize = 100;
+ char *array = Ident((char*)malloc(kArraySize));
+ int *int_ptr = Ident(new int);
+ EXPECT_EQ(0, malloc_usable_size(NULL));
+ EXPECT_EQ(kArraySize, malloc_usable_size(array));
+ EXPECT_EQ(sizeof(int), malloc_usable_size(int_ptr));
+ EXPECT_DEATH(malloc_usable_size((void*)0x123), kMallocUsableSizeErrorMsg);
+ EXPECT_DEATH(malloc_usable_size(array + kArraySize / 2),
+ kMallocUsableSizeErrorMsg);
+ free(array);
+ EXPECT_DEATH(malloc_usable_size(array), kMallocUsableSizeErrorMsg);
+}
+#endif
+
void WrongFree() {
int *x = (int*)malloc(100 * sizeof(int));
// Use the allocated memory, otherwise Clang will optimize it out.
@@ -623,12 +514,15 @@ void DoubleFree() {
}
TEST(AddressSanitizer, DoubleFreeTest) {
- EXPECT_DEATH(DoubleFree(), "ERROR: AddressSanitizer attempting double-free");
+ EXPECT_DEATH(DoubleFree(), ASAN_PCRE_DOTALL
+ "ERROR: AddressSanitizer attempting double-free"
+ ".*is located 0 bytes inside of 400-byte region"
+ ".*freed by thread T0 here"
+ ".*previously allocated by thread T0 here");
}
template<int kSize>
-__attribute__((noinline))
-void SizedStackTest() {
+NOINLINE void SizedStackTest() {
char a[kSize];
char *A = Ident((char*)&a);
for (size_t i = 0; i < kSize; i++)
@@ -669,8 +563,7 @@ TEST(AddressSanitizer, ManyStackObjectsTest) {
EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ");
}
-__attribute__((noinline))
-static void Frame0(int frame, char *a, char *b, char *c) {
+NOINLINE static void Frame0(int frame, char *a, char *b, char *c) {
char d[4] = {0};
char *D = Ident(d);
switch (frame) {
@@ -680,15 +573,15 @@ static void Frame0(int frame, char *a, char *b, char *c) {
case 0: D[5]++; break;
}
}
-__attribute__((noinline)) static void Frame1(int frame, char *a, char *b) {
+NOINLINE static void Frame1(int frame, char *a, char *b) {
char c[4] = {0}; Frame0(frame, a, b, c);
break_optimization(0);
}
-__attribute__((noinline)) static void Frame2(int frame, char *a) {
+NOINLINE static void Frame2(int frame, char *a) {
char b[4] = {0}; Frame1(frame, a, b);
break_optimization(0);
}
-__attribute__((noinline)) static void Frame3(int frame) {
+NOINLINE static void Frame3(int frame) {
char a[4] = {0}; Frame2(frame, a);
break_optimization(0);
}
@@ -706,8 +599,7 @@ TEST(AddressSanitizer, GuiltyStackFrame3Test) {
EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3");
}
-__attribute__((noinline))
-void LongJmpFunc1(jmp_buf buf) {
+NOINLINE void LongJmpFunc1(jmp_buf buf) {
// create three red zones for these two stack objects.
int a;
int b;
@@ -718,8 +610,7 @@ void LongJmpFunc1(jmp_buf buf) {
longjmp(buf, 1);
}
-__attribute__((noinline))
-void UnderscopeLongJmpFunc1(jmp_buf buf) {
+NOINLINE void UnderscopeLongJmpFunc1(jmp_buf buf) {
// create three red zones for these two stack objects.
int a;
int b;
@@ -730,8 +621,7 @@ void UnderscopeLongJmpFunc1(jmp_buf buf) {
_longjmp(buf, 1);
}
-__attribute__((noinline))
-void SigLongJmpFunc1(sigjmp_buf buf) {
+NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) {
// create three red zones for these two stack objects.
int a;
int b;
@@ -743,8 +633,7 @@ void SigLongJmpFunc1(sigjmp_buf buf) {
}
-__attribute__((noinline))
-void TouchStackFunc() {
+NOINLINE void TouchStackFunc() {
int a[100]; // long array will intersect with redzones from LongJmpFunc1.
int *A = Ident(a);
for (int i = 0; i < 100; i++)
@@ -780,8 +669,7 @@ TEST(AddressSanitizer, SigLongJmpTest) {
}
#ifdef __EXCEPTIONS
-__attribute__((noinline))
-void ThrowFunc() {
+NOINLINE void ThrowFunc() {
// create three red zones for these two stack objects.
int a;
int b;
@@ -828,7 +716,7 @@ TEST(AddressSanitizer, ThreadStackReuseTest) {
pthread_join(t, 0);
}
-#if defined(__i386__) or defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__)
TEST(AddressSanitizer, Store128Test) {
char *a = Ident((char*)malloc(Ident(12)));
char *p = a;
@@ -860,7 +748,7 @@ static string LeftOOBErrorMessage(int oob_distance) {
return string(expected_str);
}
-template<class T>
+template<typename T>
void MemSetOOBTestTemplate(size_t length) {
if (length == 0) return;
size_t size = Ident(sizeof(T) * length);
@@ -917,7 +805,7 @@ TEST(AddressSanitizer, MemSetOOBTest) {
}
// Same test for memcpy and memmove functions
-template <class T, class M>
+template <typename T, class M>
void MemTransferOOBTestTemplate(size_t length) {
if (length == 0) return;
size_t size = Ident(sizeof(T) * length);
@@ -1051,11 +939,14 @@ TEST(AddressSanitizer, StrLenOOBTest) {
free(heap_string);
}
-static inline char* MallocAndMemsetString(size_t size) {
+static inline char* MallocAndMemsetString(size_t size, char ch) {
char *s = Ident((char*)malloc(size));
- memset(s, 'z', size);
+ memset(s, ch, size);
return s;
}
+static inline char* MallocAndMemsetString(size_t size) {
+ return MallocAndMemsetString(size, 'z');
+}
#ifndef __APPLE__
TEST(AddressSanitizer, StrNLenOOBTest) {
@@ -1355,6 +1246,47 @@ TEST(AddressSanitizer, StrCatOOBTest) {
EXPECT_DEATH(strcat(to, from), RightOOBErrorMessage(0));
// length of "to" is just enough.
strcat(to, from + 1);
+
+ free(to);
+ free(from);
+}
+
+TEST(AddressSanitizer, StrNCatOOBTest) {
+ size_t to_size = Ident(100);
+ char *to = MallocAndMemsetString(to_size);
+ to[0] = '\0';
+ size_t from_size = Ident(20);
+ char *from = MallocAndMemsetString(from_size);
+ // Normal strncat calls.
+ strncat(to, from, 0);
+ strncat(to, from, from_size);
+ from[from_size - 1] = '\0';
+ strncat(to, from, 2 * from_size);
+ // Concatenating an empty string is not an error.
+ strncat(to - 1, from, 0);
+ strncat(to, from + from_size - 1, 10);
+ // One of the arguments points to unallocated memory.
+ EXPECT_DEATH(strncat(to - 1, from, 2), LeftOOBErrorMessage(1));
+ EXPECT_DEATH(strncat(to, from - 1, 2), LeftOOBErrorMessage(1));
+ EXPECT_DEATH(strncat(to + to_size, from, 2), RightOOBErrorMessage(0));
+ EXPECT_DEATH(strncat(to, from + from_size, 2), RightOOBErrorMessage(0));
+
+ memset(from, 'z', from_size);
+ memset(to, 'z', to_size);
+ to[0] = '\0';
+ // "from" is too short.
+ EXPECT_DEATH(strncat(to, from, from_size + 1), RightOOBErrorMessage(0));
+ // "to" is not zero-terminated.
+ EXPECT_DEATH(strncat(to + 1, from, 1), RightOOBErrorMessage(0));
+ // "to" is too short to fit "from".
+ to[0] = 'z';
+ to[to_size - from_size + 1] = '\0';
+ EXPECT_DEATH(strncat(to, from, from_size - 1), RightOOBErrorMessage(0));
+ // "to" is just enough.
+ strncat(to, from, from_size - 2);
+
+ free(to);
+ free(from);
}
static string OverlapErrorMessage(const string &func) {
@@ -1365,14 +1297,22 @@ TEST(AddressSanitizer, StrArgsOverlapTest) {
size_t size = Ident(100);
char *str = Ident((char*)malloc(size));
+// Do not check memcpy() on OS X 10.7 and later, where it actually aliases
+// memmove().
+#if !defined(__APPLE__) || !defined(MAC_OS_X_VERSION_10_7) || \
+ (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7)
// Check "memcpy". Use Ident() to avoid inlining.
memset(str, 'z', size);
Ident(memcpy)(str + 1, str + 11, 10);
Ident(memcpy)(str, str, 0);
EXPECT_DEATH(Ident(memcpy)(str, str + 14, 15), OverlapErrorMessage("memcpy"));
EXPECT_DEATH(Ident(memcpy)(str + 14, str, 15), OverlapErrorMessage("memcpy"));
- EXPECT_DEATH(Ident(memcpy)(str + 20, str + 20, 1),
- OverlapErrorMessage("memcpy"));
+#endif
+
+ // We do not treat memcpy with to==from as a bug.
+ // See http://llvm.org/bugs/show_bug.cgi?id=11763.
+ // EXPECT_DEATH(Ident(memcpy)(str + 20, str + 20, 1),
+ // OverlapErrorMessage("memcpy"));
// Check "strcpy".
memset(str, 'z', size);
@@ -1403,9 +1343,117 @@ TEST(AddressSanitizer, StrArgsOverlapTest) {
EXPECT_DEATH(strcat(str + 9, str), OverlapErrorMessage("strcat"));
EXPECT_DEATH(strcat(str + 10, str), OverlapErrorMessage("strcat"));
+ // Check "strncat".
+ memset(str, 'z', size);
+ str[10] = '\0';
+ strncat(str, str + 10, 10); // from is empty
+ strncat(str, str + 11, 10);
+ str[10] = '\0';
+ str[20] = '\0';
+ strncat(str + 5, str, 5);
+ str[10] = '\0';
+ EXPECT_DEATH(strncat(str + 5, str, 6), OverlapErrorMessage("strncat"));
+ EXPECT_DEATH(strncat(str, str + 9, 10), OverlapErrorMessage("strncat"));
+
free(str);
}
+void CallAtoi(const char *nptr) {
+ Ident(atoi(nptr));
+}
+void CallAtol(const char *nptr) {
+ Ident(atol(nptr));
+}
+void CallAtoll(const char *nptr) {
+ Ident(atoll(nptr));
+}
+typedef void(*PointerToCallAtoi)(const char*);
+
+void RunAtoiOOBTest(PointerToCallAtoi Atoi) {
+ char *array = MallocAndMemsetString(10, '1');
+ // Invalid pointer to the string.
+ EXPECT_DEATH(Atoi(array + 11), RightOOBErrorMessage(1));
+ EXPECT_DEATH(Atoi(array - 1), LeftOOBErrorMessage(1));
+ // Die if a buffer doesn't have terminating NULL.
+ EXPECT_DEATH(Atoi(array), RightOOBErrorMessage(0));
+ // Make last symbol a terminating NULL or other non-digit.
+ array[9] = '\0';
+ Atoi(array);
+ array[9] = 'a';
+ Atoi(array);
+ Atoi(array + 9);
+ // Sometimes we need to detect overflow if no digits are found.
+ memset(array, ' ', 10);
+ EXPECT_DEATH(Atoi(array), RightOOBErrorMessage(0));
+ array[9] = '-';
+ EXPECT_DEATH(Atoi(array), RightOOBErrorMessage(0));
+ EXPECT_DEATH(Atoi(array + 9), RightOOBErrorMessage(0));
+ array[8] = '-';
+ Atoi(array);
+ delete array;
+}
+
+TEST(AddressSanitizer, AtoiAndFriendsOOBTest) {
+ RunAtoiOOBTest(&CallAtoi);
+ RunAtoiOOBTest(&CallAtol);
+ RunAtoiOOBTest(&CallAtoll);
+}
+
+void CallStrtol(const char *nptr, char **endptr, int base) {
+ Ident(strtol(nptr, endptr, base));
+}
+void CallStrtoll(const char *nptr, char **endptr, int base) {
+ Ident(strtoll(nptr, endptr, base));
+}
+typedef void(*PointerToCallStrtol)(const char*, char**, int);
+
+void RunStrtolOOBTest(PointerToCallStrtol Strtol) {
+ char *array = MallocAndMemsetString(3);
+ char *endptr = NULL;
+ array[0] = '1';
+ array[1] = '2';
+ array[2] = '3';
+ // Invalid pointer to the string.
+ EXPECT_DEATH(Strtol(array + 3, NULL, 0), RightOOBErrorMessage(0));
+ EXPECT_DEATH(Strtol(array - 1, NULL, 0), LeftOOBErrorMessage(1));
+ // Buffer overflow if there is no terminating null (depends on base).
+ Strtol(array, &endptr, 3);
+ EXPECT_EQ(array + 2, endptr);
+ EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+ array[2] = 'z';
+ Strtol(array, &endptr, 35);
+ EXPECT_EQ(array + 2, endptr);
+ EXPECT_DEATH(Strtol(array, NULL, 36), RightOOBErrorMessage(0));
+ // Add terminating zero to get rid of overflow.
+ array[2] = '\0';
+ Strtol(array, NULL, 36);
+ // Don't check for overflow if base is invalid.
+ Strtol(array - 1, NULL, -1);
+ Strtol(array + 3, NULL, 1);
+ // Sometimes we need to detect overflow if no digits are found.
+ array[0] = array[1] = array[2] = ' ';
+ EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+ array[2] = '+';
+ EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+ array[2] = '-';
+ EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+ array[1] = '+';
+ Strtol(array, NULL, 0);
+ array[1] = array[2] = 'z';
+ Strtol(array, &endptr, 0);
+ EXPECT_EQ(array, endptr);
+ Strtol(array + 2, NULL, 0);
+ EXPECT_EQ(array, endptr);
+ delete array;
+}
+
+TEST(AddressSanitizer, StrtollOOBTest) {
+ RunStrtolOOBTest(&CallStrtoll);
+}
+TEST(AddressSanitizer, StrtolOOBTest) {
+ RunStrtolOOBTest(&CallStrtol);
+}
+
// At the moment we instrument memcpy/memmove/memset calls at compile time, so we
// can't handle an OOB error if these functions are called by pointer; see the disabled
// MemIntrinsicCallByPointerTest below.
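To illustrate the distinction drawn in the comment above, a minimal sketch (function names are hypothetical): a direct call is visible to the compile-time instrumentation, while a call through a function pointer is not, so at this point an out-of-bounds access in the latter goes unreported unless the runtime intercepts the function.

#include <string.h>

void DirectMemset(char *p, size_t n) {
  memset(p, 0, n);        // instrumented at compile time; OOB here is reported
}

void IndirectMemset(char *p, size_t n) {
  void *(*volatile fp)(void *, int, size_t) = memset;
  fp(p, 0, n);            // intrinsic hidden behind a pointer; not instrumented
}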
@@ -1446,8 +1494,7 @@ TEST(AddressSanitizer, DISABLED_MemIntrinsicUnalignedAccessTest) {
// TODO(samsonov): Add a test with malloc(0)
// TODO(samsonov): Add tests for str* and mem* functions.
-__attribute__((noinline))
-static int LargeFunction(bool do_bad_access) {
+NOINLINE static int LargeFunction(bool do_bad_access) {
int *x = new int[100];
x[0]++;
x[1]++;
@@ -1544,8 +1591,7 @@ TEST(AddressSanitizer, ShadowGapTest) {
#endif // ASAN_NEEDS_SEGV
extern "C" {
-__attribute__((noinline))
-static void UseThenFreeThenUse() {
+NOINLINE static void UseThenFreeThenUse() {
char *x = Ident((char*)malloc(8));
*x = 1;
free_aaa(x);
@@ -1561,76 +1607,6 @@ TEST(AddressSanitizer, StrDupTest) {
free(strdup(Ident("123")));
}
-TEST(AddressSanitizer, ObjdumpTest) {
- ObjdumpOfMyself *o = objdump_of_myself();
- EXPECT_TRUE(o->IsCorrect());
-}
-
-extern "C" {
-__attribute__((noinline))
-static void DisasmSimple() {
- Ident(0);
-}
-
-__attribute__((noinline))
-static void DisasmParamWrite(int *a) {
- *a = 1;
-}
-
-__attribute__((noinline))
-static void DisasmParamInc(int *a) {
- (*a)++;
-}
-
-__attribute__((noinline))
-static void DisasmParamReadIfWrite(int *a) {
- if (*a)
- *a = 1;
-}
-
-__attribute__((noinline))
-static int DisasmParamIfReadWrite(int *a, int cond) {
- int res = 0;
- if (cond)
- res = *a;
- *a = 0;
- return res;
-}
-
-static int GLOBAL;
-
-__attribute__((noinline))
-static void DisasmWriteGlob() {
- GLOBAL = 1;
-}
-} // extern "C"
-
-TEST(AddressSanitizer, DisasmTest) {
- int a;
- DisasmSimple();
- DisasmParamWrite(&a);
- DisasmParamInc(&a);
- Ident(DisasmWriteGlob)();
- DisasmParamReadIfWrite(&a);
-
- a = 7;
- EXPECT_EQ(7, DisasmParamIfReadWrite(&a, Ident(1)));
- EXPECT_EQ(0, a);
-
- ObjdumpOfMyself *o = objdump_of_myself();
- vector<string> insns;
- insns.push_back("ud2");
- insns.push_back("__asan_report_");
- EXPECT_EQ(0, o->CountInsnInFunc("DisasmSimple", insns));
- EXPECT_EQ(1, o->CountInsnInFunc("DisasmParamWrite", insns));
- EXPECT_EQ(1, o->CountInsnInFunc("DisasmParamInc", insns));
- EXPECT_EQ(0, o->CountInsnInFunc("DisasmWriteGlob", insns));
-
- // TODO(kcc): implement these (needs just one __asan_report).
- EXPECT_EQ(2, o->CountInsnInFunc("DisasmParamReadIfWrite", insns));
- EXPECT_EQ(2, o->CountInsnInFunc("DisasmParamIfReadWrite", insns));
-}
-
// Currently we create and poison redzone at right of global variables.
char glob5[5];
static char static110[110];
@@ -1718,8 +1694,7 @@ TEST(AddressSanitizer, LocalReferenceReturnTest) {
#endif
template <int kSize>
-__attribute__((noinline))
-static void FuncWithStack() {
+NOINLINE static void FuncWithStack() {
char x[kSize];
Ident(x)[0] = 0;
Ident(x)[kSize-1] = 0;
@@ -1758,9 +1733,21 @@ TEST(AddressSanitizer, ThreadedStressStackReuseTest) {
}
}
+static void *PthreadExit(void *a) {
+ pthread_exit(0);
+ return 0;
+}
+
+TEST(AddressSanitizer, PthreadExitTest) {
+ pthread_t t;
+ for (int i = 0; i < 1000; i++) {
+ pthread_create(&t, 0, PthreadExit, 0);
+ pthread_join(t, 0);
+ }
+}
+
#ifdef __EXCEPTIONS
-__attribute__((noinline))
-static void StackReuseAndException() {
+NOINLINE static void StackReuseAndException() {
int large_stack[1000];
Ident(large_stack);
ASAN_THROW(1);
@@ -1784,6 +1771,28 @@ TEST(AddressSanitizer, MlockTest) {
EXPECT_EQ(0, munlock((void*)0x987, 0x654));
}
+struct LargeStruct {
+ int foo[100];
+};
+
+// Test for bug http://llvm.org/bugs/show_bug.cgi?id=11763.
+// Struct copy should not cause asan warning even if lhs == rhs.
+TEST(AddressSanitizer, LargeStructCopyTest) {
+ LargeStruct a;
+ *Ident(&a) = *Ident(&a);
+}
+
+__attribute__((no_address_safety_analysis))
+static void NoAddressSafety() {
+ char *foo = new char[10];
+ Ident(foo)[10] = 0;
+ delete [] foo;
+}
+
+TEST(AddressSanitizer, AttributeNoAddressSafetyTest) {
+ Ident(NoAddressSafety)();
+}
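A minimal sketch of the typical use of the attribute exercised above: exempting one function from instrumentation when it intentionally performs accesses ASan would otherwise flag (the function and its purpose here are hypothetical). Note that only the compile-time checks are disabled; runtime interceptors such as the malloc replacement still apply.

__attribute__((no_address_safety_analysis))
static void ProbeRedZoneDeliberately(char *p) {
  // Accesses inside this function are not instrumented, so ASan will not
  // report them even if they touch poisoned memory.
  p[0] = 0;
}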
+
// ------------------ demo tests; run each one-by-one -------------
// e.g. --gtest_filter=*DemoOOBLeftHigh --gtest_also_run_disabled_tests
TEST(AddressSanitizer, DISABLED_DemoThreadedTest) {
@@ -1873,23 +1882,75 @@ TEST(AddressSanitizer, DISABLED_DemoTooMuchMemoryTest) {
}
}
+// http://code.google.com/p/address-sanitizer/issues/detail?id=66
+TEST(AddressSanitizer, BufferOverflowAfterManyFrees) {
+ for (int i = 0; i < 1000000; i++) {
+ delete [] (Ident(new char [8644]));
+ }
+ char *x = new char[8192];
+ EXPECT_DEATH(x[Ident(8192)] = 0, "AddressSanitizer heap-buffer-overflow");
+ delete [] Ident(x);
+}
+
#ifdef __APPLE__
#include "asan_mac_test.h"
-// TODO(glider): figure out whether we still need these tests. Is it correct
-// to intercept CFAllocator?
-TEST(AddressSanitizerMac, DISABLED_CFAllocatorDefaultDoubleFree) {
+TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree) {
EXPECT_DEATH(
- CFAllocatorDefaultDoubleFree(),
+ CFAllocatorDefaultDoubleFree(NULL),
"attempting double-free");
}
+void CFAllocator_DoubleFreeOnPthread() {
+ pthread_t child;
+ pthread_create(&child, NULL, CFAllocatorDefaultDoubleFree, NULL);
+ pthread_join(child, NULL); // Shouldn't be reached.
+}
+
+TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree_ChildPhread) {
+ EXPECT_DEATH(CFAllocator_DoubleFreeOnPthread(), "attempting double-free");
+}
+
+namespace {
+
+void *GLOB;
+
+void *CFAllocatorAllocateToGlob(void *unused) {
+ GLOB = CFAllocatorAllocate(NULL, 100, /*hint*/0);
+ return NULL;
+}
+
+void *CFAllocatorDeallocateFromGlob(void *unused) {
+ char *p = (char*)GLOB;
+ p[100] = 'A'; // ASan should report an error here.
+ CFAllocatorDeallocate(NULL, GLOB);
+ return NULL;
+}
+
+void CFAllocator_PassMemoryToAnotherThread() {
+ pthread_t th1, th2;
+ pthread_create(&th1, NULL, CFAllocatorAllocateToGlob, NULL);
+ pthread_join(th1, NULL);
+ pthread_create(&th2, NULL, CFAllocatorDeallocateFromGlob, NULL);
+ pthread_join(th2, NULL);
+}
+
+TEST(AddressSanitizerMac, CFAllocator_PassMemoryToAnotherThread) {
+ EXPECT_DEATH(CFAllocator_PassMemoryToAnotherThread(),
+ "heap-buffer-overflow");
+}
+
+} // namespace
+
+// TODO(glider): figure out whether we still need these tests. Is it correct
+// to intercept the non-default CFAllocators?
TEST(AddressSanitizerMac, DISABLED_CFAllocatorSystemDefaultDoubleFree) {
EXPECT_DEATH(
CFAllocatorSystemDefaultDoubleFree(),
"attempting double-free");
}
-TEST(AddressSanitizerMac, DISABLED_CFAllocatorMallocDoubleFree) {
+// We're intercepting malloc, so kCFAllocatorMalloc is routed to ASan.
+TEST(AddressSanitizerMac, CFAllocatorMallocDoubleFree) {
EXPECT_DEATH(CFAllocatorMallocDoubleFree(), "attempting double-free");
}
@@ -2012,8 +2073,37 @@ TEST(AddressSanitizerMac, DISABLED_TSDWorkqueueTest) {
pthread_join(th, NULL);
pthread_key_delete(test_key);
}
+
+// Test that CFStringCreateCopy does not copy constant strings.
+TEST(AddressSanitizerMac, CFStringCreateCopy) {
+ CFStringRef str = CFSTR("Hello world!\n");
+ CFStringRef str2 = CFStringCreateCopy(0, str);
+ EXPECT_EQ(str, str2);
+}
+
+TEST(AddressSanitizerMac, NSObjectOOB) {
+ // Make sure that our allocators are used for NSObjects.
+ EXPECT_DEATH(TestOOBNSObjects(), "heap-buffer-overflow");
+}
+
+// Make sure that the correct pointer is passed to free() when deallocating
+// an NSURL object.
+// See http://code.google.com/p/address-sanitizer/issues/detail?id=70.
+TEST(AddressSanitizerMac, NSURLDeallocation) {
+ TestNSURLDeallocation();
+}
#endif // __APPLE__
+// Test that instrumentation of stack allocations takes into account
+// AllocSize of a type, and not its StoreSize (16 vs 10 bytes for long double).
+// See http://llvm.org/bugs/show_bug.cgi?id=12047 for more details.
+TEST(AddressSanitizer, LongDoubleNegativeTest) {
+ long double a, b;
+ static long double c;
+ memcpy(Ident(&a), Ident(&b), sizeof(long double));
+ memcpy(Ident(&c), Ident(&b), sizeof(long double));
+};
+
int main(int argc, char **argv) {
progname = argv[0];
testing::GTEST_FLAG(death_test_style) = "threadsafe";
diff --git a/lib/asan/tests/asan_test_config.h b/lib/asan/tests/asan_test_config.h
index de4ae95..6cf0e69 100644
--- a/lib/asan/tests/asan_test_config.h
+++ b/lib/asan/tests/asan_test_config.h
@@ -39,6 +39,10 @@ using std::map;
# error "please define ASAN_NEEDS_SEGV"
#endif
+#ifndef ASAN_LOW_MEMORY
+#define ASAN_LOW_MEMORY 0
+#endif
+
#define ASAN_PCRE_DOTALL ""
#endif // ASAN_TEST_CONFIG_H
diff --git a/lib/asan/tests/asan_test_utils.h b/lib/asan/tests/asan_test_utils.h
index a480981..fb509cc 100644
--- a/lib/asan/tests/asan_test_utils.h
+++ b/lib/asan/tests/asan_test_utils.h
@@ -14,13 +14,39 @@
#ifndef ASAN_TEST_UTILS_H
#define ASAN_TEST_UTILS_H
+#if defined(_WIN32)
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+# define NOINLINE __declspec(noinline)
+#else // defined(_WIN32)
+# define NOINLINE __attribute__((noinline))
+#endif // defined(_WIN32)
+
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
+
+#ifndef __WORDSIZE
+#if __LP64__ || defined(_WIN64)
+#define __WORDSIZE 64
+#else
+#define __WORDSIZE 32
+#endif
+#endif
+
// Make the compiler think that something is going on there.
extern "C" void break_optimization(void *);
// This function returns its parameter but in such a way that compiler
// can not prove it.
template<class T>
-__attribute__((noinline))
+NOINLINE
static T Ident(T t) {
T ret = t;
break_optimization(&ret);
diff --git a/lib/asan/tests/dlclose-test.tmpl b/lib/asan/tests/dlclose-test.tmpl
deleted file mode 100644
index 7ef22e9..0000000
--- a/lib/asan/tests/dlclose-test.tmpl
+++ /dev/null
@@ -1 +0,0 @@
-PASS
diff --git a/lib/asan/tests/global-overflow.tmpl b/lib/asan/tests/global-overflow.tmpl
deleted file mode 100644
index c5d5442..0000000
--- a/lib/asan/tests/global-overflow.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-READ of size 1 at 0x.* thread T0
- #0 0x.* in main .*global-overflow.cc:9
-0x.* is located 0 bytes to the right of global variable .*YYY.* of size 10
diff --git a/lib/asan/tests/heap-overflow.cc b/lib/asan/tests/heap-overflow.cc
deleted file mode 100644
index 475d163..0000000
--- a/lib/asan/tests/heap-overflow.cc
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <stdlib.h>
-#include <string.h>
-int main(int argc, char **argv) {
- char *x = (char*)malloc(10 * sizeof(char));
- memset(x, 0, 10);
- int res = x[argc * 10]; // BOOOM
- free(x);
- return res;
-}
diff --git a/lib/asan/tests/heap-overflow.tmpl b/lib/asan/tests/heap-overflow.tmpl
deleted file mode 100644
index e2ab65f..0000000
--- a/lib/asan/tests/heap-overflow.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-READ of size 1 at 0x.* thread T0
- #0 0x.* in main .*heap-overflow.cc:6
-0x.* is located 0 bytes to the right of 10-byte region
-allocated by thread T0 here:
- #0 0x.* in malloc
- #1 0x.* in main .*heap-overflow.cc:[45]
diff --git a/lib/asan/tests/heap-overflow.tmpl.Darwin b/lib/asan/tests/heap-overflow.tmpl.Darwin
deleted file mode 100644
index e4611d0..0000000
--- a/lib/asan/tests/heap-overflow.tmpl.Darwin
+++ /dev/null
@@ -1,8 +0,0 @@
-READ of size 1 at 0x.* thread T0
- #0 0x.* in main .*heap-overflow.cc:6
-0x.* is located 0 bytes to the right of 10-byte region
-allocated by thread T0 here:
- #0 0x.* in .*mz_malloc.* _asan_rtl_
- #1 0x.* in malloc_zone_malloc.*
- #2 0x.* in malloc.*
- #3 0x.* in main heap-overflow.cc:4
diff --git a/lib/asan/tests/large_func_test.cc b/lib/asan/tests/large_func_test.cc
deleted file mode 100644
index 70bc36f..0000000
--- a/lib/asan/tests/large_func_test.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-#include <stdlib.h>
-__attribute__((noinline))
-static void LargeFunction(int *x, int zero) {
- x[0]++;
- x[1]++;
- x[2]++;
- x[3]++;
- x[4]++;
- x[5]++;
- x[6]++;
- x[7]++;
- x[8]++;
- x[9]++;
-
- x[zero + 111]++; // we should report this exact line
-
- x[10]++;
- x[11]++;
- x[12]++;
- x[13]++;
- x[14]++;
- x[15]++;
- x[16]++;
- x[17]++;
- x[18]++;
- x[19]++;
-}
-
-int main(int argc, char **argv) {
- int *x = new int[100];
- LargeFunction(x, argc - 1);
- delete x;
-}
diff --git a/lib/asan/tests/large_func_test.tmpl b/lib/asan/tests/large_func_test.tmpl
deleted file mode 100644
index 45a13d0..0000000
--- a/lib/asan/tests/large_func_test.tmpl
+++ /dev/null
@@ -1,8 +0,0 @@
-.*ERROR: AddressSanitizer heap-buffer-overflow on address 0x.* at pc 0x.* bp 0x.* sp 0x.*
-READ of size 4 at 0x.* thread T0
- #0 0x.* in LargeFunction .*large_func_test.cc:15
- #1 0x.* in main .*large_func_test.cc:3[012]
-0x.* is located 44 bytes to the right of 400-byte region
-allocated by thread T0 here:
- #0 0x.* in operator new.*
- #1 0x.* in main .*large_func_test.cc:30
diff --git a/lib/asan/tests/match_output.py b/lib/asan/tests/match_output.py
deleted file mode 100755
index 31095f3..0000000
--- a/lib/asan/tests/match_output.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/python
-
-import re
-import sys
-
-def matchFile(f, f_re):
- for line_re in f_re:
- line_re = line_re.rstrip()
- if not line_re:
- continue
- if line_re[0] == '#':
- continue
- match = False
- for line in f:
- line = line.rstrip()
- # print line
- if re.search(line_re, line):
- match = True
- #print 'match: %s =~ %s' % (line, line_re)
- break
- if not match:
- print 'no match for: %s' % (line_re)
- return False
- return True
-
-if len(sys.argv) != 2:
- print >>sys.stderr, 'Usage: %s <template file>'
- sys.exit(1)
-
-f = sys.stdin
-f_re = open(sys.argv[1])
-
-if not matchFile(f, f_re):
- print >>sys.stderr, 'File does not match the template'
- sys.exit(1)
diff --git a/lib/asan/tests/null_deref.cc b/lib/asan/tests/null_deref.cc
deleted file mode 100644
index f7ba4dd..0000000
--- a/lib/asan/tests/null_deref.cc
+++ /dev/null
@@ -1,7 +0,0 @@
-__attribute__((noinline))
-static void NullDeref(int *ptr) {
- ptr[10]++;
-}
-int main() {
- NullDeref((int*)0);
-}
diff --git a/lib/asan/tests/null_deref.tmpl b/lib/asan/tests/null_deref.tmpl
deleted file mode 100644
index d27cccc..0000000
--- a/lib/asan/tests/null_deref.tmpl
+++ /dev/null
@@ -1,4 +0,0 @@
-.*ERROR: AddressSanitizer crashed on unknown address 0x0*00028 .*pc 0x.*
-AddressSanitizer can not provide additional info. ABORTING
- #0 0x.* in NullDeref.*null_deref.cc:3
- #1 0x.* in main.*null_deref.cc:[67]
diff --git a/lib/asan/tests/shared-lib-test.tmpl b/lib/asan/tests/shared-lib-test.tmpl
deleted file mode 100644
index 564e3eb..0000000
--- a/lib/asan/tests/shared-lib-test.tmpl
+++ /dev/null
@@ -1,7 +0,0 @@
-#.*ERROR: AddressSanitizer global-buffer-overflow on address 0x.* at pc 0x.* bp 0x.* sp 0x.*
-#READ of size 4 at 0x.* thread T0
-# #0 0x.* in inc .*shared-lib-test-so.cc:11
-# #1 0x.* in main .*shared-lib-test.cc:33
-# #2 0x.* in __libc_start_main.*
-#0x.* is located 4 bytes to the left of global variable 'GLOB' (.*) of size 40
-#0x.* is located 52 bytes to the right of global variable 'pad' (.*) of size 40
diff --git a/lib/asan/tests/stack-overflow.cc b/lib/asan/tests/stack-overflow.cc
deleted file mode 100644
index dd86aa3..0000000
--- a/lib/asan/tests/stack-overflow.cc
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <string.h>
-int main(int argc, char **argv) {
- char x[10];
- memset(x, 0, 10);
- int res = x[argc * 10]; // BOOOM
- return res;
-}
diff --git a/lib/asan/tests/stack-overflow.tmpl b/lib/asan/tests/stack-overflow.tmpl
deleted file mode 100644
index 6aa717a..0000000
--- a/lib/asan/tests/stack-overflow.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-READ of size 1 at 0x.* thread T0
- #0 0x.* in main .*stack-overflow.cc:5
-Address 0x.* is .* frame <main>
diff --git a/lib/asan/tests/stack-use-after-return.disabled b/lib/asan/tests/stack-use-after-return.disabled
deleted file mode 100644
index 02729bc..0000000
--- a/lib/asan/tests/stack-use-after-return.disabled
+++ /dev/null
@@ -1,3 +0,0 @@
-WRITE of size 1 .* thread T0
-#0.*Func2.*stack-use-after-return.cc:18
-is located in frame <.*Func1.*> of T0's stack
diff --git a/lib/asan/tests/strncpy-overflow.cc b/lib/asan/tests/strncpy-overflow.cc
deleted file mode 100644
index 044f649..0000000
--- a/lib/asan/tests/strncpy-overflow.cc
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <string.h>
-#include <stdlib.h>
-int main(int argc, char **argv) {
- char *hello = (char*)malloc(6);
- strcpy(hello, "hello");
- char *short_buffer = (char*)malloc(9);
- strncpy(short_buffer, hello, 10); // BOOM
- return short_buffer[8];
-}
diff --git a/lib/asan/tests/strncpy-overflow.tmpl b/lib/asan/tests/strncpy-overflow.tmpl
deleted file mode 100644
index 3780aa8..0000000
--- a/lib/asan/tests/strncpy-overflow.tmpl
+++ /dev/null
@@ -1,7 +0,0 @@
-WRITE of size 1 at 0x.* thread T0
- #0 0x.* in strncpy
- #1 0x.* in main .*strncpy-overflow.cc:[78]
-0x.* is located 0 bytes to the right of 9-byte region
-allocated by thread T0 here:
- #0 0x.* in malloc
- #1 0x.* in main .*strncpy-overflow.cc:6
diff --git a/lib/asan/tests/test_output.sh b/lib/asan/tests/test_output.sh
deleted file mode 100755
index c54b2364..0000000
--- a/lib/asan/tests/test_output.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-set -e # fail on any error
-
-OS=`uname`
-CXX=$1
-CC=$2
-CXXFLAGS="-mno-omit-leaf-frame-pointer -fno-omit-frame-pointer"
-SYMBOLIZER=../scripts/asan_symbolize.py
-
-C_TEST=use-after-free
-echo "Sanity checking a test in pure C"
-$CC -g -faddress-sanitizer -O2 $C_TEST.c
-./a.out 2>&1 | grep "heap-use-after-free" > /dev/null
-rm ./a.out
-
-echo "Sanity checking a test in pure C with -pie"
-$CC -g -faddress-sanitizer -O2 $C_TEST.c -pie
-./a.out 2>&1 | grep "heap-use-after-free" > /dev/null
-rm ./a.out
-
-for t in *.tmpl; do
- for b in 32 64; do
- for O in 0 1 2 3; do
- c=`basename $t .tmpl`
- c_so=$c-so
- exe=$c.$b.O$O
- so=$c.$b.O$O-so.so
- echo testing $exe
- $CXX $CXXFLAGS -g -m$b -faddress-sanitizer -O$O $c.cc -o $exe
- [ -e "$c_so.cc" ] && $CXX $CXXFLAGS -g -m$b -faddress-sanitizer -O$O $c_so.cc -fPIC -shared -o $so
- # If there's an OS-specific template, use it.
- # Please minimize the use of OS-specific templates.
- if [ -e "$t.$OS" ]
- then
- actual_t="$t.$OS"
- else
- actual_t="$t"
- fi
- ./$exe 2>&1 | $SYMBOLIZER 2> /dev/null | c++filt | ./match_output.py $actual_t
- rm ./$exe
- [ -e "$so" ] && rm ./$so
- done
- done
-done
-
-exit 0
diff --git a/lib/asan/tests/use-after-free.cc b/lib/asan/tests/use-after-free.cc
deleted file mode 100644
index 60626bf..0000000
--- a/lib/asan/tests/use-after-free.cc
+++ /dev/null
@@ -1,6 +0,0 @@
-#include <stdlib.h>
-int main() {
- char *x = (char*)malloc(10 * sizeof(char));
- free(x);
- return x[5];
-}
diff --git a/lib/asan/tests/use-after-free.tmpl b/lib/asan/tests/use-after-free.tmpl
deleted file mode 100644
index c4b5c74..0000000
--- a/lib/asan/tests/use-after-free.tmpl
+++ /dev/null
@@ -1,10 +0,0 @@
-.*ERROR: AddressSanitizer heap-use-after-free on address 0x.* at pc 0x.* bp 0x.* sp 0x.*
-READ of size 1 at 0x.* thread T0
- #0 0x.* in main .*use-after-free.cc:5
-0x.* is located 5 bytes inside of 10-byte region .0x.*,0x.*
-freed by thread T0 here:
- #0 0x.* in free
- #1 0x.* in main .*use-after-free.cc:[45]
-previously allocated by thread T0 here:
- #0 0x.* in malloc
- #1 0x.* in main .*use-after-free.cc:3
diff --git a/lib/ashldi3.c b/lib/ashldi3.c
index 6c558fe..eb4698a 100644
--- a/lib/ashldi3.c
+++ b/lib/ashldi3.c
@@ -18,7 +18,7 @@
/* Precondition: 0 <= b < bits_in_dword */
-ARM_EABI_FNALIAS(llsl, ashldi3);
+ARM_EABI_FNALIAS(llsl, ashldi3)
COMPILER_RT_ABI di_int
__ashldi3(di_int a, si_int b)
diff --git a/lib/ashlti3.c b/lib/ashlti3.c
index 7042b53..4bd8219 100644
--- a/lib/ashlti3.c
+++ b/lib/ashlti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: a << b */
/* Precondition: 0 <= b < bits_in_tword */
diff --git a/lib/ashrdi3.c b/lib/ashrdi3.c
index 38ab716..14c878b 100644
--- a/lib/ashrdi3.c
+++ b/lib/ashrdi3.c
@@ -18,7 +18,7 @@
/* Precondition: 0 <= b < bits_in_dword */
-ARM_EABI_FNALIAS(lasr, ashrdi3);
+ARM_EABI_FNALIAS(lasr, ashrdi3)
COMPILER_RT_ABI di_int
__ashrdi3(di_int a, si_int b)
diff --git a/lib/ashrti3.c b/lib/ashrti3.c
index 4d16230..ed43641 100644
--- a/lib/ashrti3.c
+++ b/lib/ashrti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: arithmetic a >> b */
/* Precondition: 0 <= b < bits_in_tword */
diff --git a/lib/assembly.h b/lib/assembly.h
index 83bed12..3d8e50d 100644
--- a/lib/assembly.h
+++ b/lib/assembly.h
@@ -25,9 +25,11 @@
#if defined(__APPLE__)
#define HIDDEN_DIRECTIVE .private_extern
#define LOCAL_LABEL(name) L_##name
+#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
#else
#define HIDDEN_DIRECTIVE .hidden
#define LOCAL_LABEL(name) .L_##name
+#define FILE_LEVEL_DIRECTIVE
#endif
#define GLUE2(a, b) a ## b
@@ -42,6 +44,7 @@
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
SYMBOL_NAME(name):
diff --git a/lib/atomic.c b/lib/atomic.c
new file mode 100644
index 0000000..a291f0d
--- /dev/null
+++ b/lib/atomic.c
@@ -0,0 +1,315 @@
+/*===-- atomic.c - Implement support functions for atomic operations.------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===
+ *
+ * atomic.c defines a set of functions for performing atomic accesses on
+ * arbitrary-sized memory locations. This design uses locks that should
+ * be fast in the uncontended case, for two reasons:
+ *
+ * 1) This code must work with C programs that do not link to anything
+ * (including pthreads) and so it should not depend on any pthread
+ * functions.
+ * 2) Atomic operations, rather than explicit mutexes, are most commonly used
+ *    in code where contended operations are rare.
+ *
+ * To avoid needing a per-object lock, this code allocates an array of
+ * locks and hashes the object pointers to find the one that it should use.
+ * For operations that must be atomic on two locations, the lower lock is
+ * always acquired first, to avoid deadlock.
+ *
+ *===----------------------------------------------------------------------===
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+// Clang objects if you redefine a builtin. This little hack allows us to
+// define a function with the same name as an intrinsic.
+#pragma redefine_extname __atomic_load_c __atomic_load
+#pragma redefine_extname __atomic_store_c __atomic_store
+#pragma redefine_extname __atomic_exchange_c __atomic_exchange
+#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
+
+/// Number of locks. This allocates one page on 32-bit platforms, two on
+/// 64-bit. This can be specified externally if a different trade-off between
+/// memory usage and contention probability is required for a given platform.
+#ifndef SPINLOCK_COUNT
+#define SPINLOCK_COUNT (1<<10)
+#endif
+static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
+
+////////////////////////////////////////////////////////////////////////////////
+// Platform-specific lock implementation. Falls back to spinlocks if none is
+// defined. Each platform should define the Lock type, and corresponding
+// lock() and unlock() functions.
+////////////////////////////////////////////////////////////////////////////////
+#ifdef __FreeBSD__
+#include <errno.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <sys/umtx.h>
+typedef struct _usem Lock;
+inline static void unlock(Lock *l) {
+ __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
+ __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ if (l->_has_waiters)
+ _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
+}
+inline static void lock(Lock *l) {
+ uint32_t old = 1;
+ while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
+ old = 1;
+ }
+}
+/// locks for atomic operations
+static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} };
+#else
+typedef _Atomic(uintptr_t) Lock;
+/// Unlock a lock. This is a release operation.
+inline static void unlock(Lock *l) {
+ __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
+}
+/// Locks a lock. In the current implementation, this is potentially
+/// unbounded in the contended case.
+inline static void lock(Lock *l) {
+ uintptr_t old = 0;
+ while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED))
+ old = 0;
+}
+/// locks for atomic operations
+static Lock locks[SPINLOCK_COUNT];
+#endif
+
+
+/// Returns a lock to use for a given pointer.
+static inline Lock *lock_for_pointer(void *ptr) {
+ intptr_t hash = (intptr_t)ptr;
+ // Disregard the lowest 4 bits. We want all values that may be part of the
+ // same memory operation to hash to the same value and therefore use the same
+ // lock.
+ hash >>= 4;
+ // Use the next bits as the basis for the hash
+ intptr_t low = hash & SPINLOCK_MASK;
+ // Now use the high(er) set of bits to perturb the hash, so that we don't
+ // get collisions from atomic fields in a single object
+ hash >>= 16;
+ hash ^= low;
+ // Return a pointer to the word to use
+ return locks + (hash & SPINLOCK_MASK);
+}
+
+/// Macros for determining whether a size is lock free. Clang cannot yet
+/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
+/// not lock free.
+#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
+#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
+#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
+#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
+#define IS_LOCK_FREE_16 0
+
+/// Macro that calls the compiler-generated lock-free versions of functions
+/// when they exist.
+#define LOCK_FREE_CASES() \
+ do {\
+ switch (size) {\
+ case 2:\
+ if (IS_LOCK_FREE_2) {\
+ LOCK_FREE_ACTION(uint16_t);\
+ }\
+ case 4:\
+ if (IS_LOCK_FREE_4) {\
+ LOCK_FREE_ACTION(uint32_t);\
+ }\
+ case 8:\
+ if (IS_LOCK_FREE_8) {\
+ LOCK_FREE_ACTION(uint64_t);\
+ }\
+ case 16:\
+ if (IS_LOCK_FREE_16) {\
+ /* FIXME: __uint128_t isn't available on 32 bit platforms.
+ LOCK_FREE_ACTION(__uint128_t);*/\
+ }\
+ }\
+ } while (0)
+
+
+/// An atomic load operation. This is atomic with respect to the source
+/// pointer only.
+void __atomic_load_c(int size, void *src, void *dest, int model) {
+#define LOCK_FREE_ACTION(type) \
+ *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
+ return;
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(src);
+ lock(l);
+ memcpy(dest, src, size);
+ unlock(l);
+}
+
+/// An atomic store operation. This is atomic with respect to the destination
+/// pointer only.
+void __atomic_store_c(int size, void *dest, void *src, int model) {
+#define LOCK_FREE_ACTION(type) \
+  __c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
+ return;
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(dest);
+ lock(l);
+ memcpy(dest, src, size);
+ unlock(l);
+}
+
+/// Atomic compare and exchange operation. If the value at *ptr is identical
+/// to the value at *expected, this copies the value at *desired to *ptr. If
+/// they differ, this stores the current value from *ptr in *expected.
+///
+/// This function returns 1 if the exchange takes place or 0 if it fails.
+int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
+ void *desired, int success, int failure) {
+#define LOCK_FREE_ACTION(type) \
+ return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
+ *(type*)desired, success, failure)
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(ptr);
+ lock(l);
+ if (memcmp(ptr, expected, size) == 0) {
+ memcpy(ptr, desired, size);
+ unlock(l);
+ return 1;
+ }
+ memcpy(expected, ptr, size);
+ unlock(l);
+ return 0;
+}
+
+/// Performs an atomic exchange operation between two pointers. This is atomic
+/// with respect to the target address.
+void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
+#define LOCK_FREE_ACTION(type) \
+ *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
+ model);\
+ return;
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(ptr);
+ lock(l);
+ memcpy(old, ptr, size);
+ memcpy(ptr, val, size);
+ unlock(l);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Where the size is known at compile time, the compiler may emit calls to
+// specialised versions of the above functions.
+////////////////////////////////////////////////////////////////////////////////
+#define OPTIMISED_CASES\
+ OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
+ OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
+ OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
+ OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
+ /* FIXME: __uint128_t isn't available on 32 bit platforms.
+ OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\
+
+#define OPTIMISED_CASE(n, lockfree, type)\
+type __atomic_load_##n(type *src, int model) {\
+ if (lockfree)\
+ return __c11_atomic_load((_Atomic(type)*)src, model);\
+ Lock *l = lock_for_pointer(src);\
+ lock(l);\
+ type val = *src;\
+ unlock(l);\
+ return val;\
+}
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+#define OPTIMISED_CASE(n, lockfree, type)\
+void __atomic_store_##n(type *dest, type val, int model) {\
+ if (lockfree) {\
+ __c11_atomic_store((_Atomic(type)*)dest, val, model);\
+ return;\
+ }\
+ Lock *l = lock_for_pointer(dest);\
+ lock(l);\
+ *dest = val;\
+ unlock(l);\
+ return;\
+}
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+#define OPTIMISED_CASE(n, lockfree, type)\
+type __atomic_exchange_##n(type *dest, type val, int model) {\
+ if (lockfree)\
+ return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
+ Lock *l = lock_for_pointer(dest);\
+ lock(l);\
+ type tmp = *dest;\
+ *dest = val;\
+ unlock(l);\
+ return tmp;\
+}
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+#define OPTIMISED_CASE(n, lockfree, type)\
+int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
+ int success, int failure) {\
+ if (lockfree)\
+ return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected, desired,\
+ success, failure);\
+ Lock *l = lock_for_pointer(ptr);\
+ lock(l);\
+ if (*ptr == *expected) {\
+ *ptr = desired;\
+ unlock(l);\
+ return 1;\
+ }\
+ *expected = *ptr;\
+ unlock(l);\
+ return 0;\
+}
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+////////////////////////////////////////////////////////////////////////////////
+// Atomic read-modify-write operations for integers of various sizes.
+////////////////////////////////////////////////////////////////////////////////
+#define ATOMIC_RMW(n, lockfree, type, opname, op) \
+type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
+ if (lockfree) \
+ return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
+ Lock *l = lock_for_pointer(ptr);\
+ lock(l);\
+ type tmp = *ptr;\
+ *ptr = tmp op val;\
+ unlock(l);\
+ return tmp;\
+}
+
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
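
A minimal standalone sketch of the lock-selection scheme described in atomic.c's
header comment and lock_for_pointer() above: hash the object address into a
fixed table of locks, and for an operation touching two locations take the
lower-indexed lock first to avoid deadlock. Illustrative only; not part of the
imported sources, and the table size simply mirrors the file's default.

    #include <cstdint>
    #include <cstdio>

    #define SPINLOCK_COUNT (1 << 10)
    static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

    // Mirrors lock_for_pointer(): drop the low 4 bits so accesses within one
    // memory operation share a lock, then fold in higher bits to reduce
    // collisions between atomic fields of a single object.
    static long lock_index_for_pointer(const void *ptr) {
      intptr_t hash = (intptr_t)ptr;
      hash >>= 4;
      intptr_t low = hash & SPINLOCK_MASK;
      hash >>= 16;
      hash ^= low;
      return hash & SPINLOCK_MASK;
    }

    int main() {
      int a, b;
      long ia = lock_index_for_pointer(&a);
      long ib = lock_index_for_pointer(&b);
      // Acquire the lower-indexed lock first when both are needed.
      printf("lock %ld then %ld\n", ia < ib ? ia : ib, ia < ib ? ib : ia);
      return 0;
    }
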
diff --git a/lib/clzti2.c b/lib/clzti2.c
index 7a650eb..355c20e 100644
--- a/lib/clzti2.c
+++ b/lib/clzti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: the number of leading 0-bits */
/* Precondition: a != 0 */
diff --git a/lib/cmpti2.c b/lib/cmpti2.c
index b156fce..d0aec45 100644
--- a/lib/cmpti2.c
+++ b/lib/cmpti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: if (a < b) returns 0
* if (a == b) returns 1
* if (a > b) returns 2
diff --git a/lib/ctzti2.c b/lib/ctzti2.c
index 1c9508f..66dc01b 100644
--- a/lib/ctzti2.c
+++ b/lib/ctzti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: the number of trailing 0-bits */
/* Precondition: a != 0 */
diff --git a/lib/divdf3.c b/lib/divdf3.c
index cc034dd..efce6bb 100644
--- a/lib/divdf3.c
+++ b/lib/divdf3.c
@@ -19,7 +19,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(ddiv, divdf3);
+ARM_EABI_FNALIAS(ddiv, divdf3)
fp_t __divdf3(fp_t a, fp_t b) {
diff --git a/lib/divmoddi4.c b/lib/divmoddi4.c
index a2b8714..2fe2b48 100644
--- a/lib/divmoddi4.c
+++ b/lib/divmoddi4.c
@@ -16,8 +16,6 @@
extern COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b);
-ARM_EABI_FNALIAS(ldivmod, divmoddi4);
-
/* Returns: a / b, *rem = a % b */
COMPILER_RT_ABI di_int
diff --git a/lib/divsf3.c b/lib/divsf3.c
index a8230e4..c91c648 100644
--- a/lib/divsf3.c
+++ b/lib/divsf3.c
@@ -19,7 +19,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(fdiv, divsf3);
+ARM_EABI_FNALIAS(fdiv, divsf3)
fp_t __divsf3(fp_t a, fp_t b) {
diff --git a/lib/divsi3.c b/lib/divsi3.c
index 0d81cb8..cd19de9 100644
--- a/lib/divsi3.c
+++ b/lib/divsi3.c
@@ -18,7 +18,7 @@ su_int COMPILER_RT_ABI __udivsi3(su_int n, su_int d);
/* Returns: a / b */
-ARM_EABI_FNALIAS(idiv, divsi3);
+ARM_EABI_FNALIAS(idiv, divsi3)
COMPILER_RT_ABI si_int
__divsi3(si_int a, si_int b)
@@ -29,5 +29,11 @@ __divsi3(si_int a, si_int b)
a = (a ^ s_a) - s_a; /* negate if s_a == -1 */
b = (b ^ s_b) - s_b; /* negate if s_b == -1 */
s_a ^= s_b; /* sign of quotient */
- return (__udivsi3(a, b) ^ s_a) - s_a; /* negate if s_a == -1 */
+ /*
+ * On CPUs without unsigned hardware division support,
+ * this calls __udivsi3 (notice the cast to su_int).
+ * On CPUs with unsigned hardware division support,
+ * this uses the unsigned division instruction.
+ */
+ return ((su_int)a/(su_int)b ^ s_a) - s_a; /* negate if s_a == -1 */
}
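
A standalone check of the sign handling used in __divsi3 above: clear the signs
with xor/subtract, divide as unsigned (hardware udiv or __udivsi3), then restore
the quotient's sign. Illustrative only; not part of the imported sources.

    #include <cstdio>
    typedef int si_int;
    typedef unsigned su_int;

    static si_int divsi3_sketch(si_int a, si_int b) {
      si_int s_a = a >> 31;                        /* -1 if a < 0, else 0 */
      si_int s_b = b >> 31;                        /* -1 if b < 0, else 0 */
      a = (a ^ s_a) - s_a;                         /* |a| */
      b = (b ^ s_b) - s_b;                         /* |b| */
      s_a ^= s_b;                                  /* sign of quotient */
      return ((su_int)a / (su_int)b ^ s_a) - s_a;  /* negate if s_a == -1 */
    }

    int main() {
      printf("%d %d %d\n", divsi3_sketch(-7, 2), divsi3_sketch(7, -2),
             divsi3_sketch(-7, -2));               /* prints -3 -3 3 */
      return 0;
    }
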
diff --git a/lib/divti3.c b/lib/divti3.c
index 4ec3fa3..0242c13 100644
--- a/lib/divti3.c
+++ b/lib/divti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
/* Returns: a / b */
diff --git a/lib/extendsfdf2.c b/lib/extendsfdf2.c
index 9466de7..91fd2b4 100644
--- a/lib/extendsfdf2.c
+++ b/lib/extendsfdf2.c
@@ -66,7 +66,7 @@ static inline dst_t dstFromRep(dst_rep_t x) {
// End helper routines. Conversion implementation follows.
-ARM_EABI_FNALIAS(f2d, extendsfdf2);
+ARM_EABI_FNALIAS(f2d, extendsfdf2)
dst_t __extendsfdf2(src_t a) {
diff --git a/lib/ffsti2.c b/lib/ffsti2.c
index 948c696..27e15d5 100644
--- a/lib/ffsti2.c
+++ b/lib/ffsti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: the index of the least significant 1-bit in a, or
* the value zero if a is zero. The least significant bit is index one.
*/
diff --git a/lib/fixdfdi.c b/lib/fixdfdi.c
index c6732db..7665ea5 100644
--- a/lib/fixdfdi.c
+++ b/lib/fixdfdi.c
@@ -23,7 +23,7 @@
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(d2lz, fixdfdi);
+ARM_EABI_FNALIAS(d2lz, fixdfdi)
di_int
__fixdfdi(double a)
diff --git a/lib/fixdfsi.c b/lib/fixdfsi.c
index 3d4379e..614d032 100644
--- a/lib/fixdfsi.c
+++ b/lib/fixdfsi.c
@@ -18,7 +18,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(d2iz, fixdfsi);
+ARM_EABI_FNALIAS(d2iz, fixdfsi)
int __fixdfsi(fp_t a) {
diff --git a/lib/fixdfti.c b/lib/fixdfti.c
index 4140d14..b110a94 100644
--- a/lib/fixdfti.c
+++ b/lib/fixdfti.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: double is a IEEE 64 bit floating point type
diff --git a/lib/fixsfdi.c b/lib/fixsfdi.c
index 81ceab0..8a06690 100644
--- a/lib/fixsfdi.c
+++ b/lib/fixsfdi.c
@@ -23,7 +23,7 @@
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(d2lz, fixsfdi);
+ARM_EABI_FNALIAS(d2lz, fixsfdi)
COMPILER_RT_ABI di_int
__fixsfdi(float a)
diff --git a/lib/fixsfsi.c b/lib/fixsfsi.c
index f6de609..e3cc42d 100644
--- a/lib/fixsfsi.c
+++ b/lib/fixsfsi.c
@@ -16,7 +16,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(f2iz, fixsfsi);
+ARM_EABI_FNALIAS(f2iz, fixsfsi)
COMPILER_RT_ABI int
__fixsfsi(fp_t a) {
diff --git a/lib/fixsfti.c b/lib/fixsfti.c
index c64e5ae..c730ae0 100644
--- a/lib/fixsfti.c
+++ b/lib/fixsfti.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: float is a IEEE 32 bit floating point type
diff --git a/lib/fixunsdfdi.c b/lib/fixunsdfdi.c
index c0ff160..9e63713 100644
--- a/lib/fixunsdfdi.c
+++ b/lib/fixunsdfdi.c
@@ -26,7 +26,7 @@
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(d2ulz, fixunsdfdi);
+ARM_EABI_FNALIAS(d2ulz, fixunsdfdi)
COMPILER_RT_ABI du_int
__fixunsdfdi(double a)
diff --git a/lib/fixunsdfsi.c b/lib/fixunsdfsi.c
index 2ce4999..c6a3c75 100644
--- a/lib/fixunsdfsi.c
+++ b/lib/fixunsdfsi.c
@@ -26,7 +26,7 @@
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(d2uiz, fixunsdfsi);
+ARM_EABI_FNALIAS(d2uiz, fixunsdfsi)
COMPILER_RT_ABI su_int
__fixunsdfsi(double a)
diff --git a/lib/fixunsdfti.c b/lib/fixunsdfti.c
index 524a207..fb0336f 100644
--- a/lib/fixunsdfti.c
+++ b/lib/fixunsdfti.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a unsigned long long, rounding toward zero.
* Negative values all become zero.
*/
diff --git a/lib/fixunssfdi.c b/lib/fixunssfdi.c
index 09078db..69d5952 100644
--- a/lib/fixunssfdi.c
+++ b/lib/fixunssfdi.c
@@ -25,7 +25,7 @@
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(f2ulz, fixunssfdi);
+ARM_EABI_FNALIAS(f2ulz, fixunssfdi)
COMPILER_RT_ABI du_int
__fixunssfdi(float a)
diff --git a/lib/fixunssfsi.c b/lib/fixunssfsi.c
index d80ed18..e034139 100644
--- a/lib/fixunssfsi.c
+++ b/lib/fixunssfsi.c
@@ -26,7 +26,7 @@
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(f2uiz, fixunssfsi);
+ARM_EABI_FNALIAS(f2uiz, fixunssfsi)
COMPILER_RT_ABI su_int
__fixunssfsi(float a)
diff --git a/lib/fixunssfti.c b/lib/fixunssfti.c
index b807910..8f4c626 100644
--- a/lib/fixunssfti.c
+++ b/lib/fixunssfti.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a unsigned long long, rounding toward zero.
* Negative values all become zero.
*/
diff --git a/lib/fixunsxfti.c b/lib/fixunsxfti.c
index f0e16db..260bfc0 100644
--- a/lib/fixunsxfti.c
+++ b/lib/fixunsxfti.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a unsigned long long, rounding toward zero.
* Negative values all become zero.
*/
diff --git a/lib/fixxfti.c b/lib/fixxfti.c
index 1022770..973dc31 100644
--- a/lib/fixxfti.c
+++ b/lib/fixxfti.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
diff --git a/lib/floatdidf.c b/lib/floatdidf.c
index 2af9e10..e53fa25 100644
--- a/lib/floatdidf.c
+++ b/lib/floatdidf.c
@@ -22,7 +22,7 @@
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
-ARM_EABI_FNALIAS(l2d, floatdidf);
+ARM_EABI_FNALIAS(l2d, floatdidf)
#ifndef __SOFT_FP__
/* Support for systems that have hardware floating-point; we'll set the inexact flag
diff --git a/lib/floatdisf.c b/lib/floatdisf.c
index 6607307..3e47580 100644
--- a/lib/floatdisf.c
+++ b/lib/floatdisf.c
@@ -22,7 +22,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(l2f, floatdisf);
+ARM_EABI_FNALIAS(l2f, floatdisf)
COMPILER_RT_ABI float
__floatdisf(di_int a)
diff --git a/lib/floatsidf.c b/lib/floatsidf.c
index 74cb66b..18f378f 100644
--- a/lib/floatsidf.c
+++ b/lib/floatsidf.c
@@ -18,7 +18,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(i2d, floatsidf);
+ARM_EABI_FNALIAS(i2d, floatsidf)
fp_t __floatsidf(int a) {
diff --git a/lib/floatsisf.c b/lib/floatsisf.c
index a981391..8398393 100644
--- a/lib/floatsisf.c
+++ b/lib/floatsisf.c
@@ -18,7 +18,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(i2f, floatsisf);
+ARM_EABI_FNALIAS(i2f, floatsisf)
fp_t __floatsisf(int a) {
diff --git a/lib/floattidf.c b/lib/floattidf.c
index 3cafea8..77749f8 100644
--- a/lib/floattidf.c
+++ b/lib/floattidf.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a double, rounding toward even.*/
/* Assumption: double is a IEEE 64 bit floating point type
diff --git a/lib/floattisf.c b/lib/floattisf.c
index ab33e4a..4776125 100644
--- a/lib/floattisf.c
+++ b/lib/floattisf.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a float, rounding toward even. */
/* Assumption: float is a IEEE 32 bit floating point type
diff --git a/lib/floattixf.c b/lib/floattixf.c
index 852acc7..3813dc6 100644
--- a/lib/floattixf.c
+++ b/lib/floattixf.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a long double, rounding toward even. */
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
diff --git a/lib/floatundidf.c b/lib/floatundidf.c
index 6791701..e52fa0a 100644
--- a/lib/floatundidf.c
+++ b/lib/floatundidf.c
@@ -22,7 +22,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(ul2d, floatundidf);
+ARM_EABI_FNALIAS(ul2d, floatundidf)
#ifndef __SOFT_FP__
/* Support for systems that have hardware floating-point; we'll set the inexact flag
diff --git a/lib/floatundisf.c b/lib/floatundisf.c
index 1bf5fbb..713a44a 100644
--- a/lib/floatundisf.c
+++ b/lib/floatundisf.c
@@ -22,7 +22,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(ul2f, floatundisf);
+ARM_EABI_FNALIAS(ul2f, floatundisf)
COMPILER_RT_ABI float
__floatundisf(du_int a)
diff --git a/lib/floatunsidf.c b/lib/floatunsidf.c
index 0722248..ba6c2cf 100644
--- a/lib/floatunsidf.c
+++ b/lib/floatunsidf.c
@@ -18,7 +18,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(ui2d, floatunsidf);
+ARM_EABI_FNALIAS(ui2d, floatunsidf)
fp_t __floatunsidf(unsigned int a) {
diff --git a/lib/floatunsisf.c b/lib/floatunsisf.c
index 3dc1cd4..e392c0e 100644
--- a/lib/floatunsisf.c
+++ b/lib/floatunsisf.c
@@ -18,7 +18,7 @@
#include "int_lib.h"
-ARM_EABI_FNALIAS(ui2f, floatunsisf);
+ARM_EABI_FNALIAS(ui2f, floatunsisf)
fp_t __floatunsisf(unsigned int a) {
diff --git a/lib/floatuntidf.c b/lib/floatuntidf.c
index d0889a0..4c1d328 100644
--- a/lib/floatuntidf.c
+++ b/lib/floatuntidf.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a double, rounding toward even. */
/* Assumption: double is a IEEE 64 bit floating point type
diff --git a/lib/floatuntisf.c b/lib/floatuntisf.c
index f552758..c8da260 100644
--- a/lib/floatuntisf.c
+++ b/lib/floatuntisf.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a float, rounding toward even. */
/* Assumption: float is a IEEE 32 bit floating point type
diff --git a/lib/floatuntixf.c b/lib/floatuntixf.c
index 00c07d8..dbce80f 100644
--- a/lib/floatuntixf.c
+++ b/lib/floatuntixf.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: convert a to a long double, rounding toward even. */
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
diff --git a/lib/fp_lib.h b/lib/fp_lib.h
index de5f17f..661119a 100644
--- a/lib/fp_lib.h
+++ b/lib/fp_lib.h
@@ -124,7 +124,7 @@ static inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) {
*lo = *lo << count;
}
-static inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo, int count) {
+static inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo, unsigned int count) {
if (count < typeWidth) {
const bool sticky = *lo << (typeWidth - count);
*lo = *hi << (typeWidth - count) | *lo >> count | sticky;
diff --git a/lib/i386/CMakeLists.txt b/lib/i386/CMakeLists.txt
deleted file mode 100644
index 1c2861a..0000000
--- a/lib/i386/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-SET( SRCS
-
- )
diff --git a/lib/int_endianness.h b/lib/int_endianness.h
index 9466ed4..70bd1773 100644
--- a/lib/int_endianness.h
+++ b/lib/int_endianness.h
@@ -31,7 +31,7 @@
/* .. */
-#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__minix)
#include <sys/endian.h>
#if _BYTE_ORDER == _BIG_ENDIAN
@@ -80,6 +80,13 @@
#endif /* GNU/Linux */
+#if defined(_WIN32)
+
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+
+#endif /* Windows */
+
/* . */
#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
diff --git a/lib/int_util.c b/lib/int_util.c
index f194768..871d191 100644
--- a/lib/int_util.c
+++ b/lib/int_util.c
@@ -29,6 +29,19 @@ void compilerrt_abort_impl(const char *file, int line, const char *function) {
panic("%s:%d: abort in %s", file, line, function);
}
+#elif __APPLE__ && !__STATIC__
+
+/* from libSystem.dylib */
+extern void __assert_rtn(const char *func, const char *file,
+ int line, const char * message) __attribute__((noreturn));
+
+__attribute__((weak))
+__attribute__((visibility("hidden")))
+void compilerrt_abort_impl(const char *file, int line, const char *function) {
+ __assert_rtn(function, file, line, "libcompiler_rt abort");
+}
+
+
#else
/* Get the system definition of abort() */
diff --git a/lib/int_util.h b/lib/int_util.h
index 17d7722..1348b85 100644
--- a/lib/int_util.h
+++ b/lib/int_util.h
@@ -22,11 +22,8 @@
/** \brief Trigger a program abort (or panic for kernel code). */
#define compilerrt_abort() compilerrt_abort_impl(__FILE__, __LINE__, \
__FUNCTION__)
+
void compilerrt_abort_impl(const char *file, int line,
- const char *function)
-#ifndef KERNEL_USE
- __attribute__((weak))
-#endif
- __attribute__((noreturn)) __attribute__((visibility("hidden")));
+ const char *function) __attribute__((noreturn));
#endif /* INT_UTIL_H */
diff --git a/lib/interception/CMakeLists.txt b/lib/interception/CMakeLists.txt
new file mode 100644
index 0000000..033b05f
--- /dev/null
+++ b/lib/interception/CMakeLists.txt
@@ -0,0 +1,37 @@
+# Build for the runtime interception helper library.
+
+set(INTERCEPTION_SOURCES
+ interception_linux.cc
+ interception_mac.cc
+ interception_win.cc
+ )
+
+# Only add this C file if we're building on a Mac. Other source files can be
+# harmlessly compiled on any platform, but the C file draws complaints due
+# to pedantic rules about empty translation units.
+if (APPLE)
+ list(APPEND INTERCEPTION_SOURCES mach_override/mach_override.c)
+endif ()
+
+set(INTERCEPTION_CFLAGS "-fPIC -fno-exceptions -funwind-tables -fvisibility=hidden")
+if (SUPPORTS_NO_VARIADIC_MACROS_FLAG)
+ set(INTERCEPTION_CFLAGS "${INTERCEPTION_CFLAGS} -Wno-variadic-macros")
+endif ()
+
+set(INTERCEPTION_COMMON_DEFINITIONS
+ INTERCEPTION_HAS_EXCEPTIONS=1)
+
+if(CAN_TARGET_X86_64)
+ add_library(RTInterception.x86_64 OBJECT ${INTERCEPTION_SOURCES})
+ set_property(TARGET RTInterception.x86_64 PROPERTY COMPILE_FLAGS
+ "${INTERCEPTION_CFLAGS} ${TARGET_X86_64_CFLAGS}")
+ set_property(TARGET RTInterception.x86_64 APPEND PROPERTY COMPILE_DEFINITIONS
+ ${INTERCEPTION_COMMON_DEFINITIONS})
+endif()
+if(CAN_TARGET_I386)
+ add_library(RTInterception.i386 OBJECT ${INTERCEPTION_SOURCES})
+ set_property(TARGET RTInterception.i386 PROPERTY COMPILE_FLAGS
+ "${INTERCEPTION_CFLAGS} ${TARGET_I386_CFLAGS}")
+ set_property(TARGET RTInterception.i386 APPEND PROPERTY COMPILE_DEFINITIONS
+ ${INTERCEPTION_COMMON_DEFINITIONS})
+endif()
diff --git a/lib/interception/Makefile.mk b/lib/interception/Makefile.mk
new file mode 100644
index 0000000..1412a01
--- /dev/null
+++ b/lib/interception/Makefile.mk
@@ -0,0 +1,23 @@
+#===- lib/interception/Makefile.mk -------------------------*- Makefile -*--===#
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+
+ModuleName := interception
+SubDirs := mach_override
+
+Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
+ObjNames := $(Sources:%.cc=%.o)
+
+Implementation := Generic
+
+# FIXME: use automatic dependencies?
+Dependencies := $(wildcard $(Dir)/*.h)
+Dependencies += $(wildcard $(Dir)/mach_override/*.h)
+
+# Define a convenience variable for all the interception functions.
+InterceptionFunctions := $(Sources:%.cc=%)
diff --git a/lib/interception/interception.h b/lib/interception/interception.h
new file mode 100644
index 0000000..b72bff2
--- /dev/null
+++ b/lib/interception/interception.h
@@ -0,0 +1,168 @@
+//===-- interception.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Machinery for providing replacements/wrappers for system functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef INTERCEPTION_H
+#define INTERCEPTION_H
+
+#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+# error "Interception doesn't work on this operating system."
+#endif
+
+// How to use this library:
+// 1) Include this header to define your own interceptors
+// (see details below).
+// 2) Build all *.cc files and link against them.
+// On Mac you will also need to:
+// 3) Provide your own implementation for the following functions:
+// mach_error_t __interception::allocate_island(void **ptr,
+// size_t size,
+// void *hint);
+// mach_error_t __interception::deallocate_island(void *ptr);
+// See "interception_mac.h" for more details.
+
+// How to add an interceptor:
+// Suppose you need to wrap/replace system function (generally, from libc):
+// int foo(const char *bar, double baz);
+// You'll need to:
+// 1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in
+// your source file.
+// 2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo".
+// INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was
+// intercepted successfully.
+// You can access the original function by calling REAL(foo)(bar, baz).
+// By default, REAL(foo) will be visible only inside your interceptor, and if
+// you want to use it in other parts of RTL, you'll need to:
+// 3a) add DECLARE_REAL(int, foo, const char*, double) to a
+// header file.
+// However, if the call "INTERCEPT_FUNCTION(foo)" and definition for
+// INTERCEPTOR(..., foo, ...) are in different files, you'll instead need to:
+// 3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)
+// to a header file.
+
+// Notes: 1. Things may not work properly if the INTERCEPTOR(...) {...} or
+//           DECLARE_REAL(...) macros are located inside namespaces.
+// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo);" to
+// effectively redirect calls from "foo" to "zoo". In this case
+// you aren't required to implement
+// INTERCEPTOR(int, foo, const char *bar, double baz) {...}
+// but instead you'll have to add
+// DEFINE_REAL(int, foo, const char *bar, double baz) in your
+//           source file (to define a pointer to the overridden function).
+
+// How it works:
+// To replace system functions on Linux we just need to declare functions
+// with same names in our library and then obtain the real function pointers
+// using dlsym().
+// There is one complication. A user may also intercept some of the functions
+// we intercept. To resolve this we declare our interceptors with the
+// __interceptor_ prefix, and then make the actual interceptors weak aliases to
+// functions.
+// This is not so on Mac OS, where the two-level namespace makes
+// our replacement functions invisible to other libraries. This can be overcome
+// with DYLD_FORCE_FLAT_NAMESPACE, but errors loading shared libraries were
+// observed in Chromium when doing so. Instead we use
+// mach_override, a handy framework for patching functions at runtime.
+// To avoid possible name clashes, our replacement functions have
+// the "wrap_" prefix on Mac.
+
+#if defined(__APPLE__)
+# define WRAP(x) wrap_##x
+# define WRAPPER_NAME(x) "wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE
+# define DECLARE_WRAPPER(ret_type, convention, func, ...)
+#elif defined(_WIN32)
+# if defined(_DLL) // DLL CRT
+# define WRAP(x) x
+# define WRAPPER_NAME(x) #x
+# define INTERCEPTOR_ATTRIBUTE
+# else // Static CRT
+# define WRAP(x) wrap_##x
+# define WRAPPER_NAME(x) "wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE
+# endif
+# define DECLARE_WRAPPER(ret_type, convention, func, ...)
+#else
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# define DECLARE_WRAPPER(ret_type, convention, func, ...) \
+ extern "C" ret_type convention func(__VA_ARGS__) \
+ __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
+#endif
+
+#define PTR_TO_REAL(x) real_##x
+#define REAL(x) __interception::PTR_TO_REAL(x)
+#define FUNC_TYPE(x) x##_f
+
+#define DECLARE_REAL(ret_type, func, ...) \
+ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+
+#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+ DECLARE_REAL(ret_type, func, ##__VA_ARGS__) \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__);
+
+// FIXME(timurrrr): We might need to add DECLARE_REAL_EX etc to support
+// different calling conventions later.
+
+#define DEFINE_REAL_EX(ret_type, convention, func, ...) \
+ typedef ret_type (convention *FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+
+// Generally, you don't need to use DEFINE_REAL by itself, as the INTERCEPTOR
+// macro does it for you. In exceptional cases you may need to call REAL(foo)
+// without defining INTERCEPTOR(..., foo, ...), for example, if you override
+// foo with an interceptor for another function.
+#define DEFAULT_CONVENTION
+
+#define DEFINE_REAL(ret_type, func, ...) \
+ DEFINE_REAL_EX(ret_type, DEFAULT_CONVENTION, func, __VA_ARGS__)
+
+#define INTERCEPTOR_EX(ret_type, convention, func, ...) \
+ DEFINE_REAL_EX(ret_type, convention, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, convention, func, __VA_ARGS__) \
+ extern "C" \
+ INTERCEPTOR_ATTRIBUTE \
+ ret_type convention WRAP(func)(__VA_ARGS__)
+
+#define INTERCEPTOR(ret_type, func, ...) \
+ INTERCEPTOR_EX(ret_type, DEFAULT_CONVENTION, func, __VA_ARGS__)
+
+#if defined(_WIN32)
+# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
+ INTERCEPTOR_EX(ret_type, __stdcall, func, __VA_ARGS__)
+#endif
+
+#define INCLUDED_FROM_INTERCEPTION_LIB
+
+#if defined(__linux__)
+# include "interception_linux.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
+#elif defined(__APPLE__)
+# include "interception_mac.h"
+# define OVERRIDE_FUNCTION(old_func, new_func) \
+ OVERRIDE_FUNCTION_MAC(old_func, new_func)
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
+#else // defined(_WIN32)
+# include "interception_win.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
+#endif
+
+#undef INCLUDED_FROM_INTERCEPTION_LIB
+
+#endif // INTERCEPTION_H
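
A hypothetical interceptor following the usage notes in interception.h above
(steps 1 and 2 from "How to add an interceptor"). 'foo' is the example function
from the header's comments, not a real libc symbol; on Linux the macros resolve
REAL(foo) through dlsym(RTLD_NEXT, ...). Sketch only; not part of the imported
sources.

    #include "interception/interception.h"

    // Step 1: define the interceptor in a source file.
    INTERCEPTOR(int, foo, const char *bar, double baz) {
      // ... checks or bookkeeping around the call ...
      return REAL(foo)(bar, baz);
    }

    // Step 2: install it before the first call to foo(); the macro evaluates
    // to true iff the original function was found.
    void InitializeFooInterceptor() {
      INTERCEPT_FUNCTION(foo);
    }
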
diff --git a/lib/interception/interception_linux.cc b/lib/interception/interception_linux.cc
new file mode 100644
index 0000000..37e5933
--- /dev/null
+++ b/lib/interception/interception_linux.cc
@@ -0,0 +1,29 @@
+//===-- interception_linux.cc -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#ifdef __linux__
+
+#include <stddef.h> // for NULL
+#include <dlfcn.h> // for dlsym
+
+namespace __interception {
+bool GetRealFunctionAddress(const char *func_name, void **func_addr,
+ void *real, void *wrapper) {
+ *func_addr = dlsym(RTLD_NEXT, func_name);
+ return real == wrapper;
+}
+} // namespace __interception
+
+
+#endif // __linux__
diff --git a/lib/interception/interception_linux.h b/lib/interception/interception_linux.h
new file mode 100644
index 0000000..76a29c6
--- /dev/null
+++ b/lib/interception/interception_linux.h
@@ -0,0 +1,35 @@
+//===-- interception_linux.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#ifdef __linux__
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_linux.h should be included from interception library only"
+#endif
+
+#ifndef INTERCEPTION_LINUX_H
+#define INTERCEPTION_LINUX_H
+
+namespace __interception {
+// returns true if a function with the given name was found.
+bool GetRealFunctionAddress(const char *func_name, void **func_addr,
+ void *real, void *wrapper);
+} // namespace __interception
+
+#define INTERCEPT_FUNCTION_LINUX(func) \
+ ::__interception::GetRealFunctionAddress(#func, (void**)&REAL(func), \
+ (void*)&(func), (void*)&WRAP(func))
+
+#endif // INTERCEPTION_LINUX_H
+#endif // __linux__
diff --git a/lib/interception/interception_mac.cc b/lib/interception/interception_mac.cc
new file mode 100644
index 0000000..cc9e4a7
--- /dev/null
+++ b/lib/interception/interception_mac.cc
@@ -0,0 +1,33 @@
+//===-- interception_mac.cc -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#ifdef __APPLE__
+
+#define INCLUDED_FROM_INTERCEPTION_LIB
+#include "interception_mac.h"
+#undef INCLUDED_FROM_INTERCEPTION_LIB
+#include "mach_override/mach_override.h"
+
+namespace __interception {
+bool OverrideFunction(void *old_func, void *new_func, void **orig_old_func) {
+ *orig_old_func = NULL;
+ int res = __asan_mach_override_ptr_custom(old_func, new_func,
+ orig_old_func,
+ __interception_allocate_island,
+ __interception_deallocate_island);
+ return (res == 0) && (*orig_old_func != NULL);
+}
+} // namespace __interception
+
+#endif // __APPLE__
diff --git a/lib/interception/interception_mac.h b/lib/interception/interception_mac.h
new file mode 100644
index 0000000..224d961
--- /dev/null
+++ b/lib/interception/interception_mac.h
@@ -0,0 +1,47 @@
+//===-- interception_mac.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#ifdef __APPLE__
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_mac.h should be included from interception.h only"
+#endif
+
+#ifndef INTERCEPTION_MAC_H
+#define INTERCEPTION_MAC_H
+
+#include <mach/mach_error.h>
+#include <stddef.h>
+
+// Allocate memory for the escape island. This cannot be moved to
+// mach_override, because each user of interceptors may specify its
+// own memory range for escape islands.
+extern "C" {
+mach_error_t __interception_allocate_island(void **ptr, size_t unused_size,
+ void *unused_hint);
+mach_error_t __interception_deallocate_island(void *ptr);
+} // extern "C"
+
+namespace __interception {
+// returns true if the old function existed.
+bool OverrideFunction(void *old_func, void *new_func, void **orig_old_func);
+} // namespace __interception
+
+# define OVERRIDE_FUNCTION_MAC(old_func, new_func) \
+ ::__interception::OverrideFunction((void*)old_func, (void*)new_func, \
+ (void**)&REAL(old_func))
+# define INTERCEPT_FUNCTION_MAC(func) OVERRIDE_FUNCTION_MAC(func, WRAP(func))
+
+#endif // INTERCEPTION_MAC_H
+#endif // __APPLE__
diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc
new file mode 100644
index 0000000..a60c741
--- /dev/null
+++ b/lib/interception/interception_win.cc
@@ -0,0 +1,149 @@
+//===-- interception_win.cc -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#ifdef _WIN32
+
+#include <windows.h>
+
+namespace __interception {
+
+bool GetRealFunctionAddress(const char *func_name, void **func_addr) {
+ const char *DLLS[] = {
+ "msvcr80.dll",
+ "msvcr90.dll",
+ "kernel32.dll",
+ NULL
+ };
+ *func_addr = NULL;
+ for (size_t i = 0; *func_addr == NULL && DLLS[i]; ++i) {
+ *func_addr = GetProcAddress(GetModuleHandleA(DLLS[i]), func_name);
+ }
+ return (*func_addr != NULL);
+}
+
+// FIXME: internal_str* and internal_mem* functions should be moved from the
+// ASan sources into interception/.
+
+static void _memset(void *p, int value, size_t sz) {
+ for (size_t i = 0; i < sz; ++i)
+ ((char*)p)[i] = (char)value;
+}
+
+static void _memcpy(void *dst, void *src, size_t sz) {
+ char *dst_c = (char*)dst,
+ *src_c = (char*)src;
+ for (size_t i = 0; i < sz; ++i)
+ dst_c[i] = src_c[i];
+}
+
+static void WriteJumpInstruction(char *jmp_from, char *to) {
+  // jmp XXYYZZWW = E9 WW ZZ YY XX, where XXYYZZWW is the offset from the
+  // instruction following the jmp to the destination.
+ ptrdiff_t offset = to - jmp_from - 5;
+ *jmp_from = '\xE9';
+ *(ptrdiff_t*)(jmp_from + 1) = offset;
+}
+
+bool OverrideFunction(void *old_func, void *new_func, void **orig_old_func) {
+#ifdef _WIN64
+# error OverrideFunction was not tested on x64
+#endif
+ // Basic idea:
+ // We write 5 bytes (jmp-to-new_func) at the beginning of the 'old_func'
+ // to override it. We want to be able to execute the original 'old_func' from
+ // the wrapper, so we need to keep the leading 5+ bytes ('head') of the
+ // original instructions somewhere with a "jmp old_func+head".
+ // We call these 'head'+5 bytes of instructions a "trampoline".
+
+ // Trampolines are allocated from a common pool.
+ const int POOL_SIZE = 1024;
+ static char *pool = NULL;
+ static size_t pool_used = 0;
+ if (pool == NULL) {
+ pool = (char*)VirtualAlloc(NULL, POOL_SIZE,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ // FIXME: set PAGE_EXECUTE_READ access after setting all interceptors?
+ if (pool == NULL)
+ return false;
+ _memset(pool, 0xCC /* int 3 */, POOL_SIZE);
+ }
+
+ char* old_bytes = (char*)old_func;
+ char* trampoline = pool + pool_used;
+
+ // Find out the number of bytes of the instructions we need to copy to the
+ // island and store it in 'head'.
+ size_t head = 0;
+ while (head < 5) {
+ switch (old_bytes[head]) {
+ case '\x55': // push ebp
+ case '\x56': // push esi
+ case '\x57': // push edi
+ head++;
+ continue;
+ }
+ switch (*(unsigned short*)(old_bytes + head)) { // NOLINT
+ case 0xFF8B: // 8B FF = mov edi, edi
+ case 0xEC8B: // 8B EC = mov ebp, esp
+ case 0xC033: // 33 C0 = xor eax, eax
+ head += 2;
+ continue;
+ case 0xEC83: // 83 EC XX = sub esp, XX
+ head += 3;
+ continue;
+ case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
+ head += 6;
+ continue;
+ }
+ switch (0x00FFFFFF & *(unsigned int*)(old_bytes + head)) {
+      case 0x24448A:  // 8A 44 24 XX = mov al, byte ptr [esp+XXh]
+ case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
+ case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
+ case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
+ head += 4;
+ continue;
+ }
+
+ // Unknown instruction!
+ return false;
+ }
+
+ if (pool_used + head + 5 > POOL_SIZE)
+ return false;
+
+ // Now put the "jump to trampoline" instruction into the original code.
+ DWORD old_prot, unused_prot;
+ if (!VirtualProtect(old_func, head, PAGE_EXECUTE_READWRITE, &old_prot))
+ return false;
+
+ // Put the needed instructions into the trampoline bytes.
+ _memcpy(trampoline, old_bytes, head);
+ WriteJumpInstruction(trampoline + head, old_bytes + head);
+ *orig_old_func = trampoline;
+ pool_used += head + 5;
+
+ // Intercept the 'old_func'.
+ WriteJumpInstruction(old_bytes, (char*)new_func);
+ _memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
+
+ if (!VirtualProtect(old_func, head, old_prot, &unused_prot))
+ return false; // not clear if this failure bothers us.
+
+ return true;
+}
+
+} // namespace __interception
+
+#endif // _WIN32
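
For reference, the 5-byte jump that WriteJumpInstruction() emits above encodes
its 32-bit displacement relative to the byte after the jmp instruction. A tiny
standalone sketch with made-up addresses (not part of the imported sources):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t jmp_from = 0x00401000;   // hypothetical patch site (old_func)
      uintptr_t to       = 0x10001000;   // hypothetical wrapper (new_func)
      // E9 <rel32>: displacement measured from the end of the 5-byte jmp.
      int32_t offset = (int32_t)(to - jmp_from - 5);
      printf("E9 rel32=0x%08X\n", (uint32_t)offset);
      return 0;
    }
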
diff --git a/lib/interception/interception_win.h b/lib/interception/interception_win.h
new file mode 100644
index 0000000..9d1586e
--- /dev/null
+++ b/lib/interception/interception_win.h
@@ -0,0 +1,42 @@
+//===-- interception_win.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#ifdef _WIN32
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_win.h should be included from interception library only"
+#endif
+
+#ifndef INTERCEPTION_WIN_H
+#define INTERCEPTION_WIN_H
+
+namespace __interception {
+// returns true if a function with the given name was found.
+bool GetRealFunctionAddress(const char *func_name, void **func_addr);
+
+// returns true if the old function existed, false on failure.
+bool OverrideFunction(void *old_func, void *new_func, void **orig_old_func);
+} // namespace __interception
+
+#if defined(_DLL)
+# define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::GetRealFunctionAddress(#func, (void**)&REAL(func))
+#else
+# define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction((void*)func, (void*)WRAP(func), \
+ (void**)&REAL(func))
+#endif
+
+#endif // INTERCEPTION_WIN_H
+#endif // _WIN32
diff --git a/lib/asan/mach_override/LICENSE.TXT b/lib/interception/mach_override/LICENSE.TXT
index 9446965..9446965 100644
--- a/lib/asan/mach_override/LICENSE.TXT
+++ b/lib/interception/mach_override/LICENSE.TXT
diff --git a/lib/asan/mach_override/Makefile.mk b/lib/interception/mach_override/Makefile.mk
index 78be0b3..8f5ebda 100644
--- a/lib/asan/mach_override/Makefile.mk
+++ b/lib/interception/mach_override/Makefile.mk
@@ -1,4 +1,4 @@
-#===- lib/asan/mach_override/Makefile.mk -------------------*- Makefile -*--===#
+#===- lib/interception/mach_override/Makefile.mk -----------*- Makefile -*--===#
#
# The LLVM Compiler Infrastructure
#
@@ -7,7 +7,7 @@
#
#===------------------------------------------------------------------------===#
-ModuleName := asan
+ModuleName := interception
SubDirs :=
Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
@@ -18,5 +18,5 @@ Implementation := Generic
# FIXME: use automatic dependencies?
Dependencies := $(wildcard $(Dir)/*.h)
-# Define a convenience variable for all the asan functions.
-AsanFunctions += $(Sources:%.c=%)
+# Define a convenience variable for all the interception functions.
+InterceptionFunctions += $(Sources:%.c=%)
diff --git a/lib/asan/mach_override/README.txt b/lib/interception/mach_override/README.txt
index 5f62ad7..5f62ad7 100644
--- a/lib/asan/mach_override/README.txt
+++ b/lib/interception/mach_override/README.txt
diff --git a/lib/asan/mach_override/mach_override.c b/lib/interception/mach_override/mach_override.c
index 640d03d5..499cc02 100644
--- a/lib/asan/mach_override/mach_override.c
+++ b/lib/interception/mach_override/mach_override.c
@@ -114,6 +114,14 @@ allocateBranchIsland(
freeBranchIsland(
BranchIsland *island ) __attribute__((visibility("hidden")));
+ mach_error_t
+defaultIslandMalloc(
+ void **ptr, size_t unused_size, void *hint) __attribute__((visibility("hidden")));
+
+ mach_error_t
+defaultIslandFree(
+ void *ptr) __attribute__((visibility("hidden")));
+
#if defined(__ppc__) || defined(__POWERPC__)
mach_error_t
setBranchIslandTarget(
@@ -149,6 +157,12 @@ fixupInstructions(
void *instructionsToFix,
int instructionCount,
uint8_t *instructionSizes ) __attribute__((visibility("hidden")));
+
+#ifdef DEBUG_DISASM
+ static void
+dump16Bytes(
+ void *ptr);
+#endif // DEBUG_DISASM
#endif
/*******************************************************************************
@@ -175,12 +189,38 @@ mach_error_t makeIslandExecutable(void *address) {
}
#endif
+ mach_error_t
+defaultIslandMalloc(
+ void **ptr, size_t unused_size, void *hint) {
+ return allocateBranchIsland( (BranchIsland**)ptr, kAllocateHigh, hint );
+}
+ mach_error_t
+defaultIslandFree(
+ void *ptr) {
+ return freeBranchIsland(ptr);
+}
+
mach_error_t
__asan_mach_override_ptr(
void *originalFunctionAddress,
const void *overrideFunctionAddress,
void **originalFunctionReentryIsland )
{
+ return __asan_mach_override_ptr_custom(originalFunctionAddress,
+ overrideFunctionAddress,
+ originalFunctionReentryIsland,
+ defaultIslandMalloc,
+ defaultIslandFree);
+}
+
+ mach_error_t
+__asan_mach_override_ptr_custom(
+ void *originalFunctionAddress,
+ const void *overrideFunctionAddress,
+ void **originalFunctionReentryIsland,
+ island_malloc *alloc,
+ island_free *dealloc)
+{
assert( originalFunctionAddress );
assert( overrideFunctionAddress );
@@ -276,10 +316,9 @@ __asan_mach_override_ptr(
// Allocate and target the escape island to the overriding function.
BranchIsland *escapeIsland = NULL;
- if( !err )
- err = allocateBranchIsland( &escapeIsland, kAllocateHigh, originalFunctionAddress );
- if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
-
+ if( !err )
+ err = alloc( (void**)&escapeIsland, sizeof(BranchIsland), originalFunctionAddress );
+ if ( err ) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
#if defined(__ppc__) || defined(__POWERPC__)
if( !err )
@@ -319,7 +358,7 @@ __asan_mach_override_ptr(
// technically our original function.
BranchIsland *reentryIsland = NULL;
if( !err && originalFunctionReentryIsland ) {
- err = allocateBranchIsland( &reentryIsland, kAllocateHigh, escapeIsland);
+ err = alloc( (void**)&reentryIsland, sizeof(BranchIsland), escapeIsland);
if( !err )
*originalFunctionReentryIsland = reentryIsland;
}
@@ -383,9 +422,9 @@ __asan_mach_override_ptr(
// Clean up on error.
if( err ) {
if( reentryIsland )
- freeBranchIsland( reentryIsland );
+ dealloc( reentryIsland );
if( escapeIsland )
- freeBranchIsland( escapeIsland );
+ dealloc( escapeIsland );
}
#ifdef DEBUG_DISASM
@@ -662,6 +701,7 @@ static AsmInstructionMatch possibleInstructions[] = {
{ 0x3, {0xFF, 0xFF, 0x00}, {0xFF, 0x77, 0x00} }, // pushq $imm(%rdi)
{ 0x2, {0xFF, 0xFF}, {0x31, 0xC0} }, // xor %eax, %eax
{ 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0x25, 0x00, 0x00, 0x00, 0x00} }, // and $imm, %eax
+ { 0x3, {0xFF, 0xFF, 0xFF}, {0x80, 0x3F, 0x00} }, // cmpb $imm, (%rdi)
{ 0x8, {0xFF, 0xFF, 0xCF, 0xFF, 0x00, 0x00, 0x00, 0x00},
{0x48, 0x8B, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00}, }, // mov $imm, %{rax,rdx,rsp,rsi}
@@ -678,6 +718,7 @@ static AsmInstructionMatch possibleInstructions[] = {
{ 0x3, {0xFF, 0xFF, 0xFF}, {0x49, 0x89, 0xF8} }, // mov %rdi,%r8
{ 0x3, {0xFF, 0xFF, 0x00}, {0xFF, 0x77, 0x00} }, // pushq $imm(%rdi)
{ 0x2, {0xFF, 0xFF}, {0xDB, 0xE3} }, // fninit
+ { 0x3, {0xFF, 0xFF, 0xFF}, {0x48, 0x85, 0xD2} }, // test %rdx,%rdx
{ 0x0 }
};
#endif
@@ -796,9 +837,21 @@ fixupInstructions(
int instructionCount,
uint8_t *instructionSizes )
{
- int index;
+ void *initialOriginalFunction = originalFunction;
+ int index, fixed_size, code_size = 0;
+ for (index = 0;index < instructionCount;index += 1)
+ code_size += instructionSizes[index];
+
+#ifdef DEBUG_DISASM
+ void *initialInstructionsToFix = instructionsToFix;
+ fprintf(stderr, "BEFORE FIXING:\n");
+ dump16Bytes(initialOriginalFunction);
+ dump16Bytes(initialInstructionsToFix);
+#endif // DEBUG_DISASM
+
for (index = 0;index < instructionCount;index += 1)
{
+ fixed_size = instructionSizes[index];
if ((*(uint8_t*)instructionsToFix == 0xE9) || // 32-bit jump relative
(*(uint8_t*)instructionsToFix == 0xE8)) // 32-bit call relative
{
@@ -806,13 +859,56 @@ fixupInstructions(
uint32_t *jumpOffsetPtr = (uint32_t*)((uintptr_t)instructionsToFix + 1);
*jumpOffsetPtr += offset;
}
-
+ if ((*(uint8_t*)instructionsToFix == 0x74) || // Near jump if equal (je), 2 bytes.
+ (*(uint8_t*)instructionsToFix == 0x77)) // Near jump if above (ja), 2 bytes.
+ {
+ // We replace a near je/ja instruction, "7P JJ", with a 32-bit je/ja, "0F 8P WW XX YY ZZ".
+ // This is critical, otherwise a near jump will likely fall outside the original function.
+ uint32_t offset = (uintptr_t)initialOriginalFunction - (uintptr_t)escapeIsland;
+ uint32_t jumpOffset = *(uint8_t*)((uintptr_t)instructionsToFix + 1);
+ *(uint8_t*)(instructionsToFix + 1) = *(uint8_t*)instructionsToFix + 0x10;
+ *(uint8_t*)instructionsToFix = 0x0F;
+ uint32_t *jumpOffsetPtr = (uint32_t*)((uintptr_t)instructionsToFix + 2 );
+ *jumpOffsetPtr = offset + jumpOffset;
+ fixed_size = 6;
+ }
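+    // For example (bytes are illustrative): "74 0A" (je +10) is rewritten as
+    // "0F 84" followed by a 32-bit displacement equal to 10 plus the distance
+    // from the escape island back to the original function.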
originalFunction = (void*)((uintptr_t)originalFunction + instructionSizes[index]);
escapeIsland = (void*)((uintptr_t)escapeIsland + instructionSizes[index]);
- instructionsToFix = (void*)((uintptr_t)instructionsToFix + instructionSizes[index]);
- }
+ instructionsToFix = (void*)((uintptr_t)instructionsToFix + fixed_size);
+
+ // Expanding short instructions into longer ones may overwrite the next instructions,
+ // so we must restore them.
+ code_size -= fixed_size;
+ if ((code_size > 0) && (fixed_size != instructionSizes[index])) {
+ bcopy(originalFunction, instructionsToFix, code_size);
+ }
+ }
+#ifdef DEBUG_DISASM
+ fprintf(stderr, "AFTER_FIXING:\n");
+ dump16Bytes(initialOriginalFunction);
+ dump16Bytes(initialInstructionsToFix);
+#endif // DEBUG_DISASM
+}
+
+#ifdef DEBUG_DISASM
+#define HEX_DIGIT(x) ((((x) % 16) < 10) ? ('0' + ((x) % 16)) : ('A' + ((x) % 16 - 10)))
+
+ static void
+dump16Bytes(
+ void *ptr) {
+ int i;
+ char buf[3];
+ uint8_t *bytes = (uint8_t*)ptr;
+ for (i = 0; i < 16; i++) {
+ buf[0] = HEX_DIGIT(bytes[i] / 16);
+ buf[1] = HEX_DIGIT(bytes[i] % 16);
+ buf[2] = ' ';
+ write(2, buf, 3);
+ }
+ write(2, "\n", 1);
}
+#endif // DEBUG_DISASM
#endif
#if defined(__i386__)
diff --git a/lib/asan/mach_override/mach_override.h b/lib/interception/mach_override/mach_override.h
index dcccbcd..7e60cdc 100644
--- a/lib/asan/mach_override/mach_override.h
+++ b/lib/interception/mach_override/mach_override.h
@@ -85,6 +85,19 @@ __asan_mach_override_ptr(
const void *overrideFunctionAddress,
void **originalFunctionReentryIsland );
+// Allow to use custom allocation and deallocation routines with mach_override_ptr().
+// This should help to speed up the things on x86_64.
+typedef mach_error_t island_malloc( void **ptr, size_t size, void *hint );
+typedef mach_error_t island_free( void *ptr );
+
+ mach_error_t
+__asan_mach_override_ptr_custom(
+ void *originalFunctionAddress,
+ const void *overrideFunctionAddress,
+ void **originalFunctionReentryIsland,
+ island_malloc *alloc,
+ island_free *dealloc );
+
/************************************************************************************//**
diff --git a/lib/lshrdi3.c b/lib/lshrdi3.c
index 8af3e0c..6b1ea92 100644
--- a/lib/lshrdi3.c
+++ b/lib/lshrdi3.c
@@ -18,7 +18,7 @@
/* Precondition: 0 <= b < bits_in_dword */
-ARM_EABI_FNALIAS(llsr, lshrdi3);
+ARM_EABI_FNALIAS(llsr, lshrdi3)
COMPILER_RT_ABI di_int
__lshrdi3(di_int a, si_int b)
diff --git a/lib/lshrti3.c b/lib/lshrti3.c
index 5fdd99e..be76814 100644
--- a/lib/lshrti3.c
+++ b/lib/lshrti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: logical a >> b */
/* Precondition: 0 <= b < bits_in_tword */
diff --git a/lib/modti3.c b/lib/modti3.c
index dbe5e94..752202d 100644
--- a/lib/modti3.c
+++ b/lib/modti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
/*Returns: a % b */
diff --git a/lib/muldf3.c b/lib/muldf3.c
index 86d72d8..c38edba 100644
--- a/lib/muldf3.c
+++ b/lib/muldf3.c
@@ -15,7 +15,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(dmul, muldf3);
+ARM_EABI_FNALIAS(dmul, muldf3)
COMPILER_RT_ABI fp_t
__muldf3(fp_t a, fp_t b) {
@@ -96,7 +96,7 @@ __muldf3(fp_t a, fp_t b) {
// a zero of the appropriate sign. Mathematically there is no need to
// handle this case separately, but we make it a special case to
// simplify the shift logic.
- const int shift = 1 - productExponent;
+ const unsigned int shift = 1U - (unsigned int)productExponent;
if (shift >= typeWidth) return fromRep(productSign);
// Otherwise, shift the significand of the result so that the round
diff --git a/lib/muldi3.c b/lib/muldi3.c
index 3e99630..2dae44c 100644
--- a/lib/muldi3.c
+++ b/lib/muldi3.c
@@ -40,7 +40,7 @@ __muldsi3(su_int a, su_int b)
/* Returns: a * b */
-ARM_EABI_FNALIAS(lmul, muldi3);
+ARM_EABI_FNALIAS(lmul, muldi3)
COMPILER_RT_ABI di_int
__muldi3(di_int a, di_int b)
diff --git a/lib/muloti4.c b/lib/muloti4.c
index 1fcd0ba..f58dd07 100644
--- a/lib/muloti4.c
+++ b/lib/muloti4.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: a * b */
/* Effects: sets *overflow to 1 if a * b overflows */
diff --git a/lib/mulsf3.c b/lib/mulsf3.c
index fce2fd4..861a9ba 100644
--- a/lib/mulsf3.c
+++ b/lib/mulsf3.c
@@ -15,7 +15,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(fmul, mulsf3);
+ARM_EABI_FNALIAS(fmul, mulsf3)
COMPILER_RT_ABI fp_t
__mulsf3(fp_t a, fp_t b) {
@@ -92,7 +92,7 @@ __mulsf3(fp_t a, fp_t b) {
if (productExponent <= 0) {
// Result is denormal before rounding, the exponent is zero and we
// need to shift the significand.
- wideRightShiftWithSticky(&productHi, &productLo, 1 - productExponent);
+ wideRightShiftWithSticky(&productHi, &productLo, 1U - (unsigned)productExponent);
}
else {
diff --git a/lib/multi3.c b/lib/multi3.c
index ad8ab3f..0b8730f 100644
--- a/lib/multi3.c
+++ b/lib/multi3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: a * b */
static
diff --git a/lib/mulvti3.c b/lib/mulvti3.c
index ae65cf8..31f7d2f 100644
--- a/lib/mulvti3.c
+++ b/lib/mulvti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: a * b */
/* Effects: aborts if a * b overflows */
diff --git a/lib/negdf2.c b/lib/negdf2.c
index b11b480..4e17513 100644
--- a/lib/negdf2.c
+++ b/lib/negdf2.c
@@ -14,7 +14,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(dneg, negdf2);
+ARM_EABI_FNALIAS(dneg, negdf2)
fp_t __negdf2(fp_t a) {
return fromRep(toRep(a) ^ signBit);
diff --git a/lib/negsf2.c b/lib/negsf2.c
index f8ef2d1..29c17be 100644
--- a/lib/negsf2.c
+++ b/lib/negsf2.c
@@ -14,7 +14,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-ARM_EABI_FNALIAS(fneg, negsf2);
+ARM_EABI_FNALIAS(fneg, negsf2)
COMPILER_RT_ABI fp_t
__negsf2(fp_t a) {
diff --git a/lib/negti2.c b/lib/negti2.c
index 774e808..f7e4ad3 100644
--- a/lib/negti2.c
+++ b/lib/negti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: -a */
ti_int
diff --git a/lib/negvti2.c b/lib/negvti2.c
index ef766bb..05df615 100644
--- a/lib/negvti2.c
+++ b/lib/negvti2.c
@@ -12,10 +12,10 @@
*===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: -a */
/* Effects: aborts if -a overflows */
diff --git a/lib/parityti2.c b/lib/parityti2.c
index 8f85745..a1f47b1 100644
--- a/lib/parityti2.c
+++ b/lib/parityti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: 1 if number of bits is odd else returns 0 */
si_int __paritydi2(di_int a);
diff --git a/lib/popcountti2.c b/lib/popcountti2.c
index 68d9427..9566673 100644
--- a/lib/popcountti2.c
+++ b/lib/popcountti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: count of 1 bits */
si_int
diff --git a/lib/powitf2.c b/lib/powitf2.c
index 189632c..d3b9349 100644
--- a/lib/powitf2.c
+++ b/lib/powitf2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if _ARCH_PPC
-
#include "int_lib.h"
+#if _ARCH_PPC
+
/* Returns: a ^ b */
long double
diff --git a/lib/ppc/CMakeLists.txt b/lib/ppc/CMakeLists.txt
deleted file mode 100644
index fb0fcd5..0000000
--- a/lib/ppc/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-SET( SRCS
- fixtfdi.c
- gcc_qdiv.c
- gcc_qmul.c
- divtc3.c
- gcc_qsub.c
- multc3.c
- floatditf.c
- gcc_qadd.c
- fixunstfdi.c
- floatunditf.c
- )
diff --git a/lib/profile/GCDAProfiling.c b/lib/profile/GCDAProfiling.c
index fd506e9..8f92a91 100644
--- a/lib/profile/GCDAProfiling.c
+++ b/lib/profile/GCDAProfiling.c
@@ -49,10 +49,8 @@ static void write_int32(uint32_t i) {
}
static void write_int64(uint64_t i) {
- uint32_t lo, hi;
- lo = i >> 0;
- hi = i >> 32;
-
+ uint32_t lo = i >> 0;
+ uint32_t hi = i >> 32;
write_int32(lo);
write_int32(hi);
}
@@ -69,39 +67,55 @@ static void write_string(const char *s) {
}
static char *mangle_filename(const char *orig_filename) {
- /* TODO: handle GCOV_PREFIX_STRIP */
- const char *prefix;
char *filename = 0;
-
- prefix = getenv("GCOV_PREFIX");
+ int prefix_len = 0;
+ int prefix_strip = 0;
+ int level = 0;
+ const char *fname = orig_filename, *ptr = NULL;
+ const char *prefix = getenv("GCOV_PREFIX");
+ const char *tmp = getenv("GCOV_PREFIX_STRIP");
if (!prefix)
return strdup(orig_filename);
- filename = malloc(strlen(prefix) + 1 + strlen(orig_filename) + 1);
+ if (tmp) {
+ prefix_strip = atoi(tmp);
+
+ /* Negative GCOV_PREFIX_STRIP values are ignored */
+ if (prefix_strip < 0)
+ prefix_strip = 0;
+ }
+
+ prefix_len = strlen(prefix);
+ filename = malloc(prefix_len + 1 + strlen(orig_filename) + 1);
strcpy(filename, prefix);
- strcat(filename, "/");
- strcat(filename, orig_filename);
+
+ if (prefix[prefix_len - 1] != '/')
+ strcat(filename, "/");
+
+ for (ptr = fname + 1; *ptr != '\0' && level < prefix_strip; ++ptr) {
+ if (*ptr != '/') continue;
+ fname = ptr;
+ ++level;
+ }
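+  /* For example (illustrative path): with GCOV_PREFIX_STRIP=2, the loop above
+     drops the leading "/a/b" from "/a/b/c/d.gcda" before the remainder is
+     appended to GCOV_PREFIX. */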
+
+ strcat(filename, fname);
return filename;
}
-static void recursive_mkdir(const char *filename) {
- char *pathname;
- int i, e;
+static void recursive_mkdir(char *filename) {
+ int i;
- for (i = 1, e = strlen(filename); i != e; ++i) {
- if (filename[i] == '/') {
- pathname = malloc(i + 1);
- strncpy(pathname, filename, i);
- pathname[i] = '\0';
+ for (i = 1; filename[i] != '\0'; ++i) {
+ if (filename[i] != '/') continue;
+ filename[i] = '\0';
#ifdef _WIN32
- _mkdir(pathname);
+ _mkdir(filename);
#else
- mkdir(pathname, 0750); /* some of these will fail, ignore it. */
+ mkdir(filename, 0755); /* Some of these will fail, ignore it. */
#endif
- free(pathname);
- }
+ filename[i] = '/';
}
}
@@ -114,10 +128,18 @@ static void recursive_mkdir(const char *filename) {
* started at a time.
*/
void llvm_gcda_start_file(const char *orig_filename) {
- char *filename;
- filename = mangle_filename(orig_filename);
- recursive_mkdir(filename);
- output_file = fopen(filename, "wb");
+ char *filename = mangle_filename(orig_filename);
+ output_file = fopen(filename, "w+b");
+
+ if (!output_file) {
+ recursive_mkdir(filename);
+ output_file = fopen(filename, "w+b");
+ if (!output_file) {
+ fprintf(stderr, "profiling:%s: cannot open\n", filename);
+ free(filename);
+ return;
+ }
+ }
/* gcda file, version 404*, stamp LLVM. */
#ifdef __APPLE__
@@ -152,8 +174,9 @@ void llvm_gcda_increment_indirect_counter(uint32_t *predecessor,
++*counter;
#ifdef DEBUG_GCDAPROFILING
else
- printf("llvmgcda: increment_indirect_counter counters=%x, pred=%u\n",
- state_table_row, *predecessor);
+ fprintf(stderr,
+ "llvmgcda: increment_indirect_counter counters=%x, pred=%u\n",
+ state_table_row, *predecessor);
#endif
}
@@ -161,6 +184,7 @@ void llvm_gcda_emit_function(uint32_t ident, const char *function_name) {
#ifdef DEBUG_GCDAPROFILING
printf("llvmgcda: function id=%x\n", ident);
#endif
+ if (!output_file) return;
/* function tag */
fwrite("\0\0\0\1", 4, 1, output_file);
@@ -173,23 +197,24 @@ void llvm_gcda_emit_function(uint32_t ident, const char *function_name) {
void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) {
uint32_t i;
- /* counter #1 (arcs) tag */
+
+ /* Counter #1 (arcs) tag */
+ if (!output_file) return;
fwrite("\0\0\xa1\1", 4, 1, output_file);
write_int32(num_counters * 2);
- for (i = 0; i < num_counters; ++i) {
+ for (i = 0; i < num_counters; ++i)
write_int64(counters[i]);
- }
#ifdef DEBUG_GCDAPROFILING
printf("llvmgcda: %u arcs\n", num_counters);
- for (i = 0; i < num_counters; ++i) {
+ for (i = 0; i < num_counters; ++i)
printf("llvmgcda: %llu\n", (unsigned long long)counters[i]);
- }
#endif
}
void llvm_gcda_end_file() {
/* Write out EOF record. */
+ if (!output_file) return;
fwrite("\0\0\0\0\0\0\0\0", 8, 1, output_file);
fclose(output_file);
output_file = NULL;
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
new file mode 100644
index 0000000..d797a56
--- /dev/null
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Build system for the common Sanitizer runtime support library components.
+# These components are shared between AddressSanitizer and ThreadSanitizer.
+
+set(SANITIZER_SOURCES
+ sanitizer_allocator.cc
+ sanitizer_common.cc
+ sanitizer_flags.cc
+ sanitizer_libc.cc
+ sanitizer_linux.cc
+ sanitizer_mac.cc
+ sanitizer_posix.cc
+ sanitizer_printf.cc
+ sanitizer_symbolizer.cc
+ sanitizer_win.cc
+ )
+
+set(SANITIZER_CFLAGS "-fPIC -fno-exceptions -funwind-tables -fvisibility=hidden")
+
+set(SANITIZER_COMMON_DEFINITIONS
+ SANITIZER_HAS_EXCEPTIONS=1)
+
+if(CAN_TARGET_X86_64)
+ add_library(RTSanitizerCommon.x86_64 OBJECT ${SANITIZER_SOURCES})
+ set_property(TARGET RTSanitizerCommon.x86_64 PROPERTY COMPILE_FLAGS
+ "${SANITIZER_CFLAGS} ${TARGET_X86_64_CFLAGS}")
+ set_property(TARGET RTSanitizerCommon.x86_64 APPEND PROPERTY COMPILE_DEFINITIONS
+ ${SANITIZER_COMMON_DEFINITIONS})
+endif()
+if(CAN_TARGET_I386)
+ add_library(RTSanitizerCommon.i386 OBJECT ${SANITIZER_SOURCES})
+ set_property(TARGET RTSanitizerCommon.i386 PROPERTY COMPILE_FLAGS
+ "${SANITIZER_CFLAGS} ${TARGET_I386_CFLAGS}")
+ set_property(TARGET RTSanitizerCommon.i386 APPEND PROPERTY COMPILE_DEFINITIONS
+ ${SANITIZER_COMMON_DEFINITIONS})
+endif()
diff --git a/lib/asan/sysinfo/Makefile.mk b/lib/sanitizer_common/Makefile.mk
index bc4a2ff..da83c2d 100644
--- a/lib/asan/sysinfo/Makefile.mk
+++ b/lib/sanitizer_common/Makefile.mk
@@ -1,4 +1,4 @@
-#===- lib/asan/sysinfo/Makefile.mk -------------------------*- Makefile -*--===#
+#===- lib/sanitizer_common/Makefile.mk ---------------------*- Makefile -*--===#
#
# The LLVM Compiler Infrastructure
#
@@ -7,16 +7,16 @@
#
#===------------------------------------------------------------------------===#
-ModuleName := asan
+ModuleName := sanitizer_common
SubDirs :=
Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o)
+ObjNames := $(Sources:%.cc=%.o)
Implementation := Generic
# FIXME: use automatic dependencies?
Dependencies := $(wildcard $(Dir)/*.h)
-# Define a convenience variable for all the asan functions.
-AsanFunctions += $(Sources:%.cc=%)
+# Define a convenience variable for all the sanitizer_common functions.
+SanitizerCommonFunctions := $(Sources:%.cc=%)
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
new file mode 100644
index 0000000..816fddf
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -0,0 +1,59 @@
+//===-- sanitizer_allocator.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// This allocator is used inside the run-times.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+
+// FIXME: We should probably use a more low-level allocator that would
+// mmap some pages and split them into chunks to fulfill requests.
+#ifdef __linux__
+extern "C" void *__libc_malloc(__sanitizer::uptr size);
+extern "C" void __libc_free(void *ptr);
+# define LIBC_MALLOC __libc_malloc
+# define LIBC_FREE __libc_free
+#else // __linux__
+# include <stdlib.h>
+# define LIBC_MALLOC malloc
+# define LIBC_FREE free
+#endif // __linux__
+
+namespace __sanitizer {
+
+const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+
+void *InternalAlloc(uptr size) {
+ if (size + sizeof(u64) < size)
+ return 0;
+ void *p = LIBC_MALLOC(size + sizeof(u64));
+ if (p == 0)
+ return 0;
+ ((u64*)p)[0] = kBlockMagic;
+ return (char*)p + sizeof(u64);
+}
+
+void InternalFree(void *addr) {
+ if (addr == 0)
+ return;
+ addr = (char*)addr - sizeof(u64);
+ CHECK_EQ(((u64*)addr)[0], kBlockMagic);
+ ((u64*)addr)[0] = 0;
+ LIBC_FREE(addr);
+}
+
+void *InternalAllocBlock(void *p) {
+ CHECK_NE(p, (void*)0);
+ u64 *pp = (u64*)((uptr)p & ~0x7);
+ for (; pp[0] != kBlockMagic; pp--) {}
+ return pp + 1;
+}
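+// Block layout written by InternalAlloc (for reference): [kBlockMagic][user data],
+// so InternalAllocBlock() scans backwards from an 8-byte-aligned address until it
+// finds the magic word and returns the address just past it.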
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_allocator64.h b/lib/sanitizer_common/sanitizer_allocator64.h
new file mode 100644
index 0000000..eb79a12
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator64.h
@@ -0,0 +1,488 @@
+//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Specialized allocator which works only in 64-bit address space.
+// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
+// The main feature of this allocator is that the header is located far away
+// from the user memory region, so that the tool does not use extra shadow
+// for the header.
+//
+// Status: not yet ready.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#define SANITIZER_ALLOCATOR_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// Maps size class id to size and back.
+class DefaultSizeClassMap {
+ private:
+  // Here we use a spline composed of 5 polynomials of order 1.
+  // The first size class is l0, then the classes go with step s0
+  // until they reach l1, after which they go with step s1 and so on.
+ // Steps should be powers of two for cheap division.
+ // The size of the last size class should be a power of two.
+ // There should be at most 256 size classes.
+ static const uptr l0 = 1 << 4;
+ static const uptr l1 = 1 << 9;
+ static const uptr l2 = 1 << 12;
+ static const uptr l3 = 1 << 15;
+ static const uptr l4 = 1 << 18;
+ static const uptr l5 = 1 << 21;
+
+ static const uptr s0 = 1 << 4;
+ static const uptr s1 = 1 << 6;
+ static const uptr s2 = 1 << 9;
+ static const uptr s3 = 1 << 12;
+ static const uptr s4 = 1 << 15;
+
+ static const uptr u0 = 0 + (l1 - l0) / s0;
+ static const uptr u1 = u0 + (l2 - l1) / s1;
+ static const uptr u2 = u1 + (l3 - l2) / s2;
+ static const uptr u3 = u2 + (l4 - l3) / s3;
+ static const uptr u4 = u3 + (l5 - l4) / s4;
+
+ public:
+ static const uptr kNumClasses = u4 + 1;
+ static const uptr kMaxSize = l5;
+ static const uptr kMinSize = l0;
+
+ COMPILER_CHECK(kNumClasses <= 256);
+ COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= u0) return l0 + s0 * (class_id - 0);
+ if (class_id <= u1) return l1 + s1 * (class_id - u0);
+ if (class_id <= u2) return l2 + s2 * (class_id - u1);
+ if (class_id <= u3) return l3 + s3 * (class_id - u2);
+ if (class_id <= u4) return l4 + s4 * (class_id - u3);
+ return 0;
+ }
+ static uptr ClassID(uptr size) {
+ if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
+ if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
+ if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
+ if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
+ if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
+ return 0;
+ }
+};
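+// Example with the constants above: ClassID(100) == 6 and Size(6) == 112,
+// i.e. a 100-byte request is rounded up to the 112-byte size class.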
+
+struct AllocatorListNode {
+ AllocatorListNode *next;
+};
+
+typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
+
+
+// Space: a portion of address space of kSpaceSize bytes starting at
+// a fixed address (kSpaceBeg). Both constants are powers of two and
+// kSpaceBeg is kSpaceSize-aligned.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
+template <const uptr kSpaceBeg, const uptr kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap>
+class SizeClassAllocator64 {
+ public:
+ void Init() {
+ CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
+ AllocBeg(), AllocSize())));
+ }
+
+ bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *Allocate(uptr size, uptr alignment) {
+ CHECK(CanAllocate(size, alignment));
+ return AllocateBySizeClass(SizeClassMap::ClassID(size));
+ }
+
+ void Deallocate(void *p) {
+ CHECK(PointerIsMine(p));
+ DeallocateBySizeClass(p, GetSizeClass(p));
+ }
+
+ // Allocate several chunks of the given class_id.
+ void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ if (region->free_list.empty()) {
+ PopulateFreeList(class_id, region);
+ }
+ CHECK(!region->free_list.empty());
+ // Just take as many chunks as we have in the free list now.
+ // FIXME: this might be too much.
+ free_list->append_front(&region->free_list);
+ CHECK(region->free_list.empty());
+ }
+
+ // Swallow the entire free_list for the given class_id.
+ void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ region->free_list.append_front(free_list);
+ }
+
+ bool PointerIsMine(void *p) {
+ return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
+ }
+ uptr GetSizeClass(void *p) {
+ return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
+ return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
+ }
+
+ static const uptr kNumClasses = 256; // Power of two <= 256
+
+ private:
+ COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
+ static const uptr kRegionSize = kSpaceSize / kNumClasses;
+ COMPILER_CHECK((kRegionSize >> 32) > 0); // kRegionSize must be >= 2^32.
+ // Populate the free list with at most this number of bytes at once
+ // or with one element if its size is greater.
+ static const uptr kPopulateSize = 1 << 18;
+
+ struct RegionInfo {
+ SpinMutex mutex;
+ AllocatorFreeList free_list;
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
+
+ uptr AdditionalSize() {
+ uptr res = sizeof(RegionInfo) * kNumClasses;
+ CHECK_EQ(res % kPageSize, 0);
+ return res;
+ }
+ uptr AllocBeg() { return kSpaceBeg - AdditionalSize(); }
+ uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
+ return &regions[-1 - class_id];
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr class_id) {
+ u32 offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
+ // We save 2x by using 32-bit div, but may need to use a 256-way switch.
+ return offset / (u32)SizeClassMap::Size(class_id);
+ }
+
+ void PopulateFreeList(uptr class_id, RegionInfo *region) {
+ uptr size = SizeClassMap::Size(class_id);
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + kPopulateSize;
+ region->free_list.clear();
+ uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+ uptr idx = beg_idx;
+ uptr i = 0;
+ do { // do-while loop because we need to put at least one item.
+ uptr p = region_beg + idx;
+ region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ idx += size;
+ i++;
+ } while (idx < end_idx);
+ region->allocated_user += idx - beg_idx;
+ region->allocated_meta += i * kMetadataSize;
+ CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
+ }
+
+ void *AllocateBySizeClass(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ if (region->free_list.empty()) {
+ PopulateFreeList(class_id, region);
+ }
+ CHECK(!region->free_list.empty());
+ AllocatorListNode *node = region->free_list.front();
+ region->free_list.pop_front();
+ return reinterpret_cast<void*>(node);
+ }
+
+ void DeallocateBySizeClass(void *p, uptr class_id) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ }
+};
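+// Illustrative instantiation (the constants are hypothetical placeholders, not
+// the values used by any particular tool):
+//   typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
+//                                0x040000000000ULL /*kSpaceSize*/,
+//                                16 /*kMetadataSize*/,
+//                                DefaultSizeClassMap> PrimaryAllocator;
+//   PrimaryAllocator a;  a.Init();
+//   void *p = a.Allocate(100, 8);  a.Deallocate(p);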
+
+// Objects of this type should be used as local caches for SizeClassAllocator64.
+// Since the typical use of this class is to have one object per thread in TLS,
+// it has to be POD.
+template<const uptr kNumClasses, class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache {
+ // Don't need to call Init if the object is a global (i.e. zero-initialized).
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ AllocatorFreeList *free_list = &free_lists_[class_id];
+ if (free_list->empty())
+ allocator->BulkAllocate(class_id, free_list);
+ CHECK(!free_list->empty());
+ void *res = free_list->front();
+ free_list->pop_front();
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_LT(class_id, kNumClasses);
+ free_lists_[class_id].push_front(reinterpret_cast<AllocatorListNode*>(p));
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ allocator->BulkDeallocate(i, &free_lists_[i]);
+ CHECK(free_lists_[i].empty());
+ }
+ }
+
+ // private:
+ AllocatorFreeList free_lists_[kNumClasses];
+};
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+// The result is always page-aligned.
+class LargeMmapAllocator {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void *Allocate(uptr size, uptr alignment) {
+ CHECK_LE(alignment, kPageSize); // Not implemented. Do we need it?
+ uptr map_size = RoundUpMapSize(size);
+ void *map = MmapOrDie(map_size, "LargeMmapAllocator");
+ void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
+ + kPageSize);
+ Header *h = GetHeader(res);
+ h->size = size;
+ {
+ SpinMutexLock l(&mutex_);
+ h->next = list_;
+ h->prev = 0;
+ if (list_)
+ list_->prev = h;
+ list_ = h;
+ }
+ return res;
+ }
+
+ void Deallocate(void *p) {
+ Header *h = GetHeader(p);
+ uptr map_size = RoundUpMapSize(h->size);
+ {
+ SpinMutexLock l(&mutex_);
+ Header *prev = h->prev;
+ Header *next = h->next;
+ if (prev)
+ prev->next = next;
+ if (next)
+ next->prev = prev;
+ if (h == list_)
+ list_ = next;
+ }
+ UnmapOrDie(h, map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (Header *l = list_; l; l = l->next) {
+ res += RoundUpMapSize(l->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(void *p) {
+ // Fast check.
+ if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
+ SpinMutexLock l(&mutex_);
+ for (Header *l = list_; l; l = l->next) {
+ if (GetUser(l) == p) return true;
+ }
+ return false;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
+ }
+
+  // At least kPageSize/2 metadata bytes are available.
+ void *GetMetaData(void *p) {
+ return GetHeader(p) + 1;
+ }
+
+ private:
+ struct Header {
+ uptr size;
+ Header *next;
+ Header *prev;
+ };
+
+ Header *GetHeader(void *p) {
+ return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
+ }
+
+ void *GetUser(Header *h) {
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, kPageSize) + kPageSize;
+ }
+
+ Header *list_;
+ SpinMutex mutex_;
+};
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return a 2^x-aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void Init() {
+ primary_.Init();
+ secondary_.Init();
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0) size = 1;
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ if (cleared)
+ internal_memset(res, 0, size);
+ return res;
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return 0;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
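+// Usage sketch (the helper names below are hypothetical examples):
+//   static mach_error_t my_alloc(void **ptr, size_t size, void *hint) { ... }
+//   static mach_error_t my_free(void *ptr) { ... }
+//   __asan_mach_override_ptr_custom(orig, wrapper, &reentry, my_alloc, my_free);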
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+};
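+// Illustrative wiring of the three pieces (typedef names are hypothetical):
+//   typedef SizeClassAllocator64<...> Primary;
+//   typedef SizeClassAllocatorLocalCache<Primary::kNumClasses, Primary> Cache;
+//   typedef CombinedAllocator<Primary, Cache, LargeMmapAllocator> Allocator;
+//   static Allocator allocator;      // one global instance
+//   static THREADLOCAL Cache cache;  // one cache per thread
+//   void *p = allocator.Allocate(&cache, size, alignment);
+//   allocator.Deallocate(&cache, p);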
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_H
diff --git a/lib/sanitizer_common/sanitizer_atomic.h b/lib/sanitizer_common/sanitizer_atomic.h
new file mode 100644
index 0000000..61e6dfd
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_atomic.h
@@ -0,0 +1,65 @@
+//===-- sanitizer_atomic.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_H
+#define SANITIZER_ATOMIC_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+enum memory_order {
+ memory_order_relaxed = 1 << 0,
+ memory_order_consume = 1 << 1,
+ memory_order_acquire = 1 << 2,
+ memory_order_release = 1 << 3,
+ memory_order_acq_rel = 1 << 4,
+ memory_order_seq_cst = 1 << 5
+};
+
+struct atomic_uint8_t {
+ typedef u8 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint16_t {
+ typedef u16 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint32_t {
+ typedef u32 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint64_t {
+ typedef u64 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uintptr_t {
+ typedef uptr Type;
+ volatile Type val_dont_use;
+};
+
+} // namespace __sanitizer
+
+#if defined(__GNUC__)
+# include "sanitizer_atomic_clang.h"
+#elif defined(_MSC_VER)
+# include "sanitizer_atomic_msvc.h"
+#else
+# error "Unsupported compiler"
+#endif
+
+#endif // SANITIZER_ATOMIC_H
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
new file mode 100644
index 0000000..af70441
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -0,0 +1,122 @@
+//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_H
+#define SANITIZER_ATOMIC_CLANG_H
+
+namespace __sanitizer {
+
+INLINE void atomic_signal_fence(memory_order) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ __sync_synchronize();
+}
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+#if defined(__i386__) || defined(__x86_64__)
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+#endif
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ v = a->val_dont_use;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ a->val_dont_use = v;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ if (mo == memory_order_seq_cst)
+ atomic_thread_fence(memory_order_seq_cst);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, -v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_exchange(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ DCHECK(!((uptr)a % sizeof(*a)));
+ if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
+ __sync_synchronize();
+ v = __sync_lock_test_and_set(&a->val_dont_use, v);
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ return v;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ typedef typename T::Type Type;
+ Type cmpv = *cmp;
+ Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
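+// For example (illustrative): set a flag exactly once across threads:
+//   static atomic_uint32_t flag;
+//   u32 cmp = 0;
+//   if (atomic_compare_exchange_strong(&flag, &cmp, 1, memory_order_acq_rel))
+//     /* this thread won the race */;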
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_CLANG_H
diff --git a/lib/sanitizer_common/sanitizer_atomic_msvc.h b/lib/sanitizer_common/sanitizer_atomic_msvc.h
new file mode 100644
index 0000000..2a15b59
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_atomic_msvc.h
@@ -0,0 +1,112 @@
+//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_MSVC_H
+#define SANITIZER_ATOMIC_MSVC_H
+
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+extern "C" void _mm_mfence();
+#pragma intrinsic(_mm_mfence)
+extern "C" void _mm_pause();
+#pragma intrinsic(_mm_pause)
+extern "C" long _InterlockedExchangeAdd( // NOLINT
+ long volatile * Addend, long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd)
+
+namespace __sanitizer {
+
+INLINE void atomic_signal_fence(memory_order) {
+ _ReadWriteBarrier();
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ _mm_mfence();
+}
+
+INLINE void proc_yield(int cnt) {
+ for (int i = 0; i < cnt; i++)
+ _mm_pause();
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ v = a->val_dont_use;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ a->val_dont_use = v;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ if (mo == memory_order_seq_cst)
+ atomic_thread_fence(memory_order_seq_cst);
+}
+
+INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+}
+
+INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+ u8 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ __asm {
+ mov eax, a
+ mov cl, v
+ xchg [eax], cl // NOLINT
+ mov v, cl
+ }
+ return v;
+}
+
+INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+ u16 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ __asm {
+ mov eax, a
+ mov cx, v
+ xchg [eax], cx // NOLINT
+ mov v, cx
+ }
+ return v;
+}
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_ATOMIC_MSVC_H
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
new file mode 100644
index 0000000..6dd1ff9
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -0,0 +1,100 @@
+//===-- sanitizer_common.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+void RawWrite(const char *buffer) {
+ static const char *kRawWriteError = "RawWrite can't output requested buffer!";
+ uptr length = (uptr)internal_strlen(buffer);
+ if (length != internal_write(2, buffer, length)) {
+ internal_write(2, kRawWriteError, internal_strlen(kRawWriteError));
+ Die();
+ }
+}
+
+uptr ReadFileToBuffer(const char *file_name, char **buff,
+ uptr *buff_size, uptr max_len) {
+ const uptr kMinFileLen = kPageSize;
+ uptr read_len = 0;
+ *buff = 0;
+ *buff_size = 0;
+ // The files we usually open are not seekable, so try different buffer sizes.
+ for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
+ fd_t fd = internal_open(file_name, /*write*/ false);
+ if (fd == kInvalidFd) return 0;
+ UnmapOrDie(*buff, *buff_size);
+ *buff = (char*)MmapOrDie(size, __FUNCTION__);
+ *buff_size = size;
+ // Read up to one page at a time.
+ read_len = 0;
+ bool reached_eof = false;
+ while (read_len + kPageSize <= size) {
+ uptr just_read = internal_read(fd, *buff + read_len, kPageSize);
+ if (just_read == 0) {
+ reached_eof = true;
+ break;
+ }
+ read_len += just_read;
+ }
+ internal_close(fd);
+ if (reached_eof) // We've read the whole file.
+ break;
+ }
+ return read_len;
+}
+
+// We don't want to use std::sort to avoid including <algorithm>, as
+// we may end up with two implementations of std::sort - one in instrumented
+// code, and the other in the runtime.
+// qsort() from stdlib won't work as it calls malloc(), which results
+// in a deadlock in the ASan allocator.
+// We re-implement in-place sorting w/o recursion as a straightforward heapsort.
+void SortArray(uptr *array, uptr size) {
+ if (size < 2)
+ return;
+ // Stage 1: insert elements to the heap.
+ for (uptr i = 1; i < size; i++) {
+ uptr j, p;
+ for (j = i; j > 0; j = p) {
+ p = (j - 1) / 2;
+ if (array[j] > array[p])
+ Swap(array[j], array[p]);
+ else
+ break;
+ }
+ }
+ // Stage 2: swap largest element with the last one,
+ // and sink the new top.
+ for (uptr i = size - 1; i > 0; i--) {
+ Swap(array[0], array[i]);
+ uptr j, max_ind;
+ for (j = 0; j < i; j = max_ind) {
+ uptr left = 2 * j + 1;
+ uptr right = 2 * j + 2;
+ max_ind = j;
+ if (left < i && array[left] > array[max_ind])
+ max_ind = left;
+ if (right < i && array[right] > array[max_ind])
+ max_ind = right;
+ if (max_ind != j)
+ Swap(array[j], array[max_ind]);
+ else
+ break;
+ }
+ }
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
new file mode 100644
index 0000000..4c7c1e9
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -0,0 +1,123 @@
+//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// It declares common functions and classes that are used in both runtimes.
+// Implementations of some functions are provided in sanitizer_common, while
+// others must be defined by the run-time library itself.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_H
+#define SANITIZER_COMMON_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// Constants.
+const uptr kWordSize = __WORDSIZE / 8;
+const uptr kWordSizeInBits = 8 * kWordSize;
+const uptr kPageSizeBits = 12;
+const uptr kPageSize = 1UL << kPageSizeBits;
+const uptr kCacheLineSize = 64;
+#ifndef _WIN32
+const uptr kMmapGranularity = kPageSize;
+#else
+const uptr kMmapGranularity = 1UL << 16;
+#endif
+
+// Threads
+int GetPid();
+uptr GetThreadSelf();
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom);
+
+// Memory management
+void *MmapOrDie(uptr size, const char *mem_type);
+void UnmapOrDie(void *addr, uptr size);
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *Mprotect(uptr fixed_addr, uptr size);
+// Used to check if we can map shadow memory to a fixed location.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+
+// Internal allocator
+void *InternalAlloc(uptr size);
+void InternalFree(void *p);
+// Given the pointer p into a valid allocated block,
+// returns a pointer to the beginning of the block.
+void *InternalAllocBlock(void *p);
+
+// IO
+void RawWrite(const char *buffer);
+void Printf(const char *format, ...);
+void Report(const char *format, ...);
+
+// Opens the file 'file_name' and reads up to 'max_len' bytes.
+// The resulting buffer is mmaped and stored in '*buff'.
+// The size of the mmaped region is stored in '*buff_size'.
+// Returns the number of bytes read, or 0 if the file cannot be opened.
+uptr ReadFileToBuffer(const char *file_name, char **buff,
+ uptr *buff_size, uptr max_len);
+// Maps the given file to virtual memory, and returns a pointer to it
+// (or NULL if the mapping fails). Stores the size of the mmaped region
+// in '*buff_size'.
+void *MapFileToMemory(const char *file_name, uptr *buff_size);
+
+const char *GetEnv(const char *name);
+const char *GetPwd();
+
+// Other
+void DisableCoreDumper();
+void DumpProcessMap();
+void SleepForSeconds(int seconds);
+void SleepForMillis(int millis);
+void NORETURN Exit(int exitcode);
+void NORETURN Abort();
+int Atexit(void (*function)(void));
+void SortArray(uptr *array, uptr size);
+
+// Math
+INLINE bool IsPowerOfTwo(uptr x) {
+ return (x & (x - 1)) == 0;
+}
+INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+ CHECK(IsPowerOfTwo(boundary));
+ return (size + boundary - 1) & ~(boundary - 1);
+}
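+// For example, RoundUpTo(13, 8) == 16 and RoundUpTo(4096, 4096) == 4096.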
+// Don't use std::min, std::max or std::swap, to minimize dependency
+// on libstdc++.
+template<class T> T Min(T a, T b) { return a < b ? a : b; }
+template<class T> T Max(T a, T b) { return a > b ? a : b; }
+template<class T> void Swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+// Char handling
+INLINE bool IsSpace(int c) {
+ return (c == ' ') || (c == '\n') || (c == '\t') ||
+ (c == '\f') || (c == '\r') || (c == '\v');
+}
+INLINE bool IsDigit(int c) {
+ return (c >= '0') && (c <= '9');
+}
+INLINE int ToLower(int c) {
+ return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
+}
+
+#if __WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_COMMON_H
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
new file mode 100644
index 0000000..cdeeb78
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -0,0 +1,82 @@
+//===-- sanitizer_flags.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flags.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+static char *GetFlagValue(const char *env, const char *name) {
+ if (env == 0)
+ return 0;
+ const char *pos = internal_strstr(env, name);
+ const char *end;
+ if (pos == 0)
+ return 0;
+ pos += internal_strlen(name);
+ if (pos[0] != '=') {
+ end = pos;
+ } else {
+ pos += 1;
+ if (pos[0] == '"') {
+ pos += 1;
+ end = internal_strchr(pos, '"');
+ } else if (pos[0] == '\'') {
+ pos += 1;
+ end = internal_strchr(pos, '\'');
+ } else {
+ end = internal_strchr(pos, ' ');
+ }
+ if (end == 0)
+ end = pos + internal_strlen(pos);
+ }
+ int len = end - pos;
+ char *f = (char*)InternalAlloc(len + 1);
+ internal_memcpy(f, pos, len);
+ f[len] = '\0';
+ return f;
+}
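+// For example (illustrative env string): with env = "verbosity=1 log_path='a.log'",
+// GetFlagValue(env, "verbosity") returns "1" and GetFlagValue(env, "log_path")
+// returns "a.log".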
+
+void ParseFlag(const char *env, bool *flag, const char *name) {
+ char *val = GetFlagValue(env, name);
+ if (val == 0)
+ return;
+ if (0 == internal_strcmp(val, "0") ||
+ 0 == internal_strcmp(val, "no") ||
+ 0 == internal_strcmp(val, "false"))
+ *flag = false;
+ if (0 == internal_strcmp(val, "1") ||
+ 0 == internal_strcmp(val, "yes") ||
+ 0 == internal_strcmp(val, "true"))
+ *flag = true;
+ InternalFree(val);
+}
+
+void ParseFlag(const char *env, int *flag, const char *name) {
+ char *val = GetFlagValue(env, name);
+ if (val == 0)
+ return;
+ *flag = internal_atoll(val);
+ InternalFree(val);
+}
+
+void ParseFlag(const char *env, const char **flag, const char *name) {
+ const char *val = GetFlagValue(env, name);
+ if (val == 0)
+ return;
+ *flag = val;
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_flags.h b/lib/sanitizer_common/sanitizer_flags.h
new file mode 100644
index 0000000..b7ce452
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flags.h
@@ -0,0 +1,27 @@
+//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAGS_H
+#define SANITIZER_FLAGS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+void ParseFlag(const char *env, bool *flag, const char *name);
+void ParseFlag(const char *env, int *flag, const char *name);
+void ParseFlag(const char *env, const char **flag, const char *name);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAGS_H
diff --git a/lib/sanitizer_common/sanitizer_interface_defs.h b/lib/sanitizer_common/sanitizer_interface_defs.h
new file mode 100644
index 0000000..2395ea5
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_interface_defs.h
@@ -0,0 +1,56 @@
+//===-- sanitizer_interface_defs.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+// It contains basic macros and types.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_INTERFACE_DEFS_H
+#define SANITIZER_INTERFACE_DEFS_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers to avoid portability issues.
+
+#if defined(_WIN32)
+// FIXME find out what we need on Windows. __declspec(dllexport) ?
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#elif defined(SANITIZER_GO)
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
+#endif
+
+// __has_feature
+#if !defined(__has_feature)
+# define __has_feature(x) 0
+#endif
+
+// For portability reasons we do not include stddef.h, stdint.h or any other
+// system header, but we do need some basic types that are not defined
+// in a portable way by the language itself.
+namespace __sanitizer {
+
+typedef unsigned long uptr; // NOLINT
+typedef signed long sptr; // NOLINT
+typedef unsigned char u8;
+typedef unsigned short u16; // NOLINT
+typedef unsigned int u32;
+typedef unsigned long long u64; // NOLINT
+typedef signed char s8;
+typedef signed short s16; // NOLINT
+typedef signed int s32;
+typedef signed long long s64; // NOLINT
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_INTERFACE_DEFS_H
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
new file mode 100644
index 0000000..b8cf61f
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -0,0 +1,163 @@
+//===-- sanitizer_internal_defs.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+// It contains macros used in the run-time libraries' code.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_DEFS_H
+#define SANITIZER_DEFS_H
+
+#include "sanitizer_interface_defs.h"
+using namespace __sanitizer; // NOLINT
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers to avoid portability issues.
+
+// Common defs.
+#define INLINE static inline
+#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#define WEAK SANITIZER_WEAK_ATTRIBUTE
+
+// Platform-specific defs.
+#if defined(_WIN32)
+typedef unsigned long DWORD; // NOLINT
+# define ALWAYS_INLINE __declspec(forceinline)
+// FIXME(timurrrr): do we need this on Windows?
+# define ALIAS(x)
+# define ALIGNED(x) __declspec(align(x))
+# define FORMAT(f, a)
+# define NOINLINE __declspec(noinline)
+# define NORETURN __declspec(noreturn)
+# define THREADLOCAL __declspec(thread)
+# define NOTHROW
+#else // _WIN32
+# define ALWAYS_INLINE __attribute__((always_inline))
+# define ALIAS(x) __attribute__((alias(x)))
+# define ALIGNED(x) __attribute__((aligned(x)))
+# define FORMAT(f, a) __attribute__((format(printf, f, a)))
+# define NOINLINE __attribute__((noinline))
+# define NORETURN __attribute__((noreturn))
+# define THREADLOCAL __thread
+# ifdef __cplusplus
+# define NOTHROW throw()
+# else
+# define NOTHROW __attribute__((__nothrow__))
+# endif
+#endif // _WIN32
+
+// We have no equivalent of these on Windows.
+#ifndef _WIN32
+# define LIKELY(x) __builtin_expect(!!(x), 1)
+# define UNLIKELY(x) __builtin_expect(!!(x), 0)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+#endif
+
+#if defined(_WIN32)
+typedef DWORD thread_return_t;
+# define THREAD_CALLING_CONV __stdcall
+#else // _WIN32
+typedef void* thread_return_t;
+# define THREAD_CALLING_CONV
+#endif // _WIN32
+typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
+
+// If __WORDSIZE was undefined by the platform, define it in terms of the
+// compiler built-ins __LP64__ and _WIN64.
+#ifndef __WORDSIZE
+# if __LP64__ || defined(_WIN64)
+# define __WORDSIZE 64
+# else
+# define __WORDSIZE 32
+# endif
+#endif // __WORDSIZE
+
+// NOTE: Functions below must be defined in each run-time.
+namespace __sanitizer {
+void NORETURN Die();
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+} // namespace __sanitizer
+
+// Check macro
+#define RAW_CHECK_MSG(expr, msg) do { \
+ if (!(expr)) { \
+ RawWrite(msg); \
+ Die(); \
+ } \
+} while (0)
+
+#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+
+#define CHECK_IMPL(c1, op, c2) \
+ do { \
+ __sanitizer::u64 v1 = (u64)(c1); \
+ __sanitizer::u64 v2 = (u64)(c2); \
+ if (!(v1 op v2)) \
+ __sanitizer::CheckFailed(__FILE__, __LINE__, \
+ "(" #c1 ") " #op " (" #c2 ")", v1, v2); \
+ } while (false) \
+/**/
+
+#define CHECK(a) CHECK_IMPL((a), !=, 0)
+#define CHECK_EQ(a, b) CHECK_IMPL((a), ==, (b))
+#define CHECK_NE(a, b) CHECK_IMPL((a), !=, (b))
+#define CHECK_LT(a, b) CHECK_IMPL((a), <, (b))
+#define CHECK_LE(a, b) CHECK_IMPL((a), <=, (b))
+#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
+#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
+
+#if TSAN_DEBUG
+#define DCHECK(a) CHECK(a)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#else
+#define DCHECK(a)
+#define DCHECK_EQ(a, b)
+#define DCHECK_NE(a, b)
+#define DCHECK_LT(a, b)
+#define DCHECK_LE(a, b)
+#define DCHECK_GT(a, b)
+#define DCHECK_GE(a, b)
+#endif
+
+#define UNIMPLEMENTED() CHECK("unimplemented" && 0)
+
+#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+
+#define IMPL_PASTE(a, b) a##b
+#define IMPL_COMPILER_ASSERT(pred, line) \
+ typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1];
+
+// Limits for integral types. We have to redefine them in case we don't
+// have stdint.h (like in Visual Studio 9).
+#if __WORDSIZE == 64
+# define __INT64_C(c) c ## L
+# define __UINT64_C(c) c ## UL
+#else
+# define __INT64_C(c) c ## LL
+# define __UINT64_C(c) c ## ULL
+#endif // __WORDSIZE == 64
+#undef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#undef INT32_MAX
+#define INT32_MAX (2147483647)
+#undef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#undef INT64_MIN
+#define INT64_MIN (-__INT64_C(9223372036854775807)-1)
+#undef INT64_MAX
+#define INT64_MAX (__INT64_C(9223372036854775807))
+#undef UINT64_MAX
+#define UINT64_MAX (__UINT64_C(18446744073709551615))
+
+#endif // SANITIZER_DEFS_H
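
A short sketch of how run-time code typically exercises the checking macros above (the function is hypothetical):

    #include "sanitizer_internal_defs.h"

    // Compile-time assertion: the typedef'ed array gets a negative size,
    // and therefore fails to compile, when the predicate is false.
    COMPILER_CHECK(sizeof(u64) == 8);

    static void ExampleUse(uptr beg, uptr end) {
      CHECK_LE(beg, end);               // Calls CheckFailed() and Die() on failure.
      if (beg == end) UNIMPLEMENTED();  // Reports a failed CHECK.
    }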
diff --git a/lib/sanitizer_common/sanitizer_libc.cc b/lib/sanitizer_common/sanitizer_libc.cc
new file mode 100644
index 0000000..c433242
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_libc.cc
@@ -0,0 +1,182 @@
+//===-- sanitizer_libc.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. See sanitizer_libc.h for details.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+s64 internal_atoll(const char *nptr) {
+ return internal_simple_strtoll(nptr, (char**)0, 10);
+}
+
+void *internal_memchr(const void *s, int c, uptr n) {
+ const char* t = (char*)s;
+ for (uptr i = 0; i < n; ++i, ++t)
+ if (*t == c)
+ return (void*)t;
+ return 0;
+}
+
+int internal_memcmp(const void* s1, const void* s2, uptr n) {
+ const char* t1 = (char*)s1;
+ const char* t2 = (char*)s2;
+ for (uptr i = 0; i < n; ++i, ++t1, ++t2)
+ if (*t1 != *t2)
+ return *t1 < *t2 ? -1 : 1;
+ return 0;
+}
+
+void *internal_memcpy(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ char *s = (char*)src;
+ for (uptr i = 0; i < n; ++i)
+ d[i] = s[i];
+ return dest;
+}
+
+void *internal_memset(void* s, int c, uptr n) {
+ // The next line prevents Clang from making a call to memset() instead of the
+ // loop below.
+  // FIXME: building the runtime with -ffreestanding is a better idea. However,
+  // there are currently link-time problems due to PR12396.
+ char volatile *t = (char*)s;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ *t = c;
+ }
+ return s;
+}
+
+char* internal_strdup(const char *s) {
+ uptr len = internal_strlen(s);
+ char *s2 = (char*)InternalAlloc(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+int internal_strcmp(const char *s1, const char *s2) {
+ while (true) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+int internal_strncmp(const char *s1, const char *s2, uptr n) {
+ for (uptr i = 0; i < n; i++) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+char* internal_strchr(const char *s, int c) {
+ while (true) {
+ if (*s == (char)c)
+ return (char*)s;
+ if (*s == 0)
+ return 0;
+ s++;
+ }
+}
+
+char *internal_strrchr(const char *s, int c) {
+ const char *res = 0;
+ for (uptr i = 0; s[i]; i++) {
+ if (s[i] == c) res = s + i;
+ }
+ return (char*)res;
+}
+
+uptr internal_strlen(const char *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
+char *internal_strncat(char *dst, const char *src, uptr n) {
+ uptr len = internal_strlen(dst);
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[len + i] = src[i];
+ dst[len + i] = 0;
+ return dst;
+}
+
+char *internal_strncpy(char *dst, const char *src, uptr n) {
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[i] = src[i];
+ for (; i < n; i++)
+ dst[i] = '\0';
+ return dst;
+}
+
+uptr internal_strnlen(const char *s, uptr maxlen) {
+ uptr i = 0;
+ while (i < maxlen && s[i]) i++;
+ return i;
+}
+
+char *internal_strstr(const char *haystack, const char *needle) {
+ // This is O(N^2), but we are not using it in hot places.
+ uptr len1 = internal_strlen(haystack);
+ uptr len2 = internal_strlen(needle);
+ if (len1 < len2) return 0;
+ for (uptr pos = 0; pos <= len1 - len2; pos++) {
+ if (internal_memcmp(haystack + pos, needle, len2) == 0)
+ return (char*)haystack + pos;
+ }
+ return 0;
+}
+
+s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
+ CHECK_EQ(base, 10);
+ while (IsSpace(*nptr)) nptr++;
+ int sgn = 1;
+ u64 res = 0;
+ bool have_digits = false;
+ char *old_nptr = (char*)nptr;
+ if (*nptr == '+') {
+ sgn = 1;
+ nptr++;
+ } else if (*nptr == '-') {
+ sgn = -1;
+ nptr++;
+ }
+ while (IsDigit(*nptr)) {
+ res = (res <= UINT64_MAX / 10) ? res * 10 : UINT64_MAX;
+ int digit = ((*nptr) - '0');
+ res = (res <= UINT64_MAX - digit) ? res + digit : UINT64_MAX;
+ have_digits = true;
+ nptr++;
+ }
+ if (endptr != 0) {
+ *endptr = (have_digits) ? (char*)nptr : old_nptr;
+ }
+ if (sgn > 0) {
+ return (s64)(Min((u64)INT64_MAX, res));
+ } else {
+ return (res > INT64_MAX) ? INT64_MIN : ((s64)res * -1);
+ }
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h
new file mode 100644
index 0000000..8da4286c
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_libc.h
@@ -0,0 +1,69 @@
+//===-- sanitizer_libc.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// These tools cannot use some of the libc functions directly because those
+// functions are intercepted. Instead, we implement a tiny subset of libc here.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LIBC_H
+#define SANITIZER_LIBC_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_interface_defs.h"
+
+namespace __sanitizer {
+
+// internal_X() is a custom implementation of X() for use in RTL.
+
+// String functions
+s64 internal_atoll(const char *nptr);
+void *internal_memchr(const void *s, int c, uptr n);
+int internal_memcmp(const void* s1, const void* s2, uptr n);
+void *internal_memcpy(void *dest, const void *src, uptr n);
+// Should not be used in performance-critical places.
+void *internal_memset(void *s, int c, uptr n);
+char* internal_strchr(const char *s, int c);
+int internal_strcmp(const char *s1, const char *s2);
+char *internal_strdup(const char *s);
+uptr internal_strlen(const char *s);
+char *internal_strncat(char *dst, const char *src, uptr n);
+int internal_strncmp(const char *s1, const char *s2, uptr n);
+char *internal_strncpy(char *dst, const char *src, uptr n);
+uptr internal_strnlen(const char *s, uptr maxlen);
+char *internal_strrchr(const char *s, int c);
+// This is O(N^2), but we are not using it in hot places.
+char *internal_strstr(const char *haystack, const char *needle);
+// Works only for base=10 and doesn't set errno.
+s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
+
+// Memory
+void *internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset);
+int internal_munmap(void *addr, uptr length);
+
+// I/O
+typedef int fd_t;
+const fd_t kInvalidFd = -1;
+int internal_close(fd_t fd);
+fd_t internal_open(const char *filename, bool write);
+uptr internal_read(fd_t fd, void *buf, uptr count);
+uptr internal_write(fd_t fd, const void *buf, uptr count);
+uptr internal_filesize(fd_t fd); // -1 on error.
+int internal_dup2(int oldfd, int newfd);
+int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+
+// Threading
+int internal_sched_yield();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIBC_H
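
An illustrative sketch of using the internal_*() replacements instead of libc inside run-time code (the helper below is hypothetical):

    #include "sanitizer_libc.h"

    // Reads up to |size| bytes from |fd| without going through the (possibly
    // intercepted) libc I/O functions.
    static __sanitizer::uptr ReadAll(__sanitizer::fd_t fd, char *buf,
                                     __sanitizer::uptr size) {
      __sanitizer::uptr total = 0;
      while (total < size) {
        __sanitizer::uptr n =
            __sanitizer::internal_read(fd, buf + total, size - total);
        if (n == 0 || n == (__sanitizer::uptr)-1) break;  // EOF or error.
        total += n;
      }
      return total;
    }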
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
new file mode 100644
index 0000000..70e2eb3
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -0,0 +1,348 @@
+//===-- sanitizer_linux.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+#ifdef __linux__
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_symbolizer.h"
+
+#include <elf.h>
+#include <fcntl.h>
+#include <link.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+// --------------- sanitizer_libc.h
+void *internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset) {
+#if __WORDSIZE == 64
+ return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
+#else
+ return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
+#endif
+}
+
+int internal_munmap(void *addr, uptr length) {
+ return syscall(__NR_munmap, addr, length);
+}
+
+int internal_close(fd_t fd) {
+ return syscall(__NR_close, fd);
+}
+
+fd_t internal_open(const char *filename, bool write) {
+ return syscall(__NR_open, filename,
+ write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ return (uptr)syscall(__NR_read, fd, buf, count);
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ return (uptr)syscall(__NR_write, fd, buf, count);
+}
+
+uptr internal_filesize(fd_t fd) {
+#if __WORDSIZE == 64
+ struct stat st;
+ if (syscall(__NR_fstat, fd, &st))
+ return -1;
+#else
+ struct stat64 st;
+ if (syscall(__NR_fstat64, fd, &st))
+ return -1;
+#endif
+ return (uptr)st.st_size;
+}
+
+int internal_dup2(int oldfd, int newfd) {
+ return syscall(__NR_dup2, oldfd, newfd);
+}
+
+int internal_sched_yield() {
+ return syscall(__NR_sched_yield);
+}
+
+// ----------------- sanitizer_common.h
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ if (at_initialization) {
+ // This is the main thread. Libpthread may not be initialized yet.
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+
+ // Find the mapping that contains a stack variable.
+ ProcessMaps proc_maps;
+ uptr start, end, offset;
+ uptr prev_end = 0;
+ while (proc_maps.Next(&start, &end, &offset, 0, 0)) {
+ if ((uptr)&rl < end)
+ break;
+ prev_end = end;
+ }
+ CHECK((uptr)&rl >= start && (uptr)&rl < end);
+
+ // Get stacksize from rlimit, but clip it so that it does not overlap
+ // with other mappings.
+ uptr stacksize = rl.rlim_cur;
+ if (stacksize > end - prev_end)
+ stacksize = end - prev_end;
+ // When running with unlimited stack size, we still want to set some limit.
+ // The unlimited stack size is caused by 'ulimit -s unlimited'.
+ // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
+ if (stacksize > kMaxThreadStackSize)
+ stacksize = kMaxThreadStackSize;
+ *stack_top = end;
+ *stack_bottom = end - stacksize;
+ return;
+ }
+ pthread_attr_t attr;
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ uptr stacksize = 0;
+ void *stackaddr = 0;
+ pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
+ pthread_attr_destroy(&attr);
+
+ *stack_top = (uptr)stackaddr + stacksize;
+ *stack_bottom = (uptr)stackaddr;
+ CHECK(stacksize < kMaxThreadStackSize); // Sanity check.
+}
+
+// Like getenv, but reads env directly from /proc and does not use libc.
+// This function should be called first inside __asan_init.
+const char *GetEnv(const char *name) {
+ static char *environ;
+ static uptr len;
+ static bool inited;
+ if (!inited) {
+ inited = true;
+ uptr environ_size;
+ len = ReadFileToBuffer("/proc/self/environ",
+ &environ, &environ_size, 1 << 26);
+ }
+ if (!environ || len == 0) return 0;
+ uptr namelen = internal_strlen(name);
+ const char *p = environ;
+ while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
+ // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
+ const char* endp =
+ (char*)internal_memchr(p, '\0', len - (p - environ));
+ if (endp == 0) // this entry isn't NUL terminated
+ return 0;
+ else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
+ return p + namelen + 1; // point after =
+ p = endp + 1;
+ }
+ return 0; // Not found.
+}
+
+// ------------------ sanitizer_symbolizer.h
+typedef ElfW(Ehdr) Elf_Ehdr;
+typedef ElfW(Shdr) Elf_Shdr;
+typedef ElfW(Phdr) Elf_Phdr;
+
+bool FindDWARFSection(uptr object_file_addr, const char *section_name,
+ DWARFSection *section) {
+ Elf_Ehdr *exe = (Elf_Ehdr*)object_file_addr;
+ Elf_Shdr *sections = (Elf_Shdr*)(object_file_addr + exe->e_shoff);
+ uptr section_names = object_file_addr +
+ sections[exe->e_shstrndx].sh_offset;
+ for (int i = 0; i < exe->e_shnum; i++) {
+ Elf_Shdr *current_section = &sections[i];
+ const char *current_name = (const char*)section_names +
+ current_section->sh_name;
+ if (IsFullNameOfDWARFSection(current_name, section_name)) {
+ section->data = (const char*)object_file_addr +
+ current_section->sh_offset;
+ section->size = current_section->sh_size;
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef ANDROID
+uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
+  UNIMPLEMENTED();
+  return 0;
+}
+#else // ANDROID
+struct DlIteratePhdrData {
+ ModuleDIContext *modules;
+ uptr current_n;
+ uptr max_n;
+};
+
+static const uptr kMaxPathLength = 512;
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
+ if (data->current_n == data->max_n)
+ return 0;
+ char *module_name = 0;
+ if (data->current_n == 0) {
+ // First module is the binary itself.
+ module_name = (char*)InternalAlloc(kMaxPathLength);
+ uptr module_name_len = readlink("/proc/self/exe",
+ module_name, kMaxPathLength);
+ CHECK_NE(module_name_len, (uptr)-1);
+ CHECK_LT(module_name_len, kMaxPathLength);
+ module_name[module_name_len] = '\0';
+ } else if (info->dlpi_name) {
+ module_name = internal_strdup(info->dlpi_name);
+ }
+ if (module_name == 0 || module_name[0] == '\0')
+ return 0;
+ void *mem = &data->modules[data->current_n];
+ ModuleDIContext *cur_module = new(mem) ModuleDIContext(module_name,
+ info->dlpi_addr);
+ data->current_n++;
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ const Elf_Phdr *phdr = &info->dlpi_phdr[i];
+ if (phdr->p_type == PT_LOAD) {
+ uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
+ uptr cur_end = cur_beg + phdr->p_memsz;
+ cur_module->addAddressRange(cur_beg, cur_end);
+ }
+ }
+ InternalFree(module_name);
+ return 0;
+}
+
+uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
+ CHECK(modules);
+ DlIteratePhdrData data = {modules, 0, max_modules};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &data);
+ return data.current_n;
+}
+#endif // ANDROID
+
+// ----------------- sanitizer_procmaps.h
+ProcessMaps::ProcessMaps() {
+ proc_self_maps_buff_len_ =
+ ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
+ &proc_self_maps_buff_mmaped_size_, 1 << 26);
+ CHECK_GT(proc_self_maps_buff_len_, 0);
+ // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
+ Reset();
+}
+
+ProcessMaps::~ProcessMaps() {
+ UnmapOrDie(proc_self_maps_buff_, proc_self_maps_buff_mmaped_size_);
+}
+
+void ProcessMaps::Reset() {
+ current_ = proc_self_maps_buff_;
+}
+
+// Parse a hex value in str and update str.
+static uptr ParseHex(char **str) {
+ uptr x = 0;
+ char *s;
+ for (s = *str; ; s++) {
+ char c = *s;
+ uptr v = 0;
+ if (c >= '0' && c <= '9')
+ v = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ v = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ v = c - 'A' + 10;
+ else
+ break;
+ x = x * 16 + v;
+ }
+ *str = s;
+ return x;
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+  return c == c1 || c == c2;
+}
+
+static bool IsDecimal(char c) {
+ return c >= '0' && c <= '9';
+}
+
+bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ char *last = proc_self_maps_buff_ + proc_self_maps_buff_len_;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ *start = ParseHex(&current_);
+ CHECK_EQ(*current_++, '-');
+ *end = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+  CHECK(IsOneOf(*current_++, '-', 'r'));
+  CHECK(IsOneOf(*current_++, '-', 'w'));
+  CHECK(IsOneOf(*current_++, '-', 'x'));
+  CHECK(IsOneOf(*current_++, 's', 'p'));
+ CHECK_EQ(*current_++, ' ');
+ *offset = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ':');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ while (IsDecimal(*current_))
+ current_++;
+ CHECK_EQ(*current_++, ' ');
+ // Skip spaces.
+ while (current_ < next_line && *current_ == ' ')
+ current_++;
+ // Fill in the filename.
+ uptr i = 0;
+ while (current_ < next_line) {
+ if (filename && i < filename_size - 1)
+ filename[i++] = *current_;
+ current_++;
+ }
+ if (filename && i < filename_size)
+ filename[i] = 0;
+ current_ = next_line + 1;
+ return true;
+}
+
+// Gets the object name and the offset by walking ProcessMaps.
+bool ProcessMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[],
+ uptr filename_size) {
+ return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
+}
+
+} // namespace __sanitizer
+
+#endif // __linux__
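
For reference, a hedged sketch of walking the mappings that ProcessMaps::Next() parses from /proc/self/maps lines of the form "08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar" (the caller below is illustrative):

    #include "sanitizer_common.h"
    #include "sanitizer_procmaps.h"

    static void PrintMappings() {
      __sanitizer::ProcessMaps maps;
      __sanitizer::uptr start, end, offset;
      char name[256];
      // Next() returns false once the buffered copy of the maps is exhausted.
      while (maps.Next(&start, &end, &offset, name, sizeof(name)))
        __sanitizer::Printf("%p-%p %s\n", (void*)start, (void*)end, name);
    }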
diff --git a/lib/sanitizer_common/sanitizer_list.h b/lib/sanitizer_common/sanitizer_list.h
new file mode 100644
index 0000000..ef98eee
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_list.h
@@ -0,0 +1,120 @@
+//===-- sanitizer_list.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains implementation of a list class to be used by
+// ThreadSanitizer, etc run-times.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LIST_H
+#define SANITIZER_LIST_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// Intrusive singly-linked list with size(), push_back(), push_front(),
+// pop_front(), append_front() and append_back().
+// This class should be a POD (so that it can be put into TLS)
+// and an object with all zero fields should represent a valid empty list.
+// This class does not have a CTOR, so clear() should be called on all
+// non-zero-initialized objects before use.
+template<class Item>
+struct IntrusiveList {
+ void clear() {
+ first_ = last_ = 0;
+ size_ = 0;
+ }
+
+ bool empty() const { return size_ == 0; }
+ uptr size() const { return size_; }
+
+ void push_back(Item *x) {
+ if (empty()) {
+ x->next = 0;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = 0;
+ last_->next = x;
+ last_ = x;
+ size_++;
+ }
+ }
+
+ void push_front(Item *x) {
+ if (empty()) {
+ x->next = 0;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = first_;
+ first_ = x;
+ size_++;
+ }
+ }
+
+ void pop_front() {
+ CHECK(!empty());
+ first_ = first_->next;
+ if (first_ == 0)
+ last_ = 0;
+ size_--;
+ }
+
+ Item *front() { return first_; }
+ Item *back() { return last_; }
+
+ void append_front(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (empty()) {
+ *this = *l;
+ } else if (!l->empty()) {
+ l->last_->next = first_;
+ first_ = l->first_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (empty()) {
+ *this = *l;
+ } else {
+ last_->next = l->first_;
+ last_ = l->last_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void CheckConsistency() {
+ if (size_ == 0) {
+ CHECK_EQ(first_, 0);
+ CHECK_EQ(last_, 0);
+ } else {
+ uptr count = 0;
+ for (Item *i = first_; ; i = i->next) {
+ count++;
+ if (i == last_) break;
+ }
+ CHECK_EQ(size(), count);
+ CHECK_EQ(last_->next, 0);
+ }
+ }
+
+// private, don't use directly.
+ uptr size_;
+ Item *first_;
+ Item *last_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIST_H
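
A minimal usage sketch for IntrusiveList (node type and function are hypothetical); items must carry a public next pointer, and clear() must run before first use because the list has no constructor:

    #include "sanitizer_list.h"

    struct Node {
      Node *next;  // Required by IntrusiveList.
      int payload;
    };

    static void ListExample() {
      __sanitizer::IntrusiveList<Node> list;
      list.clear();                 // Mandatory for non-zero-initialized objects.
      static Node a = {0, 1}, b = {0, 2};
      list.push_back(&a);
      list.push_front(&b);          // The list is now: b, a.
      while (!list.empty()) list.pop_front();
    }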
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
new file mode 100644
index 0000000..e64c2de
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -0,0 +1,243 @@
+//===-- sanitizer_mac.cc --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements mac-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#ifdef __APPLE__
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_symbolizer.h"
+
+#include <crt_externs.h> // for _NSGetEnviron
+#include <fcntl.h>
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+// ---------------------- sanitizer_libc.h
+void *internal_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, u64 offset) {
+ return mmap(addr, length, prot, flags, fd, offset);
+}
+
+int internal_munmap(void *addr, uptr length) {
+ return munmap(addr, length);
+}
+
+int internal_close(fd_t fd) {
+ return close(fd);
+}
+
+fd_t internal_open(const char *filename, bool write) {
+ return open(filename,
+ write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ return read(fd, buf, count);
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ return write(fd, buf, count);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st = {};
+ if (fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+int internal_dup2(int oldfd, int newfd) {
+ return dup2(oldfd, newfd);
+}
+
+int internal_sched_yield() {
+ return sched_yield();
+}
+
+// ----------------- sanitizer_common.h
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ void *stackaddr = pthread_get_stackaddr_np(pthread_self());
+ *stack_top = (uptr)stackaddr;
+ *stack_bottom = *stack_top - stacksize;
+}
+
+const char *GetEnv(const char *name) {
+ char ***env_ptr = _NSGetEnviron();
+ CHECK(env_ptr);
+ char **environ = *env_ptr;
+ CHECK(environ);
+ uptr name_len = internal_strlen(name);
+ while (*environ != 0) {
+ uptr len = internal_strlen(*environ);
+ if (len > name_len) {
+ const char *p = *environ;
+ if (!internal_memcmp(p, name, name_len) &&
+ p[name_len] == '=') { // Match.
+ return *environ + name_len + 1; // String starting after =.
+ }
+ }
+ environ++;
+ }
+ return 0;
+}
+
+// ------------------ sanitizer_symbolizer.h
+bool FindDWARFSection(uptr object_file_addr, const char *section_name,
+ DWARFSection *section) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+// ----------------- sanitizer_procmaps.h
+
+ProcessMaps::ProcessMaps() {
+ Reset();
+}
+
+ProcessMaps::~ProcessMaps() {
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
+// into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
+
+void ProcessMaps::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the ProcessMaps state.
+ current_image_ = _dyld_image_count();
+ current_load_cmd_count_ = -1;
+ current_load_cmd_addr_ = 0;
+ current_magic_ = 0;
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, http://code.google.com/p/google-perftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template<u32 kLCSegment, typename SegmentCommand>
+bool ProcessMaps::NextSegmentLoad(
+ uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ const char* lc = current_load_cmd_addr_;
+ current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ if (start) *start = sc->vmaddr + dlloff;
+ if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
+ if (offset) *offset = sc->fileoff;
+ if (filename) {
+ internal_strncpy(filename, _dyld_get_image_name(current_image_),
+ filename_size);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ for (; current_image_ >= 0; current_image_--) {
+ const mach_header* hdr = _dyld_get_image_header(current_image_);
+ if (!hdr) continue;
+ if (current_load_cmd_count_ < 0) {
+      // Set up for this image.
+ current_load_cmd_count_ = hdr->ncmds;
+ current_magic_ = hdr->magic;
+ switch (current_magic_) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ }
+
+ for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
+ switch (current_magic_) {
+ // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ start, end, offset, filename, filename_size))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ start, end, offset, filename, filename_size))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load_cmd's in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+bool ProcessMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[],
+ uptr filename_size) {
+ return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
+}
+
+} // namespace __sanitizer
+
+#endif // __APPLE__
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
new file mode 100644
index 0000000..ca3e2f9
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -0,0 +1,100 @@
+//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MUTEX_H
+#define SANITIZER_MUTEX_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+class SpinMutex {
+ public:
+ SpinMutex() {
+ atomic_store(&state_, 0, memory_order_relaxed);
+ }
+
+ void Lock() {
+ if (atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ return;
+ LockSlow();
+ }
+
+ void Unlock() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
+
+ private:
+ atomic_uint8_t state_;
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ if (atomic_load(&state_, memory_order_relaxed) == 0
+ && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ return;
+ }
+ }
+
+ SpinMutex(const SpinMutex&);
+ void operator=(const SpinMutex&);
+};
+
+template<typename MutexType>
+class GenericScopedLock {
+ public:
+ explicit GenericScopedLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->Lock();
+ }
+
+ ~GenericScopedLock() {
+ mu_->Unlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedLock(const GenericScopedLock&);
+ void operator=(const GenericScopedLock&);
+};
+
+template<typename MutexType>
+class GenericScopedReadLock {
+ public:
+ explicit GenericScopedReadLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->ReadLock();
+ }
+
+ ~GenericScopedReadLock() {
+ mu_->ReadUnlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedReadLock(const GenericScopedReadLock&);
+ void operator=(const GenericScopedReadLock&);
+};
+
+typedef GenericScopedLock<SpinMutex> SpinMutexLock;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MUTEX_H
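
An illustrative sketch of the scoped-lock pattern provided above (the counter is hypothetical):

    #include "sanitizer_mutex.h"

    static __sanitizer::SpinMutex counter_mutex;
    static __sanitizer::u64 counter;

    static void Increment() {
      // Lock() on construction, Unlock() on every scope exit.
      __sanitizer::SpinMutexLock lock(&counter_mutex);
      counter++;
    }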
diff --git a/lib/sanitizer_common/sanitizer_placement_new.h b/lib/sanitizer_common/sanitizer_placement_new.h
new file mode 100644
index 0000000..f133a6f
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_placement_new.h
@@ -0,0 +1,33 @@
+//===-- sanitizer_placement_new.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//
+// The file provides 'placement new'.
+// Do not include it into header files, only into source files.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLACEMENT_NEW_H
+#define SANITIZER_PLACEMENT_NEW_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+#if (__WORDSIZE == 64) || defined(__APPLE__)
+typedef uptr operator_new_ptr_type;
+#else
+typedef u32 operator_new_ptr_type;
+#endif
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_ptr_type sz, void *p) {
+ return p;
+}
+
+#endif // SANITIZER_PLACEMENT_NEW_H
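
An illustrative sketch of constructing an object in raw memory with this placement new, as dl_iterate_phdr_cb() does in sanitizer_linux.cc (assumes InternalAlloc() is declared in sanitizer_common.h):

    #include "sanitizer_common.h"
    #include "sanitizer_placement_new.h"
    #include "sanitizer_symbolizer.h"

    static __sanitizer::ModuleDIContext *MakeModule(const char *name,
                                                    __sanitizer::uptr base) {
      void *mem =
          __sanitizer::InternalAlloc(sizeof(__sanitizer::ModuleDIContext));
      // Construct in place; no global operator new is pulled in.
      return new(mem) __sanitizer::ModuleDIContext(name, base);
    }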
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
new file mode 100644
index 0000000..4caee3b
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -0,0 +1,164 @@
+//===-- sanitizer_posix.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements POSIX-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+#if defined(__linux__) || defined(__APPLE__)
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+// ------------- sanitizer_common.h
+
+int GetPid() {
+ return getpid();
+}
+
+uptr GetThreadSelf() {
+ return (uptr)pthread_self();
+}
+
+void *MmapOrDie(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, kPageSize);
+ void *res = internal_mmap(0, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (res == (void*)-1) {
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
+ size, size, mem_type);
+ CHECK("unable to mmap" && 0);
+ }
+ return res;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ int res = internal_munmap(addr, size);
+ if (res != 0) {
+ Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+ return internal_mmap((void*)fixed_addr, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
+ -1, 0);
+}
+
+void *Mprotect(uptr fixed_addr, uptr size) {
+ return internal_mmap((void*)fixed_addr, size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
+ -1, 0);
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ fd_t fd = internal_open(file_name, false);
+ CHECK_NE(fd, kInvalidFd);
+ uptr fsize = internal_filesize(fd);
+ CHECK_NE(fsize, (uptr)-1);
+ CHECK_GT(fsize, 0);
+ *buff_size = RoundUpTo(fsize, kPageSize);
+ void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return (map == MAP_FAILED) ? 0 : map;
+}
+
+
+static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
+ uptr start2, uptr end2) {
+ CHECK(start1 <= end1);
+ CHECK(start2 <= end2);
+ return (end1 < start2) || (end2 < start1);
+}
+
+// FIXME: this is thread-unsafe, but should not cause problems most of the time.
+// When the shadow is mapped only a single thread usually exists (plus maybe
+// several worker threads on Mac, which aren't expected to map big chunks of
+// memory).
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ ProcessMaps procmaps;
+ uptr start, end;
+ while (procmaps.Next(&start, &end,
+ /*offset*/0, /*filename*/0, /*filename_size*/0)) {
+ if (!IntervalsAreSeparate(start, end, range_start, range_end))
+ return false;
+ }
+ return true;
+}
+
+void DumpProcessMap() {
+ ProcessMaps proc_maps;
+ uptr start, end;
+ const sptr kBufSize = 4095;
+ char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
+ Report("Process memory map follows:\n");
+ while (proc_maps.Next(&start, &end, /* file_offset */0,
+ filename, kBufSize)) {
+ Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
+ }
+ Report("End of process memory map.\n");
+ UnmapOrDie(filename, kBufSize);
+}
+
+const char *GetPwd() {
+ return GetEnv("PWD");
+}
+
+void DisableCoreDumper() {
+ struct rlimit nocore;
+ nocore.rlim_cur = 0;
+ nocore.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, &nocore);
+}
+
+void SleepForSeconds(int seconds) {
+ sleep(seconds);
+}
+
+void SleepForMillis(int millis) {
+ usleep(millis * 1000);
+}
+
+void Exit(int exitcode) {
+ _exit(exitcode);
+}
+
+void Abort() {
+ abort();
+}
+
+int Atexit(void (*function)(void)) {
+#ifndef SANITIZER_GO
+ return atexit(function);
+#else
+ return 0;
+#endif
+}
+
+} // namespace __sanitizer
+
+#endif  // __linux__ || __APPLE__
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
new file mode 100644
index 0000000..7b70c3a
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -0,0 +1,185 @@
+//===-- sanitizer_printf.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Internal printf function, used inside run-time libraries.
+// We can't use libc printf because we intercept some of the functions used
+// inside it.
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
+namespace __sanitizer {
+
+static int AppendChar(char **buff, const char *buff_end, char c) {
+ if (*buff < buff_end) {
+ **buff = c;
+ (*buff)++;
+ }
+ return 1;
+}
+
+// Appends number in a given base to buffer. If its length is less than
+// "minimal_num_length", it is padded with leading zeroes.
+static int AppendUnsigned(char **buff, const char *buff_end, u64 num,
+ u8 base, u8 minimal_num_length) {
+ uptr const kMaxLen = 30;
+ RAW_CHECK(base == 10 || base == 16);
+ RAW_CHECK(minimal_num_length < kMaxLen);
+ uptr num_buffer[kMaxLen];
+ uptr pos = 0;
+ do {
+ RAW_CHECK_MSG(pos < kMaxLen, "appendNumber buffer overflow");
+ num_buffer[pos++] = num % base;
+ num /= base;
+ } while (num > 0);
+ while (pos < minimal_num_length) num_buffer[pos++] = 0;
+ int result = 0;
+ while (pos-- > 0) {
+ uptr digit = num_buffer[pos];
+ result += AppendChar(buff, buff_end, (digit < 10) ? '0' + digit
+ : 'a' + digit - 10);
+ }
+ return result;
+}
+
+static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num) {
+ int result = 0;
+ if (num < 0) {
+ result += AppendChar(buff, buff_end, '-');
+ num = -num;
+ }
+ result += AppendUnsigned(buff, buff_end, (u64)num, 10, 0);
+ return result;
+}
+
+static int AppendString(char **buff, const char *buff_end, const char *s) {
+ if (s == 0)
+ s = "<null>";
+ int result = 0;
+ for (; *s; s++) {
+ result += AppendChar(buff, buff_end, *s);
+ }
+ return result;
+}
+
+static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
+ int result = 0;
+ result += AppendString(buff, buff_end, "0x");
+ result += AppendUnsigned(buff, buff_end, ptr_value, 16,
+ (__WORDSIZE == 64) ? 12 : 8);
+ return result;
+}
+
+int VSNPrintf(char *buff, int buff_length,
+ const char *format, va_list args) {
+ static const char *kPrintfFormatsHelp = "Supported Printf formats: "
+ "%%[z]{d,u,x}; %%p; %%s\n";
+ RAW_CHECK(format);
+ RAW_CHECK(buff_length > 0);
+ const char *buff_end = &buff[buff_length - 1];
+ const char *cur = format;
+ int result = 0;
+ for (; *cur; cur++) {
+ if (*cur != '%') {
+ result += AppendChar(&buff, buff_end, *cur);
+ continue;
+ }
+ cur++;
+ bool have_z = (*cur == 'z');
+ cur += have_z;
+ s64 dval;
+ u64 uval;
+ switch (*cur) {
+ case 'd': {
+ dval = have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
+ result += AppendSignedDecimal(&buff, buff_end, dval);
+ break;
+ }
+ case 'u':
+ case 'x': {
+ uval = have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
+ result += AppendUnsigned(&buff, buff_end, uval,
+ (*cur == 'u') ? 10 : 16, 0);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!have_z, kPrintfFormatsHelp);
+ result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!have_z, kPrintfFormatsHelp);
+ result += AppendString(&buff, buff_end, va_arg(args, char*));
+ break;
+ }
+ case '%' : {
+ RAW_CHECK_MSG(!have_z, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(buff <= buff_end);
+ AppendChar(&buff, buff_end + 1, '\0');
+ return result;
+}
+
+void Printf(const char *format, ...) {
+ const int kLen = 1024 * 4;
+ char *buffer = (char*)MmapOrDie(kLen, __FUNCTION__);
+ va_list args;
+ va_start(args, format);
+ int needed_length = VSNPrintf(buffer, kLen, format, args);
+ va_end(args);
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Printf is too short!\n");
+ RawWrite(buffer);
+ UnmapOrDie(buffer, kLen);
+}
+
+// Writes at most "length" symbols to "buffer" (including trailing '\0').
+// Returns the number of symbols that should have been written to buffer
+// (not including trailing '\0'). Thus, the string is truncated
+// iff return value is not less than "length".
+int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ int needed_length = VSNPrintf(buffer, length, format, args);
+ va_end(args);
+ return needed_length;
+}
+
+// Like Printf, but prints the current PID before the output string.
+void Report(const char *format, ...) {
+ const int kLen = 1024 * 4;
+ char *buffer = (char*)MmapOrDie(kLen, __FUNCTION__);
+ int needed_length = internal_snprintf(buffer, kLen, "==%d== ", GetPid());
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ va_list args;
+ va_start(args, format);
+ needed_length += VSNPrintf(buffer + needed_length, kLen - needed_length,
+ format, args);
+ va_end(args);
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ RawWrite(buffer);
+ UnmapOrDie(buffer, kLen);
+}
+
+} // namespace __sanitizer
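
For reference, a sketch of the format subset VSNPrintf() accepts: %d, %u, %x (each optionally with a z size modifier for uptr/sptr), %p, %s and %%; anything else trips a RAW_CHECK. The reporting function below is hypothetical:

    #include "sanitizer_common.h"

    static void ExampleReport(__sanitizer::uptr addr, const char *name) {
      // %zx prints a uptr in hex, %p prints a zero-padded pointer.
      __sanitizer::Printf("module %s mapped at %p (0x%zx)\n",
                          name, (void*)addr, addr);
      // Report() prefixes the message with the current ==pid==.
      __sanitizer::Report("error at %p in %s\n", (void*)addr, name);
    }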
diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h
new file mode 100644
index 0000000..e7f9cac
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_procmaps.h
@@ -0,0 +1,82 @@
+//===-- sanitizer_procmaps.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Information about the process mappings.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PROCMAPS_H
+#define SANITIZER_PROCMAPS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+class ProcessMaps {
+ public:
+ ProcessMaps();
+ bool Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size);
+ void Reset();
+ // Gets the object file name and the offset in that object for a given
+ // address 'addr'. Returns true on success.
+ bool GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[], uptr filename_size);
+ ~ProcessMaps();
+
+ private:
+ // Default implementation of GetObjectNameAndOffset.
+ // Quite slow, because it iterates through the whole process map for each
+ // lookup.
+ bool IterateForObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[], uptr filename_size) {
+ Reset();
+ uptr start, end, file_offset;
+ for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size);
+ i++) {
+ if (addr >= start && addr < end) {
+ // Don't subtract 'start' for the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high at address space (in particular, higher than
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ *offset = (addr - (i ? start : 0)) + file_offset;
+ return true;
+ }
+ }
+ if (filename_size)
+ filename[0] = '\0';
+ return false;
+ }
+
+#if defined __linux__
+ char *proc_self_maps_buff_;
+ uptr proc_self_maps_buff_mmaped_size_;
+ uptr proc_self_maps_buff_len_;
+ char *current_;
+#elif defined __APPLE__
+ template<u32 kLCSegment, typename SegmentCommand>
+ bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size);
+ int current_image_;
+ u32 current_magic_;
+ int current_load_cmd_count_;
+ char *current_load_cmd_addr_;
+#endif
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PROCMAPS_H
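
A short illustrative sketch of the object-name/offset lookup declared above (the caller is hypothetical):

    #include "sanitizer_common.h"
    #include "sanitizer_procmaps.h"

    static bool DescribePc(__sanitizer::uptr pc) {
      __sanitizer::ProcessMaps maps;
      __sanitizer::uptr offset;
      char module[512];
      // Walks every mapping; returns false if pc is not inside any of them.
      if (!maps.GetObjectNameAndOffset(pc, &offset, module, sizeof(module)))
        return false;
      __sanitizer::Printf("%p is %s+0x%zx\n", (void*)pc, module, offset);
      return true;
    }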
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.cc b/lib/sanitizer_common/sanitizer_symbolizer.cc
new file mode 100644
index 0000000..85eb076
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_symbolizer.cc
@@ -0,0 +1,144 @@
+//===-- sanitizer_symbolizer.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a stub for LLVM-based symbolizer.
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. See sanitizer.h for details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+bool IsFullNameOfDWARFSection(const char *full_name, const char *short_name) {
+ // Skip "__DWARF," prefix.
+ if (0 == internal_strncmp(full_name, "__DWARF,", 8)) {
+ full_name += 8;
+ }
+  // Skip . and _ prefixes.
+ while (*full_name == '.' || *full_name == '_') {
+ full_name++;
+ }
+ return 0 == internal_strcmp(full_name, short_name);
+}
+
+void AddressInfo::Clear() {
+ InternalFree(module);
+ InternalFree(function);
+ InternalFree(file);
+ internal_memset(this, 0, sizeof(AddressInfo));
+}
+
+ModuleDIContext::ModuleDIContext(const char *module_name, uptr base_address) {
+ full_name_ = internal_strdup(module_name);
+ short_name_ = internal_strrchr(module_name, '/');
+ if (short_name_ == 0) {
+ short_name_ = full_name_;
+ } else {
+ short_name_++;
+ }
+ base_address_ = base_address;
+ n_ranges_ = 0;
+ mapped_addr_ = 0;
+ mapped_size_ = 0;
+}
+
+void ModuleDIContext::addAddressRange(uptr beg, uptr end) {
+ CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
+ ranges_[n_ranges_].beg = beg;
+ ranges_[n_ranges_].end = end;
+ n_ranges_++;
+}
+
+bool ModuleDIContext::containsAddress(uptr address) const {
+ for (uptr i = 0; i < n_ranges_; i++) {
+ if (ranges_[i].beg <= address && address < ranges_[i].end)
+ return true;
+ }
+ return false;
+}
+
+void ModuleDIContext::getAddressInfo(AddressInfo *info) {
+ info->module = internal_strdup(full_name_);
+ info->module_offset = info->address - base_address_;
+ if (mapped_addr_ == 0)
+ CreateDIContext();
+ // FIXME: Use the actual debug info context here.
+ info->function = 0;
+ info->file = 0;
+ info->line = 0;
+ info->column = 0;
+}
+
+void ModuleDIContext::CreateDIContext() {
+ mapped_addr_ = (uptr)MapFileToMemory(full_name_, &mapped_size_);
+ CHECK(mapped_addr_);
+ DWARFSection debug_info;
+ DWARFSection debug_abbrev;
+ DWARFSection debug_line;
+ DWARFSection debug_aranges;
+ DWARFSection debug_str;
+ FindDWARFSection(mapped_addr_, "debug_info", &debug_info);
+ FindDWARFSection(mapped_addr_, "debug_abbrev", &debug_abbrev);
+ FindDWARFSection(mapped_addr_, "debug_line", &debug_line);
+ FindDWARFSection(mapped_addr_, "debug_aranges", &debug_aranges);
+ FindDWARFSection(mapped_addr_, "debug_str", &debug_str);
+ // FIXME: Construct actual debug info context using mapped_addr,
+ // mapped_size and pointers to DWARF sections in memory.
+}
+
+class Symbolizer {
+ public:
+ uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
+ if (max_frames == 0)
+ return 0;
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->address = addr;
+ ModuleDIContext *module = FindModuleForAddress(addr);
+ if (module) {
+ module->getAddressInfo(info);
+ return 1;
+ }
+ return 0;
+ }
+
+ private:
+ ModuleDIContext *FindModuleForAddress(uptr address) {
+ if (modules_ == 0) {
+ modules_ = (ModuleDIContext*)InternalAlloc(
+ kMaxNumberOfModuleContexts * sizeof(ModuleDIContext));
+ CHECK(modules_);
+ n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts);
+ CHECK_GT(n_modules_, 0);
+ CHECK_LT(n_modules_, kMaxNumberOfModuleContexts);
+ }
+ for (uptr i = 0; i < n_modules_; i++) {
+ if (modules_[i].containsAddress(address)) {
+ return &modules_[i];
+ }
+ }
+ return 0;
+ }
+ static const uptr kMaxNumberOfModuleContexts = 4096;
+ // Array of module debug info contexts is leaked.
+ ModuleDIContext *modules_;
+ uptr n_modules_;
+};
+
+static Symbolizer symbolizer; // Linker initialized.
+
+uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
+ return symbolizer.SymbolizeCode(address, frames, max_frames);
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.h b/lib/sanitizer_common/sanitizer_symbolizer.h
new file mode 100644
index 0000000..c813e80
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -0,0 +1,100 @@
+//===-- sanitizer_symbolizer.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Symbolizer is intended to be used by both
+// AddressSanitizer and ThreadSanitizer to symbolize a given
+// address. It is an analogue of the addr2line utility and allows mapping an
+// instruction address to a location in source code at run-time.
+//
+// Symbolizer is planned to use debug information (in DWARF format)
+// in a binary via the interface defined in "llvm/DebugInfo/DIContext.h".
+//
+// Symbolizer code should be called from the run-time library of
+// dynamic tools, and generally should not call memory allocation
+// routines or other system library functions intercepted by those tools.
+// Instead, Symbolizer code should use their replacements, defined in
+// "compiler-rt/lib/sanitizer_common/sanitizer_libc.h".
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_H
+#define SANITIZER_SYMBOLIZER_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+// WARNING: Do not include system headers here. See details above.
+
+namespace __sanitizer {
+
+struct AddressInfo {
+ uptr address;
+ char *module;
+ uptr module_offset;
+ char *function;
+ char *file;
+ int line;
+ int column;
+
+ AddressInfo() {
+ internal_memset(this, 0, sizeof(AddressInfo));
+ }
+ // Deletes all strings and sets all fields to zero.
+ void Clear();
+};
+
+// Fills at most "max_frames" elements of "frames" with descriptions
+// for a given address (in all inlined functions). Returns the number
+// of descriptions actually filled.
+// This function should NOT be called from two threads simultaneously.
+uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+
+// Debug info routines
+struct DWARFSection {
+ const char *data;
+ uptr size;
+ DWARFSection() {
+ data = 0;
+ size = 0;
+ }
+};
+// Returns true on success.
+bool FindDWARFSection(uptr object_file_addr, const char *section_name,
+ DWARFSection *section);
+bool IsFullNameOfDWARFSection(const char *full_name, const char *short_name);
+
+class ModuleDIContext {
+ public:
+ ModuleDIContext(const char *module_name, uptr base_address);
+ void addAddressRange(uptr beg, uptr end);
+ bool containsAddress(uptr address) const;
+ void getAddressInfo(AddressInfo *info);
+
+ const char *full_name() const { return full_name_; }
+
+ private:
+ void CreateDIContext();
+
+ struct AddressRange {
+ uptr beg;
+ uptr end;
+ };
+ char *full_name_;
+ char *short_name_;
+ uptr base_address_;
+ static const uptr kMaxNumberOfAddressRanges = 8;
+ AddressRange ranges_[kMaxNumberOfAddressRanges];
+ uptr n_ranges_;
+ uptr mapped_addr_;
+ uptr mapped_size_;
+};
+
+// OS-dependent function that fills |modules| with descriptions of at most
+// |max_modules| loaded modules and returns their number.
+uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_H
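
An illustrative sketch of calling the symbolizer interface declared above (the printing loop is hypothetical):

    #include "sanitizer_common.h"
    #include "sanitizer_symbolizer.h"

    static void PrintSymbolizedFrames(__sanitizer::uptr pc) {
      __sanitizer::AddressInfo frames[8];
      __sanitizer::uptr n = __sanitizer::SymbolizeCode(pc, frames, 8);
      for (__sanitizer::uptr i = 0; i < n; i++) {
        __sanitizer::Printf("  #%zu %p %s %s:%d\n", i, (void*)frames[i].address,
                            frames[i].function ? frames[i].function : "??",
                            frames[i].file ? frames[i].file : "??",
                            frames[i].line);
        frames[i].Clear();  // Frees the strdup'ed module/function/file strings.
      }
    }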
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
new file mode 100644
index 0000000..c68a1fe
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -0,0 +1,200 @@
+//===-- sanitizer_win.cc --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements windows-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+#ifdef _WIN32
+#include <windows.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// --------------------- sanitizer_common.h
+int GetPid() {
+ return GetProcessId(GetCurrentProcess());
+}
+
+uptr GetThreadSelf() {
+ return GetCurrentThreadId();
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
+ // FIXME: is it possible for the stack to not be a single allocation?
+ // Are these values what ASan expects to get (reserved, not committed;
+ // including stack guard page) ?
+ *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
+ *stack_bottom = (uptr)mbi.AllocationBase;
+}
+
+
+void *MmapOrDie(uptr size, const char *mem_type) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0) {
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
+ size, size, mem_type);
+ CHECK("unable to mmap" && 0);
+ }
+ return rv;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+ Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+ return VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+}
+
+void *Mprotect(uptr fixed_addr, uptr size) {
+ return VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+}
+
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ // FIXME: shall we do anything here on Windows?
+ return true;
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ UNIMPLEMENTED();
+}
+
+const char *GetEnv(const char *name) {
+ static char env_buffer[32767] = {};
+
+ // Note: this implementation stores the result in a static buffer so we only
+ // allow it to be called just once.
+ static bool called_once = false;
+ if (called_once)
+ UNIMPLEMENTED();
+ called_once = true;
+
+ DWORD rv = GetEnvironmentVariableA(name, env_buffer, sizeof(env_buffer));
+ if (rv > 0 && rv < sizeof(env_buffer))
+ return env_buffer;
+ return 0;
+}
+
+const char *GetPwd() {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+void DumpProcessMap() {
+ UNIMPLEMENTED();
+}
+
+void DisableCoreDumper() {
+ UNIMPLEMENTED();
+}
+
+void SleepForSeconds(int seconds) {
+ Sleep(seconds * 1000);
+}
+
+void SleepForMillis(int millis) {
+ Sleep(millis);
+}
+
+void Exit(int exitcode) {
+ _exit(exitcode);
+}
+
+void Abort() {
+ abort();
+ _exit(-1); // abort is not NORETURN on Windows.
+}
+
+int Atexit(void (*function)(void)) {
+ return atexit(function);
+}
+
+// ------------------ sanitizer_symbolizer.h
+bool FindDWARFSection(uptr object_file_addr, const char *section_name,
+ DWARFSection *section) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+// ------------------ sanitizer_libc.h
+void *internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+int internal_munmap(void *addr, uptr length) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+int internal_close(fd_t fd) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+fd_t internal_open(const char *filename, bool write) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ if (fd != 2)
+ UNIMPLEMENTED();
+ HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
+ if (err == 0)
+ return 0; // FIXME: this might not work on some apps.
+ DWORD ret;
+ if (!WriteFile(err, buf, count, &ret, 0))
+ return 0;
+ return ret;
+}
+
+uptr internal_filesize(fd_t fd) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+int internal_dup2(int oldfd, int newfd) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+int internal_sched_yield() {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+} // namespace __sanitizer
+
+#endif // _WIN32
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
new file mode 100644
index 0000000..1410f26
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
@@ -0,0 +1,257 @@
+//===-- sanitizer_allocator64_test.cc -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Tests for sanitizer_allocator64.h.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator64.h"
+#include "gtest/gtest.h"
+
+#include <algorithm>
+#include <vector>
+
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x10000000000; // 1T.
+
+typedef DefaultSizeClassMap SCMap;
+typedef
+ SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16, SCMap> Allocator;
+typedef SizeClassAllocatorLocalCache<Allocator::kNumClasses, Allocator>
+ AllocatorCache;
+
+TEST(SanitizerCommon, DefaultSizeClassMap) {
+#if 0
+ for (uptr i = 0; i < SCMap::kNumClasses; i++) {
+ // printf("% 3ld: % 5ld (%4lx); ", i, SCMap::Size(i), SCMap::Size(i));
+ printf("c%ld => %ld ", i, SCMap::Size(i));
+ if ((i % 8) == 7)
+ printf("\n");
+ }
+ printf("\n");
+#endif
+
+ for (uptr c = 0; c < SCMap::kNumClasses; c++) {
+ uptr s = SCMap::Size(c);
+ CHECK_EQ(SCMap::ClassID(s), c);
+ if (c != SCMap::kNumClasses - 1)
+ CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
+ CHECK_EQ(SCMap::ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
+ }
+ CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
+ uptr c = SCMap::ClassID(s);
+ CHECK_LT(c, SCMap::kNumClasses);
+ CHECK_GE(SCMap::Size(c), s);
+ if (c > 0)
+ CHECK_LT(SCMap::Size(c-1), s);
+ }
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64) {
+ Allocator a;
+ a.Init();
+
+ static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
+
+ std::vector<void *> allocated;
+
+ uptr last_total_allocated = 0;
+ for (int i = 0; i < 5; i++) {
+ // Allocate a bunch of chunks.
+ for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
+ uptr size = sizes[s];
+ // printf("s = %ld\n", size);
+ uptr n_iter = std::max((uptr)2, 1000000 / size);
+ for (uptr i = 0; i < n_iter; i++) {
+ void *x = a.Allocate(size, 1);
+ allocated.push_back(x);
+ CHECK(a.PointerIsMine(x));
+ CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+ uptr class_id = a.GetSizeClass(x);
+ CHECK_EQ(class_id, SCMap::ClassID(size));
+ uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ metadata[0] = reinterpret_cast<uptr>(x) + 1;
+ metadata[1] = 0xABCD;
+ }
+ }
+ // Deallocate all.
+ for (uptr i = 0; i < allocated.size(); i++) {
+ void *x = allocated[i];
+ uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
+ CHECK_EQ(metadata[1], 0xABCD);
+ a.Deallocate(x);
+ }
+ allocated.clear();
+ uptr total_allocated = a.TotalMemoryUsed();
+ if (last_total_allocated == 0)
+ last_total_allocated = total_allocated;
+ CHECK_EQ(last_total_allocated, total_allocated);
+ }
+
+ a.TestOnlyUnmap();
+}
+
+
+TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
+ Allocator a;
+ a.Init();
+ static volatile void *sink;
+
+ const uptr kNumAllocs = 10000;
+ void *allocated[kNumAllocs];
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ uptr size = (i % 4096) + 1;
+ void *x = a.Allocate(size, 1);
+ allocated[i] = x;
+ }
+ // Get Metadata kNumAllocs^2 times.
+ for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
+ sink = a.GetMetaData(allocated[i % kNumAllocs]);
+ }
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ a.Deallocate(allocated[i]);
+ }
+
+ a.TestOnlyUnmap();
+ (void)sink;
+}
+
+void FailInAssertionOnOOM() {
+ Allocator a;
+ a.Init();
+ const uptr size = 1 << 20;
+ for (int i = 0; i < 1000000; i++) {
+ a.Allocate(size, 1);
+ }
+
+ a.TestOnlyUnmap();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+ EXPECT_DEATH(FailInAssertionOnOOM(),
+ "allocated_user.*allocated_meta.*kRegionSize");
+}
+
+TEST(SanitizerCommon, LargeMmapAllocator) {
+ LargeMmapAllocator a;
+ a.Init();
+
+ static const int kNumAllocs = 100;
+ void *allocated[kNumAllocs];
+ static const uptr size = 1000;
+ // Allocate some.
+ for (int i = 0; i < kNumAllocs; i++) {
+ allocated[i] = a.Allocate(size, 1);
+ }
+ // Deallocate all.
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ for (int i = 0; i < kNumAllocs; i++) {
+ void *p = allocated[i];
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+  // Check that none are left.
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+ // Allocate some more, also add metadata.
+ for (int i = 0; i < kNumAllocs; i++) {
+ void *x = a.Allocate(size, 1);
+ CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ *meta = i;
+ allocated[i] = x;
+ }
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ // Deallocate all in reverse order.
+ for (int i = 0; i < kNumAllocs; i++) {
+ int idx = kNumAllocs - i - 1;
+ void *p = allocated[idx];
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+ CHECK_EQ(*meta, idx);
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+}
+
+TEST(SanitizerCommon, CombinedAllocator) {
+ typedef Allocator PrimaryAllocator;
+ typedef LargeMmapAllocator SecondaryAllocator;
+ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+ AllocatorCache cache;
+ Allocator a;
+ a.Init();
+ cache.Init();
+ const uptr kNumAllocs = 100000;
+ const uptr kNumIter = 10;
+ for (uptr iter = 0; iter < kNumIter; iter++) {
+ std::vector<void*> allocated;
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ uptr size = (i % (1 << 14)) + 1;
+ if ((i % 1024) == 0)
+ size = 1 << (10 + (i % 14));
+ void *x = a.Allocate(&cache, size, 1);
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ CHECK_EQ(*meta, 0);
+ *meta = size;
+ allocated.push_back(x);
+ }
+
+    std::random_shuffle(allocated.begin(), allocated.end());
+
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ void *x = allocated[i];
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ CHECK_NE(*meta, 0);
+ CHECK(a.PointerIsMine(x));
+ *meta = 0;
+ a.Deallocate(&cache, x);
+ }
+ allocated.clear();
+ a.SwallowCache(&cache);
+ }
+ a.TestOnlyUnmap();
+}
+
+static THREADLOCAL AllocatorCache static_allocator_cache;
+
+TEST(SanitizerCommon, SizeClassAllocatorLocalCache) {
+ static_allocator_cache.Init();
+
+ Allocator a;
+ AllocatorCache cache;
+
+ a.Init();
+ cache.Init();
+
+ const uptr kNumAllocs = 10000;
+ const int kNumIter = 100;
+ uptr saved_total = 0;
+ for (int i = 0; i < kNumIter; i++) {
+ void *allocated[kNumAllocs];
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ allocated[i] = cache.Allocate(&a, 0);
+ }
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ cache.Deallocate(&a, 0, allocated[i]);
+ }
+ cache.Drain(&a);
+ uptr total_allocated = a.TotalMemoryUsed();
+ if (saved_total)
+ CHECK_EQ(saved_total, total_allocated);
+ saved_total = total_allocated;
+ }
+
+ a.TestOnlyUnmap();
+}
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc b/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
new file mode 100644
index 0000000..cff7823
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
@@ -0,0 +1,99 @@
+//===-- sanitizer_allocator64_testlib.cc ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Malloc replacement library based on CombinedAllocator.
+// The primary purpose of this file is to serve as an end-to-end integration
+// test for CombinedAllocator.
+//===----------------------------------------------------------------------===//
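+// One plausible way to exercise this library (an assumption, not something
+// the build files prescribe): compile it into a shared object and preload it
+// so that the functions below replace the libc allocator, e.g.
+//   clang++ -O1 -fPIC -shared -I<path-to-compiler-rt>/lib \
+//       sanitizer_allocator64_testlib.cc -o testmalloc.so
+//   LD_PRELOAD=./testmalloc.so ./some_program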
+#include "sanitizer_common/sanitizer_allocator64.h"
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+
+namespace {
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x10000000000; // 1T.
+
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
+ DefaultSizeClassMap> PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
+ PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+static THREADLOCAL AllocatorCache cache;
+static Allocator allocator;
+
+static int inited = 0;
+
+__attribute__((constructor))
+void Init() {
+ if (inited) return;
+ inited = true; // this must happen before any threads are created.
+ allocator.Init();
+}
+
+} // namespace
+
+namespace __sanitizer {
+void NORETURN Die() {
+ _exit(77);
+}
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ fprintf(stderr, "CheckFailed: %s:%d %s (%lld %lld)\n",
+ file, line, cond, v1, v2);
+ Die();
+}
+}
+
+#if 1
+extern "C" {
+void *malloc(size_t size) {
+ Init();
+ assert(inited);
+ return allocator.Allocate(&cache, size, 8);
+}
+
+void free(void *p) {
+ assert(inited);
+ allocator.Deallocate(&cache, p);
+}
+
+void *calloc(size_t nmemb, size_t size) {
+ assert(inited);
+ return allocator.Allocate(&cache, nmemb * size, 8, /*cleared=*/true);
+}
+
+void *realloc(void *p, size_t new_size) {
+ assert(inited);
+ return allocator.Reallocate(&cache, p, new_size, 8);
+}
+
+void *memalign(size_t, size_t) { assert(0); return 0; }
+
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+ *memptr = allocator.Allocate(&cache, size, alignment);
+ CHECK_EQ(((uptr)*memptr & (alignment - 1)), 0);
+ return 0;
+}
+
+void *valloc(size_t size) {
+ assert(inited);
+ return allocator.Allocate(&cache, size, kPageSize);
+}
+
+void *pvalloc(size_t size) {
+ assert(inited);
+ if (size == 0) size = kPageSize;
+ return allocator.Allocate(&cache, size, kPageSize);
+}
+}
+#endif
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
new file mode 100644
index 0000000..d6c7f56
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -0,0 +1,56 @@
+//===-- sanitizer_allocator_test.cc ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "gtest/gtest.h"
+#include <stdlib.h>
+
+namespace __sanitizer {
+
+TEST(Allocator, Basic) {
+ char *p = (char*)InternalAlloc(10);
+ EXPECT_NE(p, (char*)0);
+ char *p2 = (char*)InternalAlloc(20);
+ EXPECT_NE(p2, (char*)0);
+ EXPECT_NE(p2, p);
+ for (int i = 0; i < 10; i++) {
+ p[i] = 42;
+ EXPECT_EQ(p, InternalAllocBlock(p + i));
+ }
+ for (int i = 0; i < 20; i++) {
+ ((char*)p2)[i] = 42;
+ EXPECT_EQ(p2, InternalAllocBlock(p2 + i));
+ }
+ InternalFree(p);
+ InternalFree(p2);
+}
+
+TEST(Allocator, Stress) {
+ const int kCount = 1000;
+ char *ptrs[kCount];
+ unsigned rnd = 42;
+ for (int i = 0; i < kCount; i++) {
+ uptr sz = rand_r(&rnd) % 1000;
+ char *p = (char*)InternalAlloc(sz);
+ EXPECT_NE(p, (char*)0);
+ for (uptr j = 0; j < sz; j++) {
+ p[j] = 42;
+ EXPECT_EQ(p, InternalAllocBlock(p + j));
+ }
+ ptrs[i] = p;
+ }
+ for (int i = 0; i < kCount; i++) {
+ InternalFree(ptrs[i]);
+ }
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
new file mode 100644
index 0000000..91570dc
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -0,0 +1,66 @@
+//===-- sanitizer_common_test.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "gtest/gtest.h"
+
+namespace __sanitizer {
+
+static bool IsSorted(const uptr *array, uptr n) {
+ for (uptr i = 1; i < n; i++) {
+ if (array[i] < array[i - 1]) return false;
+ }
+ return true;
+}
+
+TEST(SanitizerCommon, SortTest) {
+ uptr array[100];
+ uptr n = 100;
+ // Already sorted.
+ for (uptr i = 0; i < n; i++) {
+ array[i] = i;
+ }
+ SortArray(array, n);
+ EXPECT_TRUE(IsSorted(array, n));
+ // Reverse order.
+ for (uptr i = 0; i < n; i++) {
+ array[i] = n - 1 - i;
+ }
+ SortArray(array, n);
+ EXPECT_TRUE(IsSorted(array, n));
+ // Mixed order.
+ for (uptr i = 0; i < n; i++) {
+ array[i] = (i % 2 == 0) ? i : n - 1 - i;
+ }
+ SortArray(array, n);
+ EXPECT_TRUE(IsSorted(array, n));
+ // All equal.
+ for (uptr i = 0; i < n; i++) {
+ array[i] = 42;
+ }
+ SortArray(array, n);
+ EXPECT_TRUE(IsSorted(array, n));
+ // All but one sorted.
+ for (uptr i = 0; i < n - 1; i++) {
+ array[i] = i;
+ }
+ array[n - 1] = 42;
+ SortArray(array, n);
+ EXPECT_TRUE(IsSorted(array, n));
+  // Minimal case - sort two elements.
+ array[0] = 1;
+ array[1] = 0;
+ SortArray(array, 2);
+ EXPECT_TRUE(IsSorted(array, 2));
+}
+
+}  // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_flags_test.cc b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
new file mode 100644
index 0000000..4b273e5
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
@@ -0,0 +1,72 @@
+//===-- sanitizer_flags_test.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "gtest/gtest.h"
+
+#include "tsan_rtl.h" // FIXME: break dependency from TSan runtime.
+using __tsan::ScopedInRtl;
+
+#include <string.h>
+
+namespace __sanitizer {
+
+static const char kFlagName[] = "flag_name";
+
+template <typename T>
+static void TestFlag(T start_value, const char *env, T final_value) {
+ T flag = start_value;
+ ParseFlag(env, &flag, kFlagName);
+ EXPECT_EQ(final_value, flag);
+}
+
+static void TestStrFlag(const char *start_value, const char *env,
+ const char *final_value) {
+ const char *flag = start_value;
+ ParseFlag(env, &flag, kFlagName);
+ EXPECT_STREQ(final_value, flag);
+}
+
+TEST(SanitizerCommon, BooleanFlags) {
+ ScopedInRtl in_rtl;
+ TestFlag(true, "--flag_name", true);
+ TestFlag(false, "flag_name", false);
+ TestFlag(false, "--flag_name=1", true);
+ TestFlag(true, "asdas flag_name=0 asdas", false);
+ TestFlag(true, " --flag_name=0 ", false);
+ TestFlag(false, "flag_name=yes", true);
+ TestFlag(false, "flag_name=true", true);
+ TestFlag(true, "flag_name=no", false);
+ TestFlag(true, "flag_name=false", false);
+}
+
+TEST(SanitizerCommon, IntFlags) {
+ ScopedInRtl in_rtl;
+ TestFlag(-11, 0, -11);
+ TestFlag(-11, "flag_name", 0);
+ TestFlag(-11, "--flag_name=", 0);
+ TestFlag(-11, "--flag_name=42", 42);
+ TestFlag(-11, "--flag_name=-42", -42);
+}
+
+TEST(SanitizerCommon, StrFlags) {
+ ScopedInRtl in_rtl;
+ TestStrFlag("zzz", 0, "zzz");
+ TestStrFlag("zzz", "flag_name", "");
+ TestStrFlag("zzz", "--flag_name=", "");
+ TestStrFlag("", "--flag_name=abc", "abc");
+ TestStrFlag("", "--flag_name='abc zxc'", "abc zxc");
+ TestStrFlag("", "--flag_name=\"abc qwe\" asd", "abc qwe");
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_list_test.cc b/lib/sanitizer_common/tests/sanitizer_list_test.cc
new file mode 100644
index 0000000..d328fbf
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_list_test.cc
@@ -0,0 +1,157 @@
+//===-- sanitizer_list_test.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_list.h"
+#include "gtest/gtest.h"
+
+namespace __sanitizer {
+
+struct ListItem {
+ ListItem *next;
+};
+
+typedef IntrusiveList<ListItem> List;
+
+// Check that IntrusiveList can be made thread-local.
+static THREADLOCAL List static_list;
+
+static void SetList(List *l, ListItem *x = 0,
+ ListItem *y = 0, ListItem *z = 0) {
+ l->clear();
+ if (x) l->push_back(x);
+ if (y) l->push_back(y);
+ if (z) l->push_back(z);
+}
+
+static void CheckList(List *l, ListItem *i1, ListItem *i2 = 0, ListItem *i3 = 0,
+ ListItem *i4 = 0, ListItem *i5 = 0, ListItem *i6 = 0) {
+ if (i1) {
+ CHECK_EQ(l->front(), i1);
+ l->pop_front();
+ }
+ if (i2) {
+ CHECK_EQ(l->front(), i2);
+ l->pop_front();
+ }
+ if (i3) {
+ CHECK_EQ(l->front(), i3);
+ l->pop_front();
+ }
+ if (i4) {
+ CHECK_EQ(l->front(), i4);
+ l->pop_front();
+ }
+ if (i5) {
+ CHECK_EQ(l->front(), i5);
+ l->pop_front();
+ }
+ if (i6) {
+ CHECK_EQ(l->front(), i6);
+ l->pop_front();
+ }
+ CHECK(l->empty());
+}
+
+TEST(SanitizerCommon, IntrusiveList) {
+ ListItem items[6];
+ CHECK_EQ(static_list.size(), 0);
+
+ List l;
+ l.clear();
+
+ ListItem *x = &items[0];
+ ListItem *y = &items[1];
+ ListItem *z = &items[2];
+ ListItem *a = &items[3];
+ ListItem *b = &items[4];
+ ListItem *c = &items[5];
+
+ CHECK_EQ(l.size(), 0);
+ l.push_back(x);
+ CHECK_EQ(l.size(), 1);
+ CHECK_EQ(l.back(), x);
+ CHECK_EQ(l.front(), x);
+ l.pop_front();
+ CHECK(l.empty());
+ l.CheckConsistency();
+
+ l.push_front(x);
+ CHECK_EQ(l.size(), 1);
+ CHECK_EQ(l.back(), x);
+ CHECK_EQ(l.front(), x);
+ l.pop_front();
+ CHECK(l.empty());
+ l.CheckConsistency();
+
+ l.push_front(x);
+ l.push_front(y);
+ l.push_front(z);
+ CHECK_EQ(l.size(), 3);
+ CHECK_EQ(l.front(), z);
+ CHECK_EQ(l.back(), x);
+ l.CheckConsistency();
+
+ l.pop_front();
+ CHECK_EQ(l.size(), 2);
+ CHECK_EQ(l.front(), y);
+ CHECK_EQ(l.back(), x);
+ l.pop_front();
+ l.pop_front();
+ CHECK(l.empty());
+ l.CheckConsistency();
+
+ l.push_back(x);
+ l.push_back(y);
+ l.push_back(z);
+ CHECK_EQ(l.size(), 3);
+ CHECK_EQ(l.front(), x);
+ CHECK_EQ(l.back(), z);
+ l.CheckConsistency();
+
+ l.pop_front();
+ CHECK_EQ(l.size(), 2);
+ CHECK_EQ(l.front(), y);
+ CHECK_EQ(l.back(), z);
+ l.pop_front();
+ l.pop_front();
+ CHECK(l.empty());
+ l.CheckConsistency();
+
+ List l1, l2;
+ l1.clear();
+ l2.clear();
+
+ l1.append_front(&l2);
+ CHECK(l1.empty());
+ CHECK(l2.empty());
+
+ l1.append_back(&l2);
+ CHECK(l1.empty());
+ CHECK(l2.empty());
+
+ SetList(&l1, x);
+ CheckList(&l1, x);
+
+ SetList(&l1, x, y, z);
+ SetList(&l2, a, b, c);
+ l1.append_back(&l2);
+ CheckList(&l1, x, y, z, a, b, c);
+ CHECK(l2.empty());
+
+ SetList(&l1, x, y);
+ SetList(&l2);
+ l1.append_front(&l2);
+ CheckList(&l1, x, y);
+ CHECK(l2.empty());
+}
+
+} // namespace __sanitizer
diff --git a/lib/subdf3.c b/lib/subdf3.c
index 5eb1853..66fb1a5 100644
--- a/lib/subdf3.c
+++ b/lib/subdf3.c
@@ -18,7 +18,7 @@
fp_t COMPILER_RT_ABI __adddf3(fp_t a, fp_t b);
-ARM_EABI_FNALIAS(dsub, subdf3);
+ARM_EABI_FNALIAS(dsub, subdf3)
// Subtraction; flip the sign bit of b and add.
COMPILER_RT_ABI fp_t
diff --git a/lib/subsf3.c b/lib/subsf3.c
index 351be0ef..3659cd8 100644
--- a/lib/subsf3.c
+++ b/lib/subsf3.c
@@ -17,7 +17,7 @@
fp_t COMPILER_RT_ABI __addsf3(fp_t a, fp_t b);
-ARM_EABI_FNALIAS(fsub, subsf3);
+ARM_EABI_FNALIAS(fsub, subsf3)
// Subtraction; flip the sign bit of b and add.
COMPILER_RT_ABI fp_t
diff --git a/lib/subvti3.c b/lib/subvti3.c
index 44127b7..b32df5e 100644
--- a/lib/subvti3.c
+++ b/lib/subvti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: a - b */
/* Effects: aborts if a - b overflows */
diff --git a/lib/truncdfsf2.c b/lib/truncdfsf2.c
index f57af7e..61c909a 100644
--- a/lib/truncdfsf2.c
+++ b/lib/truncdfsf2.c
@@ -64,7 +64,7 @@ static inline dst_t dstFromRep(dst_rep_t x) {
// End helper routines. Conversion implementation follows.
-ARM_EABI_FNALIAS(d2f, truncdfsf2);
+ARM_EABI_FNALIAS(d2f, truncdfsf2)
COMPILER_RT_ABI dst_t
__truncdfsf2(src_t a) {
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
new file mode 100644
index 0000000..acfb854
--- /dev/null
+++ b/lib/tsan/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Build for the ThreadSanitizer runtime support library.
+
+file(GLOB TSAN_SOURCES "*.cc")
+
+if(CAN_TARGET_X86_64)
+ add_library(clang_rt.tsan-x86_64 STATIC ${TSAN_SOURCES})
+ set_target_properties(clang_rt.tsan-x86_64 PROPERTIES COMPILE_FLAGS "${TARGET_X86_64_CFLAGS}")
+endif()
diff --git a/lib/tsan/Makefile.mk b/lib/tsan/Makefile.mk
new file mode 100644
index 0000000..70fb610
--- /dev/null
+++ b/lib/tsan/Makefile.mk
@@ -0,0 +1,18 @@
+#===- lib/tsan/Makefile.mk ---------------------------------*- Makefile -*--===#
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+
+ModuleName := tsan
+SubDirs := rtl
+Sources :=
+ObjNames :=
+Dependencies :=
+
+Implementation := Generic
+
+TsanFunctions :=
diff --git a/lib/tsan/Makefile.old b/lib/tsan/Makefile.old
new file mode 100644
index 0000000..2091f61
--- /dev/null
+++ b/lib/tsan/Makefile.old
@@ -0,0 +1,106 @@
+DEBUG=0
+LDFLAGS=-ldl -lpthread -pie
+CXXFLAGS = -fPIE -g -Wall -Werror -DTSAN_DEBUG=$(DEBUG)
+# Silence warnings that Clang produces for gtest code.
+# Use -Wno-attributes so that gcc doesn't complain about unknown warning types.
+CXXFLAGS += -Wno-attributes
+ifeq ($(DEBUG), 0)
+ CXXFLAGS += -O3
+endif
+ifeq ($(CXX), clang++)
+ CXXFLAGS+= -Wno-unused-private-field -Wno-static-in-inline
+endif
+
+LIBTSAN=rtl/libtsan.a
+GTEST_ROOT=third_party/googletest
+GTEST_INCLUDE=-I$(GTEST_ROOT)/include
+GTEST_BUILD_DIR=$(GTEST_ROOT)/build
+GTEST_LIB=$(GTEST_BUILD_DIR)/gtest-all.o
+
+SANITIZER_COMMON_TESTS_SRC=$(wildcard ../sanitizer_common/tests/*_test.cc)
+SANITIZER_COMMON_TESTS_OBJ=$(patsubst %.cc,%.o,$(SANITIZER_COMMON_TESTS_SRC))
+RTL_TEST_SRC=$(wildcard rtl_tests/*.cc)
+RTL_TEST_OBJ=$(patsubst %.cc,%.o,$(RTL_TEST_SRC))
+UNIT_TEST_SRC=$(wildcard unit_tests/*_test.cc)
+UNIT_TEST_OBJ=$(patsubst %.cc,%.o,$(UNIT_TEST_SRC))
+UNIT_TEST_HDR=$(wildcard rtl/*.h) $(wildcard ../sanitizer_common/*.h)
+
+INCLUDES=-Irtl -I.. $(GTEST_INCLUDE)
+
+all: libtsan test
+
+help:
+ @ echo "A little help is always welcome!"
+ @ echo "The most useful targets are:"
+ @ echo " make install_deps # Install third-party dependencies required for building"
+ @ echo " make presubmit # Run it every time before committing"
+ @ echo " make lint # Run the style checker"
+ @ echo
+ @ echo "For more info, see http://code.google.com/p/data-race-test/wiki/ThreadSanitizer2"
+
+$(LIBTSAN): libtsan
+
+libtsan:
+ $(MAKE) -C rtl -f Makefile.old DEBUG=$(DEBUG)
+
+%.o: %.cc $(UNIT_TEST_HDR) $(LIBTSAN)
+ $(CXX) $(CXXFLAGS) $(CFLAGS) $(INCLUDES) -o $@ -c $<
+
+tsan_test: $(UNIT_TEST_OBJ) $(RTL_TEST_OBJ) \
+ $(SANITIZER_COMMON_TESTS_OBJ) $(LIBTSAN) $(GTEST_LIB)
+ $(CXX) $^ -o $@ $(LDFLAGS)
+
+test: libtsan tsan_test
+
+run: all
+ (ulimit -s 8192; ./tsan_test)
+ ./output_tests/test_output.sh
+
+presubmit:
+ $(MAKE) -f Makefile.old lint -j 4
+ # Debug build with clang.
+ $(MAKE) -f Makefile.old clean
+ $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=clang CXX=clang++
+ # Release build with clang.
+ $(MAKE) -f Makefile.old clean
+ $(MAKE) -f Makefile.old run DEBUG=0 -j 16 CC=clang CXX=clang++
+ # Debug build with gcc
+ $(MAKE) -f Makefile.old clean
+ $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=gcc CXX=g++
+ # Release build with gcc
+ $(MAKE) -f Makefile.old clean
+ $(MAKE) -f Makefile.old run DEBUG=0 -j 16 CC=gcc CXX=g++
+ ./check_analyze.sh
+ @ echo PRESUBMIT PASSED
+
+RTL_LINT_FITLER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces
+
+lint: lint_tsan lint_tests
+lint_tsan:
+ third_party/cpplint/cpplint.py --filter=$(RTL_LINT_FITLER) rtl/*.{cc,h} \
+ ../sanitizer_common/*.{cc,h}
+lint_tests:
+ third_party/cpplint/cpplint.py --filter=$(RTL_LINT_FITLER) \
+ rtl_tests/*.{cc,h} unit_tests/*.cc ../sanitizer_common/tests/*.cc
+
+install_deps:
+ rm -rf third_party
+ mkdir third_party
+ (cd third_party && \
+ svn co -r613 http://googletest.googlecode.com/svn/trunk googletest && \
+ svn co -r82 http://google-styleguide.googlecode.com/svn/trunk/cpplint cpplint \
+ )
+
+# Remove verbose printf from lint. Not strictly necessary.
+hack_cpplint:
+ sed -i "s/ sys.stderr.write('Done processing.*//g" third_party/cpplint/cpplint.py
+
+$(GTEST_LIB):
+ mkdir -p $(GTEST_BUILD_DIR) && \
+ cd $(GTEST_BUILD_DIR) && \
+ $(MAKE) -f ../make/Makefile CXXFLAGS="$(CXXFLAGS)" CFLAGS="$(CFLAGS)" CC=$(CC) CXX=$(CXX)
+
+clean:
+ rm -f asm_*.s libtsan.nm libtsan.objdump */*.o tsan_test
+ rm -rf $(GTEST_BUILD_DIR)
+ $(MAKE) clean -C rtl -f Makefile.old
diff --git a/lib/tsan/analyze_libtsan.sh b/lib/tsan/analyze_libtsan.sh
new file mode 100755
index 0000000..e080561
--- /dev/null
+++ b/lib/tsan/analyze_libtsan.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -e
+set -u
+
+get_asm() {
+ grep tsan_$1.: -A 10000 libtsan.objdump | \
+ awk "/[^:]$/ {print;} />:/ {c++; if (c == 2) {exit}}"
+}
+
+list="write1 \
+ write2 \
+ write4 \
+ write8 \
+ read1 \
+ read2 \
+ read4 \
+ read8 \
+ func_entry \
+ func_exit"
+
+BIN=`dirname $0`/tsan_test
+objdump -d $BIN > libtsan.objdump
+nm -S $BIN | grep "__tsan_" > libtsan.nm
+
+for f in $list; do
+ file=asm_$f.s
+ get_asm $f > $file
+ tot=$(wc -l < $file)
+ size=$(grep $f$ libtsan.nm | awk --non-decimal-data '{print ("0x"$2)+0}')
+ rsp=$(grep '(%rsp)' $file | wc -l)
+ push=$(grep 'push' $file | wc -l)
+ pop=$(grep 'pop' $file | wc -l)
+ call=$(grep 'call' $file | wc -l)
+ load=$(egrep 'mov .*\,.*\(.*\)|cmp .*\,.*\(.*\)' $file | wc -l)
+ store=$(egrep 'mov .*\(.*\),' $file | wc -l)
+ mov=$(grep 'mov' $file | wc -l)
+ lea=$(grep 'lea' $file | wc -l)
+ sh=$(grep 'shr\|shl' $file | wc -l)
+ cmp=$(grep 'cmp\|test' $file | wc -l)
+ printf "%10s tot %3d; size %4d; rsp %d; push %d; pop %d; call %d; load %2d; store %2d; sh %3d; mov %3d; lea %3d; cmp %3d\n" \
+ $f $tot $size $rsp $push $pop $call $load $store $sh $mov $lea $cmp;
+done
diff --git a/lib/tsan/benchmarks/mini_bench_local.cc b/lib/tsan/benchmarks/mini_bench_local.cc
new file mode 100644
index 0000000..accdcb6
--- /dev/null
+++ b/lib/tsan/benchmarks/mini_bench_local.cc
@@ -0,0 +1,49 @@
+// Mini-benchmark for tsan: non-shared memory writes.
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int len;
+int *a;
+const int kNumIter = 1000;
+
+__attribute__((noinline))
+void Run(int idx) {
+ for (int i = 0, n = len; i < n; i++)
+ a[i + idx * n] = i;
+}
+
+void *Thread(void *arg) {
+ long idx = (long)arg;
+ printf("Thread %ld started\n", idx);
+ for (int i = 0; i < kNumIter; i++)
+ Run(idx);
+ printf("Thread %ld done\n", idx);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_threads = 0;
+ if (argc != 3) {
+ n_threads = 4;
+ len = 1000000;
+ } else {
+ n_threads = atoi(argv[1]);
+ assert(n_threads > 0 && n_threads <= 32);
+ len = atoi(argv[2]);
+ }
+ printf("%s: n_threads=%d len=%d iter=%d\n",
+ __FILE__, n_threads, len, kNumIter);
+ a = new int[n_threads * len];
+ pthread_t *t = new pthread_t[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ pthread_create(&t[i], 0, Thread, (void*)i);
+ }
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ delete [] t;
+ delete [] a;
+ return 0;
+}
diff --git a/lib/tsan/benchmarks/mini_bench_shared.cc b/lib/tsan/benchmarks/mini_bench_shared.cc
new file mode 100644
index 0000000..f9b9f42
--- /dev/null
+++ b/lib/tsan/benchmarks/mini_bench_shared.cc
@@ -0,0 +1,51 @@
+// Mini-benchmark for tsan: shared memory reads.
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int len;
+int *a;
+const int kNumIter = 1000;
+
+__attribute__((noinline))
+void Run(int idx) {
+ for (int i = 0, n = len; i < n; i++)
+ if (a[i] != i) abort();
+}
+
+void *Thread(void *arg) {
+ long idx = (long)arg;
+ printf("Thread %ld started\n", idx);
+ for (int i = 0; i < kNumIter; i++)
+ Run(idx);
+ printf("Thread %ld done\n", idx);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_threads = 0;
+ if (argc != 3) {
+ n_threads = 4;
+ len = 1000000;
+ } else {
+ n_threads = atoi(argv[1]);
+ assert(n_threads > 0 && n_threads <= 32);
+ len = atoi(argv[2]);
+ }
+ printf("%s: n_threads=%d len=%d iter=%d\n",
+ __FILE__, n_threads, len, kNumIter);
+ a = new int[len];
+ for (int i = 0, n = len; i < n; i++)
+ a[i] = i;
+ pthread_t *t = new pthread_t[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ pthread_create(&t[i], 0, Thread, (void*)i);
+ }
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ delete [] t;
+ delete [] a;
+ return 0;
+}
diff --git a/lib/tsan/benchmarks/start_many_threads.cc b/lib/tsan/benchmarks/start_many_threads.cc
new file mode 100644
index 0000000..1e86fa6
--- /dev/null
+++ b/lib/tsan/benchmarks/start_many_threads.cc
@@ -0,0 +1,52 @@
+// Mini-benchmark for creating a lot of threads.
+//
+// Some facts:
+// a) clang -O1 takes <15ms to start N=500 threads,
+// consuming ~4MB more RAM than N=1.
+// b) clang -O1 -ftsan takes ~26s to start N=500 threads,
+// eats 5GB more RAM than N=1 (which is somewhat expected but still a lot)
+// but then it consumes ~4GB of extra memory when the threads shut down!
+// (definitely not in the barrier_wait interceptor)
+// Also, it takes 26s to run with N=500 vs just 1.1s to run with N=1.
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+pthread_barrier_t all_threads_ready;
+
+void* Thread(void *unused) {
+ pthread_barrier_wait(&all_threads_ready);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_threads;
+ if (argc == 1) {
+ n_threads = 100;
+ } else if (argc == 2) {
+ n_threads = atoi(argv[1]);
+ } else {
+ printf("Usage: %s n_threads\n", argv[0]);
+ return 1;
+ }
+ printf("%s: n_threads=%d\n", __FILE__, n_threads);
+
+ pthread_barrier_init(&all_threads_ready, NULL, n_threads + 1);
+
+ pthread_t *t = new pthread_t[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ int status = pthread_create(&t[i], 0, Thread, (void*)i);
+ assert(status == 0);
+ }
+ // sleep(5); // FIXME: simplify measuring the memory usage.
+ pthread_barrier_wait(&all_threads_ready);
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ // sleep(5); // FIXME: simplify measuring the memory usage.
+ delete [] t;
+
+ return 0;
+}
diff --git a/lib/tsan/benchmarks/vts_many_threads_bench.cc b/lib/tsan/benchmarks/vts_many_threads_bench.cc
new file mode 100644
index 0000000..f1056e2
--- /dev/null
+++ b/lib/tsan/benchmarks/vts_many_threads_bench.cc
@@ -0,0 +1,120 @@
+// Mini-benchmark for tsan VTS worst case performance
+// Idea:
+// 1) Spawn M + N threads (M >> N)
+// We'll call the 'M' threads as 'garbage threads'.
+// 2) Make sure all threads have been created so that no TIDs are reused
+// 3) Join the garbage threads
+// 4) Do many sync operations on the remaining N threads
+//
+// It turns out that, due to the O(M+N) VTS complexity, step (4) is much
+// slower when M is large.
+//
+// Some numbers:
+// a) clang++ native O1 with n_iterations=200kk takes
+// 5s regardless of M
+// clang++ tsanv2 O1 with n_iterations=20kk takes
+// 23.5s with M=200
+// 11.5s with M=1
+//    i.e. tsanv2 is ~23x to ~47x slower than native, depending on M.
+// b) g++ native O1 with n_iterations=200kk takes
+// 5.5s regardless of M
+// g++ tsanv1 O1 with n_iterations=2kk takes
+// 39.5s with M=200
+// 20.5s with M=1
+//    i.e. tsanv1 is ~370x to ~720x slower than native, depending on M.
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+class __attribute__((aligned(64))) Mutex {
+ public:
+ Mutex() { pthread_mutex_init(&m_, NULL); }
+ ~Mutex() { pthread_mutex_destroy(&m_); }
+ void Lock() { pthread_mutex_lock(&m_); }
+ void Unlock() { pthread_mutex_unlock(&m_); }
+
+ private:
+ pthread_mutex_t m_;
+};
+
+const int kNumMutexes = 1024;
+Mutex mutexes[kNumMutexes];
+
+int n_threads, n_iterations;
+
+pthread_barrier_t all_threads_ready, main_threads_ready;
+
+void* GarbageThread(void *unused) {
+ pthread_barrier_wait(&all_threads_ready);
+ return 0;
+}
+
+void *Thread(void *arg) {
+ long idx = (long)arg;
+ pthread_barrier_wait(&all_threads_ready);
+
+ // Wait for the main thread to join the garbage threads.
+ pthread_barrier_wait(&main_threads_ready);
+
+ printf("Thread %ld go!\n", idx);
+ int offset = idx * kNumMutexes / n_threads;
+ for (int i = 0; i < n_iterations; i++) {
+ mutexes[(offset + i) % kNumMutexes].Lock();
+ mutexes[(offset + i) % kNumMutexes].Unlock();
+ }
+ printf("Thread %ld done\n", idx);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_garbage_threads;
+ if (argc == 1) {
+ n_threads = 2;
+ n_garbage_threads = 200;
+ n_iterations = 20000000;
+ } else if (argc == 4) {
+ n_threads = atoi(argv[1]);
+ assert(n_threads > 0 && n_threads <= 32);
+ n_garbage_threads = atoi(argv[2]);
+ assert(n_garbage_threads > 0 && n_garbage_threads <= 16000);
+ n_iterations = atoi(argv[3]);
+ } else {
+ printf("Usage: %s n_threads n_garbage_threads n_iterations\n", argv[0]);
+ return 1;
+ }
+ printf("%s: n_threads=%d n_garbage_threads=%d n_iterations=%d\n",
+ __FILE__, n_threads, n_garbage_threads, n_iterations);
+
+ pthread_barrier_init(&all_threads_ready, NULL, n_garbage_threads + n_threads + 1);
+ pthread_barrier_init(&main_threads_ready, NULL, n_threads + 1);
+
+ pthread_t *t = new pthread_t[n_threads];
+ {
+ pthread_t *g_t = new pthread_t[n_garbage_threads];
+ for (int i = 0; i < n_garbage_threads; i++) {
+ int status = pthread_create(&g_t[i], 0, GarbageThread, NULL);
+ assert(status == 0);
+ }
+ for (int i = 0; i < n_threads; i++) {
+ int status = pthread_create(&t[i], 0, Thread, (void*)i);
+ assert(status == 0);
+ }
+ pthread_barrier_wait(&all_threads_ready);
+ printf("All threads started! Killing the garbage threads.\n");
+ for (int i = 0; i < n_garbage_threads; i++) {
+ pthread_join(g_t[i], 0);
+ }
+ delete [] g_t;
+ }
+ printf("Resuming the main threads.\n");
+ pthread_barrier_wait(&main_threads_ready);
+
+
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ delete [] t;
+ return 0;
+}
diff --git a/lib/tsan/check_analyze.sh b/lib/tsan/check_analyze.sh
new file mode 100755
index 0000000..39d570b
--- /dev/null
+++ b/lib/tsan/check_analyze.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -u
+
+RES=$(./analyze_libtsan.sh)
+PrintRes() {
+ printf "%s\n" "$RES"
+}
+
+PrintRes
+
+mops="write1 \
+ write2 \
+ write4 \
+ write8 \
+ read1 \
+ read2 \
+ read4 \
+ read8"
+func="func_entry \
+ func_exit"
+
+check() {
+ res=$(PrintRes | egrep "$1 .* $2 $3; ")
+ if [ "$res" == "" ]; then
+ echo FAILED $1 must contain $2 $3
+ exit 1
+ fi
+}
+
+for f in $mops; do
+ check $f rsp 1 # To read caller pc.
+ check $f push 0
+ check $f pop 0
+done
+
+for f in $func; do
+ check $f rsp 0
+ check $f push 0
+ check $f pop 0
+ check $f call 1 # TraceSwitch()
+done
+
+echo LGTM
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
new file mode 100755
index 0000000..a0d2f67
--- /dev/null
+++ b/lib/tsan/go/buildgo.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+set -e
+
+if [ "`uname -a | grep Linux`" != "" ]; then
+ LINUX=1
+ SUFFIX="linux_amd64"
+elif [ "`uname -a | grep Darwin`" != "" ]; then
+ MAC=1
+ SUFFIX="darwin_amd64"
+else
+ echo Unknown platform
+ exit 1
+fi
+
+SRCS="
+ tsan_go.cc
+ ../rtl/tsan_clock.cc
+ ../rtl/tsan_flags.cc
+ ../rtl/tsan_md5.cc
+ ../rtl/tsan_mutex.cc
+ ../rtl/tsan_printf.cc
+ ../rtl/tsan_report.cc
+ ../rtl/tsan_rtl.cc
+ ../rtl/tsan_rtl_mutex.cc
+ ../rtl/tsan_rtl_report.cc
+ ../rtl/tsan_rtl_thread.cc
+ ../rtl/tsan_stat.cc
+ ../rtl/tsan_suppressions.cc
+ ../rtl/tsan_sync.cc
+ ../../sanitizer_common/sanitizer_allocator.cc
+ ../../sanitizer_common/sanitizer_common.cc
+ ../../sanitizer_common/sanitizer_flags.cc
+ ../../sanitizer_common/sanitizer_libc.cc
+ ../../sanitizer_common/sanitizer_posix.cc
+ ../../sanitizer_common/sanitizer_printf.cc
+ ../../sanitizer_common/sanitizer_symbolizer.cc
+"
+
+if [ "$LINUX" != "" ]; then
+ SRCS+="
+ ../rtl/tsan_platform_linux.cc
+ ../../sanitizer_common/sanitizer_linux.cc
+ "
+elif [ "$MAC" != "" ]; then
+ SRCS+="
+ ../rtl/tsan_platform_mac.cc
+ ../../sanitizer_common/sanitizer_mac.cc
+ "
+fi
+
+SRCS+=$ADD_SRCS
+#ASMS="../rtl/tsan_rtl_amd64.S"
+
+rm -f gotsan.cc
+for F in $SRCS; do
+ cat $F >> gotsan.cc
+done
+
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -fPIC -g -Wall -Werror -fno-exceptions -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4"
+if [ "$DEBUG" == "" ]; then
+ FLAGS+=" -DTSAN_DEBUG=0 -O3 -fomit-frame-pointer"
+else
+ FLAGS+=" -DTSAN_DEBUG=1 -g"
+fi
+
+if [ "$LINUX" != "" ]; then
+ FLAGS+=" -ffreestanding"
+fi
+
+echo gcc gotsan.cc -S -o tmp.s $FLAGS $CFLAGS
+gcc gotsan.cc -S -o tmp.s $FLAGS $CFLAGS
+cat tmp.s $ASMS > gotsan.s
+echo as gotsan.s -o race_$SUFFIX.syso
+as gotsan.s -o race_$SUFFIX.syso
+
+gcc test.c race_$SUFFIX.syso -lpthread -o test
+TSAN_OPTIONS="exitcode=0" ./test
+
diff --git a/lib/tsan/go/test.c b/lib/tsan/go/test.c
new file mode 100644
index 0000000..a9a5b3d
--- /dev/null
+++ b/lib/tsan/go/test.c
@@ -0,0 +1,51 @@
+//===-- test.c ------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanity test for Go runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdio.h>
+
+void __tsan_init();
+void __tsan_fini();
+void __tsan_go_start(int pgoid, int chgoid, void *pc);
+void __tsan_go_end(int goid);
+void __tsan_read(int goid, void *addr, void *pc);
+void __tsan_write(int goid, void *addr, void *pc);
+void __tsan_func_enter(int goid, void *pc);
+void __tsan_func_exit(int goid);
+void __tsan_malloc(int goid, void *p, unsigned long sz, void *pc);
+void __tsan_free(void *p);
+void __tsan_acquire(int goid, void *addr);
+void __tsan_release(int goid, void *addr);
+void __tsan_release_merge(int goid, void *addr);
+
+int __tsan_symbolize(void *pc, char **img, char **rtn, char **file, int *l) {
+ return 0;
+}
+
+char buf[10];
+
+int main(void) {
+ __tsan_init();
+ __tsan_func_enter(0, &main);
+ __tsan_malloc(0, buf, 10, 0);
+ __tsan_release(0, buf);
+ __tsan_release_merge(0, buf);
+ __tsan_go_start(0, 1, 0);
+ __tsan_write(1, buf, 0);
+ __tsan_acquire(1, buf);
+ __tsan_go_end(1);
+ __tsan_read(0, buf, 0);
+ __tsan_free(buf);
+ __tsan_func_exit(0);
+ __tsan_fini();
+ return 0;
+}
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
new file mode 100644
index 0000000..4b3076c
--- /dev/null
+++ b/lib/tsan/go/tsan_go.cc
@@ -0,0 +1,185 @@
+//===-- tsan_go.cc --------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ThreadSanitizer runtime for Go language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+#include "tsan_symbolize.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include <stdlib.h>
+
+namespace __tsan {
+
+static ThreadState *goroutines[kMaxTid];
+
+void InitializeInterceptors() {
+}
+
+void InitializeDynamicAnnotations() {
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ return false;
+}
+
+void internal_start_thread(void(*func)(void*), void *arg) {
+}
+
+ReportStack *SymbolizeData(uptr addr) {
+ return 0;
+}
+
+ReportStack *NewReportStackEntry(uptr addr) {
+ ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
+ sizeof(ReportStack));
+ internal_memset(ent, 0, sizeof(*ent));
+ ent->pc = addr;
+ return ent;
+}
+
+void *internal_alloc(MBlockType typ, uptr sz) {
+ return InternalAlloc(sz);
+}
+
+void internal_free(void *p) {
+ InternalFree(p);
+}
+
+// Callback into Go.
+extern "C" int __tsan_symbolize(uptr pc, char **func, char **file,
+ int *line, int *off);
+
+ReportStack *SymbolizeCode(uptr addr) {
+ ReportStack *s = (ReportStack*)internal_alloc(MBlockReportStack,
+ sizeof(ReportStack));
+ internal_memset(s, 0, sizeof(*s));
+ s->pc = addr;
+ char *func = 0, *file = 0;
+ int line = 0, off = 0;
+ if (__tsan_symbolize(addr, &func, &file, &line, &off)) {
+ s->offset = off;
+ s->func = internal_strdup(func ? func : "??");
+ s->file = internal_strdup(file ? file : "-");
+ s->line = line;
+ s->col = 0;
+ free(func);
+ free(file);
+ }
+ return s;
+}
+
+extern "C" {
+
+static void AllocGoroutine(int tid) {
+ goroutines[tid] = (ThreadState*)internal_alloc(MBlockThreadContex,
+ sizeof(ThreadState));
+ internal_memset(goroutines[tid], 0, sizeof(ThreadState));
+}
+
+void __tsan_init() {
+ AllocGoroutine(0);
+ ThreadState *thr = goroutines[0];
+ thr->in_rtl++;
+ Initialize(thr);
+ thr->in_rtl--;
+}
+
+void __tsan_fini() {
+  // FIXME: Not necessarily thread 0.
+ ThreadState *thr = goroutines[0];
+ thr->in_rtl++;
+ int res = Finalize(thr);
+ thr->in_rtl--;
+ exit(res);
+}
+
+void __tsan_read(int goid, void *addr, void *pc) {
+ ThreadState *thr = goroutines[goid];
+ MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, false);
+}
+
+void __tsan_write(int goid, void *addr, void *pc) {
+ ThreadState *thr = goroutines[goid];
+ MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, true);
+}
+
+void __tsan_func_enter(int goid, void *pc) {
+ ThreadState *thr = goroutines[goid];
+ FuncEntry(thr, (uptr)pc);
+}
+
+void __tsan_func_exit(int goid) {
+ ThreadState *thr = goroutines[goid];
+ FuncExit(thr);
+}
+
+void __tsan_malloc(int goid, void *p, uptr sz, void *pc) {
+ ThreadState *thr = goroutines[goid];
+ thr->in_rtl++;
+ MemoryResetRange(thr, (uptr)pc, (uptr)p, sz);
+ MemoryAccessRange(thr, (uptr)pc, (uptr)p, sz, true);
+ thr->in_rtl--;
+}
+
+void __tsan_free(void *p) {
+ (void)p;
+}
+
+void __tsan_go_start(int pgoid, int chgoid, void *pc) {
+ if (chgoid == 0)
+ return;
+ AllocGoroutine(chgoid);
+ ThreadState *thr = goroutines[chgoid];
+ ThreadState *parent = goroutines[pgoid];
+ thr->in_rtl++;
+ parent->in_rtl++;
+ int goid2 = ThreadCreate(parent, (uptr)pc, 0, true);
+ ThreadStart(thr, goid2);
+ parent->in_rtl--;
+ thr->in_rtl--;
+}
+
+void __tsan_go_end(int goid) {
+ ThreadState *thr = goroutines[goid];
+ thr->in_rtl++;
+ ThreadFinish(thr);
+ thr->in_rtl--;
+}
+
+void __tsan_acquire(int goid, void *addr) {
+ ThreadState *thr = goroutines[goid];
+ thr->in_rtl++;
+ Acquire(thr, 0, (uptr)addr);
+ thr->in_rtl--;
+ //internal_free(thr);
+}
+
+void __tsan_release(int goid, void *addr) {
+ ThreadState *thr = goroutines[goid];
+ thr->in_rtl++;
+ ReleaseStore(thr, 0, (uptr)addr);
+ thr->in_rtl--;
+}
+
+void __tsan_release_merge(int goid, void *addr) {
+ ThreadState *thr = goroutines[goid];
+ thr->in_rtl++;
+ Release(thr, 0, (uptr)addr);
+ thr->in_rtl--;
+}
+
+void __tsan_finalizer_goroutine(int goid) {
+ ThreadState *thr = goroutines[goid];
+ ThreadFinalizerGoroutine(thr);
+}
+
+} // extern "C"
+} // namespace __tsan
diff --git a/lib/tsan/output_tests/free_race.c b/lib/tsan/output_tests/free_race.c
new file mode 100644
index 0000000..fb7fbac
--- /dev/null
+++ b/lib/tsan/output_tests/free_race.c
@@ -0,0 +1,43 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+int *mem;
+pthread_mutex_t mtx;
+
+void *Thread1(void *x) {
+ pthread_mutex_lock(&mtx);
+ free(mem);
+ pthread_mutex_unlock(&mtx);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ usleep(1000000);
+ pthread_mutex_lock(&mtx);
+ mem[0] = 42;
+ pthread_mutex_unlock(&mtx);
+ return NULL;
+}
+
+int main() {
+ mem = (int*)malloc(100);
+ pthread_mutex_init(&mtx, 0);
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ Thread2(0);
+ pthread_join(t, NULL);
+ pthread_mutex_destroy(&mtx);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
+// CHECK: Write of size 4 at {{.*}} by main thread:
+// CHECK: #0 Thread2
+// CHECK: #1 main
+// CHECK: Previous write of size 8 at {{.*}} by thread 1:
+// CHECK: #0 free
+// CHECK: #1 Thread1
+
diff --git a/lib/tsan/output_tests/free_race2.c b/lib/tsan/output_tests/free_race2.c
new file mode 100644
index 0000000..7b2bdec
--- /dev/null
+++ b/lib/tsan/output_tests/free_race2.c
@@ -0,0 +1,26 @@
+#include <stdlib.h>
+
+void __attribute__((noinline)) foo(int *mem) {
+ free(mem);
+}
+
+void __attribute__((noinline)) bar(int *mem) {
+ mem[0] = 42;
+}
+
+int main() {
+ int *mem = (int*)malloc(100);
+ foo(mem);
+ bar(mem);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
+// CHECK: Write of size 4 at {{.*}} by main thread:
+// CHECK: #0 bar
+// CHECK: #1 main
+// CHECK: Previous write of size 8 at {{.*}} by main thread:
+// CHECK: #0 free
+// CHECK: #1 foo
+// CHECK: #2 main
+
diff --git a/lib/tsan/output_tests/heap_race.cc b/lib/tsan/output_tests/heap_race.cc
new file mode 100644
index 0000000..e92bb37
--- /dev/null
+++ b/lib/tsan/output_tests/heap_race.cc
@@ -0,0 +1,19 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+
+void *Thread(void *a) {
+ ((int*)a)[0]++;
+ return NULL;
+}
+
+int main() {
+ int *p = new int(42);
+ pthread_t t;
+ pthread_create(&t, NULL, Thread, p);
+ p[0]++;
+ pthread_join(t, NULL);
+ delete p;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/memcpy_race.cc b/lib/tsan/output_tests/memcpy_race.cc
new file mode 100644
index 0000000..c6b79a7
--- /dev/null
+++ b/lib/tsan/output_tests/memcpy_race.cc
@@ -0,0 +1,40 @@
+#include <pthread.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+char *data = new char[10];
+char *data1 = new char[10];
+char *data2 = new char[10];
+
+void *Thread1(void *x) {
+ memcpy(data+5, data1, 1);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ usleep(500*1000);
+ memcpy(data+3, data2, 4);
+ return NULL;
+}
+
+int main() {
+ fprintf(stderr, "addr=%p\n", &data[5]);
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+// CHECK: addr=[[ADDR:0x[0-9,a-f]+]]
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: Write of size 1 at [[ADDR]] by thread 2:
+// CHECK: #0 memcpy
+// CHECK: #1 Thread2
+// CHECK: Previous write of size 1 at [[ADDR]] by thread 1:
+// CHECK: #0 memcpy
+// CHECK: #1 Thread1
+
diff --git a/lib/tsan/output_tests/mop_with_offset.cc b/lib/tsan/output_tests/mop_with_offset.cc
new file mode 100644
index 0000000..fc497bf
--- /dev/null
+++ b/lib/tsan/output_tests/mop_with_offset.cc
@@ -0,0 +1,36 @@
+#include <pthread.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+
+void *Thread1(void *x) {
+ int *p = (int*)x;
+ p[0] = 1;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ usleep(500*1000);
+ char *p = (char*)x;
+ p[2] = 1;
+ return NULL;
+}
+
+int main() {
+ int *data = new int(42);
+ fprintf(stderr, "ptr1=%p\n", data);
+ fprintf(stderr, "ptr2=%p\n", (char*)data + 2);
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, data);
+ pthread_create(&t[1], NULL, Thread2, data);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ delete data;
+}
+
+// CHECK: ptr1=[[PTR1:0x[0-9,a-f]+]]
+// CHECK: ptr2=[[PTR2:0x[0-9,a-f]+]]
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: Write of size 1 at [[PTR2]] by thread 2:
+// CHECK: Previous write of size 4 at [[PTR1]] by thread 1:
+
diff --git a/lib/tsan/output_tests/mop_with_offset2.cc b/lib/tsan/output_tests/mop_with_offset2.cc
new file mode 100644
index 0000000..bbeda55
--- /dev/null
+++ b/lib/tsan/output_tests/mop_with_offset2.cc
@@ -0,0 +1,36 @@
+#include <pthread.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+
+void *Thread1(void *x) {
+ usleep(500*1000);
+ int *p = (int*)x;
+ p[0] = 1;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ char *p = (char*)x;
+ p[2] = 1;
+ return NULL;
+}
+
+int main() {
+ int *data = new int(42);
+ fprintf(stderr, "ptr1=%p\n", data);
+ fprintf(stderr, "ptr2=%p\n", (char*)data + 2);
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, data);
+ pthread_create(&t[1], NULL, Thread2, data);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ delete data;
+}
+
+// CHECK: ptr1=[[PTR1:0x[0-9,a-f]+]]
+// CHECK: ptr2=[[PTR2:0x[0-9,a-f]+]]
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: Write of size 4 at [[PTR1]] by thread 1:
+// CHECK: Previous write of size 1 at [[PTR2]] by thread 2:
+
diff --git a/lib/tsan/output_tests/race_on_barrier.c b/lib/tsan/output_tests/race_on_barrier.c
new file mode 100644
index 0000000..98d7a1d8
--- /dev/null
+++ b/lib/tsan/output_tests/race_on_barrier.c
@@ -0,0 +1,31 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+pthread_barrier_t B;
+int Global;
+
+void *Thread1(void *x) {
+ pthread_barrier_init(&B, 0, 2);
+ pthread_barrier_wait(&B);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ usleep(1000000);
+ pthread_barrier_wait(&B);
+ return NULL;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ Thread2(0);
+ pthread_join(t, NULL);
+ pthread_barrier_destroy(&B);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+
diff --git a/lib/tsan/output_tests/race_on_barrier2.c b/lib/tsan/output_tests/race_on_barrier2.c
new file mode 100644
index 0000000..dbdb6b5
--- /dev/null
+++ b/lib/tsan/output_tests/race_on_barrier2.c
@@ -0,0 +1,30 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+pthread_barrier_t B;
+int Global;
+
+void *Thread1(void *x) {
+ if (pthread_barrier_wait(&B) == PTHREAD_BARRIER_SERIAL_THREAD)
+ pthread_barrier_destroy(&B);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ if (pthread_barrier_wait(&B) == PTHREAD_BARRIER_SERIAL_THREAD)
+ pthread_barrier_destroy(&B);
+ return NULL;
+}
+
+int main() {
+ pthread_barrier_init(&B, 0, 2);
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ Thread2(0);
+ pthread_join(t, NULL);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/race_on_mutex.c b/lib/tsan/output_tests/race_on_mutex.c
new file mode 100644
index 0000000..45c75be
--- /dev/null
+++ b/lib/tsan/output_tests/race_on_mutex.c
@@ -0,0 +1,41 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+pthread_mutex_t Mtx;
+int Global;
+
+void *Thread1(void *x) {
+ pthread_mutex_init(&Mtx, 0);
+ pthread_mutex_lock(&Mtx);
+ Global = 42;
+ pthread_mutex_unlock(&Mtx);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ usleep(1000000);
+ pthread_mutex_lock(&Mtx);
+ Global = 43;
+ pthread_mutex_unlock(&Mtx);
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ pthread_mutex_destroy(&Mtx);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK-NEXT: Read of size 1 at {{.*}} by thread 2:
+// CHECK-NEXT: #0 pthread_mutex_lock {{.*}} ({{.*}})
+// CHECK-NEXT: #1 Thread2 {{.*}}race_on_mutex.c:19{{(:3)?}} ({{.*}})
+// CHECK-NEXT: Previous write of size 1 at {{.*}} by thread 1:
+// CHECK-NEXT: #0 pthread_mutex_init {{.*}} ({{.*}})
+// CHECK-NEXT: #1 Thread1 {{.*}}race_on_mutex.c:10{{(:3)?}} ({{.*}})
diff --git a/lib/tsan/output_tests/race_with_finished_thread.cc b/lib/tsan/output_tests/race_with_finished_thread.cc
new file mode 100644
index 0000000..1f60f4b
--- /dev/null
+++ b/lib/tsan/output_tests/race_with_finished_thread.cc
@@ -0,0 +1,43 @@
+#include <pthread.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+// Ensure that we can restore a stack of a finished thread.
+
+int g_data;
+
+void __attribute__((noinline)) foobar(int *p) {
+ *p = 42;
+}
+
+void *Thread1(void *x) {
+ foobar(&g_data);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ usleep(1000*1000);
+ g_data = 43;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: Write of size 4 at {{.*}} by thread 2:
+// CHECK: Previous write of size 4 at {{.*}} by thread 1:
+// CHECK: #0 foobar
+// CHECK: #1 Thread1
+// CHECK: Thread 1 (finished) created at:
+// CHECK: #0 pthread_create
+// CHECK: #1 main
+
diff --git a/lib/tsan/output_tests/simple_race.c b/lib/tsan/output_tests/simple_race.c
new file mode 100644
index 0000000..ed831fd
--- /dev/null
+++ b/lib/tsan/output_tests/simple_race.c
@@ -0,0 +1,25 @@
+#include <pthread.h>
+#include <stdio.h>
+
+int Global;
+
+void *Thread1(void *x) {
+ Global = 42;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global = 43;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/simple_race.cc b/lib/tsan/output_tests/simple_race.cc
new file mode 100644
index 0000000..8d2cabf
--- /dev/null
+++ b/lib/tsan/output_tests/simple_race.cc
@@ -0,0 +1,24 @@
+#include <pthread.h>
+#include <stdio.h>
+
+int Global;
+
+void *Thread1(void *x) {
+ Global++;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global--;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/simple_stack.c b/lib/tsan/output_tests/simple_stack.c
new file mode 100644
index 0000000..2e94f23
--- /dev/null
+++ b/lib/tsan/output_tests/simple_stack.c
@@ -0,0 +1,65 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+
+void __attribute__((noinline)) foo1() {
+ Global = 42;
+}
+
+void __attribute__((noinline)) bar1() {
+ volatile int tmp = 42; (void)tmp;
+ foo1();
+}
+
+void __attribute__((noinline)) foo2() {
+ volatile int v = Global; (void)v;
+}
+
+void __attribute__((noinline)) bar2() {
+ volatile int tmp = 42; (void)tmp;
+ foo2();
+}
+
+void *Thread1(void *x) {
+ usleep(1000000);
+ bar1();
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ bar2();
+ return NULL;
+}
+
+void StartThread(pthread_t *t, void *(*f)(void*)) {
+ pthread_create(t, NULL, f, NULL);
+}
+
+int main() {
+ pthread_t t[2];
+ StartThread(&t[0], Thread1);
+ StartThread(&t[1], Thread2);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK-NEXT: Write of size 4 at {{.*}} by thread 1:
+// CHECK-NEXT: #0 foo1 {{.*}}simple_stack.c:8{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #1 bar1 {{.*}}simple_stack.c:13{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #2 Thread1 {{.*}}simple_stack.c:27{{(:3)?}} ({{.*}})
+// CHECK-NEXT: Previous read of size 4 at {{.*}} by thread 2:
+// CHECK-NEXT: #0 foo2 {{.*}}simple_stack.c:17{{(:26)?}} ({{.*}})
+// CHECK-NEXT: #1 bar2 {{.*}}simple_stack.c:22{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #2 Thread2 {{.*}}simple_stack.c:32{{(:3)?}} ({{.*}})
+// CHECK-NEXT: Thread 1 (running) created at:
+// CHECK-NEXT: #0 pthread_create {{.*}} ({{.*}})
+// CHECK-NEXT: #1 StartThread {{.*}}simple_stack.c:37{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #2 main {{.*}}simple_stack.c:42{{(:3)?}} ({{.*}})
+// CHECK-NEXT: Thread 2 ({{.*}}) created at:
+// CHECK-NEXT: #0 pthread_create {{.*}} ({{.*}})
+// CHECK-NEXT: #1 StartThread {{.*}}simple_stack.c:37{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #2 main {{.*}}simple_stack.c:43{{(:3)?}} ({{.*}})
diff --git a/lib/tsan/output_tests/simple_stack2.cc b/lib/tsan/output_tests/simple_stack2.cc
new file mode 100644
index 0000000..336cc9f
--- /dev/null
+++ b/lib/tsan/output_tests/simple_stack2.cc
@@ -0,0 +1,46 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+
+void __attribute__((noinline)) foo1() {
+ Global = 42;
+}
+
+void __attribute__((noinline)) bar1() {
+ volatile int tmp = 42; int tmp2 = tmp; (void)tmp2;
+ foo1();
+}
+
+void __attribute__((noinline)) foo2() {
+ volatile int tmp = Global; int tmp2 = tmp; (void)tmp2;
+}
+
+void __attribute__((noinline)) bar2() {
+ volatile int tmp = 42; int tmp2 = tmp; (void)tmp2;
+ foo2();
+}
+
+void *Thread1(void *x) {
+ usleep(1000000);
+ bar1();
+ return NULL;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ bar2();
+ pthread_join(t, NULL);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK-NEXT: Write of size 4 at {{.*}} by thread 1:
+// CHECK-NEXT: #0 foo1{{.*}} {{.*}}simple_stack2.cc:8{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #1 bar1{{.*}} {{.*}}simple_stack2.cc:13{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #2 Thread1{{.*}} {{.*}}simple_stack2.cc:27{{(:3)?}} ({{.*}})
+// CHECK-NEXT: Previous read of size 4 at {{.*}} by main thread:
+// CHECK-NEXT: #0 foo2{{.*}} {{.*}}simple_stack2.cc:17{{(:28)?}} ({{.*}})
+// CHECK-NEXT: #1 bar2{{.*}} {{.*}}simple_stack2.cc:22{{(:3)?}} ({{.*}})
+// CHECK-NEXT: #2 main{{.*}} {{.*}}simple_stack2.cc:34{{(:3)?}} ({{.*}})
diff --git a/lib/tsan/output_tests/static_init1.cc b/lib/tsan/output_tests/static_init1.cc
new file mode 100644
index 0000000..75d2819
--- /dev/null
+++ b/lib/tsan/output_tests/static_init1.cc
@@ -0,0 +1,25 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+struct P {
+ int x;
+ int y;
+};
+
+void *Thread(void *x) {
+ static P p = {rand(), rand()};
+ if (p.x > RAND_MAX || p.y > RAND_MAX)
+ exit(1);
+ return 0;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], 0, Thread, 0);
+ pthread_create(&t[1], 0, Thread, 0);
+ pthread_join(t[0], 0);
+ pthread_join(t[1], 0);
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/static_init2.cc b/lib/tsan/output_tests/static_init2.cc
new file mode 100644
index 0000000..f6e9596
--- /dev/null
+++ b/lib/tsan/output_tests/static_init2.cc
@@ -0,0 +1,31 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+struct Cache {
+ int x;
+ Cache(int x)
+ : x(x) {
+ }
+};
+
+void foo(Cache *my) {
+ static Cache *c = my ? my : new Cache(rand());
+ if (c->x >= RAND_MAX)
+ exit(1);
+}
+
+void *Thread(void *x) {
+ foo(new Cache(rand()));
+ return 0;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], 0, Thread, 0);
+ pthread_create(&t[1], 0, Thread, 0);
+ pthread_join(t[0], 0);
+ pthread_join(t[1], 0);
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/static_init3.cc b/lib/tsan/output_tests/static_init3.cc
new file mode 100644
index 0000000..718f811
--- /dev/null
+++ b/lib/tsan/output_tests/static_init3.cc
@@ -0,0 +1,46 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sched.h>
+
+struct Cache {
+ int x;
+};
+
+Cache g_cache;
+
+Cache *CreateCache() {
+ g_cache.x = rand();
+ return &g_cache;
+}
+
+_Atomic(Cache*) queue;
+
+void *Thread1(void *x) {
+ static Cache *c = CreateCache();
+ __c11_atomic_store(&queue, c, 0);
+ return 0;
+}
+
+void *Thread2(void *x) {
+ Cache *c = 0;
+ for (;;) {
+ c = __c11_atomic_load(&queue, 0);
+ if (c)
+ break;
+ sched_yield();
+ }
+ if (c->x >= RAND_MAX)
+ exit(1);
+ return 0;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], 0, Thread1, 0);
+ pthread_create(&t[1], 0, Thread2, 0);
+ pthread_join(t[0], 0);
+ pthread_join(t[1], 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/static_init4.cc b/lib/tsan/output_tests/static_init4.cc
new file mode 100644
index 0000000..cdacbce
--- /dev/null
+++ b/lib/tsan/output_tests/static_init4.cc
@@ -0,0 +1,35 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sched.h>
+
+struct Cache {
+ int x;
+ Cache(int x)
+ : x(x) {
+ }
+};
+
+int g_other;
+
+Cache *CreateCache() {
+ g_other = rand();
+ return new Cache(rand());
+}
+
+void *Thread1(void *x) {
+ static Cache *c = CreateCache();
+ if (c->x == g_other)
+ exit(1);
+ return 0;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], 0, Thread1, 0);
+ pthread_create(&t[1], 0, Thread1, 0);
+ pthread_join(t[0], 0);
+ pthread_join(t[1], 0);
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/static_init5.cc b/lib/tsan/output_tests/static_init5.cc
new file mode 100644
index 0000000..4b050c9
--- /dev/null
+++ b/lib/tsan/output_tests/static_init5.cc
@@ -0,0 +1,40 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sched.h>
+
+struct Cache {
+ int x;
+ Cache(int x)
+ : x(x) {
+ }
+};
+
+void *AsyncInit(void *p) {
+ return new Cache((int)(long)p);
+}
+
+Cache *CreateCache() {
+ pthread_t t;
+ pthread_create(&t, 0, AsyncInit, (void*)rand());
+ void *res;
+ pthread_join(t, &res);
+ return (Cache*)res;
+}
+
+void *Thread1(void *x) {
+ static Cache *c = CreateCache();
+ if (c->x >= RAND_MAX)
+ exit(1);
+ return 0;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], 0, Thread1, 0);
+ pthread_create(&t[1], 0, Thread1, 0);
+ pthread_join(t[0], 0);
+ pthread_join(t[1], 0);
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/suppress_same_address.cc b/lib/tsan/output_tests/suppress_same_address.cc
new file mode 100644
index 0000000..6e98970
--- /dev/null
+++ b/lib/tsan/output_tests/suppress_same_address.cc
@@ -0,0 +1,27 @@
+#include <pthread.h>
+
+int X;
+
+void *Thread1(void *x) {
+ X = 42;
+ X = 66;
+ X = 78;
+ return 0;
+}
+
+void *Thread2(void *x) {
+ X = 11;
+ X = 99;
+ X = 73;
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread1, 0);
+ Thread2(0);
+ pthread_join(t, 0);
+}
+
+// CHECK: ThreadSanitizer: reported 1 warnings
+
diff --git a/lib/tsan/output_tests/suppress_same_stacks.cc b/lib/tsan/output_tests/suppress_same_stacks.cc
new file mode 100644
index 0000000..6046a4e
--- /dev/null
+++ b/lib/tsan/output_tests/suppress_same_stacks.cc
@@ -0,0 +1,27 @@
+#include <pthread.h>
+
+volatile int N; // Prevent loop unrolling.
+int **data;
+
+void *Thread1(void *x) {
+ for (int i = 0; i < N; i++)
+ data[i][0] = 42;
+ return 0;
+}
+
+int main() {
+ N = 4;
+ data = new int*[N];
+ for (int i = 0; i < N; i++)
+ data[i] = new int;
+ pthread_t t;
+ pthread_create(&t, 0, Thread1, 0);
+ Thread1(0);
+ pthread_join(t, 0);
+ for (int i = 0; i < N; i++)
+ delete data[i];
+ delete[] data;
+}
+
+// CHECK: ThreadSanitizer: reported 1 warnings
+
diff --git a/lib/tsan/output_tests/test_output.sh b/lib/tsan/output_tests/test_output.sh
new file mode 100755
index 0000000..bd9cd91
--- /dev/null
+++ b/lib/tsan/output_tests/test_output.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+ulimit -s 8192
+set -e # fail on any error
+
+ROOTDIR=$(dirname $0)/..
+
+# Assuming clang is in path.
+CC=clang
+CXX=clang++
+
+# TODO: add testing for all of -O0...-O3
+CFLAGS="-fthread-sanitizer -fPIE -O1 -g -fno-builtin -Wall"
+LDFLAGS="-pie -lpthread -ldl $ROOTDIR/rtl/libtsan.a"
+
+test_file() {
+ SRC=$1
+ COMPILER=$2
+ echo ----- TESTING $(basename $1)
+ OBJ=$SRC.o
+ EXE=$SRC.exe
+ $COMPILER $SRC $CFLAGS -c -o $OBJ
+ $COMPILER $OBJ $LDFLAGS -o $EXE
+ RES=$(TSAN_OPTIONS="atexit_sleep_ms=0" $EXE 2>&1 || true)
+ if [ "$3" != "" ]; then
+ printf "%s\n" "$RES"
+ fi
+ printf "%s\n" "$RES" | FileCheck $SRC
+ if [ "$3" == "" ]; then
+ rm -f $EXE $OBJ
+ fi
+}
+
+if [ "$1" == "" ]; then
+ for c in $ROOTDIR/output_tests/*.{c,cc}; do
+ if [[ $c == */failing_* ]]; then
+ echo SKIPPING FAILING TEST $c
+ continue
+ fi
+ COMPILER=$CXX
+ case $c in
+ *.c) COMPILER=$CC
+ esac
+ test_file $c $COMPILER
+ done
+ wait
+else
+ test_file $ROOTDIR/output_tests/$1 $CXX "DUMP"
+fi
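For reference: invoked with no arguments, the script above builds and FileCheck-verifies every *.c/*.cc test under output_tests/ (skipping any failing_* tests and cleaning up the binaries afterwards); invoked with a single file name such as simple_race.c, it rebuilds just that test, dumps its raw output, and keeps the object file and binary for inspection. It assumes clang/clang++ on PATH and an already-built $ROOTDIR/rtl/libtsan.a.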
diff --git a/lib/tsan/output_tests/thread_leak.c b/lib/tsan/output_tests/thread_leak.c
new file mode 100644
index 0000000..88a11be
--- /dev/null
+++ b/lib/tsan/output_tests/thread_leak.c
@@ -0,0 +1,15 @@
+#include <pthread.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ pthread_join(t, 0);
+ return 0;
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: thread leak
+
diff --git a/lib/tsan/output_tests/thread_leak2.c b/lib/tsan/output_tests/thread_leak2.c
new file mode 100644
index 0000000..71e9c50
--- /dev/null
+++ b/lib/tsan/output_tests/thread_leak2.c
@@ -0,0 +1,15 @@
+#include <pthread.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ pthread_detach(t);
+ return 0;
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: thread leak
+
diff --git a/lib/tsan/output_tests/thread_leak3.c b/lib/tsan/output_tests/thread_leak3.c
new file mode 100644
index 0000000..058b6e5
--- /dev/null
+++ b/lib/tsan/output_tests/thread_leak3.c
@@ -0,0 +1,14 @@
+#include <pthread.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: thread leak
+
diff --git a/lib/tsan/output_tests/tiny_race.c b/lib/tsan/output_tests/tiny_race.c
new file mode 100644
index 0000000..3a8d192
--- /dev/null
+++ b/lib/tsan/output_tests/tiny_race.c
@@ -0,0 +1,14 @@
+#include <pthread.h>
+int Global;
+void *Thread1(void *x) {
+ Global = 42;
+ return x;
+}
+int main() {
+ pthread_t t;
+ pthread_create(&t, NULL, Thread1, NULL);
+ Global = 43;
+ pthread_join(t, NULL);
+ return Global;
+}
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/virtual_inheritance_compile_bug.cc b/lib/tsan/output_tests/virtual_inheritance_compile_bug.cc
new file mode 100644
index 0000000..fd2febe
--- /dev/null
+++ b/lib/tsan/output_tests/virtual_inheritance_compile_bug.cc
@@ -0,0 +1,13 @@
+// Regression test for http://code.google.com/p/thread-sanitizer/issues/detail?id=3.
+// The C++ variant is much more compact than the LLVM IR equivalent.
+#include <stdio.h>
+struct AAA { virtual long aaa () { return 0; } };
+struct BBB: virtual AAA { unsigned long bbb; };
+struct CCC: virtual AAA { };
+struct DDD: CCC, BBB { DDD (); };
+DDD::DDD() { }
+int main() {
+ DDD d;
+ printf("OK\n");
+}
+// CHECK: OK
diff --git a/lib/tsan/output_tests/vptr_benign_race.cc b/lib/tsan/output_tests/vptr_benign_race.cc
new file mode 100644
index 0000000..fec4ffb
--- /dev/null
+++ b/lib/tsan/output_tests/vptr_benign_race.cc
@@ -0,0 +1,50 @@
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdio.h>
+
+struct A {
+ A() {
+ sem_init(&sem_, 0, 0);
+ }
+ virtual void F() {
+ }
+ void Done() {
+ sem_post(&sem_);
+ }
+ virtual ~A() {
+ }
+ sem_t sem_;
+};
+
+struct B : A {
+ virtual void F() {
+ }
+ virtual ~B() {
+ sem_wait(&sem_);
+ sem_destroy(&sem_);
+ }
+};
+
+static A *obj = new B;
+
+void *Thread1(void *x) {
+ obj->F();
+ obj->Done();
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ delete obj;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ fprintf(stderr, "PASS\n");
+}
+// CHECK: PASS
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/output_tests/vptr_harmful_race.cc b/lib/tsan/output_tests/vptr_harmful_race.cc
new file mode 100644
index 0000000..a19e6ab
--- /dev/null
+++ b/lib/tsan/output_tests/vptr_harmful_race.cc
@@ -0,0 +1,48 @@
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdio.h>
+
+struct A {
+ A() {
+ sem_init(&sem_, 0, 0);
+ }
+ virtual void F() {
+ }
+ void Done() {
+ sem_post(&sem_);
+ }
+ virtual ~A() {
+ sem_wait(&sem_);
+ sem_destroy(&sem_);
+ }
+ sem_t sem_;
+};
+
+struct B : A {
+ virtual void F() {
+ }
+ virtual ~B() { }
+};
+
+static A *obj = new B;
+
+void *Thread1(void *x) {
+ obj->F();
+ obj->Done();
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ delete obj;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/rtl/Makefile.mk b/lib/tsan/rtl/Makefile.mk
new file mode 100644
index 0000000..d5d6327
--- /dev/null
+++ b/lib/tsan/rtl/Makefile.mk
@@ -0,0 +1,23 @@
+#===- lib/tsan/rtl/Makefile.mk -----------------------------*- Makefile -*--===#
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+
+ModuleName := tsan
+SubDirs :=
+
+Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
+AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
+ObjNames := $(Sources:%.cc=%.o) $(AsmSources:%.S=%.o)
+
+Implementation := Generic
+
+# FIXME: use automatic dependencies?
+Dependencies := $(wildcard $(Dir)/*.h)
+
+# Define a convenience variable for all the tsan functions.
+TsanFunctions += $(Sources:%.cc=%) $(AsmSources:%.S=%)
diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old
new file mode 100644
index 0000000..9b79f57
--- /dev/null
+++ b/lib/tsan/rtl/Makefile.old
@@ -0,0 +1,59 @@
+CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG)
+ifeq ($(DEBUG), 0)
+ CXXFLAGS += -O3
+endif
+
+# For interception. FIXME: move interception one level higher.
+INTERCEPTION=../../interception
+COMMON=../../sanitizer_common
+INCLUDES= -I../..
+EXTRA_CXXFLAGS=-fno-exceptions
+NO_SYSROOT=--sysroot=.
+CXXFLAGS+=$(EXTRA_CXXFLAGS)
+CXXFLAGS+=$(CFLAGS)
+ifeq ($(DEBUG), 0)
+ CXXFLAGS+=-fomit-frame-pointer
+ifeq ($(CXX), g++)
+ CXXFLAGS+=-Wframe-larger-than=512
+endif # CXX=g++
+endif # DEBUG=0
+
+ifeq ($(CXX), clang++)
+ # Global constructors are banned.
+ CXXFLAGS+=-Wglobal-constructors
+endif
+
+
+
+all: libtsan.a
+
+LIBTSAN_HEADERS=$(wildcard *.h) \
+ $(wildcard $(INTERCEPTION)/*.h) \
+ $(wildcard $(COMMON)/*.h)
+LIBTSAN_SRC=$(wildcard *.cc)
+LIBTSAN_ASM_SRC=$(wildcard *.S)
+INTERCEPTION_SRC=$(wildcard $(INTERCEPTION)/*.cc)
+COMMON_SRC=$(wildcard $(COMMON)/*.cc)
+
+LIBTSAN_OBJ=$(patsubst %.cc,%.o,$(LIBTSAN_SRC)) \
+ $(patsubst %.S,%.o,$(LIBTSAN_ASM_SRC)) \
+ $(patsubst $(INTERCEPTION)/%.cc,%.o,$(INTERCEPTION_SRC)) \
+ $(patsubst $(COMMON)/%.cc,%.o,$(COMMON_SRC))
+
+%_linux.o: %_linux.cc Makefile.old $(LIBTSAN_HEADERS)
+ $(CXX) $(CXXFLAGS) $(INCLUDES) -c $<
+%.o: %.cc Makefile.old $(LIBTSAN_HEADERS)
+ $(CXX) $(CXXFLAGS) $(INCLUDES) $(NO_SYSROOT) -c $<
+%.o: $(INTERCEPTION)/%.cc Makefile.old $(LIBTSAN_HEADERS)
+ $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+%.o: $(COMMON)/%.cc Makefile.old $(LIBTSAN_HEADERS)
+ $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+
+libtsan.a: $(LIBTSAN_OBJ)
+ ar ru $@ $(LIBTSAN_OBJ)
+
+libtsan_dummy.a: tsan_dummy_rtl.o
+ ar ru $@ $<
+
+clean:
+ rm -f *.o *.a
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
new file mode 100644
index 0000000..32ed91d
--- /dev/null
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -0,0 +1,118 @@
+//===-- tsan_clock.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_clock.h"
+#include "tsan_rtl.h"
+
+// It's possible to optimize clock operations for some important cases
+// so that they are O(1). The cases include singletons, once's, local mutexes.
+// First, SyncClock must be re-implemented to allow indexing by tid.
+// It must not necessarily be a full vector clock, though. For example it may
+// be a multi-level table.
+// Then, each slot in SyncClock must contain a dirty bit (it's combined with
+// the clock value, so no space increase). The acquire algorithm looks
+// as follows:
+// void acquire(thr, tid, thr_clock, sync_clock) {
+// if (!sync_clock[tid].dirty)
+// return; // No new info to acquire.
+// // This handles constant reads of singleton pointers and
+// // stop-flags.
+// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
+// sync_clock[tid].dirty = false;
+// sync_clock.dirty_count--;
+// }
+// The release operation looks as follows:
+// void release(thr, tid, thr_clock, sync_clock) {
+// // thr->sync_cache is a simple fixed-size hash-based cache that holds
+// // several previous sync_clock's.
+// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
+// // The thread did no acquire operations since last release on this clock.
+// // So update only the thread's slot (other slots can't possibly change).
+// sync_clock[tid].clock = thr->epoch;
+// if (sync_clock.dirty_count == sync_clock.cnt
+// || (sync_clock.dirty_count == sync_clock.cnt - 1
+// && sync_clock[tid].dirty == false))
+// // All dirty flags are set, bail out.
+// return;
+// set all dirty bits, but preserve the thread's bit. // O(N)
+// update sync_clock.dirty_count;
+// return;
+// }
+// release_impl(thr_clock, sync_clock); // As usual, O(N).
+// set all dirty bits, but preserve the thread's bit.
+// // The previous step is combined with release_impl(), so that
+// // we scan the arrays only once.
+// update sync_clock.dirty_count;
+// }
+
+namespace __tsan {
+
+ThreadClock::ThreadClock() {
+ nclk_ = 0;
+ for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
+ clk_[i] = 0;
+}
+
+void ThreadClock::acquire(const SyncClock *src) {
+ DCHECK(nclk_ <= kMaxTid);
+ DCHECK(src->clk_.Size() <= kMaxTid);
+
+ const uptr nclk = src->clk_.Size();
+ if (nclk == 0)
+ return;
+ nclk_ = max(nclk_, nclk);
+ for (uptr i = 0; i < nclk; i++) {
+ if (clk_[i] < src->clk_[i])
+ clk_[i] = src->clk_[i];
+ }
+}
+
+void ThreadClock::release(SyncClock *dst) const {
+ DCHECK(nclk_ <= kMaxTid);
+ DCHECK(dst->clk_.Size() <= kMaxTid);
+
+ if (dst->clk_.Size() < nclk_)
+ dst->clk_.Resize(nclk_);
+ for (uptr i = 0; i < nclk_; i++) {
+ if (dst->clk_[i] < clk_[i])
+ dst->clk_[i] = clk_[i];
+ }
+}
+
+void ThreadClock::ReleaseStore(SyncClock *dst) const {
+ DCHECK(nclk_ <= kMaxTid);
+ DCHECK(dst->clk_.Size() <= kMaxTid);
+
+ if (dst->clk_.Size() < nclk_)
+ dst->clk_.Resize(nclk_);
+ for (uptr i = 0; i < nclk_; i++)
+ dst->clk_[i] = clk_[i];
+ for (uptr i = nclk_; i < dst->clk_.Size(); i++)
+ dst->clk_[i] = 0;
+}
+
+void ThreadClock::acq_rel(SyncClock *dst) {
+ acquire(dst);
+ release(dst);
+}
+
+void ThreadClock::Disable(unsigned tid) {
+ u64 c0 = clk_[tid];
+ for (uptr i = 0; i < kMaxTidInClock; i++)
+ clk_[i] = (u64)-1;
+ clk_[tid] = c0;
+}
+
+SyncClock::SyncClock()
+ : clk_(MBlockClock) {
+}
+} // namespace __tsan
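ThreadClock::acquire() and release() above are plain vector-clock joins: an element-wise maximum of the source clock into the destination, growing the destination when needed (ReleaseStore() is the overwriting variant). A minimal, self-contained sketch of that merge, using std::vector<uint64_t> in place of the runtime's fixed clk_ array and Vector<u64> (the names and types below are illustrative only, not part of the import):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Simplified illustration of ThreadClock::acquire()/release(): both are
    // element-wise max merges of two vector clocks.
    typedef std::vector<uint64_t> Clock;

    void AcquireInto(Clock &thr_clk, const Clock &sync_clk) {
      if (thr_clk.size() < sync_clk.size())
        thr_clk.resize(sync_clk.size(), 0);
      for (size_t i = 0; i < sync_clk.size(); i++)
        thr_clk[i] = std::max(thr_clk[i], sync_clk[i]);
    }

    void ReleaseInto(const Clock &thr_clk, Clock &sync_clk) {
      if (sync_clk.size() < thr_clk.size())
        sync_clk.resize(thr_clk.size(), 0);
      for (size_t i = 0; i < thr_clk.size(); i++)
        sync_clk[i] = std::max(sync_clk[i], thr_clk[i]);
    }

    int main() {
      Clock thr = {3, 1};      // this thread saw epoch 3 of tid 0, epoch 1 of tid 1
      Clock sync = {2, 5, 7};  // clock stored in a mutex/atomic
      AcquireInto(thr, sync);  // e.g. on lock: thr becomes {3, 5, 7}
      assert(thr == (Clock{3, 5, 7}));
      ReleaseInto(thr, sync);  // e.g. on unlock: sync becomes {3, 5, 7}
      assert(sync == (Clock{3, 5, 7}));
      return 0;
    }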
diff --git a/lib/tsan/rtl/tsan_clock.h b/lib/tsan/rtl/tsan_clock.h
new file mode 100644
index 0000000..02ddb9a
--- /dev/null
+++ b/lib/tsan/rtl/tsan_clock.h
@@ -0,0 +1,82 @@
+//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_CLOCK_H
+#define TSAN_CLOCK_H
+
+#include "tsan_defs.h"
+#include "tsan_vector.h"
+
+namespace __tsan {
+
+// The clock that lives in sync variables (mutexes, atomics, etc).
+class SyncClock {
+ public:
+ SyncClock();
+
+ uptr size() const {
+ return clk_.Size();
+ }
+
+ void Reset() {
+ clk_.Reset();
+ }
+
+ private:
+ Vector<u64> clk_;
+ friend struct ThreadClock;
+};
+
+// The clock that lives in threads.
+struct ThreadClock {
+ public:
+ ThreadClock();
+
+ u64 get(unsigned tid) const {
+ DCHECK_LT(tid, kMaxTidInClock);
+ return clk_[tid];
+ }
+
+ void set(unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid]);
+ clk_[tid] = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ }
+
+ void tick(unsigned tid) {
+ DCHECK_LT(tid, kMaxTid);
+ clk_[tid]++;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ }
+
+ void Disable(unsigned tid);
+
+ uptr size() const {
+ return nclk_;
+ }
+
+ void acquire(const SyncClock *src);
+ void release(SyncClock *dst) const;
+ void acq_rel(SyncClock *dst);
+ void ReleaseStore(SyncClock *dst) const;
+
+ private:
+ uptr nclk_;
+ u64 clk_[kMaxTidInClock];
+};
+
+} // namespace __tsan
+
+#endif // TSAN_CLOCK_H
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
new file mode 100644
index 0000000..ca8f0ae
--- /dev/null
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -0,0 +1,139 @@
+//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_DEFS_H
+#define TSAN_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_stat.h"
+
+#ifndef TSAN_DEBUG
+#define TSAN_DEBUG 0
+#endif // TSAN_DEBUG
+
+namespace __tsan {
+
+const int kTidBits = 13;
+const unsigned kMaxTid = 1 << kTidBits;
+const unsigned kMaxTidInClock = kMaxTid * 2;  // This includes the msb 'freed' bit.
+const int kClkBits = 43;
+#ifndef TSAN_GO
+const int kShadowStackSize = 1024;
+#endif
+
+#ifdef TSAN_SHADOW_COUNT
+# if TSAN_SHADOW_COUNT == 2 \
+ || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8
+const unsigned kShadowCnt = TSAN_SHADOW_COUNT;
+# else
+# error "TSAN_SHADOW_COUNT must be one of 2,4,8"
+# endif
+#else
+// Count of shadow values in a shadow cell.
+const unsigned kShadowCnt = 8;
+#endif
+
+// That many user bytes are mapped onto a single shadow cell.
+const unsigned kShadowCell = 8;
+
+// Size of a single shadow value (u64).
+const unsigned kShadowSize = 8;
+
+#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
+const bool kCollectStats = true;
+#else
+const bool kCollectStats = false;
+#endif
+
+// The following "build consistency" machinery ensures that all source files
+// are built in the same configuration. Inconsistent builds lead to
+// hard-to-debug crashes.
+#if TSAN_DEBUG
+void build_consistency_debug();
+#else
+void build_consistency_release();
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats();
+#else
+void build_consistency_nostats();
+#endif
+
+#if TSAN_SHADOW_COUNT == 1
+void build_consistency_shadow1();
+#elif TSAN_SHADOW_COUNT == 2
+void build_consistency_shadow2();
+#elif TSAN_SHADOW_COUNT == 4
+void build_consistency_shadow4();
+#else
+void build_consistency_shadow8();
+#endif
+
+static inline void USED build_consistency() {
+#if TSAN_DEBUG
+ build_consistency_debug();
+#else
+ build_consistency_release();
+#endif
+#if TSAN_COLLECT_STATS
+ build_consistency_stats();
+#else
+ build_consistency_nostats();
+#endif
+#if TSAN_SHADOW_COUNT == 1
+ build_consistency_shadow1();
+#elif TSAN_SHADOW_COUNT == 2
+ build_consistency_shadow2();
+#elif TSAN_SHADOW_COUNT == 4
+ build_consistency_shadow4();
+#else
+ build_consistency_shadow8();
+#endif
+}
+
+template<typename T>
+T min(T a, T b) {
+ return a < b ? a : b;
+}
+
+template<typename T>
+T max(T a, T b) {
+ return a > b ? a : b;
+}
+
+template<typename T>
+T RoundUp(T p, int align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)(((u64)p + align - 1) & ~(align - 1));
+}
+
+struct MD5Hash {
+ u64 hash[2];
+ bool operator==(const MD5Hash &other) const;
+};
+
+MD5Hash md5_hash(const void *data, uptr size);
+
+struct ThreadState;
+struct ThreadContext;
+struct Context;
+struct ReportStack;
+class ReportDesc;
+class RegionAlloc;
+class StackTrace;
+
+} // namespace __tsan
+
+#endif // TSAN_DEFS_H
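Two quick sanity checks on the constants and helpers above, written as a stand-alone sketch (the restated names are illustrative, not part of the runtime): with the default kShadowCell = 8, kShadowCnt = 8 and kShadowSize = 8, every 8 application bytes are described by 8 shadow values of 8 bytes each, so shadow memory is 8x the application range it covers; and RoundUp() rounds a pointer or size up to a power-of-two alignment (as pvalloc() does with kPageSize later in this import).

    #include <cassert>
    #include <cstdint>

    // Stand-alone restatement of the tsan_defs.h defaults and RoundUp().
    const unsigned kShadowCell = 8;  // app bytes per shadow cell
    const unsigned kShadowCnt  = 8;  // shadow values per cell
    const unsigned kShadowSize = 8;  // bytes per shadow value

    template<typename T>
    T RoundUp(T p, int align) {
      return (T)(((uint64_t)p + align - 1) & ~(uint64_t)(align - 1));
    }

    int main() {
      // Shadow-to-application memory ratio: 8 * 8 / 8 = 8x.
      assert(kShadowCnt * kShadowSize / kShadowCell == 8);
      assert(RoundUp<uint64_t>(13, 8) == 16);      // rounds up
      assert(RoundUp<uint64_t>(16, 8) == 16);      // already aligned
      assert(RoundUp<uint64_t>(1, 4096) == 4096);  // page rounding, cf. pvalloc
      return 0;
    }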
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
new file mode 100644
index 0000000..8f91939
--- /dev/null
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -0,0 +1,79 @@
+//===-- tsan_flags.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+Flags *flags() {
+ return &CTX()->flags;
+}
+
+// Can be overridden in the frontend.
+#ifdef TSAN_EXTERNAL_HOOKS
+void OverrideFlags(Flags *f);
+#else
+void WEAK OverrideFlags(Flags *f) {
+ (void)f;
+}
+#endif
+
+void InitializeFlags(Flags *f, const char *env) {
+ internal_memset(f, 0, sizeof(*f));
+
+ // Default values.
+ f->enable_annotations = true;
+ f->suppress_equal_stacks = true;
+ f->suppress_equal_addresses = true;
+ f->report_thread_leaks = true;
+ f->report_signal_unsafe = true;
+ f->force_seq_cst_atomics = false;
+ f->strip_path_prefix = "";
+ f->suppressions = "";
+ f->exitcode = 66;
+ f->log_fileno = 2;
+ f->atexit_sleep_ms = 1000;
+ f->verbosity = 0;
+ f->profile_memory = "";
+ f->flush_memory_ms = 0;
+ f->stop_on_start = false;
+ f->running_on_valgrind = false;
+ f->use_internal_symbolizer = false;
+
+ // Let a frontend override.
+ OverrideFlags(f);
+
+ // Override from command line.
+ ParseFlag(env, &f->enable_annotations, "enable_annotations");
+ ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
+ ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
+ ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
+ ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
+ ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
+ ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
+ ParseFlag(env, &f->suppressions, "suppressions");
+ ParseFlag(env, &f->exitcode, "exitcode");
+ ParseFlag(env, &f->log_fileno, "log_fileno");
+ ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
+ ParseFlag(env, &f->verbosity, "verbosity");
+ ParseFlag(env, &f->profile_memory, "profile_memory");
+ ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
+ ParseFlag(env, &f->stop_on_start, "stop_on_start");
+ ParseFlag(env, &f->use_internal_symbolizer, "use_internal_symbolizer");
+}
+
+} // namespace __tsan
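For reference, the function above is driven by a plain option string: defaults are set first, OverrideFlags() lets a frontend adjust them, and each ParseFlag() call then overrides a single field from the string (the runtime takes that string from the TSAN_OPTIONS environment variable, as test_output.sh does with atexit_sleep_ms=0). A minimal sketch, assuming the space-separated name=value form used in the tests; ExampleInitFlags() itself is illustrative and not part of the import:

    #include "tsan_flags.h"

    // Parse an option string into a Flags object the same way the runtime
    // parses TSAN_OPTIONS (illustrative only).
    void ExampleInitFlags() {
      __tsan::Flags f;
      __tsan::InitializeFlags(&f, "report_thread_leaks=0 atexit_sleep_ms=0");
      // After parsing: f.report_thread_leaks == false, f.atexit_sleep_ms == 0;
      // every other field keeps the default set in InitializeFlags().
    }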
diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h
new file mode 100644
index 0000000..c22132f
--- /dev/null
+++ b/lib/tsan/rtl/tsan_flags.h
@@ -0,0 +1,71 @@
+//===-- tsan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FLAGS_H
+#define TSAN_FLAGS_H
+
+// ----------- ATTENTION -------------
+// A ThreadSanitizer user may provide their own implementation of the weak
+// symbol __tsan::OverrideFlags(__tsan::Flags*). Therefore, this
+// header may be included in user code and shouldn't include
+// other headers from TSan or the common sanitizer runtime.
+
+namespace __tsan {
+
+struct Flags {
+ // Enable dynamic annotations, otherwise they are no-ops.
+ bool enable_annotations;
+  // Suppress a race report if we've already output another race report
+ // with the same stack.
+ bool suppress_equal_stacks;
+  // Suppress a race report if we've already output another race report
+ // on the same address.
+ bool suppress_equal_addresses;
+ // Report thread leaks at exit?
+ bool report_thread_leaks;
+ // Report violations of async signal-safety
+ // (e.g. malloc() call from a signal handler).
+ bool report_signal_unsafe;
+ // If set, all atomics are effectively sequentially consistent (seq_cst),
+ // regardless of what user actually specified.
+ bool force_seq_cst_atomics;
+ // Strip that prefix from file paths in reports.
+ const char *strip_path_prefix;
+ // Suppressions filename.
+ const char *suppressions;
+ // Override exit status if something was reported.
+ int exitcode;
+ // Log fileno (1 - stdout, 2 - stderr).
+ int log_fileno;
+ // Sleep in main thread before exiting for that many ms
+ // (useful to catch "at exit" races).
+ int atexit_sleep_ms;
+ // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
+ int verbosity;
+ // If set, periodically write memory profile to that file.
+ const char *profile_memory;
+ // Flush shadow memory every X ms.
+ int flush_memory_ms;
+ // Stops on start until __tsan_resume() is called (for debugging).
+ bool stop_on_start;
+ // Controls whether RunningOnValgrind() returns true or false.
+ bool running_on_valgrind;
+ // If set, uses in-process symbolizer from common sanitizer runtime.
+ bool use_internal_symbolizer;
+};
+
+Flags *flags();
+void InitializeFlags(Flags *flags, const char *env);
+}
+
+#endif // TSAN_FLAGS_H
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
new file mode 100644
index 0000000..a962250
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -0,0 +1,1596 @@
+//===-- tsan_interceptors.cc ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_interface.h"
+#include "tsan_platform.h"
+#include "tsan_mman.h"
+
+using namespace __tsan; // NOLINT
+
+const int kSigCount = 128;
+
+struct my_siginfo_t {
+ int opaque[128];
+};
+
+struct sigset_t {
+ u64 val[1024 / 8 / sizeof(u64)];
+};
+
+struct ucontext_t {
+ uptr opaque[117];
+};
+
+extern "C" int pthread_attr_init(void *attr);
+extern "C" int pthread_attr_destroy(void *attr);
+extern "C" int pthread_attr_getdetachstate(void *attr, int *v);
+extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_attr_getstacksize(void *attr, uptr *stacksize);
+extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+extern "C" int pthread_setspecific(unsigned key, const void *v);
+extern "C" int pthread_mutexattr_gettype(void *a, int *type);
+extern "C" int pthread_yield();
+extern "C" int pthread_sigmask(int how, const sigset_t *set, sigset_t *oldset);
+extern "C" int sigfillset(sigset_t *set);
+extern "C" void *pthread_self();
+extern "C" void _exit(int status);
+extern "C" int __cxa_atexit(void (*func)(void *arg), void *arg, void *dso);
+extern "C" int *__errno_location();
+extern "C" int usleep(unsigned usec);
+const int PTHREAD_MUTEX_RECURSIVE = 1;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+const int kPthreadAttrSize = 56;
+const int EINVAL = 22;
+const int EBUSY = 16;
+const int EPOLL_CTL_ADD = 1;
+const int SIGILL = 4;
+const int SIGABRT = 6;
+const int SIGFPE = 8;
+const int SIGSEGV = 11;
+const int SIGPIPE = 13;
+const int SIGBUS = 7;
+void *const MAP_FAILED = (void*)-1;
+const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
+const int MAP_FIXED = 0x10;
+typedef long long_t; // NOLINT
+
+typedef void (*sighandler_t)(int sig);
+
+#define errno (*__errno_location())
+
+union pthread_attr_t {
+ char size[kPthreadAttrSize];
+ void *align;
+};
+
+struct sigaction_t {
+ union {
+ sighandler_t sa_handler;
+ void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
+ };
+ sigset_t sa_mask;
+ int sa_flags;
+ void (*sa_restorer)();
+};
+
+const sighandler_t SIG_DFL = (sighandler_t)0;
+const sighandler_t SIG_IGN = (sighandler_t)1;
+const sighandler_t SIG_ERR = (sighandler_t)-1;
+const int SA_SIGINFO = 4;
+const int SIG_SETMASK = 2;
+
+static sigaction_t sigactions[kSigCount];
+
+namespace __tsan {
+struct SignalDesc {
+ bool armed;
+ bool sigaction;
+ my_siginfo_t siginfo;
+ ucontext_t ctx;
+};
+
+struct SignalContext {
+ int int_signal_send;
+ int pending_signal_count;
+ SignalDesc pending_signals[kSigCount];
+};
+}
+
+static SignalContext *SigCtx(ThreadState *thr) {
+ SignalContext *ctx = (SignalContext*)thr->signal_ctx;
+ if (ctx == 0 && thr->is_alive) {
+ ScopedInRtl in_rtl;
+ ctx = (SignalContext*)internal_alloc(
+ MBlockSignal, sizeof(*ctx));
+ MemoryResetRange(thr, 0, (uptr)ctx, sizeof(*ctx));
+ internal_memset(ctx, 0, sizeof(*ctx));
+ thr->signal_ctx = ctx;
+ }
+ return ctx;
+}
+
+static unsigned g_thread_finalize_key;
+
+static void process_pending_signals(ThreadState *thr);
+
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc)
+ : thr_(thr)
+ , in_rtl_(thr->in_rtl) {
+ if (thr_->in_rtl == 0) {
+ Initialize(thr);
+ FuncEntry(thr, pc);
+ thr_->in_rtl++;
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
+ } else {
+ thr_->in_rtl++;
+ }
+ }
+
+ ~ScopedInterceptor() {
+ thr_->in_rtl--;
+ if (thr_->in_rtl == 0) {
+ FuncExit(thr_);
+ process_pending_signals(thr_);
+ }
+ CHECK_EQ(in_rtl_, thr_->in_rtl);
+ }
+
+ private:
+ ThreadState *const thr_;
+ const int in_rtl_;
+};
+
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread(); \
+ StatInc(thr, StatInterceptor); \
+ StatInc(thr, StatInt_##func); \
+ ScopedInterceptor si(thr, #func, \
+ (__sanitizer::uptr)__builtin_return_address(0)); \
+ const uptr pc = (uptr)&func; \
+ (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ if (thr->in_rtl > 1) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define SCOPED_INTERCEPTOR_LIBC(func, ...) \
+ ThreadState *thr = cur_thread(); \
+ StatInc(thr, StatInterceptor); \
+ StatInc(thr, StatInt_##func); \
+ ScopedInterceptor si(thr, #func, callpc); \
+ const uptr pc = (uptr)&func; \
+ (void)pc; \
+ if (thr->in_rtl > 1) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+
+// May be overridden by the front-end.
+extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+extern "C" void WEAK __tsan_free_hook(void *ptr) {
+ (void)ptr;
+}
+
+static void invoke_malloc_hook(void *ptr, uptr size) {
+ Context *ctx = CTX();
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ return;
+ __tsan_malloc_hook(ptr, size);
+}
+
+static void invoke_free_hook(void *ptr) {
+ Context *ctx = CTX();
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ return;
+ __tsan_free_hook(ptr);
+}
+
+class AtExitContext {
+ public:
+ AtExitContext()
+ : mtx_(MutexTypeAtExit, StatMtxAtExit)
+ , pos_() {
+ }
+
+ typedef void(*atexit_t)();
+
+ int atexit(ThreadState *thr, uptr pc, atexit_t f) {
+ Lock l(&mtx_);
+ if (pos_ == kMaxAtExit)
+ return 1;
+ Release(thr, pc, (uptr)this);
+ stack_[pos_] = f;
+ pos_++;
+ return 0;
+ }
+
+ void exit(ThreadState *thr, uptr pc) {
+ CHECK_EQ(thr->in_rtl, 0);
+ for (;;) {
+ atexit_t f = 0;
+ {
+ Lock l(&mtx_);
+ if (pos_) {
+ pos_--;
+ f = stack_[pos_];
+ ScopedInRtl in_rtl;
+ Acquire(thr, pc, (uptr)this);
+ }
+ }
+ if (f == 0)
+ break;
+ DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
+ CHECK_EQ(thr->in_rtl, 0);
+ f();
+ }
+ }
+
+ private:
+ static const int kMaxAtExit = 128;
+ Mutex mtx_;
+ atexit_t stack_[kMaxAtExit];
+ int pos_;
+};
+
+static AtExitContext *atexit_ctx;
+
+static void finalize(void *arg) {
+ ThreadState * thr = cur_thread();
+ uptr pc = 0;
+ atexit_ctx->exit(thr, pc);
+ {
+ ScopedInRtl in_rtl;
+ DestroyAndFree(atexit_ctx);
+ usleep(flags()->atexit_sleep_ms * 1000);
+ }
+ int status = Finalize(cur_thread());
+ if (status)
+ _exit(status);
+}
+
+TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
+ SCOPED_TSAN_INTERCEPTOR(atexit, f);
+  return atexit_ctx->atexit(thr, pc, f);
+}
+
+TSAN_INTERCEPTOR(void, longjmp, void *env, int val) {
+ SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
+ TsanPrintf("ThreadSanitizer: longjmp() is not supported\n");
+ Die();
+}
+
+TSAN_INTERCEPTOR(void, siglongjmp, void *env, int val) {
+ SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
+ TsanPrintf("ThreadSanitizer: siglongjmp() is not supported\n");
+ Die();
+}
+
+static uptr fd2addr(int fd) {
+ (void)fd;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+static uptr epollfd2addr(int fd) {
+ (void)fd;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+static uptr file2addr(char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+static uptr dir2addr(char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+TSAN_INTERCEPTOR(void*, malloc, uptr size) {
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(malloc, size);
+ p = user_alloc(thr, pc, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(calloc, size, n);
+ p = user_alloc(thr, pc, n * size);
+ internal_memset(p, 0, n * size);
+ }
+ invoke_malloc_hook(p, n * size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(realloc, p, size);
+ p = user_realloc(thr, pc, p, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void, free, void *p) {
+ if (p == 0)
+ return;
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(void, cfree, void *p) {
+ if (p == 0)
+ return;
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(cfree, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
+ SCOPED_TSAN_INTERCEPTOR(strlen, s);
+ uptr len = internal_strlen(s);
+ MemoryAccessRange(thr, pc, (uptr)s, len + 1, false);
+ return len;
+}
+
+TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
+ SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
+ MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ return internal_memset(dst, v, size);
+}
+
+TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
+ SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
+ MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ MemoryAccessRange(thr, pc, (uptr)src, size, false);
+ return internal_memcpy(dst, src, size);
+}
+
+TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memcmp, s1, s2, n);
+ int res = 0;
+ uptr len = 0;
+ for (; len < n; len++) {
+ if ((res = ((unsigned char*)s1)[len] - ((unsigned char*)s2)[len]))
+ break;
+ }
+ MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false);
+ MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
+ SCOPED_TSAN_INTERCEPTOR(strcmp, s1, s2);
+ uptr len = 0;
+ for (; s1[len] && s2[len]; len++) {
+ if (s1[len] != s2[len])
+ break;
+ }
+ MemoryAccessRange(thr, pc, (uptr)s1, len + 1, false);
+ MemoryAccessRange(thr, pc, (uptr)s2, len + 1, false);
+ return s1[len] - s2[len];
+}
+
+TSAN_INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(strncmp, s1, s2, n);
+ uptr len = 0;
+ for (; len < n && s1[len] && s2[len]; len++) {
+ if (s1[len] != s2[len])
+ break;
+ }
+ MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false);
+ MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false);
+ return len == n ? 0 : s1[len] - s2[len];
+}
+
+TSAN_INTERCEPTOR(void*, memchr, void *s, int c, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memchr, s, c, n);
+ void *res = REAL(memchr)(s, c, n);
+ uptr len = res ? (char*)res - (char*)s + 1 : n;
+ MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, memrchr, char *s, int c, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memrchr, s, c, n);
+ MemoryAccessRange(thr, pc, (uptr)s, n, false);
+ return REAL(memrchr)(s, c, n);
+}
+
+TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, n, false);
+ return REAL(memmove)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(char*, strchr, char *s, int c) {
+ SCOPED_TSAN_INTERCEPTOR(strchr, s, c);
+ char *res = REAL(strchr)(s, c);
+ uptr len = res ? (char*)res - (char*)s + 1 : internal_strlen(s) + 1;
+ MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) {
+ SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c);
+ char *res = REAL(strchrnul)(s, c);
+ uptr len = (char*)res - (char*)s + 1;
+ MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) {
+ SCOPED_TSAN_INTERCEPTOR(strrchr, s, c);
+ MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false);
+ return REAL(strrchr)(s, c);
+}
+
+TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
+ SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
+ uptr srclen = internal_strlen(src);
+ MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
+ MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
+ return REAL(strcpy)(dst, src); // NOLINT
+}
+
+TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
+ uptr srclen = internal_strnlen(src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
+ return REAL(strncpy)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(const char*, strstr, const char *s1, const char *s2) {
+ SCOPED_TSAN_INTERCEPTOR(strstr, s1, s2);
+ const char *res = REAL(strstr)(s1, s2);
+ uptr len1 = internal_strlen(s1);
+ uptr len2 = internal_strlen(s2);
+ MemoryAccessRange(thr, pc, (uptr)s1, len1 + 1, false);
+ MemoryAccessRange(thr, pc, (uptr)s2, len2 + 1, false);
+ return res;
+}
+
+static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
+ if (*addr) {
+ if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
+ if (flags & MAP_FIXED) {
+ errno = EINVAL;
+ return false;
+ } else {
+ *addr = 0;
+ }
+ }
+ }
+ return true;
+}
+
+TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
+ int flags, int fd, unsigned off) {
+ SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
+ if (!fix_mmap_addr(&addr, sz, flags))
+ return MAP_FAILED;
+ void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ MemoryResetRange(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
+ int flags, int fd, u64 off) {
+ SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
+ if (!fix_mmap_addr(&addr, sz, flags))
+ return MAP_FAILED;
+ void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ MemoryResetRange(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+ int res = REAL(munmap)(addr, sz);
+ return res;
+}
+
+#ifdef __LP64__
+
+// void *operator new(size_t)
+TSAN_INTERCEPTOR(void*, _Znwm, uptr sz) {
+ void *p = 0;
+ {
+ SCOPED_TSAN_INTERCEPTOR(_Znwm, sz);
+ p = user_alloc(thr, pc, sz);
+ }
+ invoke_malloc_hook(p, sz);
+ return p;
+}
+
+// void *operator new(size_t, nothrow_t)
+TSAN_INTERCEPTOR(void*, _ZnwmRKSt9nothrow_t, uptr sz) {
+ void *p = 0;
+ {
+ SCOPED_TSAN_INTERCEPTOR(_ZnwmRKSt9nothrow_t, sz);
+ p = user_alloc(thr, pc, sz);
+ }
+ invoke_malloc_hook(p, sz);
+ return p;
+}
+
+// void *operator new[](size_t)
+TSAN_INTERCEPTOR(void*, _Znam, uptr sz) {
+ void *p = 0;
+ {
+ SCOPED_TSAN_INTERCEPTOR(_Znam, sz);
+ p = user_alloc(thr, pc, sz);
+ }
+ invoke_malloc_hook(p, sz);
+ return p;
+}
+
+// void *operator new[](size_t, nothrow_t)
+TSAN_INTERCEPTOR(void*, _ZnamRKSt9nothrow_t, uptr sz) {
+ void *p = 0;
+ {
+ SCOPED_TSAN_INTERCEPTOR(_ZnamRKSt9nothrow_t, sz);
+ p = user_alloc(thr, pc, sz);
+ }
+ invoke_malloc_hook(p, sz);
+ return p;
+}
+
+#else
+#error "Not implemented"
+#endif
+
+// void operator delete(void*)
+TSAN_INTERCEPTOR(void, _ZdlPv, void *p) {
+ if (p == 0)
+ return;
+ invoke_free_hook(p);
+ SCOPED_TSAN_INTERCEPTOR(_ZdlPv, p);
+ user_free(thr, pc, p);
+}
+
+// void operator delete(void*, nothrow_t)
+TSAN_INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *p) {
+ if (p == 0)
+ return;
+ invoke_free_hook(p);
+ SCOPED_TSAN_INTERCEPTOR(_ZdlPvRKSt9nothrow_t, p);
+ user_free(thr, pc, p);
+}
+
+// void operator delete[](void*)
+TSAN_INTERCEPTOR(void, _ZdaPv, void *p) {
+ if (p == 0)
+ return;
+ invoke_free_hook(p);
+ SCOPED_TSAN_INTERCEPTOR(_ZdaPv, p);
+ user_free(thr, pc, p);
+}
+
+// void operator delete[](void*, nothrow_t)
+TSAN_INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *p) {
+ if (p == 0)
+ return;
+ invoke_free_hook(p);
+ SCOPED_TSAN_INTERCEPTOR(_ZdaPvRKSt9nothrow_t, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(memalign, align, sz);
+ return user_alloc_aligned(thr, pc, sz, align);
+}
+
+TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(valloc, sz);
+ return user_alloc_aligned(thr, pc, sz, kPageSize);
+}
+
+TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(pvalloc, sz);
+ sz = RoundUp(sz, kPageSize);
+ return user_alloc_aligned(thr, pc, sz, kPageSize);
+}
+
+TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(posix_memalign, memptr, align, sz);
+ *memptr = user_alloc_aligned(thr, pc, sz, align);
+ return 0;
+}
+
+// Used in thread-safe initialization of function-local statics.
+TSAN_INTERCEPTOR(int, __cxa_guard_acquire, char *m) {
+ SCOPED_TSAN_INTERCEPTOR(__cxa_guard_acquire, m);
+ int res = REAL(__cxa_guard_acquire)(m);
+ if (res) {
+ // This thread does the init.
+ } else {
+ Acquire(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void, __cxa_guard_release, char *m) {
+ SCOPED_TSAN_INTERCEPTOR(__cxa_guard_release, m);
+ Release(thr, pc, (uptr)m);
+ REAL(__cxa_guard_release)(m);
+}
+
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+ TsanPrintf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ return;
+ }
+ {
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+ ThreadFinish(thr);
+ SignalContext *sctx = thr->signal_ctx;
+ if (sctx) {
+ thr->signal_ctx = 0;
+ internal_free(sctx);
+ }
+ }
+}
+
+
+struct ThreadParam {
+ void* (*callback)(void *arg);
+ void *param;
+ atomic_uintptr_t tid;
+};
+
+extern "C" void *__tsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ int tid = 0;
+ {
+ ThreadState *thr = cur_thread();
+ ScopedInRtl in_rtl;
+ if (pthread_setspecific(g_thread_finalize_key, (void*)4)) {
+ TsanPrintf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+ pthread_yield();
+ atomic_store(&p->tid, 0, memory_order_release);
+ ThreadStart(thr, tid);
+ CHECK_EQ(thr->in_rtl, 1);
+ }
+ void *res = callback(param);
+  // Prevent the callback from being tail called,
+  // because that mixes up stack traces.
+ volatile int foo = 42;
+ foo++;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_create,
+ void *th, void *attr, void *(*callback)(void*), void * param) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_create, th, attr, callback, param);
+ pthread_attr_t myattr;
+ if (attr == 0) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ int detached = 0;
+ pthread_attr_getdetachstate(attr, &detached);
+ uptr stacksize = 0;
+ pthread_attr_getstacksize(attr, &stacksize);
+  // We place the huge ThreadState object into TLS; account for that.
+ const uptr minstacksize = GetTlsSize() + 128*1024;
+ if (stacksize < minstacksize) {
+ DPrintf("ThreadSanitizer: stacksize %zu->%zu\n", stacksize, minstacksize);
+ pthread_attr_setstacksize(attr, minstacksize);
+ }
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ atomic_store(&p.tid, 0, memory_order_relaxed);
+ int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ if (res == 0) {
+ int tid = ThreadCreate(cur_thread(), pc, *(uptr*)th, detached);
+ CHECK_NE(tid, 0);
+ atomic_store(&p.tid, tid, memory_order_release);
+ while (atomic_load(&p.tid, memory_order_acquire) != 0)
+ pthread_yield();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_join, th, ret);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_join)(th, ret);
+ if (res == 0) {
+ ThreadJoin(cur_thread(), pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(cur_thread(), pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
+ int res = REAL(pthread_mutex_init)(m, a);
+ if (res == 0) {
+ bool recursive = false;
+ if (a) {
+ int type = 0;
+ if (pthread_mutexattr_gettype(a, &type) == 0)
+ recursive = (type == PTHREAD_MUTEX_RECURSIVE
+ || type == PTHREAD_MUTEX_RECURSIVE_NP);
+ }
+ MutexCreate(cur_thread(), pc, (uptr)m, false, recursive);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+ int res = REAL(pthread_mutex_destroy)(m);
+ if (res == 0 || res == EBUSY) {
+ MutexDestroy(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
+ int res = REAL(pthread_mutex_lock)(m);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+ int res = REAL(pthread_mutex_trylock)(m);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+ int res = REAL(pthread_mutex_timedlock)(m, abstime);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
+ MutexUnlock(cur_thread(), pc, (uptr)m);
+ int res = REAL(pthread_mutex_unlock)(m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+ int res = REAL(pthread_spin_init)(m, pshared);
+ if (res == 0) {
+ MutexCreate(cur_thread(), pc, (uptr)m, false, false);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+ int res = REAL(pthread_spin_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ int res = REAL(pthread_spin_lock)(m);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+ int res = REAL(pthread_spin_trylock)(m);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
+ MutexUnlock(cur_thread(), pc, (uptr)m);
+ int res = REAL(pthread_spin_unlock)(m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+ int res = REAL(pthread_rwlock_init)(m, a);
+ if (res == 0) {
+ MutexCreate(cur_thread(), pc, (uptr)m, true, false);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+ int res = REAL(pthread_rwlock_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ int res = REAL(pthread_rwlock_rdlock)(m);
+ if (res == 0) {
+ MutexReadLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+ int res = REAL(pthread_rwlock_tryrdlock)(m);
+ if (res == 0) {
+ MutexReadLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
+ if (res == 0) {
+ MutexReadLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ int res = REAL(pthread_rwlock_wrlock)(m);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+ int res = REAL(pthread_rwlock_trywrlock)(m);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
+ if (res == 0) {
+ MutexLock(cur_thread(), pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
+ MutexReadOrWriteUnlock(cur_thread(), pc, (uptr)m);
+ int res = REAL(pthread_rwlock_unlock)(m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, c, a);
+ int res = REAL(pthread_cond_init)(c, a);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
+ int res = REAL(pthread_cond_destroy)(c);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, c);
+ int res = REAL(pthread_cond_signal)(c);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, c);
+ int res = REAL(pthread_cond_broadcast)(c);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, c, m);
+ MutexUnlock(cur_thread(), pc, (uptr)m);
+ int res = REAL(pthread_cond_wait)(c, m);
+ MutexLock(cur_thread(), pc, (uptr)m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime);
+ MutexUnlock(cur_thread(), pc, (uptr)m);
+ int res = REAL(pthread_cond_timedwait)(c, m, abstime);
+ MutexLock(cur_thread(), pc, (uptr)m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
+ MemoryWrite1Byte(thr, pc, (uptr)b);
+ int res = REAL(pthread_barrier_init)(b, a, count);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
+ MemoryWrite1Byte(thr, pc, (uptr)b);
+ int res = REAL(pthread_barrier_destroy)(b);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
+ Release(cur_thread(), pc, (uptr)b);
+ MemoryRead1Byte(thr, pc, (uptr)b);
+ int res = REAL(pthread_barrier_wait)(b);
+ MemoryRead1Byte(thr, pc, (uptr)b);
+ if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
+ Acquire(cur_thread(), pc, (uptr)b);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_once, o, f);
+ if (o == 0 || f == 0)
+ return EINVAL;
+ atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
+ memory_order_relaxed)) {
+ const int old_in_rtl = thr->in_rtl;
+ thr->in_rtl = 0;
+ (*f)();
+ CHECK_EQ(thr->in_rtl, 0);
+ thr->in_rtl = old_in_rtl;
+ Release(cur_thread(), pc, (uptr)o);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ pthread_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ Acquire(cur_thread(), pc, (uptr)o);
+ }
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, sem_init, void *s, int pshared, unsigned value) {
+ SCOPED_TSAN_INTERCEPTOR(sem_init, s, pshared, value);
+ int res = REAL(sem_init)(s, pshared, value);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_destroy, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_destroy, s);
+ int res = REAL(sem_destroy)(s);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_wait, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_wait, s);
+ int res = REAL(sem_wait)(s);
+ if (res == 0) {
+ Acquire(cur_thread(), pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_trywait, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_trywait, s);
+ int res = REAL(sem_trywait)(s);
+ if (res == 0) {
+ Acquire(cur_thread(), pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime);
+ int res = REAL(sem_timedwait)(s, abstime);
+ if (res == 0) {
+ Acquire(cur_thread(), pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_post, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_post, s);
+ Release(cur_thread(), pc, (uptr)s);
+ int res = REAL(sem_post)(s);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
+ SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval);
+ int res = REAL(sem_getvalue)(s, sval);
+ if (res == 0) {
+ Acquire(cur_thread(), pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, read, int fd, void *buf, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(read, fd, buf, sz);
+ int res = REAL(read)(fd, buf, sz);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, pread, int fd, void *buf, long_t sz, unsigned off) {
+ SCOPED_TSAN_INTERCEPTOR(pread, fd, buf, sz, off);
+ int res = REAL(pread)(fd, buf, sz, off);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, pread64, int fd, void *buf, long_t sz, u64 off) {
+ SCOPED_TSAN_INTERCEPTOR(pread64, fd, buf, sz, off);
+ int res = REAL(pread64)(fd, buf, sz, off);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) {
+ SCOPED_TSAN_INTERCEPTOR(readv, fd, vec, cnt);
+ int res = REAL(readv)(fd, vec, cnt);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, preadv64, int fd, void *vec, int cnt, u64 off) {
+ SCOPED_TSAN_INTERCEPTOR(preadv64, fd, vec, cnt, off);
+ int res = REAL(preadv64)(fd, vec, cnt, off);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, write, int fd, void *buf, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(write, fd, buf, sz);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(write)(fd, buf, sz);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, pwrite, int fd, void *buf, long_t sz, unsigned off) {
+ SCOPED_TSAN_INTERCEPTOR(pwrite, fd, buf, sz, off);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(pwrite)(fd, buf, sz, off);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, pwrite64, int fd, void *buf, long_t sz, unsigned off) {
+ SCOPED_TSAN_INTERCEPTOR(pwrite64, fd, buf, sz, off);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(pwrite64)(fd, buf, sz, off);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, writev, int fd, void *vec, int cnt) {
+ SCOPED_TSAN_INTERCEPTOR(writev, fd, vec, cnt);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(writev)(fd, vec, cnt);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, pwritev64, int fd, void *vec, int cnt, u64 off) {
+ SCOPED_TSAN_INTERCEPTOR(pwritev64, fd, vec, cnt, off);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(pwritev64)(fd, vec, cnt, off);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(send)(fd, buf, len, flags);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags);
+ Release(cur_thread(), pc, fd2addr(fd));
+ int res = REAL(sendmsg)(fd, msg, flags);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags);
+ int res = REAL(recv)(fd, buf, len, flags);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, recvmsg, int fd, void *msg, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(recvmsg, fd, msg, flags);
+ int res = REAL(recvmsg)(fd, msg, flags);
+ if (res >= 0) {
+ Acquire(cur_thread(), pc, fd2addr(fd));
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, unlink, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(unlink, path);
+ Release(cur_thread(), pc, file2addr(path));
+ int res = REAL(unlink)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
+ SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
+ void *res = REAL(fopen)(path, mode);
+ Acquire(cur_thread(), pc, file2addr(path));
+ return res;
+}
+
+TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
+ SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
+ MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
+ return REAL(fread)(ptr, size, nmemb, f);
+}
+
+TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
+ SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
+ MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
+ return REAL(fwrite)(p, size, nmemb, f);
+}
+
+TSAN_INTERCEPTOR(int, puts, const char *s) {
+ SCOPED_TSAN_INTERCEPTOR(puts, s);
+ MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
+ return REAL(puts)(s);
+}
+
+TSAN_INTERCEPTOR(int, rmdir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(rmdir, path);
+ Release(cur_thread(), pc, dir2addr(path));
+ int res = REAL(rmdir)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, opendir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(opendir, path);
+ void *res = REAL(opendir)(path);
+ Acquire(cur_thread(), pc, dir2addr(path));
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
+ if (op == EPOLL_CTL_ADD) {
+ Release(cur_thread(), pc, epollfd2addr(epfd));
+ }
+ int res = REAL(epoll_ctl)(epfd, op, fd, ev);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
+ int res = REAL(epoll_wait)(epfd, ev, cnt, timeout);
+ if (res > 0) {
+ Acquire(cur_thread(), pc, epollfd2addr(epfd));
+ }
+ return res;
+}
+
+static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
+ my_siginfo_t *info, void *ctx) {
+ ThreadState *thr = cur_thread();
+ SignalContext *sctx = SigCtx(thr);
+ // Don't mess with synchronous signals.
+ if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE ||
+ (sctx && sig == sctx->int_signal_send)) {
+ CHECK(thr->in_rtl == 0 || thr->in_rtl == 1);
+ int in_rtl = thr->in_rtl;
+ thr->in_rtl = 0;
+ CHECK_EQ(thr->in_signal_handler, false);
+ thr->in_signal_handler = true;
+ if (sigact)
+ sigactions[sig].sa_sigaction(sig, info, ctx);
+ else
+ sigactions[sig].sa_handler(sig);
+ CHECK_EQ(thr->in_signal_handler, true);
+ thr->in_signal_handler = false;
+ thr->in_rtl = in_rtl;
+ return;
+ }
+
+ if (sctx == 0)
+ return;
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed == false) {
+ signal->armed = true;
+ signal->sigaction = sigact;
+ if (info)
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ if (ctx)
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ sctx->pending_signal_count++;
+ }
+}
+
+static void rtl_sighandler(int sig) {
+ rtl_generic_sighandler(false, sig, 0, 0);
+}
+
+static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
+ rtl_generic_sighandler(true, sig, info, ctx);
+}
+
+TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
+ SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old);
+ if (old)
+ internal_memcpy(old, &sigactions[sig], sizeof(*old));
+ if (act == 0)
+ return 0;
+ internal_memcpy(&sigactions[sig], act, sizeof(*act));
+ sigaction_t newact;
+ internal_memcpy(&newact, act, sizeof(newact));
+ sigfillset(&newact.sa_mask);
+ if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
+ if (newact.sa_flags & SA_SIGINFO)
+ newact.sa_sigaction = rtl_sigaction;
+ else
+ newact.sa_handler = rtl_sighandler;
+ }
+ int res = REAL(sigaction)(sig, &newact, 0);
+ return res;
+}
+
+TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
+ sigaction_t act = {};
+ act.sa_handler = h;
+ REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask));
+ act.sa_flags = 0;
+ sigaction_t old = {};
+ int res = sigaction(sig, &act, &old);
+ if (res)
+ return SIG_ERR;
+ return old.sa_handler;
+}
+
+TSAN_INTERCEPTOR(int, raise, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(raise, sig);
+ SignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ sctx->int_signal_send = sig;
+ int res = REAL(raise)(sig);
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
+ SignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (pid == GetPid()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(kill)(pid, sig);
+ if (pid == GetPid()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
+ SignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (tid == pthread_self()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(pthread_kill)(tid, sig);
+ if (tid == pthread_self()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+static void process_pending_signals(ThreadState *thr) {
+ CHECK_EQ(thr->in_rtl, 0);
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
+ return;
+ thr->in_signal_handler = true;
+ sctx->pending_signal_count = 0;
+  // These are too big for the stack.
+ static THREADLOCAL sigset_t emptyset, oldset;
+ sigfillset(&emptyset);
+ pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ if (sigactions[sig].sa_handler != SIG_DFL
+ && sigactions[sig].sa_handler != SIG_IGN) {
+        // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 0;
+ if (signal->sigaction)
+ sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx);
+ else
+ sigactions[sig].sa_handler(sig);
+ if (errno != 0) {
+ ScopedInRtl in_rtl;
+ StackTrace stack;
+ uptr pc = signal->sigaction ?
+ (uptr)sigactions[sig].sa_sigaction :
+ (uptr)sigactions[sig].sa_handler;
+ stack.Init(&pc, 1);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ rep.AddStack(&stack);
+ OutputReport(rep, rep.GetReport()->stacks[0]);
+ }
+ errno = saved_errno;
+ }
+ }
+ }
+ pthread_sigmask(SIG_SETMASK, &oldset, 0);
+ CHECK_EQ(thr->in_signal_handler, true);
+ thr->in_signal_handler = false;
+}
+
+namespace __tsan {
+
+void InitializeInterceptors() {
+ CHECK_GT(cur_thread()->in_rtl, 0);
+
+  // We need to set these up early, because functions like dlsym() can call them.
+ REAL(memset) = internal_memset;
+ REAL(memcpy) = internal_memcpy;
+ REAL(memcmp) = internal_memcmp;
+
+ TSAN_INTERCEPT(longjmp);
+ TSAN_INTERCEPT(siglongjmp);
+
+ TSAN_INTERCEPT(malloc);
+ TSAN_INTERCEPT(calloc);
+ TSAN_INTERCEPT(realloc);
+ TSAN_INTERCEPT(free);
+ TSAN_INTERCEPT(cfree);
+ TSAN_INTERCEPT(mmap);
+ TSAN_INTERCEPT(mmap64);
+ TSAN_INTERCEPT(munmap);
+ TSAN_INTERCEPT(memalign);
+ TSAN_INTERCEPT(valloc);
+ TSAN_INTERCEPT(pvalloc);
+ TSAN_INTERCEPT(posix_memalign);
+
+ TSAN_INTERCEPT(_Znwm);
+ TSAN_INTERCEPT(_ZnwmRKSt9nothrow_t);
+ TSAN_INTERCEPT(_Znam);
+ TSAN_INTERCEPT(_ZnamRKSt9nothrow_t);
+ TSAN_INTERCEPT(_ZdlPv);
+ TSAN_INTERCEPT(_ZdlPvRKSt9nothrow_t);
+ TSAN_INTERCEPT(_ZdaPv);
+ TSAN_INTERCEPT(_ZdaPvRKSt9nothrow_t);
+
+ TSAN_INTERCEPT(strlen);
+ TSAN_INTERCEPT(memset);
+ TSAN_INTERCEPT(memcpy);
+ TSAN_INTERCEPT(strcmp);
+ TSAN_INTERCEPT(memchr);
+ TSAN_INTERCEPT(memrchr);
+ TSAN_INTERCEPT(memmove);
+ TSAN_INTERCEPT(memcmp);
+ TSAN_INTERCEPT(strchr);
+ TSAN_INTERCEPT(strchrnul);
+ TSAN_INTERCEPT(strrchr);
+ TSAN_INTERCEPT(strncmp);
+ TSAN_INTERCEPT(strcpy); // NOLINT
+ TSAN_INTERCEPT(strncpy);
+ TSAN_INTERCEPT(strstr);
+
+ TSAN_INTERCEPT(__cxa_guard_acquire);
+ TSAN_INTERCEPT(__cxa_guard_release);
+
+ TSAN_INTERCEPT(pthread_create);
+ TSAN_INTERCEPT(pthread_join);
+ TSAN_INTERCEPT(pthread_detach);
+
+ TSAN_INTERCEPT(pthread_mutex_init);
+ TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_lock);
+ TSAN_INTERCEPT(pthread_mutex_trylock);
+ TSAN_INTERCEPT(pthread_mutex_timedlock);
+ TSAN_INTERCEPT(pthread_mutex_unlock);
+
+ TSAN_INTERCEPT(pthread_spin_init);
+ TSAN_INTERCEPT(pthread_spin_destroy);
+ TSAN_INTERCEPT(pthread_spin_lock);
+ TSAN_INTERCEPT(pthread_spin_trylock);
+ TSAN_INTERCEPT(pthread_spin_unlock);
+
+ TSAN_INTERCEPT(pthread_rwlock_init);
+ TSAN_INTERCEPT(pthread_rwlock_destroy);
+ TSAN_INTERCEPT(pthread_rwlock_rdlock);
+ TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_wrlock);
+ TSAN_INTERCEPT(pthread_rwlock_trywrlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
+ TSAN_INTERCEPT(pthread_rwlock_unlock);
+
+ TSAN_INTERCEPT(pthread_cond_init);
+ TSAN_INTERCEPT(pthread_cond_destroy);
+ TSAN_INTERCEPT(pthread_cond_signal);
+ TSAN_INTERCEPT(pthread_cond_broadcast);
+ TSAN_INTERCEPT(pthread_cond_wait);
+ TSAN_INTERCEPT(pthread_cond_timedwait);
+
+ TSAN_INTERCEPT(pthread_barrier_init);
+ TSAN_INTERCEPT(pthread_barrier_destroy);
+ TSAN_INTERCEPT(pthread_barrier_wait);
+
+ TSAN_INTERCEPT(pthread_once);
+
+ TSAN_INTERCEPT(sem_init);
+ TSAN_INTERCEPT(sem_destroy);
+ TSAN_INTERCEPT(sem_wait);
+ TSAN_INTERCEPT(sem_trywait);
+ TSAN_INTERCEPT(sem_timedwait);
+ TSAN_INTERCEPT(sem_post);
+ TSAN_INTERCEPT(sem_getvalue);
+
+ TSAN_INTERCEPT(read);
+ TSAN_INTERCEPT(pread);
+ TSAN_INTERCEPT(pread64);
+ TSAN_INTERCEPT(readv);
+ TSAN_INTERCEPT(preadv64);
+ TSAN_INTERCEPT(write);
+ TSAN_INTERCEPT(pwrite);
+ TSAN_INTERCEPT(pwrite64);
+ TSAN_INTERCEPT(writev);
+ TSAN_INTERCEPT(pwritev64);
+ TSAN_INTERCEPT(send);
+ TSAN_INTERCEPT(sendmsg);
+ TSAN_INTERCEPT(recv);
+ TSAN_INTERCEPT(recvmsg);
+
+ TSAN_INTERCEPT(unlink);
+ TSAN_INTERCEPT(fopen);
+ TSAN_INTERCEPT(fread);
+ TSAN_INTERCEPT(fwrite);
+ TSAN_INTERCEPT(puts);
+ TSAN_INTERCEPT(rmdir);
+ TSAN_INTERCEPT(opendir);
+
+ TSAN_INTERCEPT(epoll_ctl);
+ TSAN_INTERCEPT(epoll_wait);
+
+ TSAN_INTERCEPT(sigaction);
+ TSAN_INTERCEPT(signal);
+ TSAN_INTERCEPT(raise);
+ TSAN_INTERCEPT(kill);
+ TSAN_INTERCEPT(pthread_kill);
+
+ atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
+ AtExitContext();
+
+ if (__cxa_atexit(&finalize, 0, 0)) {
+    TsanPrintf("ThreadSanitizer: failed to set up atexit callback\n");
+ Die();
+ }
+
+ if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+ TsanPrintf("ThreadSanitizer: failed to create thread key\n");
+ Die();
+ }
+}
+
+void internal_start_thread(void(*func)(void *arg), void *arg) {
+ void *th;
+ REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
+ REAL(pthread_detach)(th);
+}
+
+} // namespace __tsan
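Editorial note: the pthread_once interceptor above implements a small three-state machine over the once-control word, 0 (not started), 1 (initializer running), 2 (done), with a release by the winner and acquire loads by the waiters so that TSan also records the happens-before edge. A self-contained sketch of the same state machine written with standard atomics (illustrative only, not part of this import):

    #include <atomic>
    #include <sched.h>

    // 0 = not run, 1 = initializer running, 2 = done (mirrors the values
    // stored by the interceptor above).
    static std::atomic<unsigned> once_state{0};

    void call_once_sketch(void (*init)()) {
      unsigned v = once_state.load(std::memory_order_acquire);
      if (v == 0 &&
          once_state.compare_exchange_strong(v, 1, std::memory_order_relaxed)) {
        init();                                           // the winner runs init
        once_state.store(2, std::memory_order_release);   // publish completion
      } else {
        while (once_state.load(std::memory_order_acquire) != 2)
          sched_yield();                                   // losers wait for "done"
      }
    }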
diff --git a/lib/tsan/rtl/tsan_interface.cc b/lib/tsan/rtl/tsan_interface.cc
new file mode 100644
index 0000000..6d09546
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface.cc
@@ -0,0 +1,42 @@
+//===-- tsan_interface.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+void __tsan_init() {
+ Initialize(cur_thread());
+}
+
+void __tsan_read16(void *addr) {
+ MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr);
+ MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr + 8);
+}
+
+void __tsan_write16(void *addr) {
+ MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr);
+ MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr + 8);
+}
+
+void __tsan_acquire(void *addr) {
+ Acquire(cur_thread(), CALLERPC, (uptr)addr);
+}
+
+void __tsan_release(void *addr) {
+ Release(cur_thread(), CALLERPC, (uptr)addr);
+}
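Editorial note: __tsan_acquire and __tsan_release above simply forward to the runtime's Acquire/Release on the given address. A hypothetical sketch of how a program with a hand-rolled publication flag could declare the happens-before edge itself; the flag-based handoff and all names below are illustrative assumptions, not part of this import:

    extern "C" void __tsan_acquire(void *addr);
    extern "C" void __tsan_release(void *addr);

    int payload;
    volatile int ready;   // hand-rolled flag that TSan cannot see through

    void producer() {
      payload = 42;
      __tsan_release((void *)&ready);   // "everything before this is published"
      ready = 1;
    }

    void consumer() {
      while (!ready) { }
      __tsan_acquire((void *)&ready);   // "observe what was published above"
      (void)payload;                    // the payload read no longer races
      // Note: the plain accesses to "ready" itself may still be reported
      // unless they are additionally annotated (e.g. as a benign race).
    }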
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
new file mode 100644
index 0000000..ed21ec6
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -0,0 +1,51 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Calls to the functions declared in this header are inserted by the
+// compiler instrumentation module.
+// This header can be included by the instrumented program or by TSan tests.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_H
+#define TSAN_INTERFACE_H
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+void __tsan_init();
+
+void __tsan_read1(void *addr);
+void __tsan_read2(void *addr);
+void __tsan_read4(void *addr);
+void __tsan_read8(void *addr);
+void __tsan_read16(void *addr);
+
+void __tsan_write1(void *addr);
+void __tsan_write2(void *addr);
+void __tsan_write4(void *addr);
+void __tsan_write8(void *addr);
+void __tsan_write16(void *addr);
+
+void __tsan_vptr_update(void **vptr_p, void *new_val);
+
+void __tsan_func_entry(void *call_pc);
+void __tsan_func_exit();
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_H
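Editorial note: as the header comment says, calls to these functions are emitted by the instrumentation pass around the program's own memory accesses. A hypothetical C-level rendering of what that looks like for a single 4-byte store (the real pass works on LLVM IR; this is only for orientation):

    extern "C" void __tsan_func_entry(void *call_pc);
    extern "C" void __tsan_func_exit();
    extern "C" void __tsan_write4(void *addr);

    void set_flag(int *p) {
      __tsan_func_entry(__builtin_return_address(0));  // record the caller PC
      __tsan_write4(p);                                // shadow update for *p
      *p = 1;                                          // the original store
      __tsan_func_exit();
    }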
diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc
new file mode 100644
index 0000000..a605b6c
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface_ann.cc
@@ -0,0 +1,352 @@
+//===-- tsan_interface_ann.cc ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_interface_ann.h"
+#include "tsan_mutex.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_platform.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+namespace __tsan {
+
+class ScopedAnnotation {
+ public:
+ ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
+ uptr pc)
+ : thr_(thr)
+ , in_rtl_(thr->in_rtl) {
+ CHECK_EQ(thr_->in_rtl, 0);
+ FuncEntry(thr_, pc);
+ thr_->in_rtl++;
+ DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
+ }
+
+ ~ScopedAnnotation() {
+ thr_->in_rtl--;
+ CHECK_EQ(in_rtl_, thr_->in_rtl);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *const thr_;
+ const int in_rtl_;
+};
+
+#define SCOPED_ANNOTATION(typ) \
+ if (!flags()->enable_annotations) \
+ return; \
+ ThreadState *thr = cur_thread(); \
+ StatInc(thr, StatAnnotation); \
+ StatInc(thr, Stat##typ); \
+ ScopedAnnotation sa(thr, __FUNCTION__, f, l, \
+ (uptr)__builtin_return_address(0)); \
+ const uptr pc = (uptr)&__FUNCTION__; \
+ (void)pc; \
+/**/
+
+static const int kMaxDescLen = 128;
+
+struct ExpectRace {
+ ExpectRace *next;
+ ExpectRace *prev;
+ int hitcount;
+ uptr addr;
+ uptr size;
+ char *file;
+ int line;
+ char desc[kMaxDescLen];
+};
+
+struct DynamicAnnContext {
+ Mutex mtx;
+ ExpectRace expect;
+ ExpectRace benign;
+
+ DynamicAnnContext()
+ : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
+ }
+};
+
+static DynamicAnnContext *dyn_ann_ctx;
+static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
+
+static void AddExpectRace(ExpectRace *list,
+ char *f, int l, uptr addr, uptr size, char *desc) {
+ ExpectRace *race = list->next;
+ for (; race != list; race = race->next) {
+ if (race->addr == addr && race->size == size)
+ return;
+ }
+ race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+ race->hitcount = 0;
+ race->addr = addr;
+ race->size = size;
+ race->file = f;
+ race->line = l;
+ race->desc[0] = 0;
+ if (desc) {
+ int i = 0;
+ for (; i < kMaxDescLen - 1 && desc[i]; i++)
+ race->desc[i] = desc[i];
+ race->desc[i] = 0;
+ }
+ race->prev = list;
+ race->next = list->next;
+ race->next->prev = race;
+ list->next = race;
+}
+
+static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ uptr maxbegin = max(race->addr, addr);
+ uptr minend = min(race->addr + race->size, addr + size);
+ if (maxbegin < minend)
+ return race;
+ }
+ return 0;
+}
+
+static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
+ ExpectRace *race = FindRace(list, addr, size);
+ if (race == 0 && AlternativeAddress(addr))
+ race = FindRace(list, AlternativeAddress(addr), size);
+ if (race == 0)
+ return false;
+ DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
+ race->desc, race->addr, (int)race->size, race->file, race->line);
+ race->hitcount++;
+ return true;
+}
+
+static void InitList(ExpectRace *list) {
+ list->next = list;
+ list->prev = list;
+}
+
+void InitializeDynamicAnnotations() {
+ dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
+ InitList(&dyn_ann_ctx->expect);
+ InitList(&dyn_ann_ctx->benign);
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ if (CheckContains(&dyn_ann_ctx->expect, addr, size))
+ return true;
+ if (CheckContains(&dyn_ann_ctx->benign, addr, size))
+ return true;
+ return false;
+}
+
+} // namespace __tsan
+
+using namespace __tsan; // NOLINT
+
+extern "C" {
+void AnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+ Release(cur_thread(), CALLERPC, addr);
+}
+
+void AnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+ Acquire(cur_thread(), CALLERPC, addr);
+}
+
+void AnnotateCondVarSignal(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignal);
+}
+
+void AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
+}
+
+void AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
+}
+
+void AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) {
+ SCOPED_ANNOTATION(AnnotateCondVarWait);
+}
+
+void AnnotateRWLockCreate(char *f, int l, uptr lock) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreate);
+}
+
+void AnnotateRWLockDestroy(char *f, int l, uptr lock) {
+ SCOPED_ANNOTATION(AnnotateRWLockDestroy);
+}
+
+void AnnotateRWLockAcquired(char *f, int l, uptr lock, uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockAcquired);
+}
+
+void AnnotateRWLockReleased(char *f, int l, uptr lock, uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockReleased);
+}
+
+void AnnotateTraceMemory(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateTraceMemory);
+}
+
+void AnnotateFlushState(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushState);
+}
+
+void AnnotateNewMemory(char *f, int l, uptr mem, uptr size) {
+ SCOPED_ANNOTATION(AnnotateNewMemory);
+}
+
+void AnnotateNoOp(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateNoOp);
+}
+
+static void ReportMissedExpectedRace(ExpectRace *race) {
+ TsanPrintf("==================\n");
+ TsanPrintf("WARNING: ThreadSanitizer: missed expected data race\n");
+ TsanPrintf(" %s addr=%zx %s:%d\n",
+ race->desc, race->addr, race->file, race->line);
+ TsanPrintf("==================\n");
+}
+
+void AnnotateFlushExpectedRaces(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
+ Lock lock(&dyn_ann_ctx->mtx);
+ while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
+ ExpectRace *race = dyn_ann_ctx->expect.next;
+ if (race->hitcount == 0) {
+ CTX()->nmissed_expected++;
+ ReportMissedExpectedRace(race);
+ }
+ race->prev->next = race->next;
+ race->next->prev = race->prev;
+ internal_free(race);
+ }
+}
+
+void AnnotateEnableRaceDetection(char *f, int l, int enable) {
+ SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
+ // FIXME: Reconsider this functionality later. It may be irrelevant.
+}
+
+void AnnotateMutexIsUsedAsCondVar(char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
+}
+
+void AnnotatePCQGet(char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQGet);
+}
+
+void AnnotatePCQPut(char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQPut);
+}
+
+void AnnotatePCQDestroy(char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQDestroy);
+}
+
+void AnnotatePCQCreate(char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQCreate);
+}
+
+void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateExpectRace);
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->expect,
+ f, l, mem, 1, desc);
+ DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->benign,
+ f, l, mem, size, desc);
+ DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
+void AnnotateBenignRaceSized(char *f, int l, uptr mem, uptr size, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, size, desc);
+}
+
+void AnnotateBenignRace(char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRace);
+ BenignRaceImpl(f, l, mem, 1, desc);
+}
+
+void AnnotateIgnoreReadsBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
+ IgnoreCtl(cur_thread(), false, true);
+}
+
+void AnnotateIgnoreReadsEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
+ IgnoreCtl(cur_thread(), false, false);
+}
+
+void AnnotateIgnoreWritesBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
+ IgnoreCtl(cur_thread(), true, true);
+}
+
+void AnnotateIgnoreWritesEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
+ IgnoreCtl(cur_thread(), true, false);
+}
+
+void AnnotatePublishMemoryRange(char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
+}
+
+void AnnotateUnpublishMemoryRange(char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
+}
+
+void AnnotateThreadName(char *f, int l, char *name) {
+ SCOPED_ANNOTATION(AnnotateThreadName);
+}
+
+void WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+}
+
+void WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+}
+
+void WTFAnnotateBenignRaceSized(char *f, int l, uptr mem, uptr sz, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+}
+
+int RunningOnValgrind() {
+ return flags()->running_on_valgrind;
+}
+
+double __attribute__((weak)) ValgrindSlowdown(void) {
+ return 10.0;
+}
+
+const char *ThreadSanitizerQuery(const char *query) {
+ if (internal_strcmp(query, "pure_happens_before") == 0)
+ return "1";
+ else
+ return "0";
+}
+} // extern "C"
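Editorial note: client code normally reaches these annotation entry points through ANNOTATE_* macros from a dynamic-annotations header that supply __FILE__ and __LINE__; that wrapper layer is an assumption here and is not part of this file. Called directly, declaring a happens-before edge on a hand-off location would look roughly like this (uptr is written as unsigned long purely for the sketch):

    extern "C" void AnnotateHappensBefore(char *f, int l, unsigned long addr);
    extern "C" void AnnotateHappensAfter(char *f, int l, unsigned long addr);

    void *slot;   // handed from one thread to another by custom code

    void publisher() {
      AnnotateHappensBefore((char *)__FILE__, __LINE__, (unsigned long)&slot);
    }

    void subscriber() {
      AnnotateHappensAfter((char *)__FILE__, __LINE__, (unsigned long)&slot);
    }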
diff --git a/lib/tsan/rtl/tsan_interface_ann.h b/lib/tsan/rtl/tsan_interface_ann.h
new file mode 100644
index 0000000..09e807a
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface_ann.h
@@ -0,0 +1,31 @@
+//===-- tsan_interface_ann.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for dynamic annotations.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ANN_H
+#define TSAN_INTERFACE_ANN_H
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void __tsan_acquire(void *addr);
+void __tsan_release(void *addr);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ANN_H
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
new file mode 100644
index 0000000..a3982a1
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -0,0 +1,321 @@
+//===-- tsan_interface_atomic.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_interface_atomic.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan; // NOLINT
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
+ : thr_(thr) {
+ CHECK_EQ(thr_->in_rtl, 1); // 1 due to our own ScopedInRtl member.
+ DPrintf("#%d: %s\n", thr_->tid, func);
+ }
+ ~ScopedAtomic() {
+ CHECK_EQ(thr_->in_rtl, 1);
+ }
+ private:
+ ThreadState *thr_;
+ ScopedInRtl in_rtl_;
+};
+
+// Some shortcuts.
+typedef __tsan_memory_order morder;
+typedef __tsan_atomic8 a8;
+typedef __tsan_atomic16 a16;
+typedef __tsan_atomic32 a32;
+typedef __tsan_atomic64 a64;
+const int mo_relaxed = __tsan_memory_order_relaxed;
+const int mo_consume = __tsan_memory_order_consume;
+const int mo_acquire = __tsan_memory_order_acquire;
+const int mo_release = __tsan_memory_order_release;
+const int mo_acq_rel = __tsan_memory_order_acq_rel;
+const int mo_seq_cst = __tsan_memory_order_seq_cst;
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : StatAtomic8);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+
+#define SCOPED_ATOMIC(func, ...) \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ const uptr pc = (uptr)__builtin_return_address(0); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, pc, __FUNCTION__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+template<typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
+ morder mo) {
+ CHECK(mo & (mo_relaxed | mo_consume | mo_acquire | mo_seq_cst));
+ T v = *a;
+ if (mo & (mo_consume | mo_acquire | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ return v;
+}
+
+template<typename T>
+static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ CHECK(mo & (mo_relaxed | mo_release | mo_seq_cst));
+ if (mo & (mo_release | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ *a = v;
+}
+
+template<typename T>
+static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ v = __sync_lock_test_and_set(a, v);
+ if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ return v;
+}
+
+template<typename T>
+static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ v = __sync_fetch_and_add(a, v);
+ if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ return v;
+}
+
+template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ v = __sync_fetch_and_and(a, v);
+ if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ return v;
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ v = __sync_fetch_and_or(a, v);
+ if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ return v;
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ v = __sync_fetch_and_xor(a, v);
+ if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ return v;
+}
+
+template<typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T *c, T v, morder mo) {
+ if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+ Release(thr, pc, (uptr)a);
+ T cc = *c;
+ T pr = __sync_val_compare_and_swap(a, cc, v);
+ if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+ Acquire(thr, pc, (uptr)a);
+ if (pr == cc)
+ return true;
+ *c = pr;
+ return false;
+}
+
+static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
+ __sync_synchronize();
+}
+
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+void __tsan_atomic_thread_fence(morder mo) {
+  char* a;  // unused except for sizeof(*a) inside SCOPED_ATOMIC
+ SCOPED_ATOMIC(Fence, mo);
+}
diff --git a/lib/tsan/rtl/tsan_interface_atomic.h b/lib/tsan/rtl/tsan_interface_atomic.h
new file mode 100644
index 0000000..dff32b1
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface_atomic.h
@@ -0,0 +1,121 @@
+//===-- tsan_interface_atomic.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+typedef enum {
+ __tsan_memory_order_relaxed = 1 << 0,
+ __tsan_memory_order_consume = 1 << 1,
+ __tsan_memory_order_acquire = 1 << 2,
+ __tsan_memory_order_release = 1 << 3,
+ __tsan_memory_order_acq_rel = 1 << 4,
+ __tsan_memory_order_seq_cst = 1 << 5,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+ __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h
new file mode 100644
index 0000000..233f902
--- /dev/null
+++ b/lib/tsan/rtl/tsan_interface_inl.h
@@ -0,0 +1,65 @@
+//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+void __tsan_read1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 0);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 0);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 0);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 0);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 1);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 1);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 1);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 1);
+}
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ if (*vptr_p != new_val)
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, 3, 1);
+}
+
+void __tsan_func_entry(void *pc) {
+ FuncEntry(cur_thread(), (uptr)pc);
+}
+
+void __tsan_func_exit() {
+ FuncExit(cur_thread());
+}
diff --git a/lib/tsan/rtl/tsan_md5.cc b/lib/tsan/rtl/tsan_md5.cc
new file mode 100644
index 0000000..c9d671f
--- /dev/null
+++ b/lib/tsan/rtl/tsan_md5.cc
@@ -0,0 +1,245 @@
+//===-- tsan_md5.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+
+#define STEP(f, a, b, c, d, x, t, s) \
+ (a) += f((b), (c), (d)) + (x) + (t); \
+ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
+ (a) += (b);
+
+#define SET(n) \
+ (*(MD5_u32plus *)&ptr[(n) * 4])
+#define GET(n) \
+ SET(n)
+
+typedef unsigned int MD5_u32plus;
+typedef unsigned long ulong_t; // NOLINT
+
+typedef struct {
+ MD5_u32plus lo, hi;
+ MD5_u32plus a, b, c, d;
+ unsigned char buffer[64];
+ MD5_u32plus block[16];
+} MD5_CTX;
+
+static void *body(MD5_CTX *ctx, void *data, ulong_t size) {
+ unsigned char *ptr;
+ MD5_u32plus a, b, c, d;
+ MD5_u32plus saved_a, saved_b, saved_c, saved_d;
+
+ ptr = (unsigned char*)data;
+
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+ d = ctx->d;
+
+ do {
+ saved_a = a;
+ saved_b = b;
+ saved_c = c;
+ saved_d = d;
+
+ STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
+ STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
+ STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
+ STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
+ STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
+ STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
+ STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
+ STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
+ STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
+ STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
+ STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
+ STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
+ STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
+ STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
+ STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
+ STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+
+ STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
+ STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
+ STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
+ STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
+ STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
+ STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
+ STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
+ STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
+ STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
+ STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
+ STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
+ STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
+ STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
+ STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
+ STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
+ STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+
+ STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
+ STEP(H, d, a, b, c, GET(8), 0x8771f681, 11)
+ STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
+ STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23)
+ STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
+ STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11)
+ STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
+ STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23)
+ STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
+ STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11)
+ STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
+ STEP(H, b, c, d, a, GET(6), 0x04881d05, 23)
+ STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
+ STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11)
+ STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
+ STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23)
+
+ STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
+ STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
+ STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
+ STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
+ STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
+ STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
+ STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
+ STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
+ STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
+ STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
+ STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
+ STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
+ STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
+ STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
+ STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
+ STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+
+ a += saved_a;
+ b += saved_b;
+ c += saved_c;
+ d += saved_d;
+
+ ptr += 64;
+ } while (size -= 64);
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+ ctx->d = d;
+
+ return ptr;
+}
+
+void MD5_Init(MD5_CTX *ctx) {
+ ctx->a = 0x67452301;
+ ctx->b = 0xefcdab89;
+ ctx->c = 0x98badcfe;
+ ctx->d = 0x10325476;
+
+ ctx->lo = 0;
+ ctx->hi = 0;
+}
+
+void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) {
+ MD5_u32plus saved_lo;
+ ulong_t used, free;
+
+ saved_lo = ctx->lo;
+ if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
+ ctx->hi++;
+ ctx->hi += size >> 29;
+
+ used = saved_lo & 0x3f;
+
+ if (used) {
+ free = 64 - used;
+
+ if (size < free) {
+ internal_memcpy(&ctx->buffer[used], data, size);
+ return;
+ }
+
+ internal_memcpy(&ctx->buffer[used], data, free);
+ data = (unsigned char *)data + free;
+ size -= free;
+ body(ctx, ctx->buffer, 64);
+ }
+
+ if (size >= 64) {
+ data = body(ctx, data, size & ~(ulong_t)0x3f);
+ size &= 0x3f;
+ }
+
+ internal_memcpy(ctx->buffer, data, size);
+}
+
+void MD5_Final(unsigned char *result, MD5_CTX *ctx) {
+ ulong_t used, free;
+
+ used = ctx->lo & 0x3f;
+
+ ctx->buffer[used++] = 0x80;
+
+ free = 64 - used;
+
+ if (free < 8) {
+ internal_memset(&ctx->buffer[used], 0, free);
+ body(ctx, ctx->buffer, 64);
+ used = 0;
+ free = 64;
+ }
+
+ internal_memset(&ctx->buffer[used], 0, free - 8);
+
+ ctx->lo <<= 3;
+ ctx->buffer[56] = ctx->lo;
+ ctx->buffer[57] = ctx->lo >> 8;
+ ctx->buffer[58] = ctx->lo >> 16;
+ ctx->buffer[59] = ctx->lo >> 24;
+ ctx->buffer[60] = ctx->hi;
+ ctx->buffer[61] = ctx->hi >> 8;
+ ctx->buffer[62] = ctx->hi >> 16;
+ ctx->buffer[63] = ctx->hi >> 24;
+
+ body(ctx, ctx->buffer, 64);
+
+ result[0] = ctx->a;
+ result[1] = ctx->a >> 8;
+ result[2] = ctx->a >> 16;
+ result[3] = ctx->a >> 24;
+ result[4] = ctx->b;
+ result[5] = ctx->b >> 8;
+ result[6] = ctx->b >> 16;
+ result[7] = ctx->b >> 24;
+ result[8] = ctx->c;
+ result[9] = ctx->c >> 8;
+ result[10] = ctx->c >> 16;
+ result[11] = ctx->c >> 24;
+ result[12] = ctx->d;
+ result[13] = ctx->d >> 8;
+ result[14] = ctx->d >> 16;
+ result[15] = ctx->d >> 24;
+
+ internal_memset(ctx, 0, sizeof(*ctx));
+}
+
+MD5Hash md5_hash(const void *data, uptr size) {
+ MD5Hash res;
+ MD5_CTX ctx;
+ MD5_Init(&ctx);
+ MD5_Update(&ctx, (void*)data, size);
+ MD5_Final((unsigned char*)&res.hash[0], &ctx);
+ return res;
+}
+}
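The rotation in the STEP macro above is an ordinary 32-bit rotate-left; the extra "& 0xffffffff" only matters if MD5_u32plus were wider than 32 bits. A minimal standalone sketch of that one operation (illustrative only, not part of the imported file):

static inline unsigned rotl32(unsigned x, int s) {
  // Same expression as the middle line of STEP, for 1 <= s <= 31.
  return (unsigned)(((x << s) | ((x & 0xffffffffu) >> (32 - s))) & 0xffffffffu);
}
// Example: rotl32(0x80000001u, 7) == 0xc0u -- the two set bits move to bits 7 and 6.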
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
new file mode 100644
index 0000000..7f956df
--- /dev/null
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -0,0 +1,123 @@
+//===-- tsan_mman.cc ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_report.h"
+#include "tsan_flags.h"
+
+namespace __tsan {
+
+static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
+ if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
+ return;
+ StackTrace stack;
+ stack.ObtainCurrent(thr, pc);
+ ScopedReport rep(ReportTypeSignalUnsafe);
+ rep.AddStack(&stack);
+ OutputReport(rep, rep.GetReport()->stacks[0]);
+}
+
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ CHECK_GT(thr->in_rtl, 0);
+ if (sz + sizeof(MBlock) < sz)
+ return 0;
+ MBlock *b = (MBlock*)InternalAlloc(sz + sizeof(MBlock));
+ if (b == 0)
+ return 0;
+ b->size = sz;
+ void *p = b + 1;
+ if (CTX() && CTX()->initialized) {
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+ }
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ SignalUnsafeCall(thr, pc);
+ return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_NE(p, (void*)0);
+ DPrintf("#%d: free(%p)\n", thr->tid, p);
+ MBlock *b = user_mblock(thr, p);
+ p = b + 1;
+ if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
+ MemoryRangeFreed(thr, pc, (uptr)p, b->size);
+ }
+ InternalFree(b);
+ SignalUnsafeCall(thr, pc);
+}
+
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
+ CHECK_GT(thr->in_rtl, 0);
+ void *p2 = 0;
+ // FIXME: Handle "shrinking" more efficiently,
+ // it seems that some software actually does this.
+ if (sz) {
+ p2 = user_alloc(thr, pc, sz);
+ if (p2 == 0)
+ return 0;
+ if (p) {
+ MBlock *b = user_mblock(thr, p);
+ internal_memcpy(p2, p, min(b->size, sz));
+ }
+ }
+ if (p) {
+ user_free(thr, pc, p);
+ }
+ return p2;
+}
+
+void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align) {
+ CHECK_GT(thr->in_rtl, 0);
+ void *p = user_alloc(thr, pc, sz + align);
+ void *pa = RoundUp(p, align);
+ DCHECK_LE((uptr)pa + sz, (uptr)p + sz + align);
+ return pa;
+}
+
+MBlock *user_mblock(ThreadState *thr, void *p) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_NE(p, (void*)0);
+ MBlock *b = (MBlock*)InternalAllocBlock(p);
+ // FIXME: Output a warning, it's a user error.
+ if (p < (char*)(b + 1) || p > (char*)(b + 1) + b->size) {
+ TsanPrintf("user_mblock p=%p b=%p size=%zu beg=%p end=%p\n",
+ p, b, b->size, (char*)(b + 1), (char*)(b + 1) + b->size);
+ CHECK_GE(p, (char*)(b + 1));
+ CHECK_LE(p, (char*)(b + 1) + b->size);
+ }
+ return b;
+}
+
+void *internal_alloc(MBlockType typ, uptr sz) {
+ ThreadState *thr = cur_thread();
+ CHECK_GT(thr->in_rtl, 0);
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ return InternalAlloc(sz);
+}
+
+void internal_free(void *p) {
+ ThreadState *thr = cur_thread();
+ CHECK_GT(thr->in_rtl, 0);
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ InternalFree(p);
+}
+
+} // namespace __tsan
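A quick sketch of the block layout that user_alloc/user_free rely on above: the MBlock header sits immediately before the pointer handed back to the caller (the real reverse lookup in user_mblock goes through InternalAllocBlock rather than plain pointer arithmetic). Illustrative only:

//   [ MBlock { size } ][ user data: size bytes ... ]
//    ^b                 ^p == b + 1
static inline void *MBlockToUser(MBlock *b) { return b + 1; }     // what user_alloc returns
static inline uptr MBlockUserSize(MBlock *b) { return b->size; }  // bytes usable by the caller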
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
new file mode 100644
index 0000000..53f147e
--- /dev/null
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -0,0 +1,114 @@
+//===-- tsan_mman.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MMAN_H
+#define TSAN_MMAN_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// Descriptor of user's memory block.
+struct MBlock {
+ uptr size;
+};
+
+// For user allocations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+// Does not accept NULL.
+void user_free(ThreadState *thr, uptr pc, void *p);
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
+void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+// Given the pointer p into a valid allocated block,
+// returns the descriptor of the block.
+MBlock *user_mblock(ThreadState *thr, void *p);
+
+enum MBlockType {
+ MBlockScopedBuf,
+ MBlockString,
+ MBlockStackTrace,
+ MBlockShadowStack,
+ MBlockSync,
+ MBlockClock,
+ MBlockThreadContex,
+ MBlockDeadInfo,
+ MBlockRacyStacks,
+ MBlockRacyAddresses,
+ MBlockAtExit,
+ MBlockFlag,
+ MBlockReport,
+ MBlockReportMop,
+ MBlockReportThread,
+ MBlockReportMutex,
+ MBlockReportLoc,
+ MBlockReportStack,
+ MBlockSuppression,
+ MBlockExpectRace,
+ MBlockSignal,
+
+ // This must be the last.
+ MBlockTypeCount,
+};
+
+// For internal data structures.
+void *internal_alloc(MBlockType typ, uptr sz);
+void internal_free(void *p);
+
+template<typename T>
+void DestroyAndFree(T *&p) {
+ p->~T();
+ internal_free(p);
+ p = 0;
+}
+
+template<typename T>
+class InternalScopedBuf {
+ public:
+ explicit InternalScopedBuf(uptr cnt) {
+ cnt_ = cnt;
+ ptr_ = (T*)internal_alloc(MBlockScopedBuf, cnt * sizeof(T));
+ }
+
+ ~InternalScopedBuf() {
+ internal_free(ptr_);
+ }
+
+ operator T *() {
+ return ptr_;
+ }
+
+ T &operator[](uptr i) {
+ return ptr_[i];
+ }
+
+ T *Ptr() {
+ return ptr_;
+ }
+
+ uptr Count() {
+ return cnt_;
+ }
+
+ uptr Size() {
+ return cnt_ * sizeof(T);
+ }
+
+ private:
+ T *ptr_;
+ uptr cnt_;
+
+ InternalScopedBuf(const InternalScopedBuf&);
+ void operator = (const InternalScopedBuf&);
+};
+
+} // namespace __tsan
+#endif // TSAN_MMAN_H
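Usage sketch for InternalScopedBuf, mirroring how tsan_printf.cc and tsan_rtl.cc use it; it assumes the sanitizer_common helpers internal_snprintf and GetPid are in scope:

static void FormatPidExample() {
  InternalScopedBuf<char> buf(128);   // backed by internal_alloc(MBlockScopedBuf, ...)
  internal_snprintf(buf.Ptr(), buf.Size(), "pid=%d", GetPid());
  // internal_free() runs automatically in the destructor when buf leaves scope.
}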
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
new file mode 100644
index 0000000..1a70f8f
--- /dev/null
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -0,0 +1,259 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_mutex.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
+// Readers have preference and can potentially starve writers.
+
+// The table specifies which mutexes can be locked under which other mutexes.
+// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
+// then Report mutex can be locked while under Threads mutex.
+// The leaf mutexes can be locked under any other mutexes.
+// Recursive locking is not supported.
+const MutexType MutexTypeLeaf = (MutexType)-1;
+static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
+ /*0 MutexTypeInvalid*/ {},
+ /*1 MutexTypeTrace*/ {MutexTypeLeaf},
+ /*2 MutexTypeThreads*/ {MutexTypeReport},
+ /*3 MutexTypeReport*/ {},
+ /*4 MutexTypeSyncVar*/ {},
+ /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
+ /*6 MutexTypeSlab*/ {MutexTypeLeaf},
+ /*7 MutexTypeAnnotations*/ {},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
+};
+
+static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+
+void InitializeMutex() {
+ // Build the "can lock" adjacency matrix.
+ // If [i][j]==true, then one can lock mutex j while under mutex i.
+ const int N = MutexTypeCount;
+ int cnt[N] = {};
+ bool leaf[N] = {};
+ for (int i = 1; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ int z = CanLockTab[i][j];
+ if (z == MutexTypeInvalid)
+ continue;
+ if (z == MutexTypeLeaf) {
+ CHECK(!leaf[i]);
+ leaf[i] = true;
+ continue;
+ }
+ CHECK(!CanLockAdj[i][z]);
+ CanLockAdj[i][z] = true;
+ cnt[i]++;
+ }
+ }
+ for (int i = 0; i < N; i++) {
+ CHECK(!leaf[i] || cnt[i] == 0);
+ }
+ // Add leaf mutexes.
+ for (int i = 0; i < N; i++) {
+ if (!leaf[i])
+ continue;
+ for (int j = 0; j < N; j++) {
+ if (i == j || leaf[j] || j == MutexTypeInvalid)
+ continue;
+ CHECK(!CanLockAdj[j][i]);
+ CanLockAdj[j][i] = true;
+ }
+ }
+ // Build the transitive closure.
+ bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ CanLockAdj2[i][j] = CanLockAdj[i][j];
+ }
+ }
+ for (int k = 0; k < N; k++) {
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
+ CanLockAdj2[i][j] = true;
+ }
+ }
+ }
+ }
+#if 0
+ TsanPrintf("Can lock graph:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ TsanPrintf("%d ", CanLockAdj[i][j]);
+ }
+ TsanPrintf("\n");
+ }
+ TsanPrintf("Can lock graph closure:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ TsanPrintf("%d ", CanLockAdj2[i][j]);
+ }
+ TsanPrintf("\n");
+ }
+#endif
+ // Verify that the graph is acyclic.
+ for (int i = 0; i < N; i++) {
+ if (CanLockAdj2[i][i]) {
+ TsanPrintf("Mutex %d participates in a cycle\n", i);
+ Die();
+ }
+ }
+}
+
+DeadlockDetector::DeadlockDetector() {
+ // Rely on zero initialization because some mutexes can be locked before ctor.
+}
+
+void DeadlockDetector::Lock(MutexType t) {
+ // TsanPrintf("LOCK %d @%zu\n", t, seq_ + 1);
+ u64 max_seq = 0;
+ u64 max_idx = MutexTypeInvalid;
+ for (int i = 0; i != MutexTypeCount; i++) {
+ if (locked_[i] == 0)
+ continue;
+ CHECK_NE(locked_[i], max_seq);
+ if (max_seq < locked_[i]) {
+ max_seq = locked_[i];
+ max_idx = i;
+ }
+ }
+ locked_[t] = ++seq_;
+ if (max_idx == MutexTypeInvalid)
+ return;
+ // TsanPrintf(" last %d @%zu\n", max_idx, max_seq);
+ if (!CanLockAdj[max_idx][t]) {
+ TsanPrintf("ThreadSanitizer: internal deadlock detected\n");
+ TsanPrintf("ThreadSanitizer: can't lock %d while under %zu\n",
+ t, (uptr)max_idx);
+ Die();
+ }
+}
+
+void DeadlockDetector::Unlock(MutexType t) {
+ // TsanPrintf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
+ CHECK(locked_[t]);
+ locked_[t] = 0;
+}
+
+const uptr kUnlocked = 0;
+const uptr kWriteLock = 1;
+const uptr kReadLock = 2;
+
+class Backoff {
+ public:
+ Backoff()
+ : iter_() {
+ }
+
+ bool Do() {
+ if (iter_++ < kActiveSpinIters)
+ proc_yield(kActiveSpinCnt);
+ else
+ internal_sched_yield();
+ return true;
+ }
+
+ u64 Contention() const {
+ u64 active = iter_ % kActiveSpinIters;
+ u64 passive = iter_ - active;
+ return active + 10 * passive;
+ }
+
+ private:
+ int iter_;
+ static const int kActiveSpinIters = 10;
+ static const int kActiveSpinCnt = 20;
+};
+
+Mutex::Mutex(MutexType type, StatType stat_type) {
+ CHECK_GT(type, MutexTypeInvalid);
+ CHECK_LT(type, MutexTypeCount);
+#if TSAN_DEBUG
+ type_ = type;
+#endif
+#if TSAN_COLLECT_STATS
+ stat_type_ = stat_type;
+#endif
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+}
+
+Mutex::~Mutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+}
+
+void Mutex::Lock() {
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Lock(type_);
+#endif
+ uptr cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
+ cmp = kUnlocked;
+ if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire)) {
+#if TSAN_COLLECT_STATS
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+ }
+}
+
+void Mutex::Unlock() {
+ uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ (void)prev;
+ DCHECK_NE(prev & kWriteLock, 0);
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::ReadLock() {
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Lock(type_);
+#endif
+ uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0) {
+#if TSAN_COLLECT_STATS
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+}
+
+void Mutex::ReadUnlock() {
+ uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ (void)prev;
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Unlock(type_);
+#endif
+}
+
+} // namespace __tsan
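A concrete reading of CanLockTab: the row for MutexTypeThreads contains MutexTypeReport, so in debug builds the DeadlockDetector accepts taking the report mutex while the thread mutex is held, but not the reverse. A hedged sketch, assuming a Context with thread_mtx/report_mtx as constructed in tsan_rtl.cc:

static void LockOrderExample(Context *ctx) {
  Lock threads(&ctx->thread_mtx);  // MutexTypeThreads
  Lock report(&ctx->report_mtx);   // MutexTypeReport: edge present in CanLockAdj
  // Acquiring them in the opposite order has no edge in the transitive closure,
  // so DeadlockDetector::Lock() would report an internal deadlock and Die().
}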
diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h
new file mode 100644
index 0000000..5b22a41
--- /dev/null
+++ b/lib/tsan/rtl/tsan_mutex.h
@@ -0,0 +1,78 @@
+//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEX_H
+#define TSAN_MUTEX_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum MutexType {
+ MutexTypeInvalid,
+ MutexTypeTrace,
+ MutexTypeThreads,
+ MutexTypeReport,
+ MutexTypeSyncVar,
+ MutexTypeSyncTab,
+ MutexTypeSlab,
+ MutexTypeAnnotations,
+ MutexTypeAtExit,
+
+ // This must be the last.
+ MutexTypeCount,
+};
+
+class Mutex {
+ public:
+ explicit Mutex(MutexType type, StatType stat_type);
+ ~Mutex();
+
+ void Lock();
+ void Unlock();
+
+ void ReadLock();
+ void ReadUnlock();
+
+ private:
+ atomic_uintptr_t state_;
+#if TSAN_DEBUG
+ MutexType type_;
+#endif
+#if TSAN_COLLECT_STATS
+ StatType stat_type_;
+#endif
+
+ Mutex(const Mutex&);
+ void operator = (const Mutex&);
+};
+
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;
+
+class DeadlockDetector {
+ public:
+ DeadlockDetector();
+ void Lock(MutexType t);
+ void Unlock(MutexType t);
+ private:
+ u64 seq_;
+ u64 locked_[MutexTypeCount];
+};
+
+void InitializeMutex();
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEX_H
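The Lock/ReadLock typedefs above are RAII wrappers over Mutex; a minimal usage sketch:

static void ReadersThenWriterExample(Mutex *mtx) {
  {
    ReadLock r(mtx);  // Mutex::ReadLock() now, Mutex::ReadUnlock() at scope exit
    // ... shared, read-only work ...
  }
  Lock w(mtx);        // exclusive section: Mutex::Lock()/Unlock()
}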
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
new file mode 100644
index 0000000..b557fa1
--- /dev/null
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -0,0 +1,101 @@
+//===-- tsan_platform.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Platform-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#include "tsan_rtl.h"
+
+#if __LP64__
+namespace __tsan {
+
+#if defined(TSAN_GO)
+static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
+static const uptr kLinuxAppMemEnd = 0x00fcffffffffULL;
+static const uptr kLinuxShadowMsk = 0x100000000000ULL;
+// TSAN_COMPAT_SHADOW is intended for the COMPAT virtual memory layout,
+// where memory addresses are of the form 0x2axxxxxxxxxx.
+// The option is enabled with 'setarch x86_64 -L'.
+#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+static const uptr kLinuxAppMemBeg = 0x2a0000000000ULL;
+static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
+#else
+static const uptr kLinuxAppMemBeg = 0x7ef000000000ULL;
+static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
+#endif
+
+static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+
+// This has to be a macro to allow constant initialization of constants below.
+#ifndef TSAN_GO
+#define MemToShadow(addr) \
+ (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
+#else
+#define MemToShadow(addr) \
+ ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
+#endif
+
+static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
+static const uptr kLinuxShadowEnd =
+ MemToShadow(kLinuxAppMemEnd) | (kPageSize - 1);
+
+static inline bool IsAppMem(uptr mem) {
+ return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
+}
+
+static inline bool IsShadowMem(uptr mem) {
+ return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+}
+
+static inline uptr ShadowToMem(uptr shadow) {
+ CHECK(IsShadowMem(shadow));
+#ifdef TSAN_GO
+ return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
+#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+ // COMPAT mapping is not quite one-to-one.
+ return (shadow / kShadowCnt) | 0x280000000000ULL;
+#else
+ return (shadow / kShadowCnt) | kLinuxAppMemMsk;
+#endif
+}
+
+// For the COMPAT mapping, returns an alternative address
+// that maps to the same shadow address.
+static inline uptr AlternativeAddress(uptr addr) {
+#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+ return addr | kLinuxAppMemMsk;
+#else
+ return 0;
+#endif
+}
+
+uptr GetShadowMemoryConsumption();
+void FlushShadowMemory();
+
+const char *InitializePlatform();
+void FinalizePlatform();
+
+void internal_start_thread(void(*func)(void*), void *arg);
+
+uptr GetTlsSize();
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size);
+
+} // namespace __tsan
+
+#else // __LP64__
+# error "Only 64-bit is supported"
+#endif
+
+#endif // TSAN_PLATFORM_H
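A worked example of the default (non-Go, non-COMPAT) mapping, assuming kShadowCell == 8 and a TSAN_SHADOW_COUNT=4 build, i.e. kShadowCnt == 4 (both constants live in tsan_defs.h and are assumptions here):

//   addr                          = 0x7f0000001000   (application memory)
//   addr & ~(kLinuxAppMemMsk | 7) = 0x030000001000
//   MemToShadow(addr)             = 0x030000001000 * 4 = 0x0c0000004000
// which falls inside [kLinuxShadowBeg, kLinuxShadowEnd] for this configuration.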
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
new file mode 100644
index 0000000..c791c96
--- /dev/null
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -0,0 +1,238 @@
+//===-- tsan_platform_linux.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Linux-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifdef __linux__
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <asm/prctl.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+#include <dlfcn.h>
+
+extern "C" int arch_prctl(int code, __sanitizer::uptr *addr);
+
+namespace __sanitizer {
+
+void Die() {
+ _exit(1);
+}
+
+} // namespace __sanitizer
+
+namespace __tsan {
+
+#ifndef TSAN_GO
+ScopedInRtl::ScopedInRtl()
+ : thr_(cur_thread()) {
+ in_rtl_ = thr_->in_rtl;
+ thr_->in_rtl++;
+ errno_ = errno;
+}
+
+ScopedInRtl::~ScopedInRtl() {
+ thr_->in_rtl--;
+ errno = errno_;
+ CHECK_EQ(in_rtl_, thr_->in_rtl);
+}
+#else
+ScopedInRtl::ScopedInRtl() {
+}
+
+ScopedInRtl::~ScopedInRtl() {
+}
+#endif
+
+uptr GetShadowMemoryConsumption() {
+ return 0;
+}
+
+void FlushShadowMemory() {
+ madvise((void*)kLinuxShadowBeg,
+ kLinuxShadowEnd - kLinuxShadowBeg,
+ MADV_DONTNEED);
+}
+
+#ifndef TSAN_GO
+static void ProtectRange(uptr beg, uptr end) {
+ ScopedInRtl in_rtl;
+ CHECK_LE(beg, end);
+ if (beg == end)
+ return;
+ if (beg != (uptr)Mprotect(beg, end - beg)) {
+ TsanPrintf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
+ TsanPrintf("FATAL: Make sure you are not using unlimited stack\n");
+ Die();
+ }
+}
+#endif
+
+void InitializeShadowMemory() {
+ uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
+ kLinuxShadowEnd - kLinuxShadowBeg);
+ if (shadow != kLinuxShadowBeg) {
+ TsanPrintf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ TsanPrintf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie.\n");
+ Die();
+ }
+#ifndef TSAN_GO
+ const uptr kClosedLowBeg = 0x200000;
+ const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
+ const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
+ const uptr kClosedMidEnd = kLinuxAppMemBeg - 1;
+ ProtectRange(kClosedLowBeg, kClosedLowEnd);
+ ProtectRange(kClosedMidBeg, kClosedMidEnd);
+#endif
+#ifndef TSAN_GO
+ DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
+ kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
+#endif
+ DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
+ kLinuxShadowBeg, kLinuxShadowEnd,
+ (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+#ifndef TSAN_GO
+ DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
+ kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
+#endif
+ DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
+ kLinuxAppMemBeg, kLinuxAppMemEnd,
+ (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+ DPrintf("stack %zx\n", (uptr)&shadow);
+}
+
+#ifndef TSAN_GO
+static void CheckPIE() {
+ // Ensure that the binary is indeed compiled with -pie.
+ ProcessMaps proc_maps;
+ uptr start, end;
+ if (proc_maps.Next(&start, &end,
+ /*offset*/0, /*filename*/0, /*filename_size*/0)) {
+ if ((u64)start < kLinuxAppMemBeg) {
+ TsanPrintf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
+ "something is mapped at 0x%zx < 0x%zx)\n",
+ start, kLinuxAppMemBeg);
+ TsanPrintf("FATAL: Make sure to compile with -fPIE"
+ " and to link with -pie.\n");
+ Die();
+ }
+ }
+}
+
+static uptr g_tls_size;
+
+#ifdef __i386__
+# define INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
+#else
+# define INTERNAL_FUNCTION
+#endif
+extern "C" void _dl_get_tls_static_info(size_t*, size_t*)
+ __attribute__((weak)) INTERNAL_FUNCTION;
+
+static int InitTlsSize() {
+ typedef void (*get_tls_func)(size_t*, size_t*) INTERNAL_FUNCTION;
+ get_tls_func get_tls = &_dl_get_tls_static_info;
+ if (get_tls == 0)
+ get_tls = (get_tls_func)dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+ CHECK_NE(get_tls, 0);
+ size_t tls_size = 0;
+ size_t tls_align = 0;
+ get_tls(&tls_size, &tls_align);
+ return tls_size;
+}
+#endif // #ifndef TSAN_GO
+
+const char *InitializePlatform() {
+ void *p = 0;
+ if (sizeof(p) == 8) {
+    // Disable core dumps; dumping 16TB usually takes a while.
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile rlimit lim;
+ lim.rlim_cur = 0;
+ lim.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, (rlimit*)&lim);
+ }
+
+#ifndef TSAN_GO
+ CheckPIE();
+ g_tls_size = (uptr)InitTlsSize();
+#endif
+ return getenv("TSAN_OPTIONS");
+}
+
+void FinalizePlatform() {
+ fflush(0);
+}
+
+uptr GetTlsSize() {
+#ifndef TSAN_GO
+ return g_tls_size;
+#else
+ return 0;
+#endif
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#ifndef TSAN_GO
+ arch_prctl(ARCH_GET_FS, tls_addr);
+ *tls_addr -= g_tls_size;
+ *tls_size = g_tls_size;
+
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+
+ if (!main) {
+ // If stack and tls intersect, make them non-intersecting.
+ if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
+ CHECK_GT(*tls_addr + *tls_size, *stk_addr);
+ CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
+ *stk_size -= *tls_size;
+ *tls_addr = *stk_addr + *stk_size;
+ }
+ }
+#else
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif
+}
+
+
+} // namespace __tsan
+
+#endif // #ifdef __linux__
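A numeric sketch of the stack/TLS disjointing in GetThreadStackAndTls above, with hypothetical addresses: if the static TLS block reported via arch_prctl lands inside the pthread stack, the stack is trimmed so the two ranges no longer overlap:

//   before:  stack = [0x7fff00000000, 0x7fff00100000)
//            tls   = [0x7fff000f8000, 0x7fff00100000)   (inside the stack)
//   after:   *stk_size -= *tls_size  ->  stack = [0x7fff00000000, 0x7fff000f8000)
//            *tls_addr  = *stk_addr + *stk_size = 0x7fff000f8000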
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
new file mode 100644
index 0000000..7451492
--- /dev/null
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -0,0 +1,112 @@
+//===-- tsan_platform_mac.cc ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifdef __APPLE__
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+
+namespace __sanitizer {
+
+void Die() {
+ _exit(1);
+}
+
+} // namespace __sanitizer
+
+namespace __tsan {
+
+ScopedInRtl::ScopedInRtl() {
+}
+
+ScopedInRtl::~ScopedInRtl() {
+}
+
+uptr GetShadowMemoryConsumption() {
+ return 0;
+}
+
+void FlushShadowMemory() {
+}
+
+void InitializeShadowMemory() {
+ uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
+ kLinuxShadowEnd - kLinuxShadowBeg);
+ if (shadow != kLinuxShadowBeg) {
+ TsanPrintf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ TsanPrintf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie.\n");
+ Die();
+ }
+ DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
+ kLinuxShadowBeg, kLinuxShadowEnd,
+ (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+ DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
+ kLinuxAppMemBeg, kLinuxAppMemEnd,
+ (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+}
+
+const char *InitializePlatform() {
+ void *p = 0;
+ if (sizeof(p) == 8) {
+    // Disable core dumps; dumping 16TB usually takes a while.
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile rlimit lim;
+ lim.rlim_cur = 0;
+ lim.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, (rlimit*)&lim);
+ }
+
+ return getenv("TSAN_OPTIONS");
+}
+
+void FinalizePlatform() {
+ fflush(0);
+}
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+}
+
+} // namespace __tsan
+
+#endif // #ifdef __APPLE__
diff --git a/lib/tsan/rtl/tsan_printf.cc b/lib/tsan/rtl/tsan_printf.cc
new file mode 100644
index 0000000..6f41440
--- /dev/null
+++ b/lib/tsan/rtl/tsan_printf.cc
@@ -0,0 +1,39 @@
+//===-- tsan_printf.cc ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_defs.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+
+#include <stdarg.h> // va_list
+
+namespace __sanitizer {
+int VSNPrintf(char *buff, int buff_length, const char *format, va_list args);
+} // namespace __sanitizer
+
+namespace __tsan {
+
+void TsanPrintf(const char *format, ...) {
+ ScopedInRtl in_rtl;
+ const uptr kMaxLen = 16 * 1024;
+ InternalScopedBuf<char> buffer(kMaxLen);
+ va_list args;
+ va_start(args, format);
+ uptr len = VSNPrintf(buffer, buffer.Size(), format, args);
+ va_end(args);
+ internal_write(CTX() ? flags()->log_fileno : 2,
+ buffer, len < buffer.Size() ? len : buffer.Size() - 1);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
new file mode 100644
index 0000000..c841a98
--- /dev/null
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -0,0 +1,167 @@
+//===-- tsan_report.cc ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+ReportDesc::ReportDesc()
+ : stacks(MBlockReportStack)
+ , mops(MBlockReportMop)
+ , locs(MBlockReportLoc)
+ , mutexes(MBlockReportMutex)
+ , threads(MBlockReportThread) {
+}
+
+ReportDesc::~ReportDesc() {
+}
+
+#ifndef TSAN_GO
+
+static void PrintHeader(ReportType typ) {
+ TsanPrintf("WARNING: ThreadSanitizer: ");
+
+ if (typ == ReportTypeRace)
+ TsanPrintf("data race");
+ else if (typ == ReportTypeUseAfterFree)
+ TsanPrintf("heap-use-after-free");
+ else if (typ == ReportTypeThreadLeak)
+ TsanPrintf("thread leak");
+ else if (typ == ReportTypeMutexDestroyLocked)
+ TsanPrintf("destroy of a locked mutex");
+ else if (typ == ReportTypeSignalUnsafe)
+ TsanPrintf("signal-unsafe call inside of a signal");
+ else if (typ == ReportTypeErrnoInSignal)
+ TsanPrintf("signal handler spoils errno");
+
+ TsanPrintf(" (pid=%d)\n", GetPid());
+}
+
+static void PrintStack(const ReportStack *ent) {
+ for (int i = 0; ent; ent = ent->next, i++) {
+ TsanPrintf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line);
+ if (ent->col)
+ TsanPrintf(":%d", ent->col);
+ if (ent->module && ent->offset)
+ TsanPrintf(" (%s+%p)\n", ent->module, (void*)ent->offset);
+ else
+ TsanPrintf(" (%p)\n", (void*)ent->pc);
+ }
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ TsanPrintf(" %s of size %d at %p",
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")),
+ mop->size, (void*)mop->addr);
+ if (mop->tid == 0)
+ TsanPrintf(" by main thread:\n");
+ else
+ TsanPrintf(" by thread %d:\n", mop->tid);
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ if (loc->type == ReportLocationGlobal) {
+ TsanPrintf(" Location is global '%s' of size %zu at %zx %s:%d\n",
+ loc->name, loc->size, loc->addr, loc->file, loc->line);
+ } else if (loc->type == ReportLocationHeap) {
+ TsanPrintf(" Location is heap of size %zu at %zx allocated "
+ "by thread %d:\n", loc->size, loc->addr, loc->tid);
+ PrintStack(loc->stack);
+ } else if (loc->type == ReportLocationStack) {
+ TsanPrintf(" Location is stack of thread %d:\n", loc->tid);
+ }
+}
+
+static void PrintMutex(const ReportMutex *rm) {
+ if (rm->stack == 0)
+ return;
+ TsanPrintf(" Mutex %d created at:\n", rm->id);
+ PrintStack(rm->stack);
+}
+
+static void PrintThread(const ReportThread *rt) {
+ if (rt->id == 0) // Little sense in describing the main thread.
+ return;
+ TsanPrintf(" Thread %d", rt->id);
+ if (rt->name)
+ TsanPrintf(" '%s'", rt->name);
+ TsanPrintf(" (%s)", rt->running ? "running" : "finished");
+ if (rt->stack)
+ TsanPrintf(" created at:");
+ TsanPrintf("\n");
+ PrintStack(rt->stack);
+}
+
+void PrintReport(const ReportDesc *rep) {
+ TsanPrintf("==================\n");
+ PrintHeader(rep->typ);
+
+ for (uptr i = 0; i < rep->stacks.Size(); i++)
+ PrintStack(rep->stacks[i]);
+
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+
+ TsanPrintf("==================\n");
+}
+
+#else
+
+static void PrintStack(const ReportStack *ent) {
+ for (int i = 0; ent; ent = ent->next, i++) {
+ TsanPrintf(" %s()\n %s:%d +0x%zx\n",
+ ent->func, ent->file, ent->line, (void*)ent->offset);
+ }
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ TsanPrintf("%s by goroutine %d:\n",
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")),
+ mop->tid);
+ PrintStack(mop->stack);
+}
+
+static void PrintThread(const ReportThread *rt) {
+ if (rt->id == 0) // Little sense in describing the main thread.
+ return;
+ TsanPrintf("Goroutine %d (%s) created at:\n",
+ rt->id, rt->running ? "running" : "finished");
+ PrintStack(rt->stack);
+}
+
+void PrintReport(const ReportDesc *rep) {
+ TsanPrintf("==================\n");
+ TsanPrintf("WARNING: DATA RACE at %p\n", (void*)rep->mops[0]->addr);
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ TsanPrintf("==================\n");
+}
+
+#endif
+
+} // namespace __tsan
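For orientation, the non-Go printers above emit output roughly of this shape (values are illustrative and exact pointer formatting depends on TsanPrintf):

//   WARNING: ThreadSanitizer: data race (pid=1234)
//     Write of size 4 at 0x7f0000001000 by thread 1:
//       #0 Thread1 race.cc:7:3 (a.out+0x4a2b)
//     Previous read of size 4 at 0x7f0000001000 by main thread:
//       #0 main race.cc:15:10 (a.out+0x4b01)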
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
new file mode 100644
index 0000000..d139296
--- /dev/null
+++ b/lib/tsan/rtl/tsan_report.h
@@ -0,0 +1,102 @@
+//===-- tsan_report.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_REPORT_H
+#define TSAN_REPORT_H
+
+#include "tsan_defs.h"
+#include "tsan_vector.h"
+
+namespace __tsan {
+
+enum ReportType {
+ ReportTypeRace,
+ ReportTypeUseAfterFree,
+ ReportTypeThreadLeak,
+ ReportTypeMutexDestroyLocked,
+ ReportTypeSignalUnsafe,
+ ReportTypeErrnoInSignal,
+};
+
+struct ReportStack {
+ ReportStack *next;
+ char *module;
+ uptr offset;
+ uptr pc;
+ char *func;
+ char *file;
+ int line;
+ int col;
+};
+
+struct ReportMop {
+ int tid;
+ uptr addr;
+ int size;
+ bool write;
+ int nmutex;
+ int *mutex;
+ ReportStack *stack;
+};
+
+enum ReportLocationType {
+ ReportLocationGlobal,
+ ReportLocationHeap,
+ ReportLocationStack,
+};
+
+struct ReportLocation {
+ ReportLocationType type;
+ uptr addr;
+ uptr size;
+ int tid;
+ char *name;
+ char *file;
+ int line;
+ ReportStack *stack;
+};
+
+struct ReportThread {
+ int id;
+ bool running;
+ char *name;
+ ReportStack *stack;
+};
+
+struct ReportMutex {
+ int id;
+ ReportStack *stack;
+};
+
+class ReportDesc {
+ public:
+ ReportType typ;
+ Vector<ReportStack*> stacks;
+ Vector<ReportMop*> mops;
+ Vector<ReportLocation*> locs;
+ Vector<ReportMutex*> mutexes;
+ Vector<ReportThread*> threads;
+
+ ReportDesc();
+ ~ReportDesc();
+
+ private:
+ ReportDesc(const ReportDesc&);
+ void operator = (const ReportDesc&);
+};
+
+// Format and output the report to the console/log. No additional logic.
+void PrintReport(const ReportDesc *rep);
+
+} // namespace __tsan
+
+#endif // TSAN_REPORT_H
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
new file mode 100644
index 0000000..0ceb26c
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -0,0 +1,534 @@
+//===-- tsan_rtl.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main file (entry points) for the TSan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_defs.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_suppressions.h"
+
+volatile int __tsan_resumed = 0;
+
+extern "C" void __tsan_resume() {
+ __tsan_resumed = 1;
+}
+
+namespace __tsan {
+
+#ifndef TSAN_GO
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
+#endif
+static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+
+static Context *ctx;
+Context *CTX() {
+ return ctx;
+}
+
+Context::Context()
+ : initialized()
+ , report_mtx(MutexTypeReport, StatMtxReport)
+ , nreported()
+ , nmissed_expected()
+ , thread_mtx(MutexTypeThreads, StatMtxThreads)
+ , racy_stacks(MBlockRacyStacks)
+ , racy_addresses(MBlockRacyAddresses) {
+}
+
+// The objects are allocated in TLS, so one may rely on zero-initialization.
+ThreadState::ThreadState(Context *ctx, int tid, u64 epoch,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , fast_ignore_reads()
+ // , fast_ignore_writes()
+ // , in_rtl()
+ , shadow_stack_pos(&shadow_stack[0])
+ , tid(tid)
+ , stk_addr(stk_addr)
+ , stk_size(stk_size)
+ , tls_addr(tls_addr)
+ , tls_size(tls_size) {
+}
+
+ThreadContext::ThreadContext(int tid)
+ : tid(tid)
+ , unique_id()
+ , user_id()
+ , thr()
+ , status(ThreadStatusInvalid)
+ , detached()
+ , reuse_count()
+ , epoch0()
+ , epoch1()
+ , dead_info()
+ , dead_next() {
+}
+
+static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
+ uptr shadow = GetShadowMemoryConsumption();
+
+ int nthread = 0;
+ int nlivethread = 0;
+ uptr threadmem = 0;
+ {
+ Lock l(&ctx->thread_mtx);
+ for (unsigned i = 0; i < kMaxTid; i++) {
+ ThreadContext *tctx = ctx->threads[i];
+ if (tctx == 0)
+ continue;
+ nthread += 1;
+ threadmem += sizeof(ThreadContext);
+ if (tctx->status != ThreadStatusRunning)
+ continue;
+ nlivethread += 1;
+ threadmem += sizeof(ThreadState);
+ }
+ }
+
+ uptr nsync = 0;
+ uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);
+
+ internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
+ " thread=%zuMB(total=%d/live=%d)"
+ " sync=%zuMB(cnt=%zu)\n",
+ num,
+ shadow >> 20,
+ threadmem >> 20, nthread, nlivethread,
+ syncmem >> 20, nsync);
+}
+
+static void MemoryProfileThread(void *arg) {
+ ScopedInRtl in_rtl;
+ fd_t fd = (fd_t)(uptr)arg;
+ for (int i = 0; ; i++) {
+ InternalScopedBuf<char> buf(4096);
+ WriteMemoryProfile(buf.Ptr(), buf.Size(), i);
+ internal_write(fd, buf.Ptr(), internal_strlen(buf.Ptr()));
+ SleepForSeconds(1);
+ }
+}
+
+static void InitializeMemoryProfile() {
+ if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
+ return;
+ InternalScopedBuf<char> filename(4096);
+ internal_snprintf(filename.Ptr(), filename.Size(), "%s.%d",
+ flags()->profile_memory, GetPid());
+ fd_t fd = internal_open(filename.Ptr(), true);
+ if (fd == kInvalidFd) {
+ TsanPrintf("Failed to open memory profile file '%s'\n", &filename[0]);
+ Die();
+ }
+ internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
+}
+
+static void MemoryFlushThread(void *arg) {
+ ScopedInRtl in_rtl;
+ for (int i = 0; ; i++) {
+ SleepForMillis(flags()->flush_memory_ms);
+ FlushShadowMemory();
+ }
+}
+
+static void InitializeMemoryFlush() {
+ if (flags()->flush_memory_ms == 0)
+ return;
+ if (flags()->flush_memory_ms < 100)
+ flags()->flush_memory_ms = 100;
+ internal_start_thread(&MemoryFlushThread, 0);
+}
+
+void Initialize(ThreadState *thr) {
+ // Thread safe because done before all threads exist.
+ static bool is_initialized = false;
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ ScopedInRtl in_rtl;
+ InitializeInterceptors();
+ const char *env = InitializePlatform();
+ InitializeMutex();
+ InitializeDynamicAnnotations();
+ ctx = new(ctx_placeholder) Context;
+ InitializeShadowMemory();
+ ctx->dead_list_size = 0;
+ ctx->dead_list_head = 0;
+ ctx->dead_list_tail = 0;
+ InitializeFlags(&ctx->flags, env);
+ InitializeSuppressions();
+ InitializeMemoryProfile();
+ InitializeMemoryFlush();
+
+ if (ctx->flags.verbosity)
+ TsanPrintf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ GetPid());
+
+ // Initialize thread 0.
+ ctx->thread_seq = 0;
+ int tid = ThreadCreate(thr, 0, 0, true);
+ CHECK_EQ(tid, 0);
+ ThreadStart(thr, tid);
+ CHECK_EQ(thr->in_rtl, 1);
+ ctx->initialized = true;
+
+ if (flags()->stop_on_start) {
+ TsanPrintf("ThreadSanitizer is suspended at startup (pid %d)."
+ " Call __tsan_resume().\n",
+ GetPid());
+ while (__tsan_resumed == 0);
+ }
+}
+
+int Finalize(ThreadState *thr) {
+ ScopedInRtl in_rtl;
+ Context *ctx = __tsan::ctx;
+ bool failed = false;
+
+ ThreadFinalize(thr);
+
+ if (ctx->nreported) {
+ failed = true;
+ TsanPrintf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
+ }
+
+ if (ctx->nmissed_expected) {
+ failed = true;
+ TsanPrintf("ThreadSanitizer: missed %d expected races\n",
+ ctx->nmissed_expected);
+ }
+
+ StatOutput(ctx->stat);
+ return failed ? flags()->exitcode : 0;
+}
+
+void TraceSwitch(ThreadState *thr) {
+ thr->nomalloc++;
+ ScopedInRtl in_rtl;
+ Lock l(&thr->trace.mtx);
+ unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
+ TraceHeader *hdr = &thr->trace.headers[trace];
+ hdr->epoch0 = thr->fast_state.epoch();
+ hdr->stack0.ObtainCurrent(thr, 0);
+ thr->nomalloc--;
+}
+
+#ifndef TSAN_GO
+extern "C" void __tsan_trace_switch() {
+ TraceSwitch(cur_thread());
+}
+
+extern "C" void __tsan_report_race() {
+ ReportRace(cur_thread());
+}
+#endif
+
+ALWAYS_INLINE
+static Shadow LoadShadow(u64 *p) {
+ u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
+ return Shadow(raw);
+}
+
+ALWAYS_INLINE
+static void StoreShadow(u64 *sp, u64 s) {
+ atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
+}
+
+ALWAYS_INLINE
+static void StoreIfNotYetStored(u64 *sp, u64 *s) {
+ StoreShadow(sp, *s);
+ *s = 0;
+}
+
+static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ Shadow cur, Shadow old) {
+ thr->racy_state[0] = cur.raw();
+ thr->racy_state[1] = old.raw();
+ thr->racy_shadow_addr = shadow_mem;
+#ifndef TSAN_GO
+ HACKY_CALL(__tsan_report_race);
+#else
+ ReportRace(thr);
+#endif
+}
+
+static inline bool BothReads(Shadow s, int kAccessIsWrite) {
+ return !kAccessIsWrite && !s.is_write();
+}
+
+static inline bool OldIsRWStronger(Shadow old, int kAccessIsWrite) {
+ return old.is_write() || !kAccessIsWrite;
+}
+
+static inline bool OldIsRWWeaker(Shadow old, int kAccessIsWrite) {
+ return !old.is_write() || kAccessIsWrite;
+}
+
+static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
+ return old.epoch() >= thr->fast_synch_epoch;
+}
+
+static inline bool HappensBefore(Shadow old, ThreadState *thr) {
+ return thr->clock.get(old.tid()) >= old.epoch();
+}
+
+ALWAYS_INLINE
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
+ u64 *shadow_mem, Shadow cur) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+
+ // This potentially can live in an MMX/SSE scratch register.
+ // The required intrinsics are:
+ // __m128i _mm_move_epi64(__m128i*);
+ // _mm_storel_epi64(u64*, __m128i);
+ u64 store_word = cur.raw();
+
+  // Scan all the shadow values and dispatch to 4 categories:
+  // same, replace, candidate and race (see comments below).
+  // We consider only 3 cases regarding access sizes:
+  // equal, intersect and not intersect. Initially I considered
+  // larger and smaller as well; it allowed replacing some
+  // 'candidates' with 'same' or 'replace', but I think
+  // it's just not worth it (performance- and complexity-wise).
+
+ Shadow old(0);
+ if (kShadowCnt == 1) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ } else if (kShadowCnt == 2) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ } else if (kShadowCnt == 4) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 2;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 3;
+#include "tsan_update_shadow_word_inl.h"
+ } else if (kShadowCnt == 8) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 2;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 3;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 4;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 5;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 6;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 7;
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+ CHECK(false);
+ }
+
+ // we did not find any races and had already stored
+ // the current access info, so we are done
+ if (LIKELY(store_word == 0))
+ return;
+ // choose a random candidate slot and replace it
+ StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
+ StatInc(thr, StatShadowReplace);
+ return;
+ RACE:
+ HandleRace(thr, shadow_mem, cur, old);
+ return;
+}
+
+ALWAYS_INLINE
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite) {
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
+ " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
+ (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
+ (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
+ (uptr)shadow_mem[0], (uptr)shadow_mem[1],
+ (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
+#if TSAN_DEBUG
+ if (!IsAppMem(addr)) {
+ TsanPrintf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+#endif
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ Shadow cur(fast_state);
+ cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
+ cur.SetWrite(kAccessIsWrite);
+
+ // We must not store to the trace if we do not store to the shadow.
+ // That is, this call must be moved somewhere below.
+ TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, fast_state,
+ shadow_mem, cur);
+}
+
+static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ u64 val) {
+ if (size == 0)
+ return;
+ // FIXME: fix me.
+ uptr offset = addr % kShadowCell;
+ if (offset) {
+ offset = kShadowCell - offset;
+ if (size <= offset)
+ return;
+ addr += offset;
+ size -= offset;
+ }
+ CHECK_EQ(addr % 8, 0);
+ CHECK(IsAppMem(addr));
+ CHECK(IsAppMem(addr + size - 1));
+ (void)thr;
+ (void)pc;
+  // Some programs mmap hundreds of GBs but actually use only a small part.
+  // So it's better to report a false positive on that memory
+  // than to hang here senselessly.
+ const uptr kMaxResetSize = 1024*1024*1024;
+ if (size > kMaxResetSize)
+ size = kMaxResetSize;
+ size = (size + 7) & ~7;
+ u64 *p = (u64*)MemToShadow(addr);
+ CHECK(IsShadowMem((uptr)p));
+ CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
+ // FIXME: may overwrite a part outside the region
+ for (uptr i = 0; i < size * kShadowCnt / kShadowCell; i++)
+ p[i] = val;
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ MemoryRangeSet(thr, pc, addr, size, 0);
+}
+
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ MemoryAccessRange(thr, pc, addr, size, true);
+ Shadow s(thr->fast_state);
+ s.MarkAsFreed();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void FuncEntry(ThreadState *thr, uptr pc) {
+ DCHECK_EQ(thr->in_rtl, 0);
+ StatInc(thr, StatFuncEnter);
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);
+
+  // Shadow stack maintenance can be replaced with
+  // stack unwinding during trace switch (which should presumably be faster).
+ DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
+#ifndef TSAN_GO
+ DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+ }
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+void FuncExit(ThreadState *thr) {
+ DCHECK_EQ(thr->in_rtl, 0);
+ StatInc(thr, StatFuncExit);
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);
+
+ DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
+#ifndef TSAN_GO
+ DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
+#endif
+ thr->shadow_stack_pos--;
+}
+
+void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
+ DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
+ thr->ignore_reads_and_writes += begin ? 1 : -1;
+ CHECK_GE(thr->ignore_reads_and_writes, 0);
+ if (thr->ignore_reads_and_writes)
+ thr->fast_state.SetIgnoreBit();
+ else
+ thr->fast_state.ClearIgnoreBit();
+}
+
+bool MD5Hash::operator==(const MD5Hash &other) const {
+ return hash[0] == other.hash[0] && hash[1] == other.hash[1];
+}
+
+#if TSAN_DEBUG
+void build_consistency_debug() {}
+#else
+void build_consistency_release() {}
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats() {}
+#else
+void build_consistency_nostats() {}
+#endif
+
+#if TSAN_SHADOW_COUNT == 1
+void build_consistency_shadow1() {}
+#elif TSAN_SHADOW_COUNT == 2
+void build_consistency_shadow2() {}
+#elif TSAN_SHADOW_COUNT == 4
+void build_consistency_shadow4() {}
+#else
+void build_consistency_shadow8() {}
+#endif
+
+} // namespace __tsan
+
+#ifndef TSAN_GO
+// Must be included in this file to make sure everything is inlined.
+#include "tsan_interface_inl.h"
+#endif
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
new file mode 100644
index 0000000..c559cb2
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -0,0 +1,491 @@
+//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main internal TSan header file.
+//
+// Ground rules:
+// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
+// function-scope locals)
+// - All functions/classes/etc reside in namespace __tsan, except for those
+// declared in tsan_interface.h.
+// - Platform-specific files should be used instead of ifdefs (*).
+// - No system headers included in header files (*).
+//   - Platform-specific headers are included only in platform-specific files (*).
+//
+// (*) Except when inlining is critical for performance.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_RTL_H
+#define TSAN_RTL_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_flags.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+#include "tsan_vector.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void TsanPrintf(const char *format, ...);
+
+// FastState (from most significant bit):
+// unused : 1
+// tid : kTidBits
+// epoch : kClkBits
+// unused : -
+// ignore_bit : 1
+class FastState {
+ public:
+ FastState(u64 tid, u64 epoch) {
+ x_ = tid << kTidShift;
+ x_ |= epoch << kClkShift;
+ DCHECK(tid == this->tid());
+ DCHECK(epoch == this->epoch());
+ }
+
+ explicit FastState(u64 x)
+ : x_(x) {
+ }
+
+ u64 tid() const {
+ u64 res = x_ >> kTidShift;
+ return res;
+ }
+
+ u64 epoch() const {
+ u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
+ return res;
+ }
+
+ void IncrementEpoch() {
+ u64 old_epoch = epoch();
+ x_ += 1 << kClkShift;
+ DCHECK_EQ(old_epoch + 1, epoch());
+ (void)old_epoch;
+ }
+
+ void SetIgnoreBit() { x_ |= kIgnoreBit; }
+ void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
+ bool GetIgnoreBit() const { return x_ & kIgnoreBit; }
+
+ private:
+ friend class Shadow;
+ static const int kTidShift = 64 - kTidBits - 1;
+ static const int kClkShift = kTidShift - kClkBits;
+ static const u64 kIgnoreBit = 1ull;
+ static const u64 kFreedBit = 1ull << 63;
+ u64 x_;
+};
+
+// Shadow (from most significant bit):
+// freed : 1
+// tid : kTidBits
+// epoch : kClkBits
+// is_write : 1
+// size_log : 2
+// addr0 : 3
+class Shadow : public FastState {
+ public:
+ explicit Shadow(u64 x) : FastState(x) { }
+
+ explicit Shadow(const FastState &s) : FastState(s.x_) { }
+
+ void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+ DCHECK_EQ(x_ & 31, 0);
+ DCHECK_LE(addr0, 7);
+ DCHECK_LE(kAccessSizeLog, 3);
+ x_ |= (kAccessSizeLog << 3) | addr0;
+ DCHECK_EQ(kAccessSizeLog, size_log());
+ DCHECK_EQ(addr0, this->addr0());
+ }
+
+ void SetWrite(unsigned kAccessIsWrite) {
+ DCHECK_EQ(x_ & 32, 0);
+ if (kAccessIsWrite)
+ x_ |= 32;
+ DCHECK_EQ(kAccessIsWrite, is_write());
+ }
+
+ bool IsZero() const { return x_ == 0; }
+ u64 raw() const { return x_; }
+
+ static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+ u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+ DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
+ return shifted_xor == 0;
+ }
+
+ static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
+ return masked_xor == 0;
+ }
+
+ static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ unsigned kS2AccessSize) {
+ bool res = false;
+ u64 diff = s1.addr0() - s2.addr0();
+ if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
+      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
+ if (s1.size() > -diff) res = true;
+ } else {
+ // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+ if (kS2AccessSize > diff) res = true;
+ }
+ DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
+ return res;
+ }
+
+  // The idea behind the offset is as follows.
+  // Consider that we have 8 bools contained within a single 8-byte block
+  // (mapped to a single shadow "cell"). Now consider that we write to the
+  // bools from a single thread (which we consider the common case).
+  // Without offsetting, each access would have to scan 4 shadow values on
+  // average to find the corresponding shadow value for the bool.
+  // With offsetting, we start scanning the shadow at the offset, so that
+  // each access hits the necessary shadow value straight off (at least in
+  // the expected optimistic case).
+  // This logic works seamlessly for any layout of user data. For example,
+  // if the user data is {int, short, char, char}, then accesses to the int
+  // are offset to 0, the short to 4, the 1st char to 6 and the 2nd char
+  // to 7. Hopefully, accesses from a single thread won't need to scan all
+  // 8 shadow values.
+ unsigned ComputeSearchOffset() {
+ return x_ & 7;
+ }
+ u64 addr0() const { return x_ & 7; }
+ u64 size() const { return 1ull << size_log(); }
+ bool is_write() const { return x_ & 32; }
+
+  // The idea behind the freed bit is as follows.
+  // When the memory is freed (or otherwise becomes inaccessible) we write
+  // shadow values with the tid/epoch of the free and the freed bit set.
+  // During memory access processing the freed bit is treated as the msb of
+  // the tid, so any access races with a shadow value that has the freed bit
+  // set (it is as if the write came from a thread we have never synchronized
+  // with). This allows us to detect accesses to freed memory without
+  // additional overhead in memory access processing and at the same time
+  // restore the tid/epoch of the free.
+ void MarkAsFreed() {
+ x_ |= kFreedBit;
+ }
+
+ bool GetFreedAndReset() {
+ bool res = x_ & kFreedBit;
+ x_ &= ~kFreedBit;
+ return res;
+ }
+
+ private:
+ u64 size_log() const { return (x_ >> 3) & 3; }
+
+ static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
+ if (s1.addr0() == s2.addr0()) return true;
+ if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+ return true;
+ if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+ return true;
+ return false;
+ }
+};
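
Editor's note: a minimal standalone sketch of the extra low-bit fields that Shadow layers on top of FastState (addr0 in bits 0-2, size_log in bits 3-4, is_write in bit 5), matching the accessors above.

// Not part of the patch; mirrors SetAddr0AndSizeLog()/SetWrite() and the
// corresponding getters.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t x = 0;                        // fast-state part already cleared
  uint64_t addr0 = 6, size_log = 1;      // a 2-byte access at offset 6
  x |= (size_log << 3) | addr0;
  x |= 32;                               // mark as a write

  assert((x & 7) == addr0);              // Shadow::addr0()
  assert(((x >> 3) & 3) == size_log);    // Shadow::size_log()
  assert((1ull << ((x >> 3) & 3)) == 2); // Shadow::size()
  assert((x & 32) != 0);                 // Shadow::is_write()
  return 0;
}
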
+
+// Freed memory.
+// As if 8-byte write by thread 0xff..f at epoch 0xff..f, races with everything.
+const u64 kShadowFreed = 0xfffffffffffffff8ull;
+
+struct SignalContext;
+
+// This struct is stored in TLS.
+struct ThreadState {
+ FastState fast_state;
+  // The synch epoch represents the thread's epoch before the last
+  // synchronization action. It allows us to reduce the number of shadow
+  // state updates.
+  // For example: fast_synch_epoch=100, the last write to addr X was at
+  // epoch=150, and we are processing a write to X from the same thread at
+  // epoch=200. We do nothing, because both writes happen in the same
+  // 'synch epoch'. That is, if another memory access does not race with the
+  // former write, it does not race with the latter as well.
+  // QUESTION: can we squeeze this into ThreadState::Fast?
+  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12
+  // by the epoch between synchs.
+  // This way we can save one load from tls.
+ u64 fast_synch_epoch;
+ // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
+  // We do not distinguish between ignoring reads and writes
+ // for better performance.
+ int ignore_reads_and_writes;
+ uptr *shadow_stack_pos;
+ u64 *racy_shadow_addr;
+ u64 racy_state[2];
+ Trace trace;
+#ifndef TSAN_GO
+  // C/C++ uses an embedded shadow stack of fixed size.
+ uptr shadow_stack[kShadowStackSize];
+#else
+  // Go uses a satellite shadow stack with dynamic size.
+ uptr *shadow_stack;
+ uptr *shadow_stack_end;
+#endif
+ ThreadClock clock;
+ u64 stat[StatCnt];
+ const int tid;
+ int in_rtl;
+ bool is_alive;
+ const uptr stk_addr;
+ const uptr stk_size;
+ const uptr tls_addr;
+ const uptr tls_size;
+
+ DeadlockDetector deadlock_detector;
+
+ bool in_signal_handler;
+ SignalContext *signal_ctx;
+
+ // Set in regions of runtime that must be signal-safe and fork-safe.
+ // If set, malloc must not be called.
+ int nomalloc;
+
+ explicit ThreadState(Context *ctx, int tid, u64 epoch,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size);
+};
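
Editor's note: the fast_synch_epoch comment above describes a redundancy filter whose real implementation is in tsan_rtl.cc, not in this header. Below is a rough, illustrative sketch of the idea only; all names and the exact condition are assumptions.

#include <cstdint>

// Illustrative only; the real fast path is more involved.
bool CanSkipShadowUpdate(uint64_t old_tid, uint64_t old_epoch,
                         uint64_t cur_tid, uint64_t fast_synch_epoch) {
  // If the previous access to this cell came from the same thread and
  // happened no earlier than the thread's last synchronization, the stored
  // shadow value already dominates the new one and need not be rewritten.
  return old_tid == cur_tid && old_epoch >= fast_synch_epoch;
}

int main() {
  // Mirrors the example in the comment above: last write at epoch 150,
  // fast_synch_epoch 100, new write from the same thread at epoch 200.
  return CanSkipShadowUpdate(/*old_tid=*/1, /*old_epoch=*/150,
                             /*cur_tid=*/1, /*fast_synch_epoch=*/100) ? 0 : 1;
}
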
+
+Context *CTX();
+
+#ifndef TSAN_GO
+extern THREADLOCAL char cur_thread_placeholder[];
+INLINE ThreadState *cur_thread() {
+ return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
+}
+#endif
+
+enum ThreadStatus {
+ ThreadStatusInvalid, // Non-existent thread, data is invalid.
+ ThreadStatusCreated, // Created but not yet running.
+ ThreadStatusRunning, // The thread is currently running.
+ ThreadStatusFinished, // Joinable thread is finished but not yet joined.
+ ThreadStatusDead, // Joined, but some info (trace) is still alive.
+};
+
+// Info about a thread that is held for some time after its termination.
+struct ThreadDeadInfo {
+ Trace trace;
+};
+
+struct ThreadContext {
+ const int tid;
+ int unique_id; // Non-rolling thread id.
+ uptr user_id; // Some opaque user thread id (e.g. pthread_t).
+ ThreadState *thr;
+ ThreadStatus status;
+ bool detached;
+ int reuse_count;
+ SyncClock sync;
+  // Epoch at which the thread started.
+ // If we see an event from the thread stamped by an older epoch,
+ // the event is from a dead thread that shared tid with this thread.
+ u64 epoch0;
+ u64 epoch1;
+ StackTrace creation_stack;
+ ThreadDeadInfo *dead_info;
+ ThreadContext *dead_next; // In dead thread list.
+
+ explicit ThreadContext(int tid);
+};
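
Editor's note: a tiny illustrative sketch of how epoch0/epoch1 bound the epochs belonging to this incarnation of the tid; ReportRace in tsan_rtl_report.cc below applies the same interval test. The function name is made up.

#include <cstdint>

bool EpochBelongsToThread(uint64_t epoch, uint64_t epoch0, uint64_t epoch1) {
  // Epochs outside [epoch0, epoch1] were produced by an earlier thread that
  // happened to reuse the same tid slot.
  return epoch >= epoch0 && epoch <= epoch1;
}

int main() {
  return EpochBelongsToThread(500, 400, 600) ? 0 : 1;
}
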
+
+struct RacyStacks {
+ MD5Hash hash[2];
+ bool operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
+ }
+};
+
+struct RacyAddress {
+ uptr addr_min;
+ uptr addr_max;
+};
+
+struct Context {
+ Context();
+
+ bool initialized;
+
+ SyncTab synctab;
+
+ Mutex report_mtx;
+ int nreported;
+ int nmissed_expected;
+
+ Mutex thread_mtx;
+ unsigned thread_seq;
+ unsigned unique_thread_seq;
+ int alive_threads;
+ int max_alive_threads;
+ ThreadContext *threads[kMaxTid];
+ int dead_list_size;
+ ThreadContext* dead_list_head;
+ ThreadContext* dead_list_tail;
+
+ Vector<RacyStacks> racy_stacks;
+ Vector<RacyAddress> racy_addresses;
+
+ Flags flags;
+
+ u64 stat[StatCnt];
+ u64 int_alloc_cnt[MBlockTypeCount];
+ u64 int_alloc_siz[MBlockTypeCount];
+};
+
+class ScopedInRtl {
+ public:
+ ScopedInRtl();
+ ~ScopedInRtl();
+ private:
+  ThreadState *thr_;
+ int in_rtl_;
+ int errno_;
+};
+
+class ScopedReport {
+ public:
+ explicit ScopedReport(ReportType typ);
+ ~ScopedReport();
+
+ void AddStack(const StackTrace *stack);
+ void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
+ void AddThread(const ThreadContext *tctx);
+ void AddMutex(const SyncVar *s);
+ void AddLocation(uptr addr, uptr size);
+
+ const ReportDesc *GetReport() const;
+
+ private:
+ Context *ctx_;
+ ReportDesc *rep_;
+
+ ScopedReport(const ScopedReport&);
+ void operator = (const ScopedReport&);
+};
+
+void StatAggregate(u64 *dst, u64 *src);
+void StatOutput(u64 *stat);
+void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
+ if (kCollectStats)
+ thr->stat[typ] += n;
+}
+
+void InitializeShadowMemory();
+void InitializeInterceptors();
+void InitializeDynamicAnnotations();
+
+void ReportRace(ThreadState *thr);
+bool OutputReport(const ScopedReport &srep,
+ const ReportStack *suppress_stack = 0);
+bool IsExpectedReport(uptr addr, uptr size);
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
+# define DPrintf TsanPrintf
+#else
+# define DPrintf(...)
+#endif
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
+# define DPrintf2 TsanPrintf
+#else
+# define DPrintf2(...)
+#endif
+
+void Initialize(ThreadState *thr);
+int Finalize(ThreadState *thr);
+
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite);
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
+ u64 *shadow_mem, Shadow cur);
+void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
+void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
+void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
+void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write);
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void IgnoreCtl(ThreadState *thr, bool write, bool begin);
+
+void FuncEntry(ThreadState *thr, uptr pc);
+void FuncExit(ThreadState *thr);
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, int tid);
+void ThreadFinish(ThreadState *thr);
+int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, int tid);
+void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+void ThreadFinalize(ThreadState *thr);
+void ThreadFinalizerGoroutine(ThreadState *thr);
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive);
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr);
+void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
+
+// The hacky call uses custom calling convention and an assembly thunk.
+// It is considerably faster than a normal call for the caller
+// if it is not executed (it is intended for slow paths from hot functions).
+// The trick is that the call preserves all registers and the compiler
+// does not treat it as a call.
+// If it does not work for you, use a normal call.
+#if TSAN_DEBUG == 0
+// The caller may not create the stack frame for itself at all,
+// so we create a reserve stack frame for it (1024b must be enough).
+#define HACKY_CALL(f) \
+ __asm__ __volatile__("sub $0x400, %%rsp;" \
+ "call " #f "_thunk;" \
+ "add $0x400, %%rsp;" ::: "memory");
+#else
+#define HACKY_CALL(f) f()
+#endif
+
+void TraceSwitch(ThreadState *thr);
+
+extern "C" void __tsan_trace_switch();
+void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
+ EventType typ, uptr addr) {
+ StatInc(thr, StatEvents);
+ if (UNLIKELY((epoch % kTracePartSize) == 0)) {
+#ifndef TSAN_GO
+ HACKY_CALL(__tsan_trace_switch);
+#else
+ TraceSwitch(thr);
+#endif
+ }
+ Event *evp = &thr->trace.events[epoch % kTraceSize];
+ Event ev = (u64)addr | ((u64)typ << 61);
+ *evp = ev;
+}
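
Editor's note: the event word packs the type into the top 3 bits and the pc/addr into the low 48 bits; RestoreStack in tsan_rtl_report.cc below decodes it the same way. A standalone round-trip check follows; the concrete EventType value is an assumption for illustration.

// Not part of the patch; mirrors the encode in TraceAddEvent and the decode
// in RestoreStack.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t typ = 1;                       // e.g. EventTypeFuncEnter (assumed)
  uint64_t pc  = 0x00007f1234567890ull;   // any 48-bit user-space pc
  uint64_t ev  = pc | (typ << 61);

  assert((ev >> 61) == typ);
  assert((ev & 0xffffffffffffull) == pc);
  return 0;
}
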
+
+} // namespace __tsan
+
+#endif // TSAN_RTL_H
diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S
new file mode 100644
index 0000000..2028ec5
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -0,0 +1,71 @@
+.section .text
+
+.globl __tsan_trace_switch_thunk
+__tsan_trace_switch_thunk:
+ # Save scratch registers.
+ push %rax
+ push %rcx
+ push %rdx
+ push %rsi
+ push %rdi
+ push %r8
+ push %r9
+ push %r10
+ push %r11
+ # Align stack frame.
+ push %rbx # non-scratch
+ mov %rsp, %rbx # save current rsp
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call __tsan_trace_switch
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ pop %rbx
+ # Restore scratch registers.
+ pop %r11
+ pop %r10
+ pop %r9
+ pop %r8
+ pop %rdi
+ pop %rsi
+ pop %rdx
+ pop %rcx
+ pop %rax
+ ret
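
Editor's note: the shr/shl pair above simply rounds rsp down to a 16-byte boundary. A small C++ sketch of the equivalent arithmetic, with an arbitrary example value:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t rsp = 0x7fffffffd9c8;          // arbitrary unaligned value
  uint64_t aligned = (rsp >> 4) << 4;     // what the thunk does
  assert(aligned == (rsp & ~uint64_t(15)));
  assert(aligned % 16 == 0);
  return 0;
}
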
+
+.globl __tsan_report_race_thunk
+__tsan_report_race_thunk:
+ # Save scratch registers.
+ push %rax
+ push %rcx
+ push %rdx
+ push %rsi
+ push %rdi
+ push %r8
+ push %r9
+ push %r10
+ push %r11
+ # Align stack frame.
+ push %rbx # non-scratch
+ mov %rsp, %rbx # save current rsp
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call __tsan_report_race
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ pop %rbx
+ # Restore scratch registers.
+ pop %r11
+ pop %r10
+ pop %r9
+ pop %r8
+ pop %rdi
+ pop %rsi
+ pop %rdx
+ pop %rcx
+ pop %rax
+ ret
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
new file mode 100644
index 0000000..882def8
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -0,0 +1,220 @@
+//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+#include "tsan_sync.h"
+#include "tsan_report.h"
+#include "tsan_symbolize.h"
+
+namespace __tsan {
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
+ bool rw, bool recursive) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexCreate);
+ MemoryWrite1Byte(thr, pc, addr);
+ SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, true);
+ s->is_rw = rw;
+ s->is_recursive = recursive;
+ s->mtx.Unlock();
+}
+
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexDestroy);
+ MemoryWrite1Byte(thr, pc, addr);
+ SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
+ if (s == 0)
+ return;
+ if (s->owner_tid != SyncVar::kInvalidTid && !s->is_broken) {
+ s->is_broken = true;
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(s);
+ rep.AddLocation(s->addr, 1);
+ OutputReport(rep);
+ }
+ DestroyAndFree(s);
+}
+
+void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
+ MemoryRead1Byte(thr, pc, addr);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else {
+ TsanPrintf("ThreadSanitizer WARNING: double lock\n");
+ }
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexLock);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.acquire(&s->clock);
+ StatInc(thr, StatSyncAcquire);
+ thr->clock.acquire(&s->read_clock);
+ StatInc(thr, StatSyncAcquire);
+ } else if (!s->is_recursive) {
+ StatInc(thr, StatMutexRecLock);
+ }
+ s->recursion++;
+ s->mtx.Unlock();
+}
+
+void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
+ MemoryRead1Byte(thr, pc, addr);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+ if (s->recursion == 0) {
+ if (!s->is_broken) {
+ s->is_broken = true;
+ TsanPrintf("ThreadSanitizer WARNING: unlock of unlocked mutex\n");
+ }
+ } else if (s->owner_tid != thr->tid) {
+ if (!s->is_broken) {
+ s->is_broken = true;
+ TsanPrintf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
+ }
+ } else {
+ s->recursion--;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&s->clock);
+ StatInc(thr, StatSyncRelease);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ }
+ s->mtx.Unlock();
+}
+
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexReadLock);
+ MemoryRead1Byte(thr, pc, addr);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
+ if (s->owner_tid != SyncVar::kInvalidTid)
+ TsanPrintf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.acquire(&s->clock);
+ StatInc(thr, StatSyncAcquire);
+ s->mtx.ReadUnlock();
+}
+
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexReadUnlock);
+ MemoryRead1Byte(thr, pc, addr);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+ if (s->owner_tid != SyncVar::kInvalidTid)
+ TsanPrintf("ThreadSanitizer WARNING: read unlock of a write "
+ "locked mutex\n");
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&s->read_clock);
+ StatInc(thr, StatSyncRelease);
+ s->mtx.Unlock();
+}
+
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
+ MemoryRead1Byte(thr, pc, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ // Seems to be read unlock.
+ StatInc(thr, StatMutexReadUnlock);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&s->read_clock);
+ StatInc(thr, StatSyncRelease);
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ // FIXME: Refactor me, plz.
+ // The sequence of events is quite tricky and doubled in several places.
+ // First, it's a bug to increment the epoch w/o writing to the trace.
+ // Then, the acquire/release logic can be factored out as well.
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&s->clock);
+ StatInc(thr, StatSyncRelease);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ } else if (!s->is_broken) {
+ s->is_broken = true;
+ TsanPrintf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
+ }
+ s->mtx.Unlock();
+}
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.acquire(&s->clock);
+ StatInc(thr, StatSyncAcquire);
+ s->mtx.ReadUnlock();
+}
+
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.release(&s->clock);
+ StatInc(thr, StatSyncRelease);
+ s->mtx.Unlock();
+}
+
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.ReleaseStore(&s->clock);
+ StatInc(thr, StatSyncRelease);
+ s->mtx.Unlock();
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
new file mode 100644
index 0000000..f66e17e
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -0,0 +1,372 @@
+//===-- tsan_rtl_report.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+
+namespace __sanitizer {
+using namespace __tsan;
+
+void CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) {
+ ScopedInRtl in_rtl;
+ TsanPrintf("FATAL: ThreadSanitizer CHECK failed: "
+ "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
+ file, line, cond, (uptr)v1, (uptr)v2);
+ Die();
+}
+
+} // namespace __sanitizer
+
+namespace __tsan {
+
+// Can be overridden by an application/test to intercept reports.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnReport(const ReportDesc *rep, bool suppressed);
+#else
+bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
+ (void)rep;
+ return suppressed;
+}
+#endif
+
+static void StackStripMain(ReportStack *stack) {
+ ReportStack *last_frame = 0;
+ ReportStack *last_frame2 = 0;
+ const char *prefix = "__interceptor_";
+ uptr prefix_len = internal_strlen(prefix);
+ const char *path_prefix = flags()->strip_path_prefix;
+ uptr path_prefix_len = internal_strlen(path_prefix);
+ char *pos;
+ for (ReportStack *ent = stack; ent; ent = ent->next) {
+ if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
+ ent->func += prefix_len;
+ if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
+ ent->file = pos + path_prefix_len;
+ if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
+ ent->file += 2;
+ last_frame2 = last_frame;
+ last_frame = ent;
+ }
+
+ if (last_frame2 == 0)
+ return;
+ const char *last = last_frame->func;
+#ifndef TSAN_GO
+ const char *last2 = last_frame2->func;
+ // Strip frame above 'main'
+ if (last2 && 0 == internal_strcmp(last2, "main")) {
+ last_frame2->next = 0;
+ // Strip our internal thread start routine.
+ } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
+ last_frame2->next = 0;
+ // Strip global ctors init.
+ } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ last_frame2->next = 0;
+ // If both are 0, then we probably just failed to symbolize.
+ } else if (last || last2) {
+    // Ensure that we recovered the stack completely. A trimmed stack
+    // can actually happen if we do not instrument some code,
+    // so it's only a DCHECK. However, we must try hard not to miss it
+    // when it is our own fault.
+ TsanPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
+ }
+#else
+ if (last && 0 == internal_strcmp(last, "schedunlock"))
+ last_frame2->next = 0;
+#endif
+}
+
+static ReportStack *SymbolizeStack(const StackTrace& trace) {
+ if (trace.IsEmpty())
+ return 0;
+ ReportStack *stack = 0;
+ for (uptr si = 0; si < trace.Size(); si++) {
+    // We obtain the return address, that is, the address of the next
+    // instruction, so offset it by 1 byte.
+ bool is_last = (si == trace.Size() - 1);
+ ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
+ CHECK_NE(ent, 0);
+ ReportStack *last = ent;
+ while (last->next) {
+ last->pc += !is_last;
+ last = last->next;
+ }
+ last->pc += !is_last;
+ last->next = stack;
+ stack = ent;
+ }
+ StackStripMain(stack);
+ return stack;
+}
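
Editor's note: a toy illustration (all addresses made up) of why the return address is decremented before symbolization.

#include <cstdint>
#include <cstdio>

int main() {
  // A 5-byte call instruction at 0x401000 pushes the address of the next
  // instruction, 0x401005, as the return address.
  uint64_t call_insn   = 0x401000;
  uint64_t return_addr = 0x401005;
  // Symbolizing return_addr - 1 lands inside the call instruction itself,
  // so the reported location is the call site rather than the line after it.
  printf("call at %#llx, symbolize %#llx\n",
         (unsigned long long)call_insn,
         (unsigned long long)(return_addr - 1));
  return 0;
}
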
+
+ScopedReport::ScopedReport(ReportType typ) {
+ ctx_ = CTX();
+ void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
+ rep_ = new(mem) ReportDesc;
+ rep_->typ = typ;
+ ctx_->report_mtx.Lock();
+}
+
+ScopedReport::~ScopedReport() {
+ ctx_->report_mtx.Unlock();
+ rep_->~ReportDesc();
+ internal_free(rep_);
+}
+
+void ScopedReport::AddStack(const StackTrace *stack) {
+ ReportStack **rs = rep_->stacks.PushBack();
+ *rs = SymbolizeStack(*stack);
+}
+
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
+ const StackTrace *stack) {
+ void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
+ ReportMop *mop = new(mem) ReportMop;
+ rep_->mops.PushBack(mop);
+ mop->tid = s.tid();
+ mop->addr = addr + s.addr0();
+ mop->size = s.size();
+ mop->write = s.is_write();
+ mop->nmutex = 0;
+ mop->stack = SymbolizeStack(*stack);
+}
+
+void ScopedReport::AddThread(const ThreadContext *tctx) {
+ void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
+ ReportThread *rt = new(mem) ReportThread();
+ rep_->threads.PushBack(rt);
+ rt->id = tctx->tid;
+ rt->running = (tctx->status == ThreadStatusRunning);
+ rt->stack = SymbolizeStack(tctx->creation_stack);
+}
+
+void ScopedReport::AddMutex(const SyncVar *s) {
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex();
+ rep_->mutexes.PushBack(rm);
+ rm->id = 42;
+ rm->stack = SymbolizeStack(s->creation_stack);
+}
+
+void ScopedReport::AddLocation(uptr addr, uptr size) {
+ ReportStack *symb = SymbolizeData(addr);
+ if (symb) {
+ void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
+ ReportLocation *loc = new(mem) ReportLocation();
+ rep_->locs.PushBack(loc);
+ loc->type = ReportLocationGlobal;
+ loc->addr = addr;
+ loc->size = size;
+ loc->tid = 0;
+ loc->name = symb->func;
+ loc->file = symb->file;
+ loc->line = symb->line;
+ loc->stack = 0;
+ internal_free(symb);
+ }
+}
+
+const ReportDesc *ScopedReport::GetReport() const {
+ return rep_;
+}
+
+static void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
+ ThreadContext *tctx = CTX()->threads[tid];
+ if (tctx == 0)
+ return;
+ Trace* trace = 0;
+ if (tctx->status == ThreadStatusRunning) {
+ CHECK(tctx->thr);
+ trace = &tctx->thr->trace;
+ } else if (tctx->status == ThreadStatusFinished
+ || tctx->status == ThreadStatusDead) {
+ if (tctx->dead_info == 0)
+ return;
+ trace = &tctx->dead_info->trace;
+ } else {
+ return;
+ }
+ Lock l(&trace->mtx);
+ const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
+ TraceHeader* hdr = &trace->headers[partidx];
+ if (epoch < hdr->epoch0)
+ return;
+ const u64 eend = epoch % kTraceSize;
+ const u64 ebegin = eend / kTracePartSize * kTracePartSize;
+ DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
+ tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
+ InternalScopedBuf<uptr> stack(1024); // FIXME: de-hardcode 1024
+ for (uptr i = 0; i < hdr->stack0.Size(); i++) {
+ stack[i] = hdr->stack0.Get(i);
+ DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
+ }
+ uptr pos = hdr->stack0.Size();
+ for (uptr i = ebegin; i <= eend; i++) {
+ Event ev = trace->events[i];
+ EventType typ = (EventType)(ev >> 61);
+ uptr pc = (uptr)(ev & 0xffffffffffffull);
+ DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
+ if (typ == EventTypeMop) {
+ stack[pos] = pc;
+ } else if (typ == EventTypeFuncEnter) {
+ stack[pos++] = pc;
+ } else if (typ == EventTypeFuncExit) {
+ // Since we have full stacks, this should never happen.
+ DCHECK_GT(pos, 0);
+ if (pos > 0)
+ pos--;
+ }
+ for (uptr j = 0; j <= pos; j++)
+ DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ }
+ if (pos == 0 && stack[0] == 0)
+ return;
+ pos++;
+ stk->Init(stack, pos);
+}
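
Editor's note: a worked example of the trace-part index arithmetic above. The real constants come from tsan_trace.h and are not shown in this hunk; the values below are assumptions used only to make the numbers concrete.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kTraceParts    = 8;          // assumed
  const uint64_t kTracePartSize = 16 * 1024;  // assumed
  const uint64_t kTraceSize     = kTracePartSize * kTraceParts;

  // An epoch 3 full trace rings plus 5 parts plus 100 events in.
  uint64_t epoch = 3 * kTraceSize + 5 * kTracePartSize + 100;

  uint64_t partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
  uint64_t eend    = epoch % kTraceSize;
  uint64_t ebegin  = eend / kTracePartSize * kTracePartSize;

  assert(partidx == 5);                   // the event sits in part #5
  assert(ebegin == 5 * kTracePartSize);   // part start within the ring
  assert(eend == ebegin + 100);           // position of the event itself
  return 0;
}
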
+
+static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
+ uptr addr_min, uptr addr_max) {
+ Context *ctx = CTX();
+ bool equal_stack = false;
+ RacyStacks hash = {};
+ if (flags()->suppress_equal_stacks) {
+ hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
+ equal_stack = true;
+ break;
+ }
+ }
+ }
+ bool equal_address = false;
+ RacyAddress ra0 = {addr_min, addr_max};
+ if (flags()->suppress_equal_addresses) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
+ equal_address = true;
+ break;
+ }
+ }
+ }
+ if (equal_stack || equal_address) {
+ if (!equal_stack)
+ ctx->racy_stacks.PushBack(hash);
+ if (!equal_address)
+ ctx->racy_addresses.PushBack(ra0);
+ return true;
+ }
+ return false;
+}
+
+static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
+ uptr addr_min, uptr addr_max) {
+ Context *ctx = CTX();
+ if (flags()->suppress_equal_stacks) {
+ RacyStacks hash;
+ hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ ctx->racy_stacks.PushBack(hash);
+ }
+ if (flags()->suppress_equal_addresses) {
+ RacyAddress ra0 = {addr_min, addr_max};
+ ctx->racy_addresses.PushBack(ra0);
+ }
+}
+
+bool OutputReport(const ScopedReport &srep, const ReportStack *suppress_stack) {
+ const ReportDesc *rep = srep.GetReport();
+ bool suppressed = IsSuppressed(rep->typ, suppress_stack);
+ suppressed = OnReport(rep, suppressed);
+ if (suppressed)
+ return false;
+ PrintReport(rep);
+ CTX()->nreported++;
+ return true;
+}
+
+void ReportRace(ThreadState *thr) {
+ ScopedInRtl in_rtl;
+
+ bool freed = false;
+ {
+ Shadow s(thr->racy_state[1]);
+ freed = s.GetFreedAndReset();
+ thr->racy_state[1] = s.raw();
+ }
+
+ uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
+ uptr addr_min = 0;
+ uptr addr_max = 0;
+ {
+ uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
+ uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
+ uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
+ uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
+ addr_min = min(a0, a1);
+ addr_max = max(e0, e1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
+ return;
+ }
+
+ Context *ctx = CTX();
+ Lock l0(&ctx->thread_mtx);
+
+ ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
+ const uptr kMop = 2;
+ StackTrace traces[kMop];
+ for (uptr i = 0; i < kMop; i++) {
+ Shadow s(thr->racy_state[i]);
+ RestoreStack(s.tid(), s.epoch(), &traces[i]);
+ }
+
+ if (HandleRacyStacks(thr, traces, addr_min, addr_max))
+ return;
+
+ for (uptr i = 0; i < kMop; i++) {
+ Shadow s(thr->racy_state[i]);
+ rep.AddMemoryAccess(addr, s, &traces[i]);
+ }
+
+ // Ensure that we have at least something for the current thread.
+ CHECK_EQ(traces[0].IsEmpty(), false);
+
+ for (uptr i = 0; i < kMop; i++) {
+ FastState s(thr->racy_state[i]);
+ ThreadContext *tctx = ctx->threads[s.tid()];
+ if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
+ continue;
+ rep.AddThread(tctx);
+ }
+
+ if (!OutputReport(rep, rep.GetReport()->mops[0]->stack))
+ return;
+
+ AddRacyStacks(thr, traces, addr_min, addr_max);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
new file mode 100644
index 0000000..f7d5f13
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -0,0 +1,394 @@
+//===-- tsan_rtl_thread.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+
+namespace __tsan {
+
+#ifndef TSAN_GO
+const int kThreadQuarantineSize = 16;
+#else
+const int kThreadQuarantineSize = 64;
+#endif
+
+static void MaybeReportThreadLeak(ThreadContext *tctx) {
+ if (tctx->detached)
+ return;
+ if (tctx->status != ThreadStatusCreated
+ && tctx->status != ThreadStatusRunning
+ && tctx->status != ThreadStatusFinished)
+ return;
+ ScopedReport rep(ReportTypeThreadLeak);
+ rep.AddThread(tctx);
+ OutputReport(rep);
+}
+
+void ThreadFinalize(ThreadState *thr) {
+ CHECK_GT(thr->in_rtl, 0);
+ if (!flags()->report_thread_leaks)
+ return;
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ for (unsigned i = 0; i < kMaxTid; i++) {
+ ThreadContext *tctx = ctx->threads[i];
+ if (tctx == 0)
+ continue;
+ MaybeReportThreadLeak(tctx);
+ }
+}
+
+static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK(tctx->status == ThreadStatusRunning
+ || tctx->status == ThreadStatusFinished);
+ DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id);
+ tctx->status = ThreadStatusDead;
+ tctx->user_id = 0;
+ tctx->sync.Reset();
+
+ // Put to dead list.
+ tctx->dead_next = 0;
+ if (ctx->dead_list_size == 0)
+ ctx->dead_list_head = tctx;
+ else
+ ctx->dead_list_tail->dead_next = tctx;
+ ctx->dead_list_tail = tctx;
+ ctx->dead_list_size++;
+}
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ CHECK_GT(thr->in_rtl, 0);
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ StatInc(thr, StatThreadCreate);
+ int tid = -1;
+ ThreadContext *tctx = 0;
+ if (ctx->dead_list_size > kThreadQuarantineSize
+ || ctx->thread_seq >= kMaxTid) {
+ if (ctx->dead_list_size == 0) {
+ TsanPrintf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
+ kMaxTid);
+ Die();
+ }
+ StatInc(thr, StatThreadReuse);
+ tctx = ctx->dead_list_head;
+ ctx->dead_list_head = tctx->dead_next;
+ ctx->dead_list_size--;
+ if (ctx->dead_list_size == 0) {
+ CHECK_EQ(tctx->dead_next, 0);
+ ctx->dead_list_head = 0;
+ }
+ CHECK_EQ(tctx->status, ThreadStatusDead);
+ tctx->status = ThreadStatusInvalid;
+ tctx->reuse_count++;
+ tctx->sync.Reset();
+ tid = tctx->tid;
+ DestroyAndFree(tctx->dead_info);
+ } else {
+ StatInc(thr, StatThreadMaxTid);
+ tid = ctx->thread_seq++;
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
+ tctx = new(mem) ThreadContext(tid);
+ ctx->threads[tid] = tctx;
+ }
+ CHECK_NE(tctx, 0);
+ CHECK_GE(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
+ CHECK_EQ(tctx->status, ThreadStatusInvalid);
+ ctx->alive_threads++;
+ if (ctx->max_alive_threads < ctx->alive_threads) {
+ ctx->max_alive_threads++;
+ CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
+ StatInc(thr, StatThreadMaxAlive);
+ }
+ tctx->status = ThreadStatusCreated;
+ tctx->thr = 0;
+ tctx->user_id = uid;
+ tctx->unique_id = ctx->unique_thread_seq++;
+ tctx->detached = detached;
+ if (tid) {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&tctx->sync);
+ StatInc(thr, StatSyncRelease);
+
+ tctx->creation_stack.ObtainCurrent(thr, pc);
+ }
+ return tid;
+}
+
+void ThreadStart(ThreadState *thr, int tid) {
+ CHECK_GT(thr->in_rtl, 0);
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
+
+ if (tid) {
+ if (stk_addr && stk_size) {
+ MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size);
+ }
+
+ if (tls_addr && tls_size) {
+ // Check that the thr object is in tls;
+ const uptr thr_beg = (uptr)thr;
+ const uptr thr_end = (uptr)thr + sizeof(*thr);
+ CHECK_GE(thr_beg, tls_addr);
+ CHECK_LE(thr_beg, tls_addr + tls_size);
+ CHECK_GE(thr_end, tls_addr);
+ CHECK_LE(thr_end, tls_addr + tls_size);
+ // Since the thr object is huge, skip it.
+ MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
+ MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end);
+ }
+ }
+
+ Lock l(&CTX()->thread_mtx);
+ ThreadContext *tctx = CTX()->threads[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(tctx->status, ThreadStatusCreated);
+ tctx->status = ThreadStatusRunning;
+ tctx->epoch0 = tctx->epoch1 + 1;
+ tctx->epoch1 = (u64)-1;
+ new(thr) ThreadState(CTX(), tid, tctx->epoch0, stk_addr, stk_size,
+ tls_addr, tls_size);
+#ifdef TSAN_GO
+ // Setup dynamic shadow stack.
+ const int kInitStackSize = 8;
+ thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
+ kInitStackSize * sizeof(uptr));
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
+#endif
+ tctx->thr = thr;
+ thr->fast_synch_epoch = tctx->epoch0;
+ thr->clock.set(tid, tctx->epoch0);
+ thr->clock.acquire(&tctx->sync);
+ StatInc(thr, StatSyncAcquire);
+ DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+ "tls_addr=%zx tls_size=%zx\n",
+ tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
+ thr->is_alive = true;
+}
+
+void ThreadFinish(ThreadState *thr) {
+ CHECK_GT(thr->in_rtl, 0);
+ StatInc(thr, StatThreadFinish);
+ // FIXME: Treat it as write.
+ if (thr->stk_addr && thr->stk_size)
+ MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size);
+ if (thr->tls_addr && thr->tls_size) {
+ const uptr thr_beg = (uptr)thr;
+ const uptr thr_end = (uptr)thr + sizeof(*thr);
+ // Since the thr object is huge, skip it.
+ MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr);
+ MemoryResetRange(thr, /*pc=*/ 5,
+ thr_end, thr->tls_addr + thr->tls_size - thr_end);
+ }
+ thr->is_alive = false;
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ ThreadContext *tctx = ctx->threads[thr->tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(tctx->status, ThreadStatusRunning);
+ CHECK_GT(ctx->alive_threads, 0);
+ ctx->alive_threads--;
+ if (tctx->detached) {
+ ThreadDead(thr, tctx);
+ } else {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&tctx->sync);
+ StatInc(thr, StatSyncRelease);
+ tctx->status = ThreadStatusFinished;
+ }
+
+  // Save info about the thread.
+ tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
+ ThreadDeadInfo();
+ internal_memcpy(&tctx->dead_info->trace.events[0],
+ &thr->trace.events[0], sizeof(thr->trace.events));
+ for (int i = 0; i < kTraceParts; i++) {
+ tctx->dead_info->trace.headers[i].stack0.CopyFrom(
+ thr->trace.headers[i].stack0);
+ }
+ tctx->epoch1 = thr->fast_state.epoch();
+
+ thr->~ThreadState();
+ StatAggregate(ctx->stat, thr->stat);
+ tctx->thr = 0;
+}
+
+int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
+ CHECK_GT(thr->in_rtl, 0);
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ int res = -1;
+ for (unsigned tid = 0; tid < kMaxTid; tid++) {
+ ThreadContext *tctx = ctx->threads[tid];
+ if (tctx != 0 && tctx->user_id == uid
+ && tctx->status != ThreadStatusInvalid) {
+ tctx->user_id = 0;
+ res = tid;
+ break;
+ }
+ }
+ DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
+ return res;
+}
+
+void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ ThreadContext *tctx = ctx->threads[tid];
+ if (tctx->status == ThreadStatusInvalid) {
+ TsanPrintf("ThreadSanitizer: join of non-existent thread\n");
+ return;
+ }
+ CHECK_EQ(tctx->detached, false);
+ CHECK_EQ(tctx->status, ThreadStatusFinished);
+ thr->clock.acquire(&tctx->sync);
+ StatInc(thr, StatSyncAcquire);
+ ThreadDead(thr, tctx);
+}
+
+void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ ThreadContext *tctx = ctx->threads[tid];
+ if (tctx->status == ThreadStatusInvalid) {
+ TsanPrintf("ThreadSanitizer: detach of non-existent thread\n");
+ return;
+ }
+ if (tctx->status == ThreadStatusFinished) {
+ ThreadDead(thr, tctx);
+ } else {
+ tctx->detached = true;
+ }
+}
+
+void ThreadFinalizerGoroutine(ThreadState *thr) {
+ thr->clock.Disable(thr->tid);
+}
+
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write) {
+ if (size == 0)
+ return;
+
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
+ thr->tid, (void*)pc, (void*)addr,
+ (int)size, is_write);
+
+#if TSAN_DEBUG
+ if (!IsAppMem(addr)) {
+ TsanPrintf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ TsanPrintf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+ if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
+ TsanPrintf("Bad shadow addr %p (%zx)\n",
+ shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
+ DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
+ }
+#endif
+
+ StatInc(thr, StatMopRange);
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+
+ bool unaligned = (addr % kShadowCell) != 0;
+
+ // Handle unaligned beginning, if any.
+ for (; addr % kShadowCell && size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
+ shadow_mem, cur);
+ }
+ if (unaligned)
+ shadow_mem += kShadowCnt;
+ // Handle middle part, if any.
+ for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
+ int const kAccessSizeLog = 3;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
+ shadow_mem, cur);
+ shadow_mem += kShadowCnt;
+ }
+ // Handle ending, if any.
+ for (; size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
+ shadow_mem, cur);
+ }
+}
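
Editor's note: a worked example of the head/middle/tail split performed above, assuming an 8-byte shadow cell (kShadowCell == 8); the loop structure deliberately mirrors MemoryAccessRange.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kShadowCell = 8;          // assumed cell size in bytes
  uint64_t addr = 0x1003, size = 22;
  uint64_t head = 0, cells = 0, tail = 0;

  for (; addr % kShadowCell && size; addr++, size--) head++;    // 1-byte head
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell)
    cells++;                                                    // 8-byte middle
  for (; size; addr++, size--) tail++;                          // 1-byte tail

  assert(head == 5 && cells == 2 && tail == 1);
  assert(addr == 0x1003 + 22);
  return 0;
}
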
+
+void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) {
+ MemoryAccess(thr, pc, addr, 0, 0);
+}
+
+void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) {
+ MemoryAccess(thr, pc, addr, 0, 1);
+}
+
+void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) {
+ MemoryAccess(thr, pc, addr, 3, 0);
+}
+
+void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) {
+ MemoryAccess(thr, pc, addr, 3, 1);
+}
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
new file mode 100644
index 0000000..a7c33a5
--- /dev/null
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -0,0 +1,249 @@
+//===-- tsan_stat.cc ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stat.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void StatAggregate(u64 *dst, u64 *src) {
+ if (!kCollectStats)
+ return;
+ for (int i = 0; i < StatCnt; i++)
+ dst[i] += src[i];
+}
+
+void StatOutput(u64 *stat) {
+ if (!kCollectStats)
+ return;
+
+ stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
+
+ static const char *name[StatCnt] = {};
+ name[StatMop] = "Memory accesses ";
+ name[StatMopRead] = " Including reads ";
+ name[StatMopWrite] = " writes ";
+ name[StatMop1] = " Including size 1 ";
+ name[StatMop2] = " size 2 ";
+ name[StatMop4] = " size 4 ";
+ name[StatMop8] = " size 8 ";
+ name[StatMopSame] = " Including same ";
+ name[StatMopRange] = " Including range ";
+ name[StatShadowProcessed] = "Shadow processed ";
+ name[StatShadowZero] = " Including empty ";
+ name[StatShadowNonZero] = " Including non empty ";
+ name[StatShadowSameSize] = " Including same size ";
+ name[StatShadowIntersect] = " intersect ";
+ name[StatShadowNotIntersect] = " not intersect ";
+ name[StatShadowSameThread] = " Including same thread ";
+ name[StatShadowAnotherThread] = " another thread ";
+ name[StatShadowReplace] = " Including evicted ";
+
+ name[StatFuncEnter] = "Function entries ";
+ name[StatFuncExit] = "Function exits ";
+ name[StatEvents] = "Events collected ";
+
+ name[StatThreadCreate] = "Total threads created ";
+ name[StatThreadFinish] = " threads finished ";
+ name[StatThreadReuse] = " threads reused ";
+ name[StatThreadMaxTid] = " max tid ";
+ name[StatThreadMaxAlive] = " max alive threads ";
+
+ name[StatMutexCreate] = "Mutexes created ";
+ name[StatMutexDestroy] = " destroyed ";
+ name[StatMutexLock] = " lock ";
+ name[StatMutexUnlock] = " unlock ";
+ name[StatMutexRecLock] = " recursive lock ";
+ name[StatMutexRecUnlock] = " recursive unlock ";
+ name[StatMutexReadLock] = " read lock ";
+ name[StatMutexReadUnlock] = " read unlock ";
+
+ name[StatSyncCreated] = "Sync objects created ";
+ name[StatSyncDestroyed] = " destroyed ";
+ name[StatSyncAcquire] = " acquired ";
+ name[StatSyncRelease] = " released ";
+
+ name[StatAtomic] = "Atomic operations ";
+ name[StatAtomicLoad] = " Including load ";
+ name[StatAtomicStore] = " store ";
+ name[StatAtomicExchange] = " exchange ";
+ name[StatAtomicFetchAdd] = " fetch_add ";
+ name[StatAtomicCAS] = " compare_exchange ";
+ name[StatAtomicFence] = " fence ";
+ name[StatAtomicRelaxed] = " Including relaxed ";
+ name[StatAtomicConsume] = " consume ";
+ name[StatAtomicAcquire] = " acquire ";
+ name[StatAtomicRelease] = " release ";
+ name[StatAtomicAcq_Rel] = " acq_rel ";
+ name[StatAtomicSeq_Cst] = " seq_cst ";
+ name[StatAtomic1] = " Including size 1 ";
+ name[StatAtomic2] = " size 2 ";
+ name[StatAtomic4] = " size 4 ";
+ name[StatAtomic8] = " size 8 ";
+
+ name[StatInterceptor] = "Interceptors ";
+ name[StatInt_longjmp] = " longjmp ";
+ name[StatInt_siglongjmp] = " siglongjmp ";
+ name[StatInt_malloc] = " malloc ";
+ name[StatInt_calloc] = " calloc ";
+ name[StatInt_realloc] = " realloc ";
+ name[StatInt_free] = " free ";
+ name[StatInt_cfree] = " cfree ";
+ name[StatInt_mmap] = " mmap ";
+ name[StatInt_mmap64] = " mmap64 ";
+ name[StatInt_munmap] = " munmap ";
+ name[StatInt_memalign] = " memalign ";
+ name[StatInt_valloc] = " valloc ";
+ name[StatInt_pvalloc] = " pvalloc ";
+ name[StatInt_posix_memalign] = " posix_memalign ";
+ name[StatInt__Znwm] = " _Znwm ";
+ name[StatInt__ZnwmRKSt9nothrow_t] = " _ZnwmRKSt9nothrow_t ";
+ name[StatInt__Znam] = " _Znam ";
+ name[StatInt__ZnamRKSt9nothrow_t] = " _ZnamRKSt9nothrow_t ";
+ name[StatInt__ZdlPv] = " _ZdlPv ";
+ name[StatInt__ZdlPvRKSt9nothrow_t] = " _ZdlPvRKSt9nothrow_t ";
+ name[StatInt__ZdaPv] = " _ZdaPv ";
+ name[StatInt__ZdaPvRKSt9nothrow_t] = " _ZdaPvRKSt9nothrow_t ";
+ name[StatInt_strlen] = " strlen ";
+ name[StatInt_memset] = " memset ";
+ name[StatInt_memcpy] = " memcpy ";
+ name[StatInt_strcmp] = " strcmp ";
+ name[StatInt_memchr] = " memchr ";
+ name[StatInt_memrchr] = " memrchr ";
+ name[StatInt_memmove] = " memmove ";
+ name[StatInt_memcmp] = " memcmp ";
+ name[StatInt_strchr] = " strchr ";
+ name[StatInt_strchrnul] = " strchrnul ";
+ name[StatInt_strrchr] = " strrchr ";
+ name[StatInt_strncmp] = " strncmp ";
+ name[StatInt_strcpy] = " strcpy ";
+ name[StatInt_strncpy] = " strncpy ";
+ name[StatInt_strstr] = " strstr ";
+ name[StatInt_atexit] = " atexit ";
+ name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
+ name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
+ name[StatInt_pthread_create] = " pthread_create ";
+ name[StatInt_pthread_join] = " pthread_join ";
+ name[StatInt_pthread_detach] = " pthread_detach ";
+ name[StatInt_pthread_mutex_init] = " pthread_mutex_init ";
+ name[StatInt_pthread_mutex_destroy] = " pthread_mutex_destroy ";
+ name[StatInt_pthread_mutex_lock] = " pthread_mutex_lock ";
+ name[StatInt_pthread_mutex_trylock] = " pthread_mutex_trylock ";
+ name[StatInt_pthread_mutex_timedlock] = " pthread_mutex_timedlock ";
+ name[StatInt_pthread_mutex_unlock] = " pthread_mutex_unlock ";
+ name[StatInt_pthread_spin_init] = " pthread_spin_init ";
+ name[StatInt_pthread_spin_destroy] = " pthread_spin_destroy ";
+ name[StatInt_pthread_spin_lock] = " pthread_spin_lock ";
+ name[StatInt_pthread_spin_trylock] = " pthread_spin_trylock ";
+ name[StatInt_pthread_spin_unlock] = " pthread_spin_unlock ";
+ name[StatInt_pthread_rwlock_init] = " pthread_rwlock_init ";
+ name[StatInt_pthread_rwlock_destroy] = " pthread_rwlock_destroy ";
+ name[StatInt_pthread_rwlock_rdlock] = " pthread_rwlock_rdlock ";
+ name[StatInt_pthread_rwlock_tryrdlock] = " pthread_rwlock_tryrdlock ";
+ name[StatInt_pthread_rwlock_timedrdlock]
+ = " pthread_rwlock_timedrdlock ";
+ name[StatInt_pthread_rwlock_wrlock] = " pthread_rwlock_wrlock ";
+ name[StatInt_pthread_rwlock_trywrlock] = " pthread_rwlock_trywrlock ";
+ name[StatInt_pthread_rwlock_timedwrlock]
+ = " pthread_rwlock_timedwrlock ";
+ name[StatInt_pthread_rwlock_unlock] = " pthread_rwlock_unlock ";
+ name[StatInt_pthread_cond_init] = " pthread_cond_init ";
+ name[StatInt_pthread_cond_destroy] = " pthread_cond_destroy ";
+ name[StatInt_pthread_cond_signal] = " pthread_cond_signal ";
+ name[StatInt_pthread_cond_broadcast] = " pthread_cond_broadcast ";
+ name[StatInt_pthread_cond_wait] = " pthread_cond_wait ";
+ name[StatInt_pthread_cond_timedwait] = " pthread_cond_timedwait ";
+ name[StatInt_pthread_barrier_init] = " pthread_barrier_init ";
+ name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
+ name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
+ name[StatInt_pthread_once] = " pthread_once ";
+ name[StatInt_sem_init] = " sem_init ";
+ name[StatInt_sem_destroy] = " sem_destroy ";
+ name[StatInt_sem_wait] = " sem_wait ";
+ name[StatInt_sem_trywait] = " sem_trywait ";
+ name[StatInt_sem_timedwait] = " sem_timedwait ";
+ name[StatInt_sem_post] = " sem_post ";
+ name[StatInt_sem_getvalue] = " sem_getvalue ";
+ name[StatInt_read] = " read ";
+ name[StatInt_pread] = " pread ";
+ name[StatInt_pread64] = " pread64 ";
+ name[StatInt_readv] = " readv ";
+ name[StatInt_preadv64] = " preadv64 ";
+ name[StatInt_write] = " write ";
+ name[StatInt_pwrite] = " pwrite ";
+ name[StatInt_pwrite64] = " pwrite64 ";
+ name[StatInt_writev] = " writev ";
+ name[StatInt_pwritev64] = " pwritev64 ";
+ name[StatInt_send] = " send ";
+ name[StatInt_sendmsg] = " sendmsg ";
+ name[StatInt_recv] = " recv ";
+ name[StatInt_recvmsg] = " recvmsg ";
+ name[StatInt_unlink] = " unlink ";
+ name[StatInt_fopen] = " fopen ";
+ name[StatInt_fread] = " fread ";
+ name[StatInt_fwrite] = " fwrite ";
+ name[StatInt_puts] = " puts ";
+ name[StatInt_rmdir] = " rmdir ";
+ name[StatInt_opendir] = " opendir ";
+ name[StatInt_epoll_ctl] = " epoll_ctl ";
+ name[StatInt_epoll_wait] = " epoll_wait ";
+ name[StatInt_sigaction] = " sigaction ";
+
+ name[StatAnnotation] = "Dynamic annotations ";
+ name[StatAnnotateHappensBefore] = " HappensBefore ";
+ name[StatAnnotateHappensAfter] = " HappensAfter ";
+ name[StatAnnotateCondVarSignal] = " CondVarSignal ";
+ name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll ";
+ name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
+ name[StatAnnotateCondVarWait] = " CondVarWait ";
+ name[StatAnnotateRWLockCreate] = " RWLockCreate ";
+ name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
+ name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
+ name[StatAnnotateRWLockReleased] = " RWLockReleased ";
+ name[StatAnnotateTraceMemory] = " TraceMemory ";
+ name[StatAnnotateFlushState] = " FlushState ";
+ name[StatAnnotateNewMemory] = " NewMemory ";
+ name[StatAnnotateNoOp] = " NoOp ";
+ name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces ";
+ name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection ";
+ name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar ";
+ name[StatAnnotatePCQGet] = " PCQGet ";
+ name[StatAnnotatePCQPut] = " PCQPut ";
+ name[StatAnnotatePCQDestroy] = " PCQDestroy ";
+ name[StatAnnotatePCQCreate] = " PCQCreate ";
+ name[StatAnnotateExpectRace] = " ExpectRace ";
+ name[StatAnnotateBenignRaceSized] = " BenignRaceSized ";
+ name[StatAnnotateBenignRace] = " BenignRace ";
+ name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin ";
+ name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd ";
+ name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin ";
+ name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd ";
+ name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
+ name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
+ name[StatAnnotateThreadName] = " ThreadName ";
+
+ name[StatMtxTotal] = "Contentionz ";
+ name[StatMtxTrace] = " Trace ";
+ name[StatMtxThreads] = " Threads ";
+ name[StatMtxReport] = " Report ";
+ name[StatMtxSyncVar] = " SyncVar ";
+ name[StatMtxSyncTab] = " SyncTab ";
+ name[StatMtxSlab] = " Slab ";
+ name[StatMtxAtExit] = " Atexit ";
+ name[StatMtxAnnotations] = " Annotations ";
+
+ TsanPrintf("Statistics:\n");
+ for (int i = 0; i < StatCnt; i++)
+ TsanPrintf("%s: %zu\n", name[i], (uptr)stat[i]);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h
new file mode 100644
index 0000000..71b1b13
--- /dev/null
+++ b/lib/tsan/rtl/tsan_stat.h
@@ -0,0 +1,254 @@
+//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_STAT_H
+#define TSAN_STAT_H
+
+namespace __tsan {
+
+enum StatType {
+ // Memory access processing related stuff.
+ StatMop,
+ StatMopRead,
+ StatMopWrite,
+  StatMop1, // These must be consecutive.
+ StatMop2,
+ StatMop4,
+ StatMop8,
+ StatMopSame,
+ StatMopRange,
+ StatShadowProcessed,
+ StatShadowZero,
+ StatShadowNonZero, // Derived.
+ StatShadowSameSize,
+ StatShadowIntersect,
+ StatShadowNotIntersect,
+ StatShadowSameThread,
+ StatShadowAnotherThread,
+ StatShadowReplace,
+
+ // Func processing.
+ StatFuncEnter,
+ StatFuncExit,
+
+ // Trace processing.
+ StatEvents,
+
+ // Threads.
+ StatThreadCreate,
+ StatThreadFinish,
+ StatThreadReuse,
+ StatThreadMaxTid,
+ StatThreadMaxAlive,
+
+ // Mutexes.
+ StatMutexCreate,
+ StatMutexDestroy,
+ StatMutexLock,
+ StatMutexUnlock,
+ StatMutexRecLock,
+ StatMutexRecUnlock,
+ StatMutexReadLock,
+ StatMutexReadUnlock,
+
+ // Synchronization.
+ StatSyncCreated,
+ StatSyncDestroyed,
+ StatSyncAcquire,
+ StatSyncRelease,
+
+ // Atomics.
+ StatAtomic,
+ StatAtomicLoad,
+ StatAtomicStore,
+ StatAtomicExchange,
+ StatAtomicFetchAdd,
+ StatAtomicFetchAnd,
+ StatAtomicFetchOr,
+ StatAtomicFetchXor,
+ StatAtomicCAS,
+ StatAtomicFence,
+ StatAtomicRelaxed,
+ StatAtomicConsume,
+ StatAtomicAcquire,
+ StatAtomicRelease,
+ StatAtomicAcq_Rel,
+ StatAtomicSeq_Cst,
+ StatAtomic1,
+ StatAtomic2,
+ StatAtomic4,
+ StatAtomic8,
+
+ // Interceptors.
+ StatInterceptor,
+ StatInt_longjmp,
+ StatInt_siglongjmp,
+ StatInt_malloc,
+ StatInt_calloc,
+ StatInt_realloc,
+ StatInt_free,
+ StatInt_cfree,
+ StatInt_mmap,
+ StatInt_mmap64,
+ StatInt_munmap,
+ StatInt_memalign,
+ StatInt_valloc,
+ StatInt_pvalloc,
+ StatInt_posix_memalign,
+ StatInt__Znwm,
+ StatInt__ZnwmRKSt9nothrow_t,
+ StatInt__Znam,
+ StatInt__ZnamRKSt9nothrow_t,
+ StatInt__ZdlPv,
+ StatInt__ZdlPvRKSt9nothrow_t,
+ StatInt__ZdaPv,
+ StatInt__ZdaPvRKSt9nothrow_t,
+ StatInt_strlen,
+ StatInt_memset,
+ StatInt_memcpy,
+ StatInt_strcmp,
+ StatInt_memchr,
+ StatInt_memrchr,
+ StatInt_memmove,
+ StatInt_memcmp,
+ StatInt_strchr,
+ StatInt_strchrnul,
+ StatInt_strrchr,
+ StatInt_strncmp,
+ StatInt_strcpy,
+ StatInt_strncpy,
+ StatInt_strstr,
+ StatInt_atexit,
+ StatInt___cxa_guard_acquire,
+ StatInt___cxa_guard_release,
+ StatInt_pthread_create,
+ StatInt_pthread_join,
+ StatInt_pthread_detach,
+ StatInt_pthread_mutex_init,
+ StatInt_pthread_mutex_destroy,
+ StatInt_pthread_mutex_lock,
+ StatInt_pthread_mutex_trylock,
+ StatInt_pthread_mutex_timedlock,
+ StatInt_pthread_mutex_unlock,
+ StatInt_pthread_spin_init,
+ StatInt_pthread_spin_destroy,
+ StatInt_pthread_spin_lock,
+ StatInt_pthread_spin_trylock,
+ StatInt_pthread_spin_unlock,
+ StatInt_pthread_rwlock_init,
+ StatInt_pthread_rwlock_destroy,
+ StatInt_pthread_rwlock_rdlock,
+ StatInt_pthread_rwlock_tryrdlock,
+ StatInt_pthread_rwlock_timedrdlock,
+ StatInt_pthread_rwlock_wrlock,
+ StatInt_pthread_rwlock_trywrlock,
+ StatInt_pthread_rwlock_timedwrlock,
+ StatInt_pthread_rwlock_unlock,
+ StatInt_pthread_cond_init,
+ StatInt_pthread_cond_destroy,
+ StatInt_pthread_cond_signal,
+ StatInt_pthread_cond_broadcast,
+ StatInt_pthread_cond_wait,
+ StatInt_pthread_cond_timedwait,
+ StatInt_pthread_barrier_init,
+ StatInt_pthread_barrier_destroy,
+ StatInt_pthread_barrier_wait,
+ StatInt_pthread_once,
+ StatInt_sem_init,
+ StatInt_sem_destroy,
+ StatInt_sem_wait,
+ StatInt_sem_trywait,
+ StatInt_sem_timedwait,
+ StatInt_sem_post,
+ StatInt_sem_getvalue,
+ StatInt_read,
+ StatInt_pread,
+ StatInt_pread64,
+ StatInt_readv,
+ StatInt_preadv64,
+ StatInt_write,
+ StatInt_pwrite,
+ StatInt_pwrite64,
+ StatInt_writev,
+ StatInt_pwritev64,
+ StatInt_send,
+ StatInt_sendmsg,
+ StatInt_recv,
+ StatInt_recvmsg,
+ StatInt_unlink,
+ StatInt_fopen,
+ StatInt_fread,
+ StatInt_fwrite,
+ StatInt_puts,
+ StatInt_rmdir,
+ StatInt_opendir,
+ StatInt_epoll_ctl,
+ StatInt_epoll_wait,
+ StatInt_sigaction,
+ StatInt_signal,
+ StatInt_raise,
+ StatInt_kill,
+ StatInt_pthread_kill,
+
+ // Dynamic annotations.
+ StatAnnotation,
+ StatAnnotateHappensBefore,
+ StatAnnotateHappensAfter,
+ StatAnnotateCondVarSignal,
+ StatAnnotateCondVarSignalAll,
+ StatAnnotateMutexIsNotPHB,
+ StatAnnotateCondVarWait,
+ StatAnnotateRWLockCreate,
+ StatAnnotateRWLockDestroy,
+ StatAnnotateRWLockAcquired,
+ StatAnnotateRWLockReleased,
+ StatAnnotateTraceMemory,
+ StatAnnotateFlushState,
+ StatAnnotateNewMemory,
+ StatAnnotateNoOp,
+ StatAnnotateFlushExpectedRaces,
+ StatAnnotateEnableRaceDetection,
+ StatAnnotateMutexIsUsedAsCondVar,
+ StatAnnotatePCQGet,
+ StatAnnotatePCQPut,
+ StatAnnotatePCQDestroy,
+ StatAnnotatePCQCreate,
+ StatAnnotateExpectRace,
+ StatAnnotateBenignRaceSized,
+ StatAnnotateBenignRace,
+ StatAnnotateIgnoreReadsBegin,
+ StatAnnotateIgnoreReadsEnd,
+ StatAnnotateIgnoreWritesBegin,
+ StatAnnotateIgnoreWritesEnd,
+ StatAnnotatePublishMemoryRange,
+ StatAnnotateUnpublishMemoryRange,
+ StatAnnotateThreadName,
+
+ // Internal mutex contentionz.
+ StatMtxTotal,
+ StatMtxTrace,
+ StatMtxThreads,
+ StatMtxReport,
+ StatMtxSyncVar,
+ StatMtxSyncTab,
+ StatMtxSlab,
+ StatMtxAnnotations,
+ StatMtxAtExit,
+
+ // This must be the last.
+ StatCnt,
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STAT_H
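The StatType values above are plain indices into a flat counter array; the StatInc() calls sprinkled through the runtime (for instance in tsan_update_shadow_word_inl.h later in this import) bump one slot, and the name[] table plus print loop at the top of tsan_stat.cc turn the array into the "Statistics:" report. A minimal sketch of that pattern, with the local array name assumed for illustration:

  u64 stat[StatCnt] = {};
  // Hot path: what a StatInc(thr, StatShadowProcessed) presumably boils down to.
  stat[StatShadowProcessed]++;
  // At shutdown, mirroring the StatOutput() loop shown earlier:
  for (int i = 0; i < StatCnt; i++)
    TsanPrintf("%s: %zu\n", name[i], (uptr)stat[i]);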
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
new file mode 100644
index 0000000..7549a4f
--- /dev/null
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -0,0 +1,163 @@
+//===-- tsan_suppressions.cc ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+
+namespace __tsan {
+
+static Suppression *g_suppressions;
+
+static char *ReadFile(const char *filename) {
+ if (filename == 0 || filename[0] == 0)
+ return 0;
+ InternalScopedBuf<char> tmp(4*1024);
+ if (filename[0] == '/')
+ internal_snprintf(tmp, tmp.Size(), "%s", filename);
+ else
+ internal_snprintf(tmp, tmp.Size(), "%s/%s", GetPwd(), filename);
+ fd_t fd = internal_open(tmp, false);
+ if (fd == kInvalidFd) {
+ TsanPrintf("ThreadSanitizer: failed to open suppressions file '%s'\n",
+ tmp.Ptr());
+ Die();
+ }
+ const uptr fsize = internal_filesize(fd);
+ if (fsize == (uptr)-1) {
+ TsanPrintf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
+ tmp.Ptr());
+ Die();
+ }
+ char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1);
+ if (fsize != internal_read(fd, buf, fsize)) {
+ TsanPrintf("ThreadSanitizer: failed to read suppressions file '%s'\n",
+ tmp.Ptr());
+ Die();
+ }
+ internal_close(fd);
+ buf[fsize] = 0;
+ return buf;
+}
+
+bool SuppressionMatch(char *templ, const char *str) {
+ if (str == 0 || str[0] == 0)
+ return false;
+ char *tpos;
+ const char *spos;
+ while (templ && templ[0]) {
+ if (templ[0] == '*') {
+ templ++;
+ continue;
+ }
+ if (str[0] == 0)
+ return false;
+ tpos = (char*)internal_strchr(templ, '*');
+ if (tpos != 0)
+ tpos[0] = 0;
+ spos = internal_strstr(str, templ);
+ str = spos + internal_strlen(templ);
+ templ = tpos;
+ if (tpos)
+ tpos[0] = '*';
+ if (spos == 0)
+ return false;
+ }
+ return true;
+}
+
+Suppression *SuppressionParse(const char* supp) {
+ Suppression *head = 0;
+ const char *line = supp;
+ while (line) {
+ while (line[0] == ' ' || line[0] == '\t')
+ line++;
+ const char *end = internal_strchr(line, '\n');
+ if (end == 0)
+ end = line + internal_strlen(line);
+ if (line != end && line[0] != '#') {
+ const char *end2 = end;
+ while (line != end2 && (end2[-1] == ' ' || end2[-1] == '\t'))
+ end2--;
+ SuppressionType stype;
+ if (0 == internal_strncmp(line, "race:", sizeof("race:") - 1)) {
+ stype = SuppressionRace;
+ line += sizeof("race:") - 1;
+ } else if (0 == internal_strncmp(line, "thread:",
+ sizeof("thread:") - 1)) {
+ stype = SuppressionThread;
+ line += sizeof("thread:") - 1;
+ } else if (0 == internal_strncmp(line, "mutex:",
+ sizeof("mutex:") - 1)) {
+ stype = SuppressionMutex;
+ line += sizeof("mutex:") - 1;
+ } else if (0 == internal_strncmp(line, "signal:",
+ sizeof("signal:") - 1)) {
+ stype = SuppressionSignal;
+ line += sizeof("signal:") - 1;
+ } else {
+ TsanPrintf("ThreadSanitizer: failed to parse suppressions file\n");
+ Die();
+ }
+ Suppression *s = (Suppression*)internal_alloc(MBlockSuppression,
+ sizeof(Suppression));
+ s->next = head;
+ head = s;
+ s->type = stype;
+ s->templ = (char*)internal_alloc(MBlockSuppression, end2 - line + 1);
+ internal_memcpy(s->templ, line, end2 - line);
+ s->templ[end2 - line] = 0;
+ }
+ if (end[0] == 0)
+ break;
+ line = end + 1;
+ }
+ return head;
+}
+
+void InitializeSuppressions() {
+ char *supp = ReadFile(flags()->suppressions);
+ g_suppressions = SuppressionParse(supp);
+}
+
+bool IsSuppressed(ReportType typ, const ReportStack *stack) {
+ if (g_suppressions == 0 || stack == 0)
+ return false;
+ SuppressionType stype;
+ if (typ == ReportTypeRace)
+ stype = SuppressionRace;
+ else if (typ == ReportTypeThreadLeak)
+ stype = SuppressionThread;
+ else if (typ == ReportTypeMutexDestroyLocked)
+ stype = SuppressionMutex;
+ else if (typ == ReportTypeSignalUnsafe)
+ stype = SuppressionSignal;
+ else
+ return false;
+ for (const ReportStack *frame = stack; frame; frame = frame->next) {
+ for (Suppression *supp = g_suppressions; supp; supp = supp->next) {
+ if (stype == supp->type &&
+ (SuppressionMatch(supp->templ, frame->func) ||
+ SuppressionMatch(supp->templ, frame->file))) {
+ DPrintf("ThreadSanitizer: matched suppression '%s'\n", supp->templ);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+} // namespace __tsan
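For reference, SuppressionParse() above accepts one suppression per line: blank lines and lines starting with '#' are skipped, every other line must begin with "race:", "thread:", "mutex:" or "signal:", and the remainder becomes a template matched by SuppressionMatch() with '*' wildcards against the function and file name of each report frame. A sketch of a file it would accept (the symbol names are made up for illustration):

  # known benign race in a third-party hash table
  race:ThirdPartyHashTable::Rehash
  # worker threads are intentionally left running at exit
  thread:*worker_main*
  signal:LegacyLogger*

Any other non-comment prefix makes the parser print "failed to parse suppressions file" and Die().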
diff --git a/lib/tsan/rtl/tsan_suppressions.h b/lib/tsan/rtl/tsan_suppressions.h
new file mode 100644
index 0000000..29311c1
--- /dev/null
+++ b/lib/tsan/rtl/tsan_suppressions.h
@@ -0,0 +1,43 @@
+//===-- tsan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SUPPRESSIONS_H
+#define TSAN_SUPPRESSIONS_H
+
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void InitializeSuppressions();
+void FinalizeSuppressions();
+bool IsSuppressed(ReportType typ, const ReportStack *stack);
+
+// Exposed for testing.
+enum SuppressionType {
+ SuppressionRace,
+ SuppressionMutex,
+ SuppressionThread,
+ SuppressionSignal,
+};
+
+struct Suppression {
+ Suppression *next;
+ SuppressionType type;
+ char *templ;
+};
+
+Suppression *SuppressionParse(const char* supp);
+bool SuppressionMatch(char *templ, const char *str);
+
+} // namespace __tsan
+
+#endif // TSAN_SUPPRESSIONS_H
diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc
new file mode 100644
index 0000000..f757d07
--- /dev/null
+++ b/lib/tsan/rtl/tsan_symbolize.cc
@@ -0,0 +1,78 @@
+//===-- tsan_symbolize.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_symbolize.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_flags.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+ReportStack *NewReportStackEntry(uptr addr) {
+ ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
+ sizeof(ReportStack));
+ internal_memset(ent, 0, sizeof(*ent));
+ ent->pc = addr;
+ return ent;
+}
+
+static ReportStack *NewReportStackEntry(const AddressInfo &info) {
+ ReportStack *ent = NewReportStackEntry(info.address);
+ if (info.module)
+ ent->module = internal_strdup(info.module);
+ ent->offset = info.module_offset;
+ if (info.function) {
+ ent->func = internal_strdup(info.function);
+ }
+ if (info.file)
+ ent->file = internal_strdup(info.file);
+ ent->line = info.line;
+ ent->col = info.column;
+ return ent;
+}
+
+ReportStack *SymbolizeCode(uptr addr) {
+ if (flags()->use_internal_symbolizer) {
+ static const uptr kMaxAddrFrames = 16;
+ InternalScopedBuf<AddressInfo> addr_frames(kMaxAddrFrames);
+ for (uptr i = 0; i < kMaxAddrFrames; i++)
+ new(&addr_frames[i]) AddressInfo();
+ uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames,
+ kMaxAddrFrames);
+ if (addr_frames_num == 0)
+ return NewReportStackEntry(addr);
+ ReportStack *top = 0;
+ ReportStack *bottom = 0;
+ for (uptr i = 0; i < addr_frames_num; i++) {
+ ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
+ CHECK(cur_entry);
+ addr_frames[i].Clear();
+ if (i == 0)
+ top = cur_entry;
+ else
+ bottom->next = cur_entry;
+ bottom = cur_entry;
+ }
+ return top;
+ }
+ return SymbolizeCodeAddr2Line(addr);
+}
+
+ReportStack *SymbolizeData(uptr addr) {
+ return SymbolizeDataAddr2Line(addr);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_symbolize.h b/lib/tsan/rtl/tsan_symbolize.h
new file mode 100644
index 0000000..115339b
--- /dev/null
+++ b/lib/tsan/rtl/tsan_symbolize.h
@@ -0,0 +1,31 @@
+//===-- tsan_symbolize.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYMBOLIZE_H
+#define TSAN_SYMBOLIZE_H
+
+#include "tsan_defs.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+ReportStack *SymbolizeCode(uptr addr);
+ReportStack *SymbolizeData(uptr addr);
+
+ReportStack *SymbolizeCodeAddr2Line(uptr addr);
+ReportStack *SymbolizeDataAddr2Line(uptr addr);
+
+ReportStack *NewReportStackEntry(uptr addr);
+
+} // namespace __tsan
+
+#endif // TSAN_SYMBOLIZE_H
diff --git a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
new file mode 100644
index 0000000..5eed977
--- /dev/null
+++ b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
@@ -0,0 +1,193 @@
+//===-- tsan_symbolize_addr2line.cc ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_symbolize.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_platform.h"
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <link.h>
+#include <linux/limits.h>
+#include <sys/types.h>
+
+namespace __tsan {
+
+struct ModuleDesc {
+ const char *fullname;
+ const char *name;
+ uptr base;
+ int inp_fd;
+ int out_fd;
+};
+
+struct SectionDesc {
+ SectionDesc *next;
+ ModuleDesc *module;
+ uptr base;
+ uptr end;
+};
+
+struct DlIteratePhdrCtx {
+ SectionDesc *sections;
+ bool is_first;
+};
+
+static void NOINLINE InitModule(ModuleDesc *m) {
+ int outfd[2] = {};
+ if (pipe(&outfd[0])) {
+ TsanPrintf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno);
+ Die();
+ }
+ int infd[2] = {};
+ if (pipe(&infd[0])) {
+ TsanPrintf("ThreadSanitizer: infd pipe() failed (%d)\n", errno);
+ Die();
+ }
+ int pid = fork();
+ if (pid == 0) {
+ flags()->log_fileno = STDERR_FILENO;
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = getdtablesize(); fd > 2; fd--)
+ internal_close(fd);
+    execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname,
+          (char*)0);
+ _exit(0);
+ } else if (pid < 0) {
+ TsanPrintf("ThreadSanitizer: failed to fork symbolizer\n");
+ Die();
+ }
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ m->inp_fd = infd[0];
+ m->out_fd = outfd[1];
+}
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
+ InternalScopedBuf<char> tmp(128);
+ if (ctx->is_first) {
+ internal_snprintf(tmp.Ptr(), tmp.Size(), "/proc/%d/exe", GetPid());
+ info->dlpi_name = tmp.Ptr();
+ }
+ ctx->is_first = false;
+ if (info->dlpi_name == 0 || info->dlpi_name[0] == 0)
+ return 0;
+ ModuleDesc *m = (ModuleDesc*)internal_alloc(MBlockReportStack,
+ sizeof(ModuleDesc));
+ m->fullname = internal_strdup(info->dlpi_name);
+ m->name = internal_strrchr(m->fullname, '/');
+ if (m->name)
+ m->name += 1;
+ else
+ m->name = m->fullname;
+ m->base = (uptr)info->dlpi_addr;
+ m->inp_fd = -1;
+ m->out_fd = -1;
+ DPrintf("Module %s %zx\n", m->name, m->base);
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ const Elf64_Phdr *s = &info->dlpi_phdr[i];
+ DPrintf(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
+ " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
+ (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
+ (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
+ (uptr)s->p_flags, (uptr)s->p_align);
+ if (s->p_type != PT_LOAD)
+ continue;
+ SectionDesc *sec = (SectionDesc*)internal_alloc(MBlockReportStack,
+ sizeof(SectionDesc));
+ sec->module = m;
+ sec->base = info->dlpi_addr + s->p_vaddr;
+ sec->end = sec->base + s->p_memsz;
+ sec->next = ctx->sections;
+ ctx->sections = sec;
+ DPrintf(" Section %zx-%zx\n", sec->base, sec->end);
+ }
+ return 0;
+}
+
+static SectionDesc *InitSections() {
+ DlIteratePhdrCtx ctx = {0, true};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &ctx);
+ return ctx.sections;
+}
+
+static SectionDesc *GetSectionDesc(uptr addr) {
+ static SectionDesc *sections = 0;
+ if (sections == 0)
+ sections = InitSections();
+ for (SectionDesc *s = sections; s; s = s->next) {
+ if (addr >= s->base && addr < s->end) {
+ if (s->module->inp_fd == -1)
+ InitModule(s->module);
+ return s;
+ }
+ }
+ return 0;
+}
+
+ReportStack *SymbolizeCodeAddr2Line(uptr addr) {
+ SectionDesc *s = GetSectionDesc(addr);
+ if (s == 0)
+ return NewReportStackEntry(addr);
+ ModuleDesc *m = s->module;
+ uptr offset = addr - m->base;
+ char addrstr[32];
+ internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset);
+ if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) {
+ TsanPrintf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n",
+ m->out_fd, errno);
+ Die();
+ }
+ InternalScopedBuf<char> func(1024);
+ ssize_t len = internal_read(m->inp_fd, func, func.Size() - 1);
+ if (len <= 0) {
+ TsanPrintf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n",
+ m->inp_fd, errno);
+ Die();
+ }
+ func.Ptr()[len] = 0;
+ ReportStack *res = NewReportStackEntry(addr);
+ res->module = internal_strdup(m->name);
+ res->offset = offset;
+ char *pos = (char*)internal_strchr(func, '\n');
+ if (pos && func[0] != '?') {
+ res->func = (char*)internal_alloc(MBlockReportStack, pos - func + 1);
+ internal_memcpy(res->func, func, pos - func);
+ res->func[pos - func] = 0;
+ char *pos2 = (char*)internal_strchr(pos, ':');
+ if (pos2) {
+ res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1);
+ internal_memcpy(res->file, pos + 1, pos2 - pos - 1);
+ res->file[pos2 - pos - 1] = 0;
+ res->line = atoi(pos2 + 1);
+ }
+ }
+ return res;
+}
+
+ReportStack *SymbolizeDataAddr2Line(uptr addr) {
+ return 0;
+}
+
+} // namespace __tsan
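A note on the child-process protocol relied on above: addr2line -Cfe <module> reads one address per line on stdin and, because of -f, prints two lines per query, the demangled function name followed by "file:line". SymbolizeCodeAddr2Line() depends on exactly that shape. A sketch of one exchange (the address and names are illustrative only):

  // written to m->out_fd (addr2line's stdin):
  0x4f2a10
  // read back from m->inp_fd (addr2line's stdout):
  MyFunction(int)
  /path/to/source.cc:123

The function name is taken up to the first '\n', the file up to the following ':', and the line number via atoi(); an unknown address comes back as "??", which the func[0] != '?' check filters out.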
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
new file mode 100644
index 0000000..abb5a2a
--- /dev/null
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -0,0 +1,219 @@
+//===-- tsan_sync.cc ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+SyncVar::SyncVar(uptr addr)
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar)
+ , addr(addr)
+ , owner_tid(kInvalidTid)
+ , recursion()
+ , is_rw()
+ , is_recursive()
+ , is_broken() {
+}
+
+SyncTab::Part::Part()
+ : mtx(MutexTypeSyncTab, StatMtxSyncTab)
+ , val() {
+}
+
+SyncTab::SyncTab() {
+}
+
+SyncTab::~SyncTab() {
+ for (int i = 0; i < kPartCount; i++) {
+ while (tab_[i].val) {
+ SyncVar *tmp = tab_[i].val;
+ tab_[i].val = tmp->next;
+ DestroyAndFree(tmp);
+ }
+ }
+}
+
+SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ Part *p = &tab_[PartIdx(addr)];
+ {
+ ReadLock l(&p->mtx);
+ for (SyncVar *res = p->val; res; res = res->next) {
+ if (res->addr == addr) {
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+ }
+ }
+ {
+ Lock l(&p->mtx);
+ SyncVar *res = p->val;
+ for (; res; res = res->next) {
+ if (res->addr == addr)
+ break;
+ }
+ if (res == 0) {
+ StatInc(thr, StatSyncCreated);
+ void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
+ res = new(mem) SyncVar(addr);
+#ifndef TSAN_GO
+ res->creation_stack.ObtainCurrent(thr, pc);
+#endif
+ res->next = p->val;
+ p->val = res;
+ }
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+}
+
+SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
+ Part *p = &tab_[PartIdx(addr)];
+ SyncVar *res = 0;
+ {
+ Lock l(&p->mtx);
+ SyncVar **prev = &p->val;
+ res = *prev;
+ while (res) {
+ if (res->addr == addr) {
+ *prev = res->next;
+ break;
+ }
+ prev = &res->next;
+ res = *prev;
+ }
+ }
+ if (res) {
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
+ }
+ return res;
+}
+
+uptr SyncVar::GetMemoryConsumption() {
+ return sizeof(*this)
+ + clock.size() * sizeof(u64)
+ + read_clock.size() * sizeof(u64)
+ + creation_stack.Size() * sizeof(uptr);
+}
+
+uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
+ uptr mem = 0;
+ for (int i = 0; i < kPartCount; i++) {
+ Part *p = &tab_[i];
+ Lock l(&p->mtx);
+ for (SyncVar *s = p->val; s; s = s->next) {
+ *nsync += 1;
+ mem += s->GetMemoryConsumption();
+ }
+ }
+ return mem;
+}
+
+int SyncTab::PartIdx(uptr addr) {
+ return (addr >> 3) % kPartCount;
+}
+
+StackTrace::StackTrace()
+ : n_()
+ , s_()
+ , c_() {
+}
+
+StackTrace::StackTrace(uptr *buf, uptr cnt)
+ : n_()
+ , s_(buf)
+ , c_(cnt) {
+ CHECK_NE(buf, 0);
+ CHECK_NE(cnt, 0);
+}
+
+StackTrace::~StackTrace() {
+ Reset();
+}
+
+void StackTrace::Reset() {
+ if (s_ && !c_) {
+ CHECK_NE(n_, 0);
+ internal_free(s_);
+ s_ = 0;
+ }
+ n_ = 0;
+}
+
+void StackTrace::Init(const uptr *pcs, uptr cnt) {
+ Reset();
+ if (cnt == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(cnt, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+ }
+ n_ = cnt;
+ internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+}
+
+void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
+ Reset();
+ n_ = thr->shadow_stack_pos - thr->shadow_stack;
+ if (n_ + !!toppc == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(n_ + !!toppc, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace,
+ (n_ + !!toppc) * sizeof(s_[0]));
+ }
+ for (uptr i = 0; i < n_; i++)
+ s_[i] = thr->shadow_stack[i];
+ if (toppc) {
+ s_[n_] = toppc;
+ n_++;
+ }
+}
+
+void StackTrace::CopyFrom(const StackTrace& other) {
+ Reset();
+ Init(other.Begin(), other.Size());
+}
+
+bool StackTrace::IsEmpty() const {
+ return n_ == 0;
+}
+
+uptr StackTrace::Size() const {
+ return n_;
+}
+
+uptr StackTrace::Get(uptr i) const {
+ CHECK_LT(i, n_);
+ return s_[i];
+}
+
+const uptr *StackTrace::Begin() const {
+ return s_;
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
new file mode 100644
index 0000000..34d3e0b
--- /dev/null
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -0,0 +1,106 @@
+//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYNC_H
+#define TSAN_SYNC_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class SlabCache;
+
+class StackTrace {
+ public:
+ StackTrace();
+  // Initializes the object in "static mode",
+ // in this mode it never calls malloc/free but uses the provided buffer.
+ StackTrace(uptr *buf, uptr cnt);
+ ~StackTrace();
+ void Reset();
+
+ void Init(const uptr *pcs, uptr cnt);
+ void ObtainCurrent(ThreadState *thr, uptr toppc);
+ bool IsEmpty() const;
+ uptr Size() const;
+ uptr Get(uptr i) const;
+ const uptr *Begin() const;
+ void CopyFrom(const StackTrace& other);
+
+ private:
+ uptr n_;
+ uptr *s_;
+ const uptr c_;
+
+ StackTrace(const StackTrace&);
+ void operator = (const StackTrace&);
+};
+
+struct SyncVar {
+ explicit SyncVar(uptr addr);
+
+ static const int kInvalidTid = -1;
+
+ Mutex mtx;
+ const uptr addr;
+ SyncClock clock;
+ SyncClock read_clock; // Used for rw mutexes only.
+ StackTrace creation_stack;
+ int owner_tid; // Set only by exclusive owners.
+ int recursion;
+ bool is_rw;
+ bool is_recursive;
+ bool is_broken;
+ SyncVar *next; // In SyncTab hashtable.
+
+ uptr GetMemoryConsumption();
+};
+
+class SyncTab {
+ public:
+ SyncTab();
+ ~SyncTab();
+
+ // If the SyncVar does not exist yet, it is created.
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock);
+
+ // If the SyncVar does not exist, returns 0.
+ SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+
+ uptr GetMemoryConsumption(uptr *nsync);
+
+ private:
+ struct Part {
+ Mutex mtx;
+ SyncVar *val;
+ char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
+ Part();
+ };
+
+ // FIXME: Implement something more sane.
+ static const int kPartCount = 1009;
+ Part tab_[kPartCount];
+
+ int PartIdx(uptr addr);
+
+ SyncTab(const SyncTab&); // Not implemented.
+ void operator = (const SyncTab&); // Not implemented.
+};
+
+} // namespace __tsan
+
+#endif // TSAN_SYNC_H
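The two StackTrace modes declared above differ only in who owns the storage. A brief sketch, where pcs/npcs stand for some existing pc array and its length (assumed names):

  // Dynamic mode: Init()/ObtainCurrent() allocate via internal_alloc and
  // Reset()/the destructor free the buffer.
  StackTrace dyn;
  dyn.Init(pcs, npcs);

  // Static mode: the caller supplies the buffer, so the object never calls
  // malloc/free; TraceHeader later in this import uses this form for stack0.
  uptr buf[64];
  StackTrace fixed(buf, 64);
  fixed.Init(pcs, npcs);  // requires npcs <= 64 (CHECK_LE in Init)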
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
new file mode 100644
index 0000000..bf15bf5
--- /dev/null
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -0,0 +1,71 @@
+//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TRACE_H
+#define TSAN_TRACE_H
+
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+#include "tsan_sync.h"
+
+namespace __tsan {
+
+const int kTraceParts = 8;
+const int kTraceSize = 128*1024;
+const int kTracePartSize = kTraceSize / kTraceParts;
+
+// Must fit into 3 bits.
+enum EventType {
+ EventTypeMop,
+ EventTypeFuncEnter,
+ EventTypeFuncExit,
+ EventTypeLock,
+ EventTypeUnlock,
+ EventTypeRLock,
+ EventTypeRUnlock,
+};
+
+// Represents a thread event (from most significant bit):
+// u64 typ : 3; // EventType.
+// u64 addr : 61; // Associated pc.
+typedef u64 Event;
+
+struct TraceHeader {
+ StackTrace stack0; // Start stack for the trace.
+ u64 epoch0; // Start epoch for the trace.
+#ifndef TSAN_GO
+ uptr stack0buf[kShadowStackSize];
+#endif
+
+ TraceHeader()
+#ifndef TSAN_GO
+ : stack0(stack0buf, kShadowStackSize)
+#else
+ : stack0()
+#endif
+ , epoch0() {
+ }
+};
+
+struct Trace {
+ Event events[kTraceSize];
+ TraceHeader headers[kTraceParts];
+ Mutex mtx;
+
+ Trace()
+ : mtx(MutexTypeTrace, StatMtxTrace) {
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_TRACE_H
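The Event encoding documented above (3-bit EventType in the most significant bits, 61-bit pc below it) can be written out explicitly; the helper names here are illustrative, not part of the runtime:

  #include <stdint.h>

  static inline uint64_t PackEvent(unsigned typ, uint64_t addr) {
    // Type in bits 63..61, pc/addr in bits 60..0.
    return ((uint64_t)typ << 61) | (addr & ((1ull << 61) - 1));
  }
  static inline unsigned EventTyp(uint64_t ev) { return (unsigned)(ev >> 61); }
  static inline uint64_t EventAddr(uint64_t ev) { return ev & ((1ull << 61) - 1); }

Keeping every trace entry a single 64-bit word is what lets the events[kTraceSize] array above remain a flat buffer.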
diff --git a/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
new file mode 100644
index 0000000..c7864ce
--- /dev/null
+++ b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
@@ -0,0 +1,79 @@
+//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Body of the hottest inner loop.
+// If we wrap this body into a function, compilers (both gcc and clang)
+// produce slightly less efficient code.
+//===----------------------------------------------------------------------===//
+do {
+ StatInc(thr, StatShadowProcessed);
+ const unsigned kAccessSize = 1 << kAccessSizeLog;
+ unsigned off = cur.ComputeSearchOffset();
+ u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
+ old = LoadShadow(sp);
+ if (old.IsZero()) {
+ StatInc(thr, StatShadowZero);
+ if (store_word)
+ StoreIfNotYetStored(sp, &store_word);
+ // The above StoreIfNotYetStored could be done unconditionally
+ // and it even shows 4% gain on synthetic benchmarks (r4307).
+ break;
+ }
+  // Is the memory access equal to the previous one?
+ if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
+ StatInc(thr, StatShadowSameSize);
+ // same thread?
+ if (Shadow::TidsAreEqual(old, cur)) {
+ StatInc(thr, StatShadowSameThread);
+ if (OldIsInSameSynchEpoch(old, thr)) {
+ if (OldIsRWStronger(old, kAccessIsWrite)) {
+ // found a slot that holds effectively the same info
+ // (that is, same tid, same sync epoch and same size)
+ StatInc(thr, StatMopSame);
+ return;
+ }
+ StoreIfNotYetStored(sp, &store_word);
+ break;
+ }
+ if (OldIsRWWeaker(old, kAccessIsWrite))
+ StoreIfNotYetStored(sp, &store_word);
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (HappensBefore(old, thr)) {
+ StoreIfNotYetStored(sp, &store_word);
+ break;
+ }
+ if (BothReads(old, kAccessIsWrite))
+ break;
+ goto RACE;
+ }
+
+  // Do the memory accesses intersect?
+ if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
+ StatInc(thr, StatShadowIntersect);
+ if (Shadow::TidsAreEqual(old, cur)) {
+ StatInc(thr, StatShadowSameThread);
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (HappensBefore(old, thr))
+ break;
+
+ if (BothReads(old, kAccessIsWrite))
+ break;
+
+ goto RACE;
+ }
+ // The accesses do not intersect.
+ StatInc(thr, StatShadowNotIntersect);
+ break;
+} while (0);
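Because this header has no include guard and no enclosing function, it is meant to be textually included by the memory-access handler; a hedged sketch of that usage, where the surrounding loop and variable names are assumptions inferred from what the body references (shadow_mem, idx, cur, old, store_word, thr, kShadowCnt, kAccessSizeLog, kAccessIsWrite and the RACE label), not the actual tsan_rtl.cc code:

  // Inside the access handler, once per candidate shadow slot:
  for (int idx = 0; idx < kShadowCnt; idx++) {
  #include "tsan_update_shadow_word_inl.h"
  }

The do { ... } while (0) wrapper lets each inclusion break out of its own slot's processing without disturbing the enclosing control flow.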
diff --git a/lib/tsan/rtl/tsan_vector.h b/lib/tsan/rtl/tsan_vector.h
new file mode 100644
index 0000000..d41063d
--- /dev/null
+++ b/lib/tsan/rtl/tsan_vector.h
@@ -0,0 +1,110 @@
+//===-- tsan_vector.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+// Low-fat STL-like vector container.
+
+#ifndef TSAN_VECTOR_H
+#define TSAN_VECTOR_H
+
+#include "tsan_defs.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+template<typename T>
+class Vector {
+ public:
+ explicit Vector(MBlockType typ)
+ : typ_(typ)
+ , begin_()
+ , end_()
+ , last_() {
+ }
+
+ ~Vector() {
+ if (begin_)
+ internal_free(begin_);
+ }
+
+ void Reset() {
+ if (begin_)
+ internal_free(begin_);
+ begin_ = 0;
+ end_ = 0;
+ last_ = 0;
+ }
+
+ uptr Size() const {
+ return end_ - begin_;
+ }
+
+ T &operator[](uptr i) {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ const T &operator[](uptr i) const {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ T *PushBack(T v = T()) {
+ EnsureSize(Size() + 1);
+ end_[-1] = v;
+ return &end_[-1];
+ }
+
+ void Resize(uptr size) {
+ uptr old_size = Size();
+ EnsureSize(size);
+ if (old_size < size) {
+ for (uptr i = old_size; i < size; i++)
+ begin_[i] = T();
+ }
+ }
+
+ private:
+ const MBlockType typ_;
+ T *begin_;
+ T *end_;
+ T *last_;
+
+ void EnsureSize(uptr size) {
+ if (size <= Size())
+ return;
+ if (size <= (uptr)(last_ - begin_)) {
+ end_ = begin_ + size;
+ return;
+ }
+ uptr cap0 = last_ - begin_;
+ uptr cap = 2 * cap0;
+ if (cap == 0)
+ cap = 16;
+ if (cap < size)
+ cap = size;
+ T *p = (T*)internal_alloc(typ_, cap * sizeof(T));
+ if (cap0) {
+ internal_memcpy(p, begin_, cap0 * sizeof(T));
+ internal_free(begin_);
+ }
+ begin_ = p;
+ end_ = begin_ + size;
+ last_ = begin_ + cap;
+ }
+
+ Vector(const Vector&);
+ void operator=(const Vector&);
+};
+}
+
+#endif // #ifndef TSAN_VECTOR_H
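A short usage sketch for the Vector container above; MBlockReportStack is one of the MBlockType tags already used elsewhere in this import, and any tag simply routes the backing allocation through internal_alloc:

  Vector<uptr> v(MBlockReportStack);
  v.PushBack(0x1000);       // capacity grows 16, 32, 64, ... as needed
  v.Resize(4);              // new slots are value-initialized (0 for uptr)
  uptr sum = 0;
  for (uptr i = 0; i < v.Size(); i++)
    sum += v[i];
  // destructor releases the buffer via internal_free

Note that the pointer returned by PushBack() is invalidated by the next reallocation inside EnsureSize().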
diff --git a/lib/tsan/rtl_tests/tsan_bench.cc b/lib/tsan/rtl_tests/tsan_bench.cc
new file mode 100644
index 0000000..a3cf22f
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_bench.cc
@@ -0,0 +1,105 @@
+//===-- tsan_bench.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_test_util.h"
+#include "tsan_interface.h"
+#include "tsan_defs.h"
+#include "gtest/gtest.h"
+#include <stdint.h>
+
+const int kSize = 128;
+const int kRepeat = 2*1024*1024;
+
+void noinstr(void *p) {}
+
+template<typename T, void(*__tsan_mop)(void *p)>
+static void Benchmark() {
+ volatile T data[kSize];
+ for (int i = 0; i < kRepeat; i++) {
+ for (int j = 0; j < kSize; j++) {
+ __tsan_mop((void*)&data[j]);
+ data[j]++;
+ }
+ }
+}
+
+TEST(DISABLED_BENCH, Mop1) {
+ Benchmark<uint8_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop1Read) {
+ Benchmark<uint8_t, __tsan_read1>();
+}
+
+TEST(DISABLED_BENCH, Mop1Write) {
+ Benchmark<uint8_t, __tsan_write1>();
+}
+
+TEST(DISABLED_BENCH, Mop2) {
+ Benchmark<uint16_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop2Read) {
+ Benchmark<uint16_t, __tsan_read2>();
+}
+
+TEST(DISABLED_BENCH, Mop2Write) {
+ Benchmark<uint16_t, __tsan_write2>();
+}
+
+TEST(DISABLED_BENCH, Mop4) {
+ Benchmark<uint32_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop4Read) {
+ Benchmark<uint32_t, __tsan_read4>();
+}
+
+TEST(DISABLED_BENCH, Mop4Write) {
+ Benchmark<uint32_t, __tsan_write4>();
+}
+
+TEST(DISABLED_BENCH, Mop8) {
+  Benchmark<uint64_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop8Read) {
+ Benchmark<uint64_t, __tsan_read8>();
+}
+
+TEST(DISABLED_BENCH, Mop8Write) {
+ Benchmark<uint64_t, __tsan_write8>();
+}
+
+TEST(DISABLED_BENCH, FuncCall) {
+ for (int i = 0; i < kRepeat; i++) {
+ for (int j = 0; j < kSize; j++)
+ __tsan_func_entry((void*)(uintptr_t)j);
+ for (int j = 0; j < kSize; j++)
+ __tsan_func_exit();
+ }
+}
+
+TEST(DISABLED_BENCH, MutexLocal) {
+ Mutex m;
+ ScopedThread().Create(m);
+ for (int i = 0; i < 50; i++) {
+ ScopedThread t;
+ t.Lock(m);
+ t.Unlock(m);
+ }
+ for (int i = 0; i < 16*1024*1024; i++) {
+ m.Lock();
+ m.Unlock();
+ }
+ ScopedThread().Destroy(m);
+}
diff --git a/lib/tsan/rtl_tests/tsan_mop.cc b/lib/tsan/rtl_tests/tsan_mop.cc
new file mode 100644
index 0000000..f217428
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_mop.cc
@@ -0,0 +1,233 @@
+//===-- tsan_mop.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <stddef.h>
+#include <stdint.h>
+
+TEST(ThreadSanitizer, SimpleWrite) {
+ ScopedThread t;
+ MemLoc l;
+ t.Write1(l);
+}
+
+TEST(ThreadSanitizer, SimpleWriteWrite) {
+ ScopedThread t1, t2;
+ MemLoc l1, l2;
+ t1.Write1(l1);
+ t2.Write1(l2);
+}
+
+TEST(ThreadSanitizer, WriteWriteRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Write1(l);
+ t2.Write1(l, true);
+}
+
+TEST(ThreadSanitizer, ReadWriteRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Read1(l);
+ t2.Write1(l, true);
+}
+
+TEST(ThreadSanitizer, WriteReadRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Write1(l);
+ t2.Read1(l, true);
+}
+
+TEST(ThreadSanitizer, ReadReadNoRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Read1(l);
+ t2.Read1(l);
+}
+
+TEST(ThreadSanitizer, WriteThenRead) {
+ MemLoc l;
+ ScopedThread t1, t2;
+ t1.Write1(l);
+ t1.Read1(l);
+ t2.Read1(l, true);
+}
+
+TEST(ThreadSanitizer, WriteThenLockedRead) {
+ Mutex m(Mutex::RW);
+ MainThread t0;
+ t0.Create(m);
+ MemLoc l;
+ {
+ ScopedThread t1, t2;
+
+ t1.Write8(l);
+
+ t1.Lock(m);
+ t1.Read8(l);
+ t1.Unlock(m);
+
+ t2.Read8(l, true);
+ }
+ t0.Destroy(m);
+}
+
+TEST(ThreadSanitizer, LockedWriteThenRead) {
+ Mutex m(Mutex::RW);
+ MainThread t0;
+ t0.Create(m);
+ MemLoc l;
+ {
+ ScopedThread t1, t2;
+
+ t1.Lock(m);
+ t1.Write8(l);
+ t1.Unlock(m);
+
+ t1.Read8(l);
+
+ t2.Read8(l, true);
+ }
+ t0.Destroy(m);
+}
+
+
+TEST(ThreadSanitizer, RaceWithOffset) {
+ ScopedThread t1, t2;
+ {
+ MemLoc l;
+ t1.Access(l.loc(), true, 8, false);
+ t2.Access((char*)l.loc() + 4, true, 4, true);
+ }
+ {
+ MemLoc l;
+ t1.Access(l.loc(), true, 8, false);
+ t2.Access((char*)l.loc() + 7, true, 1, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 4, true, 4, false);
+ t2.Access((char*)l.loc() + 4, true, 2, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 4, true, 4, false);
+ t2.Access((char*)l.loc() + 6, true, 2, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 3, true, 2, false);
+ t2.Access((char*)l.loc() + 4, true, 1, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 1, true, 8, false);
+ t2.Access((char*)l.loc() + 3, true, 1, true);
+ }
+}
+
+TEST(ThreadSanitizer, RaceWithOffset2) {
+ ScopedThread t1, t2;
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc(), true, 4, false);
+ t2.Access((char*)l.loc() + 2, true, 1, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 2, true, 1, false);
+ t2.Access((char*)l.loc(), true, 4, true);
+ }
+}
+
+TEST(ThreadSanitizer, NoRaceWithOffset) {
+ ScopedThread t1, t2;
+ {
+ MemLoc l;
+ t1.Access(l.loc(), true, 4, false);
+ t2.Access((char*)l.loc() + 4, true, 4, false);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 3, true, 2, false);
+ t2.Access((char*)l.loc() + 1, true, 2, false);
+ t2.Access((char*)l.loc() + 5, true, 2, false);
+ }
+}
+
+TEST(ThreadSanitizer, RaceWithDeadThread) {
+ MemLoc l;
+ ScopedThread t;
+ ScopedThread().Write1(l);
+ t.Write1(l, true);
+}
+
+TEST(ThreadSanitizer, BenignRaceOnVptr) {
+ void *vptr_storage;
+ MemLoc vptr(&vptr_storage), val;
+ vptr_storage = val.loc();
+ ScopedThread t1, t2;
+ t1.VptrUpdate(vptr, val);
+ t2.Read8(vptr);
+}
+
+TEST(ThreadSanitizer, HarmfulRaceOnVptr) {
+ void *vptr_storage;
+ MemLoc vptr(&vptr_storage), val1, val2;
+ vptr_storage = val1.loc();
+ ScopedThread t1, t2;
+ t1.VptrUpdate(vptr, val2);
+ t2.Read8(vptr, true);
+}
+
+static void foo() {
+ volatile int x = 42;
+ int x2 = x;
+ (void)x2;
+}
+
+static void bar() {
+ volatile int x = 43;
+ int x2 = x;
+ (void)x2;
+}
+
+TEST(ThreadSanitizer, ReportDeadThread) {
+ MemLoc l;
+ ScopedThread t1;
+ {
+ ScopedThread t2;
+ t2.Call(&foo);
+ t2.Call(&bar);
+ t2.Write1(l);
+ }
+ t1.Write1(l, true);
+}
+
+struct ClassWithStatic {
+ static int Data[4];
+};
+
+int ClassWithStatic::Data[4];
+
+static void foobarbaz() {}
+
+TEST(ThreadSanitizer, ReportRace) {
+ ScopedThread t1;
+ MainThread().Access(&ClassWithStatic::Data, true, 4, false);
+ t1.Call(&foobarbaz);
+ t1.Access(&ClassWithStatic::Data, true, 2, true);
+ t1.Return();
+}
diff --git a/lib/tsan/rtl_tests/tsan_mutex.cc b/lib/tsan/rtl_tests/tsan_mutex.cc
new file mode 100644
index 0000000..4d9c779
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_mutex.cc
@@ -0,0 +1,221 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <stdint.h>
+
+namespace __tsan {
+
+TEST(ThreadSanitizer, BasicMutex) {
+ ScopedThread t;
+ Mutex m;
+ t.Create(m);
+
+ t.Lock(m);
+ t.Unlock(m);
+
+ CHECK(t.TryLock(m));
+ t.Unlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryLock(m));
+ t.Unlock(m);
+
+ t.Destroy(m);
+}
+
+TEST(ThreadSanitizer, BasicSpinMutex) {
+ ScopedThread t;
+ Mutex m(Mutex::Spin);
+ t.Create(m);
+
+ t.Lock(m);
+ t.Unlock(m);
+
+ CHECK(t.TryLock(m));
+ t.Unlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryLock(m));
+ t.Unlock(m);
+
+ t.Destroy(m);
+}
+
+TEST(ThreadSanitizer, BasicRwMutex) {
+ ScopedThread t;
+ Mutex m(Mutex::RW);
+ t.Create(m);
+
+ t.Lock(m);
+ t.Unlock(m);
+
+ CHECK(t.TryLock(m));
+ t.Unlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryLock(m));
+ t.Unlock(m);
+
+ t.ReadLock(m);
+ t.ReadUnlock(m);
+
+ CHECK(t.TryReadLock(m));
+ t.ReadUnlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryReadLock(m));
+ t.Unlock(m);
+
+ t.ReadLock(m);
+ CHECK(!t.TryLock(m));
+ t.ReadUnlock(m);
+
+ t.ReadLock(m);
+ CHECK(t.TryReadLock(m));
+ t.ReadUnlock(m);
+ t.ReadUnlock(m);
+
+ t.Destroy(m);
+}
+
+TEST(ThreadSanitizer, Mutex) {
+ Mutex m;
+ MainThread t0;
+ t0.Create(m);
+
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Lock(m);
+ t1.Write1(l);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t2.Destroy(m);
+}
+
+TEST(ThreadSanitizer, SpinMutex) {
+ Mutex m(Mutex::Spin);
+ MainThread t0;
+ t0.Create(m);
+
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Lock(m);
+ t1.Write1(l);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t2.Destroy(m);
+}
+
+TEST(ThreadSanitizer, RwMutex) {
+ Mutex m(Mutex::RW);
+ MainThread t0;
+ t0.Create(m);
+
+ ScopedThread t1, t2, t3;
+ MemLoc l;
+ t1.Lock(m);
+ t1.Write1(l);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t1.ReadLock(m);
+ t3.ReadLock(m);
+ t1.Read1(l);
+ t3.Read1(l);
+ t1.ReadUnlock(m);
+ t3.ReadUnlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t2.Destroy(m);
+}
+
+TEST(ThreadSanitizer, StaticMutex) {
+ // Emulates statically initialized mutex.
+ Mutex m;
+ m.StaticInit();
+ {
+ ScopedThread t1, t2;
+ t1.Lock(m);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Unlock(m);
+ }
+ MainThread().Destroy(m);
+}
+
+static void *singleton_thread(void *param) {
+ atomic_uintptr_t *singleton = (atomic_uintptr_t *)param;
+ for (int i = 0; i < 4*1024*1024; i++) {
+ int *val = (int *)atomic_load(singleton, memory_order_acquire);
+ __tsan_acquire(singleton);
+ __tsan_read4(val);
+ CHECK_EQ(*val, 42);
+ }
+ return 0;
+}
+
+TEST(DISABLED_BENCH_ThreadSanitizer, Singleton) {
+ const int kClockSize = 100;
+ const int kThreadCount = 8;
+
+  // Puff up the thread's clock.
+ for (int i = 0; i < kClockSize; i++) {
+ ScopedThread t1;
+ (void)t1;
+ }
+ // Create the singleton.
+ int val = 42;
+ __tsan_write4(&val);
+ atomic_uintptr_t singleton;
+ __tsan_release(&singleton);
+ atomic_store(&singleton, (uintptr_t)&val, memory_order_release);
+ // Create reader threads.
+ pthread_t threads[kThreadCount];
+ for (int t = 0; t < kThreadCount; t++)
+ pthread_create(&threads[t], 0, singleton_thread, &singleton);
+ for (int t = 0; t < kThreadCount; t++)
+ pthread_join(threads[t], 0);
+}
+
+TEST(DISABLED_BENCH_ThreadSanitizer, StopFlag) {
+ const int kClockSize = 100;
+ const int kIters = 16*1024*1024;
+
+  // Puff up the thread's clock.
+ for (int i = 0; i < kClockSize; i++) {
+ ScopedThread t1;
+ (void)t1;
+ }
+ // Create the stop flag.
+ atomic_uintptr_t flag;
+ __tsan_release(&flag);
+ atomic_store(&flag, 0, memory_order_release);
+ // Read it a lot.
+ for (int i = 0; i < kIters; i++) {
+ uptr v = atomic_load(&flag, memory_order_acquire);
+ __tsan_acquire(&flag);
+ CHECK_EQ(v, 0);
+ }
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl_tests/tsan_posix.cc b/lib/tsan/rtl_tests/tsan_posix.cc
new file mode 100644
index 0000000..0caedd7
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_posix.cc
@@ -0,0 +1,146 @@
+//===-- tsan_posix.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <pthread.h>
+
+struct thread_key {
+ pthread_key_t key;
+ pthread_mutex_t *mtx;
+ int val;
+ int *cnt;
+ thread_key(pthread_key_t key, pthread_mutex_t *mtx, int val, int *cnt)
+ : key(key)
+ , mtx(mtx)
+ , val(val)
+ , cnt(cnt) {
+ }
+};
+
+static void thread_specific_dtor(void *v) {
+ thread_key *k = (thread_key *)v;
+ EXPECT_EQ(pthread_mutex_lock(k->mtx), 0);
+ (*k->cnt)++;
+ __tsan_write4(&k->cnt);
+ EXPECT_EQ(pthread_mutex_unlock(k->mtx), 0);
+ if (k->val == 42) {
+ delete k;
+ } else if (k->val == 43 || k->val == 44) {
+ k->val--;
+ EXPECT_EQ(pthread_setspecific(k->key, k), 0);
+ } else {
+ ASSERT_TRUE(false);
+ }
+}
+
+static void *dtors_thread(void *p) {
+ thread_key *k = (thread_key *)p;
+ EXPECT_EQ(pthread_setspecific(k->key, k), 0);
+ return 0;
+}
+
+TEST(Posix, ThreadSpecificDtors) {
+ int cnt = 0;
+ pthread_key_t key;
+  EXPECT_EQ(pthread_key_create(&key, thread_specific_dtor), 0);
+ pthread_mutex_t mtx;
+ EXPECT_EQ(pthread_mutex_init(&mtx, 0), 0);
+ pthread_t th[3];
+ thread_key *k[3];
+ k[0] = new thread_key(key, &mtx, 42, &cnt);
+ k[1] = new thread_key(key, &mtx, 43, &cnt);
+ k[2] = new thread_key(key, &mtx, 44, &cnt);
+ EXPECT_EQ(pthread_create(&th[0], 0, dtors_thread, k[0]), 0);
+ EXPECT_EQ(pthread_create(&th[1], 0, dtors_thread, k[1]), 0);
+ EXPECT_EQ(pthread_join(th[0], 0), 0);
+ EXPECT_EQ(pthread_create(&th[2], 0, dtors_thread, k[2]), 0);
+ EXPECT_EQ(pthread_join(th[1], 0), 0);
+ EXPECT_EQ(pthread_join(th[2], 0), 0);
+ EXPECT_EQ(pthread_key_delete(key), 0);
+ EXPECT_EQ(6, cnt);
+}
+
+static __thread int local_var;
+
+static void *local_thread(void *p) {
+ __tsan_write1(&local_var);
+ __tsan_write1(&p);
+ if (p == 0)
+ return 0;
+ const int kThreads = 4;
+ pthread_t th[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ EXPECT_EQ(pthread_create(&th[i], 0, local_thread,
+ (void*)((long)p - 1)), 0); // NOLINT
+ for (int i = 0; i < kThreads; i++)
+ EXPECT_EQ(pthread_join(th[i], 0), 0);
+ return 0;
+}
+
+TEST(Posix, ThreadLocalAccesses) {
+ local_thread((void*)2);
+}
+
+struct CondContext {
+ pthread_mutex_t m;
+ pthread_cond_t c;
+ int data;
+};
+
+static void *cond_thread(void *p) {
+ CondContext &ctx = *static_cast<CondContext*>(p);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(ctx.data, 0);
+ ctx.data = 1;
+ EXPECT_EQ(pthread_cond_signal(&ctx.c), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ while (ctx.data != 2)
+ EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ ctx.data = 3;
+ EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ return 0;
+}
+
+TEST(Posix, CondBasic) {
+ CondContext ctx;
+ EXPECT_EQ(pthread_mutex_init(&ctx.m, 0), 0);
+ EXPECT_EQ(pthread_cond_init(&ctx.c, 0), 0);
+ ctx.data = 0;
+ pthread_t th;
+ EXPECT_EQ(pthread_create(&th, 0, cond_thread, &ctx), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ while (ctx.data != 1)
+ EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ ctx.data = 2;
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ while (ctx.data != 3)
+ EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ EXPECT_EQ(pthread_join(th, 0), 0);
+ EXPECT_EQ(pthread_cond_destroy(&ctx.c), 0);
+ EXPECT_EQ(pthread_mutex_destroy(&ctx.m), 0);
+}
diff --git a/lib/tsan/rtl_tests/tsan_string.cc b/lib/tsan/rtl_tests/tsan_string.cc
new file mode 100644
index 0000000..75adc6c
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_string.cc
@@ -0,0 +1,82 @@
+//===-- tsan_string.cc ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <string.h>
+
+namespace __tsan {
+
+TEST(ThreadSanitizer, Memcpy) {
+ char data0[7] = {1, 2, 3, 4, 5, 6, 7};
+ char data[7] = {42, 42, 42, 42, 42, 42, 42};
+ MainThread().Memcpy(data+1, data0+1, 5);
+ EXPECT_EQ(data[0], 42);
+ EXPECT_EQ(data[1], 2);
+ EXPECT_EQ(data[2], 3);
+ EXPECT_EQ(data[3], 4);
+ EXPECT_EQ(data[4], 5);
+ EXPECT_EQ(data[5], 6);
+ EXPECT_EQ(data[6], 42);
+ MainThread().Memset(data+1, 13, 5);
+ EXPECT_EQ(data[0], 42);
+ EXPECT_EQ(data[1], 13);
+ EXPECT_EQ(data[2], 13);
+ EXPECT_EQ(data[3], 13);
+ EXPECT_EQ(data[4], 13);
+ EXPECT_EQ(data[5], 13);
+ EXPECT_EQ(data[6], 42);
+}
+
+TEST(ThreadSanitizer, MemcpyRace1) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ char *data2 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data, data1, 10);
+ t2.Memcpy(data, data2, 10, true);
+}
+
+TEST(ThreadSanitizer, MemcpyRace2) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ char *data2 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data+5, data1, 1);
+ t2.Memcpy(data+3, data2, 4, true);
+}
+
+TEST(ThreadSanitizer, MemcpyRace3) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ char *data2 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data, data1, 10);
+ t2.Memcpy(data1, data2, 10, true);
+}
+
+TEST(ThreadSanitizer, MemcpyStack) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data, data1, 10);
+ t2.Memcpy(data, data1, 10, true);
+}
+
+TEST(ThreadSanitizer, MemsetRace1) {
+ char *data = new char[10];
+ ScopedThread t1, t2;
+ t1.Memset(data, 1, 10);
+ t2.Memset(data, 2, 10, true);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl_tests/tsan_test.cc b/lib/tsan/rtl_tests/tsan_test.cc
new file mode 100644
index 0000000..7164140
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_test.cc
@@ -0,0 +1,44 @@
+//===-- tsan_test.cc ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+
+static void foo() {}
+static void bar() {}
+
+TEST(ThreadSanitizer, FuncCall) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Write1(l);
+ t2.Call(foo);
+ t2.Call(bar);
+ t2.Write1(l, true);
+ t2.Return();
+ t2.Return();
+}
+
+int main(int argc, char **argv) {
+ TestMutexBeforeInit(); // Mutexes must be usable before __tsan_init();
+ __tsan_init();
+ __tsan_func_entry(__builtin_return_address(0));
+ __tsan_func_entry((char*)&main + 1);
+
+ testing::GTEST_FLAG(death_test_style) = "threadsafe";
+ testing::InitGoogleTest(&argc, argv);
+ int res = RUN_ALL_TESTS();
+
+ __tsan_func_exit();
+ __tsan_func_exit();
+ return res;
+}
diff --git a/lib/tsan/rtl_tests/tsan_test_util.h b/lib/tsan/rtl_tests/tsan_test_util.h
new file mode 100644
index 0000000..483a564
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_test_util.h
@@ -0,0 +1,122 @@
+//===-- tsan_test_util.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Test utils.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TEST_UTIL_H
+#define TSAN_TEST_UTIL_H
+
+void TestMutexBeforeInit();
+
+// A location of memory on which a race may be detected.
+class MemLoc {
+ public:
+ explicit MemLoc(int offset_from_aligned = 0);
+ explicit MemLoc(void *const real_addr) : loc_(real_addr) { }
+ ~MemLoc();
+ void *loc() const { return loc_; }
+ private:
+ void *const loc_;
+ MemLoc(const MemLoc&);
+ void operator = (const MemLoc&);
+};
+
+class Mutex {
+ public:
+ enum Type { Normal, Spin, RW };
+
+ explicit Mutex(Type type = Normal);
+ ~Mutex();
+
+ void Init();
+  void StaticInit();  // Emulates static initialization (tsan invisible).
+ void Destroy();
+ void Lock();
+ bool TryLock();
+ void Unlock();
+ void ReadLock();
+ bool TryReadLock();
+ void ReadUnlock();
+
+ private:
+ // Placeholder for pthread_mutex_t, CRITICAL_SECTION or whatever.
+ void *mtx_[128];
+ bool alive_;
+ const Type type_;
+
+ Mutex(const Mutex&);
+ void operator = (const Mutex&);
+};
+
+// A thread is started in CTOR and joined in DTOR.
+class ScopedThread {
+ public:
+ explicit ScopedThread(bool detached = false, bool main = false);
+ ~ScopedThread();
+ void Detach();
+
+ void Access(void *addr, bool is_write, int size, bool expect_race);
+ void Read(const MemLoc &ml, int size, bool expect_race = false) {
+ Access(ml.loc(), false, size, expect_race);
+ }
+ void Write(const MemLoc &ml, int size, bool expect_race = false) {
+ Access(ml.loc(), true, size, expect_race);
+ }
+ void Read1(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 1, expect_race); }
+ void Read2(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 2, expect_race); }
+ void Read4(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 4, expect_race); }
+ void Read8(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 8, expect_race); }
+ void Write1(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 1, expect_race); }
+ void Write2(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 2, expect_race); }
+ void Write4(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 4, expect_race); }
+ void Write8(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 8, expect_race); }
+
+ void VptrUpdate(const MemLoc &vptr, const MemLoc &new_val,
+ bool expect_race = false);
+
+ void Call(void(*pc)());
+ void Return();
+
+ void Create(const Mutex &m);
+ void Destroy(const Mutex &m);
+ void Lock(const Mutex &m);
+ bool TryLock(const Mutex &m);
+ void Unlock(const Mutex &m);
+ void ReadLock(const Mutex &m);
+ bool TryReadLock(const Mutex &m);
+ void ReadUnlock(const Mutex &m);
+
+ void Memcpy(void *dst, const void *src, int size, bool expect_race = false);
+ void Memset(void *dst, int val, int size, bool expect_race = false);
+
+ private:
+ struct Impl;
+ Impl *impl_;
+ ScopedThread(const ScopedThread&); // Not implemented.
+ void operator = (const ScopedThread&); // Not implemented.
+};
+
+class MainThread : public ScopedThread {
+ public:
+ MainThread()
+ : ScopedThread(false, true) {
+ }
+};
+
+#endif // #ifndef TSAN_TEST_UTIL_H
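
The header above is the entire API surface of the rtl_tests harness: MemLoc hands out distinct addresses to race on, Mutex wraps a pthread mutex/spinlock/rwlock, and ScopedThread runs each access on its own thread (started in the constructor, joined in the destructor). A minimal sketch of how the pieces combine, modeled on the tests later in this import (the test name RaceDemo is invented for illustration):

    #include "tsan_test_util.h"
    #include "gtest/gtest.h"

    TEST(ThreadSanitizer, RaceDemo) {      // illustrative, not part of the import
      MemLoc l;                            // one location to race on
      ScopedThread t1;                     // worker threads, joined on scope exit
      ScopedThread t2;
      t1.Write1(l);                        // 1-byte write from thread 1
      t2.Write1(l, true /*expect_race*/);  // racing write; a report is expected
    }
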
diff --git a/lib/tsan/rtl_tests/tsan_test_util_linux.cc b/lib/tsan/rtl_tests/tsan_test_util_linux.cc
new file mode 100644
index 0000000..5bc393b
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_test_util_linux.cc
@@ -0,0 +1,465 @@
+
+//===-- tsan_test_util_linux.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Test utils, linux implementation.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "tsan_report.h"
+
+#include "gtest/gtest.h"
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+using namespace __tsan; // NOLINT
+
+static __thread bool expect_report;
+static __thread bool expect_report_reported;
+static __thread ReportType expect_report_type;
+
+extern "C" void *__interceptor_memcpy(void*, const void*, uptr);
+extern "C" void *__interceptor_memset(void*, int, uptr);
+
+static void *BeforeInitThread(void *param) {
+ (void)param;
+ return 0;
+}
+
+static void AtExit() {
+}
+
+void TestMutexBeforeInit() {
+ // Mutexes must be usable before __tsan_init();
+ pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+ pthread_mutex_lock(&mtx);
+ pthread_mutex_unlock(&mtx);
+ pthread_mutex_destroy(&mtx);
+ pthread_t thr;
+ pthread_create(&thr, 0, BeforeInitThread, 0);
+ pthread_join(thr, 0);
+ atexit(AtExit);
+}
+
+namespace __tsan {
+bool OnReport(const ReportDesc *rep, bool suppressed) {
+ if (expect_report) {
+ if (rep->typ != expect_report_type) {
+ printf("Expected report of type %d, got type %d\n",
+ (int)expect_report_type, (int)rep->typ);
+ EXPECT_FALSE("Wrong report type");
+ return false;
+ }
+ } else {
+ EXPECT_FALSE("Unexpected report");
+ return false;
+ }
+ expect_report_reported = true;
+ return true;
+}
+}
+
+static void* allocate_addr(int size, int offset_from_aligned = 0) {
+ static uintptr_t foo;
+ static atomic_uintptr_t uniq = {(uintptr_t)&foo}; // Some real address.
+ const int kAlign = 16;
+ CHECK(offset_from_aligned < kAlign);
+ size = (size + 2 * kAlign) & ~(kAlign - 1);
+ uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
+ return (void*)(addr + offset_from_aligned);
+}
+
+MemLoc::MemLoc(int offset_from_aligned)
+ : loc_(allocate_addr(16, offset_from_aligned)) {
+}
+
+MemLoc::~MemLoc() {
+}
+
+Mutex::Mutex(Type type)
+ : alive_()
+ , type_(type) {
+}
+
+Mutex::~Mutex() {
+ CHECK(!alive_);
+}
+
+void Mutex::Init() {
+ CHECK(!alive_);
+ alive_ = true;
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
+ else
+ CHECK(0);
+}
+
+void Mutex::StaticInit() {
+ CHECK(!alive_);
+ CHECK(type_ == Normal);
+ alive_ = true;
+ pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
+ memcpy(mtx_, &tmp, sizeof(tmp));
+}
+
+void Mutex::Destroy() {
+ CHECK(alive_);
+ alive_ = false;
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
+}
+
+void Mutex::Lock() {
+ CHECK(alive_);
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+bool Mutex::TryLock() {
+ CHECK(alive_);
+ if (type_ == Normal)
+ return pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
+ else if (type_ == Spin)
+ return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
+ else if (type_ == RW)
+ return pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
+ return false;
+}
+
+void Mutex::Unlock() {
+ CHECK(alive_);
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+void Mutex::ReadLock() {
+ CHECK(alive_);
+ CHECK(type_ == RW);
+ CHECK_EQ(pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+bool Mutex::TryReadLock() {
+ CHECK(alive_);
+ CHECK(type_ == RW);
+ return pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
+}
+
+void Mutex::ReadUnlock() {
+ CHECK(alive_);
+ CHECK(type_ == RW);
+ CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+struct Event {
+ enum Type {
+ SHUTDOWN,
+ READ,
+ WRITE,
+ VPTR_UPDATE,
+ CALL,
+ RETURN,
+ MUTEX_CREATE,
+ MUTEX_DESTROY,
+ MUTEX_LOCK,
+ MUTEX_TRYLOCK,
+ MUTEX_UNLOCK,
+ MUTEX_READLOCK,
+ MUTEX_TRYREADLOCK,
+ MUTEX_READUNLOCK,
+ MEMCPY,
+ MEMSET
+ };
+ Type type;
+ void *ptr;
+ uptr arg;
+ uptr arg2;
+ bool res;
+ bool expect_report;
+ ReportType report_type;
+
+ Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
+ : type(type)
+ , ptr(const_cast<void*>(ptr))
+ , arg(arg)
+ , arg2(arg2)
+ , res()
+ , expect_report()
+ , report_type() {
+ }
+
+ void ExpectReport(ReportType type) {
+ expect_report = true;
+ report_type = type;
+ }
+};
+
+struct ScopedThread::Impl {
+ pthread_t thread;
+ bool main;
+ bool detached;
+ atomic_uintptr_t event; // Event*
+
+ static void *ScopedThreadCallback(void *arg);
+ void send(Event *ev);
+ void HandleEvent(Event *ev);
+};
+
+void ScopedThread::Impl::HandleEvent(Event *ev) {
+ CHECK_EQ(expect_report, false);
+ expect_report = ev->expect_report;
+ expect_report_reported = false;
+ expect_report_type = ev->report_type;
+ switch (ev->type) {
+ case Event::READ:
+ case Event::WRITE: {
+ void (*tsan_mop)(void *addr) = 0;
+ if (ev->type == Event::READ) {
+ switch (ev->arg /*size*/) {
+ case 1: tsan_mop = __tsan_read1; break;
+ case 2: tsan_mop = __tsan_read2; break;
+ case 4: tsan_mop = __tsan_read4; break;
+ case 8: tsan_mop = __tsan_read8; break;
+ case 16: tsan_mop = __tsan_read16; break;
+ }
+ } else {
+ switch (ev->arg /*size*/) {
+ case 1: tsan_mop = __tsan_write1; break;
+ case 2: tsan_mop = __tsan_write2; break;
+ case 4: tsan_mop = __tsan_write4; break;
+ case 8: tsan_mop = __tsan_write8; break;
+ case 16: tsan_mop = __tsan_write16; break;
+ }
+ }
+ CHECK_NE(tsan_mop, 0);
+ errno = ECHRNG;
+ tsan_mop(ev->ptr);
+ CHECK_EQ(errno, ECHRNG); // In no case must errno be changed.
+ break;
+ }
+ case Event::VPTR_UPDATE:
+ __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
+ break;
+ case Event::CALL:
+ __tsan_func_entry((void*)((uptr)ev->ptr));
+ break;
+ case Event::RETURN:
+ __tsan_func_exit();
+ break;
+ case Event::MUTEX_CREATE:
+ static_cast<Mutex*>(ev->ptr)->Init();
+ break;
+ case Event::MUTEX_DESTROY:
+ static_cast<Mutex*>(ev->ptr)->Destroy();
+ break;
+ case Event::MUTEX_LOCK:
+ static_cast<Mutex*>(ev->ptr)->Lock();
+ break;
+ case Event::MUTEX_TRYLOCK:
+ ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
+ break;
+ case Event::MUTEX_UNLOCK:
+ static_cast<Mutex*>(ev->ptr)->Unlock();
+ break;
+ case Event::MUTEX_READLOCK:
+ static_cast<Mutex*>(ev->ptr)->ReadLock();
+ break;
+ case Event::MUTEX_TRYREADLOCK:
+ ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
+ break;
+ case Event::MUTEX_READUNLOCK:
+ static_cast<Mutex*>(ev->ptr)->ReadUnlock();
+ break;
+ case Event::MEMCPY:
+ __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
+ break;
+ case Event::MEMSET:
+ __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
+ break;
+ default: CHECK(0);
+ }
+ if (expect_report && !expect_report_reported) {
+ printf("Missed expected report of type %d\n", (int)ev->report_type);
+ EXPECT_FALSE("Missed expected race");
+ }
+ expect_report = false;
+}
+
+void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
+ __tsan_func_entry(__builtin_return_address(0));
+ Impl *impl = (Impl*)arg;
+ for (;;) {
+ Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
+ if (ev == 0) {
+ pthread_yield();
+ continue;
+ }
+ if (ev->type == Event::SHUTDOWN) {
+ atomic_store(&impl->event, 0, memory_order_release);
+ break;
+ }
+ impl->HandleEvent(ev);
+ atomic_store(&impl->event, 0, memory_order_release);
+ }
+ __tsan_func_exit();
+ return 0;
+}
+
+void ScopedThread::Impl::send(Event *e) {
+ if (main) {
+ HandleEvent(e);
+ } else {
+ CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
+ atomic_store(&event, (uintptr_t)e, memory_order_release);
+ while (atomic_load(&event, memory_order_acquire) != 0)
+ pthread_yield();
+ }
+}
+
+ScopedThread::ScopedThread(bool detached, bool main) {
+ impl_ = new Impl;
+ impl_->main = main;
+ impl_->detached = detached;
+ atomic_store(&impl_->event, 0, memory_order_relaxed);
+ if (!main) {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, detached);
+ pthread_attr_setstacksize(&attr, 64*1024);
+ pthread_create(&impl_->thread, &attr,
+ ScopedThread::Impl::ScopedThreadCallback, impl_);
+ }
+}
+
+ScopedThread::~ScopedThread() {
+ if (!impl_->main) {
+ Event event(Event::SHUTDOWN);
+ impl_->send(&event);
+ if (!impl_->detached)
+ pthread_join(impl_->thread, 0);
+ }
+ delete impl_;
+}
+
+void ScopedThread::Detach() {
+ CHECK(!impl_->main);
+ CHECK(!impl_->detached);
+ impl_->detached = true;
+ pthread_detach(impl_->thread);
+}
+
+void ScopedThread::Access(void *addr, bool is_write,
+ int size, bool expect_race) {
+ Event event(is_write ? Event::WRITE : Event::READ, addr, size);
+ if (expect_race)
+ event.ExpectReport(ReportTypeRace);
+ impl_->send(&event);
+}
+
+void ScopedThread::VptrUpdate(const MemLoc &vptr,
+ const MemLoc &new_val,
+ bool expect_race) {
+ Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
+ if (expect_race)
+ event.ExpectReport(ReportTypeRace);
+ impl_->send(&event);
+}
+
+void ScopedThread::Call(void(*pc)()) {
+ Event event(Event::CALL, (void*)pc);
+ impl_->send(&event);
+}
+
+void ScopedThread::Return() {
+ Event event(Event::RETURN);
+ impl_->send(&event);
+}
+
+void ScopedThread::Create(const Mutex &m) {
+ Event event(Event::MUTEX_CREATE, &m);
+ impl_->send(&event);
+}
+
+void ScopedThread::Destroy(const Mutex &m) {
+ Event event(Event::MUTEX_DESTROY, &m);
+ impl_->send(&event);
+}
+
+void ScopedThread::Lock(const Mutex &m) {
+ Event event(Event::MUTEX_LOCK, &m);
+ impl_->send(&event);
+}
+
+bool ScopedThread::TryLock(const Mutex &m) {
+ Event event(Event::MUTEX_TRYLOCK, &m);
+ impl_->send(&event);
+ return event.res;
+}
+
+void ScopedThread::Unlock(const Mutex &m) {
+ Event event(Event::MUTEX_UNLOCK, &m);
+ impl_->send(&event);
+}
+
+void ScopedThread::ReadLock(const Mutex &m) {
+ Event event(Event::MUTEX_READLOCK, &m);
+ impl_->send(&event);
+}
+
+bool ScopedThread::TryReadLock(const Mutex &m) {
+ Event event(Event::MUTEX_TRYREADLOCK, &m);
+ impl_->send(&event);
+ return event.res;
+}
+
+void ScopedThread::ReadUnlock(const Mutex &m) {
+ Event event(Event::MUTEX_READUNLOCK, &m);
+ impl_->send(&event);
+}
+
+void ScopedThread::Memcpy(void *dst, const void *src, int size,
+ bool expect_race) {
+ Event event(Event::MEMCPY, dst, (uptr)src, size);
+ if (expect_race)
+ event.ExpectReport(ReportTypeRace);
+ impl_->send(&event);
+}
+
+void ScopedThread::Memset(void *dst, int val, int size,
+ bool expect_race) {
+ Event event(Event::MEMSET, dst, val, size);
+ if (expect_race)
+ event.ExpectReport(ReportTypeRace);
+ impl_->send(&event);
+}
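
ScopedThread::Impl::send() above is a synchronous handoff: the caller publishes an Event* into an atomic slot with a release store, the worker loop spins with acquire loads, handles the event, and clears the slot to signal completion. A standalone sketch of the same pattern in plain C++11 (names and payload are invented; this is not the runtime's code):

    #include <atomic>
    #include <thread>

    struct Slot {
      std::atomic<int*> event{nullptr};

      void send(int *ev) {                           // caller side
        event.store(ev, std::memory_order_release);
        while (event.load(std::memory_order_acquire) != nullptr)
          std::this_thread::yield();                 // wait until the worker consumed it
      }

      void worker_step() {                           // worker side
        int *ev = event.load(std::memory_order_acquire);
        if (ev == nullptr) { std::this_thread::yield(); return; }
        ++*ev;                                       // "handle" the event
        event.store(nullptr, std::memory_order_release);  // signal completion
      }
    };

    int main() {
      Slot s;
      int handled = 0;
      std::thread w([&] { while (handled == 0) s.worker_step(); });
      s.send(&handled);                              // returns only after the worker ran
      w.join();
      return handled == 1 ? 0 : 1;
    }
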
diff --git a/lib/tsan/rtl_tests/tsan_thread.cc b/lib/tsan/rtl_tests/tsan_thread.cc
new file mode 100644
index 0000000..5646415
--- /dev/null
+++ b/lib/tsan/rtl_tests/tsan_thread.cc
@@ -0,0 +1,59 @@
+//===-- tsan_thread.cc ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+
+TEST(ThreadSanitizer, ThreadSync) {
+ MainThread t0;
+ MemLoc l;
+ t0.Write1(l);
+ {
+ ScopedThread t1;
+ t1.Write1(l);
+ }
+ t0.Write1(l);
+}
+
+TEST(ThreadSanitizer, ThreadDetach1) {
+ ScopedThread t1(true);
+ MemLoc l;
+ t1.Write1(l);
+}
+
+TEST(ThreadSanitizer, ThreadDetach2) {
+ ScopedThread t1;
+ MemLoc l;
+ t1.Write1(l);
+ t1.Detach();
+}
+
+static void *thread_alot_func(void *arg) {
+ (void)arg;
+ int usleep(unsigned);
+ usleep(50);
+ return 0;
+}
+
+TEST(DISABLED_SLOW_ThreadSanitizer, ThreadALot) {
+ const int kThreads = 70000;
+ const int kAlive = 1000;
+ pthread_t threads[kAlive] = {};
+ for (int i = 0; i < kThreads; i++) {
+ if (threads[i % kAlive])
+ pthread_join(threads[i % kAlive], 0);
+ pthread_create(&threads[i % kAlive], 0, thread_alot_func, 0);
+ }
+ for (int i = 0; i < kAlive; i++) {
+ pthread_join(threads[i], 0);
+ }
+}
diff --git a/lib/tsan/unit_tests/tsan_clock_test.cc b/lib/tsan/unit_tests/tsan_clock_test.cc
new file mode 100644
index 0000000..fe886e1
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_clock_test.cc
@@ -0,0 +1,123 @@
+//===-- tsan_clock_test.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_clock.h"
+#include "tsan_rtl.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+TEST(Clock, VectorBasic) {
+ ScopedInRtl in_rtl;
+ ThreadClock clk;
+ CHECK_EQ(clk.size(), 0);
+ clk.tick(0);
+ CHECK_EQ(clk.size(), 1);
+ CHECK_EQ(clk.get(0), 1);
+ clk.tick(3);
+ CHECK_EQ(clk.size(), 4);
+ CHECK_EQ(clk.get(0), 1);
+ CHECK_EQ(clk.get(1), 0);
+ CHECK_EQ(clk.get(2), 0);
+ CHECK_EQ(clk.get(3), 1);
+ clk.tick(3);
+ CHECK_EQ(clk.get(3), 2);
+}
+
+TEST(Clock, ChunkedBasic) {
+ ScopedInRtl in_rtl;
+ ThreadClock vector;
+ SyncClock chunked;
+ CHECK_EQ(vector.size(), 0);
+ CHECK_EQ(chunked.size(), 0);
+ vector.acquire(&chunked);
+ CHECK_EQ(vector.size(), 0);
+ CHECK_EQ(chunked.size(), 0);
+ vector.release(&chunked);
+ CHECK_EQ(vector.size(), 0);
+ CHECK_EQ(chunked.size(), 0);
+ vector.acq_rel(&chunked);
+ CHECK_EQ(vector.size(), 0);
+ CHECK_EQ(chunked.size(), 0);
+}
+
+TEST(Clock, AcquireRelease) {
+ ScopedInRtl in_rtl;
+ ThreadClock vector1;
+ vector1.tick(100);
+ SyncClock chunked;
+ vector1.release(&chunked);
+ CHECK_EQ(chunked.size(), 101);
+ ThreadClock vector2;
+ vector2.acquire(&chunked);
+ CHECK_EQ(vector2.size(), 101);
+ CHECK_EQ(vector2.get(0), 0);
+ CHECK_EQ(vector2.get(1), 0);
+ CHECK_EQ(vector2.get(99), 0);
+ CHECK_EQ(vector2.get(100), 1);
+}
+
+TEST(Clock, ManyThreads) {
+ ScopedInRtl in_rtl;
+ SyncClock chunked;
+ for (int i = 0; i < 100; i++) {
+ ThreadClock vector;
+ vector.tick(i);
+ vector.release(&chunked);
+ CHECK_EQ(chunked.size(), i + 1);
+ vector.acquire(&chunked);
+ CHECK_EQ(vector.size(), i + 1);
+ }
+ ThreadClock vector;
+ vector.acquire(&chunked);
+ CHECK_EQ(vector.size(), 100);
+ for (int i = 0; i < 100; i++)
+ CHECK_EQ(vector.get(i), 1);
+}
+
+TEST(Clock, DifferentSizes) {
+ ScopedInRtl in_rtl;
+ {
+ ThreadClock vector1;
+ vector1.tick(10);
+ ThreadClock vector2;
+ vector2.tick(20);
+ {
+ SyncClock chunked;
+ vector1.release(&chunked);
+ CHECK_EQ(chunked.size(), 11);
+ vector2.release(&chunked);
+ CHECK_EQ(chunked.size(), 21);
+ }
+ {
+ SyncClock chunked;
+ vector2.release(&chunked);
+ CHECK_EQ(chunked.size(), 21);
+ vector1.release(&chunked);
+ CHECK_EQ(chunked.size(), 21);
+ }
+ {
+ SyncClock chunked;
+ vector1.release(&chunked);
+ vector2.acquire(&chunked);
+ CHECK_EQ(vector2.size(), 21);
+ }
+ {
+ SyncClock chunked;
+ vector2.release(&chunked);
+ vector1.acquire(&chunked);
+ CHECK_EQ(vector1.size(), 21);
+ }
+ }
+}
+
+} // namespace __tsan
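
The clock tests above exercise the usual vector-clock protocol: release() merges a thread's clock into the sync object's clock element-wise (taking maxima and growing it as needed), and acquire() merges the sync clock back into the thread's clock. A small model of just that semantics, mirroring TEST(Clock, AcquireRelease) — an illustration only, not the runtime's SyncClock representation:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    typedef std::vector<unsigned long> Clock;   // index = thread id, value = epoch

    static void merge_into(Clock &dst, const Clock &src) {  // element-wise max
      if (dst.size() < src.size()) dst.resize(src.size(), 0);
      for (size_t i = 0; i < src.size(); i++)
        dst[i] = std::max(dst[i], src[i]);
    }

    int main() {
      Clock vector1(101, 0), chunked, vector2;
      vector1[100] = 1;                 // vector1.tick(100)
      merge_into(chunked, vector1);     // vector1.release(&chunked)
      merge_into(vector2, chunked);     // vector2.acquire(&chunked)
      assert(chunked.size() == 101);
      assert(vector2[100] == 1 && vector2[0] == 0);
      return 0;
    }
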
diff --git a/lib/tsan/unit_tests/tsan_flags_test.cc b/lib/tsan/unit_tests/tsan_flags_test.cc
new file mode 100644
index 0000000..d344cb5
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_flags_test.cc
@@ -0,0 +1,38 @@
+//===-- tsan_flags_test.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+TEST(Flags, Basic) {
+ ScopedInRtl in_rtl;
+ // At least should not crash.
+ Flags f = {};
+ InitializeFlags(&f, 0);
+ InitializeFlags(&f, "");
+}
+
+TEST(Flags, DefaultValues) {
+ ScopedInRtl in_rtl;
+ Flags f = {};
+
+ f.enable_annotations = false;
+ f.exitcode = -11;
+ InitializeFlags(&f, "");
+ EXPECT_EQ(66, f.exitcode);
+ EXPECT_EQ(true, f.enable_annotations);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/unit_tests/tsan_mman_test.cc b/lib/tsan/unit_tests/tsan_mman_test.cc
new file mode 100644
index 0000000..1a9a88f
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_mman_test.cc
@@ -0,0 +1,109 @@
+//===-- tsan_mman_test.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+TEST(Mman, Internal) {
+ ScopedInRtl in_rtl;
+ char *p = (char*)internal_alloc(MBlockScopedBuf, 10);
+ EXPECT_NE(p, (char*)0);
+ char *p2 = (char*)internal_alloc(MBlockScopedBuf, 20);
+ EXPECT_NE(p2, (char*)0);
+ EXPECT_NE(p2, p);
+ for (int i = 0; i < 10; i++) {
+ p[i] = 42;
+ }
+ for (int i = 0; i < 20; i++) {
+ ((char*)p2)[i] = 42;
+ }
+ internal_free(p);
+ internal_free(p2);
+}
+
+TEST(Mman, User) {
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ char *p = (char*)user_alloc(thr, pc, 10);
+ EXPECT_NE(p, (char*)0);
+ char *p2 = (char*)user_alloc(thr, pc, 20);
+ EXPECT_NE(p2, (char*)0);
+ EXPECT_NE(p2, p);
+ MBlock *b = user_mblock(thr, p);
+ EXPECT_NE(b, (MBlock*)0);
+ EXPECT_EQ(b->size, (uptr)10);
+ MBlock *b2 = user_mblock(thr, p2);
+ EXPECT_NE(b2, (MBlock*)0);
+ EXPECT_EQ(b2->size, (uptr)20);
+ for (int i = 0; i < 10; i++) {
+ p[i] = 42;
+ EXPECT_EQ(b, user_mblock(thr, p + i));
+ }
+ for (int i = 0; i < 20; i++) {
+ ((char*)p2)[i] = 42;
+ EXPECT_EQ(b2, user_mblock(thr, p2 + i));
+ }
+ user_free(thr, pc, p);
+ user_free(thr, pc, p2);
+}
+
+TEST(Mman, UserRealloc) {
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ {
+ void *p = user_realloc(thr, pc, 0, 0);
+    // Strictly speaking this is incorrect: realloc(NULL, N) is equivalent to
+    // malloc(N), and thus must return a non-NULL pointer.
+ EXPECT_EQ(p, (void*)0);
+ }
+ {
+ void *p = user_realloc(thr, pc, 0, 100);
+ EXPECT_NE(p, (void*)0);
+ memset(p, 0xde, 100);
+ user_free(thr, pc, p);
+ }
+ {
+ void *p = user_alloc(thr, pc, 100);
+ EXPECT_NE(p, (void*)0);
+ memset(p, 0xde, 100);
+ void *p2 = user_realloc(thr, pc, p, 0);
+ EXPECT_EQ(p2, (void*)0);
+ }
+ {
+ void *p = user_realloc(thr, pc, 0, 100);
+ EXPECT_NE(p, (void*)0);
+ memset(p, 0xde, 100);
+ void *p2 = user_realloc(thr, pc, p, 10000);
+ EXPECT_NE(p2, (void*)0);
+ for (int i = 0; i < 100; i++)
+ EXPECT_EQ(((char*)p2)[i], (char)0xde);
+ memset(p2, 0xde, 10000);
+ user_free(thr, pc, p2);
+ }
+ {
+ void *p = user_realloc(thr, pc, 0, 10000);
+ EXPECT_NE(p, (void*)0);
+ memset(p, 0xde, 10000);
+ void *p2 = user_realloc(thr, pc, p, 10);
+ EXPECT_NE(p2, (void*)0);
+ for (int i = 0; i < 10; i++)
+ EXPECT_EQ(((char*)p2)[i], (char)0xde);
+ user_free(thr, pc, p2);
+ }
+}
+
+} // namespace __tsan
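
TEST(Mman, UserRealloc) pins down the allocator's realloc-style edge cases: realloc(0, 0) currently returns null (the in-test comment notes this deviates from malloc(0) semantics), realloc(0, n) behaves like malloc(n), realloc(p, 0) frees p and returns null, and resizing preserves the common prefix of the old contents. A hedged sketch of a wrapper with exactly those rules on top of plain malloc/free (the explicit old_size parameter is an artifact of the sketch; the real user_realloc looks the size up from the block):

    #include <cstdlib>
    #include <cstring>

    void *demo_realloc(void *p, size_t old_size, size_t new_size) {
      if (p == nullptr && new_size == 0) return nullptr;   // the deviation noted in the test
      if (p == nullptr) return std::malloc(new_size);      // realloc(0, n) == malloc(n)
      if (new_size == 0) { std::free(p); return nullptr; } // realloc(p, 0) frees, returns null
      void *q = std::malloc(new_size);
      if (q != nullptr)
        std::memcpy(q, p, old_size < new_size ? old_size : new_size);  // keep common prefix
      std::free(p);
      return q;
    }
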
diff --git a/lib/tsan/unit_tests/tsan_mutex_test.cc b/lib/tsan/unit_tests/tsan_mutex_test.cc
new file mode 100644
index 0000000..c39841d
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_mutex_test.cc
@@ -0,0 +1,126 @@
+//===-- tsan_mutex_test.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_mutex.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+template<typename MutexType>
+class TestData {
+ public:
+ explicit TestData(MutexType *mtx)
+ : mtx_(mtx) {
+ for (int i = 0; i < kSize; i++)
+ data_[i] = 0;
+ }
+
+ void Write() {
+ Lock l(mtx_);
+ T v0 = data_[0];
+ for (int i = 0; i < kSize; i++) {
+ CHECK_EQ(data_[i], v0);
+ data_[i]++;
+ }
+ }
+
+ void Read() {
+ ReadLock l(mtx_);
+ T v0 = data_[0];
+ for (int i = 0; i < kSize; i++) {
+ CHECK_EQ(data_[i], v0);
+ }
+ }
+
+ void Backoff() {
+ volatile T data[kSize] = {};
+ for (int i = 0; i < kSize; i++) {
+ data[i]++;
+ CHECK_EQ(data[i], 1);
+ }
+ }
+
+ private:
+ typedef GenericScopedLock<MutexType> Lock;
+ static const int kSize = 64;
+ typedef u64 T;
+ MutexType *mtx_;
+ char pad_[kCacheLineSize];
+ T data_[kSize];
+};
+
+const int kThreads = 8;
+const int kWriteRate = 1024;
+#if TSAN_DEBUG
+const int kIters = 16*1024;
+#else
+const int kIters = 64*1024;
+#endif
+
+template<typename MutexType>
+static void *write_mutex_thread(void *param) {
+ TestData<MutexType> *data = (TestData<MutexType>*)param;
+ for (int i = 0; i < kIters; i++) {
+ data->Write();
+ data->Backoff();
+ }
+ return 0;
+}
+
+template<typename MutexType>
+static void *read_mutex_thread(void *param) {
+ TestData<MutexType> *data = (TestData<MutexType>*)param;
+ for (int i = 0; i < kIters; i++) {
+ if ((i % kWriteRate) == 0)
+ data->Write();
+ else
+ data->Read();
+ data->Backoff();
+ }
+ return 0;
+}
+
+TEST(Mutex, Write) {
+ Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
+ TestData<Mutex> data(&mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ pthread_create(&threads[i], 0, write_mutex_thread<Mutex>, &data);
+ for (int i = 0; i < kThreads; i++)
+ pthread_join(threads[i], 0);
+}
+
+TEST(Mutex, ReadWrite) {
+ Mutex mtx(MutexTypeAnnotations, StatMtxAnnotations);
+ TestData<Mutex> data(&mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ pthread_create(&threads[i], 0, read_mutex_thread<Mutex>, &data);
+ for (int i = 0; i < kThreads; i++)
+ pthread_join(threads[i], 0);
+}
+
+TEST(Mutex, SpinWrite) {
+ SpinMutex mtx;
+ TestData<SpinMutex> data(&mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ pthread_create(&threads[i], 0, write_mutex_thread<SpinMutex>, &data);
+ for (int i = 0; i < kThreads; i++)
+ pthread_join(threads[i], 0);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/unit_tests/tsan_platform_test.cc b/lib/tsan/unit_tests/tsan_platform_test.cc
new file mode 100644
index 0000000..64c4499
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_platform_test.cc
@@ -0,0 +1,88 @@
+//===-- tsan_platform_test.cc ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_platform.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+static void TestThreadInfo(bool main) {
+ ScopedInRtl in_rtl;
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);
+ // Printf("stk=%zx-%zx(%zu)\n", stk_addr, stk_addr + stk_size, stk_size);
+ // Printf("tls=%zx-%zx(%zu)\n", tls_addr, tls_addr + tls_size, tls_size);
+
+ int stack_var;
+ EXPECT_NE(stk_addr, (uptr)0);
+ EXPECT_NE(stk_size, (uptr)0);
+ EXPECT_GT((uptr)&stack_var, stk_addr);
+ EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);
+
+ static __thread int thread_var;
+ EXPECT_NE(tls_addr, (uptr)0);
+ EXPECT_NE(tls_size, (uptr)0);
+ EXPECT_GT((uptr)&thread_var, tls_addr);
+ EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);
+
+ // Ensure that tls and stack do not intersect.
+ uptr tls_end = tls_addr + tls_size;
+ EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
+ EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size);
+ EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr));
+}
+
+static void *WorkerThread(void *arg) {
+ TestThreadInfo(false);
+ return 0;
+}
+
+TEST(Platform, ThreadInfoMain) {
+ TestThreadInfo(true);
+}
+
+TEST(Platform, ThreadInfoWorker) {
+ pthread_t t;
+ pthread_create(&t, 0, WorkerThread, 0);
+ pthread_join(t, 0);
+}
+
+TEST(Platform, FileOps) {
+ const char *str1 = "qwerty";
+ uptr len1 = internal_strlen(str1);
+ const char *str2 = "zxcv";
+ uptr len2 = internal_strlen(str2);
+
+ fd_t fd = internal_open("./tsan_test.tmp", true);
+ EXPECT_NE(fd, kInvalidFd);
+ EXPECT_EQ(len1, internal_write(fd, str1, len1));
+ EXPECT_EQ(len2, internal_write(fd, str2, len2));
+ internal_close(fd);
+
+ fd = internal_open("./tsan_test.tmp", false);
+ EXPECT_NE(fd, kInvalidFd);
+ EXPECT_EQ(len1 + len2, internal_filesize(fd));
+ char buf[64] = {};
+ EXPECT_EQ(len1, internal_read(fd, buf, len1));
+ EXPECT_EQ(0, internal_memcmp(buf, str1, len1));
+ EXPECT_EQ((char)0, buf[len1 + 1]);
+ internal_memset(buf, 0, len1);
+ EXPECT_EQ(len2, internal_read(fd, buf, len2));
+ EXPECT_EQ(0, internal_memcmp(buf, str2, len2));
+ internal_close(fd);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/unit_tests/tsan_printf_test.cc b/lib/tsan/unit_tests/tsan_printf_test.cc
new file mode 100644
index 0000000..0dfd1d2
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_printf_test.cc
@@ -0,0 +1,106 @@
+//===-- tsan_printf_test.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+#include "gtest/gtest.h"
+
+#include <string.h>
+#include <limits.h>
+
+namespace __tsan {
+
+TEST(Printf, Basic) {
+ char buf[1024];
+ uptr len = internal_snprintf(buf, sizeof(buf),
+ "a%db%zdc%ue%zuf%xh%zxq%pe%sr",
+ (int)-1, (long)-2, // NOLINT
+ (unsigned)-4, (unsigned long)5, // NOLINT
+ (unsigned)10, (unsigned long)11, // NOLINT
+ (void*)0x123, "_string_");
+ EXPECT_EQ(len, strlen(buf));
+ EXPECT_EQ(0, strcmp(buf, "a-1b-2c4294967292e5fahbq"
+ "0x000000000123e_string_r"));
+}
+
+TEST(Printf, OverflowStr) {
+ char buf[] = "123456789";
+ uptr len = internal_snprintf(buf, 4, "%s", "abcdef"); // NOLINT
+ EXPECT_EQ(len, (uptr)6);
+ EXPECT_EQ(0, strcmp(buf, "abc"));
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+TEST(Printf, OverflowInt) {
+ char buf[] = "123456789";
+ internal_snprintf(buf, 4, "%d", -123456789); // NOLINT
+ EXPECT_EQ(0, strcmp(buf, "-12"));
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+TEST(Printf, OverflowUint) {
+ char buf[] = "123456789";
+ internal_snprintf(buf, 4, "a%zx", (unsigned long)0x123456789); // NOLINT
+ EXPECT_EQ(0, strcmp(buf, "a12"));
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+TEST(Printf, OverflowPtr) {
+ char buf[] = "123456789";
+ internal_snprintf(buf, 4, "%p", (void*)0x123456789); // NOLINT
+ EXPECT_EQ(0, strcmp(buf, "0x0"));
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+template<typename T>
+static void TestMinMax(const char *fmt, T min, T max) {
+ char buf[1024];
+ uptr len = internal_snprintf(buf, sizeof(buf), fmt, min, max);
+ char buf2[1024];
+ snprintf(buf2, sizeof(buf2), fmt, min, max);
+ EXPECT_EQ(len, strlen(buf));
+ EXPECT_EQ(0, strcmp(buf, buf2));
+}
+
+TEST(Printf, MinMax) {
+ TestMinMax<int>("%d-%d", INT_MIN, INT_MAX); // NOLINT
+ TestMinMax<long>("%zd-%zd", LONG_MIN, LONG_MAX); // NOLINT
+ TestMinMax<unsigned>("%u-%u", 0, UINT_MAX); // NOLINT
+ TestMinMax<unsigned long>("%zu-%zu", 0, ULONG_MAX); // NOLINT
+ TestMinMax<unsigned>("%x-%x", 0, UINT_MAX); // NOLINT
+ TestMinMax<unsigned long>("%zx-%zx", 0, ULONG_MAX); // NOLINT
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/unit_tests/tsan_shadow_test.cc b/lib/tsan/unit_tests/tsan_shadow_test.cc
new file mode 100644
index 0000000..41f9121
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_shadow_test.cc
@@ -0,0 +1,47 @@
+//===-- tsan_shadow_test.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_platform.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+TEST(Shadow, Mapping) {
+ static int global;
+ int stack;
+ void *heap = malloc(0);
+ free(heap);
+
+ CHECK(IsAppMem((uptr)&global));
+ CHECK(IsAppMem((uptr)&stack));
+ CHECK(IsAppMem((uptr)heap));
+
+ CHECK(IsShadowMem(MemToShadow((uptr)&global)));
+ CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
+ CHECK(IsShadowMem(MemToShadow((uptr)heap)));
+}
+
+TEST(Shadow, Celling) {
+ u64 aligned_data[4];
+ char *data = (char*)aligned_data;
+ CHECK_EQ((uptr)data % kShadowSize, 0);
+ uptr s0 = MemToShadow((uptr)&data[0]);
+ CHECK_EQ(s0 % kShadowSize, 0);
+ for (unsigned i = 1; i < kShadowCell; i++)
+ CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
+ for (unsigned i = kShadowCell; i < 2*kShadowCell; i++)
+ CHECK_EQ(s0 + kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i]));
+ for (unsigned i = 2*kShadowCell; i < 3*kShadowCell; i++)
+ CHECK_EQ(s0 + 2*kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i]));
+}
+
+} // namespace __tsan
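
TEST(Shadow, Celling) encodes the shadow geometry: every application address within one kShadowCell-byte cell maps to the same shadow address, and consecutive cells are kShadowSize*kShadowCnt shadow bytes apart. One toy mapping with that property (the constants and the flat base offset are assumptions for illustration; the real MemToShadow and constants live in the TSan runtime headers):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kShadowCell = 8;  // app bytes per shadow cell (assumed value)
    const uintptr_t kShadowCnt  = 4;  // shadow slots per cell (assumed value)
    const uintptr_t kShadowSize = 8;  // bytes per shadow slot (assumed value)

    uintptr_t ToyMemToShadow(uintptr_t addr, uintptr_t shadow_base) {
      return shadow_base + (addr / kShadowCell) * kShadowCnt * kShadowSize;
    }

    int main() {
      const uintptr_t base = 0x100000, app = 0x2000;  // cell-aligned app address
      uintptr_t s0 = ToyMemToShadow(app, base);
      for (uintptr_t i = 1; i < kShadowCell; i++)
        assert(ToyMemToShadow(app + i, base) == s0);  // same cell, same shadow address
      assert(ToyMemToShadow(app + kShadowCell, base) == s0 + kShadowSize * kShadowCnt);
      return 0;
    }
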
diff --git a/lib/tsan/unit_tests/tsan_suppressions_test.cc b/lib/tsan/unit_tests/tsan_suppressions_test.cc
new file mode 100644
index 0000000..e1e0c12
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_suppressions_test.cc
@@ -0,0 +1,128 @@
+//===-- tsan_suppressions_test.cc -----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+namespace __tsan {
+
+TEST(Suppressions, Parse) {
+ ScopedInRtl in_rtl;
+ Suppression *supp0 = SuppressionParse(
+ "race:foo\n"
+ " race:bar\n" // NOLINT
+ "race:baz \n" // NOLINT
+ "# a comment\n"
+ "race:quz\n"
+ ); // NOLINT
+ Suppression *supp = supp0;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "quz"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "baz"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "bar"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "foo"));
+ supp = supp->next;
+ EXPECT_EQ((Suppression*)0, supp);
+}
+
+TEST(Suppressions, Parse2) {
+ ScopedInRtl in_rtl;
+ Suppression *supp0 = SuppressionParse(
+ " # first line comment\n" // NOLINT
+ " race:bar \n" // NOLINT
+ "race:baz* *baz\n"
+ "# a comment\n"
+ "# last line comment\n"
+ ); // NOLINT
+ Suppression *supp = supp0;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "baz* *baz"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "bar"));
+ supp = supp->next;
+ EXPECT_EQ((Suppression*)0, supp);
+}
+
+TEST(Suppressions, Parse3) {
+ ScopedInRtl in_rtl;
+ Suppression *supp0 = SuppressionParse(
+ "# last suppression w/o line-feed\n"
+ "race:foo\n"
+ "race:bar"
+ ); // NOLINT
+ Suppression *supp = supp0;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "bar"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "foo"));
+ supp = supp->next;
+ EXPECT_EQ((Suppression*)0, supp);
+}
+
+TEST(Suppressions, ParseType) {
+ ScopedInRtl in_rtl;
+ Suppression *supp0 = SuppressionParse(
+ "race:foo\n"
+ "thread:bar\n"
+ "mutex:baz\n"
+ "signal:quz\n"
+ ); // NOLINT
+ Suppression *supp = supp0;
+ EXPECT_EQ(supp->type, SuppressionSignal);
+ EXPECT_EQ(0, strcmp(supp->templ, "quz"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionMutex);
+ EXPECT_EQ(0, strcmp(supp->templ, "baz"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionThread);
+ EXPECT_EQ(0, strcmp(supp->templ, "bar"));
+ supp = supp->next;
+ EXPECT_EQ(supp->type, SuppressionRace);
+ EXPECT_EQ(0, strcmp(supp->templ, "foo"));
+ supp = supp->next;
+ EXPECT_EQ((Suppression*)0, supp);
+}
+
+static bool MyMatch(const char *templ, const char *func) {
+ char tmp[1024];
+ strcpy(tmp, templ); // NOLINT
+ return SuppressionMatch(tmp, func);
+}
+
+TEST(Suppressions, Match) {
+ EXPECT_TRUE(MyMatch("foobar", "foobar"));
+ EXPECT_TRUE(MyMatch("foobar", "prefix_foobar_postfix"));
+ EXPECT_TRUE(MyMatch("*foobar*", "prefix_foobar_postfix"));
+ EXPECT_TRUE(MyMatch("foo*bar", "foo_middle_bar"));
+ EXPECT_TRUE(MyMatch("foo*bar", "foobar"));
+ EXPECT_TRUE(MyMatch("foo*bar*baz", "foo_middle_bar_another_baz"));
+ EXPECT_TRUE(MyMatch("foo*bar*baz", "foo_middle_barbaz"));
+
+ EXPECT_FALSE(MyMatch("foo", "baz"));
+ EXPECT_FALSE(MyMatch("foobarbaz", "foobar"));
+ EXPECT_FALSE(MyMatch("foobarbaz", "barbaz"));
+ EXPECT_FALSE(MyMatch("foo*bar", "foobaz"));
+ EXPECT_FALSE(MyMatch("foo*bar", "foo_baz"));
+}
+
+} // namespace __tsan
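
TEST(Suppressions, Match) fixes the matching semantics: the template is split on '*' and each piece must occur in the function name as a substring, in order, so a template without '*' matches anywhere in the name. One reimplementation consistent with every case listed in the test (illustrative only; the real SuppressionMatch works in place on a char buffer, which is why MyMatch copies the template first):

    #include <cassert>
    #include <string>

    static bool DemoMatch(const std::string &templ, const std::string &func) {
      size_t tpos = 0, fpos = 0;
      while (tpos < templ.size()) {
        size_t star = templ.find('*', tpos);
        size_t end = (star == std::string::npos) ? templ.size() : star;
        std::string piece = templ.substr(tpos, end - tpos);
        if (!piece.empty()) {
          size_t hit = func.find(piece, fpos);     // pieces must appear in order
          if (hit == std::string::npos) return false;
          fpos = hit + piece.size();
        }
        tpos = (star == std::string::npos) ? templ.size() : star + 1;
      }
      return true;
    }

    int main() {
      assert(DemoMatch("foobar", "prefix_foobar_postfix"));
      assert(DemoMatch("foo*bar*baz", "foo_middle_barbaz"));
      assert(!DemoMatch("foo*bar", "foo_baz"));
      assert(!DemoMatch("foobarbaz", "foobar"));
      return 0;
    }
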
diff --git a/lib/tsan/unit_tests/tsan_sync_test.cc b/lib/tsan/unit_tests/tsan_sync_test.cc
new file mode 100644
index 0000000..b7605a5
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_sync_test.cc
@@ -0,0 +1,65 @@
+//===-- tsan_sync_test.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "gtest/gtest.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <map>
+
+namespace __tsan {
+
+TEST(Sync, Table) {
+ const uintptr_t kIters = 512*1024;
+ const uintptr_t kRange = 10000;
+
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+
+ SyncTab tab;
+ SyncVar *golden[kRange] = {};
+ unsigned seed = 0;
+ for (uintptr_t i = 0; i < kIters; i++) {
+ uintptr_t addr = rand_r(&seed) % (kRange - 1) + 1;
+ if (rand_r(&seed) % 2) {
+ // Get or add.
+ SyncVar *v = tab.GetAndLock(thr, pc, addr, true);
+ EXPECT_TRUE(golden[addr] == 0 || golden[addr] == v);
+ EXPECT_EQ(v->addr, addr);
+ golden[addr] = v;
+ v->mtx.Unlock();
+ } else {
+ // Remove.
+ SyncVar *v = tab.GetAndRemove(thr, pc, addr);
+ EXPECT_EQ(golden[addr], v);
+ if (v) {
+ EXPECT_EQ(v->addr, addr);
+ golden[addr] = 0;
+ DestroyAndFree(v);
+ }
+ }
+ }
+ for (uintptr_t addr = 0; addr < kRange; addr++) {
+ if (golden[addr] == 0)
+ continue;
+ SyncVar *v = tab.GetAndRemove(thr, pc, addr);
+ EXPECT_EQ(v, golden[addr]);
+ EXPECT_EQ(v->addr, addr);
+ DestroyAndFree(v);
+ }
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/unit_tests/tsan_vector_test.cc b/lib/tsan/unit_tests/tsan_vector_test.cc
new file mode 100644
index 0000000..cfef6e5
--- /dev/null
+++ b/lib/tsan/unit_tests/tsan_vector_test.cc
@@ -0,0 +1,45 @@
+//===-- tsan_vector_test.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_vector.h"
+#include "tsan_rtl.h"
+#include "gtest/gtest.h"
+
+namespace __tsan {
+
+TEST(Vector, Basic) {
+ ScopedInRtl in_rtl;
+ Vector<int> v(MBlockScopedBuf);
+ EXPECT_EQ(v.Size(), (uptr)0);
+ v.PushBack(42);
+ EXPECT_EQ(v.Size(), (uptr)1);
+ EXPECT_EQ(v[0], 42);
+ v.PushBack(43);
+ EXPECT_EQ(v.Size(), (uptr)2);
+ EXPECT_EQ(v[0], 42);
+ EXPECT_EQ(v[1], 43);
+}
+
+TEST(Vector, Stride) {
+ ScopedInRtl in_rtl;
+ Vector<int> v(MBlockScopedBuf);
+ for (int i = 0; i < 1000; i++) {
+ v.PushBack(i);
+ EXPECT_EQ(v.Size(), (uptr)(i + 1));
+ EXPECT_EQ(v[i], i);
+ }
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(v[i], i);
+ }
+}
+
+} // namespace __tsan
diff --git a/lib/ucmpti2.c b/lib/ucmpti2.c
index 11137c5..5466d21 100644
--- a/lib/ucmpti2.c
+++ b/lib/ucmpti2.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Returns: if (a < b) returns 0
* if (a == b) returns 1
* if (a > b) returns 2
diff --git a/lib/udivmoddi4.c b/lib/udivmoddi4.c
index 73043d4..57282d5 100644
--- a/lib/udivmoddi4.c
+++ b/lib/udivmoddi4.c
@@ -20,8 +20,6 @@
/* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */
-ARM_EABI_FNALIAS(uldivmod, udivmoddi4);
-
COMPILER_RT_ABI du_int
__udivmoddi4(du_int a, du_int b, du_int* rem)
{
diff --git a/lib/udivmodti4.c b/lib/udivmodti4.c
index 427861b..f619c74 100644
--- a/lib/udivmodti4.c
+++ b/lib/udivmodti4.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
/* Effects: if rem != 0, *rem = a % b
* Returns: a / b
*/
diff --git a/lib/udivsi3.c b/lib/udivsi3.c
index 39ef48b..5d0140c 100644
--- a/lib/udivsi3.c
+++ b/lib/udivsi3.c
@@ -18,8 +18,9 @@
/* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */
-ARM_EABI_FNALIAS(uidiv, udivsi3);
+ARM_EABI_FNALIAS(uidiv, udivsi3)
+/* This function should not call __divsi3! */
COMPILER_RT_ABI su_int
__udivsi3(su_int n, su_int d)
{
diff --git a/lib/udivti3.c b/lib/udivti3.c
index 7405a0f..d9e1bb4 100644
--- a/lib/udivti3.c
+++ b/lib/udivti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
/* Returns: a / b */
diff --git a/lib/umodti3.c b/lib/umodti3.c
index 8f20c5f..8ebe7f0 100644
--- a/lib/umodti3.c
+++ b/lib/umodti3.c
@@ -12,10 +12,10 @@
* ===----------------------------------------------------------------------===
*/
-#if __x86_64
-
#include "int_lib.h"
+#if __x86_64
+
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
/* Returns: a % b */
diff --git a/lib/x86_64/CMakeLists.txt b/lib/x86_64/CMakeLists.txt
deleted file mode 100644
index ee21308..0000000
--- a/lib/x86_64/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-SET( SRCS
- floatdixf.c
- floatdisf.c
- floatdidf.c
- )