author     Dimitry Andric <dim@FreeBSD.org>    2017-04-16 16:02:53 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2017-04-16 16:02:53 +0000
commit     ab0bf875a5f328a6710f4e48258979ae1bc8da1c (patch)
tree       66903cf9f73151825893dcc216b04c0930317a10 /lib
parent     abacad30a54c59ad437ccf54ec5236a8dd7f3ba9 (diff)
download   src-ab0bf875a5f328a6710f4e48258979ae1bc8da1c.tar.gz
           src-ab0bf875a5f328a6710f4e48258979ae1bc8da1c.zip

Vendor import of compiler-rt trunk r300422 (tag: vendor/compiler-rt/compiler-rt-trunk-r300422)
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=317021
    svn path=/vendor/compiler-rt/compiler-rt-trunk-r300422/; revision=317022; tag=vendor/compiler-rt/compiler-rt-trunk-r300422
Diffstat (limited to 'lib')
-rw-r--r--  lib/asan/CMakeLists.txt | 52
-rw-r--r--  lib/asan/asan.syms.extra | 1
-rw-r--r--  lib/asan/asan_allocator.cc | 40
-rw-r--r--  lib/asan/asan_descriptions.cc | 3
-rw-r--r--  lib/asan/asan_errors.cc | 15
-rw-r--r--  lib/asan/asan_flags.cc | 23
-rw-r--r--  lib/asan/asan_flags.inc | 13
-rw-r--r--  lib/asan/asan_globals_win.cc | 2
-rw-r--r--  lib/asan/asan_globals_win.h | 34
-rw-r--r--  lib/asan/asan_interceptors.cc | 8
-rw-r--r--  lib/asan/asan_interface.inc | 167
-rw-r--r--  lib/asan/asan_interface_internal.h | 7
-rw-r--r--  lib/asan/asan_internal.h | 6
-rw-r--r--  lib/asan/asan_linux.cc | 3
-rw-r--r--  lib/asan/asan_mac.cc | 4
-rw-r--r--  lib/asan/asan_malloc_win.cc | 2
-rw-r--r--  lib/asan/asan_mapping.h | 1
-rw-r--r--  lib/asan/asan_memory_profile.cc | 27
-rw-r--r--  lib/asan/asan_posix.cc | 13
-rw-r--r--  lib/asan/asan_report.cc | 17
-rw-r--r--  lib/asan/asan_report.h | 1
-rw-r--r--  lib/asan/asan_suppressions.cc | 12
-rw-r--r--  lib/asan/asan_thread.cc | 8
-rw-r--r--  lib/asan/asan_win.cc | 129
-rw-r--r--  lib/asan/asan_win_dll_thunk.cc | 482
-rw-r--r--  lib/asan/asan_win_dynamic_runtime_thunk.cc | 18
-rw-r--r--  lib/asan/asan_win_weak_interception.cc | 23
-rwxr-xr-x  lib/asan/scripts/asan_symbolize.py | 30
-rw-r--r--  lib/asan/tests/asan_interface_test.cc | 1
-rw-r--r--  lib/asan/tests/asan_internal_interface_test.cc | 1
-rw-r--r--  lib/asan/tests/asan_mac_test_helpers.mm | 1
-rw-r--r--  lib/asan/tests/asan_mem_test.cc | 3
-rw-r--r--  lib/asan/tests/asan_noinst_test.cc | 9
-rw-r--r--  lib/asan/tests/asan_test.cc | 13
-rw-r--r--  lib/asan/tests/asan_test_config.h | 4
-rw-r--r--  lib/builtins/CMakeLists.txt | 31
-rw-r--r--  lib/builtins/README.txt | 1
-rw-r--r--  lib/builtins/arm/addsf3.S | 277
-rw-r--r--  lib/builtins/arm/aeabi_cdcmp.S | 37
-rw-r--r--  lib/builtins/arm/aeabi_cfcmp.S | 37
-rw-r--r--  lib/builtins/arm/aeabi_dcmp.S | 4
-rw-r--r--  lib/builtins/arm/aeabi_idivmod.S | 2
-rw-r--r--  lib/builtins/arm/aeabi_ldivmod.S | 20
-rw-r--r--  lib/builtins/arm/aeabi_memset.S | 2
-rw-r--r--  lib/builtins/arm/aeabi_uidivmod.S | 2
-rw-r--r--  lib/builtins/arm/aeabi_uldivmod.S | 20
-rw-r--r--  lib/builtins/arm/comparesf2.S | 4
-rw-r--r--  lib/builtins/arm/udivsi3.S | 26
-rw-r--r--  lib/builtins/clear_cache.c | 15
-rw-r--r--  lib/builtins/cpu_model.c | 19
-rw-r--r--  lib/builtins/divtc3.c | 22
-rw-r--r--  lib/builtins/ffssi2.c | 29
-rw-r--r--  lib/builtins/int_lib.h | 6
-rw-r--r--  lib/builtins/os_version_check.c | 178
-rw-r--r--  lib/builtins/x86_64/floatdidf.c | 2
-rw-r--r--  lib/builtins/x86_64/floatdisf.c | 2
-rw-r--r--  lib/cfi/cfi.cc | 14
-rw-r--r--  lib/esan/esan_interceptors.cpp | 14
-rw-r--r--  lib/interception/interception_win.cc | 2
-rw-r--r--  lib/interception/tests/interception_win_test.cc | 7
-rw-r--r--  lib/lsan/CMakeLists.txt | 39
-rw-r--r--  lib/lsan/lsan.cc | 1
-rw-r--r--  lib/lsan/lsan.h | 7
-rw-r--r--  lib/lsan/lsan_allocator.cc | 77
-rw-r--r--  lib/lsan/lsan_allocator.h | 49
-rw-r--r--  lib/lsan/lsan_common.cc | 48
-rw-r--r--  lib/lsan/lsan_common.h | 54
-rw-r--r--  lib/lsan/lsan_common_linux.cc | 26
-rw-r--r--  lib/lsan/lsan_common_mac.cc | 126
-rw-r--r--  lib/lsan/lsan_flags.inc | 2
-rw-r--r--  lib/lsan/lsan_interceptors.cc | 84
-rw-r--r--  lib/lsan/lsan_linux.cc | 33
-rw-r--r--  lib/lsan/lsan_malloc_mac.cc | 55
-rw-r--r--  lib/lsan/lsan_thread.cc | 15
-rw-r--r--  lib/lsan/weak_symbols.txt | 2
-rw-r--r--  lib/msan/msan_interceptors.cc | 29
-rw-r--r--  lib/msan/tests/CMakeLists.txt | 1
-rw-r--r--  lib/msan/tests/msan_test.cc | 332
-rw-r--r--  lib/profile/InstrProfData.inc | 84
-rw-r--r--  lib/profile/InstrProfilingFile.c | 29
-rw-r--r--  lib/profile/InstrProfilingUtil.c | 23
-rw-r--r--  lib/profile/InstrProfilingUtil.h | 8
-rw-r--r--  lib/profile/InstrProfilingValue.c | 29
-rw-r--r--  lib/sanitizer_common/CMakeLists.txt | 56
-rw-r--r--  lib/sanitizer_common/sancov_flags.cc | 7
-rw-r--r--  lib/sanitizer_common/sancov_flags.h | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_interface.h | 9
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h | 14
-rw-r--r--  lib/sanitizer_common/sanitizer_common.cc | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_common.h | 13
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors.inc | 210
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interface.inc | 39
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interface_posix.inc | 14
-rw-r--r--  lib/sanitizer_common/sanitizer_common_libcdep.cc | 11
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_interface.inc | 40
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_libcdep.cc | 47
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc | 7
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_win_sections.cc | 22
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc | 24
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.inc | 9
-rw-r--r--  lib/sanitizer_common/sanitizer_interface_internal.h | 28
-rw-r--r--  lib/sanitizer_common/sanitizer_internal_defs.h | 47
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.cc | 241
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.h | 3
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_libcdep.cc | 22
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_s390.cc | 24
-rw-r--r--  lib/sanitizer_common/sanitizer_list.h | 11
-rw-r--r--  lib/sanitizer_common/sanitizer_mac.cc | 40
-rw-r--r--  lib/sanitizer_common/sanitizer_malloc_mac.inc | 3
-rw-r--r--  lib/sanitizer_common/sanitizer_mutex.h | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_platform.h | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_interceptors.h | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_posix.cc | 16
-rw-r--r--  lib/sanitizer_common/sanitizer_posix.h | 3
-rw-r--r--  lib/sanitizer_common/sanitizer_posix_libcdep.cc | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_printf.cc | 12
-rw-r--r--  lib/sanitizer_common/sanitizer_quarantine.h | 142
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld.h | 9
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 19
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld_mac.cc | 40
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc | 37
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc | 1
-rw-r--r--  lib/sanitizer_common/sanitizer_thread_registry.cc | 10
-rw-r--r--  lib/sanitizer_common/sanitizer_thread_registry.h | 5
-rw-r--r--  lib/sanitizer_common/sanitizer_tls_get_addr.cc | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_tls_get_addr.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc | 71
-rw-r--r--  lib/sanitizer_common/sanitizer_win.h | 26
-rw-r--r--  lib/sanitizer_common/sanitizer_win_defs.h | 153
-rw-r--r--  lib/sanitizer_common/sanitizer_win_dll_thunk.cc | 102
-rw-r--r--  lib/sanitizer_common/sanitizer_win_dll_thunk.h | 182
-rw-r--r--  lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_win_weak_interception.cc | 94
-rw-r--r--  lib/sanitizer_common/sanitizer_win_weak_interception.h | 33
-rwxr-xr-x  lib/sanitizer_common/scripts/sancov.py | 69
-rw-r--r--  lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc | 19
-rw-r--r--  lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc | 24
-rwxr-xr-x  lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh | 39
-rwxr-xr-x  lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh | 33
-rw-r--r--  lib/sanitizer_common/symbolizer/scripts/global_symbols.txt | 1
-rw-r--r--  lib/sanitizer_common/tests/CMakeLists.txt | 1
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 4
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_bitvector_test.cc | 4
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_list_test.cc | 16
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_quarantine_test.cc | 180
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc | 12
-rw-r--r--  lib/sanitizer_common/weak_symbols.txt | 2
-rw-r--r--  lib/scudo/CMakeLists.txt | 7
-rw-r--r--  lib/scudo/scudo_allocator.cpp | 69
-rw-r--r--  lib/scudo/scudo_allocator.h | 6
-rw-r--r--  lib/scudo/scudo_crc32.cpp | 17
-rw-r--r--  lib/scudo/scudo_crc32.h | 30
-rw-r--r--  lib/scudo/scudo_flags.cpp | 9
-rw-r--r--  lib/scudo/scudo_flags.inc | 6
-rw-r--r--  lib/scudo/scudo_utils.cpp | 28
-rw-r--r--  lib/scudo/scudo_utils.h | 6
-rw-r--r--  lib/tsan/CMakeLists.txt | 1
-rwxr-xr-x  lib/tsan/check_analyze.sh | 12
-rw-r--r--  lib/tsan/go/tsan_go.cc | 10
-rw-r--r--  lib/tsan/rtl/tsan.syms.extra | 10
-rw-r--r--  lib/tsan/rtl/tsan_debugging.cc | 11
-rw-r--r--  lib/tsan/rtl/tsan_defs.h | 3
-rw-r--r--  lib/tsan/rtl/tsan_external.cc | 78
-rw-r--r--  lib/tsan/rtl/tsan_flags.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_flags.h | 1
-rw-r--r--  lib/tsan/rtl/tsan_flags.inc | 2
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc | 84
-rw-r--r--  lib/tsan/rtl/tsan_interceptors_mac.cc | 6
-rw-r--r--  lib/tsan/rtl/tsan_interface.h | 13
-rw-r--r--  lib/tsan/rtl/tsan_interface_ann.cc | 112
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc | 27
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc | 14
-rw-r--r--  lib/tsan/rtl/tsan_libdispatch_mac.cc | 18
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc | 2
-rw-r--r--  lib/tsan/rtl/tsan_report.cc | 46
-rw-r--r--  lib/tsan/rtl/tsan_report.h | 6
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc | 14
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h | 30
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc | 118
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc | 12
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_stat.cc | 10
-rw-r--r--  lib/tsan/rtl/tsan_stat.h | 10
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc | 2
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc | 6
-rw-r--r--  lib/tsan/rtl/tsan_sync.h | 48
-rw-r--r--  lib/ubsan/CMakeLists.txt | 29
-rw-r--r--  lib/ubsan/ubsan_diag.cc | 6
-rw-r--r--  lib/ubsan/ubsan_flags.cc | 20
-rw-r--r--  lib/ubsan/ubsan_handlers.cc | 51
-rw-r--r--  lib/ubsan/ubsan_handlers.h | 8
-rw-r--r--  lib/ubsan/ubsan_init.cc | 6
-rw-r--r--  lib/ubsan/ubsan_init.h | 3
-rw-r--r--  lib/ubsan/ubsan_interface.inc | 47
-rw-r--r--  lib/ubsan/ubsan_win_dll_thunk.cc | 21
-rw-r--r--  lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc | 21
-rw-r--r--  lib/ubsan/ubsan_win_weak_interception.cc | 23
-rw-r--r--  lib/xray/CMakeLists.txt | 61
-rw-r--r--  lib/xray/tests/CMakeLists.txt | 12
-rw-r--r--  lib/xray/tests/unit/CMakeLists.txt | 2
-rw-r--r--  lib/xray/tests/unit/buffer_queue_test.cc | 73
-rw-r--r--  lib/xray/tests/unit/fdr_logging_test.cc | 127
-rw-r--r--  lib/xray/xray_AArch64.cc | 30
-rw-r--r--  lib/xray/xray_arm.cc | 38
-rw-r--r--  lib/xray/xray_buffer_queue.cc | 68
-rw-r--r--  lib/xray/xray_buffer_queue.h | 92
-rw-r--r--  lib/xray/xray_emulate_tsc.h | 40
-rw-r--r--  lib/xray/xray_fdr_log_records.h | 65
-rw-r--r--  lib/xray/xray_fdr_logging.cc | 229
-rw-r--r--  lib/xray/xray_fdr_logging.h | 38
-rw-r--r--  lib/xray/xray_fdr_logging_impl.h | 639
-rw-r--r--  lib/xray/xray_flags.cc | 36
-rw-r--r--  lib/xray/xray_flags.h | 4
-rw-r--r--  lib/xray/xray_flags.inc | 7
-rw-r--r--  lib/xray/xray_init.cc | 25
-rw-r--r--  lib/xray/xray_inmemory_log.cc | 110
-rw-r--r--  lib/xray/xray_interface.cc | 86
-rw-r--r--  lib/xray/xray_interface_internal.h | 5
-rw-r--r--  lib/xray/xray_log_interface.cc | 59
-rw-r--r--  lib/xray/xray_mips.cc | 158
-rw-r--r--  lib/xray/xray_mips64.cc | 167
-rw-r--r--  lib/xray/xray_powerpc64.cc | 100
-rw-r--r--  lib/xray/xray_powerpc64.inc | 37
-rw-r--r--  lib/xray/xray_trampoline_AArch64.S | 55
-rw-r--r--  lib/xray/xray_trampoline_arm.S | 37
-rw-r--r--  lib/xray/xray_trampoline_mips.S | 110
-rw-r--r--  lib/xray/xray_trampoline_mips64.S | 136
-rw-r--r--  lib/xray/xray_trampoline_powerpc64.cc | 15
-rw-r--r--  lib/xray/xray_trampoline_powerpc64_asm.S | 171
-rw-r--r--  lib/xray/xray_trampoline_x86_64.S | 47
-rw-r--r--  lib/xray/xray_tsc.h | 68
-rw-r--r--  lib/xray/xray_utils.cc | 125
-rw-r--r--  lib/xray/xray_utils.h | 41
-rw-r--r--  lib/xray/xray_x86_64.cc | 46
-rw-r--r--  lib/xray/xray_x86_64.inc (renamed from lib/xray/xray_x86_64.h) | 11
237 files changed, 7919 insertions, 1918 deletions
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
index 5ac5708a63d7..a2d4630dfc33 100644
--- a/lib/asan/CMakeLists.txt
+++ b/lib/asan/CMakeLists.txt
@@ -36,6 +36,7 @@ set(ASAN_PREINIT_SOURCES
include_directories(..)
set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+
append_rtti_flag(OFF ASAN_CFLAGS)
set(ASAN_DYNAMIC_LINK_FLAGS)
@@ -107,6 +108,7 @@ add_compiler_rt_component(asan)
if(APPLE)
add_weak_symbols("asan" WEAK_SYMBOL_LINK_FLAGS)
+ add_weak_symbols("lsan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
@@ -176,6 +178,21 @@ else()
set(VERSION_SCRIPT_FLAG)
endif()
+ set(ASAN_DYNAMIC_WEAK_INTERCEPTION)
+ if (MSVC)
+ add_compiler_rt_object_libraries(AsanWeakInterception
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${arch}
+ SOURCES asan_win_weak_interception.cc
+ CFLAGS ${ASAN_CFLAGS} -DSANITIZER_DYNAMIC
+ DEFS ${ASAN_COMMON_DEFINITIONS})
+ set(ASAN_DYNAMIC_WEAK_INTERCEPTION
+ AsanWeakInterception
+ UbsanWeakInterception
+ SancovWeakInterception
+ SanitizerCommonWeakInterception)
+ endif()
+
add_compiler_rt_runtime(clang_rt.asan
SHARED
ARCHS ${arch}
@@ -187,6 +204,7 @@ else()
# add_dependencies(clang_rt.asan-dynamic-${arch} clang_rt.asan-dynamic-${arch}-version-list)
RTAsan_dynamic_version_script_dummy
RTUbsan_cxx
+ ${ASAN_DYNAMIC_WEAK_INTERCEPTION}
CFLAGS ${ASAN_DYNAMIC_CFLAGS}
LINK_FLAGS ${ASAN_DYNAMIC_LINK_FLAGS}
${VERSION_SCRIPT_FLAG}
@@ -205,28 +223,46 @@ else()
endif()
if (WIN32)
+ add_compiler_rt_object_libraries(AsanDllThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${arch}
+ SOURCES asan_globals_win.cc
+ asan_win_dll_thunk.cc
+ CFLAGS ${ASAN_CFLAGS} -DSANITIZER_DLL_THUNK
+ DEFS ${ASAN_COMMON_DEFINITIONS})
+
add_compiler_rt_runtime(clang_rt.asan_dll_thunk
STATIC
ARCHS ${arch}
- SOURCES asan_win_dll_thunk.cc
- asan_globals_win.cc
- $<TARGET_OBJECTS:RTInterception.${arch}>
- CFLAGS ${ASAN_CFLAGS} -DASAN_DLL_THUNK
- DEFS ${ASAN_COMMON_DEFINITIONS}
+ OBJECT_LIBS AsanDllThunk
+ UbsanDllThunk
+ SancovDllThunk
+ SanitizerCommonDllThunk
+ SOURCES $<TARGET_OBJECTS:RTInterception.${arch}>
PARENT_TARGET asan)
- set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DASAN_DYNAMIC_RUNTIME_THUNK")
+ set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DSANITIZER_DYNAMIC_RUNTIME_THUNK")
if(MSVC)
list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
endif()
+ add_compiler_rt_object_libraries(AsanDynamicRuntimeThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${arch}
+ SOURCES asan_globals_win.cc
+ asan_win_dynamic_runtime_thunk.cc
+ CFLAGS ${ASAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
+ DEFS ${ASAN_COMMON_DEFINITIONS})
+
add_compiler_rt_runtime(clang_rt.asan_dynamic_runtime_thunk
STATIC
ARCHS ${arch}
- SOURCES asan_win_dynamic_runtime_thunk.cc
- asan_globals_win.cc
+ OBJECT_LIBS AsanDynamicRuntimeThunk
+ UbsanDynamicRuntimeThunk
+ SancovDynamicRuntimeThunk
+ SanitizerCommonDynamicRuntimeThunk
CFLAGS ${ASAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
diff --git a/lib/asan/asan.syms.extra b/lib/asan/asan.syms.extra
index 007aafe380a8..f8e9b3aedcc3 100644
--- a/lib/asan/asan.syms.extra
+++ b/lib/asan/asan.syms.extra
@@ -1,3 +1,4 @@
__asan_*
__lsan_*
__ubsan_*
+__sancov_*
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index ee9b1a6a04cf..7010b6023614 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -523,6 +523,18 @@ struct Allocator {
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
+
+ Flags &fl = *flags();
+ if (fl.max_free_fill_size > 0) {
+ // We have to skip the chunk header, it contains free_context_id.
+ uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
+ if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
+ uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
+ size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
+ REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
+ }
+ }
+
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
@@ -554,7 +566,17 @@ struct Allocator {
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
+ // malloc. Don't report an invalid free in this case.
+ if (SANITIZER_WINDOWS &&
+ !get_allocator().PointerIsMine(ptr)) {
+ if (!IsSystemHeapAddress(p))
+ ReportFreeNotMalloced(p, stack);
+ return;
+ }
+
ASAN_FREE_HOOK(ptr);
+
// Must mark the chunk as quarantined before any changes to its metadata.
// Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
@@ -790,8 +812,12 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (!p)
return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
- instance.Deallocate(p, 0, stack, FROM_MALLOC);
- return nullptr;
+ if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
+ instance.Deallocate(p, 0, stack, FROM_MALLOC);
+ return nullptr;
+ }
+ // Allocate a size of 1 if we shouldn't free() on Realloc to 0
+ size = 1;
}
return instance.Reallocate(p, size, stack);
}
@@ -958,15 +984,13 @@ uptr __sanitizer_get_allocated_size(const void *p) {
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+ void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_free_hook(void *ptr) {
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
(void)ptr;
}
-} // extern "C"
#endif
diff --git a/lib/asan/asan_descriptions.cc b/lib/asan/asan_descriptions.cc
index 0ecbe091c8b1..822a6a62d64e 100644
--- a/lib/asan/asan_descriptions.cc
+++ b/lib/asan/asan_descriptions.cc
@@ -252,6 +252,9 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
str.append("%c", var.name_pos[i]);
}
str.append("'");
+ if (var.line > 0) {
+ str.append(" (line %d)", var.line);
+ }
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
diff --git a/lib/asan/asan_errors.cc b/lib/asan/asan_errors.cc
index c287ba1b4be6..57490ad180b5 100644
--- a/lib/asan/asan_errors.cc
+++ b/lib/asan/asan_errors.cc
@@ -58,10 +58,22 @@ static void MaybeDumpRegisters(void *context) {
SignalContext::DumpAllRegisters(context);
}
+static void MaybeReportNonExecRegion(uptr pc) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ uptr start, end, protection;
+ while (proc_maps.Next(&start, &end, nullptr, nullptr, 0, &protection)) {
+ if (pc >= start && pc < end &&
+ !(protection & MemoryMappingLayout::kProtectionExecute))
+ Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
+ }
+#endif
+}
+
void ErrorDeadlySignal::Print() {
Decorator d;
Printf("%s", d.Warning());
- const char *description = DescribeSignalOrException(signo);
+ const char *description = __sanitizer::DescribeSignalOrException(signo);
Report(
"ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
"T%d)\n",
@@ -77,6 +89,7 @@ void ErrorDeadlySignal::Print() {
if (addr < GetPageSizeCached())
Report("Hint: address points to the zero page.\n");
}
+ MaybeReportNonExecRegion(pc);
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
index ad5bbff28c37..c8ae3faed7c2 100644
--- a/lib/asan/asan_flags.cc
+++ b/lib/asan/asan_flags.cc
@@ -61,7 +61,7 @@ void InitializeFlags() {
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
- cf.detect_leaks = CAN_SANITIZE_LEAKS;
+ cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS;
cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf.malloc_context_size = kDefaultMallocContextSize;
cf.intercept_tls_get_addr = true;
@@ -95,6 +95,18 @@ void InitializeFlags() {
RegisterCommonFlags(&ubsan_parser);
#endif
+ if (SANITIZER_MAC) {
+ // Support macOS MallocScribble and MallocPreScribble:
+ // <https://developer.apple.com/library/content/documentation/Performance/
+ // Conceptual/ManagingMemory/Articles/MallocDebug.html>
+ if (GetEnv("MallocScribble")) {
+ f->max_free_fill_size = 0x1000;
+ }
+ if (GetEnv("MallocPreScribble")) {
+ f->malloc_fill_byte = 0xaa;
+ }
+ }
+
// Override from ASan compile definition.
const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
asan_parser.ParseString(asan_compile_def);
@@ -186,9 +198,6 @@ void InitializeFlags() {
} // namespace __asan
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) {
+ return "";
+}
diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc
index 4712efb86224..f2216c2e9b3b 100644
--- a/lib/asan/asan_flags.inc
+++ b/lib/asan/asan_flags.inc
@@ -63,8 +63,14 @@ ASAN_FLAG(
int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(
+ int, max_free_fill_size, 0,
+ "ASan allocator flag. max_free_fill_size is the maximal amount of "
+ "bytes that will be filled with free_fill_byte during free.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
"Value used to fill the newly allocated memory.")
+ASAN_FLAG(int, free_fill_byte, 0x55,
+ "Value used to fill deallocated memory.")
ASAN_FLAG(bool, allow_user_poisoning, true,
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.")
@@ -148,3 +154,10 @@ ASAN_FLAG(bool, halt_on_error, true,
"(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
"Use special ODR indicator symbol for ODR violation detection")
+ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
+ "realloc(p, 0) is equivalent to free(p) by default (Same as the "
+ "POSIX standard). If set to false, realloc(p, 0) will return a "
+ "pointer to an allocated space which can not be used.")
+ASAN_FLAG(bool, verify_asan_link_order, true,
+ "Check position of ASan runtime in library list (needs to be disabled"
+ " when other library has to be preloaded system-wide)")
diff --git a/lib/asan/asan_globals_win.cc b/lib/asan/asan_globals_win.cc
index 56c0d1a532f9..261762b63e2c 100644
--- a/lib/asan/asan_globals_win.cc
+++ b/lib/asan/asan_globals_win.cc
@@ -29,7 +29,7 @@ static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
__asan_global *end = &__asan_globals_end;
uptr bytediff = (uptr)end - (uptr)start;
if (bytediff % sizeof(__asan_global) != 0) {
-#ifdef ASAN_DLL_THUNK
+#ifdef SANITIZER_DLL_THUNK
__debugbreak();
#else
CHECK("corrupt asan global array");
diff --git a/lib/asan/asan_globals_win.h b/lib/asan/asan_globals_win.h
deleted file mode 100644
index d4ed9c1f38e1..000000000000
--- a/lib/asan/asan_globals_win.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===-- asan_globals_win.h --------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Interface to the Windows-specific global management code. Separated into a
-// standalone header to allow inclusion from asan_win_dynamic_runtime_thunk,
-// which defines symbols that clash with other sanitizer headers.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ASAN_GLOBALS_WIN_H
-#define ASAN_GLOBALS_WIN_H
-
-#if !defined(_MSC_VER)
-#error "this file is Windows-only, and uses MSVC pragmas"
-#endif
-
-#if defined(_WIN64)
-#define SANITIZER_SYM_PREFIX
-#else
-#define SANITIZER_SYM_PREFIX "_"
-#endif
-
-// Use this macro to force linking asan_globals_win.cc into the DSO.
-#define ASAN_LINK_GLOBALS_WIN() \
- __pragma( \
- comment(linker, "/include:" SANITIZER_SYM_PREFIX "__asan_dso_reg_hook"))
-
-#endif // ASAN_GLOBALS_WIN_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index 606016d4f4d3..6ee3266062f8 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -228,9 +228,11 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
// Strict init-order checking is dlopen-hostile:
// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
- if (flags()->strict_init_order) { \
- StopInitOrderChecking(); \
- }
+ do { \
+ if (flags()->strict_init_order) \
+ StopInitOrderChecking(); \
+ CheckNoDeepBind(filename, flag); \
+ } while (false)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
CoverageUpdateMapping()
diff --git a/lib/asan/asan_interface.inc b/lib/asan/asan_interface.inc
new file mode 100644
index 000000000000..351be4da5108
--- /dev/null
+++ b/lib/asan/asan_interface.inc
@@ -0,0 +1,167 @@
+//===-- asan_interface.inc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Asan interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+INTERFACE_FUNCTION(__asan_address_is_poisoned)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+INTERFACE_FUNCTION(__asan_alloca_poison)
+INTERFACE_FUNCTION(__asan_allocas_unpoison)
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_describe_address)
+INTERFACE_FUNCTION(__asan_exp_load1)
+INTERFACE_FUNCTION(__asan_exp_load2)
+INTERFACE_FUNCTION(__asan_exp_load4)
+INTERFACE_FUNCTION(__asan_exp_load8)
+INTERFACE_FUNCTION(__asan_exp_load16)
+INTERFACE_FUNCTION(__asan_exp_loadN)
+INTERFACE_FUNCTION(__asan_exp_store1)
+INTERFACE_FUNCTION(__asan_exp_store2)
+INTERFACE_FUNCTION(__asan_exp_store4)
+INTERFACE_FUNCTION(__asan_exp_store8)
+INTERFACE_FUNCTION(__asan_exp_store16)
+INTERFACE_FUNCTION(__asan_exp_storeN)
+INTERFACE_FUNCTION(__asan_get_alloc_stack)
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_get_free_stack)
+INTERFACE_FUNCTION(__asan_get_report_access_size)
+INTERFACE_FUNCTION(__asan_get_report_access_type)
+INTERFACE_FUNCTION(__asan_get_report_address)
+INTERFACE_FUNCTION(__asan_get_report_bp)
+INTERFACE_FUNCTION(__asan_get_report_description)
+INTERFACE_FUNCTION(__asan_get_report_pc)
+INTERFACE_FUNCTION(__asan_get_report_sp)
+INTERFACE_FUNCTION(__asan_get_shadow_mapping)
+INTERFACE_FUNCTION(__asan_handle_no_return)
+INTERFACE_FUNCTION(__asan_init)
+INTERFACE_FUNCTION(__asan_load_cxx_array_cookie)
+INTERFACE_FUNCTION(__asan_load1)
+INTERFACE_FUNCTION(__asan_load2)
+INTERFACE_FUNCTION(__asan_load4)
+INTERFACE_FUNCTION(__asan_load8)
+INTERFACE_FUNCTION(__asan_load16)
+INTERFACE_FUNCTION(__asan_loadN)
+INTERFACE_FUNCTION(__asan_load1_noabort)
+INTERFACE_FUNCTION(__asan_load2_noabort)
+INTERFACE_FUNCTION(__asan_load4_noabort)
+INTERFACE_FUNCTION(__asan_load8_noabort)
+INTERFACE_FUNCTION(__asan_load16_noabort)
+INTERFACE_FUNCTION(__asan_loadN_noabort)
+INTERFACE_FUNCTION(__asan_locate_address)
+INTERFACE_FUNCTION(__asan_memcpy)
+INTERFACE_FUNCTION(__asan_memmove)
+INTERFACE_FUNCTION(__asan_memset)
+INTERFACE_FUNCTION(__asan_poison_cxx_array_cookie)
+INTERFACE_FUNCTION(__asan_poison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_print_accumulated_stats)
+INTERFACE_FUNCTION(__asan_region_is_poisoned)
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_register_image_globals)
+INTERFACE_FUNCTION(__asan_report_error)
+INTERFACE_FUNCTION(__asan_report_exp_load1)
+INTERFACE_FUNCTION(__asan_report_exp_load2)
+INTERFACE_FUNCTION(__asan_report_exp_load4)
+INTERFACE_FUNCTION(__asan_report_exp_load8)
+INTERFACE_FUNCTION(__asan_report_exp_load16)
+INTERFACE_FUNCTION(__asan_report_exp_load_n)
+INTERFACE_FUNCTION(__asan_report_exp_store1)
+INTERFACE_FUNCTION(__asan_report_exp_store2)
+INTERFACE_FUNCTION(__asan_report_exp_store4)
+INTERFACE_FUNCTION(__asan_report_exp_store8)
+INTERFACE_FUNCTION(__asan_report_exp_store16)
+INTERFACE_FUNCTION(__asan_report_exp_store_n)
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+INTERFACE_FUNCTION(__asan_report_load1_noabort)
+INTERFACE_FUNCTION(__asan_report_load2_noabort)
+INTERFACE_FUNCTION(__asan_report_load4_noabort)
+INTERFACE_FUNCTION(__asan_report_load8_noabort)
+INTERFACE_FUNCTION(__asan_report_load16_noabort)
+INTERFACE_FUNCTION(__asan_report_load_n_noabort)
+INTERFACE_FUNCTION(__asan_report_present)
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+INTERFACE_FUNCTION(__asan_report_store1_noabort)
+INTERFACE_FUNCTION(__asan_report_store2_noabort)
+INTERFACE_FUNCTION(__asan_report_store4_noabort)
+INTERFACE_FUNCTION(__asan_report_store8_noabort)
+INTERFACE_FUNCTION(__asan_report_store16_noabort)
+INTERFACE_FUNCTION(__asan_report_store_n_noabort)
+INTERFACE_FUNCTION(__asan_set_death_callback)
+INTERFACE_FUNCTION(__asan_set_error_report_callback)
+INTERFACE_FUNCTION(__asan_set_shadow_00)
+INTERFACE_FUNCTION(__asan_set_shadow_f1)
+INTERFACE_FUNCTION(__asan_set_shadow_f2)
+INTERFACE_FUNCTION(__asan_set_shadow_f3)
+INTERFACE_FUNCTION(__asan_set_shadow_f5)
+INTERFACE_FUNCTION(__asan_set_shadow_f8)
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_3)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+INTERFACE_FUNCTION(__asan_store1)
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+INTERFACE_FUNCTION(__asan_store1_noabort)
+INTERFACE_FUNCTION(__asan_store2_noabort)
+INTERFACE_FUNCTION(__asan_store4_noabort)
+INTERFACE_FUNCTION(__asan_store8_noabort)
+INTERFACE_FUNCTION(__asan_store16_noabort)
+INTERFACE_FUNCTION(__asan_storeN_noabort)
+INTERFACE_FUNCTION(__asan_unpoison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+INTERFACE_FUNCTION(__asan_unregister_image_globals)
+INTERFACE_FUNCTION(__asan_version_mismatch_check_v8)
+INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_WEAK_FUNCTION(__asan_default_options)
+INTERFACE_WEAK_FUNCTION(__asan_default_suppressions)
+INTERFACE_WEAK_FUNCTION(__asan_on_error)
diff --git a/lib/asan/asan_interface_internal.h b/lib/asan/asan_interface_internal.h
index 8cd424cc03e8..b18c31548860 100644
--- a/lib/asan/asan_interface_internal.h
+++ b/lib/asan/asan_interface_internal.h
@@ -165,12 +165,12 @@ extern "C" {
void __asan_set_error_report_callback(void (*callback)(const char*));
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_on_error();
+ void __asan_on_error();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ const char* __asan_default_options();
+ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
@@ -242,6 +242,9 @@ extern "C" {
void __asan_alloca_poison(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom);
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ const char* __asan_default_suppressions();
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index 1dc678c0c357..3b70695249e4 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -64,9 +64,9 @@ void AsanInitFromRtl();
// asan_win.cc
void InitializePlatformExceptionHandlers();
-
-// asan_win.cc / asan_posix.cc
-const char *DescribeSignalOrException(int signo);
+// Returns whether an address is a valid allocated system heap block.
+// 'addr' must point to the beginning of the block.
+bool IsSystemHeapAddress(uptr addr);
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc
index c051573dd494..50ef84c39a66 100644
--- a/lib/asan/asan_linux.cc
+++ b/lib/asan/asan_linux.cc
@@ -70,6 +70,7 @@ namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
+bool IsSystemHeapAddress (uptr addr) { return false; }
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
@@ -110,7 +111,7 @@ static void ReportIncompatibleRT() {
}
void AsanCheckDynamicRTPrereqs() {
- if (!ASAN_DYNAMIC)
+ if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order)
return;
// Ensure that dynamic RT is the first DSO in the list
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index baf533ac96ac..3c93b26d9bf6 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -48,6 +48,7 @@ namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
+bool IsSystemHeapAddress (uptr addr) { return false; }
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
@@ -138,7 +139,8 @@ void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
parent_tid, stack, /* detached */ true);
t->Init();
- asanThreadRegistry().StartThread(t->tid(), 0, 0);
+ asanThreadRegistry().StartThread(t->tid(), GetTid(),
+ /* workerthread */ true, 0);
SetCurrentThread(t);
}
}
diff --git a/lib/asan/asan_malloc_win.cc b/lib/asan/asan_malloc_win.cc
index 5163c04f7e07..efa058243979 100644
--- a/lib/asan/asan_malloc_win.cc
+++ b/lib/asan/asan_malloc_win.cc
@@ -100,7 +100,7 @@ void *realloc(void *ptr, size_t size) {
ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
- CHECK(!"_realloc_dbg should not exist!");
+ UNREACHABLE("_realloc_dbg should not exist!");
return 0;
}
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index d8e60a4b34a9..695740cd982f 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -191,7 +191,6 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
-#define SHADOW_TO_MEM(shadow) (((shadow) - SHADOW_OFFSET) << SHADOW_SCALE)
#define kLowMemBeg 0
#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
diff --git a/lib/asan/asan_memory_profile.cc b/lib/asan/asan_memory_profile.cc
index c2678b974fe6..05846c37cb6d 100644
--- a/lib/asan/asan_memory_profile.cc
+++ b/lib/asan/asan_memory_profile.cc
@@ -48,7 +48,7 @@ class HeapProfile {
}
}
- void Print(uptr top_percent) {
+ void Print(uptr top_percent, uptr max_number_of_contexts) {
InternalSort(&allocations_, allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size;
@@ -57,12 +57,14 @@ class HeapProfile {
uptr total_shown = 0;
Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
"%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
- "showing top %zd%%\n",
+ "showing top %zd%% (at most %zd unique contexts)\n",
total_allocated_user_size_, total_allocated_count_,
total_quarantined_user_size_, total_quarantined_count_,
total_other_count_, total_allocated_count_ +
- total_quarantined_count_ + total_other_count_, top_percent);
- for (uptr i = 0; i < allocations_.size(); i++) {
+ total_quarantined_count_ + total_other_count_, top_percent,
+ max_number_of_contexts);
+ for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts);
+ i++) {
auto &a = allocations_[i];
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
a.total_size * 100 / total_allocated_user_size_, a.count);
@@ -103,16 +105,23 @@ static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
void *argument) {
HeapProfile hp;
__lsan::ForEachChunk(ChunkCallback, &hp);
- hp.Print(reinterpret_cast<uptr>(argument));
+ uptr *Arg = reinterpret_cast<uptr*>(argument);
+ hp.Print(Arg[0], Arg[1]);
}
} // namespace __asan
+#endif // CAN_SANITIZE_LEAKS
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_print_memory_profile(uptr top_percent) {
- __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
+void __sanitizer_print_memory_profile(uptr top_percent,
+ uptr max_number_of_contexts) {
+#if CAN_SANITIZE_LEAKS
+ uptr Arg[2];
+ Arg[0] = top_percent;
+ Arg[1] = max_number_of_contexts;
+ __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
+#endif // CAN_SANITIZE_LEAKS
}
} // extern "C"
-
-#endif // CAN_SANITIZE_LEAKS
diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc
index 8e5676309ae0..68fde9139232 100644
--- a/lib/asan/asan_posix.cc
+++ b/lib/asan/asan_posix.cc
@@ -33,19 +33,6 @@
namespace __asan {
-const char *DescribeSignalOrException(int signo) {
- switch (signo) {
- case SIGFPE:
- return "FPE";
- case SIGILL:
- return "ILL";
- case SIGABRT:
- return "ABRT";
- default:
- return "SEGV";
- }
-}
-
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc
index 3ad48fa88b6f..f751b6184c6b 100644
--- a/lib/asan/asan_report.cc
+++ b/lib/asan/asan_report.cc
@@ -88,7 +88,8 @@ bool ParseFrameDescription(const char *frame_descr,
char *p;
// This string is created by the compiler and has the following form:
// "n alloc_1 alloc_2 ... alloc_n"
- // where alloc_i looks like "offset size len ObjectName".
+ // where alloc_i looks like "offset size len ObjectName"
+ // or "offset size len ObjectName:line".
uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
if (n_objects == 0)
return false;
@@ -101,7 +102,14 @@ bool ParseFrameDescription(const char *frame_descr,
return false;
}
p++;
- StackVarDescr var = {beg, size, p, len};
+ char *colon_pos = internal_strchr(p, ':');
+ uptr line = 0;
+ uptr name_len = len;
+ if (colon_pos != nullptr && colon_pos < p + len) {
+ name_len = colon_pos - p;
+ line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
+ }
+ StackVarDescr var = {beg, size, p, name_len, line};
vars->push_back(var);
p += len;
}
@@ -488,9 +496,6 @@ void __sanitizer_ptr_cmp(void *a, void *b) {
}
} // extern "C"
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
// and may be overriden by user.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
-void __asan_on_error() {}
-#endif
+SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h
index 5ebfda693d0c..5a3533a319af 100644
--- a/lib/asan/asan_report.h
+++ b/lib/asan/asan_report.h
@@ -23,6 +23,7 @@ struct StackVarDescr {
uptr size;
const char *name_pos;
uptr name_len;
+ uptr line;
};
// Returns the number of globals close to the provided address and copies
diff --git a/lib/asan/asan_suppressions.cc b/lib/asan/asan_suppressions.cc
index 62c868d25dbc..ac8aa023f6ba 100644
--- a/lib/asan/asan_suppressions.cc
+++ b/lib/asan/asan_suppressions.cc
@@ -31,15 +31,9 @@ static const char *kSuppressionTypes[] = {
kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
kODRViolation};
-extern "C" {
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__asan_default_suppressions();
-#else
-// No week hooks, provide empty implementation.
-const char *__asan_default_suppressions() { return ""; }
-#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
-} // extern "C"
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) {
+ return "";
+}
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index 537b53d9e0c3..aaa32d6ea6da 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -239,7 +239,8 @@ void AsanThread::Init() {
thread_return_t AsanThread::ThreadStart(
uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init();
- asanThreadRegistry().StartThread(tid(), os_id, nullptr);
+ asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
+ nullptr);
if (signal_thread_is_registered)
atomic_store(signal_thread_is_registered, 1, memory_order_release);
@@ -299,24 +300,27 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}
uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
+ uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
+ mem_ptr -= SHADOW_GRANULARITY;
}
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr == kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
+ mem_ptr -= SHADOW_GRANULARITY;
}
if (shadow_ptr < shadow_bottom) {
return false;
}
- uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
+ uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];
diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc
index 78268d83e539..4ab535c42e5a 100644
--- a/lib/asan/asan_win.cc
+++ b/lib/asan/asan_win.cc
@@ -19,7 +19,6 @@
#include <stdlib.h>
-#include "asan_globals_win.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
@@ -28,6 +27,8 @@
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_win.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
using namespace __asan; // NOLINT
@@ -43,35 +44,50 @@ uptr __asan_get_shadow_memory_dynamic_address() {
__asan_init();
return __asan_shadow_memory_dynamic_address;
}
-
-// -------------------- A workaround for the absence of weak symbols ----- {{{
-// We don't have a direct equivalent of weak symbols when using MSVC, but we can
-// use the /alternatename directive to tell the linker to default a specific
-// symbol to a specific value, which works nicely for allocator hooks and
-// __asan_default_options().
-void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
-void __sanitizer_default_free_hook(void *ptr) { }
-const char* __asan_default_default_options() { return ""; }
-const char* __asan_default_default_suppressions() { return ""; }
-void __asan_default_on_error() {}
-// 64-bit msvc will not prepend an underscore for symbols.
-#ifdef _WIN64
-#pragma comment(linker, "/alternatename:__sanitizer_malloc_hook=__sanitizer_default_malloc_hook") // NOLINT
-#pragma comment(linker, "/alternatename:__sanitizer_free_hook=__sanitizer_default_free_hook") // NOLINT
-#pragma comment(linker, "/alternatename:__asan_default_options=__asan_default_default_options") // NOLINT
-#pragma comment(linker, "/alternatename:__asan_default_suppressions=__asan_default_default_suppressions") // NOLINT
-#pragma comment(linker, "/alternatename:__asan_on_error=__asan_default_on_error") // NOLINT
-#else
-#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
-#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
-#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
-#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
-#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
-#endif
-// }}}
} // extern "C"
// ---------------------- Windows-specific interceptors ---------------- {{{
+static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
+static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+
+ // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+
+ SignalContext sig = SignalContext::Create(exception_record, context);
+ ReportDeadlySignal(exception_record->ExceptionCode, sig);
+ UNREACHABLE("returned from reporting deadly signal");
+}
+
+// Wrapper SEH Handler. If the exception should be handled by asan, we call
+// __asan_unhandled_exception_filter, otherwise, we execute the user provided
+// exception handler or the default.
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ DWORD exception_code = info->ExceptionRecord->ExceptionCode;
+ if (__sanitizer::IsHandledDeadlyException(exception_code))
+ return __asan_unhandled_exception_filter(info);
+ if (user_seh_handler)
+ return user_seh_handler(info);
+ // Bubble out to the default exception filter.
+ if (default_seh_handler)
+ return default_seh_handler(info);
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter,
+ LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) {
+ CHECK(REAL(SetUnhandledExceptionFilter));
+ if (ExceptionFilter == &SEHHandler || common_flags()->allow_user_segv_handler)
+ return REAL(SetUnhandledExceptionFilter)(ExceptionFilter);
+ // We record the user provided exception handler to be called for all the
+ // exceptions unhandled by asan.
+ Swap(ExceptionFilter, user_seh_handler);
+ return ExceptionFilter;
+}
+
INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
CHECK(REAL(RtlRaiseException));
// This is a noreturn function, unless it's one of the exceptions raised to
@@ -144,6 +160,7 @@ namespace __asan {
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
@@ -260,60 +277,8 @@ void InitializePlatformExceptionHandlers() {
#endif
}
-static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
-
-// Check based on flags if we should report this exception.
-static bool ShouldReportDeadlyException(unsigned code) {
- switch (code) {
- case EXCEPTION_ACCESS_VIOLATION:
- case EXCEPTION_IN_PAGE_ERROR:
- return common_flags()->handle_segv;
- case EXCEPTION_BREAKPOINT:
- case EXCEPTION_ILLEGAL_INSTRUCTION: {
- return common_flags()->handle_sigill;
- }
- }
- return false;
-}
-
-// Return the textual name for this exception.
-const char *DescribeSignalOrException(int signo) {
- unsigned code = signo;
- // Get the string description of the exception if this is a known deadly
- // exception.
- switch (code) {
- case EXCEPTION_ACCESS_VIOLATION:
- return "access-violation";
- case EXCEPTION_IN_PAGE_ERROR:
- return "in-page-error";
- case EXCEPTION_BREAKPOINT:
- return "breakpoint";
- case EXCEPTION_ILLEGAL_INSTRUCTION:
- return "illegal-instruction";
- }
- return nullptr;
-}
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
- EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
- CONTEXT *context = info->ContextRecord;
-
- // Continue the search if the signal wasn't deadly.
- if (!ShouldReportDeadlyException(exception_record->ExceptionCode))
- return EXCEPTION_CONTINUE_SEARCH;
- // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
-
- SignalContext sig = SignalContext::Create(exception_record, context);
- ReportDeadlySignal(exception_record->ExceptionCode, sig);
- UNREACHABLE("returned from reporting deadly signal");
-}
-
-static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
- __asan_unhandled_exception_filter(info);
-
- // Bubble out to the default exception filter.
- return default_seh_handler(info);
+bool IsSystemHeapAddress(uptr addr) {
+ return ::HeapValidate(GetProcessHeap(), 0, (void*)addr) != FALSE;
}
// We want to install our own exception handler (EH) to print helpful reports
@@ -368,7 +333,7 @@ __declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
#endif
-ASAN_LINK_GLOBALS_WIN()
+WIN_FORCE_LINK(__asan_dso_reg_hook)
// }}}
} // namespace __asan
diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc
index 4764fd0a736c..189b4b141bfa 100644
--- a/lib/asan/asan_win_dll_thunk.cc
+++ b/lib/asan/asan_win_dll_thunk.cc
@@ -15,388 +15,41 @@
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
-// Only compile this code when building asan_dll_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DLL_THUNK
+#ifdef SANITIZER_DLL_THUNK
#include "asan_init_version.h"
-#include "asan_globals_win.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-#ifdef _M_IX86
-#define WINAPI __stdcall
-#else
-#define WINAPI
-#endif
-
-// ---------- Function interception helper functions and macros ----------- {{{1
-extern "C" {
-void *WINAPI GetModuleHandleA(const char *module_name);
-void *WINAPI GetProcAddress(void *module, const char *proc_name);
-void abort();
-}
-
-using namespace __sanitizer;
-
-static uptr getRealProcAddressOrDie(const char *name) {
- uptr ret =
- __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
- if (!ret)
- abort();
- return ret;
-}
-
-// We need to intercept some functions (e.g. ASan interface, memory allocator --
-// let's call them "hooks") exported by the DLL thunk and forward the hooks to
-// the runtime in the main module.
-// However, we don't want to keep two lists of these hooks.
-// To avoid that, the list of hooks should be defined using the
-// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
-// at once by calling INTERCEPT_HOOKS().
-
-// Use macro+template magic to automatically generate the list of hooks.
-// Each hook at line LINE defines a template class with a static
-// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
-// The default implementation of FunctionInterceptor<LINE> is to call
-// the Execute() method corresponding to the previous line.
-template<int LINE>
-struct FunctionInterceptor {
- static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
-};
-
-// There shouldn't be any hooks with negative definition line number.
-template<>
-struct FunctionInterceptor<0> {
- static void Execute() {}
-};
-
-#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
- template <> struct FunctionInterceptor<__LINE__> { \
- static void Execute() { \
- uptr wrapper = getRealProcAddressOrDie(main_function); \
- if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \
- abort(); \
- FunctionInterceptor<__LINE__ - 1>::Execute(); \
- } \
- };
-
-// Special case of hooks -- ASan own interface functions. Those are only called
-// after __asan_init, thus an empty implementation is sufficient.
-#define INTERFACE_FUNCTION(name) \
- extern "C" __declspec(noinline) void name() { \
- volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
- __debugbreak(); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name)
-
-// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
-#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute
-
-// We can't define our own version of strlen etc. because that would lead to
-// link-time or even type mismatch errors. Instead, we can declare a function
-// just to be able to get its address. Me may miss the first few calls to the
-// functions since it can be called before __asan_init, but that would lead to
-// false negatives in the startup code before user's global initializers, which
-// isn't a big deal.
-#define INTERCEPT_LIBRARY_FUNCTION(name) \
- extern "C" void name(); \
- INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)
-
-// Disable compiler warnings that show up if we declare our own version
-// of a compiler intrinsic (e.g. strlen).
-#pragma warning(disable: 4391)
-#pragma warning(disable: 4392)
-
-static void InterceptHooks();
-// }}}
-
-// ---------- Function wrapping helpers ----------------------------------- {{{1
-#define WRAP_V_V(name) \
- extern "C" void name() { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_V_W(name) \
- extern "C" void name(void *arg) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_V_WW(name) \
- extern "C" void name(void *arg1, void *arg2) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_V_WWW(name) \
- extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2, arg3); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_V(name) \
- extern "C" void *name() { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_W(name) \
- extern "C" void *name(void *arg) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WW(name) \
- extern "C" void *name(void *arg1, void *arg2) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5, void *arg6) { \
- typedef decltype(name) *fntype; \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-// }}}
-
-// ----------------- ASan own interface functions --------------------
-// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
-// want to call it in the __asan_init interceptor.
-WRAP_W_V(__asan_should_detect_stack_use_after_return)
-WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
-
-extern "C" {
- int __asan_option_detect_stack_use_after_return;
- uptr __asan_shadow_memory_dynamic_address;
-
- // Manually wrap __asan_init as we need to initialize
- // __asan_option_detect_stack_use_after_return afterwards.
- void __asan_init() {
- typedef void (*fntype)();
- static fntype fn = 0;
- // __asan_init is expected to be called by only one thread.
- if (fn) return;
-
- fn = (fntype)getRealProcAddressOrDie("__asan_init");
- fn();
- __asan_option_detect_stack_use_after_return =
- (__asan_should_detect_stack_use_after_return() != 0);
- __asan_shadow_memory_dynamic_address =
- (uptr)__asan_get_shadow_memory_dynamic_address();
- InterceptHooks();
- }
-}
-
-extern "C" void __asan_version_mismatch_check() {
- // Do nothing.
-}
-
-INTERFACE_FUNCTION(__asan_handle_no_return)
-INTERFACE_FUNCTION(__asan_unhandled_exception_filter)
-
-INTERFACE_FUNCTION(__asan_report_store1)
-INTERFACE_FUNCTION(__asan_report_store2)
-INTERFACE_FUNCTION(__asan_report_store4)
-INTERFACE_FUNCTION(__asan_report_store8)
-INTERFACE_FUNCTION(__asan_report_store16)
-INTERFACE_FUNCTION(__asan_report_store_n)
-
-INTERFACE_FUNCTION(__asan_report_load1)
-INTERFACE_FUNCTION(__asan_report_load2)
-INTERFACE_FUNCTION(__asan_report_load4)
-INTERFACE_FUNCTION(__asan_report_load8)
-INTERFACE_FUNCTION(__asan_report_load16)
-INTERFACE_FUNCTION(__asan_report_load_n)
-
-INTERFACE_FUNCTION(__asan_store1)
-INTERFACE_FUNCTION(__asan_store2)
-INTERFACE_FUNCTION(__asan_store4)
-INTERFACE_FUNCTION(__asan_store8)
-INTERFACE_FUNCTION(__asan_store16)
-INTERFACE_FUNCTION(__asan_storeN)
-
-INTERFACE_FUNCTION(__asan_load1)
-INTERFACE_FUNCTION(__asan_load2)
-INTERFACE_FUNCTION(__asan_load4)
-INTERFACE_FUNCTION(__asan_load8)
-INTERFACE_FUNCTION(__asan_load16)
-INTERFACE_FUNCTION(__asan_loadN)
-
-INTERFACE_FUNCTION(__asan_memcpy);
-INTERFACE_FUNCTION(__asan_memset);
-INTERFACE_FUNCTION(__asan_memmove);
-
-INTERFACE_FUNCTION(__asan_set_shadow_00);
-INTERFACE_FUNCTION(__asan_set_shadow_f1);
-INTERFACE_FUNCTION(__asan_set_shadow_f2);
-INTERFACE_FUNCTION(__asan_set_shadow_f3);
-INTERFACE_FUNCTION(__asan_set_shadow_f5);
-INTERFACE_FUNCTION(__asan_set_shadow_f8);
-
-INTERFACE_FUNCTION(__asan_alloca_poison);
-INTERFACE_FUNCTION(__asan_allocas_unpoison);
+// ASan's own interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "asan_interface.inc"
-INTERFACE_FUNCTION(__asan_register_globals)
-INTERFACE_FUNCTION(__asan_unregister_globals)
+// Memory allocation functions.
+INTERCEPT_WRAP_V_W(free)
+INTERCEPT_WRAP_V_W(_free_base)
+INTERCEPT_WRAP_V_WW(_free_dbg)
-INTERFACE_FUNCTION(__asan_before_dynamic_init)
-INTERFACE_FUNCTION(__asan_after_dynamic_init)
+INTERCEPT_WRAP_W_W(malloc)
+INTERCEPT_WRAP_W_W(_malloc_base)
+INTERCEPT_WRAP_W_WWWW(_malloc_dbg)
-INTERFACE_FUNCTION(__asan_poison_stack_memory)
-INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+INTERCEPT_WRAP_W_WW(calloc)
+INTERCEPT_WRAP_W_WW(_calloc_base)
+INTERCEPT_WRAP_W_WWWWW(_calloc_dbg)
+INTERCEPT_WRAP_W_WWW(_calloc_impl)
-INTERFACE_FUNCTION(__asan_poison_memory_region)
-INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+INTERCEPT_WRAP_W_WW(realloc)
+INTERCEPT_WRAP_W_WW(_realloc_base)
+INTERCEPT_WRAP_W_WWW(_realloc_dbg)
+INTERCEPT_WRAP_W_WWW(_recalloc)
+INTERCEPT_WRAP_W_WWW(_recalloc_base)
-INTERFACE_FUNCTION(__asan_address_is_poisoned)
-INTERFACE_FUNCTION(__asan_region_is_poisoned)
-
-INTERFACE_FUNCTION(__asan_get_current_fake_stack)
-INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
-
-INTERFACE_FUNCTION(__asan_stack_malloc_0)
-INTERFACE_FUNCTION(__asan_stack_malloc_1)
-INTERFACE_FUNCTION(__asan_stack_malloc_2)
-INTERFACE_FUNCTION(__asan_stack_malloc_3)
-INTERFACE_FUNCTION(__asan_stack_malloc_4)
-INTERFACE_FUNCTION(__asan_stack_malloc_5)
-INTERFACE_FUNCTION(__asan_stack_malloc_6)
-INTERFACE_FUNCTION(__asan_stack_malloc_7)
-INTERFACE_FUNCTION(__asan_stack_malloc_8)
-INTERFACE_FUNCTION(__asan_stack_malloc_9)
-INTERFACE_FUNCTION(__asan_stack_malloc_10)
-
-INTERFACE_FUNCTION(__asan_stack_free_0)
-INTERFACE_FUNCTION(__asan_stack_free_1)
-INTERFACE_FUNCTION(__asan_stack_free_2)
-INTERFACE_FUNCTION(__asan_stack_free_4)
-INTERFACE_FUNCTION(__asan_stack_free_5)
-INTERFACE_FUNCTION(__asan_stack_free_6)
-INTERFACE_FUNCTION(__asan_stack_free_7)
-INTERFACE_FUNCTION(__asan_stack_free_8)
-INTERFACE_FUNCTION(__asan_stack_free_9)
-INTERFACE_FUNCTION(__asan_stack_free_10)
-
-// FIXME: we might want to have a sanitizer_win_dll_thunk?
-INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
-INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
-INTERFACE_FUNCTION(__sanitizer_cov)
-INTERFACE_FUNCTION(__sanitizer_cov_dump)
-INTERFACE_FUNCTION(__sanitizer_dump_coverage)
-INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
-INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
-INTERFACE_FUNCTION(__sanitizer_cov_init)
-INTERFACE_FUNCTION(__sanitizer_cov_module_init)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
-INTERFACE_FUNCTION(__sanitizer_cov_with_check)
-INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
-INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
-INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
-INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
-INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
-INTERFACE_FUNCTION(__sanitizer_get_heap_size)
-INTERFACE_FUNCTION(__sanitizer_get_ownership)
-INTERFACE_FUNCTION(__sanitizer_get_total_unique_caller_callee_pairs)
-INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
-INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
-INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
-INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
-INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
-INTERFACE_FUNCTION(__sanitizer_symbolize_global)
-INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
-INTERFACE_FUNCTION(__sanitizer_ptr_sub)
-INTERFACE_FUNCTION(__sanitizer_report_error_summary)
-INTERFACE_FUNCTION(__sanitizer_reset_coverage)
-INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
-INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
-INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
-INTERFACE_FUNCTION(__sanitizer_set_death_callback)
-INTERFACE_FUNCTION(__sanitizer_set_report_path)
-INTERFACE_FUNCTION(__sanitizer_set_report_fd)
-INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
-INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
-INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
-INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
-INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
-INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
-INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
-INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
-INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
-INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
-INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
-
-// TODO(timurrrr): Add more interface functions on the as-needed basis.
-
-// ----------------- Memory allocation functions ---------------------
-WRAP_V_W(free)
-WRAP_V_W(_free_base)
-WRAP_V_WW(_free_dbg)
-
-WRAP_W_W(malloc)
-WRAP_W_W(_malloc_base)
-WRAP_W_WWWW(_malloc_dbg)
-
-WRAP_W_WW(calloc)
-WRAP_W_WW(_calloc_base)
-WRAP_W_WWWWW(_calloc_dbg)
-WRAP_W_WWW(_calloc_impl)
-
-WRAP_W_WW(realloc)
-WRAP_W_WW(_realloc_base)
-WRAP_W_WWW(_realloc_dbg)
-WRAP_W_WWW(_recalloc)
-WRAP_W_WWW(_recalloc_base)
-
-WRAP_W_W(_msize)
-WRAP_W_W(_expand)
-WRAP_W_W(_expand_dbg)
+INTERCEPT_WRAP_W_W(_msize)
+INTERCEPT_WRAP_W_W(_expand)
+INTERCEPT_WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
@@ -405,20 +58,6 @@ WRAP_W_W(_expand_dbg)
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
-
-#ifdef _WIN64
-INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
-#else
-INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
-
-// _except_handler4 checks -GS cookie which is different for each module, so we
-// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
-INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
- __asan_handle_no_return();
- return REAL(_except_handler4)(a, b, c, d);
-}
-#endif
-
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
@@ -443,41 +82,70 @@ INTERCEPT_LIBRARY_FUNCTION(strpbrk);
INTERCEPT_LIBRARY_FUNCTION(strrchr);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
+INTERCEPT_LIBRARY_FUNCTION(strtok);
INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
-// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS
-// is defined.
-void InterceptHooks() {
- INTERCEPT_HOOKS();
-#ifndef _WIN64
- INTERCEPT_FUNCTION(_except_handler4);
-#endif
+#ifdef _WIN64
+INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
+#else
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+// _except_handler4 checks -GS cookie which is different for each module, so we
+// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
}
+#endif
-// We want to call __asan_init before C/C++ initializers/constructors are
-// executed, otherwise functions like memset might be invoked.
-// For some strange reason, merely linking in asan_preinit.cc doesn't work
-// as the callback is never called... Is link.exe doing something too smart?
+// Windows-specific functions not included in asan_interface.inc.
+INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
+INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
+INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
-// In DLLs, the callbacks are expected to return 0,
-// otherwise CRT initialization fails.
-static int call_asan_init() {
- __asan_init();
+using namespace __sanitizer;
+
+extern "C" {
+int __asan_option_detect_stack_use_after_return;
+uptr __asan_shadow_memory_dynamic_address;
+} // extern "C"
+
+static int asan_dll_thunk_init() {
+ typedef void (*fntype)();
+ static fntype fn = 0;
+ // asan_dll_thunk_init is expected to be called by only one thread.
+ if (fn) return 0;
+
+ // Ensure all interception was executed.
+ __dll_thunk_init();
+
+ fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init");
+ fn();
+ __asan_option_detect_stack_use_after_return =
+ (__asan_should_detect_stack_use_after_return() != 0);
+ __asan_shadow_memory_dynamic_address =
+ (uptr)__asan_get_shadow_memory_dynamic_address();
+
+#ifndef _WIN64
+ INTERCEPT_FUNCTION(_except_handler4);
+#endif
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
return 0;
}
+
#pragma section(".CRT$XIB", long, read) // NOLINT
-__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init;
static void WINAPI asan_thread_init(void *mod, unsigned long reason,
- void *reserved) {
- if (reason == /*DLL_PROCESS_ATTACH=*/1) __asan_init();
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init();
}
#pragma section(".CRT$XLAB", long, read) // NOLINT
__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
-ASAN_LINK_GLOBALS_WIN()
+WIN_FORCE_LINK(__asan_dso_reg_hook)
-#endif // ASAN_DLL_THUNK
+#endif // SANITIZER_DLL_THUNK
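For readers unfamiliar with the CRT section trick used above: rather than relying on a normal constructor, the thunk plants a function pointer into one of the MSVC CRT initializer tables so that its setup runs before user initializers. A minimal stand-alone sketch of the same pattern (hypothetical names, MSVC only):

#include <windows.h>

// Hypothetical early-init callback. As the thunk's comment notes, in DLL
// builds such callbacks must return 0, otherwise CRT initialization fails.
static int my_early_init(void) {
  OutputDebugStringA("ran before C/C++ initializers\n");
  return 0;
}

// Register the callback by placing a pointer to it in the .CRT$XIB
// initializer section, the same mechanism used for asan_dll_thunk_init.
#pragma section(".CRT$XIB", long, read)  // NOLINT
__declspec(allocate(".CRT$XIB")) static int (*p_my_early_init)(void) =
    my_early_init;

The .CRT$XI* entries are plain C initializers run in section-name order, so an XIB callback executes before the C++ dynamic initializers placed in the .CRT$XC* sections.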
diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cc b/lib/asan/asan_win_dynamic_runtime_thunk.cc
index 8e42f03c1a0d..416c73b23629 100644
--- a/lib/asan/asan_win_dynamic_runtime_thunk.cc
+++ b/lib/asan/asan_win_dynamic_runtime_thunk.cc
@@ -14,20 +14,24 @@
// using the default "import library" generated when linking the DLL RTL.
//
// This includes:
+// - creating weak aliases to the default implementations imported from the ASan DLL.
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
-// Only compile this code when building asan_dynamic_runtime_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
-#include "asan_globals_win.h"
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_common/sanitizer_win_defs.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
+// Define weak alias for all weak functions imported from asan dll.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "asan_interface.inc"
+
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XIB", long, read) // NOLINT
#pragma section(".CRT$XID", long, read) // NOLINT
@@ -122,6 +126,6 @@ __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
-ASAN_LINK_GLOBALS_WIN()
+WIN_FORCE_LINK(__asan_dso_reg_hook)
-#endif // ASAN_DYNAMIC_RUNTIME_THUNK
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
diff --git a/lib/asan/asan_win_weak_interception.cc b/lib/asan/asan_win_weak_interception.cc
new file mode 100644
index 000000000000..ca26f914cf5f
--- /dev/null
+++ b/lib/asan/asan_win_weak_interception.cc
@@ -0,0 +1,23 @@
+//===-- asan_win_weak_interception.cc -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Address Sanitizer when it is implemented as
+// a shared library on Windows (DLL), in order to delegate calls to weak
+// functions to the implementations in the main executable when strong
+// definitions are provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_common/sanitizer_win_weak_interception.h"
+#include "asan_interface_internal.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "asan_interface.inc"
+#endif // SANITIZER_DYNAMIC
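All three thunk variants above consume the same asan_interface.inc through the X-macro idiom: each client redefines INTERFACE_FUNCTION and INTERFACE_WEAK_FUNCTION to expand into whatever it needs (interception stubs, weak import aliases, or strong-definition overrides) and then includes the list. A rough self-contained sketch of the idiom, with a hypothetical interface list:

/* Stand-in for an .inc interface list: one entry per exported function. */
#define MY_INTERFACE_LIST          \
  INTERFACE_FUNCTION(frob_report)  \
  INTERFACE_WEAK_FUNCTION(frob_on_error)

/* Client 1: expand the list into forward declarations. */
#define INTERFACE_FUNCTION(Name) void Name(void);
#define INTERFACE_WEAK_FUNCTION(Name) void Name(void);
MY_INTERFACE_LIST
#undef INTERFACE_FUNCTION
#undef INTERFACE_WEAK_FUNCTION

/* Client 2: expand the same list into a table of names, e.g. for run-time
   symbol lookup. */
#define INTERFACE_FUNCTION(Name) #Name,
#define INTERFACE_WEAK_FUNCTION(Name) #Name,
static const char *kInterfaceNames[] = { MY_INTERFACE_LIST };
#undef INTERFACE_FUNCTION
#undef INTERFACE_WEAK_FUNCTION

Keeping the function list in a single .inc file means the DLL thunk, the dynamic runtime thunk, and the weak interception module cannot drift out of sync with each other.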
diff --git a/lib/asan/scripts/asan_symbolize.py b/lib/asan/scripts/asan_symbolize.py
index d82c6773e306..1a56e44127c1 100755
--- a/lib/asan/scripts/asan_symbolize.py
+++ b/lib/asan/scripts/asan_symbolize.py
@@ -89,10 +89,12 @@ class LLVMSymbolizer(Symbolizer):
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
- print ' '.join(cmd)
+ print(' '.join(cmd))
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ bufsize=0,
+ universal_newlines=True)
except OSError:
result = None
return result
@@ -105,8 +107,8 @@ class LLVMSymbolizer(Symbolizer):
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
- print symbolizer_input
- print >> self.pipe.stdin, symbolizer_input
+ print(symbolizer_input)
+ self.pipe.stdin.write("%s\n" % symbolizer_input)
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
@@ -151,9 +153,11 @@ class Addr2LineSymbolizer(Symbolizer):
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
- print ' '.join(cmd)
+ print(' '.join(cmd))
return subprocess.Popen(cmd,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ bufsize=0,
+ universal_newlines=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
@@ -161,8 +165,8 @@ class Addr2LineSymbolizer(Symbolizer):
return None
lines = []
try:
- print >> self.pipe.stdin, offset
- print >> self.pipe.stdin, self.output_terminator
+ self.pipe.stdin.write("%s\n" % offset)
+ self.pipe.stdin.write("%s\n" % self.output_terminator)
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
@@ -219,7 +223,7 @@ class DarwinSymbolizer(Symbolizer):
def open_atos(self):
if DEBUG:
- print 'atos -o %s -arch %s' % (self.binary, self.arch)
+ print('atos -o %s -arch %s' % (self.binary, self.arch))
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
@@ -234,7 +238,7 @@ class DarwinSymbolizer(Symbolizer):
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
- print 'atos_line: ', atos_line
+ print('atos_line: ', atos_line)
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
@@ -348,7 +352,7 @@ class BreakpadSymbolizer(Symbolizer):
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
- print result
+ print(result)
return result
else:
return None
@@ -434,7 +438,7 @@ class SymbolizationLoop(object):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
- print '\n'.join(processed)
+ print('\n'.join(processed))
def process_line_echo(self, line):
return [line.rstrip()]
@@ -448,7 +452,7 @@ class SymbolizationLoop(object):
if not match:
return [self.current_line]
if DEBUG:
- print line
+ print(line)
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
index e4e9524e73b7..d13962b8f505 100644
--- a/lib/asan/tests/asan_interface_test.cc
+++ b/lib/asan/tests/asan_interface_test.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include <sanitizer/allocator_interface.h>
#include <sanitizer/asan_interface.h>
+#include <vector>
TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
EXPECT_EQ(0U, __sanitizer_get_estimated_allocated_size(0));
diff --git a/lib/asan/tests/asan_internal_interface_test.cc b/lib/asan/tests/asan_internal_interface_test.cc
index ae4759478170..e247bb4299fa 100644
--- a/lib/asan/tests/asan_internal_interface_test.cc
+++ b/lib/asan/tests/asan_internal_interface_test.cc
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "asan_interface_internal.h"
#include "asan_test_utils.h"
+#include <vector>
TEST(AddressSanitizerInternalInterface, SetShadow) {
std::vector<char> buffer(17, 0xff);
diff --git a/lib/asan/tests/asan_mac_test_helpers.mm b/lib/asan/tests/asan_mac_test_helpers.mm
index a7e4b9d1928b..3f8fa26d95b8 100644
--- a/lib/asan/tests/asan_mac_test_helpers.mm
+++ b/lib/asan/tests/asan_mac_test_helpers.mm
@@ -237,4 +237,5 @@ void TestNSURLDeallocation() {
[[NSURL alloc] initWithString:@"Saved Application State"
relativeToURL:base];
[u release];
+ [base release];
}
diff --git a/lib/asan/tests/asan_mem_test.cc b/lib/asan/tests/asan_mem_test.cc
index 4a941faa0430..c3208868464e 100644
--- a/lib/asan/tests/asan_mem_test.cc
+++ b/lib/asan/tests/asan_mem_test.cc
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "asan_test_utils.h"
+#include <vector>
template<typename T>
void MemSetOOBTestTemplate(size_t length) {
@@ -76,7 +77,7 @@ TEST(AddressSanitizer, MemSetOOBTest) {
// Strictly speaking we are not guaranteed to find such two pointers,
// but given the structure of asan's allocator we will.
static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) {
- vector<uintptr_t> v;
+ std::vector<uintptr_t> v;
bool res = false;
for (size_t i = 0; i < 1000U && !res; i++) {
v.push_back(reinterpret_cast<uintptr_t>(new char[size]));
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index 65acb2839ba1..b3a235e478e3 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -97,6 +97,9 @@ TEST(AddressSanitizer, NoInstMallocTest) {
MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}
+#ifndef __powerpc64__
+// FIXME: This has not reliably worked on powerpc since r279664. Re-enable
+// this once the problem is tracked down and fixed.
TEST(AddressSanitizer, ThreadedMallocStressTest) {
const int kNumThreads = 4;
const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
@@ -109,6 +112,7 @@ TEST(AddressSanitizer, ThreadedMallocStressTest) {
PTHREAD_JOIN(t[i], 0);
}
}
+#endif
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
@@ -206,6 +210,10 @@ void *ThreadedOneSizeMallocStress(void *unused) {
return NULL;
}
+#ifndef __powerpc64__
+// FIXME: This has not reliably worked on powerpc since r279664. Re-enable
+// this once the problem is tracked down and fixed.
+
TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
const int kNumThreads = 4;
pthread_t t[kNumThreads];
@@ -216,6 +224,7 @@ TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
PTHREAD_JOIN(t[i], 0);
}
}
+#endif
TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
using __asan::kHighMemEnd;
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 424a79e00a4a..7ac72955f487 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -337,8 +337,9 @@ void *ManyThreadsWorker(void *a) {
return 0;
}
-#if !defined(__aarch64__)
+#if !defined(__aarch64__) && !defined(__powerpc64__)
// FIXME: Infinite loop in AArch64 (PR24389).
+// FIXME: Also occasional hang on powerpc. Maybe same problem as on AArch64?
TEST(AddressSanitizer, ManyThreadsTest) {
const size_t kNumThreads =
(SANITIZER_WORDSIZE == 32 || ASAN_AVOID_EXPENSIVE_TESTS) ? 30 : 1000;
@@ -684,6 +685,7 @@ void *ThreadStackReuseFunc2(void *unused) {
return 0;
}
+#if !defined(__thumb__)
TEST(AddressSanitizer, ThreadStackReuseTest) {
pthread_t t;
PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc1, 0);
@@ -691,6 +693,7 @@ TEST(AddressSanitizer, ThreadStackReuseTest) {
PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc2, 0);
PTHREAD_JOIN(t, 0);
}
+#endif
#if defined(__SSE2__)
#include <emmintrin.h>
@@ -945,7 +948,7 @@ TEST(AddressSanitizer, ShadowGapTest) {
char *addr = (char*)0x0000100000080000;
# endif
#endif
- EXPECT_DEATH(*addr = 1, "AddressSanitizer: SEGV on unknown");
+ EXPECT_DEATH(*addr = 1, "AddressSanitizer: (SEGV|BUS) on unknown");
}
#endif // ASAN_NEEDS_SEGV
@@ -1090,6 +1093,11 @@ TEST(AddressSanitizer, ThreadedStressStackReuseTest) {
}
}
+// pthread_exit tries to perform unwinding that leads to dlopen'ing
+// libgcc_s.so. dlopen in turn calls malloc to store the "libgcc_s.so" string,
+// which confuses LSan on Thumb because it fails to understand that this
+// allocation happens in the dynamic linker and should be ignored.
+#if !defined(__thumb__)
static void *PthreadExit(void *a) {
pthread_exit(0);
return 0;
@@ -1102,6 +1110,7 @@ TEST(AddressSanitizer, PthreadExitTest) {
PTHREAD_JOIN(t, 0);
}
}
+#endif
// FIXME: Why does clang-cl define __EXCEPTIONS?
#if defined(__EXCEPTIONS) && !defined(_WIN32)
diff --git a/lib/asan/tests/asan_test_config.h b/lib/asan/tests/asan_test_config.h
index 92f276307481..8493f41ef4dc 100644
--- a/lib/asan/tests/asan_test_config.h
+++ b/lib/asan/tests/asan_test_config.h
@@ -17,13 +17,9 @@
#ifndef ASAN_TEST_CONFIG_H
#define ASAN_TEST_CONFIG_H
-#include <vector>
#include <string>
-#include <map>
using std::string;
-using std::vector;
-using std::map;
#ifndef ASAN_UAR
# error "please define ASAN_UAR"
diff --git a/lib/builtins/CMakeLists.txt b/lib/builtins/CMakeLists.txt
index 3cf78616ac07..161487e703d7 100644
--- a/lib/builtins/CMakeLists.txt
+++ b/lib/builtins/CMakeLists.txt
@@ -71,6 +71,7 @@ set(GENERIC_SOURCES
extendsfdf2.c
extendhfsf2.c
ffsdi2.c
+ ffssi2.c
ffsti2.c
fixdfdi.c
fixdfsi.c
@@ -132,6 +133,7 @@ set(GENERIC_SOURCES
negvdi2.c
negvsi2.c
negvti2.c
+ os_version_check.c
paritydi2.c
paritysi2.c
parityti2.c
@@ -174,15 +176,6 @@ if(COMPILER_RT_HAS_ATOMIC_KEYWORD AND NOT COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN)
atomic.c)
endif()
-set(MSVC_SOURCES
- divsc3.c
- divdc3.c
- divxc3.c
- mulsc3.c
- muldc3.c
- mulxc3.c)
-
-
if(APPLE)
set(GENERIC_SOURCES
${GENERIC_SOURCES}
@@ -262,9 +255,9 @@ else () # MSVC
x86_64/floatdidf.c
x86_64/floatdisf.c
x86_64/floatdixf.c
- ${MSVC_SOURCES})
+ ${GENERIC_SOURCES})
set(x86_64h_SOURCES ${x86_64_SOURCES})
- set(i386_SOURCES ${MSVC_SOURCES})
+ set(i386_SOURCES ${GENERIC_SOURCES})
set(i686_SOURCES ${i386_SOURCES})
endif () # if (NOT MSVC)
@@ -302,6 +295,13 @@ set(arm_SOURCES
arm/umodsi3.S
${GENERIC_SOURCES})
+set(thumb1_SOURCES
+ arm/divsi3.S
+ arm/udivsi3.S
+ arm/comparesf2.S
+ arm/addsf3.S
+ ${GENERIC_SOURCES})
+
set(arm_EABI_SOURCES
arm/aeabi_cdcmp.S
arm/aeabi_cdcmpeq_check_nan.c
@@ -320,6 +320,7 @@ set(arm_EABI_SOURCES
arm/aeabi_memset.S
arm/aeabi_uidivmod.S
arm/aeabi_uldivmod.S)
+
set(arm_Thumb1_JT_SOURCES
arm/switch16.S
arm/switch32.S
@@ -401,6 +402,10 @@ elseif(NOT WIN32)
${arm_SOURCES}
${arm_EABI_SOURCES}
${arm_Thumb1_SOURCES})
+
+ set(thumb1_SOURCES
+ ${thumb1_SOURCES}
+ ${arm_EABI_SOURCES})
endif()
set(aarch64_SOURCES
@@ -431,7 +436,7 @@ set(armv7k_SOURCES ${arm_SOURCES})
set(arm64_SOURCES ${aarch64_SOURCES})
# macho_embedded archs
-set(armv6m_SOURCES ${GENERIC_SOURCES})
+set(armv6m_SOURCES ${thumb1_SOURCES})
set(armv7m_SOURCES ${arm_SOURCES})
set(armv7em_SOURCES ${arm_SOURCES})
@@ -486,7 +491,7 @@ else ()
# Needed for clear_cache on debug mode, due to r7's usage in inline asm.
# Release mode already sets it via -O2/3, Debug mode doesn't.
if (${arch} STREQUAL "armhf")
- list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer)
+ list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer -DCOMPILER_RT_ARMHF_TARGET)
endif()
add_compiler_rt_runtime(clang_rt.builtins
diff --git a/lib/builtins/README.txt b/lib/builtins/README.txt
index ad36e4e5279a..b3d083614ee0 100644
--- a/lib/builtins/README.txt
+++ b/lib/builtins/README.txt
@@ -45,6 +45,7 @@ si_int __ctzsi2(si_int a); // count trailing zeros
si_int __ctzdi2(di_int a); // count trailing zeros
si_int __ctzti2(ti_int a); // count trailing zeros
+si_int __ffssi2(si_int a); // find least significant 1 bit
si_int __ffsdi2(di_int a); // find least significant 1 bit
si_int __ffsti2(ti_int a); // find least significant 1 bit
diff --git a/lib/builtins/arm/addsf3.S b/lib/builtins/arm/addsf3.S
new file mode 100644
index 000000000000..362b5c147ea6
--- /dev/null
+++ b/lib/builtins/arm/addsf3.S
@@ -0,0 +1,277 @@
+/*===-- addsf3.S - Adds two single precision floating point numbers -----===//
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===//
+ *
+ * This file implements the __addsf3 (single precision floating point number
+ * addition with the IEEE-754 default rounding (to nearest, ties to even))
+ * function for the ARM Thumb1 ISA.
+ *
+ *===----------------------------------------------------------------------===*/
+
+#include "../assembly.h"
+#define significandBits 23
+#define typeWidth 32
+
+ .syntax unified
+ .text
+ .thumb
+ .p2align 2
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fadd, __addsf3)
+
+DEFINE_COMPILERRT_THUMB_FUNCTION(__addsf3)
+ push {r4, r5, r6, r7, lr}
+ // Get the absolute value of a and b.
+ lsls r2, r0, #1
+ lsls r3, r1, #1
+ lsrs r2, r2, #1 /* aAbs */
+ beq LOCAL_LABEL(a_zero_nan_inf)
+ lsrs r3, r3, #1 /* bAbs */
+ beq LOCAL_LABEL(zero_nan_inf)
+
+ // Detect if a or b is infinity or Nan.
+ lsrs r6, r2, #(significandBits)
+ lsrs r7, r3, #(significandBits)
+ cmp r6, #0xFF
+ beq LOCAL_LABEL(zero_nan_inf)
+ cmp r7, #0xFF
+ beq LOCAL_LABEL(zero_nan_inf)
+
+ // Swap Rep and Abs so that a and aAbs has the larger absolute value.
+ cmp r2, r3
+ bhs LOCAL_LABEL(no_swap)
+ movs r4, r0
+ movs r5, r2
+ movs r0, r1
+ movs r2, r3
+ movs r1, r4
+ movs r3, r5
+LOCAL_LABEL(no_swap):
+
+ // Get the significands and shift them to give us round, guard and sticky.
+ lsls r4, r0, #(typeWidth - significandBits)
+ lsrs r4, r4, #(typeWidth - significandBits - 3) /* aSignificand << 3 */
+ lsls r5, r1, #(typeWidth - significandBits)
+ lsrs r5, r5, #(typeWidth - significandBits - 3) /* bSignificand << 3 */
+
+ // Get the implicitBit.
+ movs r6, #1
+ lsls r6, r6, #(significandBits + 3)
+
+ // Get aExponent and set implicit bit if necessary.
+ lsrs r2, r2, #(significandBits)
+ beq LOCAL_LABEL(a_done_implicit_bit)
+ orrs r4, r6
+LOCAL_LABEL(a_done_implicit_bit):
+
+ // Get bExponent and set implicit bit if necessary.
+ lsrs r3, r3, #(significandBits)
+ beq LOCAL_LABEL(b_done_implicit_bit)
+ orrs r5, r6
+LOCAL_LABEL(b_done_implicit_bit):
+
+ // Get the difference in exponents.
+ subs r6, r2, r3
+ beq LOCAL_LABEL(done_align)
+
+ // If b is denormal, then a must be normal as align > 0, and we only need to
+ // right shift bSignificand by (align - 1) bits.
+ cmp r3, #0
+ bne 1f
+ subs r6, r6, #1
+1:
+
+ // bExponent is no longer needed; r3 is dead here.
+ // Set sticky bits of b: sticky = bSignificand << (typeWidth - align).
+ movs r3, #(typeWidth)
+ subs r3, r3, r6
+ movs r7, r5
+ lsls r7, r3
+ beq 1f
+ movs r7, #1
+1:
+
+ // bSignificand = bSignificand >> align | sticky;
+ lsrs r5, r6
+ orrs r5, r7
+ bne LOCAL_LABEL(done_align)
+ movs r5, #1 // sticky; b is known to be non-zero.
+
+LOCAL_LABEL(done_align):
+ // isSubtraction = (aRep ^ bRep) >> 31;
+ movs r7, r0
+ eors r7, r1
+ lsrs r7, #31
+ bne LOCAL_LABEL(do_subtraction)
+
+ // Same sign, do Addition.
+
+ // aSignificand += bSignificand;
+ adds r4, r4, r5
+
+ // Check carry bit.
+ movs r6, #1
+ lsls r6, r6, #(significandBits + 3 + 1)
+ movs r7, r4
+ ands r7, r6
+ beq LOCAL_LABEL(form_result)
+ // If the addition carried up, we need to right-shift the result and
+ // adjust the exponent.
+ movs r7, r4
+ movs r6, #1
+ ands r7, r6 // sticky = aSignificand & 1;
+ lsrs r4, #1
+ orrs r4, r7 // result Significand
+ adds r2, #1 // result Exponent
+ // If we have overflowed the type, return +/- infinity.
+ cmp r2, 0xFF
+ beq LOCAL_LABEL(ret_inf)
+
+LOCAL_LABEL(form_result):
+ // Shift the sign, exponent and significand into place.
+ lsrs r0, #(typeWidth - 1)
+ lsls r0, #(typeWidth - 1) // Get Sign.
+ lsls r2, #(significandBits)
+ orrs r0, r2
+ movs r1, r4
+ lsls r4, #(typeWidth - significandBits - 3)
+ lsrs r4, #(typeWidth - significandBits)
+ orrs r0, r4
+
+ // Final rounding. The result may overflow to infinity, but that is the
+ // correct result in that case.
+ // roundGuardSticky = aSignificand & 0x7;
+ movs r2, #0x7
+ ands r1, r2
+ // if (roundGuardSticky > 0x4) result++;
+
+ cmp r1, #0x4
+ blt LOCAL_LABEL(done_round)
+ beq 1f
+ adds r0, #1
+ pop {r4, r5, r6, r7, pc}
+1:
+
+ // if (roundGuardSticky == 0x4) result += result & 1;
+ movs r1, r0
+ lsrs r1, #1
+ bcc LOCAL_LABEL(done_round)
+ adds r0, r0, #1
+LOCAL_LABEL(done_round):
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(do_subtraction):
+ subs r4, r4, r5 // aSignificand -= bSignificand;
+ beq LOCAL_LABEL(ret_zero)
+ movs r6, r4
+ cmp r2, 0
+ beq LOCAL_LABEL(form_result) // if a's exp is 0, no need to normalize.
+ // If partial cancellation occurred, we need to left-shift the result
+ // and adjust the exponent:
+ lsrs r6, r6, #(significandBits + 3)
+ bne LOCAL_LABEL(form_result)
+
+ push {r0, r1, r2, r3}
+ movs r0, r4
+ bl __clzsi2
+ movs r5, r0
+ pop {r0, r1, r2, r3}
+ // shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
+ subs r5, r5, #(typeWidth - significandBits - 3 - 1)
+ // aSignificand <<= shift; aExponent -= shift;
+ lsls r4, r5
+ subs r2, r2, r5
+ bgt LOCAL_LABEL(form_result)
+
+ // Do normalization if aExponent <= 0.
+ movs r6, #1
+ subs r6, r6, r2 // 1 - aExponent;
+ movs r2, #0 // aExponent = 0;
+ movs r3, #(typeWidth) // bExponent is dead.
+ subs r3, r3, r6
+ movs r7, r4
+ lsls r7, r3 // stickyBit = (bool)(aSignificand << (typeWidth - align))
+ beq 1f
+ movs r7, #1
+1:
+ lsrs r4, r6 /* aSignificand >> shift */
+ orrs r4, r7
+ b LOCAL_LABEL(form_result)
+
+LOCAL_LABEL(ret_zero):
+ movs r0, #0
+ pop {r4, r5, r6, r7, pc}
+
+
+LOCAL_LABEL(a_zero_nan_inf):
+ lsrs r3, r3, #1
+
+LOCAL_LABEL(zero_nan_inf):
+ // Here r2 has aAbs, r3 has bAbs
+ movs r4, #0xFF
+ lsls r4, r4, #(significandBits) // Make +inf.
+
+ cmp r2, r4
+ bhi LOCAL_LABEL(a_is_nan)
+ cmp r3, r4
+ bhi LOCAL_LABEL(b_is_nan)
+
+ cmp r2, r4
+ bne LOCAL_LABEL(a_is_rational)
+ // aAbs is INF.
+ eors r1, r0 // aRep ^ bRep.
+ movs r6, #1
+ lsls r6, r6, #(typeWidth - 1) // get sign mask.
+ cmp r1, r6 // if they only differ on sign bit, it's -INF + INF
+ beq LOCAL_LABEL(a_is_nan)
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(a_is_rational):
+ cmp r3, r4
+ bne LOCAL_LABEL(b_is_rational)
+ movs r0, r1
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(b_is_rational):
+ // either a or b or both are zero.
+ adds r4, r2, r3
+ beq LOCAL_LABEL(both_zero)
+ cmp r2, #0 // is aAbs 0?
+ beq LOCAL_LABEL(ret_b)
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(both_zero):
+ ands r0, r1 // +0 + -0 = +0
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(ret_b):
+ movs r0, r1
+
+LOCAL_LABEL(ret):
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(b_is_nan):
+ movs r0, r1
+LOCAL_LABEL(a_is_nan):
+ movs r1, #1
+ lsls r1, r1, #(significandBits -1) // r1 is quiet bit.
+ orrs r0, r1
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(ret_inf):
+ movs r4, #0xFF
+ lsls r4, r4, #(significandBits)
+ orrs r0, r4
+ lsrs r0, r0, #(significandBits)
+ lsls r0, r0, #(significandBits)
+ pop {r4, r5, r6, r7, pc}
+
+
+END_COMPILERRT_FUNCTION(__addsf3)
+
+NO_EXEC_STACK_DIRECTIVE
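The final-rounding block above implements IEEE-754 round-to-nearest, ties-to-even, using the three extra low bits (round, guard, and sticky) that the significands were shifted up by at the start of the routine. A minimal C sketch of that policy on a bare significand (simplified: the assembly adds 1 to the fully assembled float, so an overflowing round naturally produces infinity):

#include <assert.h>
#include <stdint.h>

/* Round a significand carrying three extra low bits (round/guard/sticky)
   to nearest, ties to even. */
static uint32_t round_rne(uint32_t sig_with_grs) {
  uint32_t result = sig_with_grs >> 3;
  uint32_t grs = sig_with_grs & 0x7;
  if (grs > 0x4)
    result += 1;            /* more than halfway: round up */
  else if (grs == 0x4)
    result += result & 1;   /* exactly halfway: round to even */
  return result;
}

int main(void) {
  assert(round_rne(0x23) == 0x4);  /* 100.011 rounds down */
  assert(round_rne(0x25) == 0x5);  /* 100.101 rounds up */
  assert(round_rne(0x24) == 0x4);  /* 100.100 tie: already even */
  assert(round_rne(0x2C) == 0x6);  /* 101.100 tie: rounds to even */
  return 0;
}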
diff --git a/lib/builtins/arm/aeabi_cdcmp.S b/lib/builtins/arm/aeabi_cdcmp.S
index 8008f5fca262..b67814d9f20b 100644
--- a/lib/builtins/arm/aeabi_cdcmp.S
+++ b/lib/builtins/arm/aeabi_cdcmp.S
@@ -30,6 +30,19 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
push {r0-r3, lr}
bl __aeabi_cdcmpeq_check_nan
cmp r0, #1
+#if __ARM_ARCH_ISA_THUMB == 1
+ beq 1f
+ // NaN has been ruled out, so __aeabi_cdcmple can't trap
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_cdcmple
+ pop {r0-r3, pc}
+1:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cdcmple can't trap
@@ -37,6 +50,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
msr CPSR_f, #APSR_C
JMP(lr)
+#endif
END_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
@@ -59,6 +73,28 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
bl __aeabi_dcmplt
cmp r0, #1
+#if __ARM_ARCH_ISA_THUMB == 1
+ bne 1f
+ // Z = 0, C = 0
+ movs r0, #1
+ lsls r0, r0, #1
+ pop {r0-r3, pc}
+1:
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_dcmpeq
+ cmp r0, #1
+ bne 2f
+ // Z = 1, C = 1
+ movs r0, #2
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+2:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
moveq ip, #0
beq 1f
@@ -72,6 +108,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
msr CPSR_f, ip
pop {r0-r3}
POP_PC()
+#endif
END_COMPILERRT_FUNCTION(__aeabi_cdcmple)
// int __aeabi_cdrcmple(double a, double b) {
diff --git a/lib/builtins/arm/aeabi_cfcmp.S b/lib/builtins/arm/aeabi_cfcmp.S
index 274baf7aecf2..e37aa3d06c4e 100644
--- a/lib/builtins/arm/aeabi_cfcmp.S
+++ b/lib/builtins/arm/aeabi_cfcmp.S
@@ -30,6 +30,19 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
push {r0-r3, lr}
bl __aeabi_cfcmpeq_check_nan
cmp r0, #1
+#if __ARM_ARCH_ISA_THUMB == 1
+ beq 1f
+ // NaN has been ruled out, so __aeabi_cfcmple can't trap
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_cfcmple
+ pop {r0-r3, pc}
+1:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cfcmple can't trap
@@ -37,6 +50,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
msr CPSR_f, #APSR_C
JMP(lr)
+#endif
END_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
@@ -59,6 +73,28 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
bl __aeabi_fcmplt
cmp r0, #1
+#if __ARM_ARCH_ISA_THUMB == 1
+ bne 1f
+ // Z = 0, C = 0
+ movs r0, #1
+ lsls r0, r0, #1
+ pop {r0-r3, pc}
+1:
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_fcmpeq
+ cmp r0, #1
+ bne 2f
+ // Z = 1, C = 1
+ movs r0, #2
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+2:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
moveq ip, #0
beq 1f
@@ -72,6 +108,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
msr CPSR_f, ip
pop {r0-r3}
POP_PC()
+#endif
END_COMPILERRT_FUNCTION(__aeabi_cfcmple)
// int __aeabi_cfrcmple(float a, float b) {
diff --git a/lib/builtins/arm/aeabi_dcmp.S b/lib/builtins/arm/aeabi_dcmp.S
index 43e439268d9a..51539c0ac813 100644
--- a/lib/builtins/arm/aeabi_dcmp.S
+++ b/lib/builtins/arm/aeabi_dcmp.S
@@ -26,10 +26,10 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond) \
bl SYMBOL_NAME(__ ## cond ## df2) SEPARATOR \
cmp r0, #0 SEPARATOR \
b ## cond 1f SEPARATOR \
- mov r0, #0 SEPARATOR \
+ movs r0, #0 SEPARATOR \
pop { r4, pc } SEPARATOR \
1: SEPARATOR \
- mov r0, #1 SEPARATOR \
+ movs r0, #1 SEPARATOR \
pop { r4, pc } SEPARATOR \
END_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond)
diff --git a/lib/builtins/arm/aeabi_idivmod.S b/lib/builtins/arm/aeabi_idivmod.S
index b43ea699058d..0164b15dca18 100644
--- a/lib/builtins/arm/aeabi_idivmod.S
+++ b/lib/builtins/arm/aeabi_idivmod.S
@@ -26,7 +26,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
push {r0, r1, lr}
bl SYMBOL_NAME(__divsi3)
pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
- muls r2, r2, r0 // r2 = quot * denom
+ muls r2, r0, r2 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
#else
diff --git a/lib/builtins/arm/aeabi_ldivmod.S b/lib/builtins/arm/aeabi_ldivmod.S
index 3dae14ef07ec..038ae5d723a3 100644
--- a/lib/builtins/arm/aeabi_ldivmod.S
+++ b/lib/builtins/arm/aeabi_ldivmod.S
@@ -23,23 +23,23 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
- push {r11, lr}
+ push {r6, lr}
sub sp, sp, #16
- add r12, sp, #8
- str r12, [sp]
+ add r6, sp, #8
+ str r6, [sp]
#if defined(__MINGW32__)
- mov r12, r0
- mov r0, r2
- mov r2, r12
- mov r12, r1
- mov r1, r3
- mov r3, r12
+ movs r6, r0
+ movs r0, r2
+ movs r2, r6
+ movs r6, r1
+ movs r1, r3
+ movs r3, r6
#endif
bl SYMBOL_NAME(__divmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
- pop {r11, pc}
+ pop {r6, pc}
END_COMPILERRT_FUNCTION(__aeabi_ldivmod)
NO_EXEC_STACK_DIRECTIVE
diff --git a/lib/builtins/arm/aeabi_memset.S b/lib/builtins/arm/aeabi_memset.S
index 48edd89705be..633f592279b5 100644
--- a/lib/builtins/arm/aeabi_memset.S
+++ b/lib/builtins/arm/aeabi_memset.S
@@ -26,7 +26,7 @@ DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
mov r2, r1
- mov r1, #0
+ movs r1, #0
b memset
END_COMPILERRT_FUNCTION(__aeabi_memclr)
diff --git a/lib/builtins/arm/aeabi_uidivmod.S b/lib/builtins/arm/aeabi_uidivmod.S
index 7098bc6ff92e..a627fc740a02 100644
--- a/lib/builtins/arm/aeabi_uidivmod.S
+++ b/lib/builtins/arm/aeabi_uidivmod.S
@@ -29,7 +29,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
push {r0, r1, lr}
bl SYMBOL_NAME(__aeabi_uidiv)
pop {r1, r2, r3}
- muls r2, r2, r0 // r2 = quot * denom
+ muls r2, r0, r2 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
LOCAL_LABEL(case_denom_larger):
diff --git a/lib/builtins/arm/aeabi_uldivmod.S b/lib/builtins/arm/aeabi_uldivmod.S
index bc26e5674ca0..be343b6bc826 100644
--- a/lib/builtins/arm/aeabi_uldivmod.S
+++ b/lib/builtins/arm/aeabi_uldivmod.S
@@ -23,23 +23,23 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
- push {r11, lr}
+ push {r6, lr}
sub sp, sp, #16
- add r12, sp, #8
- str r12, [sp]
+ add r6, sp, #8
+ str r6, [sp]
#if defined(__MINGW32__)
- mov r12, r0
- mov r0, r2
- mov r2, r12
- mov r12, r1
- mov r1, r3
- mov r3, r12
+ movs r6, r0
+ movs r0, r2
+ movs r2, r6
+ movs r6, r1
+ movs r1, r3
+ movs r3, r6
#endif
bl SYMBOL_NAME(__udivmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
- pop {r11, pc}
+ pop {r6, pc}
END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
NO_EXEC_STACK_DIRECTIVE
diff --git a/lib/builtins/arm/comparesf2.S b/lib/builtins/arm/comparesf2.S
index a5b7e36487d6..ef7091bf3c8f 100644
--- a/lib/builtins/arm/comparesf2.S
+++ b/lib/builtins/arm/comparesf2.S
@@ -69,7 +69,7 @@ DEFINE_COMPILERRT_FUNCTION(__eqsf2)
// the subsequent operations.
#if __ARM_ARCH_ISA_THUMB == 1
lsrs r6, r3, #1
- orrs r6, r2, r6
+ orrs r6, r2
#else
orrs r12, r2, r3, lsr #1
#endif
@@ -194,7 +194,7 @@ DEFINE_COMPILERRT_FUNCTION(__gtsf2)
lsls r2, r0, #1
lsls r3, r1, #1
lsrs r6, r3, #1
- orrs r6, r2, r6
+ orrs r6, r2
beq 1f
movs r6, r0
eors r6, r1
diff --git a/lib/builtins/arm/udivsi3.S b/lib/builtins/arm/udivsi3.S
index fcc472b4f3d9..b97b3080bff4 100644
--- a/lib/builtins/arm/udivsi3.S
+++ b/lib/builtins/arm/udivsi3.S
@@ -37,7 +37,16 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
beq LOCAL_LABEL(divby0)
udiv r0, r0, r1
bx lr
-#else
+
+LOCAL_LABEL(divby0):
+ mov r0, #0
+# ifdef __ARM_EABI__
+ b __aeabi_idiv0
+# else
+ JMP(lr)
+# endif
+
+#else /* ! __ARM_ARCH_EXT_IDIV__ */
cmp r1, #1
bcc LOCAL_LABEL(divby0)
#if __ARM_ARCH_ISA_THUMB == 1
@@ -186,9 +195,12 @@ LOCAL_LABEL(skip_1):
LOCAL_LABEL(divby0):
movs r0, #0
# if defined(__ARM_EABI__)
+ push {r7, lr}
bl __aeabi_idiv0 // due to relocation limit, can't use b.
-# endif
+ pop {r7, pc}
+# else
JMP(lr)
+# endif
#if __ARM_ARCH_ISA_THUMB == 1
@@ -252,16 +264,6 @@ LOCAL_LABEL(div0block):
JMP(lr)
#endif /* __ARM_ARCH_EXT_IDIV__ */
-#if __ARM_ARCH_EXT_IDIV__
-LOCAL_LABEL(divby0):
- mov r0, #0
-# ifdef __ARM_EABI__
- b __aeabi_idiv0
-# else
- JMP(lr)
-# endif
-#endif
-
END_COMPILERRT_FUNCTION(__udivsi3)
NO_EXEC_STACK_DIRECTIVE
diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c
index 4c2ac3b1eb05..25570fc2157a 100644
--- a/lib/builtins/clear_cache.c
+++ b/lib/builtins/clear_cache.c
@@ -82,10 +82,6 @@ uintptr_t GetCurrentProcess(void);
#endif
#endif
-#if defined(__linux__) && defined(__arm__)
- #include <asm/unistd.h>
-#endif
-
/*
* The compiler generates calls to __clear_cache() when creating
* trampoline functions on the stack for use with nested functions.
@@ -94,7 +90,7 @@ uintptr_t GetCurrentProcess(void);
*/
void __clear_cache(void *start, void *end) {
-#if __i386__ || __x86_64__
+#if __i386__ || __x86_64__ || defined(_M_IX86) || defined(_M_X64)
/*
* Intel processors have a unified instruction and data cache
* so there is nothing to do
@@ -108,6 +104,15 @@ void __clear_cache(void *start, void *end) {
sysarch(ARM_SYNC_ICACHE, &arg);
#elif defined(__linux__)
+ /*
+ * We used to include asm/unistd.h for the __ARM_NR_cacheflush define, but
+ * it also brought many other unused defines, as well as a dependency on
+ * kernel headers to be installed.
+ *
+ * This value is stable at least since Linux 3.13 and should remain so for
+ * compatibility reasons, warranting its re-definition here.
+ */
+ #define __ARM_NR_cacheflush 0x0f0002
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
const register int flags __asm("r2") = 0;
diff --git a/lib/builtins/cpu_model.c b/lib/builtins/cpu_model.c
index 9a3737020a4e..5ff6baf43876 100644
--- a/lib/builtins/cpu_model.c
+++ b/lib/builtins/cpu_model.c
@@ -27,6 +27,10 @@
#include <intrin.h>
#endif
+#ifndef __has_attribute
+#define __has_attribute(attr) 0
+#endif
+
enum VendorSignatures {
SIG_INTEL = 0x756e6547 /* Genu */,
SIG_AMD = 0x68747541 /* Auth */
@@ -720,14 +724,17 @@ static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX,
return Features;
}
-#ifdef HAVE_INIT_PRIORITY
-#define CONSTRUCTOR_PRIORITY (101)
+#if defined(HAVE_INIT_PRIORITY)
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__(101)))
+#elif __has_attribute(__constructor__)
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
#else
-#define CONSTRUCTOR_PRIORITY
+// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
+// this runs during initialization.
+#define CONSTRUCTOR_ATTRIBUTE
#endif
-int __cpu_indicator_init(void)
- __attribute__((constructor CONSTRUCTOR_PRIORITY));
+int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;
struct __processor_model {
unsigned int __cpu_vendor;
@@ -742,7 +749,7 @@ struct __processor_model {
the priority set. However, it still runs after ifunc initializers and
needs to be called explicitly there. */
-int __attribute__((constructor CONSTRUCTOR_PRIORITY))
+int CONSTRUCTOR_ATTRIBUTE
__cpu_indicator_init(void) {
unsigned int EAX, EBX, ECX, EDX;
unsigned int MaxLeaf = 5;
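With this change __cpu_indicator_init is still registered as a constructor whenever the attribute is available, and the init priority is only applied when HAVE_INIT_PRIORITY is defined. A small stand-alone illustration of the attribute (hypothetical names, GCC/Clang only):

#include <stdio.h>

static int cpu_features;  /* stand-in for the real __cpu_model globals */

/* Runs before main(), like __cpu_indicator_init when the attribute is
   supported; as the comment above notes, ifunc resolvers run even earlier
   and must call the initializer explicitly. */
__attribute__((__constructor__))
static void detect_cpu(void) {
  cpu_features = 42;  /* pretend CPUID was queried here */
}

int main(void) {
  printf("features detected before main: %d\n", cpu_features);  /* prints 42 */
  return 0;
}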
diff --git a/lib/builtins/divtc3.c b/lib/builtins/divtc3.c
index 04693df471ff..16e538ba4a33 100644
--- a/lib/builtins/divtc3.c
+++ b/lib/builtins/divtc3.c
@@ -17,7 +17,7 @@
/* Returns: the quotient of (a + ib) / (c + id) */
-COMPILER_RT_ABI long double _Complex
+COMPILER_RT_ABI Lcomplex
__divtc3(long double __a, long double __b, long double __c, long double __d)
{
int __ilogbw = 0;
@@ -29,31 +29,31 @@ __divtc3(long double __a, long double __b, long double __c, long double __d)
__d = crt_scalbnl(__d, -__ilogbw);
}
long double __denom = __c * __c + __d * __d;
- long double _Complex z;
- __real__ z = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
- __imag__ z = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (crt_isnan(__real__ z) && crt_isnan(__imag__ z))
+ Lcomplex z;
+ COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_IMAGINARY(z) = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
{
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b)))
{
- __real__ z = crt_copysignl(CRT_INFINITY, __c) * __a;
- __imag__ z = crt_copysignl(CRT_INFINITY, __c) * __b;
+ COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
+ COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
}
else if ((crt_isinf(__a) || crt_isinf(__b)) &&
crt_isfinite(__c) && crt_isfinite(__d))
{
__a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a);
__b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b);
- __real__ z = CRT_INFINITY * (__a * __c + __b * __d);
- __imag__ z = CRT_INFINITY * (__b * __c - __a * __d);
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
}
else if (crt_isinf(__logbw) && __logbw > 0.0 &&
crt_isfinite(__a) && crt_isfinite(__b))
{
__c = crt_copysignl(crt_isinf(__c) ? 1.0 : 0.0, __c);
__d = crt_copysignl(crt_isinf(__d) ? 1.0 : 0.0, __d);
- __real__ z = 0.0 * (__a * __c + __b * __d);
- __imag__ z = 0.0 * (__b * __c - __a * __d);
+ COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
}
}
return z;
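For reference, once the scalbnl rescaling is undone, the quotient computed above is the textbook identity

  (a + ib) / (c + id) = ((a*c + b*d) + i*(b*c - a*d)) / (c*c + d*d)

with the real and imaginary parts stored through COMPLEX_REAL(z) and COMPLEX_IMAGINARY(z); the __ilogbw scaling of c and d only exists to keep the denominator c*c + d*d from overflowing or underflowing.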
diff --git a/lib/builtins/ffssi2.c b/lib/builtins/ffssi2.c
new file mode 100644
index 000000000000..e5180eff5e08
--- /dev/null
+++ b/lib/builtins/ffssi2.c
@@ -0,0 +1,29 @@
+/* ===-- ffssi2.c - Implement __ffssi2 -------------------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __ffssi2 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Returns: the index of the least significant 1-bit in a, or
+ * the value zero if a is zero. The least significant bit is index one.
+ */
+
+COMPILER_RT_ABI si_int
+__ffssi2(si_int a)
+{
+ if (a == 0)
+ {
+ return 0;
+ }
+ return __builtin_ctz(a) + 1;
+}
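A quick usage sketch for the new builtin, assuming the program links against the compiler-rt builtins library and that si_int is a plain 32-bit int on the target:

#include <assert.h>

int __ffssi2(int a);  /* provided by the builtins library */

int main(void) {
  assert(__ffssi2(0) == 0);     /* no bits set */
  assert(__ffssi2(1) == 1);     /* bit 0: index one */
  assert(__ffssi2(0x18) == 4);  /* lowest set bit is bit 3 */
  return 0;
}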
diff --git a/lib/builtins/int_lib.h b/lib/builtins/int_lib.h
index 39eee18d9149..8a202dde70f1 100644
--- a/lib/builtins/int_lib.h
+++ b/lib/builtins/int_lib.h
@@ -32,7 +32,11 @@
#if __ARM_EABI__
# define ARM_EABI_FNALIAS(aeabi_name, name) \
void __aeabi_##aeabi_name() __attribute__((alias("__" #name)));
-# define COMPILER_RT_ABI __attribute__((pcs("aapcs")))
+# ifdef COMPILER_RT_ARMHF_TARGET
+# define COMPILER_RT_ABI
+# else
+# define COMPILER_RT_ABI __attribute__((pcs("aapcs")))
+# endif
#else
# define ARM_EABI_FNALIAS(aeabi_name, name)
# define COMPILER_RT_ABI
diff --git a/lib/builtins/os_version_check.c b/lib/builtins/os_version_check.c
new file mode 100644
index 000000000000..74ade2f5b966
--- /dev/null
+++ b/lib/builtins/os_version_check.c
@@ -0,0 +1,178 @@
+/* ===-- os_version_check.c - OS version checking -------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements the function __isOSVersionAtLeast, used by
+ * Objective-C's @available
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifdef __APPLE__
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <dispatch/dispatch.h>
+#include <TargetConditionals.h>
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* These three variables hold the host's OS version. */
+static int32_t GlobalMajor, GlobalMinor, GlobalSubminor;
+static dispatch_once_t DispatchOnceCounter;
+
+/* Find and parse the SystemVersion.plist file. */
+static void parseSystemVersionPList(void *Unused) {
+ (void)Unused;
+ /* Load CoreFoundation dynamically */
+ const void *NullAllocator = dlsym(RTLD_DEFAULT, "kCFAllocatorNull");
+ if (!NullAllocator)
+ return;
+ const CFAllocatorRef kCFAllocatorNull =
+ *(const CFAllocatorRef *)NullAllocator;
+ typeof(CFDataCreateWithBytesNoCopy) *CFDataCreateWithBytesNoCopyFunc =
+ (typeof(CFDataCreateWithBytesNoCopy) *)dlsym(
+ RTLD_DEFAULT, "CFDataCreateWithBytesNoCopy");
+ if (!CFDataCreateWithBytesNoCopyFunc)
+ return;
+ typeof(CFPropertyListCreateWithData) *CFPropertyListCreateWithDataFunc =
+ (typeof(CFPropertyListCreateWithData) *)dlsym(
+ RTLD_DEFAULT, "CFPropertyListCreateWithData");
+ /* CFPropertyListCreateWithData was introduced only in macOS 10.6+, so it
+ * will be NULL on earlier OS versions. */
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+ typeof(CFPropertyListCreateFromXMLData) *CFPropertyListCreateFromXMLDataFunc =
+ (typeof(CFPropertyListCreateFromXMLData) *)dlsym(
+ RTLD_DEFAULT, "CFPropertyListCreateFromXMLData");
+#pragma clang diagnostic pop
+ /* CFPropertyListCreateFromXMLDataFunc is deprecated in macOS 10.10, so it
+ * might be NULL in future OS versions. */
+ if (!CFPropertyListCreateWithDataFunc && !CFPropertyListCreateFromXMLDataFunc)
+ return;
+ typeof(CFStringCreateWithCStringNoCopy) *CFStringCreateWithCStringNoCopyFunc =
+ (typeof(CFStringCreateWithCStringNoCopy) *)dlsym(
+ RTLD_DEFAULT, "CFStringCreateWithCStringNoCopy");
+ if (!CFStringCreateWithCStringNoCopyFunc)
+ return;
+ typeof(CFDictionaryGetValue) *CFDictionaryGetValueFunc =
+ (typeof(CFDictionaryGetValue) *)dlsym(RTLD_DEFAULT,
+ "CFDictionaryGetValue");
+ if (!CFDictionaryGetValueFunc)
+ return;
+ typeof(CFGetTypeID) *CFGetTypeIDFunc =
+ (typeof(CFGetTypeID) *)dlsym(RTLD_DEFAULT, "CFGetTypeID");
+ if (!CFGetTypeIDFunc)
+ return;
+ typeof(CFStringGetTypeID) *CFStringGetTypeIDFunc =
+ (typeof(CFStringGetTypeID) *)dlsym(RTLD_DEFAULT, "CFStringGetTypeID");
+ if (!CFStringGetTypeIDFunc)
+ return;
+ typeof(CFStringGetCString) *CFStringGetCStringFunc =
+ (typeof(CFStringGetCString) *)dlsym(RTLD_DEFAULT, "CFStringGetCString");
+ if (!CFStringGetCStringFunc)
+ return;
+ typeof(CFRelease) *CFReleaseFunc =
+ (typeof(CFRelease) *)dlsym(RTLD_DEFAULT, "CFRelease");
+ if (!CFReleaseFunc)
+ return;
+
+ char *PListPath = "/System/Library/CoreServices/SystemVersion.plist";
+
+#if TARGET_OS_SIMULATOR
+ char *PListPathPrefix = getenv("IPHONE_SIMULATOR_ROOT");
+ if (!PListPathPrefix)
+ return;
+ char FullPath[strlen(PListPathPrefix) + strlen(PListPath) + 1];
+ strcpy(FullPath, PListPathPrefix);
+ strcat(FullPath, PListPath);
+ PListPath = FullPath;
+#endif
+ FILE *PropertyList = fopen(PListPath, "r");
+ if (!PropertyList)
+ return;
+
+ /* Dynamically allocated stuff. */
+ CFDictionaryRef PListRef = NULL;
+ CFDataRef FileContentsRef = NULL;
+ UInt8 *PListBuf = NULL;
+
+ fseek(PropertyList, 0, SEEK_END);
+ long PListFileSize = ftell(PropertyList);
+ if (PListFileSize < 0)
+ goto Fail;
+ rewind(PropertyList);
+
+ PListBuf = malloc((size_t)PListFileSize);
+ if (!PListBuf)
+ goto Fail;
+
+ size_t NumRead = fread(PListBuf, 1, (size_t)PListFileSize, PropertyList);
+ if (NumRead != (size_t)PListFileSize)
+ goto Fail;
+
+ /* Get the file buffer into CF's format. We pass in a null allocator here
+ * because we free PListBuf ourselves */
+ FileContentsRef = (*CFDataCreateWithBytesNoCopyFunc)(
+ NULL, PListBuf, (CFIndex)NumRead, kCFAllocatorNull);
+ if (!FileContentsRef)
+ goto Fail;
+
+ if (CFPropertyListCreateWithDataFunc)
+ PListRef = (*CFPropertyListCreateWithDataFunc)(
+ NULL, FileContentsRef, kCFPropertyListImmutable, NULL, NULL);
+ else
+ PListRef = (*CFPropertyListCreateFromXMLDataFunc)(
+ NULL, FileContentsRef, kCFPropertyListImmutable, NULL);
+ if (!PListRef)
+ goto Fail;
+
+ CFStringRef ProductVersion = (*CFStringCreateWithCStringNoCopyFunc)(
+ NULL, "ProductVersion", kCFStringEncodingASCII, kCFAllocatorNull);
+ if (!ProductVersion)
+ goto Fail;
+ CFTypeRef OpaqueValue = (*CFDictionaryGetValueFunc)(PListRef, ProductVersion);
+ (*CFReleaseFunc)(ProductVersion);
+ if (!OpaqueValue ||
+ (*CFGetTypeIDFunc)(OpaqueValue) != (*CFStringGetTypeIDFunc)())
+ goto Fail;
+
+ char VersionStr[32];
+ if (!(*CFStringGetCStringFunc)((CFStringRef)OpaqueValue, VersionStr,
+ sizeof(VersionStr), kCFStringEncodingUTF8))
+ goto Fail;
+ sscanf(VersionStr, "%d.%d.%d", &GlobalMajor, &GlobalMinor, &GlobalSubminor);
+
+Fail:
+ if (PListRef)
+ (*CFReleaseFunc)(PListRef);
+ if (FileContentsRef)
+ (*CFReleaseFunc)(FileContentsRef);
+ free(PListBuf);
+ fclose(PropertyList);
+}
+
+int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
+ /* Populate the global version variables, if they haven't already. */
+ dispatch_once_f(&DispatchOnceCounter, NULL, parseSystemVersionPList);
+
+ if (Major < GlobalMajor) return 1;
+ if (Major > GlobalMajor) return 0;
+ if (Minor < GlobalMinor) return 1;
+ if (Minor > GlobalMinor) return 0;
+ return Subminor <= GlobalSubminor;
+}
+
+#else
+
+/* Silence an empty translation unit warning. */
+typedef int unused;
+
+#endif
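The comparison at the end of __isOSVersionAtLeast is a plain lexicographic major/minor/subminor check against the values parsed from SystemVersion.plist. A small sketch with the host version passed in explicitly, just to pin down the semantics:

#include <assert.h>

/* Mirrors the logic above: returns 1 when the requested maj.min.sub is not
   newer than the host version hmaj.hmin.hsub. */
static int version_at_least(int hmaj, int hmin, int hsub,
                            int maj, int min, int sub) {
  if (maj < hmaj) return 1;
  if (maj > hmaj) return 0;
  if (min < hmin) return 1;
  if (min > hmin) return 0;
  return sub <= hsub;
}

int main(void) {
  /* Pretend the host is running 10.12.6. */
  assert(version_at_least(10, 12, 6, 10, 11, 0) == 1);  /* older request */
  assert(version_at_least(10, 12, 6, 10, 12, 6) == 1);  /* exact match */
  assert(version_at_least(10, 12, 6, 10, 13, 0) == 0);  /* newer request */
  return 0;
}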
diff --git a/lib/builtins/x86_64/floatdidf.c b/lib/builtins/x86_64/floatdidf.c
index 388404e5e089..dead0ed42c65 100644
--- a/lib/builtins/x86_64/floatdidf.c
+++ b/lib/builtins/x86_64/floatdidf.c
@@ -4,7 +4,7 @@
/* double __floatdidf(di_int a); */
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(_M_X64)
#include "../int_lib.h"
diff --git a/lib/builtins/x86_64/floatdisf.c b/lib/builtins/x86_64/floatdisf.c
index 96c3728e92c3..99d5621c6327 100644
--- a/lib/builtins/x86_64/floatdisf.c
+++ b/lib/builtins/x86_64/floatdisf.c
@@ -2,7 +2,7 @@
* License. See LICENSE.TXT for details.
*/
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(_M_X64)
#include "../int_lib.h"
diff --git a/lib/cfi/cfi.cc b/lib/cfi/cfi.cc
index d463ca8daf50..f720230a70be 100644
--- a/lib/cfi/cfi.cc
+++ b/lib/cfi/cfi.cc
@@ -188,12 +188,14 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
}
}
if (!dynamic) return 0;
- uptr strtab = 0, symtab = 0;
+ uptr strtab = 0, symtab = 0, strsz = 0;
for (const ElfW(Dyn) *p = dynamic; p->d_tag != PT_NULL; ++p) {
if (p->d_tag == DT_SYMTAB)
symtab = p->d_un.d_ptr;
else if (p->d_tag == DT_STRTAB)
strtab = p->d_un.d_ptr;
+ else if (p->d_tag == DT_STRSZ)
+ strsz = p->d_un.d_ptr;
}
if (symtab > strtab) {
@@ -209,7 +211,8 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
if (phdr->p_type == PT_LOAD) {
uptr beg = info->dlpi_addr + phdr->p_vaddr;
uptr end = beg + phdr->p_memsz;
- if (strtab >= beg && strtab < end && symtab >= beg && symtab < end)
+ if (strtab >= beg && strtab + strsz < end && symtab >= beg &&
+ symtab < end)
break;
}
}
@@ -222,9 +225,14 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
for (const ElfW(Sym) *p = (const ElfW(Sym) *)symtab; (ElfW(Addr))p < strtab;
++p) {
+ // There is no reliable way to find the end of the symbol table. In
+  // lld-produced files, there are other sections between symtab and strtab.
+ // Stop looking when the symbol name is not inside strtab.
+ if (p->st_name >= strsz) break;
char *name = (char*)(strtab + p->st_name);
if (strcmp(name, "__cfi_check") == 0) {
- assert(p->st_info == ELF32_ST_INFO(STB_GLOBAL, STT_FUNC));
+ assert(p->st_info == ELF32_ST_INFO(STB_GLOBAL, STT_FUNC) ||
+ p->st_info == ELF32_ST_INFO(STB_WEAK, STT_FUNC));
uptr addr = info->dlpi_addr + p->st_value;
return addr;
}
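
The cfi.cc change above bounds the symbol-table walk with DT_STRSZ: in lld-produced files other sections can sit between .dynsym and .dynstr, so the scan stops as soon as a symbol's name offset falls outside the string table. A reduced sketch of that bound check, using simplified stand-in types for the ElfW(...) machinery:

    #include <cstdint>
    #include <cstring>

    struct Sym { uint32_t st_name; };   // simplified stand-in for ElfW(Sym)

    // Stop scanning once a symbol's name offset no longer falls inside
    // [0, strsz); otherwise the walk can run past the real symbol table.
    const char *find_name(const Sym *symtab, size_t nsyms,
                          const char *strtab, size_t strsz,
                          const char *wanted) {
      for (size_t i = 0; i < nsyms; ++i) {
        if (symtab[i].st_name >= strsz) break;   // name not inside strtab
        const char *name = strtab + symtab[i].st_name;
        if (std::strcmp(name, wanted) == 0) return name;
      }
      return nullptr;
    }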
diff --git a/lib/esan/esan_interceptors.cpp b/lib/esan/esan_interceptors.cpp
index 9ae5482a3cad..9740f4dae8fa 100644
--- a/lib/esan/esan_interceptors.cpp
+++ b/lib/esan/esan_interceptors.cpp
@@ -304,20 +304,6 @@ INTERCEPTOR(int, unlink, char *path) {
return REAL(unlink)(path);
}
-INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, f);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size * nmemb);
- return REAL(fread)(ptr, size, nmemb, f);
-}
-
-INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, f);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, p, size * nmemb);
- return REAL(fwrite)(p, size, nmemb, f);
-}
-
INTERCEPTOR(int, puts, const char *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc
index 91abecf6de5f..e4f3d358f40c 100644
--- a/lib/interception/interception_win.cc
+++ b/lib/interception/interception_win.cc
@@ -878,6 +878,8 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
IMAGE_DATA_DIRECTORY *export_directory =
&headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
+ if (export_directory->Size == 0)
+ return 0;
RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
export_directory->VirtualAddress);
RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
diff --git a/lib/interception/tests/interception_win_test.cc b/lib/interception/tests/interception_win_test.cc
index 684ee0303559..a705768d6d5d 100644
--- a/lib/interception/tests/interception_win_test.cc
+++ b/lib/interception/tests/interception_win_test.cc
@@ -613,6 +613,13 @@ TEST(Interception, PatchableFunctionPadding) {
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));
}
+TEST(Interception, EmptyExportTable) {
+ // We try to get a pointer to a function from an executable that doesn't
+ // export any symbol (empty export table).
+ uptr FunPtr = InternalGetProcAddress((void *)GetModuleHandleA(0), "example");
+ EXPECT_EQ(0U, FunPtr);
+}
+
} // namespace __interception
#endif // SANITIZER_WINDOWS
diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt
index b782f2421dc2..55c825f5c6ea 100644
--- a/lib/lsan/CMakeLists.txt
+++ b/lib/lsan/CMakeLists.txt
@@ -5,12 +5,15 @@ append_rtti_flag(OFF LSAN_CFLAGS)
set(LSAN_COMMON_SOURCES
lsan_common.cc
- lsan_common_linux.cc)
+ lsan_common_linux.cc
+ lsan_common_mac.cc)
set(LSAN_SOURCES
lsan.cc
lsan_allocator.cc
+ lsan_linux.cc
lsan_interceptors.cc
+ lsan_malloc_mac.cc
lsan_preinit.cc
lsan_thread.cc)
@@ -24,16 +27,34 @@ add_compiler_rt_object_libraries(RTLSanCommon
if(COMPILER_RT_HAS_LSAN)
add_compiler_rt_component(lsan)
- foreach(arch ${LSAN_SUPPORTED_ARCH})
+ if(APPLE)
+ add_weak_symbols("lsan" WEAK_SYMBOL_LINK_FLAGS)
+ add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
+
add_compiler_rt_runtime(clang_rt.lsan
- STATIC
- ARCHS ${arch}
+ SHARED
+ OS ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${LSAN_SUPPORTED_ARCH}
SOURCES ${LSAN_SOURCES}
- $<TARGET_OBJECTS:RTInterception.${arch}>
- $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
- $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
- $<TARGET_OBJECTS:RTLSanCommon.${arch}>
+ OBJECT_LIBS RTLSanCommon
+ RTInterception
+ RTSanitizerCommon
+ RTSanitizerCommonLibc
CFLAGS ${LSAN_CFLAGS}
+ LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS}
PARENT_TARGET lsan)
- endforeach()
+ else()
+ foreach(arch ${LSAN_SUPPORTED_ARCH})
+ add_compiler_rt_runtime(clang_rt.lsan
+ STATIC
+ ARCHS ${arch}
+ SOURCES ${LSAN_SOURCES}
+ $<TARGET_OBJECTS:RTInterception.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+ $<TARGET_OBJECTS:RTLSanCommon.${arch}>
+ CFLAGS ${LSAN_CFLAGS}
+ PARENT_TARGET lsan)
+ endforeach()
+ endif()
endif()
diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc
index c7c34299147d..6c4767d61252 100644
--- a/lib/lsan/lsan.cc
+++ b/lib/lsan/lsan.cc
@@ -76,6 +76,7 @@ extern "C" void __lsan_init() {
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
+ ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
diff --git a/lib/lsan/lsan.h b/lib/lsan/lsan.h
index ec5eb93dc155..1061d2fcfde7 100644
--- a/lib/lsan/lsan.h
+++ b/lib/lsan/lsan.h
@@ -41,6 +41,13 @@
namespace __lsan {
void InitializeInterceptors();
+void ReplaceSystemMalloc();
+
+#define ENSURE_LSAN_INITED do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+} while (0)
} // namespace __lsan
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index c805a39e1cc5..011979eee396 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -24,44 +24,18 @@
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
-
-struct ChunkMetadata {
- u8 allocated : 8; // Must be first.
- ChunkTag tag : 2;
- uptr requested_size : 54;
- u32 stack_trace_id;
-};
-
-#if defined(__mips64) || defined(__aarch64__)
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1UL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
-static const uptr kRegionSizeLog = 20;
-static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-typedef CompactSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
- sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
- PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-
-struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = 0x600000000000ULL;
- static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
- static const uptr kMetadataSize = sizeof(ChunkMetadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef NoOpMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
-};
-
-typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
-static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.InitLinkerInitialized(
@@ -70,7 +44,7 @@ void InitializeAllocator() {
}
void AllocatorThreadFinish() {
- allocator.SwallowCache(&cache);
+ allocator.SwallowCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
@@ -102,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return nullptr;
}
- void *p = allocator.Allocate(&cache, size, alignment, false);
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -116,7 +90,7 @@ void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
@@ -124,17 +98,17 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
return nullptr;
}
- p = allocator.Reallocate(&cache, p, new_size, alignment);
+ p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
- *begin = (uptr)&cache;
- *end = *begin + sizeof(cache);
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
}
uptr GetMallocUsableSize(const void *p) {
@@ -143,6 +117,37 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ return Allocate(stack, size, alignment, kAlwaysClearMemory);
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return Allocate(stack, size, 1, kAlwaysClearMemory);
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return Reallocate(stack, p, size, 1);
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ if (size == 0)
+ size = GetPageSizeCached();
+ return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
///// Interface to the common LSan module. /////
void LockAllocator() {
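
Note that lsan_calloc above multiplies nmemb by size without its own overflow guard; in this patch the guard stays in the calloc interceptor, which checks CallocShouldReturnNullDueToOverflow before lsan_calloc is reached (see lsan_interceptors.cc below). A minimal sketch of the kind of predicate such a guard evaluates (the helper name here is illustrative, not the sanitizer's):

    #include <cstddef>
    #include <cstdint>

    // Illustrative overflow predicate: nmemb * size would wrap if size != 0
    // and nmemb exceeds SIZE_MAX / size.
    static bool calloc_would_overflow(size_t nmemb, size_t size) {
      return size != 0 && nmemb > SIZE_MAX / size;
    }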
diff --git a/lib/lsan/lsan_allocator.h b/lib/lsan/lsan_allocator.h
index f564601193bd..e5def17d4ee9 100644
--- a/lib/lsan/lsan_allocator.h
+++ b/lib/lsan/lsan_allocator.h
@@ -15,8 +15,10 @@
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
namespace __lsan {
@@ -34,6 +36,53 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
+
+#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__arm__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
+ sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
+ PrimaryAllocator;
+#elif defined(__x86_64__)
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+
+AllocatorCache *GetAllocatorCache();
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H
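
The ChunkMetadata bit-field above is sized so the fields before stack_trace_id fill exactly one 64-bit unit on both word sizes, which is why the 32-bit variant carries an explicit 22-bit padding member. A compile-time check of that arithmetic (widths only; the final struct layout is left to the ABI):

    static_assert(8 + 2 + 54 == 64,
                  "64-bit case: allocated + tag + requested_size");
    static_assert(8 + 2 + 32 + 22 == 64,
                  "32-bit case: allocated + tag + requested_size + padding");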
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index f0554526b76f..6cc73749812b 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -32,20 +32,15 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL int disable_counter;
-bool DisabledInThisThread() { return disable_counter > 0; }
-void DisableInThisThread() { disable_counter++; }
-void EnableInThisThread() {
- if (!disable_counter && common_flags()->detect_leaks) {
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
- disable_counter--;
}
-Flags lsan_flags;
-
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
@@ -180,6 +175,23 @@ void ScanRangeForPointers(uptr begin, uptr end,
}
}
+// Scans a global range for pointers
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
@@ -206,11 +218,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
continue;
}
uptr sp;
- bool have_registers =
- (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
- if (!have_registers) {
- Report("Unable to get registers from thread %d.\n");
- // If unable to get SP, consider the entire stack to be reachable.
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %d.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
sp = stack_begin;
}
@@ -258,7 +272,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
}
- if (dtls) {
+ if (dtls && !DTLSInDestruction(dtls)) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
@@ -268,6 +282,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
+ } else {
+ // We are handling a thread with DTLS under destruction. Log about
+ // this and continue.
+ LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
}
}
}
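
ScanGlobalRange above carves the allocator's own address range out of a global region so the leak checker does not treat its internal bookkeeping as roots. A self-contained sketch of the carve-out with hypothetical addresses:

    #include <cstdio>
    typedef unsigned long uptr;

    // Stand-in for ScanRangeForPointers, just to show which ranges get scanned.
    static void Scan(uptr b, uptr e, const char *what) {
      std::printf("scan [%#lx, %#lx) %s\n", b, e, what);
    }

    // Mirrors the carve-out in ScanGlobalRange with hypothetical values.
    int main() {
      uptr begin = 0x1000, end = 0x9000;
      uptr a_begin = 0x4000, a_end = 0x6000;   // allocator's own range
      if (begin <= a_begin && a_begin < end) {
        if (begin < a_begin) Scan(begin, a_begin, "GLOBAL");   // before it
        if (a_end < end) Scan(a_end, end, "GLOBAL");           // after it
      } else {
        Scan(begin, end, "GLOBAL");   // allocator range not inside: scan all
      }
      return 0;
    }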
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index 890ce6562c82..919be0ec2662 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -22,8 +22,23 @@
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
- && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+// LeakSanitizer relies on some of Glibc's internals (e.g. TLS machinery) and
+// is therefore mostly supported on Linux. Also, LSan doesn't like 32 bit
+// architectures because their "small" (4 byte) pointer size leads to a high
+// false negative ratio on large leaks. But we still want to have it for some
+// 32 bit arches (e.g. x86), see https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function as well as (probably) adjust the TLS machinery for
+// the new architecture inside the sanitizer library.
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+ (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && \
+ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && \
+ SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@@ -44,6 +59,8 @@ enum ChunkTag {
kIgnored = 3
};
+const u32 kInvalidTid = (u32) -1;
+
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
@@ -107,6 +124,7 @@ void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
@@ -117,6 +135,7 @@ enum IgnoreObjectResult {
// Functions called from the parent tool.
void InitCommonLsan();
void DoLeakCheck();
+void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
@@ -129,13 +148,36 @@ struct ScopedInterceptorDisabler {
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
+// According to Itanium C++ ABI array cookie is a one word containing
+// size of allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to ARM C++ ABI array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
// Special case for "new T[0]" where T is a type with DTOR.
-// new T[0] will allocate one word for the array size (0) and store a pointer
-// to the end of allocated chunk.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and store a pointer to the end of allocated chunk. The actual cookie layout
+// varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
- return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
- *reinterpret_cast<uptr *>(chunk_beg) == 0;
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
}
// The following must be implemented in the parent tool.
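
The cookie check is easiest to see with concrete values: for new T[0] where T has a destructor, the Itanium ABI stores one word holding the element count (0) and hands the program a pointer just past it, so that pointer must still keep the chunk reachable. A runnable illustration of the one-word case (the ARM variant merely adds an element-size word in front):

    #include <cassert>
    #include <cstdint>
    typedef uintptr_t uptr;

    // Mirrors IsItaniumABIArrayCookie against a hand-built cookie.
    static bool IsItaniumCookie(uptr chunk_beg, uptr chunk_size, uptr addr) {
      return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
             *reinterpret_cast<uptr *>(chunk_beg) == 0;
    }

    int main() {
      uptr cookie = 0;                        // element count stored by new T[0]
      uptr chunk_beg = reinterpret_cast<uptr>(&cookie);
      uptr addr = chunk_beg + sizeof(uptr);   // pointer the program actually holds
      assert(IsItaniumCookie(chunk_beg, sizeof(uptr), addr));
      return 0;
    }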
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index f6154d8b97d1..0d1e998a5cfe 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -34,6 +34,17 @@ static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName);
}
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
void InitializePlatformSpecificModules() {
ListOfModules modules;
modules.init();
@@ -67,20 +78,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
- uptr allocator_begin = 0, allocator_end = 0;
- GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
- if (begin <= allocator_begin && allocator_begin < end) {
- CHECK_LE(allocator_begin, allocator_end);
- CHECK_LE(allocator_end, end);
- if (begin < allocator_begin)
- ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
- kReachable);
- if (allocator_end < end)
- ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
- kReachable);
- } else {
- ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
- }
+ ScanGlobalRange(begin, end, frontier);
}
return 0;
}
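
The thread-local disable_counter moved into this Linux-specific file is what backs the public __lsan_disable() / __lsan_enable() pair (and the ScopedDisabler RAII helper); an unmatched enable now reports through DisableCounterUnderflow(). A short usage sketch against the public interface header:

    #include <sanitizer/lsan_interface.h>
    #include <cstdlib>

    // Allocations made while the counter is raised are treated as intentional
    // and are not reported as leaks.
    void build_permanent_cache() {
      __lsan_disable();
      void *cache = std::malloc(128);   // deliberately kept for process lifetime
      (void)cache;
      __lsan_enable();                  // must balance the __lsan_disable above
    }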
diff --git a/lib/lsan/lsan_common_mac.cc b/lib/lsan/lsan_common_mac.cc
new file mode 100644
index 000000000000..022e73937895
--- /dev/null
+++ b/lib/lsan/lsan_common_mac.cc
@@ -0,0 +1,126 @@
+//=-- lsan_common_mac.cc --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "lsan_allocator.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+
+typedef struct {
+ int disable_counter;
+ u32 current_thread_id;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread id,
+// so we can't destroy it until it's been used and reset to an invalid tid.
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread_id != kInvalidTid)
+ pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread_id = kInvalidTid;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ CHECK(data);
+ return data->current_thread_id;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {
+ if (flags()->use_tls) {
+ Report("use_tls=1 is not supported on Darwin.\n");
+ Die();
+ }
+}
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ // Even when global scanning is disabled, we still need to scan
+ // system libraries for stashed pointers
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+ if (range.executable) continue;
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ CHECK(0 && "unimplemented");
+}
+
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ CHECK(0 && "unimplemented");
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
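
On Darwin the per-thread LSan state lives behind a pthread key instead of initial-exec TLS, and restore_tid_data re-registers the value from the key destructor so it survives until the thread id has been reset. A minimal, LSan-independent sketch of that re-registration trick (POSIX re-runs key destructors, up to an implementation-defined number of passes, while values remain non-NULL):

    #include <pthread.h>

    struct PerThread { bool done; };
    static pthread_key_t key;

    static void dtor(void *p) {
      PerThread *d = static_cast<PerThread *>(p);
      if (!d->done)
        pthread_setspecific(key, d);   // keep the slot alive for another pass
      // else: final pass, the data may be released here
    }

    static void init_key() { pthread_key_create(&key, dtor); }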
diff --git a/lib/lsan/lsan_flags.inc b/lib/lsan/lsan_flags.inc
index e390e2ae5a1b..8135bdcff01a 100644
--- a/lib/lsan/lsan_flags.inc
+++ b/lib/lsan/lsan_flags.inc
@@ -30,7 +30,7 @@ LSAN_FLAG(bool, use_globals, true,
"Root set: include global variables (.data and .bss)")
LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
-LSAN_FLAG(bool, use_tls, true,
+LSAN_FLAG(bool, use_tls, !SANITIZER_MAC,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index 12190175949f..fe1f49bcdeba 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -21,12 +21,15 @@
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
+#include <stddef.h>
+
using namespace __lsan;
extern "C" {
@@ -37,29 +40,22 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define ENSURE_LSAN_INITED do { \
- CHECK(!lsan_init_is_running); \
- if (!lsan_inited) \
- __lsan_init(); \
-} while (0)
-
///// Malloc/free interceptors. /////
-const bool kAlwaysClearMemory = true;
-
namespace std {
struct nothrow_t;
}
+#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, 1, kAlwaysClearMemory);
+ return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
ENSURE_LSAN_INITED;
- Deallocate(p);
+ lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
@@ -77,28 +73,42 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- size *= nmemb;
- return Allocate(stack, size, 1, true);
+ return lsan_calloc(nmemb, size, stack);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Reallocate(stack, q, size, 1);
+ return lsan_realloc(q, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ *memptr = lsan_memalign(alignment, size, stack);
+ // FIXME: Return ENOMEM if user requested more than max alloc size.
+ return 0;
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_valloc(size, stack);
}
+#endif
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
+ return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
}
@@ -108,32 +118,27 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
#endif // SANITIZER_INTERCEPT_MEMALIGN
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
-}
-
-INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- ENSURE_LSAN_INITED;
- GET_STACK_TRACE_MALLOC;
- *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
- // FIXME: Return ENOMEM if user requested more than max alloc size.
- return 0;
-}
-
-INTERCEPTOR(void*, valloc, uptr size) {
- ENSURE_LSAN_INITED;
- GET_STACK_TRACE_MALLOC;
- if (size == 0)
- size = GetPageSizeCached();
- return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+ return lsan_memalign(alignment, size, stack);
}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr);
}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
struct fake_mallinfo {
@@ -186,13 +191,13 @@ INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \
ENSURE_LSAN_INITED; \
@@ -277,7 +282,8 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
- int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
+ int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
+ IsStateDetached(detached));
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
@@ -307,11 +313,11 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(realloc);
LSAN_MAYBE_INTERCEPT_MEMALIGN;
LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
- INTERCEPT_FUNCTION(aligned_alloc);
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
INTERCEPT_FUNCTION(posix_memalign);
INTERCEPT_FUNCTION(valloc);
LSAN_MAYBE_INTERCEPT_PVALLOC;
- INTERCEPT_FUNCTION(malloc_usable_size);
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
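
The LSAN_MAYBE_INTERCEPT_* macros used above keep InitializeInterceptors() free of #ifdef blocks: each optional interceptor expands to either an INTERCEPT_FUNCTION call or to nothing, depending on the SANITIZER_INTERCEPT_* feature test. A reduced sketch of the idiom with a hypothetical feature flag and function name:

    #include <cstdio>

    static void intercept(const char *name) {   // stand-in for INTERCEPT_FUNCTION
      std::printf("intercepting %s\n", name);
    }

    #define HAS_FROBNICATE 1                     // stand-in for SANITIZER_INTERCEPT_*

    #if HAS_FROBNICATE
    # define MAYBE_INTERCEPT_FROBNICATE intercept("frobnicate")
    #else
    # define MAYBE_INTERCEPT_FROBNICATE
    #endif

    int main() {
      MAYBE_INTERCEPT_FROBNICATE;                // a call here, or nothing at all
      return 0;
    }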
diff --git a/lib/lsan/lsan_linux.cc b/lib/lsan/lsan_linux.cc
new file mode 100644
index 000000000000..c9749c745655
--- /dev/null
+++ b/lib/lsan/lsan_linux.cc
@@ -0,0 +1,33 @@
+//=-- lsan_linux.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX
+
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+u32 GetCurrentThread() { return current_thread_tid; }
+void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX
diff --git a/lib/lsan/lsan_malloc_mac.cc b/lib/lsan/lsan_malloc_mac.cc
new file mode 100644
index 000000000000..9c1dacc055bd
--- /dev/null
+++ b/lib/lsan/lsan_malloc_mac.cc
@@ -0,0 +1,55 @@
+//===-- lsan_malloc_mac.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_MAC
diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc
index 5dff4f748106..09eeb9c24982 100644
--- a/lib/lsan/lsan_thread.cc
+++ b/lib/lsan/lsan_thread.cc
@@ -19,13 +19,11 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
+#include "lsan_common.h"
namespace __lsan {
-const u32 kInvalidTid = (u32) -1;
-
static ThreadRegistry *thread_registry;
-static THREADLOCAL u32 current_thread_tid = kInvalidTid;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
@@ -41,14 +39,6 @@ void InitializeThreadRegistry() {
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
-u32 GetCurrentThread() {
- return current_thread_tid;
-}
-
-void SetCurrentThread(u32 tid) {
- current_thread_tid = tid;
-}
-
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
@@ -97,11 +87,12 @@ void ThreadStart(u32 tid, uptr os_id) {
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
- thread_registry->StartThread(tid, os_id, &args);
+ thread_registry->StartThread(tid, os_id, /*workerthread*/ false, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
+ SetCurrentThread(kInvalidTid);
}
ThreadContext *CurrentThreadContext() {
diff --git a/lib/lsan/weak_symbols.txt b/lib/lsan/weak_symbols.txt
new file mode 100644
index 000000000000..da4f994da865
--- /dev/null
+++ b/lib/lsan/weak_symbols.txt
@@ -0,0 +1,2 @@
+___lsan_default_suppressions
+___lsan_is_turned_off
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index 6447bb1b270e..15543bd912d6 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -123,14 +123,6 @@ static void *AllocateFromLocalPool(uptr size_in_bytes) {
#define CHECK_UNPOISONED_STRING(x, n) \
CHECK_UNPOISONED_STRING_OF_LEN((x), internal_strlen(x), (n))
-INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
- ENSURE_MSAN_INITED();
- SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
- if (res > 0)
- __msan_unpoison(ptr, res *size);
- return res;
-}
-
#if !SANITIZER_FREEBSD
INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb,
void *file) {
@@ -580,6 +572,13 @@ INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
return res;
}
+INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ SIZE_T res = REAL(wcsnlen)(s, n);
+ CHECK_UNPOISONED(s, sizeof(wchar_t) * Min(res + 1, n));
+ return res;
+}
+
// wchar_t *wcschr(const wchar_t *wcs, wchar_t wc);
INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) {
ENSURE_MSAN_INITED();
@@ -597,6 +596,18 @@ INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) {
return res;
}
+INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src,
+ SIZE_T n) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T copy_size = REAL(wcsnlen)(src, n);
+ if (copy_size < n) copy_size++; // trailing \0
+ wchar_t *res = REAL(wcsncpy)(dest, src, n); // NOLINT
+ CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack);
+ __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t));
+ return res;
+}
+
// wchar_t *wmemcpy(wchar_t *dest, const wchar_t *src, SIZE_T n);
INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
ENSURE_MSAN_INITED();
@@ -1565,8 +1576,10 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(mbtowc);
INTERCEPT_FUNCTION(mbrtowc);
INTERCEPT_FUNCTION(wcslen);
+ INTERCEPT_FUNCTION(wcsnlen);
INTERCEPT_FUNCTION(wcschr);
INTERCEPT_FUNCTION(wcscpy);
+ INTERCEPT_FUNCTION(wcsncpy);
INTERCEPT_FUNCTION(wcscmp);
INTERCEPT_FUNCTION(getenv);
INTERCEPT_FUNCTION(setenv);
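
The new wcsncpy interceptor copies shadow and origin only for the part actually taken from src (including the terminator when it fits) and unpoisons the zero-padded tail, mirroring what wcsncpy itself writes. A small worked example of the size computation (POSIX wcsnlen, illustrative sizes):

    #include <cstdio>
    #include <cwchar>

    int main() {
      const wchar_t *src = L"abc";
      size_t n = 8;
      size_t copy_size = wcsnlen(src, n);   // 3 characters before the terminator
      if (copy_size < n) copy_size++;       // 4: the terminator is copied too
      std::printf("shadow/origin copied for %zu wchar_t, %zu unpoisoned padding\n",
                  copy_size, n - copy_size);
      return 0;
    }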
diff --git a/lib/msan/tests/CMakeLists.txt b/lib/msan/tests/CMakeLists.txt
index 8e911dc10bed..65fbc732df25 100644
--- a/lib/msan/tests/CMakeLists.txt
+++ b/lib/msan/tests/CMakeLists.txt
@@ -35,6 +35,7 @@ set(MSAN_UNITTEST_COMMON_CFLAGS
-Wno-zero-length-array
-Wno-uninitialized
-Werror=sign-compare
+ -Wno-gnu-zero-variadic-macro-arguments
)
set(MSAN_UNITTEST_INSTRUMENTED_CFLAGS
${MSAN_UNITTEST_COMMON_CFLAGS}
diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc
index 9ec1e28374db..dd81c4d798f6 100644
--- a/lib/msan/tests/msan_test.cc
+++ b/lib/msan/tests/msan_test.cc
@@ -175,10 +175,16 @@ void ExpectPoisonedWithOrigin(const T& t, unsigned origin) {
}
#define EXPECT_NOT_POISONED(x) EXPECT_EQ(true, TestForNotPoisoned((x)))
+#define EXPECT_NOT_POISONED2(data, size) \
+ EXPECT_EQ(true, TestForNotPoisoned((data), (size)))
+
+bool TestForNotPoisoned(const void *data, size_t size) {
+ return __msan_test_shadow(data, size) == -1;
+}
template<typename T>
bool TestForNotPoisoned(const T& t) {
- return __msan_test_shadow((void*)&t, sizeof(t)) == -1;
+ return TestForNotPoisoned((void *)&t, sizeof(t));
}
static U8 poisoned_array[100];
@@ -879,92 +885,203 @@ TEST(MemorySanitizer, bind_getsockname) {
close(sock);
}
-TEST(MemorySanitizer, accept) {
- int listen_socket = socket(AF_INET, SOCK_STREAM, 0);
+class SocketAddr {
+ public:
+ virtual ~SocketAddr() = default;
+ virtual struct sockaddr *ptr() = 0;
+ virtual size_t size() const = 0;
+
+ template <class... Args>
+ static std::unique_ptr<SocketAddr> Create(int family, Args... args);
+};
+
+class SocketAddr4 : public SocketAddr {
+ public:
+ SocketAddr4() { EXPECT_POISONED(sai_); }
+ explicit SocketAddr4(uint16_t port) {
+ memset(&sai_, 0, sizeof(sai_));
+ sai_.sin_family = AF_INET;
+ sai_.sin_port = port;
+ sai_.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ }
+
+ sockaddr *ptr() override { return reinterpret_cast<sockaddr *>(&sai_); }
+
+ size_t size() const override { return sizeof(sai_); }
+
+ private:
+ sockaddr_in sai_;
+};
+
+class SocketAddr6 : public SocketAddr {
+ public:
+ SocketAddr6() { EXPECT_POISONED(sai_); }
+ explicit SocketAddr6(uint16_t port) {
+ memset(&sai_, 0, sizeof(sai_));
+ sai_.sin6_family = AF_INET6;
+ sai_.sin6_port = port;
+ sai_.sin6_addr = in6addr_loopback;
+ }
+
+ sockaddr *ptr() override { return reinterpret_cast<sockaddr *>(&sai_); }
+
+ size_t size() const override { return sizeof(sai_); }
+
+ private:
+ sockaddr_in6 sai_;
+};
+
+template <class... Args>
+std::unique_ptr<SocketAddr> SocketAddr::Create(int family, Args... args) {
+ if (family == AF_INET)
+ return std::unique_ptr<SocketAddr>(new SocketAddr4(args...));
+ return std::unique_ptr<SocketAddr>(new SocketAddr6(args...));
+}
+
+class MemorySanitizerIpTest : public ::testing::TestWithParam<int> {
+ public:
+ void SetUp() override {
+ ASSERT_TRUE(GetParam() == AF_INET || GetParam() == AF_INET6);
+ }
+
+ template <class... Args>
+ std::unique_ptr<SocketAddr> CreateSockAddr(Args... args) const {
+ return SocketAddr::Create(GetParam(), args...);
+ }
+
+ int CreateSocket(int socket_type) const {
+ return socket(GetParam(), socket_type, 0);
+ }
+};
+
+std::vector<int> GetAvailableIpSocketFamilies() {
+ std::vector<int> result;
+
+ for (int i : {AF_INET, AF_INET6}) {
+ int s = socket(i, SOCK_STREAM, 0);
+ if (s > 0) {
+ auto sai = SocketAddr::Create(i, 0);
+ if (bind(s, sai->ptr(), sai->size()) == 0) result.push_back(i);
+ close(s);
+ }
+ }
+
+ return result;
+}
+
+INSTANTIATE_TEST_CASE_P(IpTests, MemorySanitizerIpTest,
+ ::testing::ValuesIn(GetAvailableIpSocketFamilies()));
+
+TEST_P(MemorySanitizerIpTest, accept) {
+ int listen_socket = CreateSocket(SOCK_STREAM);
ASSERT_LT(0, listen_socket);
- struct sockaddr_in sai;
- memset(&sai, 0, sizeof(sai));
- sai.sin_family = AF_INET;
- sai.sin_port = 0;
- sai.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- int res = bind(listen_socket, (struct sockaddr *)&sai, sizeof(sai));
+ auto sai = CreateSockAddr(0);
+ int res = bind(listen_socket, sai->ptr(), sai->size());
ASSERT_EQ(0, res);
res = listen(listen_socket, 1);
ASSERT_EQ(0, res);
- socklen_t sz = sizeof(sai);
- res = getsockname(listen_socket, (struct sockaddr *)&sai, &sz);
+ socklen_t sz = sai->size();
+ res = getsockname(listen_socket, sai->ptr(), &sz);
ASSERT_EQ(0, res);
- ASSERT_EQ(sizeof(sai), sz);
+ ASSERT_EQ(sai->size(), sz);
- int connect_socket = socket(AF_INET, SOCK_STREAM, 0);
+ int connect_socket = CreateSocket(SOCK_STREAM);
ASSERT_LT(0, connect_socket);
res = fcntl(connect_socket, F_SETFL, O_NONBLOCK);
ASSERT_EQ(0, res);
- res = connect(connect_socket, (struct sockaddr *)&sai, sizeof(sai));
+ res = connect(connect_socket, sai->ptr(), sai->size());
// On FreeBSD this connection completes immediately.
if (res != 0) {
ASSERT_EQ(-1, res);
ASSERT_EQ(EINPROGRESS, errno);
}
- __msan_poison(&sai, sizeof(sai));
- int new_sock = accept(listen_socket, (struct sockaddr *)&sai, &sz);
+ __msan_poison(sai->ptr(), sai->size());
+ int new_sock = accept(listen_socket, sai->ptr(), &sz);
ASSERT_LT(0, new_sock);
- ASSERT_EQ(sizeof(sai), sz);
- EXPECT_NOT_POISONED(sai);
+ ASSERT_EQ(sai->size(), sz);
+ EXPECT_NOT_POISONED2(sai->ptr(), sai->size());
- __msan_poison(&sai, sizeof(sai));
- res = getpeername(new_sock, (struct sockaddr *)&sai, &sz);
+ __msan_poison(sai->ptr(), sai->size());
+ res = getpeername(new_sock, sai->ptr(), &sz);
ASSERT_EQ(0, res);
- ASSERT_EQ(sizeof(sai), sz);
- EXPECT_NOT_POISONED(sai);
+ ASSERT_EQ(sai->size(), sz);
+ EXPECT_NOT_POISONED2(sai->ptr(), sai->size());
close(new_sock);
close(connect_socket);
close(listen_socket);
}
-TEST(MemorySanitizer, getaddrinfo) {
- struct addrinfo *ai;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = AF_INET;
- int res = getaddrinfo("localhost", NULL, &hints, &ai);
+TEST_P(MemorySanitizerIpTest, recvmsg) {
+ int server_socket = CreateSocket(SOCK_DGRAM);
+ ASSERT_LT(0, server_socket);
+
+ auto sai = CreateSockAddr(0);
+ int res = bind(server_socket, sai->ptr(), sai->size());
ASSERT_EQ(0, res);
- EXPECT_NOT_POISONED(*ai);
- ASSERT_EQ(sizeof(sockaddr_in), ai->ai_addrlen);
- EXPECT_NOT_POISONED(*(sockaddr_in*)ai->ai_addr);
-}
-TEST(MemorySanitizer, getnameinfo) {
- struct sockaddr_in sai;
- memset(&sai, 0, sizeof(sai));
- sai.sin_family = AF_INET;
- sai.sin_port = 80;
- sai.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- char host[500];
- char serv[500];
- int res = getnameinfo((struct sockaddr *)&sai, sizeof(sai), host,
- sizeof(host), serv, sizeof(serv), 0);
+ socklen_t sz = sai->size();
+ res = getsockname(server_socket, sai->ptr(), &sz);
ASSERT_EQ(0, res);
- EXPECT_NOT_POISONED(host[0]);
- EXPECT_POISONED(host[sizeof(host) - 1]);
+ ASSERT_EQ(sai->size(), sz);
- ASSERT_NE(0U, strlen(host));
- EXPECT_NOT_POISONED(serv[0]);
- EXPECT_POISONED(serv[sizeof(serv) - 1]);
- ASSERT_NE(0U, strlen(serv));
+ int client_socket = CreateSocket(SOCK_DGRAM);
+ ASSERT_LT(0, client_socket);
+
+ auto client_sai = CreateSockAddr(0);
+ res = bind(client_socket, client_sai->ptr(), client_sai->size());
+ ASSERT_EQ(0, res);
+
+ sz = client_sai->size();
+ res = getsockname(client_socket, client_sai->ptr(), &sz);
+ ASSERT_EQ(0, res);
+ ASSERT_EQ(client_sai->size(), sz);
+
+ const char *s = "message text";
+ struct iovec iov;
+ iov.iov_base = (void *)s;
+ iov.iov_len = strlen(s) + 1;
+ struct msghdr msg;
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_name = sai->ptr();
+ msg.msg_namelen = sai->size();
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ res = sendmsg(client_socket, &msg, 0);
+ ASSERT_LT(0, res);
+
+ char buf[1000];
+ struct iovec recv_iov;
+ recv_iov.iov_base = (void *)&buf;
+ recv_iov.iov_len = sizeof(buf);
+ auto recv_sai = CreateSockAddr();
+ struct msghdr recv_msg;
+ memset(&recv_msg, 0, sizeof(recv_msg));
+ recv_msg.msg_name = recv_sai->ptr();
+ recv_msg.msg_namelen = recv_sai->size();
+ recv_msg.msg_iov = &recv_iov;
+ recv_msg.msg_iovlen = 1;
+ res = recvmsg(server_socket, &recv_msg, 0);
+ ASSERT_LT(0, res);
+
+ ASSERT_EQ(recv_sai->size(), recv_msg.msg_namelen);
+ EXPECT_NOT_POISONED2(recv_sai->ptr(), recv_sai->size());
+ EXPECT_STREQ(s, buf);
+
+ close(server_socket);
+ close(client_socket);
}
#define EXPECT_HOSTENT_NOT_POISONED(he) \
do { \
EXPECT_NOT_POISONED(*(he)); \
- ASSERT_NE((void *) 0, (he)->h_name); \
- ASSERT_NE((void *) 0, (he)->h_aliases); \
- ASSERT_NE((void *) 0, (he)->h_addr_list); \
+ ASSERT_NE((void *)0, (he)->h_name); \
+ ASSERT_NE((void *)0, (he)->h_aliases); \
+ ASSERT_NE((void *)0, (he)->h_addr_list); \
EXPECT_NOT_POISONED(strlen((he)->h_name)); \
char **p = (he)->h_aliases; \
while (*p) { \
@@ -993,76 +1110,38 @@ TEST(MemorySanitizer, gethostbyname) {
EXPECT_HOSTENT_NOT_POISONED(he);
}
-#endif // MSAN_TEST_DISABLE_GETHOSTBYNAME
+#endif // MSAN_TEST_DISABLE_GETHOSTBYNAME
-TEST(MemorySanitizer, recvmsg) {
- int server_socket = socket(AF_INET, SOCK_DGRAM, 0);
- ASSERT_LT(0, server_socket);
+TEST(MemorySanitizer, getaddrinfo) {
+ struct addrinfo *ai;
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_INET;
+ int res = getaddrinfo("localhost", NULL, &hints, &ai);
+ ASSERT_EQ(0, res);
+ EXPECT_NOT_POISONED(*ai);
+ ASSERT_EQ(sizeof(sockaddr_in), ai->ai_addrlen);
+ EXPECT_NOT_POISONED(*(sockaddr_in *)ai->ai_addr);
+}
+TEST(MemorySanitizer, getnameinfo) {
struct sockaddr_in sai;
memset(&sai, 0, sizeof(sai));
sai.sin_family = AF_INET;
- sai.sin_port = 0;
+ sai.sin_port = 80;
sai.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- int res = bind(server_socket, (struct sockaddr *)&sai, sizeof(sai));
- ASSERT_EQ(0, res);
-
- socklen_t sz = sizeof(sai);
- res = getsockname(server_socket, (struct sockaddr *)&sai, &sz);
- ASSERT_EQ(0, res);
- ASSERT_EQ(sizeof(sai), sz);
-
-
- int client_socket = socket(AF_INET, SOCK_DGRAM, 0);
- ASSERT_LT(0, client_socket);
-
- struct sockaddr_in client_sai;
- memset(&client_sai, 0, sizeof(client_sai));
- client_sai.sin_family = AF_INET;
- client_sai.sin_port = 0;
- client_sai.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- res = bind(client_socket, (struct sockaddr *)&client_sai, sizeof(client_sai));
- ASSERT_EQ(0, res);
-
- sz = sizeof(client_sai);
- res = getsockname(client_socket, (struct sockaddr *)&client_sai, &sz);
+ char host[500];
+ char serv[500];
+ int res = getnameinfo((struct sockaddr *)&sai, sizeof(sai), host,
+ sizeof(host), serv, sizeof(serv), 0);
ASSERT_EQ(0, res);
- ASSERT_EQ(sizeof(client_sai), sz);
-
- const char *s = "message text";
- struct iovec iov;
- iov.iov_base = (void *)s;
- iov.iov_len = strlen(s) + 1;
- struct msghdr msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg_name = &sai;
- msg.msg_namelen = sizeof(sai);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- res = sendmsg(client_socket, &msg, 0);
- ASSERT_LT(0, res);
-
-
- char buf[1000];
- struct iovec recv_iov;
- recv_iov.iov_base = (void *)&buf;
- recv_iov.iov_len = sizeof(buf);
- struct sockaddr_in recv_sai;
- struct msghdr recv_msg;
- memset(&recv_msg, 0, sizeof(recv_msg));
- recv_msg.msg_name = &recv_sai;
- recv_msg.msg_namelen = sizeof(recv_sai);
- recv_msg.msg_iov = &recv_iov;
- recv_msg.msg_iovlen = 1;
- res = recvmsg(server_socket, &recv_msg, 0);
- ASSERT_LT(0, res);
-
- ASSERT_EQ(sizeof(recv_sai), recv_msg.msg_namelen);
- EXPECT_NOT_POISONED(*(struct sockaddr_in *)recv_msg.msg_name);
- EXPECT_STREQ(s, buf);
+ EXPECT_NOT_POISONED(host[0]);
+ EXPECT_POISONED(host[sizeof(host) - 1]);
- close(server_socket);
- close(client_socket);
+ ASSERT_NE(0U, strlen(host));
+ EXPECT_NOT_POISONED(serv[0]);
+ EXPECT_POISONED(serv[sizeof(serv) - 1]);
+ ASSERT_NE(0U, strlen(serv));
}
TEST(MemorySanitizer, gethostbyname2) {
@@ -3502,6 +3581,21 @@ TEST(MemorySanitizer, getgroups) {
EXPECT_NOT_POISONED(gids[i]);
}
+TEST(MemorySanitizer, getgroups_zero) {
+ gid_t group;
+ int n = getgroups(0, &group);
+ ASSERT_GE(n, 0);
+}
+
+TEST(MemorySanitizer, getgroups_negative) {
+ gid_t group;
+ int n = getgroups(-1, 0);
+ ASSERT_EQ(-1, n);
+
+ n = getgroups(-1, 0);
+ ASSERT_EQ(-1, n);
+}
+
TEST(MemorySanitizer, wordexp) {
wordexp_t w;
int res = wordexp("a b c", &w, 0);
@@ -3602,6 +3696,18 @@ TEST(MemorySanitizer, ICmpVectorRelational) {
EXPECT_POISONED(_mm_cmpgt_epi16(poisoned(_mm_set1_epi16(6), _mm_set1_epi16(0xF)),
poisoned(_mm_set1_epi16(7), _mm_set1_epi16(0))));
}
+
+TEST(MemorySanitizer, stmxcsr_ldmxcsr) {
+ U4 x = _mm_getcsr();
+ EXPECT_NOT_POISONED(x);
+
+ _mm_setcsr(x);
+
+ __msan_poison(&x, sizeof(x));
+ U4 origin = __LINE__;
+ __msan_set_origin(&x, sizeof(x), origin);
+ EXPECT_UMR_O(_mm_setcsr(x), origin);
+}
#endif
// Volatile bitfield store is implemented as load-mask-store
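
The socket tests above were reworked into gtest value-parameterized tests, so one body runs once per address family, and GetAvailableIpSocketFamilies() probes at runtime which families can actually bind; IPv6-less hosts simply get fewer instances. A reduced sketch of the TEST_P / INSTANTIATE_TEST_CASE_P pattern used there:

    #include <gtest/gtest.h>
    #include <sys/socket.h>

    class FamilyTest : public ::testing::TestWithParam<int> {};

    // One body, instantiated once per parameter value.
    TEST_P(FamilyTest, FamilyIsKnown) {
      EXPECT_TRUE(GetParam() == AF_INET || GetParam() == AF_INET6);
    }

    // In the real test the list is probed at runtime; here it is fixed.
    INSTANTIATE_TEST_CASE_P(Families, FamilyTest,
                            ::testing::Values(AF_INET, AF_INET6));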
diff --git a/lib/profile/InstrProfData.inc b/lib/profile/InstrProfData.inc
index f7c22d10763c..be0dd4ad04bf 100644
--- a/lib/profile/InstrProfData.inc
+++ b/lib/profile/InstrProfData.inc
@@ -153,7 +153,17 @@ INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
INSTR_PROF_COMMA
VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
+#ifndef VALUE_RANGE_PROF
VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
+#else /* VALUE_RANGE_PROF */
+VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
+#endif /*VALUE_RANGE_PROF */
#undef VALUE_PROF_FUNC_PARAM
#undef INSTR_PROF_COMMA
/* VALUE_PROF_FUNC_PARAM end */
@@ -174,13 +184,15 @@ VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
* name hash and the function address.
*/
VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0)
+/* For memory intrinsic function size profiling. */
+VALUE_PROF_KIND(IPVK_MemOPSize, 1)
/* These two kinds must be the last to be
* declared. This is to make sure the string
* array created with the template can be
* indexed with the kind value.
*/
VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget)
-VALUE_PROF_KIND(IPVK_Last, IPVK_IndirectCallTarget)
+VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize)
#undef VALUE_PROF_KIND
/* VALUE_PROF_KIND end */
@@ -234,6 +246,31 @@ COVMAP_HEADER(uint32_t, Int32Ty, Version, \
/* COVMAP_HEADER end. */
+#ifdef INSTR_PROF_SECT_ENTRY
+#define INSTR_PROF_DATA_DEFINED
+INSTR_PROF_SECT_ENTRY(IPSK_data, \
+ INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_DATA_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
+ INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_name, \
+ INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_NAME_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_vals, \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALS_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
+ INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COFF), "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
+ INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COFF), "__LLVM_COV,")
+
+#undef INSTR_PROF_SECT_ENTRY
+#endif
+
+
#ifdef INSTR_PROF_VALUE_PROF_DATA
#define INSTR_PROF_DATA_DEFINED
@@ -610,17 +647,47 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
* specified via command line. */
#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
+/* section name strings common to all targets other
+ than WIN32 */
+#define INSTR_PROF_DATA_COMMON __llvm_prf_data
+#define INSTR_PROF_NAME_COMMON __llvm_prf_names
+#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
+#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
+#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
+#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
+/* Win32 */
+#define INSTR_PROF_DATA_COFF .lprfd
+#define INSTR_PROF_NAME_COFF .lprfn
+#define INSTR_PROF_CNTS_COFF .lprfc
+#define INSTR_PROF_VALS_COFF .lprfv
+#define INSTR_PROF_VNODES_COFF .lprfnd
+#define INSTR_PROF_COVMAP_COFF .lcovmap
+
+#ifdef _WIN32
/* Runtime section names and name strings. */
-#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data
-#define INSTR_PROF_NAME_SECT_NAME __llvm_prf_names
-#define INSTR_PROF_CNTS_SECT_NAME __llvm_prf_cnts
+#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
+#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
+#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
/* Array of pointers. Each pointer points to a list
* of value nodes associated with one value site.
*/
-#define INSTR_PROF_VALS_SECT_NAME __llvm_prf_vals
+#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME __llvm_prf_vnds
-#define INSTR_PROF_COVMAP_SECT_NAME __llvm_covmap
+#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
+#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
+#else
+/* Runtime section names and name strings. */
+#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COMMON
+#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COMMON
+#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COMMON
+/* Array of pointers. Each pointer points to a list
+ * of value nodes associated with one value site.
+ */
+#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COMMON
+/* Value profile nodes section. */
+#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COMMON
+#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COMMON
+#endif
#define INSTR_PROF_DATA_SECT_NAME_STR \
INSTR_PROF_QUOTE(INSTR_PROF_DATA_SECT_NAME)
@@ -649,6 +716,9 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
#define INSTR_PROF_VALUE_PROF_FUNC_STR \
INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
+#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
+#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
/* InstrProfile per-function control data alignment. */
#define INSTR_PROF_DATA_ALIGNMENT 8
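
The new VALUE_RANGE_PROF parameters (PreciseRangeStart, PreciseRangeLast, LargeValue) describe how a profiled value such as a memory-intrinsic size can be bucketed before it reaches the generic value profiler. A hypothetical sketch of one such bucketing, purely to illustrate the parameters; the runtime function __llvm_profile_instrument_range itself is not part of this diff:

    #include <cstdint>

    // Hypothetical bucketing: keep values in the precise range exact, collapse
    // very large values into a single bucket, and lump everything else together.
    static uint64_t BucketValue(uint64_t V, uint64_t PreciseStart,
                                uint64_t PreciseLast, uint64_t Large) {
      if (V >= PreciseStart && V <= PreciseLast) return V;
      if (V >= Large) return Large;
      return PreciseLast + 1;   // shared bucket for the remaining values
    }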
diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c
index f82080c98aac..dfcbe52d7e4f 100644
--- a/lib/profile/InstrProfilingFile.c
+++ b/lib/profile/InstrProfilingFile.c
@@ -172,6 +172,16 @@ static int doProfileMerging(FILE *ProfileFile) {
return 0;
}
+/* Create the directory holding the file, if needed. */
+static void createProfileDir(const char *Filename) {
+ size_t Length = strlen(Filename);
+ if (lprofFindFirstDirSeparator(Filename)) {
+ char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ strncpy(Copy, Filename, Length + 1);
+ __llvm_profile_recursive_mkdir(Copy);
+ }
+}
+
/* Open the profile data for merging. It opens the file in r+b mode with
* file locking. If the file has content which is compatible with the
* current process, it also reads in the profile data in the file and merge
@@ -184,6 +194,7 @@ static FILE *openFileForMerging(const char *ProfileFileName) {
FILE *ProfileFile;
int rc;
+ createProfileDir(ProfileFileName);
ProfileFile = lprofOpenFileEx(ProfileFileName);
if (!ProfileFile)
return NULL;
@@ -233,18 +244,13 @@ static void truncateCurrentFile(void) {
if (!Filename)
return;
- /* Create the directory holding the file, if needed. */
- if (lprofFindFirstDirSeparator(Filename)) {
- char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
- strncpy(Copy, Filename, Length + 1);
- __llvm_profile_recursive_mkdir(Copy);
- }
-
  /* Bypass file truncation to allow online raw profile
* merging. */
if (lprofCurFilename.MergePoolSize)
return;
+ createProfileDir(Filename);
+
/* Truncate the file. Later we'll reopen and append. */
File = fopen(Filename, "w");
if (!File)
@@ -524,6 +530,7 @@ int __llvm_profile_write_file(void) {
int rc, Length;
const char *Filename;
char *FilenameBuf;
+ int PDeathSig = 0;
if (lprofProfileDumped()) {
PROF_NOTE("Profile data not written to file: %s.\n",
@@ -550,10 +557,18 @@ int __llvm_profile_write_file(void) {
return -1;
}
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ PDeathSig = lprofSuspendSigKill();
+
/* Write profile data to the file. */
rc = writeFile(Filename);
if (rc)
PROF_ERR("Failed to write file \"%s\": %s\n", Filename, strerror(errno));
+
+ // Restore SIGKILL.
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
+
return rc;
}
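With createProfileDir factored out, both the truncating path and the %m merge path now create missing parent directories before opening the raw profile. A small sketch, assuming a clang -fprofile-instr-generate build and the runtime's public __llvm_profile_set_filename entry point, of a profile path whose directories do not exist yet:

    // Illustrative sketch: requires building with -fprofile-instr-generate so the
    // profile runtime above is linked in; __llvm_profile_set_filename is its
    // filename-override hook.
    extern "C" void __llvm_profile_set_filename(const char *Name);

    int main() {
      // "prof/run1" need not exist: the runtime now creates it before writing,
      // on both the truncating and the merging (%m) code paths.
      __llvm_profile_set_filename("prof/run1/default.profraw");
      return 0;  // the .profraw file is emitted at exit
    }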
diff --git a/lib/profile/InstrProfilingUtil.c b/lib/profile/InstrProfilingUtil.c
index 321c7192cc60..fb68f30a5e1f 100644
--- a/lib/profile/InstrProfilingUtil.c
+++ b/lib/profile/InstrProfilingUtil.c
@@ -29,6 +29,11 @@
#include <stdlib.h>
#include <string.h>
+#if defined(__linux__)
+#include <signal.h>
+#include <sys/prctl.h>
+#endif
+
COMPILER_RT_VISIBILITY
void __llvm_profile_recursive_mkdir(char *path) {
int i;
@@ -219,3 +224,21 @@ COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
#endif
return Sep;
}
+
+COMPILER_RT_VISIBILITY int lprofSuspendSigKill() {
+#if defined(__linux__)
+ int PDeachSig = 0;
+ /* Temporarily suspend getting SIGKILL upon exit of the parent process. */
+ if (prctl(PR_GET_PDEATHSIG, &PDeachSig) == 0 && PDeachSig == SIGKILL)
+ prctl(PR_SET_PDEATHSIG, 0);
+ return (PDeachSig == SIGKILL);
+#else
+ return 0;
+#endif
+}
+
+COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
+#if defined(__linux__)
+ prctl(PR_SET_PDEATHSIG, SIGKILL);
+#endif
+}
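lprofSuspendSigKill and lprofRestoreSigKill exist so that a process armed with PR_SET_PDEATHSIG = SIGKILL is not killed halfway through writing its profile when its parent exits. A standalone sketch of the same Linux-only prctl pattern (a no-op elsewhere):

    #include <stdio.h>
    #if defined(__linux__)
    #include <signal.h>
    #include <sys/prctl.h>
    #endif

    /* Returns 1 if SIGKILL-on-parent-death was armed and has been disarmed. */
    static int suspend_pdeath_sigkill(void) {
    #if defined(__linux__)
      int sig = 0;
      if (prctl(PR_GET_PDEATHSIG, &sig) == 0 && sig == SIGKILL)
        prctl(PR_SET_PDEATHSIG, 0);   /* disarm while doing critical work */
      return sig == SIGKILL;
    #else
      return 0;
    #endif
    }

    static void restore_pdeath_sigkill(void) {
    #if defined(__linux__)
      prctl(PR_SET_PDEATHSIG, SIGKILL);  /* re-arm */
    #endif
    }

    int main(void) {
      int need_restore = suspend_pdeath_sigkill();
      puts("writing data that must not be lost to a mid-write SIGKILL...");
      if (need_restore)
        restore_pdeath_sigkill();
      return 0;
    }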
diff --git a/lib/profile/InstrProfilingUtil.h b/lib/profile/InstrProfilingUtil.h
index a80fde77e16a..9698599606e0 100644
--- a/lib/profile/InstrProfilingUtil.h
+++ b/lib/profile/InstrProfilingUtil.h
@@ -51,4 +51,12 @@ int lprofGetHostName(char *Name, int Len);
unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV);
void *lprofPtrFetchAdd(void **Mem, long ByteIncr);
+/* Temporarily suspend SIGKILL. Return value of 1 means a restore is needed.
+ * Other return values mean no restore is needed.
+ */
+int lprofSuspendSigKill();
+
+/* Restore previously suspended SIGKILL. */
+void lprofRestoreSigKill();
+
#endif /* PROFILE_INSTRPROFILINGUTIL_H */
diff --git a/lib/profile/InstrProfilingValue.c b/lib/profile/InstrProfilingValue.c
index 6648f8923584..44263da80097 100644
--- a/lib/profile/InstrProfilingValue.c
+++ b/lib/profile/InstrProfilingValue.c
@@ -220,6 +220,35 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
}
/*
+ * The target values are partitioned into multiple regions/ranges. There is one
+ * contiguous region which is precise -- every value in the range is tracked
+ * individually. A value outside the precise region will be collapsed into one
+ * value depending on the region it falls in.
+ *
+ * There are three regions:
+ * 1. (-inf, PreciseRangeStart) and (PreciseRangeLast, LargeValue) belong
+ * to one region -- all values here should be mapped to one value of
+ * "PreciseRangeLast + 1".
+ * 2. [PreciseRangeStart, PreciseRangeLast]
+ * 3. Large values: [LargeValue, +inf) maps to one value of LargeValue.
+ *
+ * The range for large values is optional. The default value of INT64_MIN
+ * indicates it is not specified.
+ */
+COMPILER_RT_VISIBILITY void __llvm_profile_instrument_range(
+ uint64_t TargetValue, void *Data, uint32_t CounterIndex,
+ int64_t PreciseRangeStart, int64_t PreciseRangeLast, int64_t LargeValue) {
+
+ if (LargeValue != INT64_MIN && (int64_t)TargetValue >= LargeValue)
+ TargetValue = LargeValue;
+ else if ((int64_t)TargetValue < PreciseRangeStart ||
+ (int64_t)TargetValue > PreciseRangeLast)
+ TargetValue = PreciseRangeLast + 1;
+
+ __llvm_profile_instrument_target(TargetValue, Data, CounterIndex);
+}
+
+/*
* A wrapper struct that represents value profile runtime data.
* Like InstrProfRecord class which is used by profiling host tools,
 * ValueProfRuntimeRecord also implements the abstract interfaces defined in
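A worked example of the collapsing rule implemented above, using illustrative bounds rather than anything the compiler actually emits: with PreciseRangeStart = 0, PreciseRangeLast = 8 and LargeValue = 64, values 0..8 are profiled exactly, negative values and 9..63 all collapse to 9 (PreciseRangeLast + 1), values >= 64 collapse to 64, and passing LargeValue = INT64_MIN disables the large-value region entirely:

    #include <assert.h>
    #include <stdint.h>

    /* Standalone mirror of the collapsing rule above (illustrative values only). */
    static uint64_t collapse(uint64_t v, int64_t start, int64_t last, int64_t large) {
      if (large != INT64_MIN && (int64_t)v >= large) return (uint64_t)large;
      if ((int64_t)v < start || (int64_t)v > last) return (uint64_t)(last + 1);
      return v;
    }

    int main(void) {
      assert(collapse(5, 0, 8, 64) == 5);          /* inside the precise range */
      assert(collapse(17, 0, 8, 64) == 9);         /* collapsed to PreciseRangeLast + 1 */
      assert(collapse(100, 0, 8, 64) == 64);       /* collapsed to LargeValue */
      assert(collapse(100, 0, 8, INT64_MIN) == 9); /* no large region configured */
      return 0;
    }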
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index c70b8be6d8e2..6cdc91897cd8 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -25,6 +25,7 @@ set(SANITIZER_SOURCES_NOTERMINATION
sanitizer_stackdepot.cc
sanitizer_stacktrace.cc
sanitizer_stacktrace_printer.cc
+ sanitizer_stoptheworld_mac.cc
sanitizer_suppressions.cc
sanitizer_symbolizer.cc
sanitizer_symbolizer_libbacktrace.cc
@@ -56,6 +57,7 @@ set(SANITIZER_LIBCDEP_SOURCES
sanitizer_coverage_libcdep.cc
sanitizer_coverage_libcdep_new.cc
sanitizer_coverage_mapping_libcdep.cc
+ sanitizer_coverage_win_sections.cc
sanitizer_linux_libcdep.cc
sanitizer_posix_libcdep.cc
sanitizer_stacktrace_libcdep.cc
@@ -126,7 +128,10 @@ set(SANITIZER_HEADERS
sanitizer_syscall_generic.inc
sanitizer_syscall_linux_x86_64.inc
sanitizer_syscall_linux_aarch64.inc
- sanitizer_thread_registry.h)
+ sanitizer_thread_registry.h
+ sanitizer_win.h)
+
+include_directories(..)
set(SANITIZER_COMMON_DEFINITIONS)
@@ -184,6 +189,55 @@ add_compiler_rt_object_libraries(RTSanitizerCommonLibc
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
+if(WIN32)
+ add_compiler_rt_object_libraries(SanitizerCommonWeakInterception
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES sanitizer_win_weak_interception.cc
+ CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC
+ DEFS ${SANITIZER_COMMON_DEFINITIONS})
+ add_compiler_rt_object_libraries(SancovWeakInterception
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES sanitizer_coverage_win_weak_interception.cc
+ CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC
+ DEFS ${SANITIZER_COMMON_DEFINITIONS})
+
+ add_compiler_rt_object_libraries(SanitizerCommonDllThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES sanitizer_win_dll_thunk.cc
+ CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK
+ DEFS ${SANITIZER_COMMON_DEFINITIONS})
+ add_compiler_rt_object_libraries(SancovDllThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES sanitizer_coverage_win_dll_thunk.cc
+ sanitizer_coverage_win_sections.cc
+ CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK
+ DEFS ${SANITIZER_COMMON_DEFINITIONS})
+
+ set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DSANITIZER_DYNAMIC_RUNTIME_THUNK")
+ if(MSVC)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
+ elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
+ endif()
+ add_compiler_rt_object_libraries(SanitizerCommonDynamicRuntimeThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES sanitizer_win_dynamic_runtime_thunk.cc
+ CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
+ DEFS ${SANITIZER_COMMON_DEFINITIONS})
+ add_compiler_rt_object_libraries(SancovDynamicRuntimeThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES sanitizer_coverage_win_dynamic_runtime_thunk.cc
+ sanitizer_coverage_win_sections.cc
+ CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
+ DEFS ${SANITIZER_COMMON_DEFINITIONS})
+endif()
+
# Unit tests for common sanitizer runtime.
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
diff --git a/lib/sanitizer_common/sancov_flags.cc b/lib/sanitizer_common/sancov_flags.cc
index 08fd2a4366a8..9abb5b5c5988 100644
--- a/lib/sanitizer_common/sancov_flags.cc
+++ b/lib/sanitizer_common/sancov_flags.cc
@@ -15,10 +15,9 @@
#include "sanitizer_flag_parser.h"
#include "sanitizer_platform.h"
-#if !SANITIZER_LINUX
-// other platforms do not have weak symbols out of the box.
-extern "C" const char* __sancov_default_options() { return ""; }
-#endif
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __sancov_default_options, void) {
+ return "";
+}
using namespace __sanitizer;
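Because __sancov_default_options is now a weak definition on every platform, an instrumented program can still override it with a strong definition; the flag string below is illustrative only (sancov_flags.inc lists the real flag names):

    // Strong override linked into the instrumented binary; it wins over the weak
    // default defined above. "symbolize" is used here purely as an example flag.
    extern "C" const char *__sancov_default_options() {
      return "symbolize=0";
    }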
diff --git a/lib/sanitizer_common/sancov_flags.h b/lib/sanitizer_common/sancov_flags.h
index 5fbd7ad06a95..627d9a3df6f4 100644
--- a/lib/sanitizer_common/sancov_flags.h
+++ b/lib/sanitizer_common/sancov_flags.h
@@ -32,9 +32,9 @@ inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
void InitializeSancovFlags();
+} // namespace __sancov
+
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
__sancov_default_options();
-} // namespace __sancov
-
#endif
diff --git a/lib/sanitizer_common/sanitizer_allocator_interface.h b/lib/sanitizer_common/sanitizer_allocator_interface.h
index 5ff6edba0a1a..13910e719e78 100644
--- a/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -34,13 +34,12 @@ SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
void (*free_hook)(const void *));
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
+ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+ void __sanitizer_free_hook(void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_print_memory_profile(int top_percent);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H
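__sanitizer_print_memory_profile now takes a second argument bounding how many allocation contexts are printed. A hedged usage sketch; the prototype is paraphrased from the internal header above, and the call only does something when the binary is linked against an ASan-style runtime:

    // Only meaningful when built with -fsanitize=address (the runtime provides
    // the definition); the uptr parameters are spelled as unsigned long here.
    extern "C" void __sanitizer_print_memory_profile(unsigned long top_percent,
                                                     unsigned long max_contexts);
    int main() {
      // Print the allocation contexts covering 90% of the live heap, 20 at most.
      __sanitizer_print_memory_profile(90, 20);
      return 0;
    }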
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index e1172e0c2820..d6c66604ec86 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -45,10 +45,10 @@ struct SizeClassAllocator64LocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(c, allocator, class_id);
+ stats_.Add(AllocatorStatAllocated, c->class_size);
CHECK_GT(c->count, 0);
CompactPtrT chunk = c->chunks[--c->count];
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
@@ -62,8 +62,8 @@ struct SizeClassAllocator64LocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(c, allocator, class_id, c->max_count / 2);
@@ -85,6 +85,7 @@ struct SizeClassAllocator64LocalCache {
struct PerClass {
u32 count;
u32 max_count;
+ uptr class_size;
CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
};
PerClass per_class_[kNumClasses];
@@ -96,13 +97,14 @@ struct SizeClassAllocator64LocalCache {
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ c->class_size = Allocator::ClassIdToSize(i);
}
}
NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
uptr class_id) {
InitCache();
- uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
+ uptr num_requested_chunks = c->max_count / 2;
allocator->GetFromAllocator(&stats_, class_id, c->chunks,
num_requested_chunks);
c->count = num_requested_chunks;
@@ -141,10 +143,10 @@ struct SizeClassAllocator32LocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
+ stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]);
return res;
@@ -156,8 +158,8 @@ struct SizeClassAllocator32LocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(allocator, class_id);
@@ -177,6 +179,7 @@ struct SizeClassAllocator32LocalCache {
struct PerClass {
uptr count;
uptr max_count;
+ uptr class_size;
void *batch[2 * TransferBatch::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
@@ -188,6 +191,7 @@ struct SizeClassAllocator32LocalCache {
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * TransferBatch::MaxCached(i);
+ c->class_size = Allocator::ClassIdToSize(i);
}
}
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 9824a5198b52..3ef366f4f328 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -199,23 +199,24 @@ const char *StripModuleName(const char *module) {
return module;
}
-void ReportErrorSummary(const char *error_message) {
+void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
InternalScopedString buff(kMaxSummaryLength);
- buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
+ buff.append("SUMMARY: %s: %s",
+ alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
}
#if !SANITIZER_GO
-void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
- if (!common_flags()->print_summary)
- return;
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name) {
+ if (!common_flags()->print_summary) return;
InternalScopedString buff(kMaxSummaryLength);
buff.append("%s ", error_type);
RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
- ReportErrorSummary(buff.data());
+ ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
@@ -489,7 +490,8 @@ void __sanitizer_set_report_fd(void *fd) {
report_file.fd_pid = internal_getpid();
}
-void __sanitizer_report_error_summary(const char *error_summary) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,
+ const char *error_summary) {
Printf("%s\n", error_summary);
}
@@ -504,11 +506,4 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
-
-#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_print_memory_profile(int top_percent) {
- (void)top_percent;
-}
-#endif
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 2dabb5066ecf..9d367ca80144 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -382,6 +382,7 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsHandledDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
+const char *DescribeSignalOrException(int signo);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
@@ -391,12 +392,16 @@ const int kMaxSummaryLength = 1024;
// Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
-void ReportErrorSummary(const char *error_message);
+// If alt_tool_name is provided, it's used in place of SanitizerToolName.
+void ReportErrorSummary(const char *error_message,
+ const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
// error_type file:line[:column][ function]
-void ReportErrorSummary(const char *error_type, const AddressInfo &info);
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
-void ReportErrorSummary(const char *error_type, const StackTrace *trace);
+void ReportErrorSummary(const char *error_type, const StackTrace *trace,
+ const char *alt_tool_name = nullptr);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -910,6 +915,8 @@ struct StackDepotStats {
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;
+void CheckNoDeepBind(const char *filename, int flag);
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index ca571d1a9fd5..7b4e6d27df3d 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -24,7 +24,8 @@
// COMMON_INTERCEPTOR_SET_THREAD_NAME
// COMMON_INTERCEPTOR_ON_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
-// COMMON_INTERCEPTOR_MUTEX_LOCK
+// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
// COMMON_INTERCEPTOR_MUTEX_UNLOCK
// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
@@ -44,15 +45,9 @@
#include <stdarg.h>
#if SANITIZER_INTERCEPTOR_HOOKS
-#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \
- do { \
- if (f) \
- f(__VA_ARGS__); \
- } while (false);
-#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
- extern "C" { \
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); \
- } // extern "C"
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}
#else
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
@@ -95,8 +90,12 @@ bool PlatformHasDifferentMemcpyAndMemmove();
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
#endif
-#ifndef COMMON_INTERCEPTOR_MUTEX_LOCK
-#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) {}
+#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
#endif
#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
@@ -148,7 +147,8 @@ bool PlatformHasDifferentMemcpyAndMemmove();
COMMON_INTERCEPTOR_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {}
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ CheckNoDeepBind(filename, flag);
#endif
#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
@@ -500,6 +500,52 @@ INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
#define INIT_STRCASESTR
#endif
+#if SANITIZER_INTERCEPT_STRTOK
+
+INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);
+ if (!common_flags()->intercept_strtok) {
+ return REAL(strtok)(str, delimiters);
+ }
+ if (common_flags()->strict_string_checks) {
+ // If strict_string_checks is enabled, we check the whole first argument
+ // string on the first call (strtok saves this string in a static buffer
+ // for subsequent calls). We do not need to check strtok's result.
+ // As the delimiters can change, we check them every call.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
+ REAL(strlen)(delimiters) + 1);
+ return REAL(strtok)(str, delimiters);
+ } else {
+ // However, when strict_string_checks is disabled we cannot check the
+ // whole string on the first call. Instead, we check the result string
+ // which is guaranteed to be a NULL-terminated substring of the first
+ // argument. We also conservatively check one character of str and the
+ // delimiters.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
+ char *result = REAL(strtok)(str, delimiters);
+ if (result != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+ } else if (str != nullptr) {
+      // No delimiters were found; it's safe to assume that the entire str was
+ // scanned.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ return result;
+ }
+}
+
+#define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok)
+#else
+#define INIT_STRTOK
+#endif
+
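The interceptor above trades precision for safety depending on strict_string_checks. A hedged example of the difference: under ASan with strict_string_checks=1 the up-front strlen(str)+1 read can flag a token buffer that is not NUL-terminated, while the default mode only validates the returned token:

    #include <string.h>
    #include <stdlib.h>

    int main() {
      char *s = (char *)malloc(4);
      memcpy(s, "a,bc", 4);          // deliberately not NUL-terminated
      // With strict_string_checks=1 the interceptor reads strlen(s) + 1 bytes of s
      // before calling the real strtok, which can be reported as a
      // heap-buffer-overflow; the relaxed mode checks only one byte of s and the
      // returned token "a".
      char *tok = strtok(s, ",");
      (void)tok;
      free(s);
      return 0;
    }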
#if SANITIZER_INTERCEPT_MEMMEM
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
const void *s1, SIZE_T len1, const void *s2,
@@ -842,6 +888,23 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
#define INIT_READ
#endif
+#if SANITIZER_INTERCEPT_FREAD
+INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size);
+ return res;
+}
+#define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread)
+#else
+#define INIT_FREAD
+#endif
+
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void *ctx;
@@ -942,6 +1005,20 @@ INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
#define INIT_WRITE
#endif
+#if SANITIZER_INTERCEPT_FWRITE
+INTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file);
+ SIZE_T res = REAL(fwrite)(p, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size);
+ return res;
+}
+#define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite)
+#else
+#define INIT_FWRITE
+#endif
+
#if SANITIZER_INTERCEPT_PWRITE
INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void *ctx;
@@ -3251,6 +3328,30 @@ INTERCEPTOR(char *, strerror, int errnum) {
#endif
#if SANITIZER_INTERCEPT_STRERROR_R
+// There are 2 versions of strerror_r:
+// * POSIX version returns 0 on success, negative error code on failure,
+// writes message to buf.
+// * GNU version returns message pointer, which points to either buf or some
+// static storage.
+#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
+ SANITIZER_MAC
+// POSIX version. Spec is not clear on whether buf is NULL-terminated.
+// At least on OSX, buf contents are valid even when the call fails.
+INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(strerror_r)(errnum, buf, buflen);
+
+ SIZE_T sz = internal_strnlen(buf, buflen);
+ if (sz < buflen) ++sz;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
+ return res;
+}
+#else
+// GNU version.
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
@@ -3258,24 +3359,11 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
- // There are 2 versions of strerror_r:
- // * POSIX version returns 0 on success, negative error code on failure,
- // writes message to buf.
- // * GNU version returns message pointer, which points to either buf or some
- // static storage.
- SIZE_T posix_res = (SIZE_T)res;
- if (posix_res < 1024 || posix_res > (SIZE_T) - 1024) {
- // POSIX version. Spec is not clear on whether buf is NULL-terminated.
- // At least on OSX, buf contents are valid even when the call fails.
- SIZE_T sz = internal_strnlen(buf, buflen);
- if (sz < buflen) ++sz;
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
- } else {
- // GNU version.
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
- }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
}
+#endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||
+ //SANITIZER_MAC
#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
#else
#define INIT_STRERROR_R
@@ -3414,7 +3502,8 @@ INTERCEPTOR(int, getgroups, int size, u32 *lst) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(getgroups)(size, lst);
- if (res && lst) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
+ if (res >= 0 && lst && size > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
return res;
}
#define INIT_GETGROUPS COMMON_INTERCEPT_FUNCTION(getgroups);
@@ -3669,11 +3758,12 @@ INTERCEPTOR(void, _exit, int status) {
INTERCEPTOR(int, pthread_mutex_lock, void *m) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
+ COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
int res = REAL(pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
if (res == 0 || res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
+ COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
if (res == errno_EINVAL)
COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
return res;
@@ -4547,7 +4637,7 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
- if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
+ if (outbuf && *outbuf > outbuf_orig) {
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz);
}
@@ -4614,11 +4704,15 @@ void *__tls_get_addr_opt(void *arg);
// descriptor offset as an argument instead of a pointer. GOT address
// is passed in r12, so it's necessary to write it in assembly. This is
// the function used by the compiler.
-#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr_internal)
+extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
+DEFINE_REAL(uptr, __tls_get_offset, void *arg)
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr __interceptor___tls_get_offset(void *arg);
INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
- uptr res = REAL(__tls_get_addr_internal)(arg);
+ uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer());
void *ptr = reinterpret_cast<void *>(res + tp);
uptr tls_begin, tls_end;
@@ -4630,32 +4724,43 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
}
return res;
}
-// We need a protected symbol aliasing the above, so that we can jump
+// We need a hidden symbol aliasing the above, so that we can jump
// directly to it from the assembly below.
extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
- visibility("protected")))
-uptr __interceptor___tls_get_addr_internal_protected(void *arg);
+ visibility("hidden")))
+uptr __tls_get_addr_hidden(void *arg);
// Now carefully intercept __tls_get_offset.
asm(
".text\n"
- ".global __tls_get_offset\n"
- "__tls_get_offset:\n"
// The __intercept_ version has to exist, so that gen_dynamic_list.py
// exports our symbol.
+ ".weak __tls_get_offset\n"
+ ".type __tls_get_offset, @function\n"
+ "__tls_get_offset:\n"
".global __interceptor___tls_get_offset\n"
+ ".type __interceptor___tls_get_offset, @function\n"
"__interceptor___tls_get_offset:\n"
#ifdef __s390x__
"la %r2, 0(%r2,%r12)\n"
- "jg __interceptor___tls_get_addr_internal_protected\n"
+ "jg __tls_get_addr_hidden\n"
#else
"basr %r3,0\n"
"0: la %r2,0(%r2,%r12)\n"
"l %r4,1f-0b(%r3)\n"
"b 0(%r4,%r3)\n"
- "1: .long __interceptor___tls_get_addr_internal_protected - 0b\n"
+ "1: .long __tls_get_addr_hidden - 0b\n"
#endif
- ".type __tls_get_offset, @function\n"
- ".size __tls_get_offset, .-__tls_get_offset\n"
+ ".size __interceptor___tls_get_offset, .-__interceptor___tls_get_offset\n"
+// Assembly wrapper to call REAL(__tls_get_offset)(arg)
+ ".type __tls_get_offset_wrapper, @function\n"
+ "__tls_get_offset_wrapper:\n"
+#ifdef __s390x__
+ "sgr %r2,%r12\n"
+#else
+ "sr %r2,%r12\n"
+#endif
+ "br %r3\n"
+ ".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
);
#endif // SANITIZER_S390
#else
@@ -6026,6 +6131,21 @@ INTERCEPTOR(void *, getutxline, void *ut) {
#define INIT_UTMPX
#endif
+#if SANITIZER_INTERCEPT_GETLOADAVG
+INTERCEPTOR(int, getloadavg, double *loadavg, int nelem) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem);
+ int res = REAL(getloadavg)(loadavg, nelem);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg));
+ return res;
+}
+#define INIT_GETLOADAVG \
+ COMMON_INTERCEPT_FUNCTION(getloadavg);
+#else
+#define INIT_GETLOADAVG
+#endif
+
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
@@ -6043,6 +6163,7 @@ static void InitializeCommonInterceptors() {
INIT_STRCHRNUL;
INIT_STRRCHR;
INIT_STRSPN;
+ INIT_STRTOK;
INIT_STRPBRK;
INIT_MEMSET;
INIT_MEMMOVE;
@@ -6052,12 +6173,14 @@ static void InitializeCommonInterceptors() {
INIT_MEMRCHR;
INIT_MEMMEM;
INIT_READ;
+ INIT_FREAD;
INIT_PREAD;
INIT_PREAD64;
INIT_READV;
INIT_PREADV;
INIT_PREADV64;
INIT_WRITE;
+ INIT_FWRITE;
INIT_PWRITE;
INIT_PWRITE64;
INIT_WRITEV;
@@ -6224,4 +6347,5 @@ static void InitializeCommonInterceptors() {
// FIXME: add other *stat interceptors.
INIT_UTMP;
INIT_UTMPX;
+ INIT_GETLOADAVG;
}
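The new getloadavg interceptor marks only the res * sizeof(double) bytes that were actually filled in as initialized. The call it wraps, for reference:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      double loads[3];
      int n = getloadavg(loads, 3);   // interceptor marks n doubles as written
      for (int i = 0; i < n; i++)
        printf("load[%d] = %.2f\n", i, loads[i]);
      return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
    }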
diff --git a/lib/sanitizer_common/sanitizer_common_interface.inc b/lib/sanitizer_common/sanitizer_common_interface.inc
new file mode 100644
index 000000000000..550427c906a6
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -0,0 +1,39 @@
+//===-- sanitizer_common_interface.inc ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_set_report_fd)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
+// Sanitizer weak hooks
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)
+// Stacktrace interface.
+INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
+INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
+INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
diff --git a/lib/sanitizer_common/sanitizer_common_interface_posix.inc b/lib/sanitizer_common/sanitizer_common_interface_posix.inc
new file mode 100644
index 000000000000..bbc725a9d4d1
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common_interface_posix.inc
@@ -0,0 +1,14 @@
+//===-- sanitizer_common_interface_posix.inc ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list only available for Posix systems.
+//===----------------------------------------------------------------------===//
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
index 49ca961f3cb0..cf200512de84 100644
--- a/lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -47,7 +47,8 @@ void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
}
-void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
+void ReportErrorSummary(const char *error_type, const StackTrace *stack,
+ const char *alt_tool_name) {
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
@@ -59,7 +60,7 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
- ReportErrorSummary(error_type, frame->info);
+ ReportErrorSummary(error_type, frame->info, alt_tool_name);
frame->ClearAll();
#endif
}
@@ -123,7 +124,7 @@ void BackgroundThread(void *arg) {
if (heap_profile &&
current_rss_mb > rss_during_last_reported_profile * 1.1) {
Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
- __sanitizer_print_memory_profile(90);
+ __sanitizer_print_memory_profile(90, 20);
rss_during_last_reported_profile = current_rss_mb;
}
}
@@ -162,8 +163,8 @@ void MaybeStartBackgroudThread() {
} // namespace __sanitizer
-void NOINLINE
-__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
+ __sanitizer_sandbox_arguments *args) {
__sanitizer::PrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();
diff --git a/lib/sanitizer_common/sanitizer_coverage_interface.inc b/lib/sanitizer_common/sanitizer_coverage_interface.inc
new file mode 100644
index 000000000000..ae691bd9dd27
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_interface.inc
@@ -0,0 +1,40 @@
+//===-- sanitizer_coverage_interface.inc ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_cov)
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
+INTERFACE_FUNCTION(__sanitizer_cov_init)
+INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
+INTERFACE_FUNCTION(__sanitizer_cov_with_check)
+INTERFACE_FUNCTION(__sanitizer_dump_coverage)
+INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
+INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
+INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
+INTERFACE_FUNCTION(__sanitizer_get_total_unique_caller_callee_pairs)
+INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
+INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
+INTERFACE_FUNCTION(__sanitizer_reset_coverage)
+INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
+INTERFACE_WEAK_FUNCTION(__sancov_default_options)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
index 5945ebbe90b2..e934af3ed975 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -171,7 +171,11 @@ class CoverageData {
// - not thread-safe;
// - does not support long traces;
// - not tuned for performance.
- static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
+ // Windows doesn't do overcommit (committed virtual memory costs swap), so
+ // programs can't reliably map such large amounts of virtual memory.
+  // TODO(etienneb): Find a way to support coverage of larger executables.
+static const uptr kTrEventArrayMaxSize =
+ (SANITIZER_WORDSIZE == 32 || SANITIZER_WINDOWS) ? 1 << 22 : 1 << 30;
u32 *tr_event_array;
uptr tr_event_array_size;
u32 *tr_event_pointer;
@@ -415,8 +419,7 @@ void CoverageData::Add(uptr pc, u32 *guard) {
uptr idx = -guard_value - 1;
if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
return; // May happen after fork when pc_array_index becomes 0.
- CHECK_LT(idx * sizeof(uptr),
- atomic_load(&pc_array_size, memory_order_acquire));
+ CHECK_LT(idx, atomic_load(&pc_array_size, memory_order_acquire));
uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
pc_array[idx] = BundlePcAndCounter(pc, counter);
}
@@ -940,7 +943,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
if (static_cast<s32>(
__sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
- __sanitizer_cov(guard);
+ coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
+ guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
@@ -954,9 +958,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
coverage_data.DumpAll();
-#if SANITIZER_LINUX
__sanitizer_dump_trace_pc_guard_coverage();
-#endif
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
@@ -1018,27 +1020,16 @@ SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
+
// Default empty implementations (weak). Users should redefine them.
-#if !SANITIZER_WINDOWS // weak does not work on Windows.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp1() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp2() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp4() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp8() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_switch() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_div4() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_div8() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_gep() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_pc_indir() {}
-#endif // !SANITIZER_WINDOWS
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
index df6d10f81762..73c36082bc67 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
@@ -156,14 +156,13 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
return __sancov::SanitizerDumpCoverage(pcs, len);
}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
-__sanitizer_cov_trace_pc_guard(u32* guard) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
if (!*guard) return;
__sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
-__sanitizer_cov_trace_pc_guard_init(u32* start, u32* end) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
+ u32* start, u32* end) {
if (start == end || *start) return;
__sancov::pc_guard_controller.InitTracePcGuard(start, end);
}
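These weak definitions are the do-nothing defaults; a fuzzer or other coverage consumer linked into an instrumented binary supplies the strong ones. A minimal strong pair in the shape clang's SanitizerCoverage trace-pc-guard documentation describes (the printing is illustrative):

    #include <cstdint>
    #include <cstdio>

    // Strong definitions override the weak defaults above when this object is
    // linked into a binary built with -fsanitize-coverage=trace-pc-guard.
    extern "C" void __sanitizer_cov_trace_pc_guard_init(uint32_t *start,
                                                        uint32_t *stop) {
      static uint32_t n = 1;
      if (start == stop || *start) return;                 // already initialized
      for (uint32_t *g = start; g < stop; g++) *g = n++;   // assign unique IDs
    }

    extern "C" void __sanitizer_cov_trace_pc_guard(uint32_t *guard) {
      if (!*guard) return;                      // edge already reported
      std::fprintf(stderr, "guard %u hit\n", *guard);
      *guard = 0;                               // report each edge once
    }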
diff --git a/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc b/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc
new file mode 100644
index 000000000000..d5e459f2c020
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc
@@ -0,0 +1,21 @@
+//===-- sanitizer_coverage_win_dll_thunk.cc -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_dll_thunk.h"
+// Sanitizer Coverage interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DLL_THUNK
diff --git a/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc b/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc
new file mode 100644
index 000000000000..988a2065ffa9
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc
@@ -0,0 +1,21 @@
+//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cc -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Coverage, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from sanitizer coverage.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
diff --git a/lib/sanitizer_common/sanitizer_coverage_win_sections.cc b/lib/sanitizer_common/sanitizer_coverage_win_sections.cc
new file mode 100644
index 000000000000..4b0bbf1ed1e5
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_win_sections.cc
@@ -0,0 +1,22 @@
+//===-- sanitizer_coverage_win_sections.cc --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines delimiters for Sanitizer Coverage's section.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include <stdint.h>
+#pragma section(".SCOV$A", read, write) // NOLINT
+#pragma section(".SCOV$Z", read, write) // NOLINT
+extern "C" {
+__declspec(allocate(".SCOV$A")) uint32_t __start___sancov_guards = 0;
+__declspec(allocate(".SCOV$Z")) uint32_t __stop___sancov_guards = 0;
+}
+#endif // SANITIZER_WINDOWS
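The bracketing works because COFF merges sections whose names share the prefix before '$' and orders the pieces by the suffix, so data placed in .SCOV$A sorts before the instrumentation's guards and data in .SCOV$Z after them. An MSVC-only sketch of the same trick, with made-up section and symbol names:

    // MSVC-only sketch of COFF $-section grouping (all names are illustrative).
    #ifdef _MSC_VER
    #pragma section(".MYSEC$A", read, write)
    #pragma section(".MYSEC$M", read, write)
    #pragma section(".MYSEC$Z", read, write)
    extern "C" {
    __declspec(allocate(".MYSEC$A")) int my_start = 0;   // sorts first
    __declspec(allocate(".MYSEC$M")) int my_item  = 42;  // payload in between
    __declspec(allocate(".MYSEC$Z")) int my_stop  = 0;   // sorts last
    }
    int main() {
      // After the linker merges ".MYSEC$*", my_start and my_stop bracket my_item.
      return (&my_start < &my_item && &my_item < &my_stop) ? 0 : 1;
    }
    #else
    int main() { return 0; }
    #endif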
diff --git a/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc b/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc
new file mode 100644
index 000000000000..0926f460b3cd
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc
@@ -0,0 +1,24 @@
+//===-- sanitizer_coverage_win_weak_interception.cc -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Sanitizer Coverage when it is implemented as a
+// shared library on Windows (dll), in order to delegate the calls of weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_interface_internal.h"
+#include "sancov_flags.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC
diff --git a/lib/sanitizer_common/sanitizer_flags.inc b/lib/sanitizer_common/sanitizer_flags.inc
index d7fa34a58081..40f8b6204cda 100644
--- a/lib/sanitizer_common/sanitizer_flags.inc
+++ b/lib/sanitizer_common/sanitizer_flags.inc
@@ -62,7 +62,7 @@ COMMON_FLAG(
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
-COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
+COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
@@ -79,7 +79,9 @@ COMMON_FLAG(int, print_module_map, 0,
"exits, 2 = print after each report.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
COMMON_FLAG(bool, handle_segv, true,
- "If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
+ "If set, registers the tool's custom SIGSEGV handler.")
+COMMON_FLAG(bool, handle_sigbus, true,
+ "If set, registers the tool's custom SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
COMMON_FLAG(bool, handle_sigill, false,
@@ -190,6 +192,9 @@ COMMON_FLAG(bool, intercept_strstr, true,
COMMON_FLAG(bool, intercept_strspn, true,
"If set, uses custom wrappers for strspn and strcspn function "
"to find more errors.")
+COMMON_FLAG(bool, intercept_strtok, true,
+ "If set, uses a custom wrapper for the strtok function "
+ "to find more errors.")
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
diff --git a/lib/sanitizer_common/sanitizer_interface_internal.h b/lib/sanitizer_common/sanitizer_interface_internal.h
index 174d5e92ba44..b28d8f08e7a3 100644
--- a/lib/sanitizer_common/sanitizer_interface_internal.h
+++ b/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -69,6 +69,32 @@ extern "C" {
int __sanitizer_get_module_and_offset_for_pc(
__sanitizer::uptr pc, char *module_path,
__sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
- } // extern "C"
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_switch();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_gep();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_indir();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
+ __sanitizer::u32*);
+} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index 5338f79423cc..ea5022e31bc3 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -21,8 +21,11 @@
// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
+#if SANITIZER_IMPORT_INTERFACE
+# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)
+#else
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
-// FIXME find out what we need on Windows, if anything.
+#endif
# define SANITIZER_WEAK_ATTRIBUTE
#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
@@ -32,11 +35,46 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if (SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_WINDOWS) && !SANITIZER_GO
+//--------------------------- WEAK FUNCTIONS ---------------------------------//
+// When working with weak functions, to simplify the code and make it more
+// portable, define a default implementation with this macro whenever possible:
+//
+// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, <parameter list>)
+//
+// For example:
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }
+//
+#if SANITIZER_WINDOWS
+#include "sanitizer_win_defs.h"
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)
+#else
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE \
+ ReturnType Name(__VA_ARGS__)
+#endif
+
+// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that
+// will evaluate to a null pointer when not defined.
+#if (SANITIZER_LINUX || SANITIZER_MAC) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
+// For weak hooks that are called very often, where we want to avoid the
+// overhead of executing the default implementation when it is not necessary,
+// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to define the default
+// implementation only for platforms that don't support weak symbols. For
+// example:
+//
+// #if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {
+// return a > b;
+// }
+// #endif
+//
+// And then use it as: if (compare_hook) compare_hook(a, b);
+//----------------------------------------------------------------------------//
+
// We can use .preinit_array section on Linux to call sanitizer initialization
// functions very early in the process startup (unless PIC macro is defined).
@@ -289,7 +327,12 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
enum LinkerInitialized { LINKER_INITIALIZED = 0 };
#if !defined(_MSC_VER) || defined(__clang__)
+#if SANITIZER_S390_31
+#define GET_CALLER_PC() \
+ (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
+#else
#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
+#endif
#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
inline void Trap() {
__builtin_trap();
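On ELF and Mach-O, SANITIZER_INTERFACE_WEAK_DEF boils down to an exported __attribute__((weak)) definition that a strong definition elsewhere in the link replaces; the Windows branch reaches the same effect through WIN_WEAK_EXPORT_DEF. A plain-attribute sketch of the non-Windows behaviour, with a made-up function name:

    #include <cstdio>

    // Weak default: roughly what the macro expands to on ELF, minus the
    // visibility/export attribute.
    extern "C" __attribute__((weak)) const char *tool_default_options() {
      return "";
    }

    int main() {
      // Prints an empty string unless another object in the link supplies a
      // strong extern "C" tool_default_options() definition.
      std::printf("options: \"%s\"\n", tool_default_options());
      return 0;
    }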
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index 7328a5c0ac18..0b5473d95336 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -77,6 +77,20 @@ extern char **environ; // provided by crt1
#include <sys/signal.h>
#endif
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
+#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16)
+# define SANITIZER_USE_GETAUXVAL 1
+#else
+# define SANITIZER_USE_GETAUXVAL 0
+#endif
+
+#if SANITIZER_USE_GETAUXVAL
+#include <sys/auxv.h>
+#endif
+
#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
@@ -805,6 +819,8 @@ uptr GetPageSize() {
return 4096;
#elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
+#elif SANITIZER_USE_GETAUXVAL
+ return getauxval(AT_PAGESZ);
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
#endif
@@ -1097,36 +1113,50 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
-/* Stack frame offsets. */
-#if _CALL_ELF != 2
-#define FRAME_MIN_SIZE 112
-#define FRAME_TOC_SAVE 40
+// Stack frame structure.
+#if SANITIZER_PPC64V1
+// Back chain == 0 (SP + 112)
+// Frame (112 bytes):
+// Parameter save area (SP + 48), 8 doublewords
+// TOC save area (SP + 40)
+// Link editor doubleword (SP + 32)
+// Compiler doubleword (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 112
+# define FRAME_TOC_SAVE_OFFSET 40
+#elif SANITIZER_PPC64V2
+// Back chain == 0 (SP + 32)
+// Frame (32 bytes):
+// TOC save area (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 32
+# define FRAME_TOC_SAVE_OFFSET 24
#else
-#define FRAME_MIN_SIZE 32
-#define FRAME_TOC_SAVE 24
+# error "Unsupported PPC64 ABI"
#endif
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
- child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
- ((unsigned long long *)child_stack)[0] = (uptr)fn;
- ((unsigned long long *)child_stack)[1] = (uptr)arg;
register int (*__fn)(void *) __asm__("r3") = fn;
register void *__cstack __asm__("r4") = child_stack;
register int __flags __asm__("r5") = flags;
- register void * __arg __asm__("r6") = arg;
- register int * __ptidptr __asm__("r7") = parent_tidptr;
- register void * __newtls __asm__("r8") = newtls;
- register int * __ctidptr __asm__("r9") = child_tidptr;
+ register void *__arg __asm__("r6") = arg;
+ register int *__ptidptr __asm__("r7") = parent_tidptr;
+ register void *__newtls __asm__("r8") = newtls;
+ register int *__ctidptr __asm__("r9") = child_tidptr;
__asm__ __volatile__(
-      /* fn, arg, child_stack are saved across the syscall */
+ /* fn and arg are saved across the syscall */
"mr 28, %5\n\t"
- "mr 29, %6\n\t"
"mr 27, %8\n\t"
/* syscall
+ r0 == __NR_clone
r3 == flags
r4 == child_stack
r5 == parent_tidptr
@@ -1144,15 +1174,21 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
"crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
"bne- cr1, 1f\n\t"
+ /* Set up stack frame */
+ "li 29, 0\n\t"
+ "stdu 29, -8(1)\n\t"
+ "stdu 1, -%12(1)\n\t"
/* Do the function call */
"std 2, %13(1)\n\t"
-#if _CALL_ELF != 2
+#if SANITIZER_PPC64V1
"ld 0, 0(28)\n\t"
"ld 2, 8(28)\n\t"
"mtctr 0\n\t"
-#else
+#elif SANITIZER_PPC64V2
"mr 12, 28\n\t"
"mtctr 12\n\t"
+#else
+# error "Unsupported PPC64 ABI"
#endif
"mr 3, 27\n\t"
"bctrl\n\t"
@@ -1166,13 +1202,151 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
"1:\n\t"
"mr %0, 3\n\t"
: "=r" (res)
- : "0" (-1), "i" (EINVAL),
- "i" (__NR_clone), "i" (__NR_exit),
- "r" (__fn), "r" (__cstack), "r" (__flags),
- "r" (__arg), "r" (__ptidptr), "r" (__newtls),
- "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE)
- : "cr0", "cr1", "memory", "ctr",
- "r0", "r29", "r27", "r28");
+ : "0" (-1),
+ "i" (EINVAL),
+ "i" (__NR_clone),
+ "i" (__NR_exit),
+ "r" (__fn),
+ "r" (__cstack),
+ "r" (__flags),
+ "r" (__arg),
+ "r" (__ptidptr),
+ "r" (__newtls),
+ "r" (__ctidptr),
+ "i" (FRAME_SIZE),
+ "i" (FRAME_TOC_SAVE_OFFSET)
+ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
+ return res;
+}
+#elif defined(__i386__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 7 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)flags;
+ ((unsigned int *)child_stack)[1] = (uptr)0;
+ ((unsigned int *)child_stack)[2] = (uptr)fn;
+ ((unsigned int *)child_stack)[3] = (uptr)arg;
+ __asm__ __volatile__(
+ /* %eax = syscall(%eax = SYSCALL(clone),
+ * %ebx = flags,
+ * %ecx = child_stack,
+ * %edx = parent_tidptr,
+ * %esi = new_tls,
+ * %edi = child_tidptr)
+ */
+
+ /* Obtain flags */
+ "movl (%%ecx), %%ebx\n"
+ /* Do the system call */
+ "pushl %%ebx\n"
+ "pushl %%esi\n"
+ "pushl %%edi\n"
+ /* Remember the flag value. */
+ "movl %%ebx, (%%ecx)\n"
+ "int $0x80\n"
+ "popl %%edi\n"
+ "popl %%esi\n"
+ "popl %%ebx\n"
+
+ /* if (%eax != 0)
+ * return;
+ */
+
+ "test %%eax,%%eax\n"
+ "jnz 1f\n"
+
+ /* terminate the stack frame */
+ "xorl %%ebp,%%ebp\n"
+ /* Call FN. */
+ "call *%%ebx\n"
+#ifdef PIC
+ "call here\n"
+ "here:\n"
+ "popl %%ebx\n"
+ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
+#endif
+ /* Call exit */
+ "movl %%eax, %%ebx\n"
+ "movl %2, %%eax\n"
+ "int $0x80\n"
+ "1:\n"
+ : "=a" (res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+ "c"(child_stack),
+ "d"(parent_tidptr),
+ "S"(newtls),
+ "D"(child_tidptr)
+ : "memory");
+ return res;
+}
+#elif defined(__arm__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ unsigned int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)fn;
+ ((unsigned int *)child_stack)[1] = (uptr)arg;
+ register int r0 __asm__("r0") = flags;
+ register void *r1 __asm__("r1") = child_stack;
+ register int *r2 __asm__("r2") = parent_tidptr;
+ register void *r3 __asm__("r3") = newtls;
+ register int *r4 __asm__("r4") = child_tidptr;
+ register int r7 __asm__("r7") = __NR_clone;
+
+#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
+# define ARCH_HAS_BX
+#endif
+#if __ARM_ARCH > 4
+# define ARCH_HAS_BLX
+#endif
+
+#ifdef ARCH_HAS_BX
+# ifdef ARCH_HAS_BLX
+# define BLX(R) "blx " #R "\n"
+# else
+# define BLX(R) "mov lr, pc; bx " #R "\n"
+# endif
+#else
+# define BLX(R) "mov lr, pc; mov pc," #R "\n"
+#endif
+
+ __asm__ __volatile__(
+ /* %r0 = syscall(%r7 = SYSCALL(clone),
+ * %r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = new_tls,
+ * %r4 = child_tidptr)
+ */
+
+ /* Do the system call */
+ "swi 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp r0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldr r0, [sp, #4]\n"
+ "ldr ip, [sp], #8\n"
+ BLX(ip)
+ /* Call _exit(%r0). */
+ "mov r7, %7\n"
+ "swi 0x0\n"
+ "1:\n"
+ "mov %0, r0\n"
+ : "=r"(res)
+ : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
+ "i"(__NR_exit)
+ : "memory");
return res;
}
#endif // defined(__x86_64__) && SANITIZER_LINUX
@@ -1227,7 +1401,9 @@ bool IsHandledDeadlySignal(int signum) {
return true;
if (common_flags()->handle_sigfpe && signum == SIGFPE)
return true;
- return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+ if (common_flags()->handle_segv && signum == SIGSEGV)
+ return true;
+ return common_flags()->handle_sigbus && signum == SIGBUS;
}
#if !SANITIZER_GO
@@ -1395,6 +1571,21 @@ void MaybeReexec() {
void PrintModuleMap() { }
+void CheckNoDeepBind(const char *filename, int flag) {
+#ifdef RTLD_DEEPBIND
+ if (flag & RTLD_DEEPBIND) {
+ Report(
+ "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
+        " which is incompatible with the sanitizer runtime "
+        "(see https://github.com/google/sanitizers/issues/611 for details"
+        "). If you want to run the %s library under sanitizers, please remove "
+ "RTLD_DEEPBIND from dlopen flags.\n",
+ filename, filename);
+ Die();
+ }
+#endif
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
UNREACHABLE("FindAvailableMemoryRange is not available");
return 0;
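A hedged sketch of the caller side of CheckNoDeepBind: an interceptor or wrapper would be expected to route the dlopen flags through this check before dispatching to the real dlopen (the wrapper name and direct dlopen call below are illustrative; the actual interceptor plumbing is not part of this hunk):

#include <dlfcn.h>

// Illustrative wrapper only; assumes the __sanitizer namespace is in scope.
void *dlopen_checked(const char *filename, int flag) {
  CheckNoDeepBind(filename, flag);  // Dies with the report above if
                                    // RTLD_DEEPBIND is present.
  return dlopen(filename, flag);
}

// A client call such as
//   dlopen_checked("libfoo.so", RTLD_NOW | RTLD_DEEPBIND);
// would therefore terminate with the RTLD_DEEPBIND report.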
diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h
index d4d0f47eed02..14047b4803f8 100644
--- a/lib/sanitizer_common/sanitizer_linux.h
+++ b/lib/sanitizer_common/sanitizer_linux.h
@@ -48,7 +48,8 @@ int internal_sigaction_syscall(int signum, const void *act, void *oldact);
#endif
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
- || defined(__powerpc64__) || defined(__s390__)
+ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
+ || defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
index f99f0b5948de..6fde671f882d 100644
--- a/lib/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -183,8 +183,8 @@ void InitTlsSize() { }
#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \
- || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__)) \
- && SANITIZER_LINUX && !SANITIZER_ANDROID
+ || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \
+ || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
@@ -192,14 +192,14 @@ uptr ThreadDescriptorSize() {
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
-#if defined(__x86_64__) || defined(__i386__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
#ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
char *end;
int minor = internal_simple_strtoll(buf + 8, &end, 10);
- if (end != buf + 8 && (*end == '\0' || *end == '.')) {
+ if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) {
int patch = 0;
if (*end == '.')
// strtoll will return 0 if no valid conversion could be performed
@@ -208,6 +208,9 @@ uptr ThreadDescriptorSize() {
/* sizeof(struct pthread) values from various glibc versions. */
if (SANITIZER_X32)
val = 1728; // Assume only one particular version for x32.
+ // For ARM sizeof(struct pthread) changed in Glibc 2.23.
+ else if (SANITIZER_ARM)
+ val = minor <= 22 ? 1120 : 1216;
else if (minor <= 3)
val = FIRST_32_SECOND_64(1104, 1696);
else if (minor == 4)
@@ -270,9 +273,7 @@ static uptr TlsPreTcbSize() {
# endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
- (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
- InitTlsSize();
- g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1);
+ RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
return kTlsPreTcbSize;
}
#endif
@@ -295,7 +296,7 @@ uptr ThreadSelf() {
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__)
+# elif defined(__aarch64__) || defined(__arm__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
# elif defined(__s390__)
@@ -344,7 +345,8 @@ static void GetTls(uptr *addr, uptr *size) {
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
-# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__)
+# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
+ || defined(__arm__)
*addr = ThreadSelf();
*size = GetTlsSize();
# else
@@ -379,6 +381,8 @@ uptr GetTlsSize() {
uptr addr, size;
GetTls(&addr, &size);
return size;
+#elif defined(__mips__) || defined(__powerpc64__)
+ return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
return g_tls_size;
#endif
diff --git a/lib/sanitizer_common/sanitizer_linux_s390.cc b/lib/sanitizer_common/sanitizer_linux_s390.cc
index 053fd174b2bf..c2b03b27e66c 100644
--- a/lib/sanitizer_common/sanitizer_linux_s390.cc
+++ b/lib/sanitizer_common/sanitizer_linux_s390.cc
@@ -136,6 +136,18 @@ static bool FixedCVE_2016_2143() {
if (ptr[0] == '.')
patch = internal_simple_strtoll(ptr+1, &ptr, 10);
if (major < 3) {
+ if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
+ internal_strstr(ptr, ".el6")) {
+ // Check RHEL6
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 657) // 2.6.32-657.el6 or later
+ return true;
+ if (r1 == 642 && ptr[0] == '.') {
+ int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
+ return true;
+ }
+ }
// <3.0 is bad.
return false;
} else if (major == 3) {
@@ -145,6 +157,18 @@ static bool FixedCVE_2016_2143() {
// 3.12.58+ is OK.
if (minor == 12 && patch >= 58)
return true;
+ if (minor == 10 && patch == 0 && ptr[0] == '-' &&
+ internal_strstr(ptr, ".el7")) {
+ // Check RHEL7
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 426) // 3.10.0-426.el7 or later
+ return true;
+ if (r1 == 327 && ptr[0] == '.') {
+ int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
+ return true;
+ }
+ }
// Otherwise, bad.
return false;
} else if (major == 4) {
diff --git a/lib/sanitizer_common/sanitizer_list.h b/lib/sanitizer_common/sanitizer_list.h
index c78cb4cdf8e0..598ce51d848b 100644
--- a/lib/sanitizer_common/sanitizer_list.h
+++ b/lib/sanitizer_common/sanitizer_list.h
@@ -70,6 +70,17 @@ struct IntrusiveList {
size_--;
}
+ void extract(Item *prev, Item *x) {
+ CHECK(!empty());
+ CHECK_NE(prev, nullptr);
+ CHECK_NE(x, nullptr);
+ CHECK_EQ(prev->next, x);
+ prev->next = x->next;
+ if (last_ == x)
+ last_ = prev;
+ size_--;
+ }
+
Item *front() { return first_; }
const Item *front() const { return first_; }
Item *back() { return last_; }
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index b4f8ab5e3a9e..34af4e91876e 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -93,20 +93,22 @@ namespace __sanitizer {
#include "sanitizer_syscall_generic.inc"
-// Direct syscalls, don't call libmalloc hooks.
+// Direct syscalls that don't call libmalloc hooks (not available on 10.6).
extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
- off_t off);
-extern "C" int __munmap(void *, size_t);
+ off_t off) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
// ---------------------- sanitizer_libc.h
uptr internal_mmap(void *addr, size_t length, int prot, int flags,
int fd, u64 offset) {
if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL);
- return (uptr)__mmap(addr, length, prot, flags, fd, offset);
+ if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
}
uptr internal_munmap(void *addr, uptr length) {
- return __munmap(addr, length);
+ if (&__munmap) return __munmap(addr, length);
+ return munmap(addr, length);
}
int internal_mprotect(void *addr, uptr length, int prot) {
@@ -192,17 +194,19 @@ uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
return sigprocmask(how, set, oldset);
}
-// Doesn't call pthread_atfork() handlers.
-extern "C" pid_t __fork(void);
+// __fork doesn't call pthread_atfork() handlers (not available on 10.6).
+extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;
int internal_fork() {
- return __fork();
+ if (&__fork)
+ return __fork();
+ return fork();
}
int internal_forkpty(int *amaster) {
int master, slave;
if (openpty(&master, &slave, nullptr, nullptr, nullptr) == -1) return -1;
- int pid = __fork();
+ int pid = internal_fork();
if (pid == -1) {
close(master);
close(slave);
@@ -344,20 +348,16 @@ BlockingMutex::BlockingMutex() {
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK_EQ(OS_SPINLOCK_INIT, 0);
- CHECK_NE(owner_, (uptr)pthread_self());
+ CHECK_EQ(owner_, 0);
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
- CHECK(!owner_);
- owner_ = (uptr)pthread_self();
}
void BlockingMutex::Unlock() {
- CHECK(owner_ == (uptr)pthread_self());
- owner_ = 0;
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
void BlockingMutex::CheckLocked() {
- CHECK_EQ((uptr)pthread_self(), owner_);
+ CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
}
u64 NanoTime() {
@@ -402,7 +402,11 @@ bool IsHandledDeadlySignal(int signum) {
return true;
if (common_flags()->handle_sigill && signum == SIGILL)
return true;
- return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+ if (common_flags()->handle_sigfpe && signum == SIGFPE)
+ return true;
+ if (common_flags()->handle_segv && signum == SIGSEGV)
+ return true;
+ return common_flags()->handle_sigbus && signum == SIGBUS;
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
@@ -884,6 +888,10 @@ void PrintModuleMap() {
Printf("End of module map.\n");
}
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_malloc_mac.inc b/lib/sanitizer_common/sanitizer_malloc_mac.inc
index 6fbee07c16cc..5699c59043e9 100644
--- a/lib/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/lib/sanitizer_common/sanitizer_malloc_mac.inc
@@ -59,6 +59,9 @@ INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {
uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);
}
+ if (zone->zone_name) {
+ COMMON_MALLOC_FREE((void *)zone->zone_name);
+ }
COMMON_MALLOC_FREE(zone);
}
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
index d06fc45ff931..1759bf13f689 100644
--- a/lib/sanitizer_common/sanitizer_mutex.h
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -83,6 +83,14 @@ class BlockingMutex {
BlockingMutex();
void Lock();
void Unlock();
+
+  // This function does not check that the calling thread is the thread that
+  // owns the mutex. Checking ownership, while more strictly correct, causes
+  // problems in cases like StopTheWorld, where a parent thread owns the
+  // mutex but a child checks that it is locked. Rather than maintaining
+  // complex state to work around those situations, the check only verifies
+  // that the mutex is held, and assumes callers are generally well-behaved.
void CheckLocked();
private:
uptr opaque_storage_[10];
diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h
index d9a8e8df1573..1a6410878579 100644
--- a/lib/sanitizer_common/sanitizer_platform.h
+++ b/lib/sanitizer_common/sanitizer_platform.h
@@ -162,6 +162,12 @@
# define SANITIZER_PPC64V2 0
#endif
+#if defined(__arm__)
+# define SANITIZER_ARM 1
+#else
+# define SANITIZER_ARM 0
+#endif
+
// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fall back to SizeClassAllocator32.
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 62875d11a1de..a583e989c315 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -17,9 +17,11 @@
#include "sanitizer_internal_defs.h"
#if !SANITIZER_WINDOWS
+# define SI_WINDOWS 0
# define SI_NOT_WINDOWS 1
# include "sanitizer_platform_limits_posix.h"
#else
+# define SI_WINDOWS 1
# define SI_NOT_WINDOWS 0
#endif
@@ -72,6 +74,7 @@
#define SANITIZER_INTERCEPT_STRCMP 1
#define SANITIZER_INTERCEPT_STRSTR 1
#define SANITIZER_INTERCEPT_STRCASESTR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRTOK 1
#define SANITIZER_INTERCEPT_STRCHR 1
#define SANITIZER_INTERCEPT_STRCHRNUL SI_UNIX_NOT_MAC
#define SANITIZER_INTERCEPT_STRRCHR 1
@@ -83,8 +86,16 @@
#define SANITIZER_INTERCEPT_MEMMOVE 1
#define SANITIZER_INTERCEPT_MEMCPY 1
#define SANITIZER_INTERCEPT_MEMCMP 1
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
+# define SI_MAC_DEPLOYMENT_BELOW_10_7 1
+#else
+# define SI_MAC_DEPLOYMENT_BELOW_10_7 0
+#endif
+// memmem doesn't exist on Darwin before 10.7.
// FIXME: enable memmem on Windows.
-#define SANITIZER_INTERCEPT_MEMMEM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MEMMEM \
+ SI_NOT_WINDOWS && !SI_MAC_DEPLOYMENT_BELOW_10_7
#define SANITIZER_INTERCEPT_MEMCHR 1
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
@@ -93,6 +104,9 @@
#define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FREAD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FWRITE SI_NOT_WINDOWS
+
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
@@ -302,7 +316,7 @@
#define SANITIZER_INTERCEPT_CTERMID SI_LINUX || SI_MAC || SI_FREEBSD
#define SANITIZER_INTERCEPT_CTERMID_R SI_MAC || SI_FREEBSD
-#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX
+#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX || SI_MAC || SI_WINDOWS
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
@@ -316,9 +330,14 @@
#define SANITIZER_INTERCEPT_UTMP SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD
#define SANITIZER_INTERCEPT_UTMPX SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD
+#define SANITIZER_INTERCEPT_GETLOADAVG \
+ SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD
+
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_PVALLOC (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_CFREE (!SI_FREEBSD && !SI_MAC)
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC)
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index c70d5a40cb46..9916f4d38538 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -358,6 +358,22 @@ SignalContext SignalContext::Create(void *siginfo, void *context) {
return SignalContext(context, addr, pc, sp, bp, is_memory_access, write_flag);
}
+const char *DescribeSignalOrException(int signo) {
+ switch (signo) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ case SIGSEGV:
+ return "SEGV";
+ case SIGBUS:
+ return "BUS";
+ }
+ return "UNKNOWN SIGNAL";
+}
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX
diff --git a/lib/sanitizer_common/sanitizer_posix.h b/lib/sanitizer_common/sanitizer_posix.h
index 7f862cd9e393..e7d37cbf0882 100644
--- a/lib/sanitizer_common/sanitizer_posix.h
+++ b/lib/sanitizer_common/sanitizer_posix.h
@@ -87,6 +87,9 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum);
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]);
+
+bool IsStateDetached(int state);
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX_H
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index dd62140b5e07..8d688f3778b5 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -418,6 +418,10 @@ int WaitForProcess(pid_t pid) {
return process_status;
}
+bool IsStateDetached(int state) {
+ return state == PTHREAD_CREATE_DETACHED;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
index c8317be6043f..99b7ff1b55cf 100644
--- a/lib/sanitizer_common/sanitizer_printf.cc
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -214,15 +214,11 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)) {
}
// Can be overridden in frontend.
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void OnPrint(const char *str) {
- (void)str;
-}
-#elif SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
-void OnPrint(const char *str);
+#if SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
+// Implementation must be defined in frontend.
+extern "C" void OnPrint(const char *str);
#else
-void OnPrint(const char *str) {
+SANITIZER_INTERFACE_WEAK_DEF(void, OnPrint, const char *str) {
(void)str;
}
#endif
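A hedged sketch of the override that this weak definition enables: a frontend that embeds the runtime can provide its own strong OnPrint to capture sanitizer output (the logging sink below is hypothetical, not part of this patch):

// In the embedding frontend: a strong definition overrides the weak default.
extern "C" void OnPrint(const char *str) {
  ForwardToHostLogger(str);  // Hypothetical sink owned by the frontend.
}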
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
index 1a0d9545b7e1..db38867ced28 100644
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -31,6 +31,40 @@ struct QuarantineBatch {
uptr size;
uptr count;
void *batch[kSize];
+
+ void init(void *ptr, uptr size) {
+ count = 1;
+ batch[0] = ptr;
+ this->size = size + sizeof(QuarantineBatch); // Account for the batch size.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr quarantined_size() const {
+ return size - sizeof(QuarantineBatch);
+ }
+
+ void push_back(void *ptr, uptr size) {
+ CHECK_LT(count, kSize);
+ batch[count++] = ptr;
+ this->size += size;
+ }
+
+ bool can_merge(const QuarantineBatch* const from) const {
+ return count + from->count <= kSize;
+ }
+
+ void merge(QuarantineBatch* const from) {
+ CHECK_LE(count + from->count, kSize);
+ CHECK_GE(size, sizeof(QuarantineBatch));
+
+ for (uptr i = 0; i < from->count; ++i)
+ batch[count + i] = from->batch[i];
+ count += from->count;
+ size += from->quarantined_size();
+
+ from->count = 0;
+ from->size = sizeof(QuarantineBatch);
+ }
};
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
@@ -69,7 +103,7 @@ class Quarantine {
if (cache_size) {
c->Enqueue(cb, ptr, size);
} else {
- // cache_size == 0 only when size == 0 (see Init).
+ // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
cb.Recycle(ptr);
}
// Check cache size anyway to accommodate for runtime cache_size change.
@@ -88,6 +122,8 @@ class Quarantine {
void PrintStats() const {
// It assumes that the world is stopped, just as the allocator's PrintStats.
+ Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
+ GetSize() >> 20, GetCacheSize() >> 10);
cache_.PrintStats();
}
@@ -108,9 +144,27 @@ class Quarantine {
uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
{
SpinMutexLock l(&cache_mutex_);
+      // Go over the batches and merge partially filled ones to
+      // save some memory; otherwise the batches themselves (whose memory is
+      // counted against the quarantine limit) can outweigh the user's actual
+      // quarantined chunks, which defeats the purpose of the quarantine.
+ uptr cache_size = cache_.Size();
+ uptr overhead_size = cache_.OverheadSize();
+ CHECK_GE(cache_size, overhead_size);
+      // Do the merge only when the overhead exceeds this predefined limit
+      // (which might require some tuning). It saves us a merge attempt when
+      // the batch list is unlikely to contain batches suitable for merging.
+ const uptr kOverheadThresholdPercents = 100;
+ if (cache_size > overhead_size &&
+ overhead_size * (100 + kOverheadThresholdPercents) >
+ cache_size * kOverheadThresholdPercents) {
+ cache_.MergeBatches(&tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
while (cache_.Size() > min_size) {
- QuarantineBatch *b = cache_.DequeueBatch();
- tmp.EnqueueBatch(b);
+ tmp.EnqueueBatch(cache_.DequeueBatch());
}
}
recycle_mutex_.Unlock();
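A small worked example of the merge threshold above (values illustrative; the helper below merely restates the condition from this hunk and is not part of the patch): with kOverheadThresholdPercents == 100 the merge kicks in once the batch overhead exceeds half of the cache size.

// Restates the condition checked above, for illustration only.
// uptr is the sanitizer-common unsigned pointer-sized integer typedef.
bool ShouldMergeBatches(uptr cache_size, uptr overhead_size) {
  const uptr kOverheadThresholdPercents = 100;
  return cache_size > overhead_size &&
         overhead_size * (100 + kOverheadThresholdPercents) >
             cache_size * kOverheadThresholdPercents;
}
// ShouldMergeBatches(96 << 10, 56 << 10) -> true  (overhead ~58% of cache)
// ShouldMergeBatches(96 << 10, 40 << 10) -> false (overhead ~42% of cache)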
@@ -145,26 +199,33 @@ class QuarantineCache {
list_.clear();
}
+ // Total memory used, including internal accounting.
uptr Size() const {
return atomic_load(&size_, memory_order_relaxed);
}
+ // Memory used for internal accounting.
+ uptr OverheadSize() const {
+ return list_.size() * sizeof(QuarantineBatch);
+ }
+
void Enqueue(Callback cb, void *ptr, uptr size) {
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
- AllocBatch(cb);
- size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ CHECK(b);
+ b->init(ptr, size);
+ EnqueueBatch(b);
+ } else {
+ list_.back()->push_back(ptr, size);
+ SizeAdd(size);
}
- QuarantineBatch *b = list_.back();
- CHECK(b);
- b->batch[b->count++] = ptr;
- b->size += size;
- SizeAdd(size);
}
- void Transfer(QuarantineCache *c) {
- list_.append_back(&c->list_);
- SizeAdd(c->Size());
- atomic_store(&c->size_, 0, memory_order_relaxed);
+ void Transfer(QuarantineCache *from_cache) {
+ list_.append_back(&from_cache->list_);
+ SizeAdd(from_cache->Size());
+
+ atomic_store(&from_cache->size_, 0, memory_order_relaxed);
}
void EnqueueBatch(QuarantineBatch *b) {
@@ -181,19 +242,51 @@ class QuarantineCache {
return b;
}
+ void MergeBatches(QuarantineCache *to_deallocate) {
+ uptr extracted_size = 0;
+ QuarantineBatch *current = list_.front();
+ while (current && current->next) {
+ if (current->can_merge(current->next)) {
+ QuarantineBatch *extracted = current->next;
+ // Move all the chunks into the current batch.
+ current->merge(extracted);
+ CHECK_EQ(extracted->count, 0);
+ CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
+ // Remove the next batch from the list and account for its size.
+ list_.extract(current, extracted);
+ extracted_size += extracted->size;
+ // Add it to deallocation list.
+ to_deallocate->EnqueueBatch(extracted);
+ } else {
+ current = current->next;
+ }
+ }
+ SizeSub(extracted_size);
+ }
+
void PrintStats() const {
uptr batch_count = 0;
- uptr total_quarantine_bytes = 0;
+ uptr total_overhead_bytes = 0;
+ uptr total_bytes = 0;
uptr total_quarantine_chunks = 0;
for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
batch_count++;
- total_quarantine_bytes += (*it).size;
+ total_bytes += (*it).size;
+ total_overhead_bytes += (*it).size - (*it).quarantined_size();
total_quarantine_chunks += (*it).count;
}
- Printf("Global quarantine stats: batches: %zd; bytes: %zd; chunks: %zd "
- "(capacity: %zd chunks)\n",
- batch_count, total_quarantine_bytes, total_quarantine_chunks,
- batch_count * QuarantineBatch::kSize);
+ uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
+ int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
+ 0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
+ uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
+ int memory_overhead_percent = total_quarantined_bytes == 0 ?
+ 0 : total_overhead_bytes * 100 / total_quarantined_bytes;
+ Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+ "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
+ "\n",
+ batch_count, total_bytes, total_quarantined_bytes,
+ total_quarantine_chunks, quarantine_chunks_capacity,
+ chunks_usage_percent, memory_overhead_percent);
}
private:
@@ -208,15 +301,6 @@ class QuarantineCache {
void SizeSub(uptr sub) {
atomic_store(&size_, Size() - sub, memory_order_relaxed);
}
-
- NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
- QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
- CHECK(b);
- b->count = 0;
- b->size = 0;
- list_.push_back(b);
- return b;
- }
};
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld.h b/lib/sanitizer_common/sanitizer_stoptheworld.h
index aa6f5d833a4d..41752d8f66e7 100644
--- a/lib/sanitizer_common/sanitizer_stoptheworld.h
+++ b/lib/sanitizer_common/sanitizer_stoptheworld.h
@@ -20,6 +20,12 @@
namespace __sanitizer {
typedef int SuspendedThreadID;
+enum PtraceRegistersStatus {
+ REGISTERS_UNAVAILABLE_FATAL = -1,
+ REGISTERS_UNAVAILABLE = 0,
+ REGISTERS_AVAILABLE = 1
+};
+
// Holds the list of suspended threads and provides an interface to dump their
// register contexts.
class SuspendedThreadsList {
@@ -30,7 +36,8 @@ class SuspendedThreadsList {
CHECK_LT(index, thread_ids_.size());
return thread_ids_[index];
}
- int GetRegistersAndSP(uptr index, uptr *buffer, uptr *sp) const;
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
// The buffer in GetRegistersAndSP should be at least this big.
static uptr RegisterCount();
uptr thread_count() const { return thread_ids_.size(); }
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index eb4c403d3de0..6e4baeecaffd 100644
--- a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -16,7 +16,8 @@
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__s390__))
+ defined(__s390__) || defined(__i386__) || \
+ defined(__arm__))
#include "sanitizer_stoptheworld.h"
@@ -493,9 +494,9 @@ typedef _user_regs_struct regs_struct;
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
-int SuspendedThreadsList::GetRegistersAndSP(uptr index,
- uptr *buffer,
- uptr *sp) const {
+PtraceRegistersStatus SuspendedThreadsList::GetRegistersAndSP(uptr index,
+ uptr *buffer,
+ uptr *sp) const {
pid_t tid = GetThreadID(index);
regs_struct regs;
int pterrno;
@@ -513,12 +514,16 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index,
if (isErr) {
VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
pterrno);
- return -1;
+ // ESRCH means that the given thread is not suspended or already dead.
+ // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
+ // we should notify caller about this.
+ return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
}
*sp = regs.REG_SP;
internal_memcpy(buffer, &regs, sizeof(regs));
- return 0;
+ return REGISTERS_AVAILABLE;
}
uptr SuspendedThreadsList::RegisterCount() {
@@ -528,4 +533,4 @@ uptr SuspendedThreadsList::RegisterCount() {
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
// || defined(__aarch64__) || defined(__powerpc64__)
- // || defined(__s390__)
+ // || defined(__s390__) || defined(__i386__) || defined(__arm__)
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc b/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc
new file mode 100644
index 000000000000..047472a657a6
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc
@@ -0,0 +1,40 @@
+//===-- sanitizer_stoptheworld_mac.cc -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \
+ defined(__mips64) || defined(__i386))
+
+#include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ CHECK(0 && "unimplemented");
+}
+
+PtraceRegistersStatus SuspendedThreadsList::GetRegistersAndSP(uptr index,
+ uptr *buffer,
+ uptr *sp) const {
+ CHECK(0 && "unimplemented");
+ return REGISTERS_UNAVAILABLE_FATAL;
+}
+
+uptr SuspendedThreadsList::RegisterCount() {
+ CHECK(0 && "unimplemented");
+ return 0;
+}
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) ||
+       //                   defined(__mips64) || defined(__i386))
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index 7c377a729638..614470a633d0 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -356,11 +356,19 @@ const char *LLVMSymbolizer::FormatAndSendCommand(bool is_data,
CHECK(module_name);
const char *is_data_str = is_data ? "DATA " : "";
if (arch == kModuleArchUnknown) {
- internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n", is_data_str,
- module_name, module_offset);
+ if (internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n", is_data_str,
+ module_name,
+ module_offset) >= static_cast<int>(kBufferSize)) {
+ Report("WARNING: Command buffer too small");
+ return nullptr;
+ }
} else {
- internal_snprintf(buffer_, kBufferSize, "%s\"%s:%s\" 0x%zx\n", is_data_str,
- module_name, ModuleArchToString(arch), module_offset);
+ if (internal_snprintf(buffer_, kBufferSize, "%s\"%s:%s\" 0x%zx\n",
+ is_data_str, module_name, ModuleArchToString(arch),
+ module_offset) >= static_cast<int>(kBufferSize)) {
+ Report("WARNING: Command buffer too small");
+ return nullptr;
+ }
}
return symbolizer_process_->SendCommand(buffer_);
}
@@ -377,7 +385,23 @@ SymbolizerProcess::SymbolizerProcess(const char *path, bool use_forkpty)
CHECK_NE(path_[0], '\0');
}
+static bool IsSameModule(const char* path) {
+ if (const char* ProcessName = GetProcessName()) {
+ if (const char* SymbolizerName = StripModuleName(path)) {
+ return !internal_strcmp(ProcessName, SymbolizerName);
+ }
+ }
+ return false;
+}
+
const char *SymbolizerProcess::SendCommand(const char *command) {
+ if (failed_to_start_)
+ return nullptr;
+ if (IsSameModule(path_)) {
+ Report("WARNING: Symbolizer was blocked from starting itself!\n");
+ failed_to_start_ = true;
+ return nullptr;
+ }
for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
// Start or restart symbolizer if we failed to send command to it.
if (const char *res = SendCommandImpl(command))
@@ -426,6 +450,11 @@ bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
read_len += just_read;
if (ReachedEndOfOutput(buffer, read_len))
break;
+ if (read_len + 1 == max_length) {
+ Report("WARNING: Symbolizer buffer too small");
+ read_len = 0;
+ break;
+ }
}
buffer[read_len] = '\0';
return true;
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index f50d8b1840ab..d3c77b510d35 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -424,7 +424,6 @@ class InternalSymbolizer : public SymbolizerTool {
InternalSymbolizer() { }
static const int kBufferSize = 16 * 1024;
- static const int kMaxDemangledNameSize = 1024;
char buffer_[kBufferSize];
};
#else // SANITIZER_SUPPORTS_WEAK_HOOKS
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.cc b/lib/sanitizer_common/sanitizer_thread_registry.cc
index c2b75e652ce9..c5b2e0946282 100644
--- a/lib/sanitizer_common/sanitizer_thread_registry.cc
+++ b/lib/sanitizer_common/sanitizer_thread_registry.cc
@@ -19,7 +19,7 @@ namespace __sanitizer {
ThreadContextBase::ThreadContextBase(u32 tid)
: tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
status(ThreadStatusInvalid),
- detached(false), parent_tid(0), next(0) {
+ detached(false), workerthread(false), parent_tid(0), next(0) {
name[0] = '\0';
}
@@ -59,9 +59,10 @@ void ThreadContextBase::SetFinished() {
OnFinished();
}
-void ThreadContextBase::SetStarted(uptr _os_id, void *arg) {
+void ThreadContextBase::SetStarted(uptr _os_id, bool _workerthread, void *arg) {
status = ThreadStatusRunning;
os_id = _os_id;
+ workerthread = _workerthread;
OnStarted(arg);
}
@@ -266,14 +267,15 @@ void ThreadRegistry::FinishThread(u32 tid) {
}
}
-void ThreadRegistry::StartThread(u32 tid, uptr os_id, void *arg) {
+void ThreadRegistry::StartThread(u32 tid, uptr os_id, bool workerthread,
+ void *arg) {
BlockingMutexLock l(&mtx_);
running_threads_++;
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_EQ(ThreadStatusCreated, tctx->status);
- tctx->SetStarted(os_id, arg);
+ tctx->SetStarted(os_id, workerthread, arg);
}
void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.h b/lib/sanitizer_common/sanitizer_thread_registry.h
index a27bbb376e85..17b1d5d90962 100644
--- a/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -45,6 +45,7 @@ class ThreadContextBase {
ThreadStatus status;
bool detached;
+ bool workerthread;
u32 parent_tid;
ThreadContextBase *next; // For storing thread contexts in a list.
@@ -54,7 +55,7 @@ class ThreadContextBase {
void SetDead();
void SetJoined(void *arg);
void SetFinished();
- void SetStarted(uptr _os_id, void *arg);
+ void SetStarted(uptr _os_id, bool _workerthread, void *arg);
void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,
u32 _parent_tid, void *arg);
void Reset();
@@ -115,7 +116,7 @@ class ThreadRegistry {
void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
void FinishThread(u32 tid);
- void StartThread(u32 tid, uptr os_id, void *arg);
+ void StartThread(u32 tid, uptr os_id, bool workerthread, void *arg);
private:
const ThreadContextFactory context_factory_;
diff --git a/lib/sanitizer_common/sanitizer_tls_get_addr.cc b/lib/sanitizer_common/sanitizer_tls_get_addr.cc
index 77c1947d52da..29db37b8a464 100644
--- a/lib/sanitizer_common/sanitizer_tls_get_addr.cc
+++ b/lib/sanitizer_common/sanitizer_tls_get_addr.cc
@@ -136,11 +136,19 @@ void DTLS_on_libc_memalign(void *ptr, uptr size) {
DTLS *DTLS_Get() { return &dtls; }
+bool DTLSInDestruction(DTLS *dtls) {
+ return dtls->dtv_size == kDestroyedThread;
+}
+
#else
void DTLS_on_libc_memalign(void *ptr, uptr size) {}
DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res) { return 0; }
DTLS *DTLS_Get() { return 0; }
void DTLS_Destroy() {}
+bool DTLSInDestruction(DTLS *dtls) {
+ UNREACHABLE("dtls is unsupported on this platform!");
+}
+
#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_tls_get_addr.h b/lib/sanitizer_common/sanitizer_tls_get_addr.h
index 58d47634d382..199a3b2e9c61 100644
--- a/lib/sanitizer_common/sanitizer_tls_get_addr.h
+++ b/lib/sanitizer_common/sanitizer_tls_get_addr.h
@@ -55,6 +55,8 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,
void DTLS_on_libc_memalign(void *ptr, uptr size);
DTLS *DTLS_Get();
void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+// Returns true if DTLS of suspended thread is in destruction process.
+bool DTLSInDestruction(DTLS *dtls);
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index 9682d2921420..b1a2a53a3fbf 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -30,6 +30,7 @@
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
+#include "sanitizer_win_defs.h"
// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
@@ -835,6 +836,59 @@ bool IsHandledDeadlySignal(int signum) {
return false;
}
+// Check based on flags if we should handle this exception.
+bool IsHandledDeadlyException(DWORD exceptionCode) {
+ switch (exceptionCode) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ case EXCEPTION_STACK_OVERFLOW:
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ case EXCEPTION_PRIV_INSTRUCTION:
+ case EXCEPTION_BREAKPOINT:
+ return common_flags()->handle_sigill;
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_STACK_CHECK:
+ case EXCEPTION_FLT_UNDERFLOW:
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case EXCEPTION_INT_OVERFLOW:
+ return common_flags()->handle_sigfpe;
+ }
+ return false;
+}
+
+const char *DescribeSignalOrException(int signo) {
+ unsigned code = signo;
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION: return "access-violation";
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: return "array-bounds-exceeded";
+ case EXCEPTION_STACK_OVERFLOW: return "stack-overflow";
+ case EXCEPTION_DATATYPE_MISALIGNMENT: return "datatype-misalignment";
+ case EXCEPTION_IN_PAGE_ERROR: return "in-page-error";
+ case EXCEPTION_ILLEGAL_INSTRUCTION: return "illegal-instruction";
+ case EXCEPTION_PRIV_INSTRUCTION: return "priv-instruction";
+ case EXCEPTION_BREAKPOINT: return "breakpoint";
+ case EXCEPTION_FLT_DENORMAL_OPERAND: return "flt-denormal-operand";
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO: return "flt-divide-by-zero";
+ case EXCEPTION_FLT_INEXACT_RESULT: return "flt-inexact-result";
+ case EXCEPTION_FLT_INVALID_OPERATION: return "flt-invalid-operation";
+ case EXCEPTION_FLT_OVERFLOW: return "flt-overflow";
+ case EXCEPTION_FLT_STACK_CHECK: return "flt-stack-check";
+ case EXCEPTION_FLT_UNDERFLOW: return "flt-underflow";
+ case EXCEPTION_INT_DIVIDE_BY_ZERO: return "int-divide-by-zero";
+ case EXCEPTION_INT_OVERFLOW: return "int-overflow";
+ }
+ return "unknown exception";
+}
+
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
SYSTEM_INFO si;
GetNativeSystemInfo(&si);
@@ -936,21 +990,10 @@ int WaitForProcess(pid_t pid) { return -1; }
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
} // namespace __sanitizer
-#if !SANITIZER_GO
-// Workaround to implement weak hooks on Windows. COFF doesn't directly support
-// weak symbols, but it does support /alternatename, which is similar. If the
-// user does not override the hook, we will use this default definition instead
-// of null.
-extern "C" void __sanitizer_print_memory_profile(int top_percent) {}
-
-#ifdef _WIN64
-#pragma comment(linker, "/alternatename:__sanitizer_print_memory_profile=__sanitizer_default_print_memory_profile") // NOLINT
-#else
-#pragma comment(linker, "/alternatename:___sanitizer_print_memory_profile=___sanitizer_default_print_memory_profile") // NOLINT
-#endif
-#endif
-
#endif // _WIN32
diff --git a/lib/sanitizer_common/sanitizer_win.h b/lib/sanitizer_common/sanitizer_win.h
new file mode 100644
index 000000000000..23e01ab75049
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win.h
@@ -0,0 +1,26 @@
+//===-- sanitizer_win.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Windows-specific declarations.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_H
+#define SANITIZER_WIN_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+// Check based on flags if we should handle the exception.
+bool IsHandledDeadlyException(DWORD exceptionCode);
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_H
diff --git a/lib/sanitizer_common/sanitizer_win_defs.h b/lib/sanitizer_common/sanitizer_win_defs.h
new file mode 100644
index 000000000000..077ff9ccc8df
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win_defs.h
@@ -0,0 +1,153 @@
+//===-- sanitizer_win_defs.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common definitions for Windows-specific code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DEFS_H
+#define SANITIZER_WIN_DEFS_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#ifndef WINAPI
+#ifdef _M_IX86
+#define WINAPI __stdcall
+#else
+#define WINAPI
+#endif
+#endif
+
+#if defined(_WIN64)
+#define WIN_SYM_PREFIX
+#else
+#define WIN_SYM_PREFIX "_"
+#endif
+
+// Intermediate macro to ensure the parameter is expanded before being
+// stringified.
+#define STRINGIFY_(A) #A
+#define STRINGIFY(A) STRINGIFY_(A)
+
+// ----------------- A workaround for the absence of weak symbols --------------
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value.
+// Take into account that this is a pragma directive for the linker, so it will
+// be ignored by the compiler and the function will be marked as UNDEF in the
+// symbol table of the resulting object file. The linker won't find the default
+// implementation until it links with that object file.
+// So, suppose we provide a default implementation "fundef" for "fun", and this
+// is compiled into the object file "test.obj", which also contains the pragma
+// directive.
+// If we have some code with references to "fun" and we link that code directly
+// with "test.obj", it will work because the linker always links object files.
+// But if "test.obj" is included in a static library, like "test.lib", then the
+// linker will only pull in "test.obj" if necessary. Since "fun" appears as
+// UNDEF in "test.obj", that object does not itself resolve the symbol "fun",
+// so the linker never extracts it and the reference to "fun" results in a
+// link error (the linker never sees the pragma directive).
+// So, a workaround is to force linkage with the modules that include weak
+// definitions, with the following macro: WIN_FORCE_LINK()
+
+#define WIN_WEAK_ALIAS(Name, Default) \
+ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY(Name) "="\
+ WIN_SYM_PREFIX STRINGIFY(Default)))
+
+#define WIN_FORCE_LINK(Name) \
+ __pragma(comment(linker, "/include:" WIN_SYM_PREFIX STRINGIFY(Name)))
+
+#define WIN_EXPORT(ExportedName, Name) \
+ __pragma(comment(linker, "/export:" WIN_SYM_PREFIX STRINGIFY(ExportedName) \
+ "=" WIN_SYM_PREFIX STRINGIFY(Name)))
+
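+// For example (illustrative usage, not a definition from this file): if
+// "test.obj" above provides the default body "fundef", an executable that
+// only references "fun" can force that object to be pulled out of "test.lib"
+// with
+//   WIN_FORCE_LINK(fundef)
+// which emits "/include:fundef" (with the platform symbol prefix) so the
+// linker extracts "test.obj" and finally sees the /alternatename directive.
+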
+// We cannot define weak functions on Windows, but we can use WIN_WEAK_ALIAS(),
+// which defines an alias to a default implementation, and only works when
+// linking statically.
+// So, to define a weak function "fun", we define a default implementation with
+// a different name "fun__def" and we create a "weak alias" fun = fun__def.
+// Then, users can override it just by defining "fun".
+// We require "extern "C"" because otherwise WIN_WEAK_ALIAS() would fail due
+// to name mangling.
+
+// Dummy name for default implementation of weak function.
+# define WEAK_DEFAULT_NAME(Name) Name##__def
+// Name for exported implementation of weak function.
+# define WEAK_EXPORT_NAME(Name) Name##__dll
+
+// Use this macro when you need to define and export a weak function from a
+// library. For example:
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_ALIAS(Name, WEAK_DEFAULT_NAME(Name)) \
+ WIN_EXPORT(WEAK_EXPORT_NAME(Name), Name) \
+ extern "C" ReturnType Name(__VA_ARGS__); \
+ extern "C" ReturnType WEAK_DEFAULT_NAME(Name)(__VA_ARGS__)
+
+// Use this macro when you need to import a weak function from a library. It
+// defines a weak alias to the imported function from the dll. For example:
+// WIN_WEAK_IMPORT_DEF(compare)
+# define WIN_WEAK_IMPORT_DEF(Name) \
+ WIN_WEAK_ALIAS(Name, WEAK_EXPORT_NAME(Name))
+
+// So, for Windows we provide something similar to weak symbols in Linux, with
+// some differences:
+// + A default implementation must always be provided.
+//
+// + When linking statically it works quite similarly. For example:
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// And it will work fine. If we don't override the function, we need to ensure
+// that the linker includes the object file with the default implementation.
+// We can do so with the linker option "-wholearchive:".
+//
+// + When linking dynamically with a library (dll), weak functions are exported
+// with "__dll" suffix. Clients can use the macro WIN_WEAK_IMPORT_DEF(fun)
+// which defines a "weak alias" fun = fun__dll.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// But if we override the function, the dlls don't have access to the override
+// (unlike on Linux). If that is desired, the strong definition must be
+// exported, and interception can be used from the rest of the dlls.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+// // When initialized, check if the main executable defined "compare".
+// int libExample_init() {
+// uptr fnptr = __interception::InternalGetProcAddress(
+// (void *)GetModuleHandleA(0), "compare");
+// if (fnptr && !__interception::OverrideFunction((uptr)compare, fnptr, 0))
+// abort();
+// return 0;
+// }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We override and export compare:
+// extern "C" __declspec(dllexport) bool compare (int a, int b) {
+// return a >= b;
+// }
+//
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_DEFS_H
diff --git a/lib/sanitizer_common/sanitizer_win_dll_thunk.cc b/lib/sanitizer_common/sanitizer_win_dll_thunk.cc
new file mode 100644
index 000000000000..4fb4650be478
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win_dll_thunk.cc
@@ -0,0 +1,102 @@
+//===-- sanitizer_win_dll_thunk.cc ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_defs.h"
+#include "sanitizer_win_dll_thunk.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name) {
+ uptr ret =
+ __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+int dllThunkIntercept(const char* main_function, uptr dll_function) {
+ uptr wrapper = dllThunkGetRealAddrOrDie(main_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function) {
+ uptr wrapper = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), main_function);
+ if (!wrapper)
+ wrapper = dllThunkGetRealAddrOrDie(default_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".DLLTH$A", read) // NOLINT
+#pragma section(".DLLTH$Z", read) // NOLINT
+
+typedef void (*DllThunkCB)();
+extern "C" {
+__declspec(allocate(".DLLTH$A")) DllThunkCB __start_dll_thunk;
+__declspec(allocate(".DLLTH$Z")) DllThunkCB __stop_dll_thunk;
+}
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+extern "C" int __dll_thunk_init() {
+ static bool flag = false;
+ // __dll_thunk_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (DllThunkCB *it = &__start_dll_thunk; it < &__stop_dll_thunk; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+// We want to call __dll_thunk_init before C/C++ initializers / constructors
+// run, otherwise functions like memset might be called before interception.
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__dll_thunk_preinit)() =
+ __dll_thunk_init;
+
+static void WINAPI dll_thunk_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) __dll_thunk_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__dll_thunk_tls_init)(void *,
+ unsigned long, void *) = dll_thunk_thread_init;
+
+#endif // SANITIZER_DLL_THUNK
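
For readers unfamiliar with the linker-section trick used by __dll_thunk_init above, here is a minimal standalone sketch (MSVC-specific; the section names .CB$A/.CB$M/.CB$Z and all identifiers are hypothetical). Pointers allocated into the middle section are placed by the linker between the two bookend variables, so walking that range runs every registered callback, just like the .DLLTH$* walk above:

#include <cstdio>

typedef void (*Callback)();

#pragma section(".CB$A", read)
#pragma section(".CB$M", read)
#pragma section(".CB$Z", read)

// Bookends: everything allocated into ".CB$M" lands between these two.
__declspec(allocate(".CB$A")) Callback cb_list_start = nullptr;
__declspec(allocate(".CB$Z")) Callback cb_list_stop = nullptr;

static void SayHello() { std::puts("registered callback ran"); }
__declspec(allocate(".CB$M")) Callback cb_register_say_hello = SayHello;

int main() {
  // The linker may pad the range with null entries, hence the check.
  for (Callback *it = &cb_list_start; it < &cb_list_stop; ++it)
    if (*it) (*it)();
  return 0;
}
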
diff --git a/lib/sanitizer_common/sanitizer_win_dll_thunk.h b/lib/sanitizer_common/sanitizer_win_dll_thunk.h
new file mode 100644
index 000000000000..2f9ebdaa6e76
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win_dll_thunk.h
@@ -0,0 +1,182 @@
+//===-- sanitizer_win_dll_thunk.h -----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This header provides helper macros to delegate calls to the shared runtime
+// that lives in the main executable. It should be included in the dll thunks
+// that are linked into the dlls, when the sanitizer is a static library
+// included in the main executable.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DLL_THUNK_H
+#define SANITIZER_WIN_DLL_THUNK_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name);
+
+int dllThunkIntercept(const char* main_function, uptr dll_function);
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function);
+}
+
+extern "C" int __dll_thunk_init();
+
+// ----------------- Function interception helper macros -------------------- //
+// Override dll_function with main_function from main executable.
+#define INTERCEPT_OR_DIE(main_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkIntercept(main_function, (__sanitizer::uptr) \
+ dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// Try to override dll_function with main_function from main executable.
+// If main_function is not present, override dll_function with default_function.
+#define INTERCEPT_WHEN_POSSIBLE(main_function, default_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkInterceptWhenPossible(main_function, \
+ default_function, (__sanitizer::uptr)dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// -------------------- Function interception macros ------------------------ //
+// Special case of hooks -- ASan's own interface functions. Those are only
+// called after __asan_init, thus an empty implementation is sufficient.
+#define INTERCEPT_SANITIZER_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name)
+
+// Special case of hooks -- weak functions. They can be redefined in the main
+// executable, but that is not necessary, so we shouldn't die if we cannot find
+// a reference. Instead, when the function is not present in the main
+// executable, we use the default implementation provided by the asan library.
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, STRINGIFY(WEAK_EXPORT_NAME(name)), name)
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to these
+// functions since they can be called before __dll_thunk_init, but that would
+// only lead to false negatives in the startup code before the user's global
+// initializers, which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
+
+// Use these macros for functions that could be called before __dll_thunk_init()
+// is executed and don't lead to errors if defined (free, malloc, etc).
+#define INTERCEPT_WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#endif // SANITIZER_WIN_DLL_THUNK_H
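
To make the wrapper macros above easier to follow, the sketch below shows roughly what an INTERCEPT_WRAP_W_W-style thunk does for a hypothetical exported function my_func; the resolver is a stand-in for dllThunkGetRealAddrOrDie() and the .DLLTH$M registration is omitted, so this is an illustration of the forwarding pattern, not a literal macro expansion:

#include <cstdint>
#include <cstdio>

// Hypothetical resolver standing in for dllThunkGetRealAddrOrDie().
static void *the_real_my_func(void *arg) {
  std::puts("forwarded to the implementation in the main executable");
  return arg;
}
static uintptr_t ResolveFromMainExecutable(const char *name) {
  (void)name;  // a real thunk would call InternalGetProcAddress here
  return (uintptr_t)&the_real_my_func;
}

// Roughly what the DLL-side copy of my_func looks like after expansion.
extern "C" void *my_func(void *arg) {
  typedef decltype(my_func) *fntype;
  // Resolved once, lazily; every call then forwards to the main binary.
  static fntype fn = (fntype)ResolveFromMainExecutable("my_func");
  return fn(arg);
}

int main() {
  my_func(nullptr);
  return 0;
}
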
diff --git a/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc b/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc
new file mode 100644
index 000000000000..f8f916473bf2
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc
@@ -0,0 +1,21 @@
+//===-- sanitizer_win_dynamic_runtime_thunk.cc ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Common, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from sanitizer common.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_common_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
diff --git a/lib/sanitizer_common/sanitizer_win_weak_interception.cc b/lib/sanitizer_common/sanitizer_win_weak_interception.cc
new file mode 100644
index 000000000000..364319398198
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win_weak_interception.cc
@@ -0,0 +1,94 @@
+//===-- sanitizer_win_weak_interception.cc --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in the sanitizer when it is implemented as a
+// shared library on Windows (dll), in order to delegate the calls of weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS && SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_win_defs.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+// Try to get a pointer to real_function in the main module and override
+// dll_function with that pointer. If the function isn't found, nothing changes.
+int interceptWhenPossible(uptr dll_function, const char *real_function) {
+ uptr real = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), real_function);
+ if (real && !__interception::OverrideFunction((uptr)dll_function, real, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Declare weak hooks.
+extern "C" {
+void __sanitizer_weak_hook_memcmp(uptr called_pc, const void *s1,
+ const void *s2, uptr n, int result);
+void __sanitizer_weak_hook_strcmp(uptr called_pc, const char *s1,
+ const char *s2, int result);
+void __sanitizer_weak_hook_strncmp(uptr called_pc, const char *s1,
+ const char *s2, uptr n, int result);
+void __sanitizer_weak_hook_strstr(uptr called_pc, const char *s1,
+ const char *s2, char *result);
+}
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".WEAK$A", read) // NOLINT
+#pragma section(".WEAK$Z", read) // NOLINT
+
+typedef void (*InterceptCB)();
+extern "C" {
+__declspec(allocate(".WEAK$A")) InterceptCB __start_weak_list;
+__declspec(allocate(".WEAK$Z")) InterceptCB __stop_weak_list;
+}
+
+static int weak_intercept_init() {
+ static bool flag = false;
+  // weak_intercept_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (InterceptCB *it = &__start_weak_list; it < &__stop_weak_list; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__weak_intercept_preinit)() =
+ weak_intercept_init;
+
+static void WINAPI weak_intercept_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) weak_intercept_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void(WINAPI *__weak_intercept_tls_init)(
+ void *, unsigned long, void *) = weak_intercept_thread_init;
+
+#endif // SANITIZER_WINDOWS && SANITIZER_DYNAMIC
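
The .CRT$XIB placement above is what makes the interception run before any C/C++ initializer. A minimal standalone illustration (MSVC-specific, hypothetical names) of that ordering:

#include <cstdio>

static bool early_init_ran = false;  // zero-initialized before CRT startup

static int early_init() {
  // Runs during CRT startup, before C/C++ initializers and constructors;
  // keep it trivial, and return 0 or CRT initialization reports an error.
  early_init_ran = true;
  return 0;
}

#pragma section(".CRT$XIB", long, read)
__declspec(allocate(".CRT$XIB")) static int (*early_init_ptr)() = early_init;

struct Global {
  Global() { std::printf("constructor: early_init_ran = %d\n", early_init_ran); }
};
static Global g;  // prints 1, proving early_init ran first

int main() { return 0; }
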
diff --git a/lib/sanitizer_common/sanitizer_win_weak_interception.h b/lib/sanitizer_common/sanitizer_win_weak_interception.h
new file mode 100644
index 000000000000..5b122971d2d0
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_win_weak_interception.h
@@ -0,0 +1,33 @@
+//===-- sanitizer_win_weak_interception.h ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This header provides helper macros to delegate calls of weak functions to the
+// implementation in the main executable when a strong definition is present.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_WEAK_INTERCEPTION_H
+#define SANITIZER_WIN_WEAK_INTERCEPTION_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+int interceptWhenPossible(uptr dll_function, const char *real_function);
+}
+
+// ----------------- Function interception helper macros -------------------- //
+// Weak functions can be redefined in the main executable, but that is not
+// necessary, so we shouldn't die if we cannot find a reference.
+#define INTERCEPT_WEAK(Name) interceptWhenPossible((uptr) Name, #Name);
+
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) \
+ static int intercept_##Name() { \
+ return __sanitizer::interceptWhenPossible((__sanitizer::uptr) Name, #Name);\
+ } \
+ __pragma(section(".WEAK$M", long, read)) \
+ __declspec(allocate(".WEAK$M")) int (*__weak_intercept_##Name)() = \
+ intercept_##Name;
+
+#endif // SANITIZER_WIN_WEAK_INTERCEPTION_H
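
As a hedged usage sketch of the mechanism above: a client links a strong, exported definition of one of the weak hooks into the main executable (the signature below mirrors the declaration in sanitizer_win_weak_interception.cc, with uintptr_t standing in for uptr), and interceptWhenPossible() redirects the runtime's stub to it at DLL startup:

#include <cstdint>

// Strong definition in the main executable, exported so that
// InternalGetProcAddress() can find it by name at runtime.
extern "C" __declspec(dllexport)
void __sanitizer_weak_hook_strcmp(uintptr_t called_pc, const char *s1,
                                  const char *s2, int result) {
  // Custom instrumentation, e.g. recording comparison operands for a fuzzer.
  (void)called_pc; (void)s1; (void)s2; (void)result;
}

int main() { return 0; }
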
diff --git a/lib/sanitizer_common/scripts/sancov.py b/lib/sanitizer_common/scripts/sancov.py
index e19afdb717ec..e2eba36a802f 100755
--- a/lib/sanitizer_common/scripts/sancov.py
+++ b/lib/sanitizer_common/scripts/sancov.py
@@ -14,12 +14,14 @@ import sys
prog_name = ""
def Usage():
- print >> sys.stderr, "Usage: \n" + \
- " " + prog_name + " merge FILE [FILE...] > OUTPUT\n" \
- " " + prog_name + " print FILE [FILE...]\n" \
- " " + prog_name + " unpack FILE [FILE...]\n" \
- " " + prog_name + " rawunpack FILE [FILE ...]\n" \
- " " + prog_name + " missing BINARY < LIST_OF_PCS\n"
+ sys.stderr.write(
+ "Usage: \n" + \
+ " " + prog_name + " merge FILE [FILE...] > OUTPUT\n" \
+ " " + prog_name + " print FILE [FILE...]\n" \
+ " " + prog_name + " unpack FILE [FILE...]\n" \
+ " " + prog_name + " rawunpack FILE [FILE ...]\n" \
+ " " + prog_name + " missing BINARY < LIST_OF_PCS\n" \
+ "\n")
exit(1)
def CheckBits(bits):
@@ -68,16 +70,19 @@ def ReadOneFile(path):
raise Exception('File %s is short (< 8 bytes)' % path)
bits = ReadMagicAndReturnBitness(f, path)
size -= 8
- s = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 / bits), f.read(size))
- print >>sys.stderr, "%s: read %d %d-bit PCs from %s" % (prog_name, size * 8 / bits, bits, path)
+ w = size * 8 // bits
+ s = struct.unpack_from(TypeCodeForStruct(bits) * (w), f.read(size))
+ sys.stderr.write(
+ "%s: read %d %d-bit PCs from %s\n" % (prog_name, w, bits, path))
return s
def Merge(files):
s = set()
for f in files:
s = s.union(set(ReadOneFile(f)))
- print >> sys.stderr, "%s: %d files merged; %d PCs total" % \
- (prog_name, len(files), len(s))
+ sys.stderr.write(
+ "%s: %d files merged; %d PCs total\n" % (prog_name, len(files), len(s))
+ )
return sorted(s)
def PrintFiles(files):
@@ -85,10 +90,9 @@ def PrintFiles(files):
s = Merge(files)
  else: # If there is just one file, print the PCs in order.
s = ReadOneFile(files[0])
- print >> sys.stderr, "%s: 1 file merged; %d PCs total" % \
- (prog_name, len(s))
+ sys.stderr.write("%s: 1 file merged; %d PCs total\n" % (prog_name, len(s)))
for i in s:
- print "0x%x" % i
+ print("0x%x" % i)
def MergeAndPrint(files):
if sys.stdout.isatty():
@@ -97,27 +101,27 @@ def MergeAndPrint(files):
bits = 32
if max(s) > 0xFFFFFFFF:
bits = 64
- array.array('I', MagicForBits(bits)).tofile(sys.stdout)
+ stdout_buf = getattr(sys.stdout, 'buffer', sys.stdout)
+ array.array('I', MagicForBits(bits)).tofile(stdout_buf)
a = struct.pack(TypeCodeForStruct(bits) * len(s), *s)
- sys.stdout.write(a)
+ stdout_buf.write(a)
def UnpackOneFile(path):
with open(path, mode="rb") as f:
- print >> sys.stderr, "%s: unpacking %s" % (prog_name, path)
+ sys.stderr.write("%s: unpacking %s\n" % (prog_name, path))
while True:
header = f.read(12)
if not header: return
if len(header) < 12:
break
pid, module_length, blob_size = struct.unpack('iII', header)
- module = f.read(module_length)
+ module = f.read(module_length).decode('utf-8')
blob = f.read(blob_size)
assert(len(module) == module_length)
assert(len(blob) == blob_size)
extracted_file = "%s.%d.sancov" % (module, pid)
- print >> sys.stderr, "%s: extracting %s" % \
- (prog_name, extracted_file)
+ sys.stderr.write("%s: extracting %s\n" % (prog_name, extracted_file))
# The packed file may contain multiple blobs for the same pid/module
# pair. Append to the end of the file instead of overwriting.
with open(extracted_file, 'ab') as f2:
@@ -133,7 +137,7 @@ def Unpack(files):
def UnpackOneRawFile(path, map_path):
mem_map = []
with open(map_path, mode="rt") as f_map:
- print >> sys.stderr, "%s: reading map %s" % (prog_name, map_path)
+ sys.stderr.write("%s: reading map %s\n" % (prog_name, map_path))
bits = int(f_map.readline())
if bits != 32 and bits != 64:
raise Exception('Wrong bits size in the map')
@@ -147,12 +151,12 @@ def UnpackOneRawFile(path, map_path):
mem_map_keys = [m[0] for m in mem_map]
with open(path, mode="rb") as f:
- print >> sys.stderr, "%s: unpacking %s" % (prog_name, path)
+ sys.stderr.write("%s: unpacking %s\n" % (prog_name, path))
f.seek(0, 2)
size = f.tell()
f.seek(0, 0)
- pcs = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 / bits), f.read(size))
+ pcs = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 // bits), f.read(size))
mem_map_pcs = [[] for i in range(0, len(mem_map))]
for pc in pcs:
@@ -161,7 +165,7 @@ def UnpackOneRawFile(path, map_path):
(start, end, base, module_path) = mem_map[map_idx]
assert pc >= start
if pc >= end:
- print >> sys.stderr, "warning: %s: pc %x outside of any known mapping" % (prog_name, pc)
+ sys.stderr.write("warning: %s: pc %x outside of any known mapping\n" % (prog_name, pc))
continue
mem_map_pcs[map_idx].append(pc - base)
@@ -169,7 +173,7 @@ def UnpackOneRawFile(path, map_path):
if len(pc_list) == 0: continue
assert path.endswith('.sancov.raw')
dst_path = module_path + '.' + os.path.basename(path)[:-4]
- print >> sys.stderr, "%s: writing %d PCs to %s" % (prog_name, len(pc_list), dst_path)
+ sys.stderr.write("%s: writing %d PCs to %s\n" % (prog_name, len(pc_list), dst_path))
sorted_pc_list = sorted(pc_list)
pc_buffer = struct.pack(TypeCodeForStruct(bits) * len(pc_list), *sorted_pc_list)
with open(dst_path, 'ab+') as f2:
@@ -204,18 +208,19 @@ def PrintMissing(binary):
if not os.path.isfile(binary):
raise Exception('File not found: %s' % binary)
instrumented = GetInstrumentedPCs(binary)
- print >> sys.stderr, "%s: found %d instrumented PCs in %s" % (prog_name,
- len(instrumented),
- binary)
+ sys.stderr.write("%s: found %d instrumented PCs in %s\n" % (prog_name,
+ len(instrumented),
+ binary))
covered = set(int(line, 16) for line in sys.stdin)
- print >> sys.stderr, "%s: read %d PCs from stdin" % (prog_name, len(covered))
+ sys.stderr.write("%s: read %d PCs from stdin\n" % (prog_name, len(covered)))
missing = instrumented - covered
- print >> sys.stderr, "%s: %d PCs missing from coverage" % (prog_name, len(missing))
+ sys.stderr.write("%s: %d PCs missing from coverage\n" % (prog_name, len(missing)))
if (len(missing) > len(instrumented) - len(covered)):
- print >> sys.stderr, \
- "%s: WARNING: stdin contains PCs not found in binary" % prog_name
+ sys.stderr.write(
+ "%s: WARNING: stdin contains PCs not found in binary\n" % prog_name
+ )
for pc in sorted(missing):
- print "0x%x" % pc
+ print("0x%x" % pc)
if __name__ == '__main__':
prog_name = sys.argv[0]
diff --git a/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc b/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
index bd315a0c9bd4..b25a53d73cb5 100644
--- a/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
+++ b/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
@@ -18,8 +18,9 @@
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
- static llvm::symbolize::LLVMSymbolizer DefaultSymbolizer;
- return &DefaultSymbolizer;
+ static llvm::symbolize::LLVMSymbolizer *DefaultSymbolizer =
+ new llvm::symbolize::LLVMSymbolizer();
+ return DefaultSymbolizer;
}
namespace __sanitizer {
@@ -41,8 +42,8 @@ bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
getDefaultSymbolizer()->symbolizeInlinedCode(ModuleName, ModuleOffset);
Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
}
- __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
- return true;
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength;
}
bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
@@ -55,8 +56,8 @@ bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
getDefaultSymbolizer()->symbolizeData(ModuleName, ModuleOffset);
Printer << (ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
}
- __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
- return true;
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength;
}
void __sanitizer_symbolize_flush() { getDefaultSymbolizer()->flush(); }
@@ -65,8 +66,10 @@ int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
int MaxLength) {
std::string Result =
llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);
- __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
- return static_cast<int>(Result.size() + 1);
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength
+ ? static_cast<int>(Result.size() + 1)
+ : 0;
}
} // extern "C"
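
The three symbolizer changes above rely on the same idiom: snprintf-style functions return the number of characters that would have been written, so a result smaller than the buffer length means the output was not truncated. A small standalone illustration using the standard snprintf (internal_snprintf is assumed to behave the same way):

#include <cstdio>

// Returns true iff `s` was copied into `buf` without truncation.
static bool FormatFits(char *buf, int max_length, const char *s) {
  return std::snprintf(buf, max_length, "%s", s) < max_length;
}

int main() {
  char small[8];
  std::printf("%d\n", FormatFits(small, sizeof(small), "short"));          // 1
  std::printf("%d\n", FormatFits(small, sizeof(small), "much too long"));  // 0
  return 0;
}
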
diff --git a/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc b/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
index 0a796d91a3d0..66d089a0e6c0 100644
--- a/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
+++ b/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
@@ -172,4 +172,28 @@ LLVM_SYMBOLIZER_INTERCEPTOR4(pread, ssize_t(int, void *, size_t, off_t))
LLVM_SYMBOLIZER_INTERCEPTOR4(pread64, ssize_t(int, void *, size_t, off64_t))
LLVM_SYMBOLIZER_INTERCEPTOR2(realpath, char *(const char *, char *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_cond_broadcast, int(pthread_cond_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_cond_wait,
+ int(pthread_cond_t *, pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_lock, int(pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_unlock, int(pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_destroy, int(pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_mutex_init,
+ int(pthread_mutex_t *,
+ const pthread_mutexattr_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutexattr_destroy,
+ int(pthread_mutexattr_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutexattr_init, int(pthread_mutexattr_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_mutexattr_settype,
+ int(pthread_mutexattr_t *, int))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_getspecific, void *(pthread_key_t))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_key_create,
+ int(pthread_key_t *, void (*)(void *)))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_once,
+ int(pthread_once_t *, void (*)(void)))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_setspecific,
+ int(pthread_key_t, const void *))
+LLVM_SYMBOLIZER_INTERCEPTOR3(pthread_sigmask,
+ int(int, const sigset_t *, sigset_t *))
+
} // extern "C"
diff --git a/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh b/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh
new file mode 100755
index 000000000000..788cef85a581
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+function usage() {
+ echo "Usage: $0 INPUT... OUTPUT"
+ exit 1
+}
+
+if [ "$#" -le 1 ]; then
+ usage
+fi
+
+AR=$(readlink -f $AR)
+LINK=$(readlink -f $LINK)
+
+INPUTS=
+OUTPUT=
+for ARG in $@; do
+ INPUTS="$INPUTS $OUTPUT"
+ OUTPUT=$(readlink -f $ARG)
+done
+
+echo Inputs: $INPUTS
+echo Output: $OUTPUT
+
+SCRATCH_DIR=$(mktemp -d)
+ln -s $INPUTS $SCRATCH_DIR/
+
+pushd $SCRATCH_DIR
+
+for INPUT in *; do
+ for OBJ in $($AR t $INPUT); do
+ $AR x $INPUT $OBJ
+ mv -f $OBJ $(basename $INPUT).$OBJ
+ done
+done
+
+$LINK *.o -o $OUTPUT
+
+rm -rf $SCRATCH_DIR
diff --git a/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index 07239eb50587..4a0fb03c4c70 100755
--- a/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -58,9 +58,9 @@ cd $BUILD_DIR
CC=$CLANG_DIR/clang
CXX=$CLANG_DIR/clang++
TBLGEN=$CLANG_DIR/llvm-tblgen
-LINK=$CLANG_DIR/llvm-link
OPT=$CLANG_DIR/opt
-AR=$CLANG_DIR/llvm-ar
+export AR=$CLANG_DIR/llvm-ar
+export LINK=$CLANG_DIR/llvm-link
for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
if [[ ! -x "$F" ]]; then
@@ -136,29 +136,26 @@ rm -rf ${SYMBOLIZER_BUILD}
mkdir ${SYMBOLIZER_BUILD}
cd ${SYMBOLIZER_BUILD}
-for A in $LIBCXX_BUILD/lib/libc++.a \
- $LIBCXX_BUILD/lib/libc++abi.a \
- $LLVM_BUILD/lib/libLLVMSymbolize.a \
- $LLVM_BUILD/lib/libLLVMObject.a \
- $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
- $LLVM_BUILD/lib/libLLVMSupport.a \
- $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
- $LLVM_BUILD/lib/libLLVMMC.a \
- $ZLIB_BUILD/libz.a ; do
- for O in $($AR t $A); do
- $AR x $A $O
- mv -f $O "$(basename $A).$O" # Rename to avoid collisions between libs.
- done
-done
-
echo "Compiling..."
SYMBOLIZER_FLAGS="$FLAGS -std=c++11 -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -I${LIBCXX_BUILD}/include/c++/v1"
$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cc ${SRC_DIR}/sanitizer_wrappers.cc -c
+$AR rc symbolizer.a sanitizer_symbolize.o sanitizer_wrappers.o
SYMBOLIZER_API_LIST=__sanitizer_symbolize_code,__sanitizer_symbolize_data,__sanitizer_symbolize_flush,__sanitizer_symbolize_demangle
# Merge all the object files together and copy the resulting library back.
-$LINK *.o -o all.bc
+$SCRIPT_DIR/ar_to_bc.sh $LIBCXX_BUILD/lib/libc++.a \
+ $LIBCXX_BUILD/lib/libc++abi.a \
+ $LLVM_BUILD/lib/libLLVMSymbolize.a \
+ $LLVM_BUILD/lib/libLLVMObject.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
+ $LLVM_BUILD/lib/libLLVMSupport.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
+ $LLVM_BUILD/lib/libLLVMMC.a \
+ $ZLIB_BUILD/libz.a \
+ symbolizer.a \
+ all.bc
+
echo "Optimizing..."
$OPT -internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc
$CC $FLAGS -fno-lto -c opt.bc -o symbolizer.o
diff --git a/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index 033acf7f202a..737f9459d240 100644
--- a/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -37,6 +37,7 @@ clock_gettime U
cfgetospeed U
dl_iterate_phdr U
dlsym U
+dup U
dup2 U
environ U
execv U
diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt
index 20698b9a8a96..b310f93743ac 100644
--- a/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/lib/sanitizer_common/tests/CMakeLists.txt
@@ -26,6 +26,7 @@ set(SANITIZER_UNITTESTS
sanitizer_posix_test.cc
sanitizer_printf_test.cc
sanitizer_procmaps_test.cc
+ sanitizer_quarantine_test.cc
sanitizer_stackdepot_test.cc
sanitizer_stacktrace_printer_test.cc
sanitizer_stacktrace_test.cc
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 8df5efda674e..e14517fca518 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -23,6 +23,7 @@
#include <stdlib.h>
#include <algorithm>
#include <vector>
+#include <random>
#include <set>
using namespace __sanitizer;
@@ -539,6 +540,7 @@ void TestCombinedAllocator() {
Allocator;
Allocator *a = new Allocator;
a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
+ std::mt19937 r;
AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
@@ -570,7 +572,7 @@ void TestCombinedAllocator() {
allocated.push_back(x);
}
- random_shuffle(allocated.begin(), allocated.end());
+ std::shuffle(allocated.begin(), allocated.end(), r);
for (uptr i = 0; i < kNumAllocs; i++) {
void *x = allocated[i];
diff --git a/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc b/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc
index 706b4c58968e..dec5459b2515 100644
--- a/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_bitvector_test.cc
@@ -19,6 +19,7 @@
#include <algorithm>
#include <vector>
+#include <random>
#include <set>
using namespace __sanitizer;
@@ -75,6 +76,7 @@ void Print(const set<uptr> &s) {
template <class BV>
void TestBitVector(uptr expected_size) {
+ std::mt19937 r;
BV bv, bv1, t_bv;
EXPECT_EQ(expected_size, BV::kSize);
bv.clear();
@@ -112,7 +114,7 @@ void TestBitVector(uptr expected_size) {
for (uptr it = 0; it < 30; it++) {
// iota
for (size_t j = 0; j < bits.size(); j++) bits[j] = j;
- random_shuffle(bits.begin(), bits.end());
+ std::shuffle(bits.begin(), bits.end(), r);
set<uptr> s, s1, t_s;
bv.clear();
bv1.clear();
diff --git a/lib/sanitizer_common/tests/sanitizer_list_test.cc b/lib/sanitizer_common/tests/sanitizer_list_test.cc
index fbe53c0375c0..ede9771cb249 100644
--- a/lib/sanitizer_common/tests/sanitizer_list_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_list_test.cc
@@ -125,6 +125,22 @@ TEST(SanitizerCommon, IntrusiveList) {
CHECK(l.empty());
l.CheckConsistency();
+ l.push_back(x);
+ l.push_back(y);
+ l.push_back(z);
+ l.extract(x, y);
+ CHECK_EQ(l.size(), 2);
+ CHECK_EQ(l.front(), x);
+ CHECK_EQ(l.back(), z);
+ l.CheckConsistency();
+ l.extract(x, z);
+ CHECK_EQ(l.size(), 1);
+ CHECK_EQ(l.front(), x);
+ CHECK_EQ(l.back(), x);
+ l.CheckConsistency();
+ l.pop_front();
+ CHECK(l.empty());
+
List l1, l2;
l1.clear();
l2.clear();
diff --git a/lib/sanitizer_common/tests/sanitizer_quarantine_test.cc b/lib/sanitizer_common/tests/sanitizer_quarantine_test.cc
new file mode 100644
index 000000000000..23ed5f97ae27
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_quarantine_test.cc
@@ -0,0 +1,180 @@
+//===-- sanitizer_quarantine_test.cc --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+#include "gtest/gtest.h"
+
+#include <stdlib.h>
+
+namespace __sanitizer {
+
+struct QuarantineCallback {
+ void Recycle(void *m) {}
+ void *Allocate(uptr size) {
+ return malloc(size);
+ }
+ void Deallocate(void *p) {
+ free(p);
+ }
+};
+
+typedef QuarantineCache<QuarantineCallback> Cache;
+
+static void* kFakePtr = reinterpret_cast<void*>(0xFA83FA83);
+static const size_t kBlockSize = 8;
+
+static QuarantineCallback cb;
+
+static void DeallocateCache(Cache *cache) {
+ while (QuarantineBatch *batch = cache->DequeueBatch())
+ cb.Deallocate(batch);
+}
+
+TEST(SanitizerCommon, QuarantineBatchMerge) {
+ // Verify the trivial case.
+ QuarantineBatch into;
+ into.init(kFakePtr, 4UL);
+ QuarantineBatch from;
+ from.init(kFakePtr, 8UL);
+
+ into.merge(&from);
+
+ ASSERT_EQ(into.count, 2UL);
+ ASSERT_EQ(into.batch[0], kFakePtr);
+ ASSERT_EQ(into.batch[1], kFakePtr);
+ ASSERT_EQ(into.size, 12UL + sizeof(QuarantineBatch));
+ ASSERT_EQ(into.quarantined_size(), 12UL);
+
+ ASSERT_EQ(from.count, 0UL);
+ ASSERT_EQ(from.size, sizeof(QuarantineBatch));
+ ASSERT_EQ(from.quarantined_size(), 0UL);
+
+ // Merge the batch to the limit.
+ for (uptr i = 2; i < QuarantineBatch::kSize; ++i)
+ from.push_back(kFakePtr, 8UL);
+ ASSERT_TRUE(into.count + from.count == QuarantineBatch::kSize);
+ ASSERT_TRUE(into.can_merge(&from));
+
+ into.merge(&from);
+ ASSERT_TRUE(into.count == QuarantineBatch::kSize);
+
+ // No more space, not even for one element.
+ from.init(kFakePtr, 8UL);
+
+ ASSERT_FALSE(into.can_merge(&from));
+}
+
+TEST(SanitizerCommon, QuarantineCacheMergeBatchesEmpty) {
+ Cache cache;
+ Cache to_deallocate;
+ cache.MergeBatches(&to_deallocate);
+
+ ASSERT_EQ(to_deallocate.Size(), 0UL);
+ ASSERT_EQ(to_deallocate.DequeueBatch(), nullptr);
+}
+
+TEST(SanitizerCommon, QuarantineCacheMergeBatchesOneBatch) {
+ Cache cache;
+ cache.Enqueue(cb, kFakePtr, kBlockSize);
+ ASSERT_EQ(kBlockSize + sizeof(QuarantineBatch), cache.Size());
+
+ Cache to_deallocate;
+ cache.MergeBatches(&to_deallocate);
+
+ // Nothing to merge, nothing to deallocate.
+ ASSERT_EQ(kBlockSize + sizeof(QuarantineBatch), cache.Size());
+
+ ASSERT_EQ(to_deallocate.Size(), 0UL);
+ ASSERT_EQ(to_deallocate.DequeueBatch(), nullptr);
+
+ DeallocateCache(&cache);
+}
+
+TEST(SanitizerCommon, QuarantineCacheMergeBatchesSmallBatches) {
+ // Make a cache with two batches small enough to merge.
+ Cache from;
+ from.Enqueue(cb, kFakePtr, kBlockSize);
+ Cache cache;
+ cache.Enqueue(cb, kFakePtr, kBlockSize);
+
+ cache.Transfer(&from);
+ ASSERT_EQ(kBlockSize * 2 + sizeof(QuarantineBatch) * 2, cache.Size());
+
+ Cache to_deallocate;
+ cache.MergeBatches(&to_deallocate);
+
+ // Batches merged, one batch to deallocate.
+ ASSERT_EQ(kBlockSize * 2 + sizeof(QuarantineBatch), cache.Size());
+ ASSERT_EQ(to_deallocate.Size(), sizeof(QuarantineBatch));
+
+ DeallocateCache(&cache);
+ DeallocateCache(&to_deallocate);
+}
+
+TEST(SanitizerCommon, QuarantineCacheMergeBatchesTooBigToMerge) {
+ const uptr kNumBlocks = QuarantineBatch::kSize - 1;
+
+ // Make a cache with two batches small enough to merge.
+ Cache from;
+ Cache cache;
+ for (uptr i = 0; i < kNumBlocks; ++i) {
+ from.Enqueue(cb, kFakePtr, kBlockSize);
+ cache.Enqueue(cb, kFakePtr, kBlockSize);
+ }
+ cache.Transfer(&from);
+ ASSERT_EQ(kBlockSize * kNumBlocks * 2 +
+ sizeof(QuarantineBatch) * 2, cache.Size());
+
+ Cache to_deallocate;
+ cache.MergeBatches(&to_deallocate);
+
+ // Batches cannot be merged.
+ ASSERT_EQ(kBlockSize * kNumBlocks * 2 +
+ sizeof(QuarantineBatch) * 2, cache.Size());
+ ASSERT_EQ(to_deallocate.Size(), 0UL);
+
+ DeallocateCache(&cache);
+}
+
+TEST(SanitizerCommon, QuarantineCacheMergeBatchesALotOfBatches) {
+ const uptr kNumBatchesAfterMerge = 3;
+ const uptr kNumBlocks = QuarantineBatch::kSize * kNumBatchesAfterMerge;
+ const uptr kNumBatchesBeforeMerge = kNumBlocks;
+
+ // Make a cache with many small batches.
+ Cache cache;
+ for (uptr i = 0; i < kNumBlocks; ++i) {
+ Cache from;
+ from.Enqueue(cb, kFakePtr, kBlockSize);
+ cache.Transfer(&from);
+ }
+
+ ASSERT_EQ(kBlockSize * kNumBlocks +
+ sizeof(QuarantineBatch) * kNumBatchesBeforeMerge, cache.Size());
+
+ Cache to_deallocate;
+ cache.MergeBatches(&to_deallocate);
+
+ // All blocks should fit into 3 batches.
+ ASSERT_EQ(kBlockSize * kNumBlocks +
+ sizeof(QuarantineBatch) * kNumBatchesAfterMerge, cache.Size());
+
+ ASSERT_EQ(to_deallocate.Size(),
+ sizeof(QuarantineBatch) *
+ (kNumBatchesBeforeMerge - kNumBatchesAfterMerge));
+
+ DeallocateCache(&cache);
+ DeallocateCache(&to_deallocate);
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc b/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc
index 1132bfdf62e3..f8b8c12d4ac3 100644
--- a/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc
@@ -67,7 +67,7 @@ static void MarkUidAsPresent(ThreadContextBase *tctx, void *arg) {
static void TestRegistry(ThreadRegistry *registry, bool has_quarantine) {
// Create and start a main thread.
EXPECT_EQ(0U, registry->CreateThread(get_uid(0), true, -1, 0));
- registry->StartThread(0, 0, 0);
+ registry->StartThread(0, 0, false, 0);
// Create a bunch of threads.
for (u32 i = 1; i <= 10; i++) {
EXPECT_EQ(i, registry->CreateThread(get_uid(i), is_detached(i), 0, 0));
@@ -75,7 +75,7 @@ static void TestRegistry(ThreadRegistry *registry, bool has_quarantine) {
CheckThreadQuantity(registry, 11, 1, 11);
// Start some of them.
for (u32 i = 1; i <= 5; i++) {
- registry->StartThread(i, 0, 0);
+ registry->StartThread(i, 0, false, 0);
}
CheckThreadQuantity(registry, 11, 6, 11);
// Finish, create and start more threads.
@@ -85,7 +85,7 @@ static void TestRegistry(ThreadRegistry *registry, bool has_quarantine) {
registry->JoinThread(i, 0);
}
for (u32 i = 6; i <= 10; i++) {
- registry->StartThread(i, 0, 0);
+ registry->StartThread(i, 0, false, 0);
}
std::vector<u32> new_tids;
for (u32 i = 11; i <= 15; i++) {
@@ -112,7 +112,7 @@ static void TestRegistry(ThreadRegistry *registry, bool has_quarantine) {
}
for (u32 i = 0; i < new_tids.size(); i++) {
u32 tid = new_tids[i];
- registry->StartThread(tid, 0, 0);
+ registry->StartThread(tid, 0, false, 0);
registry->DetachThread(tid, 0);
registry->FinishThread(tid);
}
@@ -189,7 +189,7 @@ void *RunThread(void *arg) {
tids.push_back(
args->registry->CreateThread(0, false, 0, (void*)args->shard));
for (int i = 0; i < kThreadsPerShard; i++)
- args->registry->StartThread(tids[i], 0, (void*)args->shard);
+ args->registry->StartThread(tids[i], 0, false, (void*)args->shard);
for (int i = 0; i < kThreadsPerShard; i++)
args->registry->FinishThread(tids[i]);
for (int i = 0; i < kThreadsPerShard; i++)
@@ -200,7 +200,7 @@ void *RunThread(void *arg) {
static void ThreadedTestRegistry(ThreadRegistry *registry) {
// Create and start a main thread.
EXPECT_EQ(0U, registry->CreateThread(0, true, -1, 0));
- registry->StartThread(0, 0, 0);
+ registry->StartThread(0, 0, false, 0);
pthread_t threads[kNumShards];
RunThreadArgs args[kNumShards];
for (int i = 0; i < kNumShards; i++) {
diff --git a/lib/sanitizer_common/weak_symbols.txt b/lib/sanitizer_common/weak_symbols.txt
index 8a1e32b8041b..5a2b275184f4 100644
--- a/lib/sanitizer_common/weak_symbols.txt
+++ b/lib/sanitizer_common/weak_symbols.txt
@@ -1,5 +1,7 @@
___sanitizer_free_hook
___sanitizer_malloc_hook
+___sanitizer_report_error_summary
+___sanitizer_sandbox_on_notify
___sanitizer_symbolize_code
___sanitizer_symbolize_data
___sanitizer_symbolize_demangle
diff --git a/lib/scudo/CMakeLists.txt b/lib/scudo/CMakeLists.txt
index 4f1acec78c8f..ba5e8acd8598 100644
--- a/lib/scudo/CMakeLists.txt
+++ b/lib/scudo/CMakeLists.txt
@@ -16,10 +16,17 @@ set(SCUDO_SOURCES
scudo_termination.cpp
scudo_utils.cpp)
+# Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available.
if (COMPILER_RT_HAS_MSSE4_2_FLAG)
set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -msse4.2)
endif()
+# Enable the AArch64 CRC32 feature for scudo_crc32.cpp, if available.
+# Note that it is enabled by default starting with armv8.1-a.
+if (COMPILER_RT_HAS_MCRC_FLAG)
+ set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -mcrc)
+endif()
+
if(COMPILER_RT_HAS_SCUDO)
foreach(arch ${SCUDO_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.scudo
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index d1121b0e7a74..dab6abedcb3e 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -15,7 +15,6 @@
//===----------------------------------------------------------------------===//
#include "scudo_allocator.h"
-#include "scudo_crc32.h"
#include "scudo_utils.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -73,15 +72,21 @@ static uptr Cookie;
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };
-// Helper function that will compute the chunk checksum, being passed all the
-// the needed information as uptrs. It will opt for the hardware version of
-// the checksumming function if available.
-INLINE u32 hashUptrs(uptr Pointer, uptr *Array, uptr ArraySize, u8 HashType) {
- u32 Crc;
- Crc = computeCRC32(Cookie, Pointer, HashType);
- for (uptr i = 0; i < ArraySize; i++)
- Crc = computeCRC32(Crc, Array[i], HashType);
- return Crc;
+SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+INLINE u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
+ // If SSE4.2 is defined here, it was enabled everywhere, as opposed to only
+ // for scudo_crc32.cpp. This means that other SSE instructions were likely
+ // emitted at other places, and as a result there is no reason to not use
+ // the hardware version of the CRC32.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ return computeHardwareCRC32(Crc, Data);
+#else
+ if (computeHardwareCRC32 && HashType == CRC32Hardware)
+ return computeHardwareCRC32(Crc, Data);
+ else
+ return computeSoftwareCRC32(Crc, Data);
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
struct ScudoChunk : UnpackedHeader {
@@ -108,11 +113,11 @@ struct ScudoChunk : UnpackedHeader {
ZeroChecksumHeader.Checksum = 0;
uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
- u32 Hash = hashUptrs(reinterpret_cast<uptr>(this),
- HeaderHolder,
- ARRAY_SIZE(HeaderHolder),
- atomic_load_relaxed(&HashAlgorithm));
- return static_cast<u16>(Hash);
+ u8 HashType = atomic_load_relaxed(&HashAlgorithm);
+ u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HashType);
+ for (uptr i = 0; i < ARRAY_SIZE(HeaderHolder); i++)
+ Crc = computeCRC32(Crc, HeaderHolder[i], HashType);
+ return static_cast<u16>(Crc);
}
// Checks the validity of a chunk by verifying its checksum.
@@ -120,8 +125,7 @@ struct ScudoChunk : UnpackedHeader {
UnpackedHeader NewUnpackedHeader;
const AtomicPackedHeader *AtomicHeader =
reinterpret_cast<const AtomicPackedHeader *>(this);
- PackedHeader NewPackedHeader =
- AtomicHeader->load(std::memory_order_relaxed);
+ PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
}
@@ -130,8 +134,7 @@ struct ScudoChunk : UnpackedHeader {
void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
const AtomicPackedHeader *AtomicHeader =
reinterpret_cast<const AtomicPackedHeader *>(this);
- PackedHeader NewPackedHeader =
- AtomicHeader->load(std::memory_order_relaxed);
+ PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
*NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
@@ -144,7 +147,7 @@ struct ScudoChunk : UnpackedHeader {
PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
AtomicPackedHeader *AtomicHeader =
reinterpret_cast<AtomicPackedHeader *>(this);
- AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
+ atomic_store_relaxed(AtomicHeader, NewPackedHeader);
}
// Packs and stores the header, computing the checksum in the process. We
@@ -157,10 +160,10 @@ struct ScudoChunk : UnpackedHeader {
PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
AtomicPackedHeader *AtomicHeader =
reinterpret_cast<AtomicPackedHeader *>(this);
- if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
- NewPackedHeader,
- std::memory_order_relaxed,
- std::memory_order_relaxed)) {
+ if (!atomic_compare_exchange_strong(AtomicHeader,
+ &OldPackedHeader,
+ NewPackedHeader,
+ memory_order_relaxed)) {
dieWithMessage("ERROR: race on chunk header at address %p\n", this);
}
}
@@ -351,6 +354,8 @@ struct Allocator {
// Helper function that checks for a valid Scudo chunk.
bool isValidPointer(const void *UserPtr) {
+ if (UNLIKELY(!ThreadInited))
+ initThread();
uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
if (!IsAligned(ChunkBeg, MinAlignment)) {
return false;
@@ -577,6 +582,14 @@ struct Allocator {
AllocatorQuarantine.Drain(&ThreadQuarantineCache,
QuarantineCallback(&Cache));
}
+
+ uptr getStats(AllocatorStat StatType) {
+ if (UNLIKELY(!ThreadInited))
+ initThread();
+ uptr stats[AllocatorStatCount];
+ BackendAllocator.GetStats(stats);
+ return stats[StatType];
+ }
};
static Allocator Instance(LINKER_INITIALIZED);
@@ -661,15 +674,11 @@ using namespace __scudo;
// MallocExtension helper functions
uptr __sanitizer_get_current_allocated_bytes() {
- uptr stats[AllocatorStatCount];
- getAllocator().GetStats(stats);
- return stats[AllocatorStatAllocated];
+ return Instance.getStats(AllocatorStatAllocated);
}
uptr __sanitizer_get_heap_size() {
- uptr stats[AllocatorStatCount];
- getAllocator().GetStats(stats);
- return stats[AllocatorStatMapped];
+ return Instance.getStats(AllocatorStatMapped);
}
uptr __sanitizer_get_free_bytes() {
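
The scudo_allocator.cpp change above keys off a weak declaration of computeHardwareCRC32(): scudo_crc32.cpp defines it only when the CRC32 instructions are enabled for that single file, and the allocator checks the weak symbol at run time before calling it. A standalone sketch of that pattern with hypothetical names (GCC/Clang weak-attribute syntax; a bitwise CRC-32C fallback is included so the sketch is self-contained):

#include <cstdint>
#include <cstdio>

// Only defined in a translation unit compiled with -msse4.2 / -mcrc; left
// undefined here, so the weak symbol evaluates to a null address.
extern "C" __attribute__((weak)) uint32_t HardwareCRC32(uint32_t crc,
                                                        uintptr_t data);

// Bitwise CRC-32C fallback, always available.
static uint32_t SoftwareCRC32(uint32_t crc, uintptr_t data) {
  for (unsigned i = 0; i < sizeof(data); ++i) {
    crc ^= (data >> (8 * i)) & 0xff;
    for (int b = 0; b < 8; ++b)
      crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
  }
  return crc;
}

static uint32_t ComputeCRC32(uint32_t crc, uintptr_t data, bool want_hw) {
  if (HardwareCRC32 && want_hw)  // null unless a strong definition was linked
    return HardwareCRC32(crc, data);
  return SoftwareCRC32(crc, data);
}

int main() {
  std::printf("%08x\n", ComputeCRC32(0xFFFFFFFFu, 0x1234u, true));
  return 0;
}
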
diff --git a/lib/scudo/scudo_allocator.h b/lib/scudo/scudo_allocator.h
index 6431a2aa07d7..5f5225b36286 100644
--- a/lib/scudo/scudo_allocator.h
+++ b/lib/scudo/scudo_allocator.h
@@ -18,7 +18,9 @@
#include "sanitizer_common/sanitizer_allocator.h"
-#include <atomic>
+#if !SANITIZER_LINUX
+# error "The Scudo hardened allocator is currently only supported on Linux."
+#endif
namespace __scudo {
@@ -54,7 +56,7 @@ struct UnpackedHeader {
u64 Salt : 8;
};
-typedef std::atomic<PackedHeader> AtomicPackedHeader;
+typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
diff --git a/lib/scudo/scudo_crc32.cpp b/lib/scudo/scudo_crc32.cpp
index 94c8c2424929..56be22f4ee62 100644
--- a/lib/scudo/scudo_crc32.cpp
+++ b/lib/scudo/scudo_crc32.cpp
@@ -12,13 +12,13 @@
///
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -msse4.2
// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
-#include "scudo_crc32.h"
-#include "scudo_utils.h"
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
# ifdef __SSE4_2__
@@ -34,20 +34,9 @@
namespace __scudo {
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-INLINE u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
return CRC32_INTRINSIC(Crc, Data);
}
-
-u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
- if (HashType == CRC32Hardware) {
- return computeHardwareCRC32(Crc, Data);
- }
- return computeSoftwareCRC32(Crc, Data);
-}
-#else
-u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
- return computeSoftwareCRC32(Crc, Data);
-}
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
} // namespace __scudo
diff --git a/lib/scudo/scudo_crc32.h b/lib/scudo/scudo_crc32.h
deleted file mode 100644
index 6635cc78bbab..000000000000
--- a/lib/scudo/scudo_crc32.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// Header for scudo_crc32.cpp.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CRC32_H_
-#define SCUDO_CRC32_H_
-
-#include "sanitizer_common/sanitizer_internal_defs.h"
-
-namespace __scudo {
-
-enum : u8 {
- CRC32Software = 0,
- CRC32Hardware = 1,
-};
-
-u32 computeCRC32(u32 Crc, uptr Data, u8 HashType);
-
-} // namespace __scudo
-
-#endif // SCUDO_CRC32_H_
diff --git a/lib/scudo/scudo_flags.cpp b/lib/scudo/scudo_flags.cpp
index b9c838107305..64da1d9d8ec1 100644
--- a/lib/scudo/scudo_flags.cpp
+++ b/lib/scudo/scudo_flags.cpp
@@ -68,7 +68,7 @@ void initFlags() {
// Sanity checks and default settings for the Quarantine parameters.
if (f->QuarantineSizeMb < 0) {
- const int DefaultQuarantineSizeMb = 64;
+ const int DefaultQuarantineSizeMb = FIRST_32_SECOND_64(16, 64);
f->QuarantineSizeMb = DefaultQuarantineSizeMb;
}
// We enforce an upper limit for the quarantine size of 4Gb.
@@ -76,7 +76,8 @@ void initFlags() {
dieWithMessage("ERROR: the quarantine size is too large\n");
}
if (f->ThreadLocalQuarantineSizeKb < 0) {
- const int DefaultThreadLocalQuarantineSizeKb = 1024;
+ const int DefaultThreadLocalQuarantineSizeKb =
+ FIRST_32_SECOND_64(256, 1024);
f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
}
// And an upper limit of 128Mb for the thread quarantine cache.
@@ -84,6 +85,10 @@ void initFlags() {
dieWithMessage("ERROR: the per thread quarantine cache size is too "
"large\n");
}
+ if (f->ThreadLocalQuarantineSizeKb == 0 && f->QuarantineSizeMb > 0) {
+ dieWithMessage("ERROR: ThreadLocalQuarantineSizeKb can be set to 0 only "
+ "when QuarantineSizeMb is set to 0\n");
+ }
}
Flags *getFlags() {
diff --git a/lib/scudo/scudo_flags.inc b/lib/scudo/scudo_flags.inc
index c7a2acf146ca..45f9ea846e1a 100644
--- a/lib/scudo/scudo_flags.inc
+++ b/lib/scudo/scudo_flags.inc
@@ -15,12 +15,14 @@
# error "Define SCUDO_FLAG prior to including this file!"
#endif
-SCUDO_FLAG(int, QuarantineSizeMb, 64,
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineSizeMb, -1,
"Size (in Mb) of quarantine used to delay the actual deallocation "
"of chunks. Lower value may reduce memory usage but decrease the "
"effectiveness of the mitigation.")
-SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, 1024,
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, -1,
"Size (in Kb) of per-thread cache used to offload the global "
"quarantine. Lower value may reduce memory usage but might increase "
"the contention on the global quarantine.")
diff --git a/lib/scudo/scudo_utils.cpp b/lib/scudo/scudo_utils.cpp
index ffa65b219fd1..4e2f6e08e80d 100644
--- a/lib/scudo/scudo_utils.cpp
+++ b/lib/scudo/scudo_utils.cpp
@@ -20,8 +20,9 @@
#if defined(__x86_64__) || defined(__i386__)
# include <cpuid.h>
#endif
-
-#include <cstring>
+#if defined(__arm__) || defined(__aarch64__)
+# include <sys/auxv.h>
+#endif
// TODO(kostyak): remove __sanitizer *Printf uses in favor of our own less
// complicated string formatting code. The following is a
@@ -82,12 +83,12 @@ CPUIDRegs getCPUFeatures() {
}
#ifndef bit_SSE4_2
-#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+# define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
#endif
bool testCPUFeature(CPUFeature Feature)
{
- static CPUIDRegs FeaturesRegs = getCPUFeatures();
+ CPUIDRegs FeaturesRegs = getCPUFeatures();
switch (Feature) {
case CRC32CPUFeature: // CRC32 is provided by SSE 4.2.
@@ -97,6 +98,25 @@ bool testCPUFeature(CPUFeature Feature)
}
return false;
}
+#elif defined(__arm__) || defined(__aarch64__)
+// For ARM and AArch64, hardware CRC32 support is indicated in the
+// AT_HWCAP auxiliary vector.
+
+#ifndef HWCAP_CRC32
+# define HWCAP_CRC32 (1<<7) // HWCAP_CRC32 is missing on older platforms.
+#endif
+
+bool testCPUFeature(CPUFeature Feature) {
+ uptr HWCap = getauxval(AT_HWCAP);
+
+ switch (Feature) {
+ case CRC32CPUFeature:
+ return !!(HWCap & HWCAP_CRC32);
+ default:
+ break;
+ }
+ return false;
+}
#else
bool testCPUFeature(CPUFeature Feature) {
return false;
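
A minimal standalone version of the getauxval() probe added above (Linux on ARM/AArch64; HWCAP_CRC32 may be missing from older headers, hence the same fallback define the patch uses):

#include <cstdio>
#include <sys/auxv.h>

#ifndef HWCAP_CRC32
# define HWCAP_CRC32 (1 << 7)  // value the patch assumes for old headers
#endif

static bool CpuHasCRC32() {
  return (getauxval(AT_HWCAP) & HWCAP_CRC32) != 0;
}

int main() {
  std::printf("hardware CRC32: %s\n", CpuHasCRC32() ? "yes" : "no");
  return 0;
}
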
diff --git a/lib/scudo/scudo_utils.h b/lib/scudo/scudo_utils.h
index ef2a609671ac..5082d79f6954 100644
--- a/lib/scudo/scudo_utils.h
+++ b/lib/scudo/scudo_utils.h
@@ -53,7 +53,11 @@ struct Xorshift128Plus {
u64 State[2];
};
-// Software CRC32 functions, to be used when hardware support is not detected.
+enum : u8 {
+ CRC32Software = 0,
+ CRC32Hardware = 1,
+};
+
u32 computeSoftwareCRC32(u32 Crc, uptr Data);
} // namespace __scudo
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
index d5195459656c..195ecb5dfe8a 100644
--- a/lib/tsan/CMakeLists.txt
+++ b/lib/tsan/CMakeLists.txt
@@ -25,6 +25,7 @@ append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
set(TSAN_SOURCES
rtl/tsan_clock.cc
rtl/tsan_debugging.cc
+ rtl/tsan_external.cc
rtl/tsan_fd.cc
rtl/tsan_flags.cc
rtl/tsan_ignoreset.cc
diff --git a/lib/tsan/check_analyze.sh b/lib/tsan/check_analyze.sh
index a5d3632dfa51..d454ec2dd0fd 100755
--- a/lib/tsan/check_analyze.sh
+++ b/lib/tsan/check_analyze.sh
@@ -26,22 +26,16 @@ check() {
fi
}
-for f in write1; do
+for f in write1 write2 write4 write8; do
check $f rsp 1
check $f push 2
check $f pop 2
done
-for f in write2 write4 write8; do
- check $f rsp 1
- check $f push 3
- check $f pop 3
-done
-
for f in read1 read2 read4 read8; do
check $f rsp 1
- check $f push 5
- check $f pop 5
+ check $f push 4
+ check $f pop 4
done
for f in func_entry func_exit; do
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
index 34625c8af0b0..d7a9e0b67962 100644
--- a/lib/tsan/go/tsan_go.cc
+++ b/lib/tsan/go/tsan_go.cc
@@ -214,7 +214,7 @@ void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
ThreadState *thr = AllocGoroutine();
*pthr = thr;
int goid = ThreadCreate(parent, (uptr)pc, 0, true);
- ThreadStart(thr, goid, 0);
+ ThreadStart(thr, goid, 0, /*workerthread*/ false);
}
void __tsan_go_end(ThreadState *thr) {
@@ -247,13 +247,17 @@ void __tsan_finalizer_goroutine(ThreadState *thr) {
}
void __tsan_mutex_before_lock(ThreadState *thr, uptr addr, uptr write) {
+ if (write)
+ MutexPreLock(thr, 0, addr);
+ else
+ MutexPreReadLock(thr, 0, addr);
}
void __tsan_mutex_after_lock(ThreadState *thr, uptr addr, uptr write) {
if (write)
- MutexLock(thr, 0, addr);
+ MutexPostLock(thr, 0, addr);
else
- MutexReadLock(thr, 0, addr);
+ MutexPostReadLock(thr, 0, addr);
}
void __tsan_mutex_before_unlock(ThreadState *thr, uptr addr, uptr write) {
diff --git a/lib/tsan/rtl/tsan.syms.extra b/lib/tsan/rtl/tsan.syms.extra
index 22dfde914136..ab5b5a4fcbae 100644
--- a/lib/tsan/rtl/tsan.syms.extra
+++ b/lib/tsan/rtl/tsan.syms.extra
@@ -9,6 +9,16 @@ __tsan_java*
__tsan_unaligned*
__tsan_release
__tsan_acquire
+__tsan_mutex_create
+__tsan_mutex_destroy
+__tsan_mutex_pre_lock
+__tsan_mutex_post_lock
+__tsan_mutex_pre_unlock
+__tsan_mutex_post_unlock
+__tsan_mutex_pre_signal
+__tsan_mutex_post_signal
+__tsan_mutex_pre_divert
+__tsan_mutex_post_divert
__ubsan_*
Annotate*
WTFAnnotate*
diff --git a/lib/tsan/rtl/tsan_debugging.cc b/lib/tsan/rtl/tsan_debugging.cc
index d9fb6861bc0c..06154bc135a9 100644
--- a/lib/tsan/rtl/tsan_debugging.cc
+++ b/lib/tsan/rtl/tsan_debugging.cc
@@ -24,6 +24,7 @@ static const char *ReportTypeDescription(ReportType typ) {
if (typ == ReportTypeVptrRace) return "data-race-vptr";
if (typ == ReportTypeUseAfterFree) return "heap-use-after-free";
if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free-vptr";
+ if (typ == ReportTypeExternalRace) return "external-race";
if (typ == ReportTypeThreadLeak) return "thread-leak";
if (typ == ReportTypeMutexDestroyLocked) return "locked-mutex-destroy";
if (typ == ReportTypeMutexDoubleLock) return "mutex-double-lock";
@@ -127,6 +128,16 @@ int __tsan_get_report_loc(void *report, uptr idx, const char **type,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *object_type = GetObjectTypeFromTag(loc->external_tag);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
int *destroyed, void **trace, uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
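
A small consumer-side sketch of the query added above: given a report handle from the existing __tsan debugging API (how that handle is obtained is not shown here), the new entry point yields the registered object-type string for a report location, or leaves it null when no external tag applies. Only the function introduced in this hunk is assumed; the typedef and helper name are illustrative.

#include <cstdio>

typedef unsigned long uptr;  // stand-in for the runtime's uptr on LP64 targets

extern "C" int __tsan_get_report_loc_object_type(void *report, uptr idx,
                                                 const char **object_type);

void PrintFirstLocationObjectType(void *report) {
  const char *object_type = nullptr;
  __tsan_get_report_loc_object_type(report, 0, &object_type);
  if (object_type)
    std::printf("location 0 describes a '%s' object\n", object_type);
  else
    std::printf("location 0 has no external object type\n");
}
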
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index 55580a5c4436..8a0381e61ab0 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -149,7 +149,8 @@ class RegionAlloc;
// Descriptor of user's memory block.
struct MBlock {
- u64 siz;
+ u64 siz : 48;
+ u64 tag : 16;
u32 stk;
u16 tid;
};
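
A minimal standalone sketch (plain ISO C++, not runtime code) of the field packing above: the new 16-bit tag shares one u64 with the 48-bit size, so MBlock keeps its old footprint on typical LP64 ABIs. The MockMBlock name and the values used are illustrative only.

#include <cstdint>
#include <cstdio>

struct MockMBlock {
  uint64_t siz : 48;  // user block size, up to 2^48 - 1 bytes
  uint64_t tag : 16;  // external object-type tag, 0 means "no tag"
  uint32_t stk;       // stack id of the allocation
  uint16_t tid;       // allocating thread
};

int main() {
  MockMBlock b{};
  b.siz = 1ull << 40;  // a large allocation still fits in 48 bits
  b.tag = 7;           // hypothetical tag value handed out at registration
  std::printf("sizeof=%zu siz=%llu tag=%u\n", sizeof(MockMBlock),
              (unsigned long long)b.siz, (unsigned)b.tag);
}
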
diff --git a/lib/tsan/rtl/tsan_external.cc b/lib/tsan/rtl/tsan_external.cc
new file mode 100644
index 000000000000..dc8ec62322ce
--- /dev/null
+++ b/lib/tsan/rtl/tsan_external.cc
@@ -0,0 +1,78 @@
+//===-- tsan_external.cc --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+const uptr kMaxTag = 128; // Limited to 65,536, since MBlock only stores tags
+ // as 16-bit values, see tsan_defs.h.
+
+const char *registered_tags[kMaxTag];
+static atomic_uint32_t used_tags{1}; // Tag 0 means "no tag". NOLINT
+
+const char *GetObjectTypeFromTag(uptr tag) {
+ if (tag == 0) return nullptr;
+ // Invalid/corrupted tag? Better return NULL and let the caller deal with it.
+ if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr;
+ return registered_tags[tag];
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type) {
+ uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed);
+ CHECK_LT(new_tag, kMaxTag);
+ registered_tags[new_tag] = internal_strdup(object_type);
+ return (void *)new_tag;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ Allocator *a = allocator();
+ MBlock *b = nullptr;
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b) {
+ b->tag = (uptr)tag;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ ThreadState *thr = cur_thread();
+ thr->external_tag = (uptr)tag;
+ FuncEntry(thr, (uptr)caller_pc);
+ MemoryRead(thr, CALLERPC, (uptr)addr, kSizeLog8);
+ FuncExit(thr);
+ thr->external_tag = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ ThreadState *thr = cur_thread();
+ thr->external_tag = (uptr)tag;
+ FuncEntry(thr, (uptr)caller_pc);
+ MemoryWrite(thr, CALLERPC, (uptr)addr, kSizeLog8);
+ FuncExit(thr);
+ thr->external_tag = 0;
+}
+} // extern "C"
+
+} // namespace __tsan
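
A minimal sketch of how a client library might drive these new entry points: register a type tag once, stamp it onto each object, and report logical reads and writes attributed to the caller. The extern "C" declarations mirror the interface additions in this import; the mylib namespace and Array type are hypothetical.

extern "C" {
void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_assign_tag(void *addr, void *tag);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
}

namespace mylib {  // hypothetical client library

static void *kArrayTag = __tsan_external_register_tag("MyLibrary::Array");

struct Array {
  Array() { __tsan_external_assign_tag(this, kArrayTag); }
  int get(int idx) {
    // Report a logical read of the whole object, attributed to our caller.
    __tsan_external_read(this, __builtin_return_address(0), kArrayTag);
    return data_[idx];
  }
  void set(int idx, int v) {
    // Report a logical mutation of the whole object.
    __tsan_external_write(this, __builtin_return_address(0), kArrayTag);
    data_[idx] = v;
  }
  int data_[16] = {};
};

}  // namespace mylib
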
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index d8d4746ab59b..89e22a132786 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -21,10 +21,6 @@
namespace __tsan {
-Flags *flags() {
- return &ctx->flags;
-}
-
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
extern "C" const char* __tsan_default_options();
diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h
index e2f6b3c9f021..66740def52fa 100644
--- a/lib/tsan/rtl/tsan_flags.h
+++ b/lib/tsan/rtl/tsan_flags.h
@@ -28,7 +28,6 @@ struct Flags : DDFlags {
void ParseFromString(const char *str);
};
-Flags *flags();
void InitializeFlags(Flags *flags, const char *env);
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc
index a48545c433ba..e9b3e35f07e5 100644
--- a/lib/tsan/rtl/tsan_flags.inc
+++ b/lib/tsan/rtl/tsan_flags.inc
@@ -79,7 +79,7 @@ TSAN_FLAG(bool, die_after_fork, true,
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
TSAN_FLAG(bool, ignore_interceptors_accesses, false,
"Ignore reads and writes from all interceptors.")
-TSAN_FLAG(bool, ignore_noninstrumented_modules, false,
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
"Interceptors should only detect races when called from instrumented "
"modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 898f32df182b..d0fd91aec234 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
@@ -29,9 +30,6 @@
#include "tsan_mman.h"
#include "tsan_fd.h"
-#if SANITIZER_POSIX
-#include "sanitizer_common/sanitizer_posix.h"
-#endif
using namespace __tsan; // NOLINT
@@ -46,13 +44,6 @@ using namespace __tsan; // NOLINT
#define mallopt(a, b)
#endif
-#if SANITIZER_LINUX || SANITIZER_FREEBSD
-#define PTHREAD_CREATE_DETACHED 1
-#elif SANITIZER_MAC
-#define PTHREAD_CREATE_DETACHED 2
-#endif
-
-
#ifdef __mips__
const int kSigCount = 129;
#else
@@ -277,7 +268,7 @@ ScopedInterceptor::~ScopedInterceptor() {
void ScopedInterceptor::EnableIgnores() {
if (ignoring_) {
- ThreadIgnoreBegin(thr_, pc_);
+ ThreadIgnoreBegin(thr_, pc_, false);
if (in_ignored_lib_) {
DCHECK(!thr_->in_ignored_lib);
thr_->in_ignored_lib = true;
@@ -881,7 +872,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
internal_sched_yield();
Processor *proc = ProcCreate();
ProcWire(proc, thr);
- ThreadStart(thr, tid, GetTid());
+ ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
atomic_store(&p->tid, 0, memory_order_release);
}
void *res = callback(param);
@@ -928,8 +919,7 @@ TSAN_INTERCEPTOR(int, pthread_create,
ThreadIgnoreEnd(thr, pc);
}
if (res == 0) {
- int tid = ThreadCreate(thr, pc, *(uptr*)th,
- detached == PTHREAD_CREATE_DETACHED);
+ int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
CHECK_NE(tid, 0);
// Synchronization on p.tid serves two purposes:
// 1. ThreadCreate must finish before the new thread starts.
@@ -1025,7 +1015,7 @@ static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
ThreadSignalContext *ctx = SigCtx(arg->thr);
CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
- MutexLock(arg->thr, arg->pc, (uptr)arg->m);
+ MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
// Undo BlockingCall ctor effects.
arg->thr->ignore_interceptors--;
arg->si->~ScopedInterceptor();
@@ -1054,7 +1044,7 @@ static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
}
if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
return res;
}
@@ -1114,14 +1104,15 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
int res = REAL(pthread_mutex_init)(m, a);
if (res == 0) {
- bool recursive = false;
+ u32 flagz = 0;
if (a) {
int type = 0;
if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
- recursive = (type == PTHREAD_MUTEX_RECURSIVE
- || type == PTHREAD_MUTEX_RECURSIVE_NP);
+ if (type == PTHREAD_MUTEX_RECURSIVE ||
+ type == PTHREAD_MUTEX_RECURSIVE_NP)
+ flagz |= MutexFlagWriteReentrant;
}
- MutexCreate(thr, pc, (uptr)m, false, recursive, false);
+ MutexCreate(thr, pc, (uptr)m, flagz);
}
return res;
}
@@ -1141,7 +1132,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
if (res == EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == EOWNERDEAD)
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
return res;
}
@@ -1150,7 +1141,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
int res = REAL(pthread_mutex_timedlock)(m, abstime);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
@@ -1161,7 +1152,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
int res = REAL(pthread_spin_init)(m, pshared);
if (res == 0) {
- MutexCreate(thr, pc, (uptr)m, false, false, false);
+ MutexCreate(thr, pc, (uptr)m);
}
return res;
}
@@ -1177,9 +1168,10 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
int res = REAL(pthread_spin_lock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m);
}
return res;
}
@@ -1188,7 +1180,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
@@ -1205,7 +1197,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
int res = REAL(pthread_rwlock_init)(m, a);
if (res == 0) {
- MutexCreate(thr, pc, (uptr)m, true, false, false);
+ MutexCreate(thr, pc, (uptr)m);
}
return res;
}
@@ -1221,9 +1213,10 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ MutexPreReadLock(thr, pc, (uptr)m);
int res = REAL(pthread_rwlock_rdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexPostReadLock(thr, pc, (uptr)m);
}
return res;
}
@@ -1232,7 +1225,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
+ MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
@@ -1242,7 +1235,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexPostReadLock(thr, pc, (uptr)m);
}
return res;
}
@@ -1250,9 +1243,10 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ MutexPreLock(thr, pc, (uptr)m);
int res = REAL(pthread_rwlock_wrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m);
}
return res;
}
@@ -1261,7 +1255,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
@@ -1271,7 +1265,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
@@ -1644,24 +1638,6 @@ TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
#define TSAN_MAYBE_INTERCEPT_TMPFILE64
#endif
-TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
- MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
- }
- return REAL(fread)(ptr, size, nmemb, f);
-}
-
-TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
- MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
- }
- return REAL(fwrite)(p, size, nmemb, f);
-}
-
static void FlushStreams() {
// Flushing all the streams here may freeze the process if a child thread is
// performing file stream operations at the same time.
@@ -2251,8 +2227,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
OnExit(((TsanInterceptorContext *) ctx)->thr)
-#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
- MutexLock(((TsanInterceptorContext *)ctx)->thr, \
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
+ MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
+ MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc
index fc5eb0499076..f6bf8a0e586b 100644
--- a/lib/tsan/rtl/tsan_interceptors_mac.cc
+++ b/lib/tsan/rtl/tsan_interceptors_mac.cc
@@ -281,6 +281,12 @@ TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
(connection, message, replyq, new_handler);
}
+TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
+ Release(thr, pc, (uptr)connection);
+ REAL(xpc_connection_cancel)(connection);
+}
+
// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index 4e342a58a066..496a8717f155 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -79,6 +79,15 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_write_range(void *addr, unsigned long size); // NOLINT
@@ -123,6 +132,10 @@ int __tsan_get_report_loc(void *report, uptr idx, const char **type,
int *fd, int *suppressable, void **trace,
uptr trace_size);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type);
+
// Returns information about mutexes included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc
index 62db79661625..810c84025f23 100644
--- a/lib/tsan/rtl/tsan_interface_ann.cc
+++ b/lib/tsan/rtl/tsan_interface_ann.cc
@@ -31,11 +31,10 @@ namespace __tsan {
class ScopedAnnotation {
public:
- ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
- uptr pc)
+ ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
: thr_(thr) {
FuncEntry(thr_, pc);
- DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
+ DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
}
~ScopedAnnotation() {
@@ -46,18 +45,20 @@ class ScopedAnnotation {
ThreadState *const thr_;
};
-#define SCOPED_ANNOTATION(typ) \
+#define SCOPED_ANNOTATION_RET(typ, ret) \
if (!flags()->enable_annotations) \
- return; \
+ return ret; \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = (uptr)__builtin_return_address(0); \
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
- ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
+#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
+
static const int kMaxDescLen = 128;
struct ExpectRace {
@@ -252,12 +253,12 @@ void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockCreate);
- MutexCreate(thr, pc, m, true, true, false);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
- MutexCreate(thr, pc, m, true, true, true);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
@@ -269,9 +270,9 @@ void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
uptr is_w) {
SCOPED_ANNOTATION(AnnotateRWLockAcquired);
if (is_w)
- MutexLock(thr, pc, m);
+ MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
else
- MutexReadLock(thr, pc, m);
+ MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
@@ -458,4 +459,95 @@ void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_create(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_create);
+ MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_destroy(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_destroy);
+ MutexDestroy(thr, pc, (uptr)m);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
+ if (!(flagz & MutexFlagTryLock)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPreReadLock(thr, pc, (uptr)m);
+ else
+ MutexPreLock(thr, pc, (uptr)m);
+ }
+ ThreadIgnoreBegin(thr, pc, false);
+ ThreadIgnoreSyncBegin(thr, pc, false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_lock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+ if (!(flagz & MutexFlagTryLockFailed)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPostReadLock(thr, pc, (uptr)m, flagz);
+ else
+ MutexPostLock(thr, pc, (uptr)m, flagz, rec);
+ }
+}
+
+INTERFACE_ATTRIBUTE
+int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
+ int ret = 0;
+ if (flagz & MutexFlagReadLock) {
+ CHECK(!(flagz & MutexFlagRecursiveUnlock));
+ MutexReadUnlock(thr, pc, (uptr)m);
+ } else {
+ ret = MutexUnlock(thr, pc, (uptr)m, flagz);
+ }
+ ThreadIgnoreBegin(thr, pc, false);
+ ThreadIgnoreSyncBegin(thr, pc, false);
+ return ret;
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
+ ThreadIgnoreBegin(thr, pc, false);
+ ThreadIgnoreSyncBegin(thr, pc, false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_signal);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
+ // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_divert);
+ ThreadIgnoreBegin(thr, pc, false);
+ ThreadIgnoreSyncBegin(thr, pc, false);
+}
} // extern "C"
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index 5238b66a2e51..b22d5c1ecef8 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -450,13 +450,32 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
// C/C++
+static morder convert_morder(morder mo) {
+ if (flags()->force_seq_cst_atomics)
+ return (morder)mo_seq_cst;
+
+ // Filter out additional memory order flags:
+ // MEMMODEL_SYNC = 1 << 15
+ // __ATOMIC_HLE_ACQUIRE = 1 << 16
+ // __ATOMIC_HLE_RELEASE = 1 << 17
+ //
+ // HLE is an optimization, and we pretend that elision always fails.
+ // MEMMODEL_SYNC is used when lowering __sync_ atomics,
+ // since we use __sync_ atomics for actual atomic operations,
+ // we can safely ignore it as well. It also subtly affects semantics,
+ // but we don't model the difference.
+ return (morder)(mo & 0x7fff);
+}
+
#define SCOPED_ATOMIC(func, ...) \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = StackTrace::GetCurrentPc(); \
- mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
ThreadState *const thr = cur_thread(); \
- if (thr->ignore_interceptors) \
+ if (thr->ignore_sync || thr->ignore_interceptors) { \
+ ProcessPendingSignals(thr); \
return NoTsanAtomic##func(__VA_ARGS__); \
+ } \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = convert_morder(mo); \
AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
ScopedAtomic sa(thr, callpc, a, mo, __func__); \
return Atomic##func(thr, pc, __VA_ARGS__); \
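
A worked instance of the 0x7fff mask used in convert_morder() above, with the GCC/Clang constants the comment names: an HLE-decorated acquire and a MEMMODEL_SYNC-decorated seq_cst both reduce to their plain base orderings before TSan models the operation.

// __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE = 2 | 1 << 16 = 0x10002
static_assert((0x10002u & 0x7fffu) == 0x2u,
              "HLE bit stripped, base memory order preserved");
// __ATOMIC_SEQ_CST | MEMMODEL_SYNC = 5 | 1 << 15
static_assert(((5u | 1u << 15) & 0x7fffu) == 5u,
              "MEMMODEL_SYNC bit stripped from a __sync-lowered op");
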
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 5bdc04f07567..75e960e629f9 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -180,8 +180,8 @@ void __tsan_java_mutex_lock(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexCreate(thr, pc, addr, true, true, true);
- MutexLock(thr, pc, addr);
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_unlock(jptr addr) {
@@ -201,8 +201,8 @@ void __tsan_java_mutex_read_lock(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexCreate(thr, pc, addr, true, true, true);
- MutexReadLock(thr, pc, addr);
+ MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
+ MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_read_unlock(jptr addr) {
@@ -223,8 +223,8 @@ void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
CHECK_GT(rec, 0);
- MutexCreate(thr, pc, addr, true, true, true);
- MutexLock(thr, pc, addr, rec);
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
}
int __tsan_java_mutex_unlock_rec(jptr addr) {
@@ -234,7 +234,7 @@ int __tsan_java_mutex_unlock_rec(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- return MutexUnlock(thr, pc, addr, true);
+ return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
}
void __tsan_java_acquire(jptr addr) {
diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc
index d8c689ebb5fc..8c759a3be4e1 100644
--- a/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -93,14 +93,15 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
new_context->free_context_in_callback = true;
new_context->submitted_synchronously = false;
new_context->is_barrier_block = false;
+ new_context->non_queue_sync_object = 0;
return new_context;
}
-#define GET_QUEUE_SYNC_VARS(context, q) \
- bool is_queue_serial = q && IsQueueSerial(q); \
- uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
- uptr serial_sync = (uptr)sync_ptr; \
- uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr); \
+#define GET_QUEUE_SYNC_VARS(context, q) \
+ bool is_queue_serial = q && IsQueueSerial(q); \
+ uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
+ uptr serial_sync = (uptr)sync_ptr; \
+ uptr concurrent_sync = sync_ptr ? ((uptr)sync_ptr) + sizeof(uptr) : 0; \
bool serial_task = context->is_barrier_block || is_queue_serial
static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
@@ -111,8 +112,8 @@ static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
dispatch_queue_t q = context->queue;
do {
GET_QUEUE_SYNC_VARS(context, q);
- Acquire(thr, pc, serial_sync);
- if (serial_task) Acquire(thr, pc, concurrent_sync);
+ if (serial_sync) Acquire(thr, pc, serial_sync);
+ if (serial_task && concurrent_sync) Acquire(thr, pc, concurrent_sync);
if (q) q = GetTargetQueueFromQueue(q);
} while (q);
@@ -126,7 +127,8 @@ static void dispatch_sync_post_execute(ThreadState *thr, uptr pc,
dispatch_queue_t q = context->queue;
do {
GET_QUEUE_SYNC_VARS(context, q);
- Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
+ if (serial_task && serial_sync) Release(thr, pc, serial_sync);
+ if (!serial_task && concurrent_sync) Release(thr, pc, concurrent_sync);
if (q) q = GetTargetQueueFromQueue(q);
} while (q);
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 25dd241d826f..b8d3d5528bb5 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -207,7 +207,7 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
ThreadState *parent_thread_state = nullptr; // No parent.
int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
CHECK_NE(tid, 0);
- ThreadStart(thr, tid, GetTid());
+ ThreadStart(thr, tid, GetTid(), /*workerthread*/ true);
}
} else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
if (thread == pthread_self()) {
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 07fd41208eb7..7de00840cdbc 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -90,6 +90,8 @@ static const char *ReportTypeString(ReportType typ) {
return "heap-use-after-free";
if (typ == ReportTypeVptrUseAfterFree)
return "heap-use-after-free (virtual call vs free)";
+ if (typ == ReportTypeExternalRace)
+ return "race on a library object";
if (typ == ReportTypeThreadLeak)
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
@@ -152,14 +154,25 @@ static const char *MopDesc(bool first, bool write, bool atomic) {
: (write ? "Previous write" : "Previous read"));
}
+static const char *ExternalMopDesc(bool first, bool write) {
+ return first ? (write ? "Mutating" : "Read-only")
+ : (write ? "Previous mutating" : "Previous read-only");
+}
+
static void PrintMop(const ReportMop *mop, bool first) {
Decorator d;
char thrbuf[kThreadBufSize];
Printf("%s", d.Access());
- Printf(" %s of size %d at %p by %s",
- MopDesc(first, mop->write, mop->atomic),
- mop->size, (void*)mop->addr,
- thread_name(thrbuf, mop->tid));
+ const char *object_type = GetObjectTypeFromTag(mop->external_tag);
+ if (!object_type) {
+ Printf(" %s of size %d at %p by %s",
+ MopDesc(first, mop->write, mop->atomic), mop->size,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ } else {
+ Printf(" %s access of object %s at %p by %s",
+ ExternalMopDesc(first, mop->write), object_type,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ }
PrintMutexSet(mop->mset);
Printf(":\n");
Printf("%s", d.EndAccess());
@@ -183,9 +196,16 @@ static void PrintLocation(const ReportLocation *loc) {
global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
- Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->heap_chunk_size, loc->heap_chunk_start,
- thread_name(thrbuf, loc->tid));
+ const char *object_type = GetObjectTypeFromTag(loc->external_tag);
+ if (!object_type) {
+ Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ } else {
+ Printf(" Location is %s object of size %zu at %p allocated by %s:\n",
+ object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ }
print_stack = true;
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
@@ -235,9 +255,15 @@ static void PrintThread(const ReportThread *rt) {
if (rt->name && rt->name[0] != '\0')
Printf(" '%s'", rt->name);
char thrbuf[kThreadBufSize];
- Printf(" (tid=%zu, %s) created by %s",
- rt->os_id, rt->running ? "running" : "finished",
- thread_name(thrbuf, rt->parent_tid));
+ const char *thread_status = rt->running ? "running" : "finished";
+ if (rt->workerthread) {
+ Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+ Printf("\n");
+ Printf("%s", d.EndThreadDescription());
+ return;
+ }
+ Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+ thread_name(thrbuf, rt->parent_tid));
if (rt->stack)
Printf(" at:");
Printf("\n");
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
index d0b9d7458bf8..8d8ae0fd8f58 100644
--- a/lib/tsan/rtl/tsan_report.h
+++ b/lib/tsan/rtl/tsan_report.h
@@ -24,6 +24,7 @@ enum ReportType {
ReportTypeVptrRace,
ReportTypeUseAfterFree,
ReportTypeVptrUseAfterFree,
+ ReportTypeExternalRace,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
ReportTypeMutexDoubleLock,
@@ -56,6 +57,7 @@ struct ReportMop {
int size;
bool write;
bool atomic;
+ uptr external_tag;
Vector<ReportMopMutex> mset;
ReportStack *stack;
@@ -75,6 +77,7 @@ struct ReportLocation {
DataInfo global;
uptr heap_chunk_start;
uptr heap_chunk_size;
+ uptr external_tag;
int tid;
int fd;
bool suppressable;
@@ -89,8 +92,9 @@ struct ReportThread {
int id;
uptr os_id;
bool running;
+ bool workerthread;
char *name;
- int parent_tid;
+ u32 parent_tid;
ReportStack *stack;
};
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index bfb835889c7a..70393037e786 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -381,7 +381,7 @@ void Initialize(ThreadState *thr) {
// Initialize thread 0.
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
- ThreadStart(thr, tid, internal_getpid());
+ ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
#if TSAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
@@ -980,21 +980,21 @@ void FuncExit(ThreadState *thr) {
thr->shadow_stack_pos--;
}
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
- if (!ctx->after_multithreaded_fork)
+ if (save_stack && !ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->ignore_reads_and_writes--;
- CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
@@ -1011,20 +1011,20 @@ uptr __tsan_testonly_shadow_stack_current_size() {
}
#endif
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
- if (!ctx->after_multithreaded_fork)
+ if (save_stack && !ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_sync, 0);
thr->ignore_sync--;
- CHECK_GE(thr->ignore_sync, 0);
#if !SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 7fcb9d48e038..0d62af00a05d 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -410,6 +410,7 @@ struct ThreadState {
bool is_dead;
bool is_freeing;
bool is_vptr_access;
+ uptr external_tag;
const uptr stk_addr;
const uptr stk_size;
const uptr tls_addr;
@@ -545,6 +546,10 @@ struct Context {
extern Context *ctx; // The one and the only global runtime context.
+ALWAYS_INLINE Flags *flags() {
+ return &ctx->flags;
+}
+
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
@@ -564,7 +569,7 @@ class ScopedReport {
explicit ScopedReport(ReportType typ);
~ScopedReport();
- void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
const MutexSet *mset);
void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false);
@@ -640,6 +645,8 @@ bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
+const char *GetObjectTypeFromTag(uptr tag);
+
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
@@ -704,16 +711,16 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
-void ThreadStart(ThreadState *thr, int tid, uptr os_id);
+void ThreadStart(ThreadState *thr, int tid, uptr os_id, bool workerthread);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
@@ -728,13 +735,16 @@ void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
- bool rw, bool recursive, bool linker_init);
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
- bool try_lock = false);
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
+ int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index f3b51c30faff..086b28927919 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -62,20 +62,17 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
OutputReport(thr, rep);
}
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
- bool rw, bool recursive, bool linker_init) {
- DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
StatInc(thr, StatMutexCreate);
- if (!linker_init && IsAppMem(addr)) {
+ if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- s->is_rw = rw;
- s->is_recursive = recursive;
- s->is_linker_init = linker_init;
+ s->SetFlags(flagz & MutexCreationFlagMask);
if (!SANITIZER_GO && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
@@ -87,7 +84,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
- if (s->is_linker_init) {
+ if (s->IsFlagSet(MutexFlagLinkerInit)) {
// Destroy is no-op for linker-initialized mutexes.
s->mtx.Unlock();
return;
@@ -100,8 +97,8 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
- && !s->is_broken) {
- s->is_broken = true;
+ && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
unlock_locked = true;
}
u64 mid = s->GetId();
@@ -141,12 +138,33 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
// s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
- DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
- CHECK_GT(rec, 0);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != thr->tid) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ } else {
+ s->mtx.ReadUnlock();
+ }
+ }
+}
+
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+ DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
+ thr->tid, addr, flagz, rec);
+ if (flagz & MutexFlagRecursiveLock)
+ CHECK_GT(rec, 0);
+ else
+ rec = 1;
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->UpdateFlags(flagz);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
bool report_double_lock = false;
@@ -156,38 +174,43 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
s->last_lock = thr->fast_state.raw();
} else if (s->owner_tid == thr->tid) {
CHECK_GT(s->recursion, 0);
- } else if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_double_lock = true;
}
- if (s->recursion == 0) {
+ const bool first = s->recursion == 0;
+ s->recursion += rec;
+ if (first) {
StatInc(thr, StatMutexLock);
AcquireImpl(thr, pc, &s->clock);
AcquireImpl(thr, pc, &s->read_clock);
- } else if (!s->is_recursive) {
+ } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
StatInc(thr, StatMutexRecLock);
}
- s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
- if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
+ bool pre_lock = false;
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
Callback cb(thr, pc);
- if (!try_lock)
+ if (pre_lock)
ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
}
u64 mid = s->GetId();
s->mtx.Unlock();
// Can't touch s after this point.
+ s = 0;
if (report_double_lock)
ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
- if (common_flags()->detect_deadlocks) {
+ if (first && pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
- DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
@@ -196,12 +219,12 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
int rec = 0;
bool report_bad_unlock = false;
if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
- if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
}
} else {
- rec = all ? s->recursion : 1;
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
s->recursion -= rec;
if (s->recursion == 0) {
StatInc(thr, StatMutexUnlock);
@@ -229,36 +252,53 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
return rec;
}
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
- DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
bool report_bad_lock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_lock = true;
}
}
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ bool pre_lock = false;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
Callback cb(thr, pc);
- if (!trylock)
+ if (pre_lock)
ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
}
u64 mid = s->GetId();
s->mtx.ReadUnlock();
// Can't touch s after this point.
+ s = 0;
if (report_bad_lock)
ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
- if (common_flags()->detect_deadlocks) {
+ if (pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
@@ -274,8 +314,8 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
bool report_bad_unlock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
}
}
@@ -323,8 +363,8 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
} else {
StatInc(thr, StatMutexRecUnlock);
}
- } else if (!s->is_broken) {
- s->is_broken = true;
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index bc8944fbfb58..31b9e97898b0 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -164,8 +164,8 @@ void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
(*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
- const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
+ StackTrace stack, const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
@@ -175,6 +175,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
mop->stack = SymbolizeStack(stack);
+ mop->external_tag = external_tag;
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
@@ -202,6 +203,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
rt->running = (tctx->status == ThreadStatusRunning);
rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
+ rt->workerthread = tctx->workerthread;
rt->stack = 0;
rt->stack = SymbolizeStackId(tctx->creation_stack_id);
if (rt->stack)
@@ -336,6 +338,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
loc->heap_chunk_size = b->siz;
+ loc->external_tag = b->tag;
loc->tid = tctx ? tctx->tid : b->tid;
loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
@@ -622,6 +625,8 @@ void ReportRace(ThreadState *thr) {
typ = ReportTypeVptrRace;
else if (freed)
typ = ReportTypeUseAfterFree;
+ else if (thr->external_tag > 0)
+ typ = ReportTypeExternalRace;
if (IsFiredSuppression(ctx, typ, addr))
return;
@@ -650,7 +655,8 @@ void ReportRace(ThreadState *thr) {
ScopedReport rep(typ);
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
+ rep.AddMemoryAccess(addr, thr->external_tag, s, traces[i],
+ i == 0 ? &thr->mset : mset2);
}
for (uptr i = 0; i < kMop; i++) {
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 5b17dc60bcbe..7357d97a264c 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -236,7 +236,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
return tid;
}
-void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
+void ThreadStart(ThreadState *thr, int tid, uptr os_id, bool workerthread) {
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -266,7 +266,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
ThreadRegistry *tr = ctx->thread_registry;
OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
- tr->StartThread(tid, os_id, &args);
+ tr->StartThread(tid, os_id, workerthread, &args);
tr->Lock();
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
index d1d6ed24d991..2ee688bf5771 100644
--- a/lib/tsan/rtl/tsan_stat.cc
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -153,6 +153,16 @@ void StatOutput(u64 *stat) {
name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
name[StatAnnotateThreadName] = " ThreadName ";
+ name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
+ name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
+ name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
+ name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
+ name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
+ name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
+ name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
+ name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
+ name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
+ name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";
name[StatMtxTotal] = "Contentionz ";
name[StatMtxTrace] = " Trace ";
diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h
index 8447dd84fc17..7d2791ebbfcc 100644
--- a/lib/tsan/rtl/tsan_stat.h
+++ b/lib/tsan/rtl/tsan_stat.h
@@ -157,6 +157,16 @@ enum StatType {
StatAnnotatePublishMemoryRange,
StatAnnotateUnpublishMemoryRange,
StatAnnotateThreadName,
+ Stat__tsan_mutex_create,
+ Stat__tsan_mutex_destroy,
+ Stat__tsan_mutex_pre_lock,
+ Stat__tsan_mutex_post_lock,
+ Stat__tsan_mutex_pre_unlock,
+ Stat__tsan_mutex_post_unlock,
+ Stat__tsan_mutex_pre_signal,
+ Stat__tsan_mutex_post_signal,
+ Stat__tsan_mutex_pre_divert,
+ Stat__tsan_mutex_post_divert,
// Internal mutex contentionz.
StatMtxTotal,
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index bfb64e0018fb..e39702b7d22a 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -74,6 +74,8 @@ static const char *conv(ReportType typ) {
return kSuppressionRace;
else if (typ == ReportTypeVptrUseAfterFree)
return kSuppressionRace;
+ else if (typ == ReportTypeExternalRace)
+ return kSuppressionRace;
else if (typ == ReportTypeThreadLeak)
return kSuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 44c6a26a1e8e..4cc3cb89c34f 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -42,10 +42,7 @@ void SyncVar::Reset(Processor *proc) {
owner_tid = kInvalidTid;
last_lock = 0;
recursion = 0;
- is_rw = 0;
- is_recursive = 0;
- is_broken = 0;
- is_linker_init = 0;
+ atomic_store_relaxed(&flags, 0);
if (proc == 0) {
CHECK_EQ(clock.size(), 0);
@@ -64,6 +61,7 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
MBlock *b = block_alloc_.Map(idx);
b->siz = sz;
+ b->tag = 0;
b->tid = thr->tid;
b->stk = CurrentStackId(thr, pc);
u32 *meta = MemToMeta(p);
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index 86e6bbd55bac..d24d69762171 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -23,6 +23,29 @@
namespace __tsan {
+// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
+// See documentation there as well.
+enum MutexFlags {
+ MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init
+ MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant
+ MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant
+ MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock
+ MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock
+ MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
+ MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
+ MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
+
+ // The following flags are runtime private.
+ // Mutex API misuse was detected, so don't report any more.
+ MutexFlagBroken = 1 << 30,
+ // We did not intercept pre lock event, so handle it on post lock.
+ MutexFlagDoPreLockOnPostLock = 1 << 29,
+ // Must list all mutex creation flags.
+ MutexCreationFlagMask = MutexFlagLinkerInit |
+ MutexFlagWriteReentrant |
+ MutexFlagReadReentrant,
+};
+
struct SyncVar {
SyncVar();
@@ -35,10 +58,7 @@ struct SyncVar {
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
int recursion;
- bool is_rw;
- bool is_recursive;
- bool is_broken;
- bool is_linker_init;
+ atomic_uint32_t flags;
u32 next; // in MetaMap
DDMutex dd;
SyncClock read_clock; // Used for rw mutexes only.
@@ -61,6 +81,26 @@ struct SyncVar {
*uid = id >> 48;
return (uptr)GetLsb(id, 48);
}
+
+ bool IsFlagSet(u32 f) const {
+ return atomic_load_relaxed(&flags);
+ }
+
+ void SetFlags(u32 f) {
+ atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
+ }
+
+ void UpdateFlags(u32 flagz) {
+ // Filter out operation flags.
+ if (!(flagz & MutexCreationFlagMask))
+ return;
+ u32 current = atomic_load_relaxed(&flags);
+ if (current & MutexCreationFlagMask)
+ return;
+ // Note: this can be called from MutexPostReadLock which holds only read
+ // lock on the SyncVar.
+ atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
+ }
};
/* MetaMap allows to map arbitrary user pointers onto various descriptors.
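
A small standalone check of the flag bookkeeping above: UpdateFlags() latches only the creation-time bits, so per-operation bits such as try-lock never end up stored in SyncVar::flags. The mock values mirror the MutexFlags enum in this hunk.

enum MockMutexFlags : unsigned {
  kLinkerInit     = 1 << 0,
  kWriteReentrant = 1 << 1,
  kReadReentrant  = 1 << 2,
  kTryLock        = 1 << 4,
  kCreationMask   = kLinkerInit | kWriteReentrant | kReadReentrant,
};

// A write-reentrant mutex locked with try-lock semantics: only the
// write-reentrant bit survives the creation mask.
static_assert(((kWriteReentrant | kTryLock) & kCreationMask) == kWriteReentrant,
              "operation flags are filtered out before being stored");
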
diff --git a/lib/ubsan/CMakeLists.txt b/lib/ubsan/CMakeLists.txt
index 9bb36edc72cf..f35b40f3b1cc 100644
--- a/lib/ubsan/CMakeLists.txt
+++ b/lib/ubsan/CMakeLists.txt
@@ -90,6 +90,35 @@ else()
ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH}
SOURCES ${UBSAN_CXX_SOURCES} CFLAGS ${UBSAN_CXXFLAGS})
+ if (WIN32)
+ add_compiler_rt_object_libraries(UbsanWeakInterception
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${UBSAN_SUPPORTED_ARCH}
+ SOURCES ubsan_win_weak_interception.cc
+ CFLAGS ${UBSAN_CFLAGS} -DSANITIZER_DYNAMIC
+ DEFS ${UBSAN_COMMON_DEFINITIONS})
+
+ add_compiler_rt_object_libraries(UbsanDllThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${UBSAN_SUPPORTED_ARCH}
+ SOURCES ubsan_win_dll_thunk.cc
+ CFLAGS ${UBSAN_CFLAGS} -DSANITIZER_DLL_THUNK
+ DEFS ${UBSAN_COMMON_DEFINITIONS})
+
+ set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DSANITIZER_DYNAMIC_RUNTIME_THUNK")
+ if(MSVC)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
+ elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
+ endif()
+ add_compiler_rt_object_libraries(UbsanDynamicRuntimeThunk
+ ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${UBSAN_SUPPORTED_ARCH}
+ SOURCES ubsan_win_dynamic_runtime_thunk.cc
+ CFLAGS ${UBSAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
+ DEFS ${UBSAN_COMMON_DEFINITIONS})
+ endif()
+
if(COMPILER_RT_HAS_UBSAN)
# Initializer of standalone UBSan runtime.
add_compiler_rt_object_libraries(RTUbsan_standalone
diff --git a/lib/ubsan/ubsan_diag.cc b/lib/ubsan/ubsan_diag.cc
index c531c5f7757d..bbe1e07390ad 100644
--- a/lib/ubsan/ubsan_diag.cc
+++ b/lib/ubsan/ubsan_diag.cc
@@ -79,16 +79,16 @@ static void MaybeReportErrorSummary(Location Loc, ErrorType Type) {
AI.line = SLoc.getLine();
AI.column = SLoc.getColumn();
AI.function = internal_strdup(""); // Avoid printing ?? as function name.
- ReportErrorSummary(ErrorKind, AI);
+ ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
AI.Clear();
return;
}
} else if (Loc.isSymbolizedStack()) {
const AddressInfo &AI = Loc.getSymbolizedStack()->info;
- ReportErrorSummary(ErrorKind, AI);
+ ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
return;
}
- ReportErrorSummary(ErrorKind);
+ ReportErrorSummary(ErrorKind, GetSanititizerToolName());
}
namespace {
diff --git a/lib/ubsan/ubsan_flags.cc b/lib/ubsan/ubsan_flags.cc
index e77ba550148a..3d404c1b7d34 100644
--- a/lib/ubsan/ubsan_flags.cc
+++ b/lib/ubsan/ubsan_flags.cc
@@ -67,22 +67,8 @@ void InitializeFlags() {
} // namespace __ubsan
-extern "C" {
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__ubsan_default_options() { return ""; }
-#endif
-
-#if SANITIZER_WINDOWS
-const char *__ubsan_default_default_options() { return ""; }
-# ifdef _WIN64
-# pragma comment(linker, "/alternatename:__ubsan_default_options=__ubsan_default_default_options")
-# else
-# pragma comment(linker, "/alternatename:___ubsan_default_options=___ubsan_default_default_options")
-# endif
-#endif
-
-} // extern "C"
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __ubsan_default_options, void) {
+ return "";
+}
#endif // CAN_SANITIZE_UB
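With the weak default above, an instrumented program can still override the runtime flags at link time by supplying a strong definition. A minimal sketch, assuming the application wants stack traces without halting (the exact flag string is only an example):

  // In the instrumented application, C or C++:
  extern "C" const char *__ubsan_default_options() {
    return "print_stacktrace=1:halt_on_error=0";
  }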
diff --git a/lib/ubsan/ubsan_handlers.cc b/lib/ubsan/ubsan_handlers.cc
index 6ffffae7bd30..4e025a8ddddd 100644
--- a/lib/ubsan/ubsan_handlers.cc
+++ b/lib/ubsan/ubsan_handlers.cc
@@ -38,7 +38,7 @@ bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) {
const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
"member call on", "constructor call on", "downcast of", "downcast of",
- "upcast of", "cast to virtual base of"};
+ "upcast of", "cast to virtual base of", "_Nonnull binding to"};
}
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
@@ -472,7 +472,8 @@ void __ubsan::__ubsan_handle_function_type_mismatch_abort(
Die();
}
-static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
+static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts,
+ bool IsAttr) {
SourceLocation Loc = Data->Loc.acquire();
ErrorType ET = ErrorType::InvalidNullReturn;
@@ -484,21 +485,35 @@ static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
Diag(Loc, DL_Error, "null pointer returned from function declared to never "
"return null");
if (!Data->AttrLoc.isInvalid())
- Diag(Data->AttrLoc, DL_Note, "returns_nonnull attribute specified here");
+ Diag(Data->AttrLoc, DL_Note, "%0 specified here")
+ << (IsAttr ? "returns_nonnull attribute"
+ : "_Nonnull return type annotation");
}
void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
GET_REPORT_OPTIONS(false);
- handleNonNullReturn(Data, Opts);
+ handleNonNullReturn(Data, Opts, true);
}
void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
GET_REPORT_OPTIONS(true);
- handleNonNullReturn(Data, Opts);
+ handleNonNullReturn(Data, Opts, true);
Die();
}
-static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
+void __ubsan::__ubsan_handle_nullability_return(NonNullReturnData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, Opts, false);
+}
+
+void __ubsan::__ubsan_handle_nullability_return_abort(NonNullReturnData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, Opts, false);
+ Die();
+}
+
+static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts,
+ bool IsAttr) {
SourceLocation Loc = Data->Loc.acquire();
ErrorType ET = ErrorType::InvalidNullArgument;
@@ -507,20 +522,34 @@ static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
ScopedReport R(Opts, Loc, ET);
- Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to "
- "never be null") << Data->ArgIndex;
+ Diag(Loc, DL_Error,
+ "null pointer passed as argument %0, which is declared to "
+ "never be null")
+ << Data->ArgIndex;
if (!Data->AttrLoc.isInvalid())
- Diag(Data->AttrLoc, DL_Note, "nonnull attribute specified here");
+ Diag(Data->AttrLoc, DL_Note, "%0 specified here")
+ << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation");
}
void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
GET_REPORT_OPTIONS(false);
- handleNonNullArg(Data, Opts);
+ handleNonNullArg(Data, Opts, true);
}
void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
GET_REPORT_OPTIONS(true);
- handleNonNullArg(Data, Opts);
+ handleNonNullArg(Data, Opts, true);
+ Die();
+}
+
+void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts, false);
+}
+
+void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts, false);
Die();
}
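The new nullability_arg / nullability_return entry points are reached when clang instruments _Nonnull annotations rather than the nonnull/returns_nonnull attributes; a small illustration of code that would trigger the argument check (the -fsanitize=nullability spelling matches the corresponding clang change, but treat this invocation as illustrative):

  // Hypothetical example; build with something like:
  //   clang++ -fsanitize=nullability example.cc
  int *_Nonnull identity(int *_Nonnull P) { return P; }

  int main() {
    int *Q = nullptr;
    identity(Q);  // null argument for a _Nonnull parameter:
                  // the runtime calls __ubsan_handle_nullability_arg
    return 0;
  }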
diff --git a/lib/ubsan/ubsan_handlers.h b/lib/ubsan/ubsan_handlers.h
index 350eb91d1bf9..5857bc2495f5 100644
--- a/lib/ubsan/ubsan_handlers.h
+++ b/lib/ubsan/ubsan_handlers.h
@@ -136,8 +136,10 @@ struct NonNullReturnData {
SourceLocation AttrLoc;
};
-/// \brief Handle returning null from function with returns_nonnull attribute.
+/// \brief Handle returning null from function with the returns_nonnull
+/// attribute, or a return type annotated with _Nonnull.
RECOVERABLE(nonnull_return, NonNullReturnData *Data)
+RECOVERABLE(nullability_return, NonNullReturnData *Data)
struct NonNullArgData {
SourceLocation Loc;
@@ -145,8 +147,10 @@ struct NonNullArgData {
int ArgIndex;
};
-/// \brief Handle passing null pointer to function with nonnull attribute.
+/// \brief Handle passing null pointer to a function parameter with the nonnull
+/// attribute, or a _Nonnull type annotation.
RECOVERABLE(nonnull_arg, NonNullArgData *Data)
+RECOVERABLE(nullability_arg, NonNullArgData *Data)
/// \brief Known CFI check kinds.
/// Keep in sync with the enum of the same name in CodeGenFunction.h
diff --git a/lib/ubsan/ubsan_init.cc b/lib/ubsan/ubsan_init.cc
index b4f42c4b8503..307bca37680e 100644
--- a/lib/ubsan/ubsan_init.cc
+++ b/lib/ubsan/ubsan_init.cc
@@ -23,6 +23,10 @@
using namespace __ubsan;
+const char *__ubsan::GetSanititizerToolName() {
+ return "UndefinedBehaviorSanitizer";
+}
+
static enum {
UBSAN_MODE_UNKNOWN = 0,
UBSAN_MODE_STANDALONE,
@@ -35,7 +39,7 @@ static void CommonInit() {
}
static void CommonStandaloneInit() {
- SanitizerToolName = "UndefinedBehaviorSanitizer";
+ SanitizerToolName = GetSanititizerToolName();
InitializeFlags();
CacheBinaryName();
__sanitizer_set_report_path(common_flags()->log_path);
diff --git a/lib/ubsan/ubsan_init.h b/lib/ubsan/ubsan_init.h
index 103ae24d15b3..f12fc2ced32a 100644
--- a/lib/ubsan/ubsan_init.h
+++ b/lib/ubsan/ubsan_init.h
@@ -15,6 +15,9 @@
namespace __ubsan {
+// Get the full tool name for UBSan.
+const char *GetSanititizerToolName();
+
// Initialize UBSan as a standalone tool. Typically should be called early
// during initialization.
void InitAsStandalone();
diff --git a/lib/ubsan/ubsan_interface.inc b/lib/ubsan/ubsan_interface.inc
new file mode 100644
index 000000000000..0e43ebc68299
--- /dev/null
+++ b/lib/ubsan/ubsan_interface.inc
@@ -0,0 +1,47 @@
+//===-- ubsan_interface.inc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Ubsan interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__ubsan_handle_add_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_add_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_builtin_unreachable)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail_abort)
+INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort)
+INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value)
+INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort)
+INTERFACE_FUNCTION(__ubsan_handle_missing_return)
+INTERFACE_FUNCTION(__ubsan_handle_mul_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_mul_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_negate_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_return)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_arg)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_return)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_return_abort)
+INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_sub_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_sub_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1)
+INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive)
+INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive_abort)
+INTERFACE_WEAK_FUNCTION(__ubsan_default_options)
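The new .inc file is an x-macro list: each consumer defines INTERFACE_FUNCTION and INTERFACE_WEAK_FUNCTION before including it, as the Windows thunk files added below do. A generic sketch of the expansion mechanics (the real handlers take arguments, so the declarations here are illustrative only):

  // consumer.cc -- expand the list into extern "C" declarations.
  #define INTERFACE_FUNCTION(Name) extern "C" void Name();
  #define INTERFACE_WEAK_FUNCTION(Name) INTERFACE_FUNCTION(Name)
  #include "ubsan_interface.inc"
  #undef INTERFACE_FUNCTION
  #undef INTERFACE_WEAK_FUNCTION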
diff --git a/lib/ubsan/ubsan_win_dll_thunk.cc b/lib/ubsan/ubsan_win_dll_thunk.cc
new file mode 100644
index 000000000000..a1d0dbd66056
--- /dev/null
+++ b/lib/ubsan/ubsan_win_dll_thunk.cc
@@ -0,0 +1,21 @@
+//===-- ubsan_win_dll_thunk.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
+// Ubsan interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DLL_THUNK
diff --git a/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc b/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc
new file mode 100644
index 000000000000..c9b74a4c9e09
--- /dev/null
+++ b/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc
@@ -0,0 +1,21 @@
+//===-- ubsan_win_dynamic_runtime_thunk.cc --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Ubsan, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_common/sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from ubsan.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
diff --git a/lib/ubsan/ubsan_win_weak_interception.cc b/lib/ubsan/ubsan_win_weak_interception.cc
new file mode 100644
index 000000000000..353719eefacf
--- /dev/null
+++ b/lib/ubsan/ubsan_win_weak_interception.cc
@@ -0,0 +1,23 @@
+//===-- ubsan_win_weak_interception.cc ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Ubsan when it is implemented as a shared
+// library on Windows (dll), in order to delegate the calls of weak functions to
+// the implementation in the main executable when a strong definition is
+// provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_common/sanitizer_win_weak_interception.h"
+#include "ubsan_flags.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DYNAMIC
diff --git a/lib/xray/CMakeLists.txt b/lib/xray/CMakeLists.txt
index 9c7cf6ce361c..72caa9fac732 100644
--- a/lib/xray/CMakeLists.txt
+++ b/lib/xray/CMakeLists.txt
@@ -1,15 +1,15 @@
# Build for the XRay runtime support library.
-# Core XRay runtime library implementation files.
+# XRay runtime library implementation files.
set(XRAY_SOURCES
+ xray_inmemory_log.cc
xray_init.cc
- xray_interface.cc
xray_flags.cc
- xray_inmemory_log.cc)
-
-# XRay flight data recorder (FDR) implementation files.
-set(XRAY_FDR_SOURCES
- xray_buffer_queue.cc)
+ xray_interface.cc
+ xray_buffer_queue.cc
+ xray_log_interface.cc
+ xray_fdr_logging.cc
+ xray_utils.cc)
set(x86_64_SOURCES
xray_x86_64.cc
@@ -21,11 +21,38 @@ set(arm_SOURCES
xray_trampoline_arm.S
${XRAY_SOURCES})
-set(armhf_SOURCES ${arm_SOURCES})
+set(armhf_SOURCES
+ ${arm_SOURCES})
set(aarch64_SOURCES
- xray_AArch64.cc
- xray_trampoline_AArch64.S
+ xray_AArch64.cc
+ xray_trampoline_AArch64.S
+ ${XRAY_SOURCES})
+
+set(mips_SOURCES
+ xray_mips.cc
+ xray_trampoline_mips.S
+ ${XRAY_SOURCES})
+
+set(mipsel_SOURCES
+ xray_mips.cc
+ xray_trampoline_mips.S
+ ${XRAY_SOURCES})
+
+set(mips64_SOURCES
+ xray_mips64.cc
+ xray_trampoline_mips64.S
+ ${XRAY_SOURCES})
+
+set(mips64el_SOURCES
+ xray_mips64.cc
+ xray_trampoline_mips64.S
+ ${XRAY_SOURCES})
+
+set(powerpc64le_SOURCES
+ xray_powerpc64.cc
+ xray_trampoline_powerpc64.cc
+ xray_trampoline_powerpc64_asm.S
${XRAY_SOURCES})
include_directories(..)
@@ -41,13 +68,7 @@ add_compiler_rt_object_libraries(RTXray
SOURCES ${XRAY_SOURCES} CFLAGS ${XRAY_CFLAGS}
DEFS ${XRAY_COMMON_DEFINITIONS})
-add_compiler_rt_object_libraries(RTXrayFDR
- ARCHS ${XRAY_SUPPORTED_ARCH}
- SOURCES ${XRAY_FDR_SOURCES} CFLAGS ${XRAY_CFLAGS}
- DEFS ${XRAY_COMMON_DEFINITIONS})
-
add_compiler_rt_component(xray)
-add_compiler_rt_component(xray-fdr)
set(XRAY_COMMON_RUNTIME_OBJECT_LIBS
RTSanitizerCommon
@@ -63,14 +84,6 @@ foreach(arch ${XRAY_SUPPORTED_ARCH})
DEFS ${XRAY_COMMON_DEFINITIONS}
OBJECT_LIBS ${XRAY_COMMON_RUNTIME_OBJECT_LIBS}
PARENT_TARGET xray)
- add_compiler_rt_runtime(clang_rt.xray-fdr
- STATIC
- ARCHS ${arch}
- SOURCES ${XRAY_FDR_SOURCES}
- CFLAGS ${XRAY_CFLAGS}
- DEFS ${XRAY_COMMON_DEFINITIONS}
- OBJECT_LIBS ${XRAY_COMMON_RUNTIME_OBJECT_LIBS}
- PARENT_TARGET xray-fdr)
endif()
endforeach()
diff --git a/lib/xray/tests/CMakeLists.txt b/lib/xray/tests/CMakeLists.txt
index 6cb17934c895..a1eb4a030ccc 100644
--- a/lib/xray/tests/CMakeLists.txt
+++ b/lib/xray/tests/CMakeLists.txt
@@ -8,14 +8,15 @@ set(XRAY_UNITTEST_CFLAGS
${COMPILER_RT_UNITTEST_CFLAGS}
${COMPILER_RT_GTEST_CFLAGS}
-I${COMPILER_RT_SOURCE_DIR}/include
- -I${COMPILER_RT_SOURCE_DIR}/lib/xray)
+ -I${COMPILER_RT_SOURCE_DIR}/lib/xray
+ -I${COMPILER_RT_SOURCE_DIR}/lib)
macro(xray_compile obj_list source arch)
get_filename_component(basename ${source} NAME)
set(output_obj "${basename}.${arch}.o")
get_target_flags_for_arch(${arch} TARGET_CFLAGS)
if(NOT COMPILER_RT_STANDALONE_BUILD)
- list(APPEND COMPILE_DEPS gtest_main xray-fdr)
+ list(APPEND COMPILE_DEPS gtest_main xray)
endif()
clang_compile(${output_obj} ${source}
CFLAGS ${XRAY_UNITTEST_CFLAGS} ${TARGET_CFLAGS}
@@ -38,16 +39,17 @@ macro(add_xray_unittest testname)
get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
set(TEST_DEPS ${TEST_OBJECTS})
if(NOT COMPILER_RT_STANDALONE_BUILD)
- list(APPEND TEST_DEPS gtest_main xray-fdr)
+ list(APPEND TEST_DEPS gtest_main xray)
endif()
if(NOT APPLE)
- add_compiler_rt_test(XRayUnitTests ${testname}
+ add_compiler_rt_test(XRayUnitTests ${testname}-${arch}
OBJECTS ${TEST_OBJECTS}
DEPS ${TEST_DEPS}
LINK_FLAGS ${TARGET_LINK_FLAGS}
-lstdc++ -lm ${CMAKE_THREAD_LIBS_INIT}
-lpthread
- -L${COMPILER_RT_LIBRARY_OUTPUT_DIR} -lclang_rt.xray-fdr-${arch})
+ -L${COMPILER_RT_LIBRARY_OUTPUT_DIR} -lclang_rt.xray-${arch}
+ -ldl -lrt)
endif()
# FIXME: Figure out how to run even just the unit tests on APPLE.
endforeach()
diff --git a/lib/xray/tests/unit/CMakeLists.txt b/lib/xray/tests/unit/CMakeLists.txt
index 3e5412d41e6e..62d01f239581 100644
--- a/lib/xray/tests/unit/CMakeLists.txt
+++ b/lib/xray/tests/unit/CMakeLists.txt
@@ -1,2 +1,4 @@
add_xray_unittest(XRayBufferQueueTest SOURCES
buffer_queue_test.cc xray_unit_test_main.cc)
+add_xray_unittest(XRayFDRLoggingTest SOURCES
+ fdr_logging_test.cc xray_unit_test_main.cc)
diff --git a/lib/xray/tests/unit/buffer_queue_test.cc b/lib/xray/tests/unit/buffer_queue_test.cc
index d46f19402c2a..ac89a8dbc50e 100644
--- a/lib/xray/tests/unit/buffer_queue_test.cc
+++ b/lib/xray/tests/unit/buffer_queue_test.cc
@@ -14,68 +14,101 @@
#include "gtest/gtest.h"
#include <future>
-#include <system_error>
#include <unistd.h>
namespace __xray {
static constexpr size_t kSize = 4096;
-TEST(BufferQueueTest, API) { BufferQueue Buffers(kSize, 1); }
+TEST(BufferQueueTest, API) {
+ bool Success = false;
+ BufferQueue Buffers(kSize, 1, Success);
+ ASSERT_TRUE(Success);
+}
TEST(BufferQueueTest, GetAndRelease) {
- BufferQueue Buffers(kSize, 1);
+ bool Success = false;
+ BufferQueue Buffers(kSize, 1, Success);
+ ASSERT_TRUE(Success);
BufferQueue::Buffer Buf;
- ASSERT_EQ(Buffers.getBuffer(Buf), std::error_code());
+ ASSERT_EQ(Buffers.getBuffer(Buf), BufferQueue::ErrorCode::Ok);
ASSERT_NE(nullptr, Buf.Buffer);
- ASSERT_EQ(Buffers.releaseBuffer(Buf), std::error_code());
+ ASSERT_EQ(Buffers.releaseBuffer(Buf), BufferQueue::ErrorCode::Ok);
ASSERT_EQ(nullptr, Buf.Buffer);
}
TEST(BufferQueueTest, GetUntilFailed) {
- BufferQueue Buffers(kSize, 1);
+ bool Success = false;
+ BufferQueue Buffers(kSize, 1, Success);
+ ASSERT_TRUE(Success);
BufferQueue::Buffer Buf0;
- EXPECT_EQ(Buffers.getBuffer(Buf0), std::error_code());
+ EXPECT_EQ(Buffers.getBuffer(Buf0), BufferQueue::ErrorCode::Ok);
BufferQueue::Buffer Buf1;
- EXPECT_EQ(std::errc::not_enough_memory, Buffers.getBuffer(Buf1));
- EXPECT_EQ(Buffers.releaseBuffer(Buf0), std::error_code());
+ EXPECT_EQ(BufferQueue::ErrorCode::NotEnoughMemory, Buffers.getBuffer(Buf1));
+ EXPECT_EQ(Buffers.releaseBuffer(Buf0), BufferQueue::ErrorCode::Ok);
}
TEST(BufferQueueTest, ReleaseUnknown) {
- BufferQueue Buffers(kSize, 1);
+ bool Success = false;
+ BufferQueue Buffers(kSize, 1, Success);
+ ASSERT_TRUE(Success);
BufferQueue::Buffer Buf;
Buf.Buffer = reinterpret_cast<void *>(0xdeadbeef);
Buf.Size = kSize;
- EXPECT_EQ(std::errc::argument_out_of_domain, Buffers.releaseBuffer(Buf));
+ EXPECT_EQ(BufferQueue::ErrorCode::UnrecognizedBuffer,
+ Buffers.releaseBuffer(Buf));
}
TEST(BufferQueueTest, ErrorsWhenFinalising) {
- BufferQueue Buffers(kSize, 2);
+ bool Success = false;
+ BufferQueue Buffers(kSize, 2, Success);
+ ASSERT_TRUE(Success);
BufferQueue::Buffer Buf;
- ASSERT_EQ(Buffers.getBuffer(Buf), std::error_code());
+ ASSERT_EQ(Buffers.getBuffer(Buf), BufferQueue::ErrorCode::Ok);
ASSERT_NE(nullptr, Buf.Buffer);
- ASSERT_EQ(Buffers.finalize(), std::error_code());
+ ASSERT_EQ(Buffers.finalize(), BufferQueue::ErrorCode::Ok);
BufferQueue::Buffer OtherBuf;
- ASSERT_EQ(std::errc::state_not_recoverable, Buffers.getBuffer(OtherBuf));
- ASSERT_EQ(std::errc::state_not_recoverable, Buffers.finalize());
- ASSERT_EQ(Buffers.releaseBuffer(Buf), std::error_code());
+ ASSERT_EQ(BufferQueue::ErrorCode::AlreadyFinalized,
+ Buffers.getBuffer(OtherBuf));
+ ASSERT_EQ(BufferQueue::ErrorCode::AlreadyFinalized,
+ Buffers.finalize());
+ ASSERT_EQ(Buffers.releaseBuffer(Buf), BufferQueue::ErrorCode::Ok);
}
TEST(BufferQueueTest, MultiThreaded) {
- BufferQueue Buffers(kSize, 100);
+ bool Success = false;
+ BufferQueue Buffers(kSize, 100, Success);
+ ASSERT_TRUE(Success);
auto F = [&] {
BufferQueue::Buffer B;
- while (!Buffers.getBuffer(B)) {
+ while (true) {
+ auto EC = Buffers.getBuffer(B);
+ if (EC != BufferQueue::ErrorCode::Ok)
+ return;
Buffers.releaseBuffer(B);
}
};
auto T0 = std::async(std::launch::async, F);
auto T1 = std::async(std::launch::async, F);
auto T2 = std::async(std::launch::async, [&] {
- while (!Buffers.finalize())
+ while (Buffers.finalize() != BufferQueue::ErrorCode::Ok)
;
});
F();
}
+TEST(BufferQueueTest, Apply) {
+ bool Success = false;
+ BufferQueue Buffers(kSize, 10, Success);
+ ASSERT_TRUE(Success);
+ auto Count = 0;
+ BufferQueue::Buffer B;
+ for (int I = 0; I < 10; ++I) {
+ ASSERT_EQ(Buffers.getBuffer(B), BufferQueue::ErrorCode::Ok);
+ ASSERT_EQ(Buffers.releaseBuffer(B), BufferQueue::ErrorCode::Ok);
+ }
+ Buffers.apply([&](const BufferQueue::Buffer &B) { ++Count; });
+ ASSERT_EQ(Count, 10);
+}
+
} // namespace __xray
diff --git a/lib/xray/tests/unit/fdr_logging_test.cc b/lib/xray/tests/unit/fdr_logging_test.cc
new file mode 100644
index 000000000000..0d5e99a74334
--- /dev/null
+++ b/lib/xray/tests/unit/fdr_logging_test.cc
@@ -0,0 +1,127 @@
+//===-- fdr_logging_test.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_fdr_logging.h"
+#include "gtest/gtest.h"
+
+#include <fcntl.h>
+#include <iostream>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <system_error>
+#include <unistd.h>
+
+#include "xray/xray_records.h"
+
+namespace __xray {
+namespace {
+
+constexpr auto kBufferSize = 16384;
+constexpr auto kBufferMax = 10;
+
+struct ScopedFileCloserAndDeleter {
+ explicit ScopedFileCloserAndDeleter(int Fd, const char *Filename)
+ : Fd(Fd), Filename(Filename) {}
+
+ ~ScopedFileCloserAndDeleter() {
+ if (Fd) {
+ close(Fd);
+ unlink(Filename);
+ }
+ }
+
+ int Fd;
+ const char *Filename;
+};
+
+TEST(FDRLoggingTest, Simple) {
+ FDRLoggingOptions Options;
+ Options.ReportErrors = true;
+ char TmpFilename[] = "fdr-logging-test.XXXXXX";
+ Options.Fd = mkstemp(TmpFilename);
+ ASSERT_NE(Options.Fd, -1);
+ ASSERT_EQ(fdrLoggingInit(kBufferSize, kBufferMax, &Options,
+ sizeof(FDRLoggingOptions)),
+ XRayLogInitStatus::XRAY_LOG_INITIALIZED);
+ fdrLoggingHandleArg0(1, XRayEntryType::ENTRY);
+ fdrLoggingHandleArg0(1, XRayEntryType::EXIT);
+ ASSERT_EQ(fdrLoggingFinalize(), XRayLogInitStatus::XRAY_LOG_FINALIZED);
+ ASSERT_EQ(fdrLoggingFlush(), XRayLogFlushStatus::XRAY_LOG_FLUSHED);
+ ASSERT_EQ(fdrLoggingReset(), XRayLogInitStatus::XRAY_LOG_UNINITIALIZED);
+
+ // To do this properly, we have to close the file descriptor then re-open the
+ // file for reading this time.
+ ASSERT_EQ(close(Options.Fd), 0);
+ int Fd = open(TmpFilename, O_RDONLY);
+ ASSERT_NE(-1, Fd);
+ ScopedFileCloserAndDeleter Guard(Fd, TmpFilename);
+ auto Size = lseek(Fd, 0, SEEK_END);
+ ASSERT_NE(Size, 0);
+ // Map the file contents.
+ const char *Contents = static_cast<const char *>(
+ mmap(NULL, Size, PROT_READ, MAP_PRIVATE, Fd, 0));
+ ASSERT_NE(Contents, nullptr);
+
+ XRayFileHeader H;
+ memcpy(&H, Contents, sizeof(XRayFileHeader));
+ ASSERT_EQ(H.Version, 1);
+ ASSERT_EQ(H.Type, FileTypes::FDR_LOG);
+
+ // We require one buffer at least to have the "start of buffer" metadata
+ // record.
+ MetadataRecord MDR;
+ memcpy(&MDR, Contents + sizeof(XRayFileHeader), sizeof(MetadataRecord));
+ ASSERT_EQ(MDR.RecordKind, uint8_t(MetadataRecord::RecordKinds::NewBuffer));
+}
+
+TEST(FDRLoggingTest, Multiple) {
+ FDRLoggingOptions Options;
+ char TmpFilename[] = "fdr-logging-test.XXXXXX";
+ Options.Fd = mkstemp(TmpFilename);
+ ASSERT_NE(Options.Fd, -1);
+ ASSERT_EQ(fdrLoggingInit(kBufferSize, kBufferMax, &Options,
+ sizeof(FDRLoggingOptions)),
+ XRayLogInitStatus::XRAY_LOG_INITIALIZED);
+ for (uint64_t I = 0; I < 100; ++I) {
+ fdrLoggingHandleArg0(1, XRayEntryType::ENTRY);
+ fdrLoggingHandleArg0(1, XRayEntryType::EXIT);
+ }
+ ASSERT_EQ(fdrLoggingFinalize(), XRayLogInitStatus::XRAY_LOG_FINALIZED);
+ ASSERT_EQ(fdrLoggingFlush(), XRayLogFlushStatus::XRAY_LOG_FLUSHED);
+ ASSERT_EQ(fdrLoggingReset(), XRayLogInitStatus::XRAY_LOG_UNINITIALIZED);
+
+ // To do this properly, we have to close the file descriptor then re-open the
+ // file for reading this time.
+ ASSERT_EQ(close(Options.Fd), 0);
+ int Fd = open(TmpFilename, O_RDONLY);
+ ASSERT_NE(-1, Fd);
+ ScopedFileCloserAndDeleter Guard(Fd, TmpFilename);
+ auto Size = lseek(Fd, 0, SEEK_END);
+ ASSERT_NE(Size, 0);
+ // Map the file contents.
+ const char *Contents = static_cast<const char *>(
+ mmap(NULL, Size, PROT_READ, MAP_PRIVATE, Fd, 0));
+ ASSERT_NE(Contents, nullptr);
+
+ XRayFileHeader H;
+ memcpy(&H, Contents, sizeof(XRayFileHeader));
+ ASSERT_EQ(H.Version, 1);
+ ASSERT_EQ(H.Type, FileTypes::FDR_LOG);
+
+ MetadataRecord MDR0;
+ memcpy(&MDR0, Contents + sizeof(XRayFileHeader), sizeof(MetadataRecord));
+ ASSERT_EQ(MDR0.RecordKind, uint8_t(MetadataRecord::RecordKinds::NewBuffer));
+}
+
+} // namespace
+} // namespace __xray
diff --git a/lib/xray/xray_AArch64.cc b/lib/xray/xray_AArch64.cc
index 0c1df22ec2ef..8d1c7c5d807f 100644
--- a/lib/xray/xray_AArch64.cc
+++ b/lib/xray/xray_AArch64.cc
@@ -14,7 +14,6 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "xray_defs.h"
-#include "xray_emulate_tsc.h"
#include "xray_interface_internal.h"
#include <atomic>
#include <cassert>
@@ -24,19 +23,6 @@ extern "C" void __clear_cache(void* start, void* end);
namespace __xray {
-uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
- // There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
- // not have a constant frequency like TSC on x86[_64]; it may go faster or
- // slower depending on CPU's turbo or power saving modes. Furthermore, to
- // read from CP15 on ARM a kernel modification or a driver is needed.
- // We can not require this from users of compiler-rt.
- // So on ARM we use clock_gettime(2) which gives the result in nanoseconds.
- // To get the measurements per second, we scale this by the number of
- // nanoseconds per second, pretending that the TSC frequency is 1GHz and
- // one TSC tick is 1 nanosecond.
- return NanosecondsPerSecond;
-}
-
// The machine codes for some instructions used in runtime patching.
enum class PatchOpcodes : uint32_t {
PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
@@ -106,8 +92,9 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
}
bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
- return patchSled(Enable, FuncId, Sled, __xray_FunctionEntry);
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
}
bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
@@ -117,9 +104,14 @@ bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
- // FIXME: In the future we'd need to distinguish between non-tail exits and
- // tail exits for better information preservation.
- return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
}
+// FIXME: Maybe implement this better?
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
diff --git a/lib/xray/xray_arm.cc b/lib/xray/xray_arm.cc
index f5e2cd2a93c2..26d673ec23a0 100644
--- a/lib/xray/xray_arm.cc
+++ b/lib/xray/xray_arm.cc
@@ -14,7 +14,6 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "xray_defs.h"
-#include "xray_emulate_tsc.h"
#include "xray_interface_internal.h"
#include <atomic>
#include <cassert>
@@ -23,19 +22,6 @@ extern "C" void __clear_cache(void* start, void* end);
namespace __xray {
-uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
- // There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
- // not have a constant frequency like TSC on x86[_64]; it may go faster or
- // slower depending on CPU's turbo or power saving modes. Furthermore, to
- // read from CP15 on ARM a kernel modification or a driver is needed.
- // We can not require this from users of compiler-rt.
- // So on ARM we use clock_gettime(2) which gives the result in nanoseconds.
- // To get the measurements per second, we scale this by the number of
- // nanoseconds per second, pretending that the TSC frequency is 1GHz and
- // one TSC tick is 1 nanosecond.
- return NanosecondsPerSecond;
-}
-
// The machine codes for some instructions used in runtime patching.
enum class PatchOpcodes : uint32_t {
PO_PushR0Lr = 0xE92D4001, // PUSH {r0, lr}
@@ -74,7 +60,7 @@ write32bitLoadReg(uint8_t regNo, uint32_t *Address,
// MOVW r0, #<lower 16 bits of the |Value|>
// MOVT r0, #<higher 16 bits of the |Value|>
inline static uint32_t *
-Write32bitLoadR0(uint32_t *Address,
+write32bitLoadR0(uint32_t *Address,
const uint32_t Value) XRAY_NEVER_INSTRUMENT {
return write32bitLoadReg(0, Address, Value);
}
@@ -83,7 +69,7 @@ Write32bitLoadR0(uint32_t *Address,
// MOVW ip, #<lower 16 bits of the |Value|>
// MOVT ip, #<higher 16 bits of the |Value|>
inline static uint32_t *
-Write32bitLoadIP(uint32_t *Address,
+write32bitLoadIP(uint32_t *Address,
const uint32_t Value) XRAY_NEVER_INSTRUMENT {
return write32bitLoadReg(12, Address, Value);
}
@@ -121,9 +107,9 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
uint32_t *CurAddress = FirstAddress + 1;
if (Enable) {
CurAddress =
- Write32bitLoadR0(CurAddress, reinterpret_cast<uint32_t>(FuncId));
+ write32bitLoadR0(CurAddress, reinterpret_cast<uint32_t>(FuncId));
CurAddress =
- Write32bitLoadIP(CurAddress, reinterpret_cast<uint32_t>(TracingHook));
+ write32bitLoadIP(CurAddress, reinterpret_cast<uint32_t>(TracingHook));
*CurAddress = uint32_t(PatchOpcodes::PO_BlxIp);
CurAddress++;
*CurAddress = uint32_t(PatchOpcodes::PO_PopR0Lr);
@@ -142,8 +128,9 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
}
bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
- return patchSled(Enable, FuncId, Sled, __xray_FunctionEntry);
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
}
bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
@@ -153,9 +140,14 @@ bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
- // FIXME: In the future we'd need to distinguish between non-tail exits and
- // tail exits for better information preservation.
- return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
}
+// FIXME: Maybe implement this better?
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
diff --git a/lib/xray/xray_buffer_queue.cc b/lib/xray/xray_buffer_queue.cc
index 7e5462fb8e11..7ba755ac3069 100644
--- a/lib/xray/xray_buffer_queue.cc
+++ b/lib/xray/xray_buffer_queue.cc
@@ -13,53 +13,69 @@
//
//===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"
-#include <cassert>
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
#include <cstdlib>
+#include <tuple>
using namespace __xray;
+using namespace __sanitizer;
-BufferQueue::BufferQueue(std::size_t B, std::size_t N)
- : BufferSize(B), Buffers(N), Mutex(), OwnedBuffers(), Finalizing(false) {
- for (auto &Buf : Buffers) {
+BufferQueue::BufferQueue(std::size_t B, std::size_t N, bool &Success)
+ : BufferSize(B), Buffers(N), Mutex(), OwnedBuffers(), Finalizing{0} {
+ for (auto &T : Buffers) {
void *Tmp = malloc(BufferSize);
+ if (Tmp == nullptr) {
+ Success = false;
+ return;
+ }
+
+ auto &Buf = std::get<0>(T);
Buf.Buffer = Tmp;
Buf.Size = B;
- if (Tmp != 0)
- OwnedBuffers.insert(Tmp);
+ OwnedBuffers.emplace(Tmp);
}
+ Success = true;
}
-std::error_code BufferQueue::getBuffer(Buffer &Buf) {
- if (Finalizing.load(std::memory_order_acquire))
- return std::make_error_code(std::errc::state_not_recoverable);
- std::lock_guard<std::mutex> Guard(Mutex);
+BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
+ if (__sanitizer::atomic_load(&Finalizing, __sanitizer::memory_order_acquire))
+ return ErrorCode::QueueFinalizing;
+ __sanitizer::BlockingMutexLock Guard(&Mutex);
if (Buffers.empty())
- return std::make_error_code(std::errc::not_enough_memory);
- Buf = Buffers.front();
+ return ErrorCode::NotEnoughMemory;
+ auto &T = Buffers.front();
+ auto &B = std::get<0>(T);
+ Buf = B;
+ B.Buffer = nullptr;
+ B.Size = 0;
Buffers.pop_front();
- return {};
+ return ErrorCode::Ok;
}
-std::error_code BufferQueue::releaseBuffer(Buffer &Buf) {
+BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
if (OwnedBuffers.count(Buf.Buffer) == 0)
- return std::make_error_code(std::errc::argument_out_of_domain);
- std::lock_guard<std::mutex> Guard(Mutex);
- Buffers.push_back(Buf);
+ return ErrorCode::UnrecognizedBuffer;
+ __sanitizer::BlockingMutexLock Guard(&Mutex);
+
+ // Now that the buffer has been released, we mark it as "used".
+ Buffers.emplace(Buffers.end(), Buf, true /* used */);
Buf.Buffer = nullptr;
- Buf.Size = BufferSize;
- return {};
+ Buf.Size = 0;
+ return ErrorCode::Ok;
}
-std::error_code BufferQueue::finalize() {
- if (Finalizing.exchange(true, std::memory_order_acq_rel))
- return std::make_error_code(std::errc::state_not_recoverable);
- return {};
+BufferQueue::ErrorCode BufferQueue::finalize() {
+ if (__sanitizer::atomic_exchange(&Finalizing, 1,
+ __sanitizer::memory_order_acq_rel))
+ return ErrorCode::QueueFinalizing;
+ return ErrorCode::Ok;
}
BufferQueue::~BufferQueue() {
- for (auto &Buf : Buffers) {
+ for (auto &T : Buffers) {
+ auto &Buf = std::get<0>(T);
free(Buf.Buffer);
- Buf.Buffer = nullptr;
- Buf.Size = 0;
}
}
diff --git a/lib/xray/xray_buffer_queue.h b/lib/xray/xray_buffer_queue.h
index bf0b7af9df4d..e051695a297b 100644
--- a/lib/xray/xray_buffer_queue.h
+++ b/lib/xray/xray_buffer_queue.h
@@ -15,12 +15,11 @@
#ifndef XRAY_BUFFER_QUEUE_H
#define XRAY_BUFFER_QUEUE_H
-#include <atomic>
-#include <cstdint>
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
#include <deque>
-#include <mutex>
-#include <system_error>
#include <unordered_set>
+#include <utility>
namespace __xray {
@@ -33,19 +32,47 @@ class BufferQueue {
public:
struct Buffer {
void *Buffer = nullptr;
- std::size_t Size = 0;
+ size_t Size = 0;
};
private:
- std::size_t BufferSize;
- std::deque<Buffer> Buffers;
- std::mutex Mutex;
+ size_t BufferSize;
+
+ // We use a bool to indicate whether the Buffer has been used in this
+ // freelist implementation.
+ std::deque<std::tuple<Buffer, bool>> Buffers;
+ __sanitizer::BlockingMutex Mutex;
std::unordered_set<void *> OwnedBuffers;
- std::atomic<bool> Finalizing;
+ __sanitizer::atomic_uint8_t Finalizing;
public:
- /// Initialise a queue of size |N| with buffers of size |B|.
- BufferQueue(std::size_t B, std::size_t N);
+ enum class ErrorCode : unsigned {
+ Ok,
+ NotEnoughMemory,
+ QueueFinalizing,
+ UnrecognizedBuffer,
+ AlreadyFinalized,
+ };
+
+ static const char *getErrorString(ErrorCode E) {
+ switch (E) {
+ case ErrorCode::Ok:
+ return "(none)";
+ case ErrorCode::NotEnoughMemory:
+ return "no available buffers in the queue";
+ case ErrorCode::QueueFinalizing:
+ return "queue already finalizing";
+ case ErrorCode::UnrecognizedBuffer:
+ return "buffer being returned not owned by buffer queue";
+ case ErrorCode::AlreadyFinalized:
+ return "queue already finalized";
+ }
+ return "unknown error";
+ }
+
+ /// Initialise a queue of size |N| with buffers of size |B|. We report success
+ /// through |Success|.
+ BufferQueue(size_t B, size_t N, bool &Success);
/// Updates |Buf| to contain the pointer to an appropriate buffer. Returns an
/// error in case there are no available buffers to return when we will run
@@ -58,24 +85,41 @@ public:
/// - std::errc::not_enough_memory on exceeding MaxSize.
/// - no error when we find a Buffer.
/// - std::errc::state_not_recoverable on finalising BufferQueue.
- std::error_code getBuffer(Buffer &Buf);
+ ErrorCode getBuffer(Buffer &Buf);
/// Updates |Buf| to point to nullptr, with size 0.
///
/// Returns:
/// - ...
- std::error_code releaseBuffer(Buffer &Buf);
-
- bool finalizing() const { return Finalizing.load(std::memory_order_acquire); }
-
- // Sets the state of the BufferQueue to finalizing, which ensures that:
- //
- // - All subsequent attempts to retrieve a Buffer will fail.
- // - All releaseBuffer operations will not fail.
- //
- // After a call to finalize succeeds, all subsequent calls to finalize will
- // fail with std::errc::state_not_recoverable.
- std::error_code finalize();
+ ErrorCode releaseBuffer(Buffer &Buf);
+
+ bool finalizing() const {
+ return __sanitizer::atomic_load(&Finalizing,
+ __sanitizer::memory_order_acquire);
+ }
+
+ /// Returns the configured size of the buffers in the buffer queue.
+ size_t ConfiguredBufferSize() const { return BufferSize; }
+
+ /// Sets the state of the BufferQueue to finalizing, which ensures that:
+ ///
+ /// - All subsequent attempts to retrieve a Buffer will fail.
+ /// - All releaseBuffer operations will not fail.
+ ///
+ /// After a call to finalize succeeds, all subsequent calls to finalize will
+ /// fail with std::errc::state_not_recoverable.
+ ErrorCode finalize();
+
+ /// Applies the provided function F to each Buffer in the queue, only if the
+ /// Buffer is marked 'used' (i.e. has been the result of getBuffer(...) and a
+ /// releaseBuffer(...) operation).
+ template <class F> void apply(F Fn) {
+ __sanitizer::BlockingMutexLock G(&Mutex);
+ for (const auto &T : Buffers) {
+ if (std::get<1>(T))
+ Fn(std::get<0>(T));
+ }
+ }
// Cleans up allocated buffers.
~BufferQueue();
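The error-code API mirrors what the updated unit tests exercise; a minimal usage sketch of the header above (error handling trimmed for brevity):

  #include "xray_buffer_queue.h"

  void sketch() {
    bool Success = false;
    __xray::BufferQueue Queue(/*B=*/4096, /*N=*/8, Success);
    if (!Success)
      return;

    __xray::BufferQueue::Buffer Buf;
    if (Queue.getBuffer(Buf) != __xray::BufferQueue::ErrorCode::Ok)
      return;
    // ... write at most Buf.Size bytes into Buf.Buffer ...
    Queue.releaseBuffer(Buf);   // marks the buffer "used"
    Queue.finalize();           // subsequent getBuffer() calls now fail

    // Only buffers that went through getBuffer()/releaseBuffer() are visited.
    Queue.apply([](const __xray::BufferQueue::Buffer &B) { /* write B out */ });
  }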
diff --git a/lib/xray/xray_emulate_tsc.h b/lib/xray/xray_emulate_tsc.h
deleted file mode 100644
index a3e8b1c92eb4..000000000000
--- a/lib/xray/xray_emulate_tsc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- xray_emulate_tsc.h --------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of XRay, a dynamic runtime instrumentation system.
-//
-//===----------------------------------------------------------------------===//
-#ifndef XRAY_EMULATE_TSC_H
-#define XRAY_EMULATE_TSC_H
-
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "xray_defs.h"
-#include <cerrno>
-#include <cstdint>
-#include <time.h>
-
-namespace __xray {
-
-static constexpr uint64_t NanosecondsPerSecond = 1000ULL * 1000 * 1000;
-
-ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
- timespec TS;
- int result = clock_gettime(CLOCK_REALTIME, &TS);
- if (result != 0) {
- Report("clock_gettime(2) returned %d, errno=%d.", result, int(errno));
- TS.tv_sec = 0;
- TS.tv_nsec = 0;
- }
- CPU = 0;
- return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec;
-}
-}
-
-#endif // XRAY_EMULATE_TSC_H
diff --git a/lib/xray/xray_fdr_log_records.h b/lib/xray/xray_fdr_log_records.h
new file mode 100644
index 000000000000..36d9410d16f6
--- /dev/null
+++ b/lib/xray/xray_fdr_log_records.h
@@ -0,0 +1,65 @@
+//===-- xray_fdr_log_records.h -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_FDR_LOG_RECORDS_H
+#define XRAY_XRAY_FDR_LOG_RECORDS_H
+
+enum class RecordType : uint8_t { Function, Metadata };
+
+// A MetadataRecord encodes the kind of record in its first byte, and has 15
+// additional bytes at the end to hold free-form data.
+struct alignas(16) MetadataRecord {
+ // A MetadataRecord must always have a type of 1.
+ /* RecordType */ uint8_t Type : 1;
+
+ // Each kind of record is represented as a 7-bit value (even though we use an
+ // unsigned 8-bit enum class to do so).
+ enum class RecordKinds : uint8_t {
+ NewBuffer,
+ EndOfBuffer,
+ NewCPUId,
+ TSCWrap,
+ WalltimeMarker,
+ };
+ // Use 7 bits to identify this record type.
+ /* RecordKinds */ uint8_t RecordKind : 7;
+ char Data[15];
+} __attribute__((packed));
+
+static_assert(sizeof(MetadataRecord) == 16, "Wrong size for MetadataRecord.");
+
+struct alignas(8) FunctionRecord {
+ // A FunctionRecord must always have a type of 0.
+ /* RecordType */ uint8_t Type : 1;
+ enum class RecordKinds {
+ FunctionEnter = 0x00,
+ FunctionExit = 0x01,
+ FunctionTailExit = 0x02,
+ };
+ /* RecordKinds */ uint8_t RecordKind : 3;
+
+ // We only use 28 bits of the function ID, so that we can use as few bytes as
+ // possible. This means we only support 2^28 (268,435,456) unique function ids
+ // in a single binary.
+ int FuncId : 28;
+
+ // We use another 4 bytes to hold the delta between the previous entry's TSC.
+ // In case we've found that the distance is greater than the allowable 32 bits
+ // (either because we are running in a different CPU and the TSC might be
+ // different then), we should use a MetadataRecord before this FunctionRecord
+ // that will contain the full TSC for that CPU, and keep this to 0.
+ uint32_t TSCDelta;
+} __attribute__((packed));
+
+static_assert(sizeof(FunctionRecord) == 8, "Wrong size for FunctionRecord.");
+
+#endif // XRAY_XRAY_FDR_LOG_RECORDS_H
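Because both record types keep their discriminator in the first byte, a reader can branch on that bit before interpreting the rest. A hedged sketch of how a trace reader might walk a flushed buffer (not the actual llvm-xray implementation; it assumes the usual little-endian bit-field layout where the Type bit is the least significant bit of the first byte):

  #include "xray_fdr_log_records.h"
  #include <cstddef>
  #include <cstring>

  void walk(const char *Data, size_t Size) {
    const char *P = Data, *End = Data + Size;
    while (End - P >= static_cast<ptrdiff_t>(sizeof(FunctionRecord))) {
      if (*P & 0x01) {  // RecordType::Metadata
        if (End - P < static_cast<ptrdiff_t>(sizeof(MetadataRecord)))
          break;
        MetadataRecord MR;
        std::memcpy(&MR, P, sizeof MR);
        P += sizeof(MetadataRecord);  // 16 bytes
      } else {          // RecordType::Function
        FunctionRecord FR;
        std::memcpy(&FR, P, sizeof FR);
        P += sizeof(FunctionRecord);  // 8 bytes
      }
    }
  }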
diff --git a/lib/xray/xray_fdr_logging.cc b/lib/xray/xray_fdr_logging.cc
new file mode 100644
index 000000000000..c5b63b0a564e
--- /dev/null
+++ b/lib/xray/xray_fdr_logging.cc
@@ -0,0 +1,229 @@
+//===-- xray_fdr_logging.cc ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Here we implement the Flight Data Recorder mode for XRay, where we use
+// compact structures to store records in memory as well as when writing out the
+// data to files.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_fdr_logging.h"
+#include <algorithm>
+#include <bitset>
+#include <cerrno>
+#include <cstring>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+#include <unordered_map>
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray/xray_interface.h"
+#include "xray/xray_records.h"
+#include "xray_buffer_queue.h"
+#include "xray_defs.h"
+#include "xray_fdr_logging_impl.h"
+#include "xray_flags.h"
+#include "xray_tsc.h"
+#include "xray_utils.h"
+
+namespace __xray {
+
+// Global BufferQueue.
+std::shared_ptr<BufferQueue> BQ;
+
+__sanitizer::atomic_sint32_t LoggingStatus = {
+ XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
+
+__sanitizer::atomic_sint32_t LogFlushStatus = {
+ XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};
+
+std::unique_ptr<FDRLoggingOptions> FDROptions;
+
+XRayLogInitStatus fdrLoggingInit(std::size_t BufferSize, std::size_t BufferMax,
+ void *Options,
+ size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
+ if (OptionsSize != sizeof(FDRLoggingOptions))
+ return static_cast<XRayLogInitStatus>(__sanitizer::atomic_load(
+ &LoggingStatus, __sanitizer::memory_order_acquire));
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &LoggingStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_INITIALIZING,
+ __sanitizer::memory_order_release))
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+
+ FDROptions.reset(new FDRLoggingOptions());
+ memcpy(FDROptions.get(), Options, OptionsSize);
+ bool Success = false;
+ BQ = std::make_shared<BufferQueue>(BufferSize, BufferMax, Success);
+ if (!Success) {
+ Report("BufferQueue init failed.\n");
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ }
+
+ // Install the actual handleArg0 handler after initialising the buffers.
+ __xray_set_handler(fdrLoggingHandleArg0);
+
+ __sanitizer::atomic_store(&LoggingStatus,
+ XRayLogInitStatus::XRAY_LOG_INITIALIZED,
+ __sanitizer::memory_order_release);
+ Report("XRay FDR init successful.\n");
+ return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+}
+
+// Must finalize before flushing.
+XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT {
+ if (__sanitizer::atomic_load(&LoggingStatus,
+ __sanitizer::memory_order_acquire) !=
+ XRayLogInitStatus::XRAY_LOG_FINALIZED)
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+
+ s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &LogFlushStatus, &Result, XRayLogFlushStatus::XRAY_LOG_FLUSHING,
+ __sanitizer::memory_order_release))
+ return static_cast<XRayLogFlushStatus>(Result);
+
+ // Make a copy of the BufferQueue pointer to prevent other threads that may be
+ // resetting it from blowing away the queue prematurely while we're dealing
+ // with it.
+ auto LocalBQ = BQ;
+
+ // We write out the file in the following format:
+ //
+ // 1) We write down the XRay file header with version 1, type FDR_LOG.
+ // 2) Then we use the 'apply' member of the BufferQueue that's live, to
+ // ensure that at this point in time we write down the buffers that have
+ // been released (and marked "used") -- we dump the full buffer for now
+ // (fixed-sized) and let the tools reading the buffers deal with the data
+ // afterwards.
+ //
+ int Fd = FDROptions->Fd;
+ if (Fd == -1)
+ Fd = getLogFD();
+ if (Fd == -1) {
+ auto Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ __sanitizer::atomic_store(&LogFlushStatus, Result,
+ __sanitizer::memory_order_release);
+ return Result;
+ }
+
+ XRayFileHeader Header;
+ Header.Version = 1;
+ Header.Type = FileTypes::FDR_LOG;
+ Header.CycleFrequency = probeRequiredCPUFeatures()
+ ? getTSCFrequency() : __xray::NanosecondsPerSecond;
+ // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc'
+ // before setting the values in the header.
+ Header.ConstantTSC = 1;
+ Header.NonstopTSC = 1;
+ Header.FdrData = FdrAdditionalHeaderData{LocalBQ->ConfiguredBufferSize()};
+ retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
+ reinterpret_cast<char *>(&Header) + sizeof(Header));
+
+ LocalBQ->apply([&](const BufferQueue::Buffer &B) {
+ uint64_t BufferSize = B.Size;
+ if (BufferSize > 0) {
+ retryingWriteAll(Fd, reinterpret_cast<char *>(B.Buffer),
+ reinterpret_cast<char *>(B.Buffer) + B.Size);
+ }
+ });
+ __sanitizer::atomic_store(&LogFlushStatus,
+ XRayLogFlushStatus::XRAY_LOG_FLUSHED,
+ __sanitizer::memory_order_release);
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+}
+
+XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT {
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &LoggingStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_FINALIZING,
+ __sanitizer::memory_order_release))
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+
+ // Do special things to make the log finalize itself, and not allow any more
+ // operations to be performed until re-initialized.
+ BQ->finalize();
+
+ __sanitizer::atomic_store(&LoggingStatus,
+ XRayLogInitStatus::XRAY_LOG_FINALIZED,
+ __sanitizer::memory_order_release);
+ return XRayLogInitStatus::XRAY_LOG_FINALIZED;
+}
+
+XRayLogInitStatus fdrLoggingReset() XRAY_NEVER_INSTRUMENT {
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_FINALIZED;
+ if (__sanitizer::atomic_compare_exchange_strong(
+ &LoggingStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_INITIALIZED,
+ __sanitizer::memory_order_release))
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+
+ // Release the in-memory buffer queue.
+ BQ.reset();
+
+ // Spin until the flushing status is flushed.
+ s32 CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+ while (__sanitizer::atomic_compare_exchange_weak(
+ &LogFlushStatus, &CurrentFlushingStatus,
+ XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING,
+ __sanitizer::memory_order_release)) {
+ if (CurrentFlushingStatus == XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING)
+ break;
+ CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+ }
+
+ // At this point, we know that the status is flushed, and that we can assume
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+}
+
+void fdrLoggingHandleArg0(int32_t FuncId,
+ XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
+ // We want to get the TSC as early as possible, so that we can check whether
+ // we've seen this CPU before. We also do it before we load anything else, to
+ // allow for forward progress with the scheduling.
+ unsigned char CPU;
+ uint64_t TSC;
+
+ if (probeRequiredCPUFeatures()) {
+ TSC = __xray::readTSC(CPU);
+ } else {
+ // FIXME: This code needs refactoring as it appears in multiple locations
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettime(2) return %d, errno=%d", result, int(errno));
+ TS = {0, 0};
+ }
+ CPU = 0;
+ TSC = TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec;
+ }
+
+ __xray_fdr_internal::processFunctionHook(FuncId, Entry, TSC, CPU,
+ clock_gettime, LoggingStatus, BQ);
+}
+
+} // namespace __xray
+
+static auto UNUSED Unused = [] {
+ using namespace __xray;
+ if (flags()->xray_fdr_log) {
+ XRayLogImpl Impl{
+ fdrLoggingInit, fdrLoggingFinalize, fdrLoggingHandleArg0,
+ fdrLoggingFlush,
+ };
+ __xray_set_log_impl(Impl);
+ }
+ return true;
+}();
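For reference, the flow the new fdr_logging_test.cc above exercises condenses to the sketch below. These are internal entry points, shown only to illustrate the init -> log -> finalize -> flush -> reset sequence; regular clients would register the implementation through the xray_log_interface.h machinery as the lambda above does:

  #include "xray_fdr_logging.h"

  using namespace __xray;

  void sketch() {
    FDRLoggingOptions Options;
    Options.Fd = -1;  // -1 lets the runtime open its own log file (getLogFD()).
    fdrLoggingInit(/*BufferSize=*/16384, /*BufferMax=*/10, &Options,
                   sizeof(FDRLoggingOptions));
    fdrLoggingHandleArg0(/*FuncId=*/1, XRayEntryType::ENTRY);  // normally installed
    fdrLoggingHandleArg0(/*FuncId=*/1, XRayEntryType::EXIT);   // as the XRay handler
    fdrLoggingFinalize();  // must finalize before flushing
    fdrLoggingFlush();     // writes the file header plus all "used" buffers
    fdrLoggingReset();
  }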
diff --git a/lib/xray/xray_fdr_logging.h b/lib/xray/xray_fdr_logging.h
new file mode 100644
index 000000000000..426b54dc7884
--- /dev/null
+++ b/lib/xray/xray_fdr_logging.h
@@ -0,0 +1,38 @@
+//===-- xray_fdr_logging.h ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_FDR_LOGGING_H
+#define XRAY_XRAY_FDR_LOGGING_H
+
+#include "xray/xray_log_interface.h"
+#include "xray_fdr_log_records.h"
+
+// FDR (Flight Data Recorder) Mode
+// ===============================
+//
+// The XRay whitepaper describes a mode of operation for function call trace
+// logging that involves writing small records into an in-memory circular
+// buffer, that then gets logged to disk on demand. To do this efficiently and
+// capture as much data as we can, we use smaller records compared to the
+// default mode of always writing fixed-size records.
+
+namespace __xray {
+XRayLogInitStatus fdrLoggingInit(size_t BufferSize, size_t BufferMax,
+ void *Options, size_t OptionsSize);
+XRayLogInitStatus fdrLoggingFinalize();
+void fdrLoggingHandleArg0(int32_t FuncId, XRayEntryType Entry);
+XRayLogFlushStatus fdrLoggingFlush();
+XRayLogInitStatus fdrLoggingReset();
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_FDR_LOGGING_H
diff --git a/lib/xray/xray_fdr_logging_impl.h b/lib/xray/xray_fdr_logging_impl.h
new file mode 100644
index 000000000000..ce360cb03ea7
--- /dev/null
+++ b/lib/xray/xray_fdr_logging_impl.h
@@ -0,0 +1,639 @@
+//===-- xray_fdr_logging_impl.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Here we implement the thread local state management and record i/o for Flight
+// Data Recorder mode for XRay, where we use compact structures to store records
+// in memory as well as when writing out the data to files.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_FDR_LOGGING_IMPL_H
+#define XRAY_XRAY_FDR_LOGGING_IMPL_H
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <string>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray/xray_log_interface.h"
+#include "xray_buffer_queue.h"
+#include "xray_defs.h"
+#include "xray_fdr_log_records.h"
+#include "xray_flags.h"
+#include "xray_tsc.h"
+
+namespace __xray {
+
+/// We expose some of the state transitions when FDR logging mode is operating
+/// so that tests can simulate a series of log events deterministically,
+/// without depending on the real CPU time.
+///
+/// Because the code uses thread_local allocation extensively as part of its
+/// design, callers that wish to test events occurring on different threads
+/// will actually have to run them on different threads.
+///
+/// This also means that it is possible to break invariants maintained by
+/// cooperation with xray_fdr_logging class, so be careful and think twice.
+namespace __xray_fdr_internal {
+
+/// Writes the new buffer record and wallclock time that begin a buffer for a
+/// thread to MemPtr and increments MemPtr. Bypasses the thread local state
+/// machine and writes directly to memory without checks.
+static void writeNewBufferPreamble(pid_t Tid, timespec TS, char *&MemPtr);
+
+/// Writes a metadata record for switching to a new CPU to MemPtr and increments
+/// MemPtr. Bypasses the thread local state machine and writes directly to
+/// memory without checks.
+static void writeNewCPUIdMetadata(uint16_t CPU, uint64_t TSC, char *&MemPtr);
+
+/// Writes an EOB metadata record to MemPtr and increments MemPtr. Bypasses the
+/// thread local state machine and writes directly to memory without checks.
+static void writeEOBMetadata(char *&MemPtr);
+
+/// Writes a TSC Wrap metadata record to MemPtr and increments MemPtr. Bypasses
+/// the thread local state machine and directly writes to memory without checks.
+static void writeTSCWrapMetadata(uint64_t TSC, char *&MemPtr);
+
+/// Writes a Function Record to MemPtr and increments MemPtr. Bypasses the
+/// thread local state machine and writes the function record directly to
+/// memory.
+static void writeFunctionRecord(int FuncId, uint32_t TSCDelta,
+ XRayEntryType EntryType, char *&MemPtr);
+
+/// Sets up a new buffer in thread_local storage and writes a preamble. The
+/// wall_clock_reader function is used to populate the WallTimeRecord entry.
+static void setupNewBuffer(int (*wall_clock_reader)(clockid_t,
+ struct timespec *));
+
+/// Called to record CPU time for a new CPU within the current thread.
+static void writeNewCPUIdMetadata(uint16_t CPU, uint64_t TSC);
+
+/// Called to close the buffer when the thread exhausts the buffer or when the
+/// thread exits (via a thread local variable destructor).
+static void writeEOBMetadata();
+
+/// TSC Wrap records are written when a TSC delta encoding scheme overflows.
+static void writeTSCWrapMetadata(uint64_t TSC);
+
+/// Here's where the meat of the processing happens. The writer captures
+/// function entry, exit and tail exit points with a time and will create
+/// TSCWrap, NewCPUId and Function records as necessary. The writer might
+/// walk backward through its buffer and erase trivial functions to avoid
+/// polluting the log and may use the buffer queue to obtain or release a
+/// buffer.
+static void processFunctionHook(int32_t FuncId, XRayEntryType Entry,
+ uint64_t TSC, unsigned char CPU,
+ int (*wall_clock_reader)(clockid_t,
+ struct timespec *),
+ __sanitizer::atomic_sint32_t &LoggingStatus,
+ const std::shared_ptr<BufferQueue> &BQ);
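+
+/// As a rough illustration of the stream processFunctionHook produces: a
+/// thread that enters and exits a few functions on a single CPU writes, in
+/// order, a NewBuffer record, a WalltimeMarker record, a NewCPUId record, a
+/// run of Function records (with TSCWrap/NewCPUId metadata interleaved only
+/// when needed), and finally an EOB record when the buffer is released.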
+
+//-----------------------------------------------------------------------------|
+// The rest of the file is implementation. |
+//-----------------------------------------------------------------------------|
+// Functions are implemented in the header for inlining since we don't want |
+// to grow the stack when we've hijacked the binary for logging. |
+//-----------------------------------------------------------------------------|
+
+namespace {
+
+thread_local BufferQueue::Buffer Buffer;
+thread_local char *RecordPtr = nullptr;
+
+// The number of FunctionEntry records immediately preceding RecordPtr.
+thread_local uint8_t NumConsecutiveFnEnters = 0;
+
+// The number of adjacent, consecutive pairs of FunctionEntry, Tail Exit
+// records preceding RecordPtr.
+thread_local uint8_t NumTailCalls = 0;
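+
+// For illustration: after writing Enter(f), Enter(g), TailExit(g), Enter(h)
+// into a buffer, NumConsecutiveFnEnters == 1 (for h) and NumTailCalls == 1
+// (for the adjacent Enter(g)/TailExit(g) pair).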
+
+constexpr auto MetadataRecSize = sizeof(MetadataRecord);
+constexpr auto FunctionRecSize = sizeof(FunctionRecord);
+
+class ThreadExitBufferCleanup {
+ std::weak_ptr<BufferQueue> Buffers;
+ BufferQueue::Buffer &Buffer;
+
+public:
+ explicit ThreadExitBufferCleanup(std::weak_ptr<BufferQueue> BQ,
+ BufferQueue::Buffer &Buffer)
+ XRAY_NEVER_INSTRUMENT : Buffers(BQ),
+ Buffer(Buffer) {}
+
+ ~ThreadExitBufferCleanup() noexcept XRAY_NEVER_INSTRUMENT {
+ if (RecordPtr == nullptr)
+ return;
+
+ // We make sure that upon exit, a thread will write out the EOB
+ // MetadataRecord in the thread-local log, and also release the buffer to
+ // the queue.
+ assert((RecordPtr + MetadataRecSize) - static_cast<char *>(Buffer.Buffer) >=
+ static_cast<ptrdiff_t>(MetadataRecSize));
+ if (auto BQ = Buffers.lock()) {
+ writeEOBMetadata();
+ auto EC = BQ->releaseBuffer(Buffer);
+ if (EC != BufferQueue::ErrorCode::Ok)
+ Report("Failed to release buffer at %p; error=%s\n", Buffer.Buffer,
+ BufferQueue::getErrorString(EC));
+ return;
+ }
+ }
+};
+
+class RecursionGuard {
+ bool &Running;
+ const bool Valid;
+
+public:
+ explicit RecursionGuard(bool &R) : Running(R), Valid(!R) {
+ if (Valid)
+ Running = true;
+ }
+
+ RecursionGuard(const RecursionGuard &) = delete;
+ RecursionGuard(RecursionGuard &&) = delete;
+ RecursionGuard &operator=(const RecursionGuard &) = delete;
+ RecursionGuard &operator=(RecursionGuard &&) = delete;
+
+ explicit operator bool() const { return Valid; }
+
+ ~RecursionGuard() noexcept {
+ if (Valid)
+ Running = false;
+ }
+};
+
+static inline bool loggingInitialized(
+ const __sanitizer::atomic_sint32_t &LoggingStatus) XRAY_NEVER_INSTRUMENT {
+ return __sanitizer::atomic_load(&LoggingStatus,
+ __sanitizer::memory_order_acquire) ==
+ XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+}
+
+} // namespace
+
+static inline void writeNewBufferPreamble(pid_t Tid, timespec TS,
+ char *&MemPtr) XRAY_NEVER_INSTRUMENT {
+ static constexpr int InitRecordsCount = 2;
+ std::aligned_storage<sizeof(MetadataRecord)>::type Records[InitRecordsCount];
+ {
+ // Write out a MetadataRecord to signify that this is the start of a new
+ // buffer, associated with a particular thread, with a new CPU. For the
+ // data, we have 15 bytes to squeeze as much information as we can. At this
+ // point we only write down the following bytes:
+ // - Thread ID (pid_t, 4 bytes)
+ auto &NewBuffer = *reinterpret_cast<MetadataRecord *>(&Records[0]);
+ NewBuffer.Type = uint8_t(RecordType::Metadata);
+ NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer);
+ std::memcpy(&NewBuffer.Data, &Tid, sizeof(pid_t));
+ }
+ // Also write the WalltimeMarker record.
+ {
+ static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes");
+ auto &WalltimeMarker = *reinterpret_cast<MetadataRecord *>(&Records[1]);
+ WalltimeMarker.Type = uint8_t(RecordType::Metadata);
+ WalltimeMarker.RecordKind =
+ uint8_t(MetadataRecord::RecordKinds::WalltimeMarker);
+
+ // We only really need microsecond precision here, and we enforce across
+ // platforms that the Metadata record encodes 64-bit seconds and 32-bit
+ // microseconds.
+ int32_t Micros = TS.tv_nsec / 1000;
+ int64_t Seconds = TS.tv_sec;
+ std::memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds));
+ std::memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros, sizeof(Micros));
+ }
+ std::memcpy(MemPtr, Records, sizeof(MetadataRecord) * InitRecordsCount);
+ MemPtr += sizeof(MetadataRecord) * InitRecordsCount;
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+}
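+
+// Note: writeNewBufferPreamble above writes two MetadataRecords (a NewBuffer
+// record carrying the thread id, then a WalltimeMarker record carrying 64-bit
+// seconds and 32-bit microseconds), so every buffer starts with
+// 2 * sizeof(MetadataRecord) bytes of metadata.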
+
+static inline void setupNewBuffer(int (*wall_clock_reader)(clockid_t,
+ struct timespec *))
+ XRAY_NEVER_INSTRUMENT {
+ RecordPtr = static_cast<char *>(Buffer.Buffer);
+ pid_t Tid = syscall(SYS_gettid);
+ timespec TS{0, 0};
+ // This is typically clock_gettime, but callers can inject their own reader.
+ wall_clock_reader(CLOCK_MONOTONIC, &TS);
+ writeNewBufferPreamble(Tid, TS, RecordPtr);
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+}
+
+static inline void writeNewCPUIdMetadata(uint16_t CPU, uint64_t TSC,
+ char *&MemPtr) XRAY_NEVER_INSTRUMENT {
+ MetadataRecord NewCPUId;
+ NewCPUId.Type = uint8_t(RecordType::Metadata);
+ NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId);
+
+ // The data for the New CPU will contain the following bytes:
+ // - CPU ID (uint16_t, 2 bytes)
+ // - Full TSC (uint64_t, 8 bytes)
+ // Total = 10 bytes.
+ std::memcpy(&NewCPUId.Data, &CPU, sizeof(CPU));
+ std::memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC));
+ std::memcpy(MemPtr, &NewCPUId, sizeof(MetadataRecord));
+ MemPtr += sizeof(MetadataRecord);
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+}
+
+static inline void writeNewCPUIdMetadata(uint16_t CPU,
+ uint64_t TSC) XRAY_NEVER_INSTRUMENT {
+ writeNewCPUIdMetadata(CPU, TSC, RecordPtr);
+}
+
+static inline void writeEOBMetadata(char *&MemPtr) XRAY_NEVER_INSTRUMENT {
+ MetadataRecord EOBMeta;
+ EOBMeta.Type = uint8_t(RecordType::Metadata);
+ EOBMeta.RecordKind = uint8_t(MetadataRecord::RecordKinds::EndOfBuffer);
+ // For now we don't write any bytes into the Data field.
+ std::memcpy(MemPtr, &EOBMeta, sizeof(MetadataRecord));
+ MemPtr += sizeof(MetadataRecord);
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+}
+
+static inline void writeEOBMetadata() XRAY_NEVER_INSTRUMENT {
+ writeEOBMetadata(RecordPtr);
+}
+
+static inline void writeTSCWrapMetadata(uint64_t TSC,
+ char *&MemPtr) XRAY_NEVER_INSTRUMENT {
+ MetadataRecord TSCWrap;
+ TSCWrap.Type = uint8_t(RecordType::Metadata);
+ TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap);
+
+ // The data for the TSCWrap record contains the following bytes:
+ // - Full TSC (uint64_t, 8 bytes)
+ // Total = 8 bytes.
+ std::memcpy(&TSCWrap.Data, &TSC, sizeof(TSC));
+ std::memcpy(MemPtr, &TSCWrap, sizeof(MetadataRecord));
+ MemPtr += sizeof(MetadataRecord);
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+}
+
+static inline void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT {
+ writeTSCWrapMetadata(TSC, RecordPtr);
+}
+
+static inline void writeFunctionRecord(int FuncId, uint32_t TSCDelta,
+ XRayEntryType EntryType,
+ char *&MemPtr) XRAY_NEVER_INSTRUMENT {
+ std::aligned_storage<sizeof(FunctionRecord), alignof(FunctionRecord)>::type
+ AlignedFuncRecordBuffer;
+ auto &FuncRecord =
+ *reinterpret_cast<FunctionRecord *>(&AlignedFuncRecordBuffer);
+ FuncRecord.Type = uint8_t(RecordType::Function);
+ // Only take 28 bits of the function id.
+ FuncRecord.FuncId = FuncId & ~(0x0F << 28);
+ FuncRecord.TSCDelta = TSCDelta;
+
+ switch (EntryType) {
+ case XRayEntryType::ENTRY:
+ ++NumConsecutiveFnEnters;
+ FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
+ break;
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ // We should not rewind functions with logged args.
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+ FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
+ break;
+ case XRayEntryType::EXIT:
+ // If we've decided to log the function exit, we will never erase the log
+ // before it.
+ NumConsecutiveFnEnters = 0;
+ NumTailCalls = 0;
+ FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit);
+ break;
+ case XRayEntryType::TAIL:
+ // If we just entered the function we're tail exiting from or erased every
+ // invocation since then, this function entry tail pair is a candidate to
+ // be erased when the child function exits.
+ if (NumConsecutiveFnEnters > 0) {
+ ++NumTailCalls;
+ NumConsecutiveFnEnters = 0;
+ } else {
+ // We will never be able to erase this tail call since we have logged
+ // something in between the function entry and tail exit.
+ NumTailCalls = 0;
+ NumConsecutiveFnEnters = 0;
+ }
+ FuncRecord.RecordKind =
+ uint8_t(FunctionRecord::RecordKinds::FunctionTailExit);
+ break;
+ }
+
+ std::memcpy(MemPtr, &AlignedFuncRecordBuffer, sizeof(FunctionRecord));
+ MemPtr += sizeof(FunctionRecord);
+}
+
+static uint64_t thresholdTicks() {
+ static uint64_t TicksPerSec = probeRequiredCPUFeatures() ? getTSCFrequency() :
+ __xray::NanosecondsPerSecond;
+ static const uint64_t ThresholdTicks =
+ TicksPerSec * flags()->xray_fdr_log_func_duration_threshold_us / 1000000;
+ return ThresholdTicks;
+}
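+
+// For example (illustrative numbers only): with a hypothetical 3 GHz TSC and
+// the default xray_fdr_log_func_duration_threshold_us of 5, thresholdTicks()
+// yields 3,000,000,000 * 5 / 1,000,000 = 15,000 ticks.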
+
+// Re-point the thread local pointer into this thread's Buffer before the recent
+// "Function Entry" record and any "Tail Call Exit" records after that.
+static void rewindRecentCall(uint64_t TSC, uint64_t &LastTSC,
+ uint64_t &LastFunctionEntryTSC, int32_t FuncId) {
+ using AlignedFuncStorage =
+ std::aligned_storage<sizeof(FunctionRecord),
+ alignof(FunctionRecord)>::type;
+ RecordPtr -= FunctionRecSize;
+ AlignedFuncStorage AlignedFuncRecordBuffer;
+ const auto &FuncRecord = *reinterpret_cast<FunctionRecord *>(
+ std::memcpy(&AlignedFuncRecordBuffer, RecordPtr, FunctionRecSize));
+ assert(FuncRecord.RecordKind ==
+ uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
+ "Expected to find function entry recording when rewinding.");
+ assert(FuncRecord.FuncId == (FuncId & ~(0x0F << 28)) &&
+ "Expected matching function id when rewinding Exit");
+ --NumConsecutiveFnEnters;
+ LastTSC -= FuncRecord.TSCDelta;
+
+ // We unwound one call. Update the state and return without writing a log.
+ if (NumConsecutiveFnEnters != 0) {
+ LastFunctionEntryTSC -= FuncRecord.TSCDelta;
+ return;
+ }
+
+ // Otherwise we've rewound the stack of all function entries; we might be
+ // able to rewind further by erasing tail call functions that are being
+ // exited from via this exit.
+ LastFunctionEntryTSC = 0;
+ auto RewindingTSC = LastTSC;
+ auto RewindingRecordPtr = RecordPtr - FunctionRecSize;
+ while (NumTailCalls > 0) {
+ AlignedFuncStorage TailExitRecordBuffer;
+ // Rewind the TSC back over the TAIL EXIT record.
+ const auto &ExpectedTailExit =
+ *reinterpret_cast<FunctionRecord *>(std::memcpy(
+ &TailExitRecordBuffer, RewindingRecordPtr, FunctionRecSize));
+
+ assert(ExpectedTailExit.RecordKind ==
+ uint8_t(FunctionRecord::RecordKinds::FunctionTailExit) &&
+ "Expected to find tail exit when rewinding.");
+ RewindingRecordPtr -= FunctionRecSize;
+ RewindingTSC -= ExpectedTailExit.TSCDelta;
+ AlignedFuncStorage FunctionEntryBuffer;
+ const auto &ExpectedFunctionEntry =
+ *reinterpret_cast<FunctionRecord *>(std::memcpy(
+ &FunctionEntryBuffer, RewindingRecordPtr, FunctionRecSize));
+ assert(ExpectedFunctionEntry.RecordKind ==
+ uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
+ "Expected to find function entry when rewinding tail call.");
+ assert(ExpectedFunctionEntry.FuncId == ExpectedTailExit.FuncId &&
+ "Expected funcids to match when rewinding tail call.");
+
+ // This tail call exceeded the threshold duration. It will not be erased.
+ if ((TSC - RewindingTSC) >= thresholdTicks()) {
+ NumTailCalls = 0;
+ return;
+ }
+
+ // We can erase a tail exit pair that we're exiting through since
+ // its duration is under threshold.
+ --NumTailCalls;
+ RewindingRecordPtr -= FunctionRecSize;
+ RewindingTSC -= ExpectedFunctionEntry.TSCDelta;
+ RecordPtr -= 2 * FunctionRecSize;
+ LastTSC = RewindingTSC;
+ }
+}
+
+static inline bool releaseThreadLocalBuffer(BufferQueue *BQ) {
+ auto EC = BQ->releaseBuffer(Buffer);
+ if (EC != BufferQueue::ErrorCode::Ok) {
+ Report("Failed to release buffer at %p; error=%s\n", Buffer.Buffer,
+ BufferQueue::getErrorString(EC));
+ return false;
+ }
+ return true;
+}
+
+static inline void processFunctionHook(
+ int32_t FuncId, XRayEntryType Entry, uint64_t TSC, unsigned char CPU,
+ int (*wall_clock_reader)(clockid_t, struct timespec *),
+ __sanitizer::atomic_sint32_t &LoggingStatus,
+ const std::shared_ptr<BufferQueue> &BQ) XRAY_NEVER_INSTRUMENT {
+ // Bail out right away if logging is not initialized yet.
+ // We should take the opportunity to release the buffer though.
+ auto Status = __sanitizer::atomic_load(&LoggingStatus,
+ __sanitizer::memory_order_acquire);
+ if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
+ if (RecordPtr != nullptr &&
+ (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
+ Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)) {
+ writeEOBMetadata();
+ if (!releaseThreadLocalBuffer(BQ.get()))
+ return;
+ RecordPtr = nullptr;
+ }
+ return;
+ }
+
+ // We use thread_local variables to keep track of which CPU this thread last
+ // ran on, and the TSC readings we've seen there. This allows us to avoid
+ // repeating the CPU field in the function records.
+ //
+ // We assume that we'll support only 65536 CPUs for x86_64.
+ thread_local uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max();
+ thread_local uint64_t LastTSC = 0;
+ thread_local uint64_t LastFunctionEntryTSC = 0;
+
+ // Make sure a thread that's ever called handleArg0 has a thread-local
+ // live reference to the buffer queue for this particular instance of
+ // FDRLogging, and that we're going to clean it up when the thread exits.
+ thread_local auto LocalBQ = BQ;
+ thread_local ThreadExitBufferCleanup Cleanup(LocalBQ, Buffer);
+
+ // Prevent signal handler recursion: if we're already in the middle of writing
+ // a log record and a signal handler (which is also instrumented) comes in,
+ // we don't want it to clobber a potentially partial write already happening
+ // on this thread. We use a simple thread_local latch to allow only one
+ // on-going handleArg0 at any given time.
+ thread_local bool Running = false;
+ RecursionGuard Guard{Running};
+ if (!Guard) {
+ assert(Running == true && "RecursionGuard is buggy!");
+ return;
+ }
+
+ if (!loggingInitialized(LoggingStatus) || LocalBQ->finalizing()) {
+ writeEOBMetadata();
+ if (!releaseThreadLocalBuffer(BQ.get()))
+ return;
+ RecordPtr = nullptr;
+ }
+
+ if (Buffer.Buffer == nullptr) {
+ auto EC = LocalBQ->getBuffer(Buffer);
+ if (EC != BufferQueue::ErrorCode::Ok) {
+ auto LS = __sanitizer::atomic_load(&LoggingStatus,
+ __sanitizer::memory_order_acquire);
+ if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING &&
+ LS != XRayLogInitStatus::XRAY_LOG_FINALIZED)
+ Report("Failed to acquire a buffer; error=%s\n",
+ BufferQueue::getErrorString(EC));
+ return;
+ }
+
+ setupNewBuffer(wall_clock_reader);
+ }
+
+ if (CurrentCPU == std::numeric_limits<uint16_t>::max()) {
+ // This means this is the first CPU this thread has ever run on. We set the
+ // current CPU and record this as the first TSC we've seen.
+ CurrentCPU = CPU;
+ writeNewCPUIdMetadata(CPU, TSC);
+ }
+
+ // Before we set up to write new function entries, we need to be careful
+ // about the pointer math: we must ensure that the record we are about to
+ // write fits into the buffer without overflowing it.
+ //
+ // To do this properly, we use the following assumptions:
+ //
+ // - The smallest number of bytes we will ever write is 8
+ // (sizeof(FunctionRecord)), which happens only if the delta between the
+ // previous entry and this entry fits within 32 bits.
+ // - The largest number of bytes we will ever write is 8 + 16 = 24. This is
+ // computed by:
+ //
+ // sizeof(FunctionRecord) + sizeof(MetadataRecord)
+ //
+ // These arise in the following cases:
+ //
+ // 1. When the delta between the TSC we get and the previous TSC for the
+ // same CPU is outside of the uint32_t range, we end up having to
+ // write a MetadataRecord to indicate a "tsc wrap" before the actual
+ // FunctionRecord.
+ // 2. When we learn that we've moved CPUs, we need to write a
+ // MetadataRecord to indicate a "cpu change", and thus write out the
+ // current TSC for that CPU before writing out the actual
+ // FunctionRecord.
+ // 3. When we learn about a new CPU ID, we need to write down a "new cpu
+ // id" MetadataRecord before writing out the actual FunctionRecord.
+ //
+ // - An End-of-Buffer (EOB) MetadataRecord is 16 bytes.
+ //
+ // So the math we need to do is to determine whether writing 24 bytes past the
+ // current pointer still leaves us enough room to write the EOB
+ // MetadataRecord. If writing as much as 24 bytes would not leave that room at
+ // the end of the buffer, we need to write out the EOB, get a new buffer, and
+ // set it up properly before doing any further writing.
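+ // Concretely, using the sizes quoted above: the worst case is a 16-byte
+ // MetadataRecord plus an 8-byte FunctionRecord (24 bytes), and we still want
+ // room for the 16-byte EOB record afterwards, i.e. roughly 24 + 16 = 40
+ // bytes of headroom before we must roll over to a fresh buffer.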
+ //
+ char *BufferStart = static_cast<char *>(Buffer.Buffer);
+ if ((RecordPtr + (MetadataRecSize + FunctionRecSize)) - BufferStart <
+ static_cast<ptrdiff_t>(MetadataRecSize)) {
+ writeEOBMetadata();
+ if (!releaseThreadLocalBuffer(LocalBQ.get()))
+ return;
+ auto EC = LocalBQ->getBuffer(Buffer);
+ if (EC != BufferQueue::ErrorCode::Ok) {
+ Report("Failed to acquire a buffer; error=%s\n",
+ BufferQueue::getErrorString(EC));
+ return;
+ }
+ setupNewBuffer(wall_clock_reader);
+ }
+
+ // By this point, we are now ready to write at most 24 bytes (one metadata
+ // record and one function record).
+ BufferStart = static_cast<char *>(Buffer.Buffer);
+ assert((RecordPtr + (MetadataRecSize + FunctionRecSize)) - BufferStart >=
+ static_cast<ptrdiff_t>(MetadataRecSize) &&
+ "Misconfigured BufferQueue provided; Buffer size not large enough.");
+
+ // Here we compute the TSC Delta. There are a few interesting situations we
+ // need to account for:
+ //
+ // - The thread has migrated to a different CPU. If this is the case, then
+ // we write down the following records:
+ //
+ // 1. A 'NewCPUId' Metadata record.
+ // 2. A FunctionRecord with a 0 for the TSCDelta field.
+ //
+ // - The TSC delta is greater than the 32 bits we can store in a
+ // FunctionRecord. In this case we write down the following records:
+ //
+ // 1. A 'TSCWrap' Metadata record.
+ // 2. A FunctionRecord with a 0 for the TSCDelta field.
+ //
+ // - The TSC delta is representable within the 32 bits we can store in a
+ // FunctionRecord. In this case we write down just a FunctionRecord with
+ // the correct TSC delta.
+ //
+
+ uint32_t RecordTSCDelta = 0;
+ if (CPU != CurrentCPU) {
+ // We've moved to a new CPU.
+ writeNewCPUIdMetadata(CPU, TSC);
+ } else {
+ // If the delta is greater than the range for a uint32_t, then we write out
+ // the TSC wrap metadata entry with the full TSC, and the TSC delta for the
+ // function record will be 0.
+ auto Delta = TSC - LastTSC;
+ if (Delta > (1ULL << 32) - 1)
+ writeTSCWrapMetadata(TSC);
+ else
+ RecordTSCDelta = Delta;
+ }
+
+ LastTSC = TSC;
+ CurrentCPU = CPU;
+ switch (Entry) {
+ case XRayEntryType::ENTRY:
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ // Update the thread local state for the next invocation.
+ LastFunctionEntryTSC = TSC;
+ break;
+ case XRayEntryType::TAIL:
+ break;
+ case XRayEntryType::EXIT:
+ // Break out and write the exit record if we can't erase any functions.
+ if (NumConsecutiveFnEnters == 0 ||
+ (TSC - LastFunctionEntryTSC) >= thresholdTicks())
+ break;
+ rewindRecentCall(TSC, LastTSC, LastFunctionEntryTSC, FuncId);
+ return; // without writing log.
+ }
+
+ writeFunctionRecord(FuncId, RecordTSCDelta, Entry, RecordPtr);
+
+ // If we've exhausted the buffer by this time, we then release it so that
+ // other threads may start using it.
+ if ((RecordPtr + MetadataRecSize) - BufferStart == MetadataRecSize) {
+ writeEOBMetadata();
+ if (!releaseThreadLocalBuffer(LocalBQ.get()))
+ return;
+ RecordPtr = nullptr;
+ }
+}
+
+} // namespace __xray_fdr_internal
+
+} // namespace __xray
+#endif // XRAY_XRAY_FDR_LOGGING_IMPL_H
diff --git a/lib/xray/xray_flags.cc b/lib/xray/xray_flags.cc
index 338c2378b8cd..1ee4d10d753c 100644
--- a/lib/xray/xray_flags.cc
+++ b/lib/xray/xray_flags.cc
@@ -24,31 +24,55 @@ namespace __xray {
Flags xray_flags_dont_use_directly; // use via flags().
-void Flags::SetDefaults() XRAY_NEVER_INSTRUMENT {
+void Flags::setDefaults() XRAY_NEVER_INSTRUMENT {
#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "xray_flags.inc"
#undef XRAY_FLAG
}
-static void RegisterXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT {
+static void registerXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT {
#define XRAY_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(P, #Name, Description, &F->Name);
#include "xray_flags.inc"
#undef XRAY_FLAG
}
-void InitializeFlags() XRAY_NEVER_INSTRUMENT {
+// This function returns a statically defined list of options that control
+// XRay, provided through a macro introduced when the XRay runtime is built.
+// This means users/deployments can override the hard-coded defaults in
+// xray_flags.inc at compile time using the XRAY_DEFAULT_OPTIONS macro.
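+// For example, a build might pass (hypothetical values):
+//   -DXRAY_DEFAULT_OPTIONS=verbosity=1:xray_naive_log=false
+// and those settings become the baseline that XRAY_OPTIONS can still override.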
+static const char *useCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT {
+#ifdef XRAY_DEFAULT_OPTIONS
+// Do the double-layered string conversion to prevent badly crafted strings
+// provided through XRAY_DEFAULT_OPTIONS from causing compilation issues (or
+// changing the semantics of the implementation through the macro). This
+// ensures that whatever XRAY_DEFAULT_OPTIONS is defined to is converted into a
+// string literal.
+#define XRAY_STRINGIZE(x) #x
+#define XRAY_STRINGIZE_OPTIONS(options) XRAY_STRINGIZE(options)
+ return XRAY_STRINGIZE_OPTIONS(XRAY_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void initializeFlags() XRAY_NEVER_INSTRUMENT {
SetCommonFlagsDefaults();
auto *F = flags();
- F->SetDefaults();
+ F->setDefaults();
FlagParser XRayParser;
- RegisterXRayFlags(&XRayParser, F);
+ registerXRayFlags(&XRayParser, F);
RegisterCommonFlags(&XRayParser);
- // Override from command line.
+ // Use options defaulted at compile-time for the runtime.
+ const char *XRayCompileFlags = useCompilerDefinedFlags();
+ XRayParser.ParseString(XRayCompileFlags);
+
+ // Override from environment variables.
XRayParser.ParseString(GetEnv("XRAY_OPTIONS"));
+ // Override from command line.
InitializeCommonFlags();
if (Verbosity())
diff --git a/lib/xray/xray_flags.h b/lib/xray/xray_flags.h
index 2ecf5fb9ba1d..f4e30283b8de 100644
--- a/lib/xray/xray_flags.h
+++ b/lib/xray/xray_flags.h
@@ -24,13 +24,13 @@ struct Flags {
#include "xray_flags.inc"
#undef XRAY_FLAG
- void SetDefaults();
+ void setDefaults();
};
extern Flags xray_flags_dont_use_directly;
inline Flags *flags() { return &xray_flags_dont_use_directly; }
-void InitializeFlags();
+void initializeFlags();
} // namespace __xray
diff --git a/lib/xray/xray_flags.inc b/lib/xray/xray_flags.inc
index 0f6ced8ead0c..7ddce78eb413 100644
--- a/lib/xray/xray_flags.inc
+++ b/lib/xray/xray_flags.inc
@@ -14,9 +14,14 @@
#error "Define XRAY_FLAG prior to including this file!"
#endif
-XRAY_FLAG(bool, patch_premain, true,
+XRAY_FLAG(bool, patch_premain, false,
"Whether to patch instrumentation points before main.")
XRAY_FLAG(bool, xray_naive_log, true,
"Whether to install the naive log implementation.")
XRAY_FLAG(const char *, xray_logfile_base, "xray-log.",
"Filename base for the xray logfile.")
+XRAY_FLAG(bool, xray_fdr_log, false,
+ "Whether to install the flight data recorder logging implementation.")
+XRAY_FLAG(int, xray_fdr_log_func_duration_threshold_us, 5,
+ "FDR logging will try to skip functions that execute for fewer "
+ "microseconds than this threshold.")
diff --git a/lib/xray/xray_init.cc b/lib/xray/xray_init.cc
index eb86182910cf..6f558d656147 100644
--- a/lib/xray/xray_init.cc
+++ b/lib/xray/xray_init.cc
@@ -12,7 +12,6 @@
// XRay initialisation logic.
//===----------------------------------------------------------------------===//
-#include <atomic>
#include <fcntl.h>
#include <strings.h>
#include <unistd.h>
@@ -28,7 +27,6 @@ extern const XRaySledEntry __start_xray_instr_map[] __attribute__((weak));
extern const XRaySledEntry __stop_xray_instr_map[] __attribute__((weak));
}
-using namespace __sanitizer;
using namespace __xray;
// When set to 'true' this means the XRay runtime has been initialised. We use
@@ -38,29 +36,28 @@ using namespace __xray;
//
// FIXME: Support DSO instrumentation maps too. The current solution only works
// for statically linked executables.
-std::atomic<bool> XRayInitialized{false};
+__sanitizer::atomic_uint8_t XRayInitialized{0};
// This should always be updated before XRayInitialized is updated.
-std::atomic<__xray::XRaySledMap> XRayInstrMap{};
+__sanitizer::SpinMutex XRayInstrMapMutex;
+XRaySledMap XRayInstrMap;
// __xray_init() will do the actual loading of the current process' memory map
// and then proceed to look for the .xray_instr_map section/segment.
void __xray_init() XRAY_NEVER_INSTRUMENT {
- InitializeFlags();
+ initializeFlags();
if (__start_xray_instr_map == nullptr) {
Report("XRay instrumentation map missing. Not initializing XRay.\n");
return;
}
- // Now initialize the XRayInstrMap global struct with the address of the
- // entries, reinterpreted as an array of XRaySledEntry objects. We use the
- // virtual pointer we have from the section to provide us the correct
- // information.
- __xray::XRaySledMap SledMap{};
- SledMap.Sleds = __start_xray_instr_map;
- SledMap.Entries = __stop_xray_instr_map - __start_xray_instr_map;
- XRayInstrMap.store(SledMap, std::memory_order_release);
- XRayInitialized.store(true, std::memory_order_release);
+ {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ XRayInstrMap.Sleds = __start_xray_instr_map;
+ XRayInstrMap.Entries = __stop_xray_instr_map - __start_xray_instr_map;
+ }
+ __sanitizer::atomic_store(&XRayInitialized, true,
+ __sanitizer::memory_order_release);
if (flags()->patch_premain)
__xray_patch();
diff --git a/lib/xray/xray_inmemory_log.cc b/lib/xray/xray_inmemory_log.cc
index adcb21671cbc..cdaa6d1b5c86 100644
--- a/lib/xray/xray_inmemory_log.cc
+++ b/lib/xray/xray_inmemory_log.cc
@@ -16,8 +16,6 @@
//===----------------------------------------------------------------------===//
#include <cassert>
-#include <cstdint>
-#include <cstdio>
#include <fcntl.h>
#include <mutex>
#include <sys/stat.h>
@@ -26,19 +24,13 @@
#include <thread>
#include <unistd.h>
-#if defined(__x86_64__)
-#include "xray_x86_64.h"
-#elif defined(__arm__) || defined(__aarch64__)
-#include "xray_emulate_tsc.h"
-#else
-#error "Unsupported CPU Architecture"
-#endif /* Architecture-specific inline intrinsics */
-
#include "sanitizer_common/sanitizer_libc.h"
#include "xray/xray_records.h"
#include "xray_defs.h"
#include "xray_flags.h"
#include "xray_interface_internal.h"
+#include "xray_tsc.h"
+#include "xray_utils.h"
// __xray_InMemoryRawLog will use a thread-local aligned buffer capped to a
// certain size (32kb by default) and use it as if it were a circular buffer for
@@ -53,25 +45,6 @@ namespace __xray {
std::mutex LogMutex;
-static void retryingWriteAll(int Fd, char *Begin,
- char *End) XRAY_NEVER_INSTRUMENT {
- if (Begin == End)
- return;
- auto TotalBytes = std::distance(Begin, End);
- while (auto Written = write(Fd, Begin, TotalBytes)) {
- if (Written < 0) {
- if (errno == EINTR)
- continue; // Try again.
- Report("Failed to write; errno = %d\n", errno);
- return;
- }
- TotalBytes -= Written;
- if (TotalBytes == 0)
- break;
- Begin += Written;
- }
-}
-
class ThreadExitFlusher {
int Fd;
XRayRecord *Start;
@@ -102,61 +75,32 @@ public:
using namespace __xray;
-void PrintToStdErr(const char *Buffer) XRAY_NEVER_INSTRUMENT {
- fprintf(stderr, "%s", Buffer);
-}
-
static int __xray_OpenLogFile() XRAY_NEVER_INSTRUMENT {
- // FIXME: Figure out how to make this less stderr-dependent.
- SetPrintfAndReportCallback(PrintToStdErr);
- // Open a temporary file once for the log.
- static char TmpFilename[256] = {};
- static char TmpWildcardPattern[] = "XXXXXX";
- auto Argv = GetArgv();
- const char *Progname = Argv[0] == nullptr ? "(unknown)" : Argv[0];
- const char *LastSlash = internal_strrchr(Progname, '/');
-
- if (LastSlash != nullptr)
- Progname = LastSlash + 1;
-
- const int HalfLength = sizeof(TmpFilename) / 2 - sizeof(TmpWildcardPattern);
- int NeededLength = internal_snprintf(TmpFilename, sizeof(TmpFilename),
- "%.*s%.*s.%s",
- HalfLength, flags()->xray_logfile_base,
- HalfLength, Progname,
- TmpWildcardPattern);
- if (NeededLength > int(sizeof(TmpFilename))) {
- Report("XRay log file name too long (%d): %s\n", NeededLength, TmpFilename);
- return -1;
- }
- int Fd = mkstemp(TmpFilename);
- if (Fd == -1) {
- Report("XRay: Failed opening temporary file '%s'; not logging events.\n",
- TmpFilename);
+ int F = getLogFD();
+ if (F == -1)
return -1;
- }
- if (Verbosity())
- fprintf(stderr, "XRay: Log file in '%s'\n", TmpFilename);
-
// Since we're here, we get to write the header. We set it up so that the
// header will only be written once, at the start, and let the threads
// logging do writes which just append.
XRayFileHeader Header;
Header.Version = 1;
Header.Type = FileTypes::NAIVE_LOG;
- Header.CycleFrequency = __xray::cycleFrequency();
+ Header.CycleFrequency = probeRequiredCPUFeatures()
+ ? getTSCFrequency()
+ : __xray::NanosecondsPerSecond;
// FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc'
// before setting the values in the header.
Header.ConstantTSC = 1;
Header.NonstopTSC = 1;
- retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
+ retryingWriteAll(F, reinterpret_cast<char *>(&Header),
reinterpret_cast<char *>(&Header) + sizeof(Header));
- return Fd;
+ return F;
}
-void __xray_InMemoryRawLog(int32_t FuncId,
- XRayEntryType Type) XRAY_NEVER_INSTRUMENT {
+template <class RDTSC>
+void __xray_InMemoryRawLog(int32_t FuncId, XRayEntryType Type,
+ RDTSC ReadTSC) XRAY_NEVER_INSTRUMENT {
using Buffer =
std::aligned_storage<sizeof(XRayRecord), alignof(XRayRecord)>::type;
static constexpr size_t BuffLen = 1024;
@@ -173,7 +117,7 @@ void __xray_InMemoryRawLog(int32_t FuncId,
// through a pointer offset.
auto &R = reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer)[Offset];
R.RecordType = RecordTypes::NORMAL;
- R.TSC = __xray::readTSC(R.CPU);
+ R.TSC = ReadTSC(R.CPU);
R.TId = TId;
R.Type = Type;
R.FuncId = FuncId;
@@ -187,8 +131,32 @@ void __xray_InMemoryRawLog(int32_t FuncId,
}
}
-static auto Unused = [] {
+void __xray_InMemoryRawLogRealTSC(int32_t FuncId,
+ XRayEntryType Type) XRAY_NEVER_INSTRUMENT {
+ __xray_InMemoryRawLog(FuncId, Type, __xray::readTSC);
+}
+
+void __xray_InMemoryEmulateTSC(int32_t FuncId,
+ XRayEntryType Type) XRAY_NEVER_INSTRUMENT {
+ __xray_InMemoryRawLog(FuncId, Type, [](uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettimg(2) return %d, errno=%d.", result, int(errno));
+ TS = {0, 0};
+ }
+ CPU = 0;
+ return TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec;
+ });
+}
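+
+// Note: the emulated "TSC" produced by __xray_InMemoryEmulateTSC above is
+// simply CLOCK_REALTIME expressed in nanoseconds, with the CPU always reported
+// as 0, so record deltas are in nanoseconds rather than cycles when the
+// required CPU features are missing.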
+
+static auto UNUSED Unused = [] {
+ auto UseRealTSC = probeRequiredCPUFeatures();
+ if (!UseRealTSC)
+ Report("WARNING: Required CPU features missing for XRay instrumentation, "
+ "using emulation instead.\n");
if (flags()->xray_naive_log)
- __xray_set_handler(__xray_InMemoryRawLog);
+ __xray_set_handler(UseRealTSC ? __xray_InMemoryRawLogRealTSC
+ : __xray_InMemoryEmulateTSC);
return true;
}();
diff --git a/lib/xray/xray_interface.cc b/lib/xray/xray_interface.cc
index 20a2b66c4401..26ec161fe860 100644
--- a/lib/xray/xray_interface.cc
+++ b/lib/xray/xray_interface.cc
@@ -15,7 +15,6 @@
#include "xray_interface_internal.h"
-#include <atomic>
#include <cstdint>
#include <cstdio>
#include <errno.h>
@@ -35,12 +34,21 @@ static const int16_t cSledLength = 12;
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
+#elif SANITIZER_MIPS32
+static const int16_t cSledLength = 48;
+#elif SANITIZER_MIPS64
+static const int16_t cSledLength = 64;
+#elif defined(__powerpc64__)
+static const int16_t cSledLength = 8;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */
// This is the function to call when we encounter the entry or exit sleds.
-std::atomic<void (*)(int32_t, XRayEntryType)> XRayPatchedFunction{nullptr};
+__sanitizer::atomic_uintptr_t XRayPatchedFunction{0};
+
+// This is the function to call from the arg1-enabled sleds/trampolines.
+__sanitizer::atomic_uintptr_t XRayArgLogger{0};
// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will undo
// any successful mprotect(...) changes. This is used to make a page writeable
@@ -79,13 +87,18 @@ public:
} // namespace __xray
-extern std::atomic<bool> XRayInitialized;
-extern std::atomic<__xray::XRaySledMap> XRayInstrMap;
+extern __sanitizer::SpinMutex XRayInstrMapMutex;
+extern __sanitizer::atomic_uint8_t XRayInitialized;
+extern __xray::XRaySledMap XRayInstrMap;
int __xray_set_handler(void (*entry)(int32_t,
XRayEntryType)) XRAY_NEVER_INSTRUMENT {
- if (XRayInitialized.load(std::memory_order_acquire)) {
- __xray::XRayPatchedFunction.store(entry, std::memory_order_release);
+ if (__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire)) {
+
+ __sanitizer::atomic_store(&__xray::XRayPatchedFunction,
+ reinterpret_cast<uint64_t>(entry),
+ __sanitizer::memory_order_release);
return 1;
}
return 0;
@@ -95,7 +108,7 @@ int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
return __xray_set_handler(nullptr);
}
-std::atomic<bool> XRayPatching{false};
+__sanitizer::atomic_uint8_t XRayPatching{0};
using namespace __xray;
@@ -115,34 +128,37 @@ public:
};
template <class Function>
-CleanupInvoker<Function> ScopeCleanup(Function Fn) XRAY_NEVER_INSTRUMENT {
+CleanupInvoker<Function> scopeCleanup(Function Fn) XRAY_NEVER_INSTRUMENT {
return CleanupInvoker<Function>{Fn};
}
-// ControlPatching implements the common internals of the patching/unpatching
+// controlPatching implements the common internals of the patching/unpatching
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
-XRayPatchingStatus ControlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
- if (!XRayInitialized.load(std::memory_order_acquire))
+XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
+ if (!__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire))
return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
- static bool NotPatching = false;
- if (!XRayPatching.compare_exchange_strong(NotPatching, true,
- std::memory_order_acq_rel,
- std::memory_order_acquire)) {
+ uint8_t NotPatching = false;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
return XRayPatchingStatus::ONGOING; // Already patching.
- }
- bool PatchingSuccess = false;
- auto XRayPatchingStatusResetter = ScopeCleanup([&PatchingSuccess] {
- if (!PatchingSuccess) {
- XRayPatching.store(false, std::memory_order_release);
- }
+ uint8_t PatchingSuccess = false;
+ auto XRayPatchingStatusResetter = scopeCleanup([&PatchingSuccess] {
+ if (!PatchingSuccess)
+ __sanitizer::atomic_store(&XRayPatching, false,
+ __sanitizer::memory_order_release);
});
// Step 1: Compute the function id, as a unique identifier per function in the
// instrumentation map.
- XRaySledMap InstrMap = XRayInstrMap.load(std::memory_order_acquire);
+ XRaySledMap InstrMap;
+ {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
if (InstrMap.Entries == 0)
return XRayPatchingStatus::NOT_INITIALIZED;
@@ -179,7 +195,7 @@ XRayPatchingStatus ControlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
bool Success = false;
switch (Sled.Kind) {
case XRayEntryType::ENTRY:
- Success = patchFunctionEntry(Enable, FuncId, Sled);
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
break;
case XRayEntryType::EXIT:
Success = patchFunctionExit(Enable, FuncId, Sled);
@@ -187,21 +203,39 @@ XRayPatchingStatus ControlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
case XRayEntryType::TAIL:
Success = patchFunctionTailExit(Enable, FuncId, Sled);
break;
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
+ break;
default:
Report("Unsupported sled kind: %d\n", int(Sled.Kind));
continue;
}
(void)Success;
}
- XRayPatching.store(false, std::memory_order_release);
+ __sanitizer::atomic_store(&XRayPatching, false,
+ __sanitizer::memory_order_release);
PatchingSuccess = true;
return XRayPatchingStatus::SUCCESS;
}
XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
- return ControlPatching(true);
+ return controlPatching(true);
}
XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
- return ControlPatching(false);
+ return controlPatching(false);
+}
+
+int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
+ if (!__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire))
+ return 0;
+
+ // A relaxed write might not be visible even if the current thread gets
+ // scheduled on a different CPU/NUMA node. We need to wait for everyone to
+ // have this handler installed for consistency of collected data across CPUs.
+ __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(Handler),
+ __sanitizer::memory_order_release);
+ return 1;
}
+int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
diff --git a/lib/xray/xray_interface_internal.h b/lib/xray/xray_interface_internal.h
index a8434a699f86..0e3a251f3ad7 100644
--- a/lib/xray/xray_interface_internal.h
+++ b/lib/xray/xray_interface_internal.h
@@ -48,10 +48,8 @@ struct XRaySledMap {
size_t Entries;
};
-uint64_t cycleFrequency();
-
bool patchFunctionEntry(bool Enable, uint32_t FuncId,
- const XRaySledEntry &Sled);
+ const XRaySledEntry &Sled, void (*Trampoline)());
bool patchFunctionExit(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled);
bool patchFunctionTailExit(bool Enable, uint32_t FuncId,
const XRaySledEntry &Sled);
@@ -64,6 +62,7 @@ extern "C" {
extern void __xray_FunctionEntry();
extern void __xray_FunctionExit();
extern void __xray_FunctionTailExit();
+extern void __xray_ArgLoggerEntry();
}
#endif
diff --git a/lib/xray/xray_log_interface.cc b/lib/xray/xray_log_interface.cc
new file mode 100644
index 000000000000..ffed601c05c6
--- /dev/null
+++ b/lib/xray/xray_log_interface.cc
@@ -0,0 +1,59 @@
+//===-- xray_log_interface.cc ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#include "xray/xray_log_interface.h"
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "xray/xray_interface.h"
+#include "xray_defs.h"
+
+#include <memory>
+
+__sanitizer::SpinMutex XRayImplMutex;
+std::unique_ptr<XRayLogImpl> GlobalXRayImpl;
+
+void __xray_set_log_impl(XRayLogImpl Impl) XRAY_NEVER_INSTRUMENT {
+ if (Impl.log_init == nullptr || Impl.log_finalize == nullptr ||
+ Impl.handle_arg0 == nullptr || Impl.flush_log == nullptr) {
+ __sanitizer::SpinMutexLock Guard(&XRayImplMutex);
+ GlobalXRayImpl.reset();
+ return;
+ }
+
+ __sanitizer::SpinMutexLock Guard(&XRayImplMutex);
+ GlobalXRayImpl.reset(new XRayLogImpl);
+ *GlobalXRayImpl = Impl;
+}
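+
+// For example (hypothetical function names), a logging backend could register
+// itself with:
+//   XRayLogImpl Impl;
+//   Impl.log_init = myLoggingInit;
+//   Impl.log_finalize = myLoggingFinalize;
+//   Impl.handle_arg0 = myLoggingHandleArg0;
+//   Impl.flush_log = myLoggingFlush;
+//   __xray_set_log_impl(Impl);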
+
+XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers,
+ void *Args,
+ size_t ArgsSize) XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ return GlobalXRayImpl->log_init(BufferSize, MaxBuffers, Args, ArgsSize);
+}
+
+XRayLogInitStatus __xray_log_finalize() XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ return GlobalXRayImpl->log_finalize();
+}
+
+XRayLogFlushStatus __xray_log_flushLog() XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ return GlobalXRayImpl->flush_log();
+}
diff --git a/lib/xray/xray_mips.cc b/lib/xray/xray_mips.cc
new file mode 100644
index 000000000000..c8ff39936c5a
--- /dev/null
+++ b/lib/xray/xray_mips.cc
@@ -0,0 +1,158 @@
+//===-- xray_mips.cc --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of MIPS-specific routines (32-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum PatchOpcodes : uint32_t {
+ PO_ADDIU = 0x24000000, // addiu rt, rs, imm
+ PO_SW = 0xAC000000, // sw rt, offset(sp)
+ PO_LUI = 0x3C000000, // lui rs, %hi(address)
+ PO_ORI = 0x34000000, // ori rt, rs, %lo(address)
+ PO_JALR = 0x0000F809, // jalr rs
+ PO_LW = 0x8C000000, // lw rt, offset(address)
+ PO_B44 = 0x1000000b, // b #44
+ PO_NOP = 0x0, // nop
+};
+
+enum RegNum : uint32_t {
+ RN_T0 = 0x8,
+ RN_T9 = 0x19,
+ RN_RA = 0x1F,
+ RN_SP = 0x1D,
+};
+
+inline static uint32_t encodeInstruction(uint32_t Opcode, uint32_t Rs,
+ uint32_t Rt,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Opcode | Rs << 21 | Rt << 16 | Imm);
+}
+
+inline static uint32_t
+encodeSpecialInstruction(uint32_t Opcode, uint32_t Rs, uint32_t Rt, uint32_t Rd,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Rs << 21 | Rt << 16 | Rd << 11 | Imm << 6 | Opcode);
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // xray_sled_n:
+ // B .tmpN
+ // 11 NOPs (44 bytes)
+ // .tmpN
+ // ADDIU T9, T9, 44
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n (32-bit):
+ // addiu sp, sp, -8 ;create stack frame
+ // nop
+ // sw ra, 4(sp) ;save return address
+ // sw t9, 0(sp) ;save register t9
+ // lui t9, %hi(__xray_FunctionEntry/Exit)
+ // ori t9, t9, %lo(__xray_FunctionEntry/Exit)
+ // lui t0, %hi(function_id)
+ // jalr t9 ;call Tracing hook
+ // ori t0, t0, %lo(function_id) ;pass function id (delay slot)
+ // lw t9, 0(sp) ;restore register t9
+ // lw ra, 4(sp) ;restore return address
+ // addiu sp, sp, 8 ;delete stack frame
+ //
+ // We add 44 bytes to t9 because we want to adjust the function pointer to
+ // the actual start of the function, i.e. the address just after the nop
+ // sled. We do this because the gp displacement relocation is emitted at the
+ // start of the function (after the nop sled), and to correctly calculate the
+ // global offset table address, t9 must hold the address of the instruction
+ // containing the gp displacement relocation.
+ // FIXME: Is this correct for the static relocation model?
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable|==false, we set back the first instruction in the sled to be
+ // B #44
+
+ if (Enable) {
+ uint32_t LoTracingHookAddr = reinterpret_cast<int32_t>(TracingHook) & 0xffff;
+ uint32_t HiTracingHookAddr =
+ (reinterpret_cast<int32_t>(TracingHook) >> 16) & 0xffff;
+ uint32_t LoFunctionID = FuncId & 0xffff;
+ uint32_t HiFunctionID = (FuncId >> 16) & 0xffff;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 8) = encodeInstruction(
+ PatchOpcodes::PO_SW, RegNum::RN_SP, RegNum::RN_RA, 0x4);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 12) = encodeInstruction(
+ PatchOpcodes::PO_SW, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 16) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9, HiTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 20) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, LoTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 24) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0, HiFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 28) = encodeSpecialInstruction(
+ PatchOpcodes::PO_JALR, RegNum::RN_T9, 0x0, RegNum::RN_RA, 0X0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 32) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T0, RegNum::RN_T0, LoFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 36) = encodeInstruction(
+ PatchOpcodes::PO_LW, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 40) = encodeInstruction(
+ PatchOpcodes::PO_LW, RegNum::RN_SP, RegNum::RN_RA, 0x4);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 44) = encodeInstruction(
+ PatchOpcodes::PO_ADDIU, RegNum::RN_SP, RegNum::RN_SP, 0x8);
+ uint32_t CreateStackSpaceInstr = encodeInstruction(
+ PatchOpcodes::PO_ADDIU, RegNum::RN_SP, RegNum::RN_SP, 0xFFF8);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ uint32_t(CreateStackSpaceInstr), std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ uint32_t(PatchOpcodes::PO_B44), std::memory_order_release);
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
diff --git a/lib/xray/xray_mips64.cc b/lib/xray/xray_mips64.cc
new file mode 100644
index 000000000000..21136848c8af
--- /dev/null
+++ b/lib/xray/xray_mips64.cc
@@ -0,0 +1,167 @@
+//===-- xray_mips64.cc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of MIPS64-specific routines.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum PatchOpcodes : uint32_t {
+ PO_DADDIU = 0x64000000, // daddiu rt, rs, imm
+ PO_SD = 0xFC000000, // sd rt, base(offset)
+ PO_LUI = 0x3C000000, // lui rt, imm
+ PO_ORI = 0x34000000, // ori rt, rs, imm
+ PO_DSLL = 0x00000038, // dsll rd, rt, sa
+ PO_JALR = 0x00000009, // jalr rs
+ PO_LD = 0xDC000000, // ld rt, base(offset)
+ PO_B60 = 0x1000000f, // b #60
+ PO_NOP = 0x0, // nop
+};
+
+enum RegNum : uint32_t {
+ RN_T0 = 0xC,
+ RN_T9 = 0x19,
+ RN_RA = 0x1F,
+ RN_SP = 0x1D,
+};
+
+inline static uint32_t encodeInstruction(uint32_t Opcode, uint32_t Rs,
+ uint32_t Rt,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Opcode | Rs << 21 | Rt << 16 | Imm);
+}
+
+inline static uint32_t
+encodeSpecialInstruction(uint32_t Opcode, uint32_t Rs, uint32_t Rt, uint32_t Rd,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Rs << 21 | Rt << 16 | Rd << 11 | Imm << 6 | Opcode);
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // xray_sled_n:
+ // B .tmpN
+ // 15 NOPs (60 bytes)
+ // .tmpN
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n (64-bit):
+ // daddiu sp, sp, -16 ;create stack frame
+ // nop
+ // sd ra, 8(sp) ;save return address
+ // sd t9, 0(sp) ;save register t9
+ // lui t9, %highest(__xray_FunctionEntry/Exit)
+ // ori t9, t9, %higher(__xray_FunctionEntry/Exit)
+ // dsll t9, t9, 16
+ // ori t9, t9, %hi(__xray_FunctionEntry/Exit)
+ // dsll t9, t9, 16
+ // ori t9, t9, %lo(__xray_FunctionEntry/Exit)
+ // lui t0, %hi(function_id)
+ // jalr t9 ;call Tracing hook
+ // ori t0, t0, %lo(function_id) ;pass function id (delay slot)
+ // ld t9, 0(sp) ;restore register t9
+ // ld ra, 8(sp) ;restore return address
+ // daddiu sp, sp, 16 ;delete stack frame
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable|==false, we set back the first instruction in the sled to be
+ // B #60
+
+ if (Enable) {
+ uint32_t LoTracingHookAddr =
+ reinterpret_cast<int64_t>(TracingHook) & 0xffff;
+ uint32_t HiTracingHookAddr = (reinterpret_cast<int64_t>(TracingHook) >> 16) & 0xffff;
+ uint32_t HigherTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 32) & 0xffff;
+ uint32_t HighestTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 48) & 0xffff;
+ uint32_t LoFunctionID = FuncId & 0xffff;
+ uint32_t HiFunctionID = (FuncId >> 16) & 0xffff;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 8) = encodeInstruction(
+ PatchOpcodes::PO_SD, RegNum::RN_SP, RegNum::RN_RA, 0x8);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 12) = encodeInstruction(
+ PatchOpcodes::PO_SD, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 16) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9, HighestTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 20) =
+ encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9,
+ HigherTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 24) = encodeSpecialInstruction(
+ PatchOpcodes::PO_DSLL, 0x0, RegNum::RN_T9, RegNum::RN_T9, 0x10);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 28) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, HiTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 32) = encodeSpecialInstruction(
+ PatchOpcodes::PO_DSLL, 0x0, RegNum::RN_T9, RegNum::RN_T9, 0x10);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 36) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, LoTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 40) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0, HiFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 44) = encodeSpecialInstruction(
+ PatchOpcodes::PO_JALR, RegNum::RN_T9, 0x0, RegNum::RN_RA, 0X0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 48) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T0, RegNum::RN_T0, LoFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 52) = encodeInstruction(
+ PatchOpcodes::PO_LD, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 56) = encodeInstruction(
+ PatchOpcodes::PO_LD, RegNum::RN_SP, RegNum::RN_RA, 0x8);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 60) = encodeInstruction(
+ PatchOpcodes::PO_DADDIU, RegNum::RN_SP, RegNum::RN_SP, 0x10);
+ uint32_t CreateStackSpace = encodeInstruction(
+ PatchOpcodes::PO_DADDIU, RegNum::RN_SP, RegNum::RN_SP, 0xfff0);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ CreateStackSpace, std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ uint32_t(PatchOpcodes::PO_B60), std::memory_order_release);
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
diff --git a/lib/xray/xray_powerpc64.cc b/lib/xray/xray_powerpc64.cc
new file mode 100644
index 000000000000..6a7554cfc1b6
--- /dev/null
+++ b/lib/xray/xray_powerpc64.cc
@@ -0,0 +1,100 @@
+//===-- xray_powerpc64.cc ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of powerpc64 and powerpc64le routines.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include "xray_utils.h"
+#include <atomic>
+#include <cassert>
+#include <cstring>
+
+#ifndef __LITTLE_ENDIAN__
+#error powerpc64 big endian is not supported for now.
+#endif
+
+namespace {
+
+constexpr unsigned long long JumpOverInstNum = 7;
+
+void clearCache(void *Addr, size_t Len) {
+ const size_t LineSize = 32;
+
+ const intptr_t Mask = ~(LineSize - 1);
+ const intptr_t StartLine = ((intptr_t)Addr) & Mask;
+ const intptr_t EndLine = ((intptr_t)Addr + Len + LineSize - 1) & Mask;
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("dcbf 0, %0" : : "r"(Line));
+ asm volatile("sync");
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("icbi 0, %0" : : "r"(Line));
+ asm volatile("isync");
+}
+
+} // namespace
+
+extern "C" void __clear_cache(void *start, void *end);
+
+namespace __xray {
+
+bool patchFunctionEntry(const bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ if (Enable) {
+ // lis 0, FuncId[16..31]
+ // li 0, FuncId[0..15]
+ *reinterpret_cast<uint64_t *>(Sled.Address) =
+ (0x3c000000ull + (FuncId >> 16)) +
+ ((0x60000000ull + (FuncId & 0xffff)) << 32);
+ } else {
+ // b +JumpOverInstNum instructions.
+ *reinterpret_cast<uint32_t *>(Sled.Address) =
+ 0x48000000ull + (JumpOverInstNum << 2);
+ }
+ clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+ return true;
+}
+
+bool patchFunctionExit(const bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ if (Enable) {
+ // lis 0, FuncId[16..31]
+ // li 0, FuncId[0..15]
+ *reinterpret_cast<uint64_t *>(Sled.Address) =
+ (0x3c000000ull + (FuncId >> 16)) +
+ ((0x60000000ull + (FuncId & 0xffff)) << 32);
+ } else {
+    // Copy the blr/b instruction located JumpOverInstNum instructions past the
+    // sled back over the start of the sled, so the disabled sled branches or
+    // returns immediately.
+ *reinterpret_cast<uint32_t *>(Sled.Address) =
+ *(reinterpret_cast<uint32_t *>(Sled.Address) + JumpOverInstNum);
+ }
+ clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+ return true;
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchFunctionExit(Enable, FuncId, Sled);
+}
+
+// FIXME: Maybe implement this better?
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
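
As a side note on the patching scheme above: the enabled sled packs two 32-bit instructions (loading the function ID into r0 in two halves) into a single 64-bit store, while the disabled sled is a single branch over the instrumentation. The standalone C++ sketch below is illustrative only and not part of the runtime; the FuncId value and the printing are assumptions made for the example, and it simply reproduces the packing used by patchFunctionEntry/patchFunctionExit for powerpc64le.

// Illustrative sketch only: mirrors the instruction packing above.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t FuncId = 0x12345;               // example function ID (assumed)
  const unsigned long long JumpOverInstNum = 7;  // same constant as the runtime

  // Enabled sled: two instructions written with one 64-bit store. On
  // little-endian powerpc64 the low word is the first instruction executed.
  uint64_t Enabled = (0x3c000000ull + (FuncId >> 16)) +            // lis 0, hi16
                     ((0x60000000ull + (FuncId & 0xffff)) << 32);  // low 16 bits
  // Disabled sled: an unconditional branch over the instrumentation code.
  uint32_t Disabled = 0x48000000u + uint32_t(JumpOverInstNum << 2);

  printf("enabled word : 0x%016llx\n", (unsigned long long)Enabled);
  printf("  first insn : 0x%08x\n", uint32_t(Enabled & 0xffffffffu));
  printf("  second insn: 0x%08x\n", uint32_t(Enabled >> 32));
  printf("disabled insn: 0x%08x (b +%llu instructions)\n", Disabled,
         JumpOverInstNum);
  return 0;
}
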
diff --git a/lib/xray/xray_powerpc64.inc b/lib/xray/xray_powerpc64.inc
new file mode 100644
index 000000000000..c1a1bac1ad0a
--- /dev/null
+++ b/lib/xray/xray_powerpc64.inc
@@ -0,0 +1,37 @@
+//===-- xray_powerpc64.inc --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cstdint>
+#include <mutex>
+#include <sys/platform/ppc.h>
+
+#include "xray_defs.h"
+
+namespace __xray {
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ CPU = 0;
+ return __ppc_get_timebase();
+}
+
+inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ static std::mutex M;
+ std::lock_guard<std::mutex> Guard(M);
+ return __ppc_get_timebase_freq();
+}
+
+inline bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT {
+ return true;
+}
+
+} // namespace __xray
diff --git a/lib/xray/xray_trampoline_AArch64.S b/lib/xray/xray_trampoline_AArch64.S
index f1a471c041f9..4d1b04fb7d90 100644
--- a/lib/xray/xray_trampoline_AArch64.S
+++ b/lib/xray/xray_trampoline_AArch64.S
@@ -1,3 +1,5 @@
+#include "../builtins/assembly.h"
+
.text
/* The variable containing the handler function pointer */
.global _ZN6__xray19XRayPatchedFunctionE
@@ -87,3 +89,56 @@ FunctionExit_restore:
LDP X3, X4, [SP], #16
LDP X1, X2, [SP], #16
RET
+
+ /* Word-aligned function entry point */
+ .p2align 2
+ /* Let C/C++ see the symbol */
+ .global __xray_FunctionTailExit
+ .type __xray_FunctionTailExit, %function
+    /* In C++ this is extern "C" void __xray_FunctionTailExit(uint32_t FuncId),
+       with FuncId passed in the W0 register. */
+__xray_FunctionTailExit:
+    /* Move the return address beyond the end of the sled data. The 12 bytes of
+       data are inserted in the code of the runtime patch, between the call
+       instruction and the instruction returned to. The data contains the
+       32-bit instrumented function ID and the 64-bit address of the current
+       trampoline. */
+ ADD X30, X30, #12
+ /* Push the registers which may be modified by the handler function */
+ STP X1, X2, [SP, #-16]!
+ STP X3, X4, [SP, #-16]!
+ STP X5, X6, [SP, #-16]!
+ STP X7, X30, [SP, #-16]!
+ /* Push the parameters of the tail called function */
+ STP Q0, Q1, [SP, #-32]!
+ STP Q2, Q3, [SP, #-32]!
+ STP Q4, Q5, [SP, #-32]!
+ STP Q6, Q7, [SP, #-32]!
+ /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+ LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+ /* Load the handler function pointer into X2 */
+ LDR X2, [X1]
+ /* Handler address is nullptr if handler is not set */
+ CMP X2, #0
+ BEQ FunctionTailExit_restore
+    /* Function ID is already in W0 (the first parameter).
+       X1=2 would mean that we are tracing a tail exit event, but until the
+       logging part of XRay is ready we pretend that a normal function exit
+       happens here, so we pass the handler code 1. */
+ MOV X1, #1
+ /* Call the handler with 2 parameters in W0 and X1 */
+ BLR X2
+FunctionTailExit_restore:
+ /* Pop the parameters of the tail called function */
+ LDP Q6, Q7, [SP], #32
+ LDP Q4, Q5, [SP], #32
+ LDP Q2, Q3, [SP], #32
+ LDP Q0, Q1, [SP], #32
+ /* Pop the registers which may be modified by the handler function */
+ LDP X7, X30, [SP], #16
+ LDP X5, X6, [SP], #16
+ LDP X3, X4, [SP], #16
+ LDP X1, X2, [SP], #16
+ RET
+
+NO_EXEC_STACK_DIRECTIVE
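
For context on what these trampolines eventually call: the handler pointer they load (_ZN6__xray19XRayPatchedFunctionE) is installed through the public XRay API. Below is a minimal, hedged C++ sketch of such a handler, assuming the entry points and enum values declared in xray/xray_interface.h (__xray_set_handler, __xray_patch, XRayEntryType); as the comments above note, tail exits are currently reported to the handler as ordinary exits (code 1).

// Minimal handler sketch (assumes the public xray/xray_interface.h API).
#include <cstdint>
#include <cstdio>
#include <xray/xray_interface.h>

void MyHandler(int32_t FuncId, XRayEntryType Type) {
  // Type is 0 for entries and 1 for exits; tail exits also arrive as 1 for now.
  printf("%s %d\n", Type == XRayEntryType::ENTRY ? "enter" : "exit", FuncId);
}

int main() {
  __xray_set_handler(MyHandler);   // stored in XRayPatchedFunction
  __xray_patch();                  // rewrite the sleds to call the trampolines
  // ... instrumented code runs here ...
  __xray_unpatch();
  __xray_remove_handler();
  return 0;
}
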
diff --git a/lib/xray/xray_trampoline_arm.S b/lib/xray/xray_trampoline_arm.S
index 5d87c971364d..71dbee65d825 100644
--- a/lib/xray/xray_trampoline_arm.S
+++ b/lib/xray/xray_trampoline_arm.S
@@ -1,8 +1,11 @@
+#include "../builtins/assembly.h"
+
.syntax unified
.arch armv6t2
.fpu vfpv2
.code 32
.global _ZN6__xray19XRayPatchedFunctionE
+
@ Word-aligned function entry point
.p2align 2
@ Let C/C++ see the symbol
@@ -63,3 +66,37 @@ FunctionExit_restore:
@ Restore the floating-point return value of the instrumented function
VPOP {d0}
POP {r1-r3,pc}
+
+ @ Word-aligned function entry point
+ .p2align 2
+ @ Let C/C++ see the symbol
+ .global __xray_FunctionTailExit
+ @ It preserves all registers except r0, r12(ip), r14(lr) and r15(pc)
+ @ Assume that "q" part of the floating-point registers is not used
+ @ for passing parameters to C/C++ functions.
+ .type __xray_FunctionTailExit, %function
+    @ In C++ this is extern "C" void __xray_FunctionTailExit(uint32_t FuncId)
+    @ with FuncId passed in the r0 register.
+__xray_FunctionTailExit:
+ PUSH {r1-r3,lr}
+ @ Save floating-point parameters of the instrumented function
+ VPUSH {d0-d7}
+ MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
+ MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
+ LDR r2, [r1]
+ @ Handler address is nullptr if handler is not set
+ CMP r2, #0
+ BEQ FunctionTailExit_restore
+ @ Function ID is already in r0 (the first parameter).
+    @ r1=2 would mean that we are tracing a tail exit event, but until the
+    @ logging part of XRay is ready we pretend that a normal function exit
+    @ happens here, so we give the handler code 1.
+ MOV r1, #1
+ @ Call the handler with 2 parameters in r0 and r1
+ BLX r2
+FunctionTailExit_restore:
+ @ Restore floating-point parameters of the instrumented function
+ VPOP {d0-d7}
+ POP {r1-r3,pc}
+
+NO_EXEC_STACK_DIRECTIVE
diff --git a/lib/xray/xray_trampoline_mips.S b/lib/xray/xray_trampoline_mips.S
new file mode 100644
index 000000000000..39a1a3af35c7
--- /dev/null
+++ b/lib/xray/xray_trampoline_mips.S
@@ -0,0 +1,110 @@
+//===-- xray_trampoline_mips.S ----------------------------------*- ASM -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the MIPS-specific assembly for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .file "xray_trampoline_mips.S"
+ .globl __xray_FunctionEntry
+ .p2align 2
+ .type __xray_FunctionEntry,@function
+__xray_FunctionEntry:
+ .cfi_startproc
+ .set noreorder
+ .cpload $t9
+ .set reorder
+ // Save argument registers before doing any actual work
+ .cfi_def_cfa_offset 36
+ addiu $sp, $sp, -36
+ sw $ra, 32($sp)
+ .cfi_offset 31, -4
+ sw $a3, 28($sp)
+ sw $a2, 24($sp)
+ sw $a1, 20($sp)
+ sw $a0, 16($sp)
+ sdc1 $f14, 8($sp)
+ sdc1 $f12, 0($sp)
+
+ la $t9, _ZN6__xray19XRayPatchedFunctionE
+ lw $t9, 0($t9)
+
+ beqz $t9, FunctionEntry_restore
+
+ // a1=0 means that we are tracing an entry event
+ move $a1, $zero
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionEntry_restore:
+ // Restore argument registers
+ ldc1 $f12, 0($sp)
+ ldc1 $f14, 8($sp)
+ lw $a0, 16($sp)
+ lw $a1, 20($sp)
+ lw $a2, 24($sp)
+ lw $a3, 28($sp)
+ lw $ra, 32($sp)
+ addiu $sp, $sp, 36
+ jr $ra
+FunctionEntry_end:
+ .size __xray_FunctionEntry, FunctionEntry_end-__xray_FunctionEntry
+ .cfi_endproc
+
+ .text
+ .globl __xray_FunctionExit
+ .p2align 2
+ .type __xray_FunctionExit,@function
+__xray_FunctionExit:
+ .cfi_startproc
+ .set noreorder
+ .cpload $t9
+ .set reorder
+ // Save return registers before doing any actual work.
+ .cfi_def_cfa_offset 36
+ addiu $sp, $sp, -36
+ sw $ra, 32($sp)
+ .cfi_offset 31, -4
+ sw $a1, 28($sp)
+ sw $a0, 24($sp)
+ sw $v1, 20($sp)
+ sw $v0, 16($sp)
+ sdc1 $f2, 8($sp)
+ sdc1 $f0, 0($sp)
+
+ la $t9, _ZN6__xray19XRayPatchedFunctionE
+ lw $t9, 0($t9)
+
+ beqz $t9, FunctionExit_restore
+
+ // a1=1 means that we are tracing an exit event
+ li $a1, 1
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionExit_restore:
+ // Restore return registers
+ ldc1 $f0, 0($sp)
+ ldc1 $f2, 8($sp)
+ lw $v0, 16($sp)
+ lw $v1, 20($sp)
+ lw $a0, 24($sp)
+ lw $a1, 28($sp)
+ lw $ra, 32($sp)
+ addiu $sp, $sp, 36
+ jr $ra
+
+FunctionExit_end:
+ .size __xray_FunctionExit, FunctionExit_end-__xray_FunctionExit
+ .cfi_endproc
diff --git a/lib/xray/xray_trampoline_mips64.S b/lib/xray/xray_trampoline_mips64.S
new file mode 100644
index 000000000000..9cbc7e181d9d
--- /dev/null
+++ b/lib/xray/xray_trampoline_mips64.S
@@ -0,0 +1,136 @@
+//===-- xray_trampoline_mips64.S --------------------------------*- ASM -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the MIPS64-specific assembly for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .file "xray_trampoline_mips64.S"
+ .globl __xray_FunctionEntry
+ .p2align 2
+ .type __xray_FunctionEntry,@function
+__xray_FunctionEntry:
+ .cfi_startproc
+ // Save argument registers before doing any actual work.
+ .cfi_def_cfa_offset 144
+ daddiu $sp, $sp, -144
+ sd $ra, 136($sp)
+ .cfi_offset 31, -8
+ sd $gp, 128($sp)
+ sd $a7, 120($sp)
+ sd $a6, 112($sp)
+ sd $a5, 104($sp)
+ sd $a4, 96($sp)
+ sd $a3, 88($sp)
+ sd $a2, 80($sp)
+ sd $a1, 72($sp)
+ sd $a0, 64($sp)
+ sdc1 $f19, 56($sp)
+ sdc1 $f18, 48($sp)
+ sdc1 $f17, 40($sp)
+ sdc1 $f16, 32($sp)
+ sdc1 $f15, 24($sp)
+ sdc1 $f14, 16($sp)
+ sdc1 $f13, 8($sp)
+ sdc1 $f12, 0($sp)
+
+ lui $gp, %hi(%neg(%gp_rel(__xray_FunctionEntry)))
+ daddu $gp, $gp, $t9
+ daddiu $gp ,$gp, %lo(%neg(%gp_rel(__xray_FunctionEntry)))
+
+ dla $t9, _ZN6__xray19XRayPatchedFunctionE
+ ld $t9, 0($t9)
+
+ beqz $t9, FunctionEntry_restore
+
+ // a1=0 means that we are tracing an entry event
+ move $a1, $zero
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionEntry_restore:
+ // Restore argument registers
+ ldc1 $f12, 0($sp)
+ ldc1 $f13, 8($sp)
+ ldc1 $f14, 16($sp)
+ ldc1 $f15, 24($sp)
+ ldc1 $f16, 32($sp)
+ ldc1 $f17, 40($sp)
+ ldc1 $f18, 48($sp)
+ ldc1 $f19, 56($sp)
+ ld $a0, 64($sp)
+ ld $a1, 72($sp)
+ ld $a2, 80($sp)
+ ld $a3, 88($sp)
+ ld $a4, 96($sp)
+ ld $a5, 104($sp)
+ ld $a6, 112($sp)
+ ld $a7, 120($sp)
+ ld $gp, 128($sp)
+ ld $ra, 136($sp)
+ daddiu $sp, $sp, 144
+ jr $ra
+FunctionEntry_end:
+ .size __xray_FunctionEntry, FunctionEntry_end-__xray_FunctionEntry
+ .cfi_endproc
+
+ .text
+ .globl __xray_FunctionExit
+ .p2align 2
+ .type __xray_FunctionExit,@function
+__xray_FunctionExit:
+ .cfi_startproc
+ // Save return registers before doing any actual work.
+ .cfi_def_cfa_offset 64
+ daddiu $sp, $sp, -64
+ sd $ra, 56($sp)
+ .cfi_offset 31, -8
+ sd $gp, 48($sp)
+ sd $a0, 40($sp)
+ sd $v1, 32($sp)
+ sd $v0, 24($sp)
+ sdc1 $f2, 16($sp)
+ sdc1 $f1, 8($sp)
+ sdc1 $f0, 0($sp)
+
+ lui $gp, %hi(%neg(%gp_rel(__xray_FunctionExit)))
+ daddu $gp, $gp, $t9
+ daddiu $gp ,$gp, %lo(%neg(%gp_rel(__xray_FunctionExit)))
+
+ dla $t9, _ZN6__xray19XRayPatchedFunctionE
+ ld $t9, 0($t9)
+
+ beqz $t9, FunctionExit_restore
+
+ // a1=1 means that we are tracing an exit event
+ li $a1, 1
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionExit_restore:
+ // Restore return registers
+ ldc1 $f0, 0($sp)
+ ldc1 $f1, 8($sp)
+ ldc1 $f2, 16($sp)
+ ld $v0, 24($sp)
+ ld $v1, 32($sp)
+ ld $a0, 40($sp)
+ ld $gp, 48($sp)
+ ld $ra, 56($sp)
+ daddiu $sp, $sp, 64
+ jr $ra
+
+FunctionExit_end:
+ .size __xray_FunctionExit, FunctionExit_end-__xray_FunctionExit
+ .cfi_endproc
diff --git a/lib/xray/xray_trampoline_powerpc64.cc b/lib/xray/xray_trampoline_powerpc64.cc
new file mode 100644
index 000000000000..878c46930fee
--- /dev/null
+++ b/lib/xray/xray_trampoline_powerpc64.cc
@@ -0,0 +1,15 @@
+#include <atomic>
+#include <xray/xray_interface.h>
+
+namespace __xray {
+
+extern std::atomic<void (*)(int32_t, XRayEntryType)> XRayPatchedFunction;
+
+// We implement this in C++ instead of assembly to avoid dealing with the TOC
+// by hand.
+void CallXRayPatchedFunction(int32_t FuncId, XRayEntryType Type) {
+ auto fptr = __xray::XRayPatchedFunction.load();
+ if (fptr != nullptr)
+ (*fptr)(FuncId, Type);
+}
+
+} // namespace __xray
diff --git a/lib/xray/xray_trampoline_powerpc64_asm.S b/lib/xray/xray_trampoline_powerpc64_asm.S
new file mode 100644
index 000000000000..d43231ead22c
--- /dev/null
+++ b/lib/xray/xray_trampoline_powerpc64_asm.S
@@ -0,0 +1,171 @@
+ .text
+ .abiversion 2
+ .globl __xray_FunctionEntry
+ .p2align 4
+__xray_FunctionEntry:
+ std 0, 16(1)
+ stdu 1, -408(1)
+# Spill r3-r10, f1-f13, and vsr34-vsr45, which are parameter registers.
+# If this appears to be slow, the caller needs to pass in the number of
+# general-purpose, floating-point, and vector parameters, so that we only
+# spill the live ones.
+ std 3, 32(1)
+ ld 3, 400(1) # FuncId
+ std 4, 40(1)
+ std 5, 48(1)
+ std 6, 56(1)
+ std 7, 64(1)
+ std 8, 72(1)
+ std 9, 80(1)
+ std 10, 88(1)
+ addi 4, 1, 96
+ stxsdx 1, 0, 4
+ addi 4, 1, 104
+ stxsdx 2, 0, 4
+ addi 4, 1, 112
+ stxsdx 3, 0, 4
+ addi 4, 1, 120
+ stxsdx 4, 0, 4
+ addi 4, 1, 128
+ stxsdx 5, 0, 4
+ addi 4, 1, 136
+ stxsdx 6, 0, 4
+ addi 4, 1, 144
+ stxsdx 7, 0, 4
+ addi 4, 1, 152
+ stxsdx 8, 0, 4
+ addi 4, 1, 160
+ stxsdx 9, 0, 4
+ addi 4, 1, 168
+ stxsdx 10, 0, 4
+ addi 4, 1, 176
+ stxsdx 11, 0, 4
+ addi 4, 1, 184
+ stxsdx 12, 0, 4
+ addi 4, 1, 192
+ stxsdx 13, 0, 4
+ addi 4, 1, 200
+ stxvd2x 34, 0, 4
+ addi 4, 1, 216
+ stxvd2x 35, 0, 4
+ addi 4, 1, 232
+ stxvd2x 36, 0, 4
+ addi 4, 1, 248
+ stxvd2x 37, 0, 4
+ addi 4, 1, 264
+ stxvd2x 38, 0, 4
+ addi 4, 1, 280
+ stxvd2x 39, 0, 4
+ addi 4, 1, 296
+ stxvd2x 40, 0, 4
+ addi 4, 1, 312
+ stxvd2x 41, 0, 4
+ addi 4, 1, 328
+ stxvd2x 42, 0, 4
+ addi 4, 1, 344
+ stxvd2x 43, 0, 4
+ addi 4, 1, 360
+ stxvd2x 44, 0, 4
+ addi 4, 1, 376
+ stxvd2x 45, 0, 4
+ std 2, 392(1)
+ mflr 0
+ std 0, 400(1)
+
+ li 4, 0
+ bl _ZN6__xray23CallXRayPatchedFunctionEi13XRayEntryType
+ nop
+
+ addi 4, 1, 96
+ lxsdx 1, 0, 4
+ addi 4, 1, 104
+ lxsdx 2, 0, 4
+ addi 4, 1, 112
+ lxsdx 3, 0, 4
+ addi 4, 1, 120
+ lxsdx 4, 0, 4
+ addi 4, 1, 128
+ lxsdx 5, 0, 4
+ addi 4, 1, 136
+ lxsdx 6, 0, 4
+ addi 4, 1, 144
+ lxsdx 7, 0, 4
+ addi 4, 1, 152
+ lxsdx 8, 0, 4
+ addi 4, 1, 160
+ lxsdx 9, 0, 4
+ addi 4, 1, 168
+ lxsdx 10, 0, 4
+ addi 4, 1, 176
+ lxsdx 11, 0, 4
+ addi 4, 1, 184
+ lxsdx 12, 0, 4
+ addi 4, 1, 192
+ lxsdx 13, 0, 4
+ addi 4, 1, 200
+ lxvd2x 34, 0, 4
+ addi 4, 1, 216
+ lxvd2x 35, 0, 4
+ addi 4, 1, 232
+ lxvd2x 36, 0, 4
+ addi 4, 1, 248
+ lxvd2x 37, 0, 4
+ addi 4, 1, 264
+ lxvd2x 38, 0, 4
+ addi 4, 1, 280
+ lxvd2x 39, 0, 4
+ addi 4, 1, 296
+ lxvd2x 40, 0, 4
+ addi 4, 1, 312
+ lxvd2x 41, 0, 4
+ addi 4, 1, 328
+ lxvd2x 42, 0, 4
+ addi 4, 1, 344
+ lxvd2x 43, 0, 4
+ addi 4, 1, 360
+ lxvd2x 44, 0, 4
+ addi 4, 1, 376
+ lxvd2x 45, 0, 4
+ ld 0, 400(1)
+ mtlr 0
+ ld 2, 392(1)
+ ld 3, 32(1)
+ ld 4, 40(1)
+ ld 5, 48(1)
+ ld 6, 56(1)
+ ld 7, 64(1)
+ ld 8, 72(1)
+ ld 9, 80(1)
+ ld 10, 88(1)
+
+ addi 1, 1, 408
+ ld 0, 16(1)
+ blr
+
+ .globl __xray_FunctionExit
+ .p2align 4
+__xray_FunctionExit:
+ std 0, 16(1)
+ ld 0, -8(1) # FuncId
+ stdu 1, -72(1)
+# Spill r3, f1, and vsr34, the return value registers.
+ std 3, 32(1)
+ mr 3, 0
+ addi 4, 1, 40
+ stxsdx 1, 0, 4
+ addi 4, 1, 48
+ stxvd2x 34, 0, 4
+ mflr 0
+ std 0, 64(1)
+ li 4, 1
+ bl _ZN6__xray23CallXRayPatchedFunctionEi13XRayEntryType
+ nop
+ ld 0, 64(1)
+ mtlr 0
+ ld 3, 32(1)
+ addi 4, 1, 40
+ lxsdx 1, 0, 4
+ addi 4, 1, 48
+ lxvd2x 34, 0, 4
+ addi 1, 1, 72
+ ld 0, 16(1)
+ blr
diff --git a/lib/xray/xray_trampoline_x86_64.S b/lib/xray/xray_trampoline_x86_64.S
index d90c30cd98e9..da0aae326bdc 100644
--- a/lib/xray/xray_trampoline_x86_64.S
+++ b/lib/xray/xray_trampoline_x86_64.S
@@ -13,6 +13,8 @@
//
//===----------------------------------------------------------------------===//
+#include "../builtins/assembly.h"
+
.macro SAVE_REGISTERS
subq $200, %rsp
movupd %xmm0, 184(%rsp)
@@ -53,6 +55,9 @@
.text
.file "xray_trampoline_x86.S"
+
+//===----------------------------------------------------------------------===//
+
.globl __xray_FunctionEntry
.align 16, 0x90
.type __xray_FunctionEntry,@function
@@ -81,6 +86,8 @@ __xray_FunctionEntry:
.size __xray_FunctionEntry, .Ltmp1-__xray_FunctionEntry
.cfi_endproc
+//===----------------------------------------------------------------------===//
+
.globl __xray_FunctionExit
.align 16, 0x90
.type __xray_FunctionExit,@function
@@ -117,6 +124,8 @@ __xray_FunctionExit:
.size __xray_FunctionExit, .Ltmp3-__xray_FunctionExit
.cfi_endproc
+//===----------------------------------------------------------------------===//
+
.global __xray_FunctionTailExit
.align 16, 0x90
.type __xray_FunctionTailExit,@function
@@ -145,3 +154,41 @@ __xray_FunctionTailExit:
.Ltmp5:
.size __xray_FunctionTailExit, .Ltmp5-__xray_FunctionTailExit
.cfi_endproc
+
+//===----------------------------------------------------------------------===//
+
+ .globl __xray_ArgLoggerEntry
+ .align 16, 0x90
+ .type __xray_ArgLoggerEntry,@function
+__xray_ArgLoggerEntry:
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ SAVE_REGISTERS
+
+ // Again, these function pointer loads must be atomic; MOV is fine.
+ movq _ZN6__xray13XRayArgLoggerE(%rip), %rax
+ testq %rax, %rax
+ jne .Larg1entryLog
+
+	// If the arg1 logging handler is not set, defer to the no-argument handler.
+ movq _ZN6__xray19XRayPatchedFunctionE(%rip), %rax
+ testq %rax, %rax
+ je .Larg1entryFail
+
+.Larg1entryLog:
+ movq %rdi, %rdx // first argument will become the third
+ xorq %rsi, %rsi // XRayEntryType::ENTRY into the second
+ movl %r10d, %edi // 32-bit function ID becomes the first
+ callq *%rax
+
+.Larg1entryFail:
+ RESTORE_REGISTERS
+ popq %rbp
+ retq
+
+.Larg1entryEnd:
+ .size __xray_ArgLoggerEntry, .Larg1entryEnd-__xray_ArgLoggerEntry
+ .cfi_endproc
+
+NO_EXEC_STACK_DIRECTIVE
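
__xray_ArgLoggerEntry forwards the instrumented function's first argument to a separate handler stored in _ZN6__xray13XRayArgLoggerE. A hedged usage sketch follows; it assumes the arg1-logging entry points exposed by xray/xray_interface.h at this revision (__xray_set_handler_arg1 / __xray_remove_handler_arg1), so treat those exact names as an assumption rather than a guarantee.

// Sketch only: installs an arg1 handler (API names assumed from
// xray/xray_interface.h).
#include <cstdint>
#include <cstdio>
#include <xray/xray_interface.h>

void MyArg1Handler(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) {
  // Receives the function ID, the entry type, and the first argument of the
  // instrumented function, in that order (cf. the register shuffle above).
  printf("func %d type %d arg1 %llu\n", FuncId, int(Type),
         (unsigned long long)Arg1);
}

int main() {
  __xray_set_handler_arg1(MyArg1Handler);
  __xray_patch();
  // ... code compiled with -fxray-instrument and arg1 logging runs here ...
  __xray_unpatch();
  __xray_remove_handler_arg1();
  return 0;
}
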
diff --git a/lib/xray/xray_tsc.h b/lib/xray/xray_tsc.h
new file mode 100644
index 000000000000..4507564e7cd2
--- /dev/null
+++ b/lib/xray/xray_tsc.h
@@ -0,0 +1,68 @@
+//===-- xray_tsc.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_EMULATE_TSC_H
+#define XRAY_EMULATE_TSC_H
+
+namespace __xray {
+static constexpr uint64_t NanosecondsPerSecond = 1000ULL * 1000 * 1000;
+}
+
+#if defined(__x86_64__)
+#include "xray_x86_64.inc"
+#elif defined(__powerpc64__)
+#include "xray_powerpc64.inc"
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+// Emulated TSC.
+// There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
+// not have a constant frequency like the TSC on x86(_64); it may run faster
+// or slower depending on CPU turbo or power-saving modes. Furthermore,
+// reading CP15 on ARM requires a kernel modification or a driver, and we
+// cannot require that from users of compiler-rt.
+// So on these platforms we use clock_gettime(), which reports time in
+// nanoseconds. We pretend the TSC frequency is 1GHz, so that one TSC tick
+// is exactly one nanosecond.
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "xray_defs.h"
+#include <cerrno>
+#include <cstdint>
+#include <time.h>
+
+namespace __xray {
+
+inline bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettime(2) returned %d, errno=%d.", result, int(errno));
+ TS.tv_sec = 0;
+ TS.tv_nsec = 0;
+ }
+ CPU = 0;
+ return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec;
+}
+
+inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ return NanosecondsPerSecond;
+}
+
+} // namespace __xray
+
+#else
+#error Target architecture is not supported.
+#endif // CPU architecture
+
+#endif // XRAY_EMULATE_TSC_H
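
The emulated-TSC convention above (clock_gettime in nanoseconds, frequency pinned to 1 GHz) can be illustrated without the runtime headers. The sketch below is a standalone illustration, not the runtime code: the names mirror readTSC/getTSCFrequency but are local to the example, which compiles on its own against POSIX clock_gettime.

// Standalone illustration of the emulated TSC (not the runtime implementation).
#include <cstdint>
#include <cstdio>
#include <ctime>

static constexpr uint64_t NanosecondsPerSecond = 1000ULL * 1000 * 1000;

static uint64_t emulatedReadTSC() {
  timespec TS{};
  clock_gettime(CLOCK_REALTIME, &TS);
  return uint64_t(TS.tv_sec) * NanosecondsPerSecond + uint64_t(TS.tv_nsec);
}

static uint64_t emulatedTSCFrequency() { return NanosecondsPerSecond; }

int main() {
  uint64_t Start = emulatedReadTSC();
  // ... region being timed ...
  uint64_t End = emulatedReadTSC();
  // Because the frequency is defined as 1e9, tick deltas are nanoseconds and
  // dividing by the "frequency" yields seconds, just as with a real TSC.
  printf("elapsed: %f s\n",
         double(End - Start) / double(emulatedTSCFrequency()));
  return 0;
}
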
diff --git a/lib/xray/xray_utils.cc b/lib/xray/xray_utils.cc
new file mode 100644
index 000000000000..b9a38d1b98eb
--- /dev/null
+++ b/lib/xray/xray_utils.cc
@@ -0,0 +1,125 @@
+//===-- xray_utils.cc -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_utils.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_flags.h"
+#include <stdlib.h>
+#include <cstdio>
+#include <errno.h>
+#include <fcntl.h>
+#include <iterator>
+#include <sys/types.h>
+#include <tuple>
+#include <unistd.h>
+#include <utility>
+
+namespace __xray {
+
+void printToStdErr(const char *Buffer) XRAY_NEVER_INSTRUMENT {
+ fprintf(stderr, "%s", Buffer);
+}
+
+void retryingWriteAll(int Fd, char *Begin, char *End) XRAY_NEVER_INSTRUMENT {
+ if (Begin == End)
+ return;
+ auto TotalBytes = std::distance(Begin, End);
+ while (auto Written = write(Fd, Begin, TotalBytes)) {
+ if (Written < 0) {
+ if (errno == EINTR)
+ continue; // Try again.
+ Report("Failed to write; errno = %d\n", errno);
+ return;
+ }
+ TotalBytes -= Written;
+ if (TotalBytes == 0)
+ break;
+ Begin += Written;
+ }
+}
+
+std::pair<ssize_t, bool> retryingReadSome(int Fd, char *Begin,
+ char *End) XRAY_NEVER_INSTRUMENT {
+ auto BytesToRead = std::distance(Begin, End);
+ ssize_t BytesRead;
+ ssize_t TotalBytesRead = 0;
+ while (BytesToRead && (BytesRead = read(Fd, Begin, BytesToRead))) {
+ if (BytesRead == -1) {
+ if (errno == EINTR)
+ continue;
+ Report("Read error; errno = %d\n", errno);
+ return std::make_pair(TotalBytesRead, false);
+ }
+
+ TotalBytesRead += BytesRead;
+ BytesToRead -= BytesRead;
+ Begin += BytesRead;
+ }
+ return std::make_pair(TotalBytesRead, true);
+}
+
+bool readValueFromFile(const char *Filename,
+ long long *Value) XRAY_NEVER_INSTRUMENT {
+ int Fd = open(Filename, O_RDONLY | O_CLOEXEC);
+ if (Fd == -1)
+ return false;
+ static constexpr size_t BufSize = 256;
+ char Line[BufSize] = {};
+ ssize_t BytesRead;
+ bool Success;
+ std::tie(BytesRead, Success) = retryingReadSome(Fd, Line, Line + BufSize);
+  close(Fd);
+  if (!Success)
+    return false;
+ char *End = nullptr;
+ long long Tmp = internal_simple_strtoll(Line, &End, 10);
+ bool Result = false;
+ if (Line[0] != '\0' && (*End == '\n' || *End == '\0')) {
+ *Value = Tmp;
+ Result = true;
+ }
+ return Result;
+}
+
+int getLogFD() XRAY_NEVER_INSTRUMENT {
+ // Open a temporary file once for the log.
+ static char TmpFilename[256] = {};
+ static char TmpWildcardPattern[] = "XXXXXX";
+ auto Argv = GetArgv();
+ const char *Progname = Argv[0] == nullptr ? "(unknown)" : Argv[0];
+ const char *LastSlash = internal_strrchr(Progname, '/');
+
+ if (LastSlash != nullptr)
+ Progname = LastSlash + 1;
+
+ const int HalfLength = sizeof(TmpFilename) / 2 - sizeof(TmpWildcardPattern);
+ int NeededLength = internal_snprintf(
+ TmpFilename, sizeof(TmpFilename), "%.*s%.*s.%s", HalfLength,
+ flags()->xray_logfile_base, HalfLength, Progname, TmpWildcardPattern);
+ if (NeededLength > int(sizeof(TmpFilename))) {
+ Report("XRay log file name too long (%d): %s\n", NeededLength, TmpFilename);
+ return -1;
+ }
+ int Fd = mkstemp(TmpFilename);
+ if (Fd == -1) {
+ Report("XRay: Failed opening temporary file '%s'; not logging events.\n",
+ TmpFilename);
+ return -1;
+ }
+ Report("XRay: Log file in '%s'\n", TmpFilename);
+
+ return Fd;
+}
+
+} // namespace __xray
diff --git a/lib/xray/xray_utils.h b/lib/xray/xray_utils.h
new file mode 100644
index 000000000000..1ecc74a2dce8
--- /dev/null
+++ b/lib/xray/xray_utils.h
@@ -0,0 +1,41 @@
+//===-- xray_utils.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Some shared utilities for the XRay runtime implementation.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_UTILS_H
+#define XRAY_UTILS_H
+
+#include <sys/types.h>
+#include <utility>
+
+namespace __xray {
+
+// Default implementation of the reporting interface for sanitizer errors.
+void printToStdErr(const char *Buffer);
+
+// EINTR-safe write routine, given a file descriptor and a character range.
+void retryingWriteAll(int Fd, char *Begin, char *End);
+
+// Reads a long long value from a provided file.
+bool readValueFromFile(const char *Filename, long long *Value);
+
+// EINTR-safe read routine, given a file descriptor and a character range.
+std::pair<ssize_t, bool> retryingReadSome(int Fd, char *Begin, char *End);
+
+// EINTR-safe open routine that uses flag-provided values for initialising the
+// log file.
+int getLogFD();
+
+} // namespace __xray
+
+#endif // XRAY_UTILS_H
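
A short usage sketch for the helpers declared above: open the log via getLogFD() and append a buffer with the EINTR-safe writer. The message written here is illustrative only; the real runtime emits binary trace records, and getLogFD() assumes the xray_logfile_base flag has already been parsed.

// Usage sketch only (assumes it is built inside lib/xray with these headers).
#include <cstring>
#include <unistd.h>
#include "xray_utils.h"

void writeGreetingToLog() {
  int Fd = __xray::getLogFD();
  if (Fd == -1)
    return;  // could not create the log file
  char Msg[] = "XRay: hello from the usage sketch\n";
  __xray::retryingWriteAll(Fd, Msg, Msg + strlen(Msg));
  close(Fd);
}
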
diff --git a/lib/xray/xray_x86_64.cc b/lib/xray/xray_x86_64.cc
index 3ee91896c6e0..8c2a4e313e3a 100644
--- a/lib/xray/xray_x86_64.cc
+++ b/lib/xray/xray_x86_64.cc
@@ -1,6 +1,8 @@
+#include "cpuid.h"
#include "sanitizer_common/sanitizer_common.h"
#include "xray_defs.h"
#include "xray_interface_internal.h"
+
#include <atomic>
#include <cstdint>
#include <errno.h>
@@ -42,9 +44,9 @@ static bool readValueFromFile(const char *Filename,
ssize_t BytesRead;
bool Success;
std::tie(BytesRead, Success) = retryingReadSome(Fd, Line, Line + BufSize);
+ close(Fd);
if (!Success)
return false;
- close(Fd);
char *End = nullptr;
long long Tmp = internal_simple_strtoll(Line, &End, 10);
bool Result = false;
@@ -55,19 +57,19 @@ static bool readValueFromFile(const char *Filename,
return Result;
}
-uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
- long long CPUFrequency = -1;
+uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ long long TSCFrequency = -1;
if (readValueFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz",
- &CPUFrequency)) {
- CPUFrequency *= 1000;
+ &TSCFrequency)) {
+ TSCFrequency *= 1000;
} else if (readValueFromFile(
- "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
- &CPUFrequency)) {
- CPUFrequency *= 1000;
+ "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &TSCFrequency)) {
+ TSCFrequency *= 1000;
} else {
Report("Unable to determine CPU frequency for TSC accounting.\n");
}
- return CPUFrequency == -1 ? 0 : static_cast<uint64_t>(CPUFrequency);
+ return TSCFrequency == -1 ? 0 : static_cast<uint64_t>(TSCFrequency);
}
static constexpr uint8_t CallOpCode = 0xe8;
@@ -80,7 +82,8 @@ static constexpr int64_t MinOffset{std::numeric_limits<int32_t>::min()};
static constexpr int64_t MaxOffset{std::numeric_limits<int32_t>::max()};
bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
// xray_sled_n:
@@ -101,13 +104,12 @@ bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
// 4. Do an atomic write over the jmp instruction for the "mov r10d"
// opcode and first operand.
//
- // Prerequisite is to compute the relative offset to the
- // __xray_FunctionEntry function's address.
- int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionEntry) -
+ // Prerequisite is to compute the relative offset to the trampoline's address.
+ int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
(static_cast<int64_t>(Sled.Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
- __xray_FunctionEntry, reinterpret_cast<void *>(Sled.Address));
+ Trampoline, reinterpret_cast<void *>(Sled.Address));
return false;
}
if (Enable) {
@@ -199,4 +201,20 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
return true;
}
+// We determine whether the CPU we're running on has the features we need;
+// on x86_64 this is rdtscp support.
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT {
+  unsigned int EAX, EBX, ECX, EDX;
+
+  // We check whether rdtscp support is enabled. According to the x86_64
+  // manuals, the CPUID level is 0x80000001 and RDTSCP is advertised in bit 27
+  // of EDX, i.e. 0x8000000 (1u << 27).
+  __get_cpuid(0x80000001, &EAX, &EBX, &ECX, &EDX);
+  if (!(EDX & (1u << 27))) {
+ Report("Missing rdtscp support.\n");
+ return false;
+ }
+ return true;
+}
+
} // namespace __xray
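
The rdtscp probe above can be exercised in isolation. The following is a hedged, standalone C++ sketch (not the runtime code) using the __get_cpuid helper from <cpuid.h>; it checks bit 27 of EDX at leaf 0x80000001, which is where the Intel and AMD manuals advertise RDTSCP.

// Standalone rdtscp probe sketch, mirroring probeRequiredCPUFeatures above.
#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned int EAX = 0, EBX = 0, ECX = 0, EDX = 0;
  // __get_cpuid returns 0 if the requested leaf is not supported.
  if (!__get_cpuid(0x80000001, &EAX, &EBX, &ECX, &EDX)) {
    printf("extended CPUID leaf 0x80000001 not available\n");
    return 1;
  }
  bool HasRdtscp = (EDX & (1u << 27)) != 0;
  printf("rdtscp supported: %s\n", HasRdtscp ? "yes" : "no");
  return HasRdtscp ? 0 : 1;
}
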
diff --git a/lib/xray/xray_x86_64.h b/lib/xray/xray_x86_64.inc
index 52d2dea8f0d9..4ad3f9810946 100644
--- a/lib/xray/xray_x86_64.h
+++ b/lib/xray/xray_x86_64.inc
@@ -1,4 +1,4 @@
-//===-- xray_x86_64.h -------------------------------------------*- C++ -*-===//
+//===-- xray_x86_64.inc -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,8 +10,6 @@
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
//===----------------------------------------------------------------------===//
-#ifndef XRAY_X86_64_H
-#define XRAY_X86_64_H
#include <cstdint>
#include <x86intrin.h>
@@ -27,6 +25,9 @@ ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
CPU = LongCPU;
return TSC;
}
-}
-#endif // XRAY_X86_64_H
+uint64_t getTSCFrequency();
+
+bool probeRequiredCPUFeatures();
+
+} // namespace __xray