author     Dimitry Andric <dim@FreeBSD.org>          2017-12-18 20:11:54 +0000
committer  Dimitry Andric <dim@FreeBSD.org>          2017-12-18 20:11:54 +0000
commit     cdf4f3055e964bb585f294cf77cb549ead82783f (patch)
tree       7bceeca766b3fbe491245bc926a083f78c35d1de /lib/tsan
parent     625108084a3ec7c19c7745004c5af0ed7aa417a9 (diff)
download   src-cdf4f3055e964bb585f294cf77cb549ead82783f.tar.gz
           src-cdf4f3055e964bb585f294cf77cb549ead82783f.zip
Vendor import of compiler-rt trunk r321017 (tag: vendor/compiler-rt/compiler-rt-trunk-r321017)
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=326943
    svn path=/vendor/compiler-rt/compiler-rt-trunk-r321017/; revision=326944; tag=vendor/compiler-rt/compiler-rt-trunk-r321017
Diffstat (limited to 'lib/tsan')
-rw-r--r--  lib/tsan/CMakeLists.txt                         50
-rwxr-xr-x  lib/tsan/check_analyze.sh                       10
-rw-r--r--  lib/tsan/dd/CMakeLists.txt                       4
-rwxr-xr-x  lib/tsan/go/buildgo.sh                          18
-rw-r--r--  lib/tsan/rtl/tsan_fd.cc                          6
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc              558
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.h                12
-rw-r--r--  lib/tsan/rtl/tsan_interceptors_mac.cc           20
-rw-r--r--  lib/tsan/rtl/tsan_interface_ann.cc               6
-rw-r--r--  lib/tsan/rtl/tsan_libdispatch_mac.cc            15
-rw-r--r--  lib/tsan/rtl/tsan_malloc_mac.cc                  4
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc                       91
-rw-r--r--  lib/tsan/rtl/tsan_mman.h                        15
-rw-r--r--  lib/tsan/rtl/tsan_platform.h                   144
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc              9
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc                6
-rw-r--r--  lib/tsan/rtl/tsan_report.cc                     44
-rw-r--r--  lib/tsan/rtl/tsan_report.h                       2
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc                        50
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                         30
-rw-r--r--  lib/tsan/rtl/tsan_rtl_aarch64.S                 50
-rw-r--r--  lib/tsan/rtl/tsan_rtl_amd64.S                  104
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc                   4
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc                 71
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc                  2
-rw-r--r--  lib/tsan/rtl/tsan_sync.h                         4
-rw-r--r--  lib/tsan/rtl/tsan_trace.h                        2
-rw-r--r--  lib/tsan/rtl/tsan_vector.h                     127
-rw-r--r--  lib/tsan/tests/CMakeLists.txt                  104
-rw-r--r--  lib/tsan/tests/rtl/tsan_test_util_posix.cc       4
-rw-r--r--  lib/tsan/tests/unit/CMakeLists.txt               3
-rw-r--r--  lib/tsan/tests/unit/tsan_mman_test.cc           89
-rw-r--r--  lib/tsan/tests/unit/tsan_vector_test.cc         43

33 files changed, 1003 insertions(+), 698 deletions(-)
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
index 193158c54e62..3697ecb21381 100644
--- a/lib/tsan/CMakeLists.txt
+++ b/lib/tsan/CMakeLists.txt
@@ -93,21 +93,15 @@ set(TSAN_HEADERS
rtl/tsan_symbolize.h
rtl/tsan_sync.h
rtl/tsan_trace.h
- rtl/tsan_update_shadow_word_inl.h
- rtl/tsan_vector.h)
+ rtl/tsan_update_shadow_word_inl.h)
set(TSAN_RUNTIME_LIBRARIES)
add_compiler_rt_component(tsan)
if(APPLE)
- set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S rtl/tsan_rtl_aarch64.S)
- # Xcode will try to compile this file as C ('clang -x c'), and that will fail.
- if (${CMAKE_GENERATOR} STREQUAL "Xcode")
- enable_language(ASM)
- else()
- # Pass ASM file directly to the C++ compiler.
- set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES LANGUAGE C)
- endif()
+ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S rtl/tsan_rtl_aarch64.S)
+
+ set(TSAN_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS})
add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS)
add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
@@ -122,7 +116,8 @@ if(APPLE)
RTSanitizerCommonLibc
RTUbsan
CFLAGS ${TSAN_RTL_CFLAGS}
- LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS}
+ LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS}
+ LINK_LIBS ${TSAN_LINK_LIBS}
PARENT_TARGET tsan)
add_compiler_rt_object_libraries(RTTsan_dynamic
OS ${TSAN_SUPPORTED_OS}
@@ -142,10 +137,7 @@ if(APPLE)
else()
foreach(arch ${TSAN_SUPPORTED_ARCH})
if(arch STREQUAL "x86_64")
- set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S)
- # Pass ASM file directly to the C++ compiler.
- set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
- LANGUAGE C)
+ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S)
# Sanity check for Go runtime.
set(BUILDGO_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/go/buildgo.sh)
add_custom_target(GotsanRuntimeCheck
@@ -156,20 +148,11 @@ else()
COMMENT "Checking TSan Go runtime..."
VERBATIM)
elseif(arch STREQUAL "aarch64")
- set(TSAN_ASM_SOURCES rtl/tsan_rtl_aarch64.S)
- # Pass ASM file directly to the C++ compiler.
- set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
- LANGUAGE C)
- elseif(arch MATCHES "powerpc64|powerpc64le")
- set(TSAN_ASM_SOURCES rtl/tsan_rtl_ppc64.S)
- # Pass ASM file directly to the C++ compiler.
- set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
- LANGUAGE C)
+ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_aarch64.S)
+ elseif(arch MATCHES "powerpc64|powerpc64le")
+ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_ppc64.S)
elseif(arch MATCHES "mips64|mips64le")
- set(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S)
- # Pass ASM file directly to the C++ compiler.
- set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
- LANGUAGE C)
+ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S)
else()
set(TSAN_ASM_SOURCES)
endif()
@@ -181,13 +164,15 @@ else()
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
$<TARGET_OBJECTS:RTUbsan.${arch}>
- CFLAGS ${TSAN_RTL_CFLAGS})
+ CFLAGS ${TSAN_RTL_CFLAGS}
+ PARENT_TARGET tsan)
add_compiler_rt_runtime(clang_rt.tsan_cxx
STATIC
ARCHS ${arch}
SOURCES ${TSAN_CXX_SOURCES}
$<TARGET_OBJECTS:RTUbsan_cxx.${arch}>
- CFLAGS ${TSAN_RTL_CFLAGS})
+ CFLAGS ${TSAN_RTL_CFLAGS}
+ PARENT_TARGET tsan)
list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch}
clang_rt.tsan_cxx-${arch})
add_sanitizer_rt_symbols(clang_rt.tsan
@@ -218,8 +203,9 @@ if(COMPILER_RT_HAS_SYSROOT_FLAG AND NOT CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
endif()
# Build libcxx instrumented with TSan.
-if(COMPILER_RT_HAS_LIBCXX_SOURCES AND
- COMPILER_RT_TEST_COMPILER_ID STREQUAL "Clang")
+if(COMPILER_RT_LIBCXX_PATH AND
+ COMPILER_RT_TEST_COMPILER_ID STREQUAL "Clang" AND
+ NOT ANDROID)
set(libcxx_tsan_deps)
foreach(arch ${TSAN_SUPPORTED_ARCH})
get_target_flags_for_arch(${arch} TARGET_CFLAGS)
diff --git a/lib/tsan/check_analyze.sh b/lib/tsan/check_analyze.sh
index 54dd1b0232dc..9b5abc317fbc 100755
--- a/lib/tsan/check_analyze.sh
+++ b/lib/tsan/check_analyze.sh
@@ -2,6 +2,14 @@
#
# Script that checks that critical functions in TSan runtime have correct number
# of push/pop/rsp instructions to verify that runtime is efficient enough.
+#
+# This test can fail when backend code generation changes the output for various
+# tsan interceptors. When such a change happens, you can ensure that the
+# performance has not regressed by running the following benchmarks before and
+# after the breaking change to verify that the values in this file are safe to
+# update:
+# ./projects/compiler-rt/lib/tsan/tests/rtl/TsanRtlTest
+# --gtest_also_run_disabled_tests --gtest_filter=DISABLED_BENCH.Mop*
set -u
@@ -35,7 +43,7 @@ done
for f in read1 read2 read4 read8; do
check $f rsp 1
check $f push 3
- check $f pop 3
+ check $f pop 18
done
for f in func_entry func_exit; do
diff --git a/lib/tsan/dd/CMakeLists.txt b/lib/tsan/dd/CMakeLists.txt
index bcff35f20b36..07fc300535cd 100644
--- a/lib/tsan/dd/CMakeLists.txt
+++ b/lib/tsan/dd/CMakeLists.txt
@@ -10,7 +10,8 @@ set(DD_SOURCES
dd_interceptors.cc
)
-set(DD_LINKLIBS)
+set(DD_LINKLIBS ${SANITIZER_CXX_ABI_LIBRARY} ${SANITIZER_COMMON_LINK_LIBS})
+
append_list_if(COMPILER_RT_HAS_LIBDL dl DD_LINKLIBS)
append_list_if(COMPILER_RT_HAS_LIBRT rt DD_LINKLIBS)
append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread DD_LINKLIBS)
@@ -40,6 +41,7 @@ if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE AND NOT ANDROID)
$<TARGET_OBJECTS:RTInterception.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+ LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS}
LINK_LIBS ${DD_LINKLIBS}
PARENT_TARGET dd)
endif()
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
index 617dd9e11d27..62ff0fc38ba9 100755
--- a/lib/tsan/go/buildgo.sh
+++ b/lib/tsan/go/buildgo.sh
@@ -24,6 +24,7 @@ SRCS="
../../sanitizer_common/sanitizer_common.cc
../../sanitizer_common/sanitizer_common_libcdep.cc
../../sanitizer_common/sanitizer_deadlock_detector2.cc
+ ../../sanitizer_common/sanitizer_file.cc
../../sanitizer_common/sanitizer_flag_parser.cc
../../sanitizer_common/sanitizer_flags.cc
../../sanitizer_common/sanitizer_libc.cc
@@ -67,6 +68,21 @@ elif [ "`uname -a | grep FreeBSD`" != "" ]; then
../../sanitizer_common/sanitizer_linux_libcdep.cc
../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
"
+elif [ "`uname -a | grep NetBSD`" != "" ]; then
+ SUFFIX="netbsd_amd64"
+ OSCFLAGS="-fno-strict-aliasing -fPIC -Werror"
+ OSLDFLAGS="-lpthread -fPIC -fpie"
+ SRCS="
+ $SRCS
+ ../rtl/tsan_platform_linux.cc
+ ../../sanitizer_common/sanitizer_posix.cc
+ ../../sanitizer_common/sanitizer_posix_libcdep.cc
+ ../../sanitizer_common/sanitizer_procmaps_common.cc
+ ../../sanitizer_common/sanitizer_procmaps_freebsd.cc
+ ../../sanitizer_common/sanitizer_linux.cc
+ ../../sanitizer_common/sanitizer_linux_libcdep.cc
+ ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+ "
elif [ "`uname -a | grep Darwin`" != "" ]; then
SUFFIX="darwin_amd64"
OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -isysroot $(xcodebuild -version -sdk macosx Path) -mmacosx-version-min=10.7"
@@ -126,7 +142,7 @@ if [ "$SILENT" != "1" ]; then
fi
$CC $DIR/gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS
-$CC $OSCFLAGS test.c $DIR/race_$SUFFIX.syso -m64 -g -o $DIR/test $OSLDFLAGS
+$CC $OSCFLAGS test.c $DIR/race_$SUFFIX.syso -m64 -g -o $DIR/test $OSLDFLAGS $LDFLAGS
export GORACE="exitcode=0 atexit_sleep_ms=0"
if [ "$SILENT" != "1" ]; then
diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc
index d84df4a6413c..f13a7432ec95 100644
--- a/lib/tsan/rtl/tsan_fd.cc
+++ b/lib/tsan/rtl/tsan_fd.cc
@@ -48,8 +48,8 @@ static bool bogusfd(int fd) {
}
static FdSync *allocsync(ThreadState *thr, uptr pc) {
- FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
- false);
+ FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+ kDefaultAlignment, false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -79,7 +79,7 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
+ void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 001123f4941e..d92976ad6e9e 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -17,6 +17,7 @@
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_posix.h"
@@ -39,6 +40,21 @@ using namespace __tsan; // NOLINT
#define stderr __stderrp
#endif
+#if SANITIZER_NETBSD
+#define dirfd(dirp) (*(int *)(dirp))
+#define fileno_unlocked fileno
+
+#if _LP64
+#define __sF_size 152
+#else
+#define __sF_size 88
+#endif
+
+#define stdout ((char*)&__sF + (__sF_size * 1))
+#define stderr ((char*)&__sF + (__sF_size * 2))
+
+#endif
+
#if SANITIZER_ANDROID
#define mallopt(a, b)
#endif
@@ -49,11 +65,6 @@ const int kSigCount = 129;
const int kSigCount = 65;
#endif
-struct my_siginfo_t {
- // The size is determined by looking at sizeof of real siginfo_t on linux.
- u64 opaque[128 / sizeof(u64)];
-};
-
#ifdef __mips__
struct ucontext_t {
u64 opaque[768 / sizeof(u64) + 1];
@@ -84,19 +95,25 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int fileno_unlocked(void *stream);
+#if !SANITIZER_NETBSD
extern "C" int dirfd(void *dirp);
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
extern "C" int mallopt(int param, int value);
#endif
+#if SANITIZER_NETBSD
+extern __sanitizer_FILE __sF[];
+#else
extern __sanitizer_FILE *stdout, *stderr;
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
@@ -105,7 +122,7 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
@@ -113,7 +130,9 @@ const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
-#if !SANITIZER_MAC
+#if SANITIZER_NETBSD
+const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
+#elif !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
@@ -125,48 +144,7 @@ typedef long long_t; // NOLINT
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-typedef void (*sighandler_t)(int sig);
-typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
-
-#if SANITIZER_ANDROID
-struct sigaction_t {
- u32 sa_flags;
- union {
- sighandler_t sa_handler;
- sigactionhandler_t sa_sigaction;
- };
- __sanitizer_sigset_t sa_mask;
- void (*sa_restorer)();
-};
-#else
-struct sigaction_t {
-#ifdef __mips__
- u32 sa_flags;
-#endif
- union {
- sighandler_t sa_handler;
- sigactionhandler_t sa_sigaction;
- };
-#if SANITIZER_FREEBSD
- int sa_flags;
- __sanitizer_sigset_t sa_mask;
-#elif SANITIZER_MAC
- __sanitizer_sigset_t sa_mask;
- int sa_flags;
-#else
- __sanitizer_sigset_t sa_mask;
-#ifndef __mips__
- int sa_flags;
-#endif
- void (*sa_restorer)();
-#endif
-};
-#endif
-
-const sighandler_t SIG_DFL = (sighandler_t)0;
-const sighandler_t SIG_IGN = (sighandler_t)1;
-const sighandler_t SIG_ERR = (sighandler_t)-1;
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
@@ -180,13 +158,11 @@ const int SIG_SETMASK = 2;
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
(!cur_thread()->is_inited)
-static sigaction_t sigactions[kSigCount];
-
namespace __tsan {
struct SignalDesc {
bool armed;
bool sigaction;
- my_siginfo_t siginfo;
+ __sanitizer_siginfo siginfo;
ucontext_t ctx;
};
@@ -200,11 +176,41 @@ struct ThreadSignalContext {
__sanitizer_sigset_t oldset;
};
-// The object is 64-byte aligned, because we want hot data to be located in
-// a single cache line if possible (it's accessed in every interceptor).
-static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+};
+
+// InterceptorContext holds all global data required for interceptors.
+// It's explicitly constructed in InitializeInterceptors with placement new
+// and is never destroyed. This allows usage of members with non-trivial
+// constructors and destructors.
+struct InterceptorContext {
+ // The object is 64-byte aligned, because we want hot data to be located
+ // in a single cache line if possible (it's accessed in every interceptor).
+ ALIGNED(64) LibIgnore libignore;
+ __sanitizer_sigaction sigactions[kSigCount];
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+ unsigned finalize_key;
+#endif
+
+ BlockingMutex atexit_mu;
+ Vector<struct AtExitCtx *> AtExitStack;
+
+ InterceptorContext()
+ : libignore(LINKER_INITIALIZED), AtExitStack() {
+ }
+};
+
+static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
+InterceptorContext *interceptor_ctx() {
+ return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
+}
+
LibIgnore *libignore() {
- return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
+ return &interceptor_ctx()->libignore;
}
void InitializeLibIgnore() {
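
The InterceptorContext comment above captures a recurring runtime pattern: reserve a static, cache-line-aligned byte buffer, then construct the object into it with placement new during initialization, so members with non-trivial constructors (LibIgnore, Vector) can be used without running any global constructors. A minimal standalone sketch of the pattern, using a hypothetical Registry type rather than the tsan types:

#include <new>

struct Registry {
  int table[64];
  Registry() { for (int i = 0; i < 64; i++) table[i] = -1; }  // non-trivial ctor
};

// Static storage only; no global constructor runs for this buffer.
alignas(64) static char registry_placeholder[sizeof(Registry)];

static Registry *registry() {
  return reinterpret_cast<Registry *>(&registry_placeholder[0]);
}

static void InitializeRegistry() {
  new (registry()) Registry();  // constructed explicitly once, never destroyed
}

The patch applies exactly this shape through interceptor_placeholder and the new(interceptor_ctx()) InterceptorContext() call added to InitializeInterceptors() further below.
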
@@ -232,10 +238,6 @@ static ThreadSignalContext *SigCtx(ThreadState *thr) {
return ctx;
}
-#if !SANITIZER_MAC
-static unsigned g_thread_finalize_key;
-#endif
-
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
@@ -284,8 +286,18 @@ void ScopedInterceptor::DisableIgnores() {
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#elif SANITIZER_NETBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(__libc_##func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
+ INTERCEPT_FUNCTION(__libc_thr_##func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif
#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
@@ -346,17 +358,30 @@ TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
return res;
}
-// The sole reason tsan wraps atexit callbacks is to establish synchronization
-// between callback setup and callback execution.
-struct AtExitCtx {
- void (*f)();
- void *arg;
-};
+TSAN_INTERCEPTOR(int, pause, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(pause, fake);
+ return BLOCK_REAL(pause)(fake);
+}
-static void at_exit_wrapper(void *arg) {
- ThreadState *thr = cur_thread();
- uptr pc = 0;
- Acquire(thr, pc, (uptr)arg);
+static void at_exit_wrapper() {
+ AtExitCtx *ctx;
+ {
+ // Ensure thread-safety.
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ // Pop AtExitCtx from the top of the stack of callback functions
+ uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
+ ctx = interceptor_ctx()->AtExitStack[element];
+ interceptor_ctx()->AtExitStack.PopBack();
+ }
+
+ Acquire(cur_thread(), (uptr)0, (uptr)ctx);
+ ((void(*)())ctx->f)();
+ InternalFree(ctx);
+}
+
+static void cxa_at_exit_wrapper(void *arg) {
+ Acquire(cur_thread(), 0, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
((void(*)(void *arg))ctx->f)(ctx->arg);
InternalFree(ctx);
@@ -367,7 +392,7 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return 0;
// We want to setup the atexit callback even if we are in ignored lib
// or after fork.
@@ -377,7 +402,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
#endif
TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return 0;
SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
@@ -392,12 +417,27 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
ThreadIgnoreBegin(thr, pc);
- int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
+ int res;
+ if (!dso) {
+ // NetBSD does not preserve the 2nd argument if dso is equal to 0
+ // Store ctx in a local stack-like structure
+
+ // Ensure thread-safety.
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
+ // Push AtExitCtx on the top of the stack of callback functions
+ if (!res) {
+ interceptor_ctx()->AtExitStack.PushBack(ctx);
+ }
+ } else {
+ res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
+ }
ThreadIgnoreEnd(thr, pc);
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
static void on_exit_wrapper(int status, void *arg) {
ThreadState *thr = cur_thread();
uptr pc = 0;
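
The __cxa_atexit hunk above works around NetBSD dropping the second argument when dso == 0: the AtExitCtx is pushed onto a mutex-protected LIFO stack and a no-argument trampoline pops one record per invocation, which lines up with the reverse-registration order in which atexit handlers run. A standalone sketch of the same push/pop discipline with standard C++ primitives (hypothetical names, not the tsan types):

#include <cstdlib>
#include <mutex>
#include <vector>

struct ExitRecord {
  void (*fn)(void *);
  void *arg;
};

static std::mutex g_exit_mu;
static std::vector<ExitRecord> g_exit_stack;

static void exit_trampoline() {
  ExitRecord rec;
  {
    std::lock_guard<std::mutex> l(g_exit_mu);
    rec = g_exit_stack.back();  // handlers run in reverse registration order,
    g_exit_stack.pop_back();    // so the record on top is the one to run now
  }
  rec.fn(rec.arg);
}

static int register_exit_callback(void (*fn)(void *), void *arg) {
  std::lock_guard<std::mutex> l(g_exit_mu);
  int res = std::atexit(exit_trampoline);  // register the argument-less trampoline
  if (res == 0) g_exit_stack.push_back(ExitRecord{fn, arg});
  return res;
}
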
@@ -408,7 +448,7 @@ static void on_exit_wrapper(int status, void *arg) {
}
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return 0;
SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
@@ -422,6 +462,9 @@ TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
ThreadIgnoreEnd(thr, pc);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT
#endif
// Cleanup old bufs.
@@ -461,13 +504,15 @@ static void LongJmp(ThreadState *thr, uptr *env) {
uptr mangled_sp = env[0];
#elif SANITIZER_FREEBSD
uptr mangled_sp = env[2];
+#elif SANITIZER_NETBSD
+ uptr mangled_sp = env[6];
#elif SANITIZER_MAC
# ifdef __aarch64__
uptr mangled_sp = env[13];
# else
uptr mangled_sp = env[2];
# endif
-#elif defined(SANITIZER_LINUX)
+#elif SANITIZER_LINUX
# ifdef __aarch64__
uptr mangled_sp = env[13];
# elif defined(__mips64)
@@ -510,10 +555,29 @@ TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#else // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define setjmp_symname __setjmp14
+#define sigsetjmp_symname __sigsetjmp14
+#else
+#define setjmp_symname setjmp
+#define sigsetjmp_symname sigsetjmp
+#endif
+
+#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
+#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
+#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
+#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
+
+#define TSAN_STRING_SETJMP_(x) # x
+#define TSAN_STRING_SETJMP__(x) TSAN_STRING_SETJMP_(x)
+#define TSAN_STRING_SETJMP TSAN_STRING_SETJMP__(setjmp_symname)
+#define TSAN_STRING_SIGSETJMP TSAN_STRING_SETJMP__(sigsetjmp_symname)
+
// Not called. Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor_setjmp(void *env);
-extern "C" int __interceptor_setjmp(void *env) {
+int TSAN_INTERCEPTOR_SETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
CHECK(0);
return 0;
}
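
The TSAN_INTERCEPTOR_SETJMP / TSAN_STRING_SETJMP macros above use the standard two-level expansion trick so that setjmp_symname is expanded (to __setjmp14 on NetBSD) before ## pasting and # stringizing take place. A tiny self-contained illustration of the trick, with hypothetical PASTE/STR/SYMNAME macros:

#include <cstdio>

#define SYMNAME __setjmp14
#define PASTE_(prefix, x) prefix##x
#define PASTE(prefix, x) PASTE_(prefix, x)   // extra level forces expansion of x first
#define STR_(x) #x
#define STR(x) STR_(x)

// Defines an identifier literally named __interceptor___setjmp14.
static int PASTE(__interceptor_, SYMNAME)(void *env) { (void)env; return 0; }

int main() {
  std::printf("intercepting %s\n", STR(SYMNAME));   // prints "__setjmp14"
  return PASTE(__interceptor_, SYMNAME)(nullptr);
}
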
@@ -527,51 +591,75 @@ extern "C" int __interceptor__setjmp(void *env) {
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor_sigsetjmp(void *env);
-extern "C" int __interceptor_sigsetjmp(void *env) {
+int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
CHECK(0);
return 0;
}
+#if !SANITIZER_NETBSD
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
CHECK(0);
return 0;
}
+#endif
-extern "C" int setjmp(void *env);
+extern "C" int setjmp_symname(void *env);
extern "C" int _setjmp(void *env);
-extern "C" int sigsetjmp(void *env);
+extern "C" int sigsetjmp_symname(void *env);
+#if !SANITIZER_NETBSD
extern "C" int __sigsetjmp(void *env);
-DEFINE_REAL(int, setjmp, void *env)
+#endif
+DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
-DEFINE_REAL(int, sigsetjmp, void *env)
+DEFINE_REAL(int, sigsetjmp_symname, void *env)
+#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
+#endif
#endif // SANITIZER_MAC
-TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
+#if SANITIZER_NETBSD
+#define longjmp_symname __longjmp14
+#define siglongjmp_symname __siglongjmp14
+#else
+#define longjmp_symname longjmp
+#define siglongjmp_symname siglongjmp
+#endif
+
+TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
// Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
// bad things will happen. We will jump over ScopedInterceptor dtor and can
// leave thr->in_ignored_lib set.
{
- SCOPED_INTERCEPTOR_RAW(longjmp, env, val);
+ SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
}
LongJmp(cur_thread(), env);
- REAL(longjmp)(env, val);
+ REAL(longjmp_symname)(env, val);
}
-TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
+TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
{
- SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val);
+ SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
}
LongJmp(cur_thread(), env);
- REAL(siglongjmp)(env, val);
+ REAL(siglongjmp_symname)(env, val);
}
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(_longjmp)(env, val);
+}
+#endif
+
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return InternalAlloc(size);
void *p = 0;
{
@@ -584,11 +672,11 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ return user_memalign(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return InternalCalloc(size, n);
void *p = 0;
{
@@ -600,7 +688,7 @@ TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
}
TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return InternalRealloc(p, size);
if (p)
invoke_free_hook(p);
@@ -615,7 +703,7 @@ TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
TSAN_INTERCEPTOR(void, free, void *p) {
if (p == 0)
return;
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return InternalFree(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(free, p);
@@ -625,7 +713,7 @@ TSAN_INTERCEPTOR(void, free, void *p) {
TSAN_INTERCEPTOR(void, cfree, void *p) {
if (p == 0)
return;
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return InternalFree(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(cfree, p);
@@ -730,7 +818,7 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
@@ -739,21 +827,29 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
- SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ if (UNLIKELY(cur_thread()->in_symbolizer))
+ return InternalAlloc(sz, nullptr, align);
+ SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+ return user_aligned_alloc(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
+ if (UNLIKELY(cur_thread()->in_symbolizer))
+ return InternalAlloc(sz, nullptr, GetPageSizeCached());
SCOPED_INTERCEPTOR_RAW(valloc, sz);
- return user_alloc(thr, pc, sz, GetPageSizeCached());
+ return user_valloc(thr, pc, sz);
}
#endif
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
+ if (UNLIKELY(cur_thread()->in_symbolizer)) {
+ uptr PageSize = GetPageSizeCached();
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return InternalAlloc(sz, nullptr, PageSize);
+ }
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
- sz = RoundUp(sz, GetPageSizeCached());
- return user_alloc(thr, pc, sz, GetPageSizeCached());
+ return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
@@ -762,9 +858,15 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
+ if (UNLIKELY(cur_thread()->in_symbolizer)) {
+ void *p = InternalAlloc(sz, nullptr, align);
+ if (!p)
+ return errno_ENOMEM;
+ *memptr = p;
+ return 0;
+ }
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
- *memptr = user_alloc(thr, pc, sz, align);
- return 0;
+ return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif
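
The interceptor above now defers to user_posix_memalign, which follows the POSIX contract: the alignment must be a power of two and a multiple of sizeof(void *), errors come back as the return value (errno_EINVAL / errno_ENOMEM) rather than through errno, and *memptr is written only on success. A short usage example against the plain libc call:

#include <cstdio>
#include <cstdlib>

int main() {
  void *p = nullptr;
  int rc = posix_memalign(&p, 64, 1024);  // 64: power of two, multiple of sizeof(void *)
  if (rc != 0) {                          // EINVAL or ENOMEM is returned, not set in errno
    std::fprintf(stderr, "posix_memalign failed: %d\n", rc);
    return 1;
  }
  std::printf("aligned block at %p\n", p);
  std::free(p);
  return 0;
}
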
@@ -830,11 +932,12 @@ void DestroyThreadState() {
}
} // namespace __tsan
-#if !SANITIZER_MAC
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
- if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void*)(iter - 1))) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
@@ -860,9 +963,9 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadState *thr = cur_thread();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_MAC
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
ThreadIgnoreBegin(thr, 0);
- if (pthread_setspecific(g_thread_finalize_key,
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
(void *)GetPthreadDestructorIterations())) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
@@ -887,6 +990,9 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
TSAN_INTERCEPTOR(int, pthread_create,
void *th, void *attr, void *(*callback)(void*), void * param) {
SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+
+ MaybeSpawnBackgroundThread();
+
if (ctx->after_multithreaded_fork) {
if (flags()->die_after_fork) {
Report("ThreadSanitizer: starting new threads after multi-threaded "
@@ -1312,10 +1418,15 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
if (o == 0 || f == 0)
return errno_EINVAL;
atomic_uint32_t *a;
- if (!SANITIZER_MAC)
- a = static_cast<atomic_uint32_t*>(o);
- else // On OS X, pthread_once_t has a header with a long-sized signature.
+
+ if (SANITIZER_MAC)
a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
+ else if (SANITIZER_NETBSD)
+ a = static_cast<atomic_uint32_t*>
+ ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
+ else
+ a = static_cast<atomic_uint32_t*>(o);
+
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
memory_order_relaxed)) {
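
The pthread_once hook above keys off a small state machine in the once control word (0 = not started, 1 = initializer running, 2 = done), probed with an acquire load and claimed with a compare-and-swap. A minimal standalone sketch of that protocol with std::atomic (illustration only; the real interceptor also records synchronization for the race detector):

#include <atomic>
#include <thread>

static void call_once_sketch(std::atomic<unsigned> *o, void (*f)()) {
  unsigned v = o->load(std::memory_order_acquire);
  if (v == 0 &&
      o->compare_exchange_strong(v, 1, std::memory_order_relaxed)) {
    f();                                     // this thread won the race: run the initializer
    o->store(2, std::memory_order_release);  // publish completion
  } else {
    while (o->load(std::memory_order_acquire) != 2)
      std::this_thread::yield();             // losers wait until the winner finishes
  }
}
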
@@ -1347,7 +1458,7 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
@@ -1762,7 +1873,9 @@ TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
namespace __tsan {
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
- bool sigact, int sig, my_siginfo_t *info, void *uctx) {
+ bool sigact, int sig,
+ __sanitizer_siginfo *info, void *uctx) {
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
if (acquire)
Acquire(thr, 0, (uptr)&sigactions[sig]);
// Signals are generally asynchronous, so if we receive a signals when
@@ -1784,14 +1897,13 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// This code races with sigaction. Be careful to not read sa_sigaction twice.
// Also need to remember pc for reporting before the call,
// because the handler can reset it.
- volatile uptr pc = sigact ?
- (uptr)sigactions[sig].sa_sigaction :
- (uptr)sigactions[sig].sa_handler;
- if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
+ volatile uptr pc =
+ sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
+ if (pc != sig_dfl && pc != sig_ign) {
if (sigact)
- ((sigactionhandler_t)pc)(sig, info, uctx);
+ ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
else
- ((sighandler_t)pc)(sig);
+ ((__sanitizer_sighandler_ptr)pc)(sig);
}
if (!ctx->after_multithreaded_fork) {
thr->ignore_reads_and_writes = ignore_reads_and_writes;
@@ -1855,7 +1967,8 @@ static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
}
void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
- my_siginfo_t *info, void *ctx) {
+ __sanitizer_siginfo *info,
+ void *ctx) {
ThreadState *thr = cur_thread();
ThreadSignalContext *sctx = SigCtx(thr);
if (sig < 0 || sig >= kSigCount) {
@@ -1905,58 +2018,10 @@ static void rtl_sighandler(int sig) {
rtl_generic_sighandler(false, sig, 0, 0);
}
-static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
+static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
rtl_generic_sighandler(true, sig, info, ctx);
}
-TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
- // Note: if we call REAL(sigaction) directly for any reason without proxying
- // the signal handler through rtl_sigaction, very bad things will happen.
- // The handler will run synchronously and corrupt tsan per-thread state.
- SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
- if (old)
- internal_memcpy(old, &sigactions[sig], sizeof(*old));
- if (act == 0)
- return 0;
- // Copy act into sigactions[sig].
- // Can't use struct copy, because compiler can emit call to memcpy.
- // Can't use internal_memcpy, because it copies byte-by-byte,
- // and signal handler reads the sa_handler concurrently. It it can read
- // some bytes from old value and some bytes from new value.
- // Use volatile to prevent insertion of memcpy.
- sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
- sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
- internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
- sizeof(sigactions[sig].sa_mask));
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
- sigactions[sig].sa_restorer = act->sa_restorer;
-#endif
- sigaction_t newact;
- internal_memcpy(&newact, act, sizeof(newact));
- internal_sigfillset(&newact.sa_mask);
- if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
- if (newact.sa_flags & SA_SIGINFO)
- newact.sa_sigaction = rtl_sigaction;
- else
- newact.sa_handler = rtl_sighandler;
- }
- ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
- int res = REAL(sigaction)(sig, &newact, 0);
- return res;
-}
-
-TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
- sigaction_t act;
- act.sa_handler = h;
- internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
- act.sa_flags = 0;
- sigaction_t old;
- int res = sigaction(sig, &act, &old);
- if (res)
- return SIG_ERR;
- return old.sa_handler;
-}
-
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
ThreadSignalContext *sctx = SigCtx(thr);
@@ -2020,7 +2085,7 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
}
TSAN_INTERCEPTOR(int, fork, int fake) {
- if (cur_thread()->in_symbolizer)
+ if (UNLIKELY(cur_thread()->in_symbolizer))
return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
ForkBefore(thr, pc);
@@ -2270,6 +2335,77 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#include "sanitizer_common/sanitizer_common_interceptors.inc"
+static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old);
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h);
+
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
+ { return sigaction_impl(signo, act, oldact); }
+
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
+ { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old) {
+ // Note: if we call REAL(sigaction) directly for any reason without proxying
+ // the signal handler through rtl_sigaction, very bad things will happen.
+ // The handler will run synchronously and corrupt tsan per-thread state.
+ SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ __sanitizer_sigaction old_stored;
+ if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
+ __sanitizer_sigaction newact;
+ if (act) {
+ // Copy act into sigactions[sig].
+ // Can't use struct copy, because compiler can emit call to memcpy.
+ // Can't use internal_memcpy, because it copies byte-by-byte,
+ // and signal handler reads the handler concurrently. It can read
+ // some bytes from old value and some bytes from new value.
+ // Use volatile to prevent insertion of memcpy.
+ sigactions[sig].handler =
+ *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
+ sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
+ internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
+ sizeof(sigactions[sig].sa_mask));
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+ sigactions[sig].sa_restorer = act->sa_restorer;
+#endif
+ internal_memcpy(&newact, act, sizeof(newact));
+ internal_sigfillset(&newact.sa_mask);
+ if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
+ if (newact.sa_flags & SA_SIGINFO)
+ newact.sigaction = rtl_sigaction;
+ else
+ newact.handler = rtl_sighandler;
+ }
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
+ act = &newact;
+ }
+ int res = REAL(sigaction)(sig, act, old);
+ if (res == 0 && old) {
+ uptr cb = (uptr)old->sigaction;
+ if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
+ internal_memcpy(old, &old_stored, sizeof(*old));
+ }
+ }
+ return res;
+}
+
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h) {
+ __sanitizer_sigaction act;
+ act.handler = h;
+ internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
+ act.sa_flags = 0;
+ __sanitizer_sigaction old;
+ int res = sigaction_symname(sig, &act, &old);
+ if (res) return (__sanitizer_sighandler_ptr)sig_err;
+ return old.handler;
+}
+
#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
if (thr->ignore_interceptors) \
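
signal_impl above expresses signal() in terms of sigaction(), so both entry points share one piece of bookkeeping. The same reduction in plain POSIX terms, as a standalone sketch (the tsan version additionally stores the handler and proxies it through rtl_sighandler):

#include <signal.h>
#include <string.h>

typedef void (*handler_t)(int);

static handler_t my_signal(int sig, handler_t h) {
  struct sigaction act, old;
  memset(&act, 0, sizeof(act));
  act.sa_handler = h;
  sigfillset(&act.sa_mask);   // mirror the "block everything while running" choice above
  act.sa_flags = 0;
  if (sigaction(sig, &act, &old) != 0)
    return SIG_ERR;
  return old.sa_handler;
}
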
@@ -2290,7 +2426,7 @@ struct ScopedSyscall {
}
};
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2409,6 +2545,34 @@ TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
}
#endif
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _lwp_exit) {
+ SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
+ DestroyThreadState();
+ REAL(_lwp_exit)();
+}
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m);
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)());
+
namespace __tsan {
static void finalize(void *arg) {
@@ -2440,20 +2604,30 @@ void InitializeInterceptors() {
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
#endif
+ new(interceptor_ctx()) InterceptorContext();
+
InitializeCommonInterceptors();
+ InitializeSignalInterceptors();
#if !SANITIZER_MAC
// We can not use TSAN_INTERCEPT to get setjmp addr,
// because it does &setjmp and setjmp is not present in some versions of libc.
using __interception::GetRealFunctionAddress;
- GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
+ GetRealFunctionAddress(TSAN_STRING_SETJMP,
+ (uptr*)&REAL(setjmp_symname), 0, 0);
GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
- GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
+ GetRealFunctionAddress(TSAN_STRING_SIGSETJMP,
+ (uptr*)&REAL(sigsetjmp_symname), 0, 0);
+#if !SANITIZER_NETBSD
GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
#endif
+#endif
- TSAN_INTERCEPT(longjmp);
- TSAN_INTERCEPT(siglongjmp);
+ TSAN_INTERCEPT(longjmp_symname);
+ TSAN_INTERCEPT(siglongjmp_symname);
+#if SANITIZER_NETBSD
+ TSAN_INTERCEPT(_longjmp);
+#endif
TSAN_INTERCEPT(malloc);
TSAN_INTERCEPT(__libc_memalign);
@@ -2548,8 +2722,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(closedir);
- TSAN_INTERCEPT(sigaction);
- TSAN_INTERCEPT(signal);
TSAN_INTERCEPT(sigsuspend);
TSAN_INTERCEPT(sigblock);
TSAN_INTERCEPT(sigsetmask);
@@ -2560,6 +2732,7 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sleep);
TSAN_INTERCEPT(usleep);
TSAN_INTERCEPT(nanosleep);
+ TSAN_INTERCEPT(pause);
TSAN_INTERCEPT(gettimeofday);
TSAN_INTERCEPT(getaddrinfo);
@@ -2568,7 +2741,7 @@ void InitializeInterceptors() {
#if !SANITIZER_ANDROID
TSAN_INTERCEPT(dl_iterate_phdr);
#endif
- TSAN_INTERCEPT(on_exit);
+ TSAN_MAYBE_INTERCEPT_ON_EXIT;
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
@@ -2576,6 +2749,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(__tls_get_addr);
#endif
+ TSAN_MAYBE_INTERCEPT__LWP_EXIT;
+
#if !SANITIZER_MAC && !SANITIZER_ANDROID
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
@@ -2587,13 +2762,30 @@ void InitializeInterceptors() {
Die();
}
-#if !SANITIZER_MAC
- if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+ if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
Printf("ThreadSanitizer: failed to create thread key\n");
Die();
}
#endif
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
+
FdInit();
}
diff --git a/lib/tsan/rtl/tsan_interceptors.h b/lib/tsan/rtl/tsan_interceptors.h
index de47466501da..959a39465e33 100644
--- a/lib/tsan/rtl/tsan_interceptors.h
+++ b/lib/tsan/rtl/tsan_interceptors.h
@@ -49,4 +49,16 @@ LibIgnore *libignore();
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#if SANITIZER_NETBSD
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+#else
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
+#endif
+
#endif // TSAN_INTERCEPTORS_H
diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc
index 4f1079467331..3b3a4a2815c3 100644
--- a/lib/tsan/rtl/tsan_interceptors_mac.cc
+++ b/lib/tsan/rtl/tsan_interceptors_mac.cc
@@ -100,7 +100,7 @@ OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
return tsan_atomic_f##_compare_exchange_strong( \
- (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
kMacOrderNonBarrier, kMacOrderNonBarrier); \
} \
\
@@ -108,7 +108,7 @@ OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
t volatile *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
return tsan_atomic_f##_compare_exchange_strong( \
- (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
kMacOrderBarrier, kMacOrderNonBarrier); \
}
@@ -122,14 +122,14 @@ OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
int64_t)
-#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
- TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
- char *byte_ptr = ((char *)ptr) + (n >> 3); \
- char bit = 0x80u >> (n & 7); \
- char mask = clear ? ~bit : bit; \
- char orig_byte = op((a8 *)byte_ptr, mask, mo); \
- return orig_byte & bit; \
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
+ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
+ volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
+ char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
+ return orig_byte & bit; \
}
#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
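
The bit-op interceptors above follow OSAtomicTestAndSet's addressing scheme: bit n lives in byte n >> 3 at mask 0x80 >> (n & 7), so bits are counted from the most significant bit of each byte. A small worked, non-atomic example of that indexing:

#include <cassert>
#include <cstdint>

static bool test_and_set_bit(volatile uint8_t *base, uint32_t n) {
  volatile uint8_t *byte_ptr = base + (n >> 3);   // byte that holds bit n
  uint8_t bit = uint8_t(0x80u >> (n & 7));        // MSB-first mask within the byte
  uint8_t orig = *byte_ptr;                       // the real interceptor does this atomically
  *byte_ptr = uint8_t(orig | bit);
  return (orig & bit) != 0;
}

int main() {
  uint8_t buf[2] = {0, 0};
  assert(!test_and_set_bit(buf, 9));  // bit 9 -> byte 1, mask 0x40
  assert(buf[1] == 0x40);
  assert(test_and_set_bit(buf, 9));   // second call sees the bit already set
  return 0;
}
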
diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc
index f68a0468de53..a7922a42e42c 100644
--- a/lib/tsan/rtl/tsan_interface_ann.cc
+++ b/lib/tsan/rtl/tsan_interface_ann.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_interface_ann.h"
#include "tsan_mutex.h"
#include "tsan_report.h"
@@ -21,7 +22,6 @@
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
-#include "tsan_vector.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
@@ -185,10 +185,10 @@ void PrintMatchedBenignRaces() {
int unique_count = 0;
int hit_count = 0;
int add_count = 0;
- Vector<ExpectRace> hit_matched(MBlockScopedBuf);
+ Vector<ExpectRace> hit_matched;
CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
&ExpectRace::hitcount);
- Vector<ExpectRace> add_matched(MBlockScopedBuf);
+ Vector<ExpectRace> add_matched;
CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
&ExpectRace::addcount);
if (hit_matched.Size()) {
diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc
index 8c759a3be4e1..eb22e4baa710 100644
--- a/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -86,7 +86,8 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
void *orig_context,
dispatch_function_t orig_work) {
tsan_block_context_t *new_context =
- (tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
+ (tsan_block_context_t *)user_alloc_internal(thr, pc,
+ sizeof(tsan_block_context_t));
new_context->queue = queue;
new_context->orig_context = orig_context;
new_context->orig_work = orig_work;
@@ -176,7 +177,8 @@ static void invoke_and_release_block(void *param) {
}
#define DISPATCH_INTERCEPT_SYNC_B(name, barrier) \
- TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, \
+ DISPATCH_NOESCAPE dispatch_block_t block) { \
SCOPED_TSAN_INTERCEPTOR(name, q, block); \
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
dispatch_block_t heap_block = Block_copy(block); \
@@ -266,7 +268,7 @@ TSAN_INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
// need to undefine the macro.
#undef dispatch_once
TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
- dispatch_block_t block) {
+ DISPATCH_NOESCAPE dispatch_block_t block) {
SCOPED_INTERCEPTOR_RAW(dispatch_once, predicate, block);
atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
u32 v = atomic_load(a, memory_order_acquire);
@@ -476,7 +478,8 @@ TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler_f,
}
TSAN_INTERCEPTOR(void, dispatch_apply, size_t iterations,
- dispatch_queue_t queue, void (^block)(size_t)) {
+ dispatch_queue_t queue,
+ DISPATCH_NOESCAPE void (^block)(size_t)) {
SCOPED_TSAN_INTERCEPTOR(dispatch_apply, iterations, queue, block);
void *parent_to_child_sync = nullptr;
@@ -519,9 +522,9 @@ TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
return REAL(dispatch_data_create)(buffer, size, q, destructor);
if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE)
- destructor = ^(void) { WRAP(free)((void *)buffer); };
+ destructor = ^(void) { WRAP(free)((void *)(uintptr_t)buffer); };
else if (destructor == DISPATCH_DATA_DESTRUCTOR_MUNMAP)
- destructor = ^(void) { WRAP(munmap)((void *)buffer, size); };
+ destructor = ^(void) { WRAP(munmap)((void *)(uintptr_t)buffer, size); };
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
dispatch_block_t heap_block = Block_copy(destructor);
diff --git a/lib/tsan/rtl/tsan_malloc_mac.cc b/lib/tsan/rtl/tsan_malloc_mac.cc
index 8d31ccbcaaca..455c95df6c50 100644
--- a/lib/tsan/rtl/tsan_malloc_mac.cc
+++ b/lib/tsan/rtl/tsan_malloc_mac.cc
@@ -26,7 +26,7 @@ using namespace __tsan;
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
void *p = \
- user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
+ user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
SCOPED_INTERCEPTOR_RAW(malloc, size); \
@@ -43,7 +43,7 @@ using namespace __tsan;
if (cur_thread()->in_symbolizer) \
return InternalAlloc(size, nullptr, GetPageSizeCached()); \
SCOPED_INTERCEPTOR_RAW(valloc, size); \
- void *p = user_alloc(thr, pc, size, GetPageSizeCached())
+ void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index f79dccddba9f..19680238bf76 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -13,6 +13,7 @@
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
@@ -149,11 +150,12 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
OutputReport(thr, rep);
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+ bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
- if (p == 0)
+ if (UNLIKELY(p == 0))
return 0;
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
@@ -162,15 +164,6 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
return p;
}
-void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
- if (CheckForCallocOverflow(size, n))
- return Allocator::FailureHandler::OnBadRequest();
- void *p = user_alloc(thr, pc, n * size);
- if (p)
- internal_memset(p, 0, n * size);
- return p;
-}
-
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
@@ -180,6 +173,19 @@ void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
SignalUnsafeCall(thr, pc);
}
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n)))
+ return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ void *p = user_alloc_internal(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return SetErrnoOnNull(p);
+}
+
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
ctx->metamap.AllocBlock(thr, pc, p, sz);
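
user_calloc above rejects the request before touching the allocator when n * size would overflow (CheckForCallocOverflow) and reports failure through errno via SetErrnoOnNull. The overflow test itself boils down to a division check, roughly as follows (a standalone sketch, not the sanitizer_common helper itself):

#include <cstddef>
#include <cstdint>

// True when n * size cannot be represented in size_t, in which case calloc
// must fail instead of quietly allocating a wrapped-around, too-small block.
static bool calloc_overflows(size_t size, size_t n) {
  if (size == 0) return false;
  return n > SIZE_MAX / size;
}

For example, with size == 3 and n == SIZE_MAX / 2, n * size wraps around, but n > SIZE_MAX / 3 catches it.
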
@@ -200,15 +206,64 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
- void *p2 = user_alloc(thr, pc, sz);
- if (p2 == 0)
- return 0;
- if (p) {
- uptr oldsz = user_alloc_usable_size(p);
- internal_memcpy(p2, p, min(oldsz, sz));
+ if (!p)
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+ if (!sz) {
+ user_free(thr, pc, p);
+ return nullptr;
+ }
+ void *new_p = user_alloc_internal(thr, pc, sz);
+ if (new_p) {
+ uptr old_sz = user_alloc_usable_size(p);
+ internal_memcpy(new_p, p, min(old_sz, sz));
user_free(thr, pc, p);
}
- return p2;
+ return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!IsPowerOfTwo(align))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+ Allocator::FailureHandler::OnBadRequest();
+ return errno_EINVAL;
+ }
+ void *ptr = user_alloc_internal(thr, pc, sz, align);
+ if (UNLIKELY(!ptr))
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, align));
+ *memptr = ptr;
+ return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
+ errno = errno_ENOMEM;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ // pvalloc(0) should allocate one page.
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
uptr user_alloc_usable_size(const void *p) {
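
The rewritten user_realloc above now covers the full realloc contract: a null pointer degrades to malloc, a zero size frees and returns null, a failed grow leaves the original block intact, and errno is set whenever null is returned. A compact standalone sketch of the same control flow (my_alloc/my_free/my_usable_size are hypothetical stand-ins for the tsan allocator calls, here backed by libc with a size header):

#include <algorithm>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-ins for the tsan allocator: defer to libc and keep the
// requested size in a small header so "usable size" can be recovered later.
static void *my_alloc(size_t sz) {
  unsigned char *p = static_cast<unsigned char *>(std::malloc(sz + sizeof(size_t)));
  if (!p) return nullptr;
  std::memcpy(p, &sz, sizeof(size_t));
  return p + sizeof(size_t);
}
static size_t my_usable_size(void *p) {
  size_t sz;
  std::memcpy(&sz, static_cast<unsigned char *>(p) - sizeof(size_t), sizeof(size_t));
  return sz;
}
static void my_free(void *p) {
  std::free(static_cast<unsigned char *>(p) - sizeof(size_t));
}

static void *my_realloc(void *p, size_t sz) {
  if (!p) return my_alloc(sz);              // realloc(nullptr, sz) acts like malloc(sz)
  if (!sz) { my_free(p); return nullptr; }  // realloc(p, 0) frees and returns null
  void *new_p = my_alloc(sz);
  if (new_p) {
    std::memcpy(new_p, p, std::min(my_usable_size(p), sz));
    my_free(p);                             // old block is released only on success
  }
  return new_p;
}
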
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index 8cdeeb35aa6b..6042c5c5d96d 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -27,13 +27,20 @@ void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
// For user allocations.
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment, bool signal = true);
-void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
-void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.
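The aligned-allocation entry points declared above validate their arguments before calling into the allocator: user_memalign and user_aligned_alloc require a power-of-two alignment, user_posix_memalign additionally requires the alignment to be a multiple of sizeof(void*), and user_aligned_alloc requires the size to be a multiple of the alignment, which is exactly what the new unit tests below exercise. Restated as stand-alone helpers, purely for illustration (the runtime uses sanitizer_common helpers such as CheckPosixMemalignAlignment for this):

#include <cstddef>

// Illustrative restatement of the argument checks behind user_memalign,
// user_posix_memalign and user_aligned_alloc.
static bool IsPowerOfTwoSketch(size_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

bool ValidMemalignAlignment(size_t align) {            // memalign
  return IsPowerOfTwoSketch(align);
}

bool ValidPosixMemalignAlignment(size_t align) {       // posix_memalign
  return IsPowerOfTwoSketch(align) && align % sizeof(void *) == 0;
}

bool ValidAlignedAllocRequest(size_t align, size_t size) {  // aligned_alloc
  return IsPowerOfTwoSketch(align) && size % align == 0;
}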
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index bea1daba3952..bfac70df12d9 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -42,6 +42,19 @@ C/C++ on linux/x86_64 and freebsd/x86_64
7b00 0000 0000 - 7c00 0000 0000: heap
7c00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+
+C/C++ on netbsd/amd64 can reuse the same mapping:
+ * The address space starts at 0x1000 (optionally at 0x0) and ends at
+   0x7f7ffffff000.
+ * LoAppMem-kHeapMemEnd can be reused as it is.
+ * No VDSO support.
+ * No MidAppMem region.
+ * No additional HeapMem region.
+ * HiAppMem contains the stack, loader, shared libraries and heap.
+ * The stack on NetBSD/amd64 has a pre-reserved 128MB region.
+ * Heap grows downwards (top-down).
+ * ASLR must be disabled per-process or globally.
+
*/
struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
@@ -111,7 +124,8 @@ C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
0c00 0000 00 - 0d00 0000 00: - (4 GB)
0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
0e00 0000 00 - 0f00 0000 00: - (4 GB)
-0f00 0000 00 - 1000 0000 00: traces (4 GB)
+0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
+0fc0 0000 00 - 1000 0000 00: -
*/
struct Mapping {
static const uptr kLoAppMemBeg = 0x0100000000ull;
@@ -123,9 +137,9 @@ struct Mapping {
static const uptr kMetaShadowBeg = 0x0d00000000ull;
static const uptr kMetaShadowEnd = 0x0e00000000ull;
static const uptr kTraceMemBeg = 0x0f00000000ull;
- static const uptr kTraceMemEnd = 0x1000000000ull;
- static const uptr kHiAppMemBeg = 0x1000000000ull;
- static const uptr kHiAppMemEnd = 0x1000000000ull;
+ static const uptr kTraceMemEnd = 0x0fc0000000ull;
+ static const uptr kHiAppMemBeg = 0x0fc0000000ull;
+ static const uptr kHiAppMemEnd = 0x0fc0000000ull;
static const uptr kAppMemMsk = 0x0ull;
static const uptr kAppMemXor = 0x0ull;
static const uptr kVdsoBeg = 0x7000000000000000ull;
@@ -303,6 +317,38 @@ struct Mapping46 {
static const uptr kVdsoBeg = 0x7800000000000000ull;
};
+/*
+C/C++ on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+struct Mapping47 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x7d0000000000ull;
+ static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull; // 47 bits
+ static const uptr kAppMemMsk = 0x7c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+};
+
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
#endif
@@ -429,11 +475,13 @@ uptr MappingArchImpl(void) {
DCHECK(0);
return 0;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return MappingImpl<Mapping44, Type>();
- else
- return MappingImpl<Mapping46, Type>();
+ switch (vmaSize) {
+ case 44: return MappingImpl<Mapping44, Type>();
+ case 46: return MappingImpl<Mapping46, Type>();
+ case 47: return MappingImpl<Mapping47, Type>();
+ }
DCHECK(0);
+ return 0;
#else
return MappingImpl<Mapping, Type>();
#endif
@@ -582,11 +630,13 @@ bool IsAppMem(uptr mem) {
DCHECK(0);
return false;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return IsAppMemImpl<Mapping44>(mem);
- else
- return IsAppMemImpl<Mapping46>(mem);
+ switch (vmaSize) {
+ case 44: return IsAppMemImpl<Mapping44>(mem);
+ case 46: return IsAppMemImpl<Mapping46>(mem);
+ case 47: return IsAppMemImpl<Mapping47>(mem);
+ }
DCHECK(0);
+ return false;
#else
return IsAppMemImpl<Mapping>(mem);
#endif
@@ -609,11 +659,13 @@ bool IsShadowMem(uptr mem) {
DCHECK(0);
return false;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return IsShadowMemImpl<Mapping44>(mem);
- else
- return IsShadowMemImpl<Mapping46>(mem);
+ switch (vmaSize) {
+ case 44: return IsShadowMemImpl<Mapping44>(mem);
+ case 46: return IsShadowMemImpl<Mapping46>(mem);
+ case 47: return IsShadowMemImpl<Mapping47>(mem);
+ }
DCHECK(0);
+ return false;
#else
return IsShadowMemImpl<Mapping>(mem);
#endif
@@ -636,11 +688,13 @@ bool IsMetaMem(uptr mem) {
DCHECK(0);
return false;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return IsMetaMemImpl<Mapping44>(mem);
- else
- return IsMetaMemImpl<Mapping46>(mem);
+ switch (vmaSize) {
+ case 44: return IsMetaMemImpl<Mapping44>(mem);
+ case 46: return IsMetaMemImpl<Mapping46>(mem);
+ case 47: return IsMetaMemImpl<Mapping47>(mem);
+ }
DCHECK(0);
+ return false;
#else
return IsMetaMemImpl<Mapping>(mem);
#endif
@@ -673,11 +727,13 @@ uptr MemToShadow(uptr x) {
DCHECK(0);
return 0;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return MemToShadowImpl<Mapping44>(x);
- else
- return MemToShadowImpl<Mapping46>(x);
+ switch (vmaSize) {
+ case 44: return MemToShadowImpl<Mapping44>(x);
+ case 46: return MemToShadowImpl<Mapping46>(x);
+ case 47: return MemToShadowImpl<Mapping47>(x);
+ }
DCHECK(0);
+ return 0;
#else
return MemToShadowImpl<Mapping>(x);
#endif
@@ -712,11 +768,13 @@ u32 *MemToMeta(uptr x) {
DCHECK(0);
return 0;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return MemToMetaImpl<Mapping44>(x);
- else
- return MemToMetaImpl<Mapping46>(x);
+ switch (vmaSize) {
+ case 44: return MemToMetaImpl<Mapping44>(x);
+ case 46: return MemToMetaImpl<Mapping46>(x);
+ case 47: return MemToMetaImpl<Mapping47>(x);
+ }
DCHECK(0);
+ return 0;
#else
return MemToMetaImpl<Mapping>(x);
#endif
@@ -764,11 +822,13 @@ uptr ShadowToMem(uptr s) {
DCHECK(0);
return 0;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return ShadowToMemImpl<Mapping44>(s);
- else
- return ShadowToMemImpl<Mapping46>(s);
+ switch (vmaSize) {
+ case 44: return ShadowToMemImpl<Mapping44>(s);
+ case 46: return ShadowToMemImpl<Mapping46>(s);
+ case 47: return ShadowToMemImpl<Mapping47>(s);
+ }
DCHECK(0);
+ return 0;
#else
return ShadowToMemImpl<Mapping>(s);
#endif
@@ -799,11 +859,13 @@ uptr GetThreadTrace(int tid) {
DCHECK(0);
return 0;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return GetThreadTraceImpl<Mapping44>(tid);
- else
- return GetThreadTraceImpl<Mapping46>(tid);
+ switch (vmaSize) {
+ case 44: return GetThreadTraceImpl<Mapping44>(tid);
+ case 46: return GetThreadTraceImpl<Mapping46>(tid);
+ case 47: return GetThreadTraceImpl<Mapping47>(tid);
+ }
DCHECK(0);
+ return 0;
#else
return GetThreadTraceImpl<Mapping>(tid);
#endif
@@ -829,11 +891,13 @@ uptr GetThreadTraceHeader(int tid) {
DCHECK(0);
return 0;
#elif defined(__powerpc64__)
- if (vmaSize == 44)
- return GetThreadTraceHeaderImpl<Mapping44>(tid);
- else
- return GetThreadTraceHeaderImpl<Mapping46>(tid);
+ switch (vmaSize) {
+ case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
+ case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
+ case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
+ }
DCHECK(0);
+ return 0;
#else
return GetThreadTraceHeaderImpl<Mapping>(tid);
#endif
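Every accessor in this header follows the same pattern the powerpc64 branches above illustrate: a mapping is a struct of compile-time constants, and the runtime-detected VMA size picks which instantiation to call. A condensed sketch of that dispatch, with hypothetical stand-in structs and illustrative constants in place of Mapping44/46/47:

#include <cstdint>
#include <cstdio>

using uptr = std::uintptr_t;

// Hypothetical stand-ins for Mapping44/Mapping46/Mapping47; the 44/46 values
// are illustrative, not the real layout constants.
struct Map44 { static constexpr uptr kHeapMemBeg = 0x0f0000000000ull; };
struct Map46 { static constexpr uptr kHeapMemBeg = 0x3f0000000000ull; };
struct Map47 { static constexpr uptr kHeapMemBeg = 0x7d0000000000ull; };

template <typename Mapping>
uptr HeapBegImpl() { return Mapping::kHeapMemBeg; }

static int vmaSize;  // detected at startup (see InitializePlatformEarly)

uptr HeapBeg() {
  switch (vmaSize) {
    case 44: return HeapBegImpl<Map44>();
    case 46: return HeapBegImpl<Map46>();
    case 47: return HeapBegImpl<Map47>();
  }
  return 0;  // unreachable once vmaSize has been validated at startup
}

int main() {
  vmaSize = 47;
  std::printf("heap begins at 0x%llx\n", (unsigned long long)HeapBeg());
}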
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index ead1e5704989..e14d5f575a2e 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -14,11 +14,12 @@
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -216,9 +217,9 @@ void InitializePlatformEarly() {
Die();
}
#elif defined(__powerpc64__)
- if (vmaSize != 44 && vmaSize != 46) {
+ if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize);
+ Printf("FATAL: Found %d - Supported 44, 46, and 47\n", vmaSize);
Die();
}
#endif
@@ -401,4 +402,4 @@ void cur_thread_finalize() {
} // namespace __tsan
-#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 73a656ffca5e..f8d7324b763c 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -167,8 +167,8 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
#else // !SANITIZER_GO
"app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#endif
- "stacks: %ld unique IDs, %ld kB allocated\n"
- "threads: %ld total, %ld live\n"
+ "stacks: %zd unique IDs, %zd kB allocated\n"
+ "threads: %zd total, %zd live\n"
"------------------------------\n",
ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
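The format string above switches from %ld to %zd for the counters, so the conversion matches size-sized integers regardless of how wide long is on the target. The same idea with standard printf (values are illustrative):

#include <cstddef>
#include <cstdio>

int main() {
  size_t unique_ids = 42, kb_allocated = 1024;
  // %zu/%zd take a size-sized argument on every target; %ld is only correct
  // where long happens to be 64-bit (and the argument really is a long).
  std::printf("stacks: %zu unique IDs, %zu kB allocated\n",
              unique_ids, kb_allocated);
  return 0;
}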
@@ -231,7 +231,7 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
void InitializePlatformEarly() {
#if defined(__aarch64__)
- uptr max_vm = GetMaxVirtualAddress() + 1;
+ uptr max_vm = GetMaxUserVirtualAddress() + 1;
if (max_vm != Mapping::kHiAppMemEnd) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
max_vm, Mapping::kHiAppMemEnd);
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 32cc3325fe40..af47076968d3 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -13,6 +13,7 @@
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
@@ -38,34 +39,27 @@ ReportLocation *ReportLocation::New(ReportLocationType type) {
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
const char *Access() { return Blue(); }
- const char *EndAccess() { return Default(); }
const char *ThreadDescription() { return Cyan(); }
- const char *EndThreadDescription() { return Default(); }
const char *Location() { return Green(); }
- const char *EndLocation() { return Default(); }
const char *Sleep() { return Yellow(); }
- const char *EndSleep() { return Default(); }
const char *Mutex() { return Magenta(); }
- const char *EndMutex() { return Default(); }
};
ReportDesc::ReportDesc()
: tag(kExternalTagNone)
- , stacks(MBlockReportStack)
- , mops(MBlockReportMop)
- , locs(MBlockReportLoc)
- , mutexes(MBlockReportMutex)
- , threads(MBlockReportThread)
- , unique_tids(MBlockReportThread)
+ , stacks()
+ , mops()
+ , locs()
+ , mutexes()
+ , threads()
+ , unique_tids()
, sleep()
, count() {
}
ReportMop::ReportMop()
- : mset(MBlockReportMutex) {
+ : mset() {
}
ReportDesc::~ReportDesc() {
@@ -180,7 +174,7 @@ static void PrintMop(const ReportMop *mop, bool first) {
}
PrintMutexSet(mop->mset);
Printf(":\n");
- Printf("%s", d.EndAccess());
+ Printf("%s", d.Default());
PrintStack(mop->stack);
}
@@ -221,20 +215,20 @@ static void PrintLocation(const ReportLocation *loc) {
loc->fd, thread_name(thrbuf, loc->tid));
print_stack = true;
}
- Printf("%s", d.EndLocation());
+ Printf("%s", d.Default());
if (print_stack)
PrintStack(loc->stack);
}
static void PrintMutexShort(const ReportMutex *rm, const char *after) {
Decorator d;
- Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after);
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
}
static void PrintMutexShortWithAddress(const ReportMutex *rm,
const char *after) {
Decorator d;
- Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after);
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
}
static void PrintMutex(const ReportMutex *rm) {
@@ -242,11 +236,11 @@ static void PrintMutex(const ReportMutex *rm) {
if (rm->destroyed) {
Printf("%s", d.Mutex());
Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
- Printf("%s", d.EndMutex());
+ Printf("%s", d.Default());
} else {
Printf("%s", d.Mutex());
Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
- Printf("%s", d.EndMutex());
+ Printf("%s", d.Default());
PrintStack(rm->stack);
}
}
@@ -264,7 +258,7 @@ static void PrintThread(const ReportThread *rt) {
if (rt->workerthread) {
Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
Printf("\n");
- Printf("%s", d.EndThreadDescription());
+ Printf("%s", d.Default());
return;
}
Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
@@ -272,7 +266,7 @@ static void PrintThread(const ReportThread *rt) {
if (rt->stack)
Printf(" at:");
Printf("\n");
- Printf("%s", d.EndThreadDescription());
+ Printf("%s", d.Default());
PrintStack(rt->stack);
}
@@ -280,7 +274,7 @@ static void PrintSleep(const ReportStack *s) {
Decorator d;
Printf("%s", d.Sleep());
Printf(" As if synchronized via sleep:\n");
- Printf("%s", d.EndSleep());
+ Printf("%s", d.Default());
PrintStack(s);
}
@@ -324,7 +318,7 @@ void PrintReport(const ReportDesc *rep) {
Printf("%s", d.Warning());
Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
(int)internal_getpid());
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
if (rep->typ == ReportTypeDeadlock) {
char thrbuf[kThreadBufSize];
@@ -342,7 +336,7 @@ void PrintReport(const ReportDesc *rep) {
PrintMutexShort(rep->mutexes[i], " in ");
Printf("%s", d.ThreadDescription());
Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
- Printf("%s", d.EndThreadDescription());
+ Printf("%s", d.Default());
if (flags()->second_deadlock_stack) {
PrintStack(rep->stacks[2*i]);
Printf(" Mutex ");
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
index bc1582f90fe4..cdc999c6af1d 100644
--- a/lib/tsan/rtl/tsan_report.h
+++ b/lib/tsan/rtl/tsan_report.h
@@ -14,8 +14,8 @@
#define TSAN_REPORT_H
#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
-#include "tsan_vector.h"
namespace __tsan {
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index a01525302b02..17b820977c26 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
@@ -101,8 +102,8 @@ Context::Context()
, thread_registry(new(thread_registry_placeholder) ThreadRegistry(
CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
, racy_mtx(MutexTypeRacy, StatMtxRacy)
- , racy_stacks(MBlockRacyStacks)
- , racy_addresses(MBlockRacyAddresses)
+ , racy_stacks()
+ , racy_addresses()
, fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
, fired_suppressions(8)
, clock_alloc("clock allocator") {
@@ -120,7 +121,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
// , ignore_interceptors()
, clock(tid, reuse_count)
#if !SANITIZER_GO
- , jmp_bufs(MBlockJmpBuf)
+ , jmp_bufs()
#endif
, tid(tid)
, unique_id(unique_id)
@@ -213,7 +214,7 @@ static void BackgroundThread(void *arg) {
memory_order_relaxed);
if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
Lock l(&ctx->report_mtx);
- SpinMutexLock l2(&CommonSanitizerReportMutex);
+ ScopedErrorReportLock l2;
SymbolizeFlush();
atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
}
@@ -323,6 +324,21 @@ static void CheckShadowMapping() {
}
}
+#if !SANITIZER_GO
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ uptr top = 0;
+ uptr bottom = 0;
+ bool fast = common_flags()->fast_unwind_on_fatal;
+ if (fast) GetThreadStackTopAndBottom(false, &top, &bottom);
+ stack->Unwind(kStackTraceMax, sig.pc, sig.bp, sig.context, top, bottom, fast);
+}
+
+static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+#endif
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
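InstallDeadlySignalHandlers (from sanitizer_common) routes fatal signals into TsanOnDeadlySignal above, which unwinds the faulting thread's stack via OnStackUnwind before the report is produced. Outside the runtime, the equivalent hook point is a plain SA_SIGINFO handler; a minimal stand-alone illustration, not the sanitizer_common implementation:

#include <signal.h>
#include <unistd.h>

// Minimal SA_SIGINFO handler illustrating the hook that
// InstallDeadlySignalHandlers wires up inside the runtime.
static void OnDeadlySignalSketch(int signo, siginfo_t *info, void *ucontext) {
  (void)signo; (void)info; (void)ucontext;
  // Only async-signal-safe calls here; a real handler (like TsanOnDeadlySignal)
  // would unwind the stack from ucontext and emit a report.
  const char msg[] = "caught a deadly signal\n";
  write(STDERR_FILENO, msg, sizeof(msg) - 1);
  _exit(1);
}

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = OnDeadlySignalSketch;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);
  return 0;
}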
@@ -360,6 +376,7 @@ void Initialize(ThreadState *thr) {
#if !SANITIZER_GO
InitializeShadowMemory();
InitializeAllocatorLate();
+ InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
@@ -367,13 +384,6 @@ void Initialize(ThreadState *thr) {
#if !SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
- // On MIPS, TSan initialization is run before
- // __pthread_initialize_minimal_internal() is finished, so we can not spawn
- // new threads.
-#ifndef __mips__
- StartBackgroundThread();
- SetSandboxingCallback(StopBackgroundThread);
-#endif
#endif
VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
@@ -402,6 +412,21 @@ void Initialize(ThreadState *thr) {
OnInitialize();
}
+void MaybeSpawnBackgroundThread() {
+ // On MIPS, TSan initialization is run before
+ // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
+ // new threads.
+#if !SANITIZER_GO && !defined(__mips__)
+ static atomic_uint32_t bg_thread = {};
+ if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
+ atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
+ StartBackgroundThread();
+ SetSandboxingCallback(StopBackgroundThread);
+ }
+#endif
+}
+
+
int Finalize(ThreadState *thr) {
bool failed = false;
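MaybeSpawnBackgroundThread uses a relaxed load followed by an atomic exchange so that exactly one caller, whichever flips the flag first, starts the background thread; every later caller sees the flag already set and returns. The same run-once idiom in isolation, using std::atomic instead of the sanitizer atomics (names are illustrative):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Run-once guard in the style of MaybeSpawnBackgroundThread: the first caller
// to observe 0 and swap in 1 does the one-time work; everyone else skips it.
static std::atomic<std::uint32_t> bg_thread_started{0};

void StartBackgroundOnce() {
  if (bg_thread_started.load(std::memory_order_relaxed) == 0 &&
      bg_thread_started.exchange(1, std::memory_order_relaxed) == 0) {
    std::puts("spawning background thread (one time only)");
    // ... StartBackgroundThread() would be called here ...
  }
}

int main() {
  StartBackgroundOnce();
  StartBackgroundOnce();  // no-op: the flag is already set
  return 0;
}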
@@ -412,8 +437,7 @@ int Finalize(ThreadState *thr) {
// Wait for pending reports.
ctx->report_mtx.Lock();
- CommonSanitizerReportMutex.Lock();
- CommonSanitizerReportMutex.Unlock();
+ { ScopedErrorReportLock l; }
ctx->report_mtx.Unlock();
#if !SANITIZER_GO
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 2cf2e168454d..7d44ccae9893 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -34,12 +34,13 @@
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
+#include "tsan_mman.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
-#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
@@ -574,11 +575,8 @@ const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);
-class ScopedReport {
+class ScopedReportBase {
public:
- explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
- ~ScopedReport();
-
void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
const MutexSet *mset);
void AddStack(StackTrace stack, bool suppressable = false);
@@ -593,6 +591,10 @@ class ScopedReport {
const ReportDesc *GetReport() const;
+ protected:
+ ScopedReportBase(ReportType typ, uptr tag);
+ ~ScopedReportBase();
+
private:
ReportDesc *rep_;
// Symbolizer makes lots of intercepted calls. If we try to process them,
@@ -601,8 +603,17 @@ class ScopedReport {
void AddDeadMutex(u64 id);
- ScopedReport(const ScopedReport&);
- void operator = (const ScopedReport&);
+ ScopedReportBase(const ScopedReportBase &) = delete;
+ void operator=(const ScopedReportBase &) = delete;
+};
+
+class ScopedReport : public ScopedReportBase {
+ public:
+ explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
+ ~ScopedReport();
+
+ private:
+ ScopedErrorReportLock lock_;
};
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
@@ -690,6 +701,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc); // uses libunwind
void Initialize(ThreadState *thr);
+void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
@@ -825,7 +837,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
return;
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
- DCHECK_EQ(GetLsb(addr, 61), addr);
+ DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
@@ -837,7 +849,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
}
Event *trace = (Event*)GetThreadTrace(fs.tid());
Event *evp = &trace[pos];
- Event ev = (u64)addr | ((u64)typ << 61);
+ Event ev = (u64)addr | ((u64)typ << kEventPCBits);
*evp = ev;
}
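TraceAddEvent now packs the event type into the bits above kEventPCBits instead of the hard-coded 61, and RestoreStack (in tsan_rtl_report.cc below) unpacks it with the same constant. The encoding in isolation, with illustrative values:

#include <cassert>
#include <cstdint>

typedef std::uint64_t Event;
const unsigned kEventPCBits = 61;  // low 61 bits: pc/addr, high 3 bits: type

int main() {
  // Pack an event type and a pc into one trace word, then unpack them again,
  // mirroring TraceAddEvent and RestoreStack.
  std::uint64_t typ = 5;                    // any EventType value (3 bits)
  std::uint64_t pc  = 0x00007f12345678ull;  // must fit in kEventPCBits bits
  Event ev = pc | (typ << kEventPCBits);

  assert((ev >> kEventPCBits) == typ);
  assert((ev & ((1ull << kEventPCBits) - 1)) == pc);
  return 0;
}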
diff --git a/lib/tsan/rtl/tsan_rtl_aarch64.S b/lib/tsan/rtl/tsan_rtl_aarch64.S
index 61171d635c18..844c2e23db4f 100644
--- a/lib/tsan/rtl/tsan_rtl_aarch64.S
+++ b/lib/tsan/rtl/tsan_rtl_aarch64.S
@@ -6,7 +6,7 @@
#if !defined(__APPLE__)
.section .bss
.type __tsan_pointer_chk_guard, %object
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__tsan_pointer_chk_guard))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__tsan_pointer_chk_guard))
__tsan_pointer_chk_guard:
.zero 8
#endif
@@ -51,7 +51,7 @@ _sigsetjmp$non_lazy_ptr:
// original ones.
ASM_HIDDEN(_Z18InitializeGuardPtrv)
.global _Z18InitializeGuardPtrv
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
_Z18InitializeGuardPtrv:
CFI_STARTPROC
// Allocates a jmp_buf for the setjmp call.
@@ -88,14 +88,14 @@ _Z18InitializeGuardPtrv:
CFI_DEF_CFA (31, 0)
ret
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
#endif
ASM_HIDDEN(__tsan_setjmp)
.comm _ZN14__interception11real_setjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
CFI_STARTPROC
// save env parameters for function call
@@ -125,7 +125,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
#endif
// call tsan interceptor
- bl ASM_TSAN_SYMBOL(__tsan_setjmp)
+ bl ASM_SYMBOL(__tsan_setjmp)
// restore env parameter
mov x0, x19
@@ -148,12 +148,12 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
br x1
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
.comm _ZN14__interception12real__setjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
// save env parameters for function call
@@ -183,7 +183,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
#endif
// call tsan interceptor
- bl ASM_TSAN_SYMBOL(__tsan_setjmp)
+ bl ASM_SYMBOL(__tsan_setjmp)
// Restore jmp_buf parameter
mov x0, x19
@@ -206,12 +206,12 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
br x1
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
.comm _ZN14__interception14real_sigsetjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
CFI_STARTPROC
// save env parameters for function call
@@ -243,7 +243,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
#endif
// call tsan interceptor
- bl ASM_TSAN_SYMBOL(__tsan_setjmp)
+ bl ASM_SYMBOL(__tsan_setjmp)
// restore env parameter
mov w1, w20
@@ -268,13 +268,13 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
#endif
br x2
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
#if !defined(__APPLE__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
// save env parameters for function call
@@ -303,7 +303,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
#endif
// call tsan interceptor
- bl ASM_TSAN_SYMBOL(__tsan_setjmp)
+ bl ASM_SYMBOL(__tsan_setjmp)
mov w1, w20
mov x0, x19
@@ -321,12 +321,12 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
ldr x2, [x2]
#else
- adrp x2, ASM_TSAN_SYMBOL(__sigsetjmp)@page
- add x2, x2, ASM_TSAN_SYMBOL(__sigsetjmp)@pageoff
+ adrp x2, ASM_SYMBOL(__sigsetjmp)@page
+ add x2, x2, ASM_SYMBOL(__sigsetjmp)@pageoff
#endif
br x2
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
#endif
#if defined(__linux__)
diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S
index 98947fd2a1ba..8af61bf0e892 100644
--- a/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -10,8 +10,8 @@
#endif
ASM_HIDDEN(__tsan_trace_switch)
-.globl ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk)
-ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk):
+.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
+ASM_SYMBOL(__tsan_trace_switch_thunk):
CFI_STARTPROC
# Save scratch registers.
push %rax
@@ -50,7 +50,7 @@ ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk):
shr $4, %rsp # clear 4 lsb, align to 16
shl $4, %rsp
- call ASM_TSAN_SYMBOL(__tsan_trace_switch)
+ call ASM_SYMBOL(__tsan_trace_switch)
# Unalign stack frame back.
mov %rbx, %rsp # restore the original rsp
@@ -90,8 +90,8 @@ ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk):
CFI_ENDPROC
ASM_HIDDEN(__tsan_report_race)
-.globl ASM_TSAN_SYMBOL(__tsan_report_race_thunk)
-ASM_TSAN_SYMBOL(__tsan_report_race_thunk):
+.globl ASM_SYMBOL(__tsan_report_race_thunk)
+ASM_SYMBOL(__tsan_report_race_thunk):
CFI_STARTPROC
# Save scratch registers.
push %rax
@@ -130,7 +130,7 @@ ASM_TSAN_SYMBOL(__tsan_report_race_thunk):
shr $4, %rsp # clear 4 lsb, align to 16
shl $4, %rsp
- call ASM_TSAN_SYMBOL(__tsan_report_race)
+ call ASM_SYMBOL(__tsan_report_race)
# Unalign stack frame back.
mov %rbx, %rsp # restore the original rsp
@@ -170,19 +170,27 @@ ASM_TSAN_SYMBOL(__tsan_report_race_thunk):
CFI_ENDPROC
ASM_HIDDEN(__tsan_setjmp)
-#if !defined(__APPLE__)
+#if defined(__NetBSD__)
+.comm _ZN14__interception15real___setjmp14E,8,8
+#elif !defined(__APPLE__)
.comm _ZN14__interception11real_setjmpE,8,8
#endif
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
+#if defined(__NetBSD__)
+.globl ASM_SYMBOL_INTERCEPTOR(__setjmp14)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
+ASM_SYMBOL_INTERCEPTOR(__setjmp14):
+#else
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+#endif
CFI_STARTPROC
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
-#if defined(__FreeBSD__)
+#if defined(__FreeBSD__) || defined(__NetBSD__)
lea 8(%rsp), %rdi
mov %rdi, %rsi
#elif defined(__APPLE__)
@@ -197,33 +205,40 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
# error "Unknown platform"
#endif
// call tsan interceptor
- call ASM_TSAN_SYMBOL(__tsan_setjmp)
+ call ASM_SYMBOL(__tsan_setjmp)
// restore env parameter
pop %rdi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rdi)
// tail jump to libc setjmp
movl $0, %eax
-#if !defined(__APPLE__)
+#if defined(__NetBSD__)
+ movq _ZN14__interception15real___setjmp14E@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#elif !defined(__APPLE__)
movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
#else
- jmp ASM_TSAN_SYMBOL(setjmp)
+ jmp ASM_SYMBOL(setjmp)
#endif
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
+#if defined(__NetBSD__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
+#else
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+#endif
.comm _ZN14__interception12real__setjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
-#if defined(__FreeBSD__)
+#if defined(__FreeBSD__) || defined(__NetBSD__)
lea 8(%rsp), %rdi
mov %rdi, %rsi
#elif defined(__APPLE__)
@@ -238,7 +253,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
# error "Unknown platform"
#endif
// call tsan interceptor
- call ASM_TSAN_SYMBOL(__tsan_setjmp)
+ call ASM_SYMBOL(__tsan_setjmp)
// restore env parameter
pop %rdi
CFI_ADJUST_CFA_OFFSET(-8)
@@ -249,15 +264,22 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
#else
- jmp ASM_TSAN_SYMBOL(_setjmp)
+ jmp ASM_SYMBOL(_setjmp)
#endif
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+#if defined(__NetBSD__)
+.comm _ZN14__interception18real___sigsetjmp14E,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14):
+#else
.comm _ZN14__interception14real_sigsetjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+#endif
CFI_STARTPROC
// save env parameter
push %rdi
@@ -271,7 +293,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
-#if defined(__FreeBSD__)
+#if defined(__FreeBSD__) || defined(__NetBSD__)
lea 24(%rsp), %rdi
mov %rdi, %rsi
#elif defined(__APPLE__)
@@ -286,7 +308,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
# error "Unknown platform"
#endif
// call tsan interceptor
- call ASM_TSAN_SYMBOL(__tsan_setjmp)
+ call ASM_SYMBOL(__tsan_setjmp)
// unalign stack frame
add $8, %rsp
CFI_ADJUST_CFA_OFFSET(-8)
@@ -300,20 +322,27 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
CFI_RESTORE(%rdi)
// tail jump to libc sigsetjmp
movl $0, %eax
-#if !defined(__APPLE__)
+#if defined(__NetBSD__)
+ movq _ZN14__interception18real___sigsetjmp14E@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#elif !defined(__APPLE__)
movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
#else
- jmp ASM_TSAN_SYMBOL(sigsetjmp)
+ jmp ASM_SYMBOL(sigsetjmp)
#endif
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
+#if defined(__NetBSD__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
+#else
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+#endif
-#if !defined(__APPLE__)
+#if !defined(__APPLE__) && !defined(__NetBSD__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
-.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)
-ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
-ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
// save env parameter
push %rdi
@@ -337,7 +366,7 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
rol $0x11, %rsi
#endif
// call tsan interceptor
- call ASM_TSAN_SYMBOL(__tsan_setjmp)
+ call ASM_SYMBOL(__tsan_setjmp)
// unalign stack frame
add $8, %rsp
CFI_ADJUST_CFA_OFFSET(-8)
@@ -354,11 +383,12 @@ ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
CFI_ENDPROC
-ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
-#endif // !defined(__APPLE__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif // !defined(__APPLE__) && !defined(__NetBSD__)
#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
+/* This note is not needed on NetBSD. */
.section .note.GNU-stack,"",@progbits
#endif
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 2f85811620f1..152b965ad535 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -84,7 +84,9 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
- if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) {
+ if ((flagz & MutexFlagLinkerInit)
+ || s->IsFlagSet(MutexFlagLinkerInit)
+ || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
// Destroy is no-op for linker-initialized mutexes.
s->mtx.Unlock();
return;
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 85a982941ed7..cc582ab50190 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -143,30 +143,28 @@ static ReportStack *SymbolizeStack(StackTrace trace) {
return stack;
}
-ScopedReport::ScopedReport(ReportType typ, uptr tag) {
+ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
rep_->tag = tag;
ctx->report_mtx.Lock();
- CommonSanitizerReportMutex.Lock();
}
-ScopedReport::~ScopedReport() {
- CommonSanitizerReportMutex.Unlock();
+ScopedReportBase::~ScopedReportBase() {
ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
}
-void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
+void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
ReportStack **rs = rep_->stacks.PushBack();
*rs = SymbolizeStack(stack);
(*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
- StackTrace stack, const MutexSet *mset) {
+void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
+ StackTrace stack, const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
@@ -187,11 +185,11 @@ void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
}
}
-void ScopedReport::AddUniqueTid(int unique_tid) {
+void ScopedReportBase::AddUniqueTid(int unique_tid) {
rep_->unique_tids.PushBack(unique_tid);
}
-void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
+void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
@@ -255,14 +253,14 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
-void ScopedReport::AddThread(int unique_tid, bool suppressable) {
+void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
AddThread(tctx, suppressable);
#endif
}
-void ScopedReport::AddMutex(const SyncVar *s) {
+void ScopedReportBase::AddMutex(const SyncVar *s) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == s->uid)
return;
@@ -276,7 +274,7 @@ void ScopedReport::AddMutex(const SyncVar *s) {
rm->stack = SymbolizeStackId(s->creation_stack_id);
}
-u64 ScopedReport::AddMutex(u64 id) {
+u64 ScopedReportBase::AddMutex(u64 id) {
u64 uid = 0;
u64 mid = id;
uptr addr = SyncVar::SplitId(id, &uid);
@@ -295,7 +293,7 @@ u64 ScopedReport::AddMutex(u64 id) {
return mid;
}
-void ScopedReport::AddDeadMutex(u64 id) {
+void ScopedReportBase::AddDeadMutex(u64 id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == id)
return;
@@ -309,7 +307,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
rm->stack = 0;
}
-void ScopedReport::AddLocation(uptr addr, uptr size) {
+void ScopedReportBase::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
#if !SANITIZER_GO
@@ -364,18 +362,19 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
}
#if !SANITIZER_GO
-void ScopedReport::AddSleep(u32 stack_id) {
+void ScopedReportBase::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
#endif
-void ScopedReport::SetCount(int count) {
- rep_->count = count;
-}
+void ScopedReportBase::SetCount(int count) { rep_->count = count; }
-const ReportDesc *ScopedReport::GetReport() const {
- return rep_;
-}
+const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
+
+ScopedReport::ScopedReport(ReportType typ, uptr tag)
+ : ScopedReportBase(typ, tag) {}
+
+ScopedReport::~ScopedReport() {}
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
MutexSet *mset, uptr *tag) {
@@ -394,7 +393,7 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
const u64 ebegin = RoundDown(eend, kTracePartSize);
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
- Vector<uptr> stack(MBlockReportStack);
+ Vector<uptr> stack;
stack.Resize(hdr->stack0.size + 64);
for (uptr i = 0; i < hdr->stack0.size; i++) {
stack[i] = hdr->stack0.trace[i];
@@ -406,8 +405,8 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i];
- EventType typ = (EventType)(ev >> 61);
- uptr pc = (uptr)(ev & ((1ull << 61) - 1));
+ EventType typ = (EventType)(ev >> kEventPCBits);
+ uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
if (typ == EventTypeMop) {
stack[pos] = pc;
@@ -634,13 +633,33 @@ void ReportRace(ThreadState *thr) {
const uptr kMop = 2;
VarSizeStackTrace traces[kMop];
uptr tags[kMop] = {kExternalTagNone};
- const uptr toppc = TraceTopPC(thr);
+ uptr toppc = TraceTopPC(thr);
+ if (toppc >> kEventPCBits) {
+ // This is a work-around for a known issue.
+ // The scenario where this happens is rather elaborate and requires
+ // an instrumented __sanitizer_report_error_summary callback and
+ // a __tsan_symbolize_external callback and a race during a range memory
+ // access larger than 8 bytes. MemoryAccessRange adds the current PC to
+ // the trace and starts processing memory accesses. A first memory access
+ // triggers a race, we report it and call the instrumented
+ // __sanitizer_report_error_summary, which adds more stuff to the trace
+ // since it is instrumented. Then a second memory access in MemoryAccessRange
+ // also triggers a race and we get here and call TraceTopPC to get the
+ // current PC; however, it now contains some unrelated events from the
+ // callback. Most likely, TraceTopPC will now return an EventTypeFuncExit
+ // event. Later we subtract 1 from it (in GetPreviousInstructionPc)
+ // and the resulting PC has kExternalPCBit set, so we pass it to
+ // __tsan_symbolize_external. __tsan_symbolize_external is within its rights
+ // to crash since the PC is completely bogus.
+ // test/tsan/double_race.cc contains a test case for this.
+ toppc = 0;
+ }
ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
if (IsFiredSuppression(ctx, typ, traces[0]))
return;
// MutexSet is too large to live on stack.
- Vector<u64> mset_buffer(MBlockScopedBuf);
+ Vector<u64> mset_buffer;
mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 83fab082afe3..e4d65b9a909b 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -211,7 +211,7 @@ void ThreadFinalize(ThreadState *thr) {
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
- Vector<ThreadLeak> leaks(MBlockScopedBuf);
+ Vector<ThreadLeak> leaks;
ctx->thread_registry->RunCallbackForEachThreadLocked(
MaybeReportThreadLeak, &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index b83c09ff784e..9039970bcf86 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -34,6 +34,7 @@ enum MutexFlags {
MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
+ MutexFlagNotStatic = 1 << 8, // __tsan_mutex_not_static
// The following flags are runtime private.
// Mutex API misuse was detected, so don't report any more.
@@ -43,7 +44,8 @@ enum MutexFlags {
// Must list all mutex creation flags.
MutexCreationFlagMask = MutexFlagLinkerInit |
MutexFlagWriteReentrant |
- MutexFlagReadReentrant,
+ MutexFlagReadReentrant |
+ MutexFlagNotStatic,
};
struct SyncVar {
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 96a18ac4101a..9aef375cbfe6 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -41,6 +41,8 @@ enum EventType {
// u64 addr : 61; // Associated pc.
typedef u64 Event;
+const uptr kEventPCBits = 61;
+
struct TraceHeader {
#if !SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
diff --git a/lib/tsan/rtl/tsan_vector.h b/lib/tsan/rtl/tsan_vector.h
deleted file mode 100644
index a7fb3fa58d54..000000000000
--- a/lib/tsan/rtl/tsan_vector.h
+++ /dev/null
@@ -1,127 +0,0 @@
-//===-- tsan_vector.h -------------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-// Low-fat STL-like vector container.
-
-#ifndef TSAN_VECTOR_H
-#define TSAN_VECTOR_H
-
-#include "tsan_defs.h"
-#include "tsan_mman.h"
-
-namespace __tsan {
-
-template<typename T>
-class Vector {
- public:
- explicit Vector(MBlockType typ)
- : typ_(typ)
- , begin_()
- , end_()
- , last_() {
- }
-
- ~Vector() {
- if (begin_)
- internal_free(begin_);
- }
-
- void Reset() {
- if (begin_)
- internal_free(begin_);
- begin_ = 0;
- end_ = 0;
- last_ = 0;
- }
-
- uptr Size() const {
- return end_ - begin_;
- }
-
- T &operator[](uptr i) {
- DCHECK_LT(i, end_ - begin_);
- return begin_[i];
- }
-
- const T &operator[](uptr i) const {
- DCHECK_LT(i, end_ - begin_);
- return begin_[i];
- }
-
- T *PushBack() {
- EnsureSize(Size() + 1);
- T *p = &end_[-1];
- internal_memset(p, 0, sizeof(*p));
- return p;
- }
-
- T *PushBack(const T& v) {
- EnsureSize(Size() + 1);
- T *p = &end_[-1];
- internal_memcpy(p, &v, sizeof(*p));
- return p;
- }
-
- void PopBack() {
- DCHECK_GT(end_, begin_);
- end_--;
- }
-
- void Resize(uptr size) {
- if (size == 0) {
- end_ = begin_;
- return;
- }
- uptr old_size = Size();
- EnsureSize(size);
- if (old_size < size) {
- for (uptr i = old_size; i < size; i++)
- internal_memset(&begin_[i], 0, sizeof(begin_[i]));
- }
- }
-
- private:
- const MBlockType typ_;
- T *begin_;
- T *end_;
- T *last_;
-
- void EnsureSize(uptr size) {
- if (size <= Size())
- return;
- if (size <= (uptr)(last_ - begin_)) {
- end_ = begin_ + size;
- return;
- }
- uptr cap0 = last_ - begin_;
- uptr cap = cap0 * 5 / 4; // 25% growth
- if (cap == 0)
- cap = 16;
- if (cap < size)
- cap = size;
- T *p = (T*)internal_alloc(typ_, cap * sizeof(T));
- if (cap0) {
- internal_memcpy(p, begin_, cap0 * sizeof(T));
- internal_free(begin_);
- }
- begin_ = p;
- end_ = begin_ + size;
- last_ = begin_ + cap;
- }
-
- Vector(const Vector&);
- void operator=(const Vector&);
-};
-} // namespace __tsan
-
-#endif // #ifndef TSAN_VECTOR_H
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index f8aec6854bb3..ad8d02ed331f 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -2,7 +2,7 @@ include_directories(../rtl)
add_custom_target(TsanUnitTests)
set_target_properties(TsanUnitTests PROPERTIES
- FOLDER "TSan unittests")
+ FOLDER "Compiler-RT Tests")
set(TSAN_UNITTEST_CFLAGS
${TSAN_CFLAGS}
@@ -13,9 +13,29 @@ set(TSAN_UNITTEST_CFLAGS
-I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
-DGTEST_HAS_RTTI=0)
+set(TSAN_TEST_ARCH ${TSAN_SUPPORTED_ARCH})
if(APPLE)
+
+ # Create a static library for test dependencies.
+ set(TSAN_TEST_RUNTIME_OBJECTS
+ $<TARGET_OBJECTS:RTTsan_dynamic.osx>
+ $<TARGET_OBJECTS:RTInterception.osx>
+ $<TARGET_OBJECTS:RTSanitizerCommon.osx>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>
+ $<TARGET_OBJECTS:RTUbsan.osx>)
+ set(TSAN_TEST_RUNTIME RTTsanTest)
+ add_library(${TSAN_TEST_RUNTIME} STATIC ${TSAN_TEST_RUNTIME_OBJECTS})
+ set_target_properties(${TSAN_TEST_RUNTIME} PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
+ darwin_filter_host_archs(TSAN_SUPPORTED_ARCH TSAN_TEST_ARCH)
list(APPEND TSAN_UNITTEST_CFLAGS ${DARWIN_osx_CFLAGS})
- list(APPEND TSAN_UNITTEST_LINKFLAGS ${DARWIN_osx_LINKFLAGS})
+
+ set(LINK_FLAGS "-lc++")
+ add_weak_symbols("ubsan" LINK_FLAGS)
+ add_weak_symbols("sanitizer_common" LINK_FLAGS)
+else()
+ set(LINK_FLAGS "-fsanitize=thread;-lstdc++;-lm")
endif()
set(TSAN_RTL_HEADERS)
@@ -23,79 +43,27 @@ foreach (header ${TSAN_HEADERS})
list(APPEND TSAN_RTL_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
endforeach()
-# tsan_compile(obj_list, source, arch, {headers})
-macro(tsan_compile obj_list source arch)
- get_filename_component(basename ${source} NAME)
- set(output_obj "${basename}.${arch}.o")
- get_target_flags_for_arch(${arch} TARGET_CFLAGS)
- set(COMPILE_DEPS ${TSAN_RTL_HEADERS} ${ARGN})
- if(NOT COMPILER_RT_STANDALONE_BUILD)
- list(APPEND COMPILE_DEPS gtest tsan)
- endif()
- clang_compile(${output_obj} ${source}
- CFLAGS ${TSAN_UNITTEST_CFLAGS} ${TARGET_CFLAGS}
- DEPS ${COMPILE_DEPS})
- list(APPEND ${obj_list} ${output_obj})
-endmacro()
-
+# add_tsan_unittest(<name>
+# SOURCES <sources list>
+# HEADERS <extra headers list>)
macro(add_tsan_unittest testname)
- set(TSAN_TEST_ARCH ${TSAN_SUPPORTED_ARCH})
- if(APPLE)
- darwin_filter_host_archs(TSAN_SUPPORTED_ARCH TSAN_TEST_ARCH)
- endif()
+ cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN})
if(UNIX)
foreach(arch ${TSAN_TEST_ARCH})
- cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN})
- set(TEST_OBJECTS)
- foreach(SOURCE ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE})
- tsan_compile(TEST_OBJECTS ${SOURCE} ${arch} ${TEST_HEADERS})
- endforeach()
- get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
- set(TEST_DEPS ${TEST_OBJECTS})
- if(NOT COMPILER_RT_STANDALONE_BUILD)
- list(APPEND TEST_DEPS tsan)
- endif()
- if(NOT APPLE)
- # FIXME: Looks like we should link TSan with just-built runtime,
- # and not rely on -fsanitize=thread, as these tests are essentially
- # unit tests.
- add_compiler_rt_test(TsanUnitTests ${testname}
- OBJECTS ${TEST_OBJECTS}
- DEPS ${TEST_DEPS}
- LINK_FLAGS ${TARGET_LINK_FLAGS}
- -fsanitize=thread
- -lstdc++ -lm)
- else()
- set(TSAN_TEST_RUNTIME_OBJECTS
- $<TARGET_OBJECTS:RTTsan_dynamic.osx>
- $<TARGET_OBJECTS:RTInterception.osx>
- $<TARGET_OBJECTS:RTSanitizerCommon.osx>
- $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>
- $<TARGET_OBJECTS:RTUbsan.osx>)
- set(TSAN_TEST_RUNTIME RTTsanTest.${testname}.${arch})
- add_library(${TSAN_TEST_RUNTIME} STATIC ${TSAN_TEST_RUNTIME_OBJECTS})
- set_target_properties(${TSAN_TEST_RUNTIME} PROPERTIES
- ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
- list(APPEND TEST_OBJECTS lib${TSAN_TEST_RUNTIME}.a)
- list(APPEND TEST_DEPS ${TSAN_TEST_RUNTIME})
-
- add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS)
- add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
-
- # Intentionally do *not* link with `-fsanitize=thread`. We already link
- # against a static version of the runtime, and we don't want the dynamic
- # one.
- add_compiler_rt_test(TsanUnitTests "${testname}-${arch}-Test"
- OBJECTS ${TEST_OBJECTS}
- DEPS ${TEST_DEPS}
- LINK_FLAGS ${TARGET_LINK_FLAGS} ${DARWIN_osx_LINK_FLAGS}
- ${WEAK_SYMBOL_LINK_FLAGS} -lc++)
- endif()
+ set(TsanUnitTestsObjects)
+ generate_compiler_rt_tests(TsanUnitTestsObjects TsanUnitTests
+ "${testname}-${arch}-Test" ${arch}
+ SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
+ RUNTIME ${TSAN_TEST_RUNTIME}
+ COMPILE_DEPS ${TEST_HEADERS} ${TSAN_RTL_HEADERS}
+ DEPS gtest tsan
+ CFLAGS ${TSAN_UNITTEST_CFLAGS}
+ LINK_FLAGS ${LINK_FLAGS})
endforeach()
endif()
endmacro()
-if(COMPILER_RT_CAN_EXECUTE_TESTS)
+if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID)
add_subdirectory(rtl)
add_subdirectory(unit)
endif()
diff --git a/lib/tsan/tests/rtl/tsan_test_util_posix.cc b/lib/tsan/tests/rtl/tsan_test_util_posix.cc
index 834a271aa8c0..d00e26dd5ca6 100644
--- a/lib/tsan/tests/rtl/tsan_test_util_posix.cc
+++ b/lib/tsan/tests/rtl/tsan_test_util_posix.cc
@@ -9,7 +9,7 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Test utils, Linux, FreeBSD and Darwin implementation.
+// Test utils, Linux, FreeBSD, NetBSD and Darwin implementation.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
@@ -270,7 +270,7 @@ void ScopedThread::Impl::HandleEvent(Event *ev) {
}
}
CHECK_NE(tsan_mop, 0);
-#if defined(__FreeBSD__) || defined(__APPLE__)
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__NetBSD__)
const int ErrCode = ESOCKTNOSUPPORT;
#else
const int ErrCode = ECHRNG;
diff --git a/lib/tsan/tests/unit/CMakeLists.txt b/lib/tsan/tests/unit/CMakeLists.txt
index 6898f641d6a0..c08508d50791 100644
--- a/lib/tsan/tests/unit/CMakeLists.txt
+++ b/lib/tsan/tests/unit/CMakeLists.txt
@@ -6,8 +6,7 @@ set(TSAN_UNIT_TEST_SOURCES
tsan_shadow_test.cc
tsan_stack_test.cc
tsan_sync_test.cc
- tsan_unit_test_main.cc
- tsan_vector_test.cc)
+ tsan_unit_test_main.cc)
add_tsan_unittest(TsanUnitTest
SOURCES ${TSAN_UNIT_TEST_SOURCES})
diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc
index 60dea3d43240..05ae4286704c 100644
--- a/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -56,6 +56,7 @@ TEST(Mman, UserRealloc) {
// Realloc(NULL, N) is equivalent to malloc(N), thus must return
// non-NULL pointer.
EXPECT_NE(p, (void*)0);
+ user_free(thr, pc, p);
}
{
void *p = user_realloc(thr, pc, 0, 100);
@@ -67,8 +68,9 @@ TEST(Mman, UserRealloc) {
void *p = user_alloc(thr, pc, 100);
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
+ // Realloc(P, 0) is equivalent to free(P) and returns NULL.
void *p2 = user_realloc(thr, pc, p, 0);
- EXPECT_NE(p2, (void*)0);
+ EXPECT_EQ(p2, (void*)0);
}
{
void *p = user_realloc(thr, pc, 0, 100);
@@ -135,12 +137,34 @@ TEST(Mman, Stats) {
EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
}
+TEST(Mman, Valloc) {
+ ThreadState *thr = cur_thread();
+ uptr page_size = GetPageSizeCached();
+
+ void *p = user_valloc(thr, 0, 100);
+ EXPECT_NE(p, (void*)0);
+ user_free(thr, 0, p);
+
+ p = user_pvalloc(thr, 0, 100);
+ EXPECT_NE(p, (void*)0);
+ user_free(thr, 0, p);
+
+ p = user_pvalloc(thr, 0, 0);
+ EXPECT_NE(p, (void*)0);
+ EXPECT_EQ(page_size, __sanitizer_get_allocated_size(p));
+ user_free(thr, 0, p);
+
+ EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-(page_size - 1)),
+ "allocator is terminating the process instead of returning 0");
+ EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-1),
+ "allocator is terminating the process instead of returning 0");
+}
+
+#if !SANITIZER_DEBUG
+// EXPECT_DEATH clones a thread with a 4K stack,
+// which the tsan memory access functions overflow in debug mode.
+
TEST(Mman, CallocOverflow) {
-#if SANITIZER_DEBUG
- // EXPECT_DEATH clones a thread with 4K stack,
- // which is overflown by tsan memory accesses functions in debug mode.
- return;
-#endif
ThreadState *thr = cur_thread();
uptr pc = 0;
size_t kArraySize = 4096;
@@ -152,4 +176,57 @@ TEST(Mman, CallocOverflow) {
EXPECT_EQ(0L, p);
}
+TEST(Mman, Memalign) {
+ ThreadState *thr = cur_thread();
+
+ void *p = user_memalign(thr, 0, 8, 100);
+ EXPECT_NE(p, (void*)0);
+ user_free(thr, 0, p);
+
+ p = NULL;
+ EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100),
+ "allocator is terminating the process instead of returning 0");
+ EXPECT_EQ(0L, p);
+}
+
+TEST(Mman, PosixMemalign) {
+ ThreadState *thr = cur_thread();
+
+ void *p = NULL;
+ int res = user_posix_memalign(thr, 0, &p, 8, 100);
+ EXPECT_NE(p, (void*)0);
+ EXPECT_EQ(res, 0);
+ user_free(thr, 0, p);
+
+ p = NULL;
+ // Alignment is not a power of two, although it is a multiple of sizeof(void*).
+ EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100),
+ "allocator is terminating the process instead of returning 0");
+ EXPECT_EQ(0L, p);
+ // Alignment is not a multiple of sizeof(void*), although it is a power of 2.
+ EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100),
+ "allocator is terminating the process instead of returning 0");
+ EXPECT_EQ(0L, p);
+}
+
+TEST(Mman, AlignedAlloc) {
+ ThreadState *thr = cur_thread();
+
+ void *p = user_aligned_alloc(thr, 0, 8, 64);
+ EXPECT_NE(p, (void*)0);
+ user_free(thr, 0, p);
+
+ p = NULL;
+ // Alignment is not a power of 2.
+ EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100),
+ "allocator is terminating the process instead of returning 0");
+ EXPECT_EQ(0L, p);
+ // Size is not a multiple of alignment.
+ EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100),
+ "allocator is terminating the process instead of returning 0");
+ EXPECT_EQ(0L, p);
+}
+
+#endif
+
} // namespace __tsan
diff --git a/lib/tsan/tests/unit/tsan_vector_test.cc b/lib/tsan/tests/unit/tsan_vector_test.cc
deleted file mode 100644
index c54ac1ee6de9..000000000000
--- a/lib/tsan/tests/unit/tsan_vector_test.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- tsan_vector_test.cc -----------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "tsan_vector.h"
-#include "tsan_rtl.h"
-#include "gtest/gtest.h"
-
-namespace __tsan {
-
-TEST(Vector, Basic) {
- Vector<int> v(MBlockScopedBuf);
- EXPECT_EQ(v.Size(), (uptr)0);
- v.PushBack(42);
- EXPECT_EQ(v.Size(), (uptr)1);
- EXPECT_EQ(v[0], 42);
- v.PushBack(43);
- EXPECT_EQ(v.Size(), (uptr)2);
- EXPECT_EQ(v[0], 42);
- EXPECT_EQ(v[1], 43);
-}
-
-TEST(Vector, Stride) {
- Vector<int> v(MBlockScopedBuf);
- for (int i = 0; i < 1000; i++) {
- v.PushBack(i);
- EXPECT_EQ(v.Size(), (uptr)(i + 1));
- EXPECT_EQ(v[i], i);
- }
- for (int i = 0; i < 1000; i++) {
- EXPECT_EQ(v[i], i);
- }
-}
-
-} // namespace __tsan