author     Dimitry Andric <dim@FreeBSD.org>   2017-01-02 19:18:27 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2017-01-02 19:18:27 +0000
commit     316d58822dada9440bd06ecfc758dcc2364d617c (patch)
tree       fe72ec2e6ce9a360dda74d9d57f7acdb0e3c39d6 /lib/tsan
parent     0230fcf22fe7d19f03d981c9c2c59a3db0b72ea5 (diff)
Vendor import of compiler-rt trunk r290819: vendor/compiler-rt/compiler-rt-trunk-r290819

Notes:
    svn path=/vendor/compiler-rt/dist/; revision=311120
    svn path=/vendor/compiler-rt/compiler-rt-trunk-r290819/; revision=311121; tag=vendor/compiler-rt/compiler-rt-trunk-r290819
Diffstat (limited to 'lib/tsan')
-rw-r--r--  lib/tsan/CMakeLists.txt                      12
-rw-r--r--  lib/tsan/go/build.bat                         2
-rwxr-xr-x  lib/tsan/go/buildgo.sh                        2
-rw-r--r--  lib/tsan/go/tsan_go.cc                        5
-rw-r--r--  lib/tsan/rtl/tsan_clock.cc                    2
-rw-r--r--  lib/tsan/rtl/tsan_debugging.cc               77
-rw-r--r--  lib/tsan/rtl/tsan_defs.h                     14
-rw-r--r--  lib/tsan/rtl/tsan_flags.cc                    2
-rw-r--r--  lib/tsan/rtl/tsan_flags.inc                   3
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc            43
-rw-r--r--  lib/tsan/rtl/tsan_interceptors_mac.cc        68
-rw-r--r--  lib/tsan/rtl/tsan_interface.h                19
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc        20
-rw-r--r--  lib/tsan/rtl/tsan_interface_inl.h             8
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc          17
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.h            4
-rw-r--r--  lib/tsan/rtl/tsan_libdispatch_mac.cc         75
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc                    29
-rw-r--r--  lib/tsan/rtl/tsan_mutexset.h                  4
-rw-r--r--  lib/tsan/rtl/tsan_platform.h                223
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc          31
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc            82
-rw-r--r--  lib/tsan/rtl/tsan_platform_posix.cc           2
-rw-r--r--  lib/tsan/rtl/tsan_platform_windows.cc         4
-rw-r--r--  lib/tsan/rtl/tsan_report.cc                   4
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc                     93
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                      37
-rw-r--r--  lib/tsan/rtl/tsan_rtl_aarch64.S              76
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mips64.S              214
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc                8
-rw-r--r--  lib/tsan/rtl/tsan_rtl_proc.cc                 4
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc              18
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc              25
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc             4
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc                     4
-rw-r--r--  lib/tsan/rtl/tsan_sync.h                      8
-rw-r--r--  lib/tsan/rtl/tsan_trace.h                     4
-rw-r--r--  lib/tsan/tests/CMakeLists.txt                 6
-rw-r--r--  lib/tsan/tests/rtl/tsan_posix.cc             69
-rw-r--r--  lib/tsan/tests/rtl/tsan_posix_util.h         77
-rw-r--r--  lib/tsan/tests/rtl/tsan_test_util_posix.cc   53
-rw-r--r--  lib/tsan/tests/unit/tsan_mman_test.cc         8
42 files changed, 1080 insertions(+), 380 deletions(-)
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
index 7c84e0aa8d85..b26a884b1a4c 100644
--- a/lib/tsan/CMakeLists.txt
+++ b/lib/tsan/CMakeLists.txt
@@ -17,7 +17,7 @@ endif()
set(TSAN_RTL_CFLAGS ${TSAN_CFLAGS})
append_list_if(COMPILER_RT_HAS_MSSE3_FLAG -msse3 TSAN_RTL_CFLAGS)
-append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=512
+append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=530
TSAN_RTL_CFLAGS)
append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
TSAN_RTL_CFLAGS)
@@ -96,8 +96,7 @@ set(TSAN_HEADERS
rtl/tsan_vector.h)
set(TSAN_RUNTIME_LIBRARIES)
-add_custom_target(tsan)
-set_target_properties(tsan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(tsan)
if(APPLE)
set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S)
@@ -160,6 +159,11 @@ else()
# Pass ASM file directly to the C++ compiler.
set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
LANGUAGE C)
+ elseif(arch MATCHES "mips64|mips64le")
+ set(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S)
+ # Pass ASM file directly to the C++ compiler.
+ set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
+ LANGUAGE C)
else()
set(TSAN_ASM_SOURCES)
endif()
@@ -193,8 +197,6 @@ else()
endforeach()
endif()
-add_dependencies(compiler-rt tsan)
-
# Make sure that non-platform-specific files don't include any system headers.
# FreeBSD does not install a number of Clang-provided headers for the compiler
# in the base system due to incompatibilities between FreeBSD's and Clang's
diff --git a/lib/tsan/go/build.bat b/lib/tsan/go/build.bat
index 3ada9ab116f1..3a64a2413b97 100644
--- a/lib/tsan/go/build.bat
+++ b/lib/tsan/go/build.bat
@@ -1,4 +1,4 @@
type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_rtl_proc.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc ..\..\sanitizer_common\sanitizer_flag_parser.cc ..\..\sanitizer_common\sanitizer_symbolizer.cc ..\..\sanitizer_common\sanitizer_termination.cc > gotsan.cc
-gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
+gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
index 834e325bcb0a..42d479064c49 100755
--- a/lib/tsan/go/buildgo.sh
+++ b/lib/tsan/go/buildgo.sh
@@ -113,7 +113,7 @@ for F in $SRCS; do
cat $F >> $DIR/gotsan.cc
done
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
if [ "$DEBUG" = "" ]; then
FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer"
else
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
index bc0d55304339..34625c8af0b0 100644
--- a/lib/tsan/go/tsan_go.cc
+++ b/lib/tsan/go/tsan_go.cc
@@ -271,6 +271,11 @@ void __tsan_go_ignore_sync_end(ThreadState *thr) {
ThreadIgnoreSyncEnd(thr, 0);
}
+void __tsan_report_count(u64 *pn) {
+ Lock lock(&ctx->report_mtx);
+ *pn = ctx->nreported;
+}
+
} // extern "C"
} // namespace __tsan
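
The new __tsan_report_count entry point lets the caller poll the number of race reports issued so far. A minimal sketch of the intended call (the Go runtime is the assumed caller, e.g. to fail a test run if any race fired):

    u64 nreported = 0;
    __tsan_report_count(&nreported);  // reads ctx->nreported under report_mtx
    if (nreported != 0) { /* e.g. abort the test run */ }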
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 1e2050d1f203..32435adfdf33 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -82,7 +82,7 @@
// We don't have ThreadState in these methods, so this is an ugly hack that
// works only in C++.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
#else
# define CPP_STAT_INC(typ) (void)0
diff --git a/lib/tsan/rtl/tsan_debugging.cc b/lib/tsan/rtl/tsan_debugging.cc
index ac24c89be7da..d9fb6861bc0c 100644
--- a/lib/tsan/rtl/tsan_debugging.cc
+++ b/lib/tsan/rtl/tsan_debugging.cc
@@ -15,6 +15,8 @@
#include "tsan_report.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
using namespace __tsan;
static const char *ReportTypeDescription(ReportType typ) {
@@ -160,3 +162,78 @@ int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
*tid = rep->unique_tids[idx];
return 1;
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (IsMetaMem(addr)) {
+ region_kind = "meta shadow";
+ } else if (IsShadowMem(addr)) {
+ region_kind = "shadow";
+ } else {
+ bool is_stack = false;
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+
+ if (b != 0) {
+ region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
+ region_size = b->siz;
+ region_kind = "heap";
+ } else {
+ // TODO(kuba.brecka): We should not lock. This is supposed to be called
+ // from within the debugger when other threads are stopped.
+ ctx->thread_registry->Lock();
+ ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
+ ctx->thread_registry->Unlock();
+ if (tctx) {
+ region_kind = is_stack ? "stack" : "tls";
+ } else {
+ region_kind = "global";
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
+ internal_strncpy(name, info.name, name_size);
+ region_address = info.start;
+ region_size = info.size;
+ }
+ }
+ }
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ uptr *os_id) {
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b == 0) return 0;
+
+ *thread_id = b->tid;
+ // No locking. This is supposed to be called from within the debugger when
+ // other threads are stopped.
+ ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ *os_id = tctx->os_id;
+
+ StackTrace stack = StackDepotGet(b->stk);
+ size = Min(size, (uptr)stack.size);
+ for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
+ return size;
+}
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index cdc23d0a7e49..55580a5c4436 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -29,7 +29,7 @@
#endif
#ifndef TSAN_CONTAINS_UBSAN
-# if CAN_SANITIZE_UB && !defined(SANITIZER_GO)
+# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
# else
# define TSAN_CONTAINS_UBSAN 0
@@ -38,19 +38,9 @@
namespace __tsan {
-#ifdef SANITIZER_GO
-const bool kGoMode = true;
-const bool kCppMode = false;
-const char *const kTsanOptionsEnv = "GORACE";
-#else
-const bool kGoMode = false;
-const bool kCppMode = true;
-const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
-#endif
-
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 93f598616f3a..d8d4746ab59b 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -61,7 +61,7 @@ void InitializeFlags(Flags *f, const char *env) {
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.allow_addr2line = true;
- if (kGoMode) {
+ if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
cf.abort_on_error = false;
// Go does not have mutexes.
diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc
index 4fb443612c42..071cf427d23b 100644
--- a/lib/tsan/rtl/tsan_flags.inc
+++ b/lib/tsan/rtl/tsan_flags.inc
@@ -61,8 +61,9 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use a smaller history.
TSAN_FLAG(
- int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go.
+ int, history_size, SANITIZER_GO ? 1 : 3,
"Per-thread history size, controls how many previous memory accesses "
"are remembered per thread. Possible values are [0..7]. "
"history_size=0 amounts to 32K memory accesses. Each next value doubles "
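
For scale, a hedged sketch of the arithmetic the flag description implies (the 8-bytes-per-event figure is an assumption about the trace encoding, not stated in this patch):

    // Per-thread trace capacity per the flag text: 32K events at
    // history_size=0, doubling with each increment.
    constexpr unsigned long trace_events(int history_size) {
      return (32ul * 1024) << history_size;  // history_size=3 -> 256K events
    }
    // Assuming 8 bytes per event, history_size=7 -> 4M events ~= 32 MB/thread.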
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index fb6227651d21..a3a50e13f9bf 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -88,8 +88,6 @@ extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
-extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
- __sanitizer_sigset_t *oldset);
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
@@ -485,6 +483,8 @@ static void LongJmp(ThreadState *thr, uptr *env) {
#elif defined(SANITIZER_LINUX)
# ifdef __aarch64__
uptr mangled_sp = env[13];
+# elif defined(__mips64)
+ uptr mangled_sp = env[1];
# else
uptr mangled_sp = env[6];
# endif
@@ -1762,6 +1762,31 @@ TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif
+// The following functions are intercepted merely to process pending signals.
+// If the program blocks signal X, we must deliver the signal before the
+// function returns. Similarly, if the program unblocks a signal (or returns
+// from sigsuspend), it's better to deliver the signal straight away.
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigblock, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
+ return REAL(sigblock)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
+ return REAL(sigsetmask)(mask);
+}
+
+TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ return REAL(pthread_sigmask)(how, set, oldset);
+}
+
namespace __tsan {
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
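
A minimal sketch (hypothetical user code, plain POSIX) of the pattern these interceptors serve: a signal left pending while blocked must fire as soon as it is unblocked, and the interceptor wrappers process pending signals when these calls return:

    #include <pthread.h>
    #include <signal.h>

    void blocked_region(void) {
      sigset_t set, old;
      sigemptyset(&set);
      sigaddset(&set, SIGUSR1);
      pthread_sigmask(SIG_BLOCK, &set, &old);  // SIGUSR1 now deferred
      /* ... a SIGUSR1 sent here stays pending ... */
      pthread_sigmask(SIG_SETMASK, &old, 0);   // handler runs promptly here
    }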
@@ -1833,7 +1858,8 @@ void ProcessPendingSignals(ThreadState *thr) {
atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
internal_sigfillset(&sctx->emptyset);
- CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->emptyset, &sctx->oldset));
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ CHECK_EQ(res, 0);
for (int sig = 0; sig < kSigCount; sig++) {
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed) {
@@ -1842,7 +1868,8 @@ void ProcessPendingSignals(ThreadState *thr) {
&signal->siginfo, &signal->ctx);
}
}
- CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->oldset, 0));
+ res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ CHECK_EQ(res, 0);
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}
@@ -1958,11 +1985,6 @@ TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
return old.sa_handler;
}
-TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
- SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
- return REAL(sigsuspend)(mask);
-}
-
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
ThreadSignalContext *sctx = SigCtx(thr);
@@ -2553,6 +2575,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(sigblock);
+ TSAN_INTERCEPT(sigsetmask);
+ TSAN_INTERCEPT(pthread_sigmask);
TSAN_INTERCEPT(raise);
TSAN_INTERCEPT(kill);
TSAN_INTERCEPT(pthread_kill);
diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc
index 593963825c58..fc5eb0499076 100644
--- a/lib/tsan/rtl/tsan_interceptors_mac.cc
+++ b/lib/tsan/rtl/tsan_interceptors_mac.cc
@@ -119,24 +119,23 @@ OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
int64_t)
-#define OSATOMIC_INTERCEPTOR_BITOP(f, op, m, mo) \
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
char *byte_ptr = ((char *)ptr) + (n >> 3); \
- char bit_index = n & 7; \
- char mask = m; \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
char orig_byte = op((a8 *)byte_ptr, mask, mo); \
- return orig_byte & mask; \
+ return orig_byte & bit; \
}
-#define OSATOMIC_INTERCEPTORS_BITOP(f, op, m) \
- OSATOMIC_INTERCEPTOR_BITOP(f, op, m, kMacOrderNonBarrier) \
- OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, m, kMacOrderBarrier)
+#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
-OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or,
- 0x80u >> bit_index)
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
- ~(0x80u >> bit_index))
+ true)
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
size_t offset) {
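
The reworked macro computes the bit inside the interceptor instead of passing a mask expression as a macro argument. A small sketch of the addressing convention it encodes (bit n counts from the most-significant bit of byte n >> 3):

    #include <stdint.h>
    // Mirrors the interceptor's math: OSAtomicTestAndSet(n, p) targets
    // byte p[n >> 3] and bit (0x80 >> (n & 7)) within it.
    static inline uint8_t osatomic_bit(uint32_t n) {
      return (uint8_t)(0x80u >> (n & 7));
    }
    // Example: n = 9 -> byte index 1, bit mask 0x40.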
@@ -298,18 +297,20 @@ struct fake_shared_weak_count {
};
} // namespace
-// This adds a libc++ interceptor for:
+// The following code adds libc++ interceptors for:
// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
-// destructor code. This interceptor re-implements the whole function so that
+// destructor code. These interceptors re-implement both functions so that
// the mo_acq_rel semantics of the atomic decrement are visible.
//
-// Unfortunately, this interceptor cannot simply Acquire/Release some sync
+// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).
+
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
fake_shared_weak_count *o) {
if (!flags()->shared_ptr_interceptor)
@@ -328,6 +329,47 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
}
}
+STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ return true;
+ }
+ return false;
+}
+
+namespace {
+struct call_once_callback_args {
+ void (*orig_func)(void *arg);
+ void *orig_arg;
+ void *flag;
+};
+
+void call_once_callback_wrapper(void *arg) {
+ call_once_callback_args *new_args = (call_once_callback_args *)arg;
+ new_args->orig_func(new_args->orig_arg);
+ __tsan_release(new_args->flag);
+}
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __call_once(volatile unsigned long&, void*, void(*)(void*));
+// C++11 call_once is implemented via an internal function __call_once which is
+// inside libc++.dylib, and the atomic release store inside it is thus
+// TSan-invisible. To avoid false positives, this interceptor wraps the callback
+// function and performs an explicit Release after the user code has run.
+STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
+ void *arg, void (*func)(void *arg)) {
+ call_once_callback_args new_args = {func, arg, flag};
+ REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
+ call_once_callback_wrapper);
+}
+
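A hedged illustration of the user code this helps (hypothetical example, not from the patch): without the explicit Release around the callback, TSan cannot see the release store that __call_once performs inside libc++.dylib, and would report the initialization below as racing with its readers:

    #include <mutex>
    static std::once_flag g_flag;
    static int g_value;
    int get_value() {
      std::call_once(g_flag, [] { g_value = 42; });  // runs exactly once
      return g_value;  // ordered by call_once's internal atomics
    }
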
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index fbb099d07764..bae01bd707d7 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -17,6 +17,7 @@
#define TSAN_INTERFACE_H
#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::uptr;
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
@@ -25,7 +26,7 @@
extern "C" {
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
@@ -72,6 +73,9 @@ void __tsan_vptr_update(void **vptr_p, void *new_val);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
@@ -132,6 +136,17 @@ int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+// Returns the type of the pointer (heap, stack, global, ...) and if possible
+// also the starting address (e.g. of a heap allocation) and size.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+// Returns the allocation stack for a heap pointer.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ uptr *os_id);
+
#endif // SANITIZER_GO
#ifdef __cplusplus
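
A minimal usage sketch for the two new debugging entry points, assuming the declarations above are in scope (the intended caller is a debugger inspecting a stopped process):

    void describe_address(uptr addr) {
      char name[128];
      uptr start = 0, size = 0;
      const char *kind =  // "heap", "stack", "tls", "global", "shadow", ...
          __tsan_locate_address(addr, name, sizeof(name), &start, &size);
      if (kind[0] == 'h') {  // heap block: also fetch its allocation stack
        uptr trace[64];
        int tid;
        uptr os_id;
        int n = __tsan_get_alloc_stack(addr, trace, 64, &tid, &os_id);
        // trace[0..n) holds the allocation stack; n == 0 for non-heap addrs
      }
    }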
@@ -145,7 +160,7 @@ typedef unsigned char a8;
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
-#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index dc0873f7948b..5238b66a2e51 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -28,7 +28,7 @@
using namespace __tsan; // NOLINT
-#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
+#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
@@ -102,7 +102,7 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
&& __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
@@ -176,7 +176,7 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}
@@ -212,7 +212,7 @@ static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
return atomic_load(to_atomic(a), to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
@@ -242,7 +242,7 @@ static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
atomic_store(to_atomic(a), v, to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
@@ -267,7 +267,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, pc, &s->clock);
+ ReleaseStoreImpl(thr, pc, &s->clock);
NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
}
@@ -434,7 +434,7 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
@@ -446,7 +446,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#endif
// Interface functions follow.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// C/C++
@@ -845,7 +845,7 @@ void __tsan_atomic_signal_fence(morder mo) {
}
} // extern "C"
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
// Go
@@ -928,4 +928,4 @@ void __tsan_go_atomic64_compare_exchange(
*(bool*)(a+24) = (cur == cmp);
}
} // extern "C"
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h
index 8852aa348b8e..fff83ee17806 100644
--- a/lib/tsan/rtl/tsan_interface_inl.h
+++ b/lib/tsan/rtl/tsan_interface_inl.h
@@ -108,6 +108,14 @@ void __tsan_func_exit() {
FuncExit(cur_thread());
}
+void __tsan_ignore_thread_begin() {
+ ThreadIgnoreBegin(cur_thread(), CALLERPC);
+}
+
+void __tsan_ignore_thread_end() {
+ ThreadIgnoreEnd(cur_thread(), CALLERPC);
+}
+
void __tsan_read_range(void *addr, uptr size) {
MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
}
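
The two new entry points bracket a region whose memory accesses TSan should skip for the current thread. A sketch of the intended pairing (the caller is assumed to be instrumented code or a runtime linking this interface):

    __tsan_ignore_thread_begin();
    // ... accesses here produce no race reports for this thread ...
    __tsan_ignore_thread_end();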
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 95be85994c64..5bdc04f07567 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -150,6 +150,23 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
}
}
+jptr __tsan_java_find(jptr *from_ptr, jptr to) {
+ SCOPED_JAVA_FUNC(__tsan_java_find);
+ DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
+ CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ CHECK_EQ(to % kHeapAlignment, 0);
+ CHECK_GE(*from_ptr, jctx->heap_begin);
+ CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
+ MBlock *b = ctx->metamap.GetBlock(from);
+ if (b) {
+ *from_ptr = from;
+ return b->siz;
+ }
+ }
+ return 0;
+}
+
void __tsan_java_finalize() {
SCOPED_JAVA_FUNC(__tsan_java_finalize);
DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h
index 30153a1d8505..0bd49ac3c999 100644
--- a/lib/tsan/rtl/tsan_interface_java.h
+++ b/lib/tsan/rtl/tsan_interface_java.h
@@ -57,6 +57,10 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
// It ensures necessary synchronization between
// java object creation and finalization.
void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+// Finds the first allocated memory block in the [*from_ptr, to) range, saves
+// its address in *from_ptr and returns its size. Returns 0 if there are no
+// allocated memory blocks in the range.
+jptr __tsan_java_find(jptr *from_ptr, jptr to) INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
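
A sketch of the iteration pattern __tsan_java_find enables (hypothetical caller, e.g. a VM heap walker):

    void visit_blocks(jptr heap_begin, jptr heap_end) {
      jptr p = heap_begin;  // must be kHeapAlignment-aligned
      jptr size;
      while (p < heap_end && (size = __tsan_java_find(&p, heap_end)) != 0) {
        // [p, p + size) is an allocated block TSan knows about.
        p += size;  // advance; rounding up to kHeapAlignment may be needed
      }
    }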
diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc
index 529cedba4d2b..d8c689ebb5fc 100644
--- a/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -68,13 +68,17 @@ static bool IsQueueSerial(dispatch_queue_t q) {
return width == 1;
}
-static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+static dispatch_queue_t GetTargetQueueFromQueue(dispatch_queue_t q) {
CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
- dispatch_queue_t target_queue =
- *(dispatch_queue_t *)(((uptr)source) +
- dispatch_queue_offsets.dqo_target_queue);
- CHECK_NE(target_queue, 0);
- return target_queue;
+ dispatch_queue_t tq = *(
+ dispatch_queue_t *)(((uptr)q) + dispatch_queue_offsets.dqo_target_queue);
+ return tq;
+}
+
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ dispatch_queue_t tq = GetTargetQueueFromQueue((dispatch_queue_t)source);
+ CHECK_NE(tq, 0);
+ return tq;
}
static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
@@ -92,27 +96,53 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
return new_context;
}
+#define GET_QUEUE_SYNC_VARS(context, q) \
+ bool is_queue_serial = q && IsQueueSerial(q); \
+ uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
+ uptr serial_sync = (uptr)sync_ptr; \
+ uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr); \
+ bool serial_task = context->is_barrier_block || is_queue_serial
+
+static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
+ tsan_block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ Acquire(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ Acquire(thr, pc, serial_sync);
+ if (serial_task) Acquire(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_sync_post_execute(ThreadState *thr, uptr pc,
+ tsan_block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
static void dispatch_callback_wrap(void *param) {
SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
tsan_block_context_t *context = (tsan_block_context_t *)param;
- bool is_queue_serial = context->queue && IsQueueSerial(context->queue);
- uptr sync_ptr = (uptr)context->queue ?: context->non_queue_sync_object;
- uptr serial_sync = (uptr)sync_ptr;
- uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr);
- uptr submit_sync = (uptr)context;
- bool serial_task = context->is_barrier_block || is_queue_serial;
-
- Acquire(thr, pc, submit_sync);
- Acquire(thr, pc, serial_sync);
- if (serial_task) Acquire(thr, pc, concurrent_sync);
+ dispatch_sync_pre_execute(thr, pc, context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
context->orig_work(context->orig_context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
- Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
- if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+ dispatch_sync_post_execute(thr, pc, context);
if (context->free_context_in_callback) user_free(thr, pc, context);
}
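
The pre/post hooks now walk the target-queue chain because a block's execution is ordered by every queue in that chain, not just the one it was submitted to. A sketch of the hierarchy in question (standard libdispatch API):

    #include <dispatch/dispatch.h>
    dispatch_queue_t outer = dispatch_queue_create("outer", DISPATCH_QUEUE_SERIAL);
    dispatch_queue_t inner = dispatch_queue_create("inner", DISPATCH_QUEUE_CONCURRENT);
    dispatch_set_target_queue(inner, outer);
    // Blocks submitted to 'inner' ultimately run under 'outer', so the hooks
    // Acquire/Release sync objects for both queues in the do/while loop above.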
@@ -676,6 +706,15 @@ TSAN_INTERCEPTOR(void, dispatch_io_close, dispatch_io_t channel,
return REAL(dispatch_io_close)(channel, flags);
}
+// Resuming a suspended queue needs to synchronize with all subsequent
+// executions of blocks in that queue.
+TSAN_INTERCEPTOR(void, dispatch_resume, dispatch_object_t o) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_resume, o);
+ Release(thr, pc, (uptr)o); // Synchronizes with the Acquire() on serial_sync
+ // in dispatch_sync_pre_execute
+ return REAL(dispatch_resume)(o);
+}
+
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 7693077f622b..2dea24915e8a 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -54,7 +54,8 @@ struct MapUnmapCallback {
diff = p + size - RoundDown(p + size, kPageSize);
if (diff != 0)
size -= diff;
- FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
+ uptr p_meta = (uptr)MemToMeta(p);
+ ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
}
};
@@ -111,7 +112,9 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}
void InitializeAllocator() {
- allocator()->Init(common_flags()->allocator_may_return_null);
+ allocator()->Init(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {
@@ -148,7 +151,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
@@ -161,7 +164,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
@@ -195,20 +198,16 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
- if (sz) {
- p2 = user_alloc(thr, pc, sz);
- if (p2 == 0)
- return 0;
- if (p) {
- uptr oldsz = user_alloc_usable_size(p);
- internal_memcpy(p2, p, min(oldsz, sz));
- }
- }
- if (p)
+ void *p2 = user_alloc(thr, pc, sz);
+ if (p2 == 0)
+ return 0;
+ if (p) {
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
user_free(thr, pc, p);
+ }
return p2;
}
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index 68f0ec26fa25..605c21a9c08f 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -43,7 +43,7 @@ class MutexSet {
}
private:
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
@@ -55,7 +55,7 @@ class MutexSet {
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index 2bd6637d0ea3..1dd9d91d4c92 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -24,40 +24,46 @@
namespace __tsan {
-#if !defined(SANITIZER_GO)
+#if !SANITIZER_GO
#if defined(__x86_64__)
/*
C/C++ on linux/x86_64 and freebsd/x86_64
-0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
-0100 0000 0000 - 0200 0000 0000: -
-0200 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 3000 0000 0000: -
+0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 2000 0000 0000: shadow
+2000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
+4000 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7e80 0000 0000: -
+7b00 0000 0000 - 7c00 0000 0000: heap
+7c00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
static const uptr kTraceMemEnd = 0x620000000000ull;
- static const uptr kShadowBeg = 0x020000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
- static const uptr kHeapMemBeg = 0x7d0000000000ull;
- static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kHeapMemBeg = 0x7b0000000000ull;
+ static const uptr kHeapMemEnd = 0x7c0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
- static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x550000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
static const uptr kHiAppMemBeg = 0x7e8000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
- static const uptr kAppMemMsk = 0x7c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kAppMemMsk = 0x780000000000ull;
+ static const uptr kAppMemXor = 0x040000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
+
+#define TSAN_MID_APP_RANGE 1
#elif defined(__mips64)
/*
C/C++ on linux/mips64
@@ -74,22 +80,26 @@ ff00 0000 00 - ff80 0000 00: -
ff80 0000 00 - ffff ffff ff: modules and main thread stack
*/
struct Mapping {
- static const uptr kMetaShadowBeg = 0x3000000000ull;
- static const uptr kMetaShadowEnd = 0x4000000000ull;
- static const uptr kTraceMemBeg = 0x6000000000ull;
- static const uptr kTraceMemEnd = 0x6200000000ull;
- static const uptr kShadowBeg = 0x1400000000ull;
- static const uptr kShadowEnd = 0x2400000000ull;
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x5000000000ull;
+ static const uptr kTraceMemBeg = 0xb000000000ull;
+ static const uptr kTraceMemEnd = 0xb200000000ull;
+ static const uptr kShadowBeg = 0x2400000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
static const uptr kLoAppMemBeg = 0x0100000000ull;
static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kMidAppMemBeg = 0xaa00000000ull;
+ static const uptr kMidAppMemEnd = 0xab00000000ull;
static const uptr kHiAppMemBeg = 0xff80000000ull;
static const uptr kHiAppMemEnd = 0xffffffffffull;
- static const uptr kAppMemMsk = 0xfc00000000ull;
- static const uptr kAppMemXor = 0x0400000000ull;
+ static const uptr kAppMemMsk = 0xf800000000ull;
+ static const uptr kAppMemXor = 0x0800000000ull;
static const uptr kVdsoBeg = 0xfffff00000ull;
};
+
+#define TSAN_MID_APP_RANGE 1
#elif defined(__aarch64__)
// AArch64 supports multiple VMA which leads to multiple address transformation
// functions. To support these multiple VMAS transformations and mappings TSAN
@@ -121,7 +131,6 @@ struct Mapping39 {
static const uptr kMetaShadowEnd = 0x3400000000ull;
static const uptr kMidAppMemBeg = 0x5500000000ull;
static const uptr kMidAppMemEnd = 0x5600000000ull;
- static const uptr kMidShadowOff = 0x5000000000ull;
static const uptr kTraceMemBeg = 0x6000000000ull;
static const uptr kTraceMemEnd = 0x6200000000ull;
static const uptr kHeapMemBeg = 0x7c00000000ull;
@@ -157,7 +166,6 @@ struct Mapping42 {
static const uptr kMetaShadowEnd = 0x28000000000ull;
static const uptr kMidAppMemBeg = 0x2aa00000000ull;
static const uptr kMidAppMemEnd = 0x2ab00000000ull;
- static const uptr kMidShadowOff = 0x28000000000ull;
static const uptr kTraceMemBeg = 0x36200000000ull;
static const uptr kTraceMemEnd = 0x36400000000ull;
static const uptr kHeapMemBeg = 0x3e000000000ull;
@@ -169,6 +177,26 @@ struct Mapping42 {
static const uptr kVdsoBeg = 0x37f00000000ull;
};
+struct Mapping48 {
+ static const uptr kLoAppMemBeg = 0x0000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0000200000000ull;
+ static const uptr kShadowBeg = 0x0002000000000ull;
+ static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kMetaShadowBeg = 0x0005000000000ull;
+ static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
+ static const uptr kTraceMemBeg = 0x0f06000000000ull;
+ static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kHeapMemBeg = 0x0ffff00000000ull;
+ static const uptr kHeapMemEnd = 0x0ffff00000000ull;
+ static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000000ull;
+ static const uptr kAppMemMsk = 0x0fff800000000ull;
+ static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kVdsoBeg = 0xffff000000000ull;
+};
+
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
// Indicates that mapping defines a mid range memory segment.
@@ -248,7 +276,7 @@ struct Mapping46 {
#define TSAN_RUNTIME_VMA 1
#endif
-#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
+#elif SANITIZER_GO && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
0000 0000 1000 - 0000 1000 0000: executable
@@ -274,7 +302,7 @@ struct Mapping {
static const uptr kAppMemEnd = 0x00e000000000ull;
};
-#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
+#elif SANITIZER_GO && SANITIZER_WINDOWS
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
@@ -334,7 +362,7 @@ enum MappingType {
template<typename Mapping, int Type>
uptr MappingImpl(void) {
switch (Type) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
# ifdef TSAN_MID_APP_RANGE
@@ -362,11 +390,13 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MappingImpl<Mapping39, Type>();
- else
- return MappingImpl<Mapping42, Type>();
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return MappingImpl<Mapping44, Type>();
@@ -378,7 +408,7 @@ uptr MappingArchImpl(void) {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ALWAYS_INLINE
uptr LoAppMemBeg(void) {
return MappingArchImpl<MAPPING_LO_APP_BEG>();
@@ -440,7 +470,7 @@ bool GetUserRegion(int i, uptr *start, uptr *end) {
switch (i) {
default:
return false;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
case 0:
*start = LoAppMemBeg();
*end = LoAppMemEnd();
@@ -498,7 +528,7 @@ uptr TraceMemEnd(void) {
template<typename Mapping>
bool IsAppMemImpl(uptr mem) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
# ifdef TSAN_MID_APP_RANGE
(mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
@@ -513,11 +543,13 @@ bool IsAppMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return IsAppMemImpl<Mapping39>(mem);
- else
- return IsAppMemImpl<Mapping42>(mem);
+ switch (vmaSize) {
+ case 39: return IsAppMemImpl<Mapping39>(mem);
+ case 42: return IsAppMemImpl<Mapping42>(mem);
+ case 48: return IsAppMemImpl<Mapping48>(mem);
+ }
DCHECK(0);
+ return false;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return IsAppMemImpl<Mapping44>(mem);
@@ -538,11 +570,13 @@ bool IsShadowMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return IsShadowMemImpl<Mapping39>(mem);
- else
- return IsShadowMemImpl<Mapping42>(mem);
+ switch (vmaSize) {
+ case 39: return IsShadowMemImpl<Mapping39>(mem);
+ case 42: return IsShadowMemImpl<Mapping42>(mem);
+ case 48: return IsShadowMemImpl<Mapping48>(mem);
+ }
DCHECK(0);
+ return false;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return IsShadowMemImpl<Mapping44>(mem);
@@ -563,11 +597,13 @@ bool IsMetaMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return IsMetaMemImpl<Mapping39>(mem);
- else
- return IsMetaMemImpl<Mapping42>(mem);
+ switch (vmaSize) {
+ case 39: return IsMetaMemImpl<Mapping39>(mem);
+ case 42: return IsMetaMemImpl<Mapping42>(mem);
+ case 48: return IsMetaMemImpl<Mapping48>(mem);
+ }
DCHECK(0);
+ return false;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return IsMetaMemImpl<Mapping44>(mem);
@@ -583,7 +619,7 @@ bool IsMetaMem(uptr mem) {
template<typename Mapping>
uptr MemToShadowImpl(uptr x) {
DCHECK(IsAppMem(x));
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
^ Mapping::kAppMemXor) * kShadowCnt;
#else
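
A worked instance of this formula under the new x86_64 constants above (kShadowCell == 8 and kShadowCnt == 4 are this runtime's values; the sample address is arbitrary):

    constexpr unsigned long long MemToShadowX86_64(unsigned long long x) {
      return ((x & ~(0x780000000000ull | 7ull)) ^ 0x040000000000ull) * 4;
    }
    // 0x7b0000001000 (heap) -> 0x1c0000004000, inside the new shadow range
    // [0x010000000000, 0x200000000000).
    static_assert(MemToShadowX86_64(0x7b0000001000ull) == 0x1c0000004000ull,
                  "heap address maps into the shadow range");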
@@ -598,11 +634,13 @@ uptr MemToShadowImpl(uptr x) {
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MemToShadowImpl<Mapping39>(x);
- else
- return MemToShadowImpl<Mapping42>(x);
+ switch (vmaSize) {
+ case 39: return MemToShadowImpl<Mapping39>(x);
+ case 42: return MemToShadowImpl<Mapping42>(x);
+ case 48: return MemToShadowImpl<Mapping48>(x);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return MemToShadowImpl<Mapping44>(x);
@@ -618,24 +656,30 @@ uptr MemToShadow(uptr x) {
template<typename Mapping>
u32 *MemToMetaImpl(uptr x) {
DCHECK(IsAppMem(x));
-#ifndef SANITIZER_GO
- return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))
- ^ Mapping::kAppMemXor) / kMetaShadowCell * kMetaShadowSize)
- | Mapping::kMetaShadowBeg);
+#if !SANITIZER_GO
+ return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
#else
+# ifndef SANITIZER_WINDOWS
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+# else
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
+# endif
#endif
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MemToMetaImpl<Mapping39>(x);
- else
- return MemToMetaImpl<Mapping42>(x);
+ switch (vmaSize) {
+ case 39: return MemToMetaImpl<Mapping39>(x);
+ case 42: return MemToMetaImpl<Mapping42>(x);
+ case 48: return MemToMetaImpl<Mapping48>(x);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return MemToMetaImpl<Mapping44>(x);
@@ -651,18 +695,25 @@ u32 *MemToMeta(uptr x) {
template<typename Mapping>
uptr ShadowToMemImpl(uptr s) {
DCHECK(IsShadowMem(s));
-#ifndef SANITIZER_GO
- if (s >= MemToShadow(Mapping::kLoAppMemBeg)
- && s <= MemToShadow(Mapping::kLoAppMemEnd - 1))
- return (s / kShadowCnt) ^ Mapping::kAppMemXor;
+#if !SANITIZER_GO
+ // The shadow mapping is non-linear and we've lost some bits, so we don't
+ // have an easy way to restore the original app address. But the mapping is
+ // a bijection, so we try the low/mid/high app ranges in turn and accept a
+ // candidate only if the shadow->app->shadow round trip yields the same
+ // address.
+ uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
# ifdef TSAN_MID_APP_RANGE
- if (s >= MemToShadow(Mapping::kMidAppMemBeg)
- && s <= MemToShadow(Mapping::kMidAppMemEnd - 1))
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) + Mapping::kMidShadowOff;
+ p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
+ (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
+ if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
# endif
- else
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else
+ return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
+#else // #if !SANITIZER_GO
# ifndef SANITIZER_WINDOWS
return (s & ~Mapping::kShadowBeg) / kShadowCnt;
# else
@@ -674,11 +725,13 @@ uptr ShadowToMemImpl(uptr s) {
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return ShadowToMemImpl<Mapping39>(s);
- else
- return ShadowToMemImpl<Mapping42>(s);
+ switch (vmaSize) {
+ case 39: return ShadowToMemImpl<Mapping39>(s);
+ case 42: return ShadowToMemImpl<Mapping42>(s);
+ case 48: return ShadowToMemImpl<Mapping48>(s);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return ShadowToMemImpl<Mapping44>(s);
@@ -707,11 +760,13 @@ uptr GetThreadTraceImpl(int tid) {
ALWAYS_INLINE
uptr GetThreadTrace(int tid) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return GetThreadTraceImpl<Mapping39>(tid);
- else
- return GetThreadTraceImpl<Mapping42>(tid);
+ switch (vmaSize) {
+ case 39: return GetThreadTraceImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceImpl<Mapping48>(tid);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return GetThreadTraceImpl<Mapping44>(tid);
@@ -735,11 +790,13 @@ uptr GetThreadTraceHeaderImpl(int tid) {
ALWAYS_INLINE
uptr GetThreadTraceHeader(int tid) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return GetThreadTraceHeaderImpl<Mapping39>(tid);
- else
- return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ switch (vmaSize) {
+ case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return GetThreadTraceHeaderImpl<Mapping44>(tid);
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index d7182fdc1a4d..3313288a728a 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -38,6 +38,7 @@
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
+#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
@@ -67,6 +68,10 @@ extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif
+#if SANITIZER_LINUX && defined(__aarch64__)
+void InitializeGuardPtr() __attribute__((visibility("hidden")));
+#endif
+
namespace __tsan {
#ifdef TSAN_RUNTIME_VMA
@@ -93,7 +98,7 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
mem[MemShadow] += rss;
else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
@@ -129,7 +134,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
+ ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif
@@ -139,7 +144,7 @@ void FlushShadowMemory() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -201,16 +206,16 @@ void InitializeShadowMemoryPlatform() {
MapRodata();
}
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
- if (vmaSize != 39 && vmaSize != 42) {
+ if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
+ Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize);
Die();
}
#elif defined(__powerpc64__)
@@ -229,7 +234,7 @@ void InitializePlatform() {
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
// is not compiled with -pie.
- if (kCppMode) {
+ if (!SANITIZER_GO) {
bool reexec = false;
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
@@ -264,18 +269,20 @@ void InitializePlatform() {
CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
reexec = true;
}
+ // Initialize the guard pointer used in {sig}{set,long}jmp.
+ InitializeGuardPtr();
#endif
if (reexec)
ReExec();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
InitTlsSize();
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
@@ -328,11 +335,11 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
#if SANITIZER_ANDROID
#if defined(__aarch64__)
@@ -393,7 +400,7 @@ void cur_thread_finalize() {
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif // SANITIZER_ANDROID
-#endif // ifndef SANITIZER_GO
+#endif // if !SANITIZER_GO
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 0cc02ab87a3e..25dd241d826f 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -20,10 +20,12 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
+#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
@@ -42,7 +44,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
void *val = (void *)atomic_load_relaxed(a);
@@ -100,17 +102,83 @@ void cur_thread_finalize() {
}
#endif
-uptr GetShadowMemoryConsumption() {
- return 0;
+void FlushShadowMemory() {
}
-void FlushShadowMemory() {
+static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
+ vm_address_t address = start;
+ vm_address_t end_address = end;
+ uptr resident_pages = 0;
+ uptr dirty_pages = 0;
+ while (address < end_address) {
+ vm_size_t vm_region_size;
+ mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
+ vm_region_extended_info_data_t vm_region_info;
+ mach_port_t object_name;
+ kern_return_t ret = vm_region_64(
+ mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
+ (vm_region_info_t)&vm_region_info, &count, &object_name);
+ if (ret != KERN_SUCCESS) break;
+
+ resident_pages += vm_region_info.pages_resident;
+ dirty_pages += vm_region_info.pages_dirtied;
+
+ address += vm_region_size;
+ }
+ *res = resident_pages * GetPageSizeCached();
+ *dirty = dirty_pages * GetPageSizeCached();
}
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr shadow_res, shadow_dirty;
+ uptr meta_res, meta_dirty;
+ uptr trace_res, trace_dirty;
+ RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
+ RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
+ RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
+
+#if !SANITIZER_GO
+ uptr low_res, low_dirty;
+ uptr high_res, high_dirty;
+ uptr heap_res, heap_dirty;
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
+ RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
+ RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
+#else // !SANITIZER_GO
+ uptr app_res, app_dirty;
+ RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+#endif
+
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#endif
+ "stacks: %ld unique IDs, %ld kB allocated\n"
+ "threads: %ld total, %ld live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+ TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
+#if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+#else // !SANITIZER_GO
+ AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
+#endif
+ stacks->n_uniq_ids, stacks->allocated / 1024,
+ nthread, nlive);
}
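
The profile text above is assembled from adjacent C string literals, with parallel #if blocks selecting matching pieces of the format string and of the argument list. A minimal standalone sketch of the same pattern (names and values hypothetical):

  #include <cstdio>
  #define IS_GO 0  // hypothetical stand-in for SANITIZER_GO

  void print_profile(int shadow_kb, int heap_kb, int app_kb) {
    (void)heap_kb; (void)app_kb;  // one of them is unused per config
    std::printf(
        "shadow: %d kB\n"
  #if !IS_GO
        "heap:   %d kB\n"  // adjacent literals concatenate into one format
  #else
        "app:    %d kB\n"
  #endif
        , shadow_kb,
  #if !IS_GO
        heap_kb  // the argument list must mirror the format's #if branches
  #else
        app_kb
  #endif
        );
  }
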
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
// On OS X, GCD worker threads are created without a call to pthread_create. We
@@ -160,7 +228,7 @@ void InitializePlatformEarly() {
void InitializePlatform() {
DisableCoreDumperIfNecessary();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
CHECK_EQ(main_thread_identity, 0);
@@ -171,7 +239,7 @@ void InitializePlatform() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc
index 805ce1be4bde..0732c83d689d 100644
--- a/lib/tsan/rtl/tsan_platform_posix.cc
+++ b/lib/tsan/rtl/tsan_platform_posix.cc
@@ -23,7 +23,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
uptr shadow =
diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc
index c6d5058d96fc..08aa588a4c89 100644
--- a/lib/tsan/rtl/tsan_platform_windows.cc
+++ b/lib/tsan/rtl/tsan_platform_windows.cc
@@ -21,10 +21,6 @@
namespace __tsan {
-uptr GetShadowMemoryConsumption() {
- return 0;
-}
-
void FlushShadowMemory() {
}
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 93604946ac38..156876e5c22a 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -71,7 +71,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -361,7 +361,7 @@ void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
}
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
const int kMainThreadId = 1;
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 629871ef8f7a..804f3cf64ee8 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -44,7 +44,8 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#if !defined(SANITIZER_GO) && !SANITIZER_MAC
+#if !SANITIZER_GO && !SANITIZER_MAC
+__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
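
The new initial-exec TLS model pins cur_thread_placeholder at a fixed offset from the thread pointer, avoiding the dynamic __tls_get_addr path on each access. A minimal illustration of the attribute (the variable here is hypothetical, not part of the runtime):

  // Accesses compile to a direct thread-pointer offset instead of a call
  // into the dynamic linker, which matters inside hot interceptor paths.
  __attribute__((tls_model("initial-exec")))
  static __thread int tls_fast_counter;
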
@@ -86,7 +87,7 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new(mem) ThreadContext(tid);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -117,7 +118,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
// , ignore_reads_and_writes()
// , ignore_interceptors()
, clock(tid, reuse_count)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -126,13 +127,13 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_size(stk_size)
, tls_addr(tls_addr)
, tls_size(tls_size)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, last_sleep_clock(tid)
#endif
{
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
@@ -233,16 +234,17 @@ static void StopBackgroundThread() {
#endif
void DontNeedShadowFor(uptr addr, uptr size) {
- uptr shadow_beg = MemToShadow(addr);
- uptr shadow_end = MemToShadow(addr + size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}
void MapShadow(uptr addr, uptr size) {
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");
+ const uptr kPageSize = GetPageSizeCached();
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
+ MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
// Meta shadow is 2:1, so tread carefully.
static bool data_mapped = false;
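
A hedged numeric illustration of the page rounding above (4 kB pages assumed; addresses hypothetical):

  // Suppose MemToShadow(addr) == 0x10000123 and
  // MemToShadow(addr + size) == 0x10004567. Then:
  uptr shadow_begin = RoundDownTo(0x10000123, 4096);  // 0x10000000
  uptr shadow_end   = RoundUpTo(0x10004567, 4096);    // 0x10005000
  // MmapFixedNoReserve receives a page-aligned range of 0x5000 bytes,
  // which is what a fixed mapping requires.
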
@@ -287,10 +289,15 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
static void CheckShadowMapping() {
uptr beg, end;
for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
+ // Skip empty regions (e.g. the heap region on architectures that
+ // do not use the 64-bit allocator).
+ if (beg == end)
+ continue;
VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ uptr prev = 0;
for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
- for (int x = -1; x <= 1; x++) {
- const uptr p = p0 + x;
+ for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
+ const uptr p = RoundDown(p0 + x, kShadowCell);
if (p < beg || p >= end)
continue;
const uptr s = MemToShadow(p);
@@ -298,8 +305,18 @@ static void CheckShadowMapping() {
VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
CHECK(IsAppMem(p));
CHECK(IsShadowMem(s));
- CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ CHECK_EQ(p, ShadowToMem(s));
CHECK(IsMetaMem(m));
+ if (prev) {
+ // Ensure that shadow and meta mappings are linear within a single
+ // user range. Lots of code that processes memory ranges assumes this.
+ const uptr prev_s = MemToShadow(prev);
+ const uptr prev_m = (uptr)MemToMeta(prev);
+ CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
+ CHECK_EQ((m - prev_m) / kMetaShadowSize,
+ (p - prev) / kMetaShadowCell);
+ }
+ prev = p;
}
}
}
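
A worked example of that linearity invariant, assuming the usual x86_64 constants kShadowMultiplier = 4, kMetaShadowCell = 8 and kMetaShadowSize = 4:

  // For two app addresses 64 bytes apart (p - prev == 64), the checks
  // above require:
  //   shadow distance: 64 * kShadowMultiplier               == 256 bytes
  //   meta distance:   (64 / kMetaShadowCell) * kMetaShadowSize == 32 bytes
  // i.e. both mappings advance proportionally, with no folds or gaps
  // inside a single user region.
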
@@ -318,12 +335,12 @@ void Initialize(ThreadState *thr) {
SetCheckFailedCallback(TsanCheckFailed);
ctx = new(ctx_placeholder) Context;
- const char *options = GetEnv(kTsanOptionsEnv);
+ const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
CacheBinaryName();
InitializeFlags(&ctx->flags, options);
AvoidCVE_2016_2143();
InitializePlatformEarly();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -339,14 +356,14 @@ void Initialize(ThreadState *thr) {
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeShadowMemory();
InitializeAllocatorLate();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
// On MIPS, TSan initialization is run before
@@ -370,7 +387,7 @@ void Initialize(ThreadState *thr) {
#endif
ctx->initialized = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Symbolizer::LateInitialize();
#endif
@@ -396,7 +413,7 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
#endif
@@ -404,7 +421,7 @@ int Finalize(ThreadState *thr) {
if (ctx->nreported) {
failed = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -419,7 +436,7 @@ int Finalize(ThreadState *thr) {
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
@@ -434,7 +451,7 @@ int Finalize(ThreadState *thr) {
return failed ? common_flags()->exitcode : 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
@@ -467,7 +484,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) {
}
#endif
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
@@ -486,7 +503,7 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (!thr->is_inited) // May happen during bootstrap.
return 0;
if (pc != 0) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -532,7 +549,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -565,7 +582,7 @@ void HandleRace(ThreadState *thr, u64 *shadow_mem,
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
@@ -762,7 +779,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (kCppMode && *shadow_mem == kShadowRodata) {
+ if (!SANITIZER_GO && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -849,7 +866,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
+ if (SANITIZER_GO || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -935,7 +952,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -955,7 +972,7 @@ void FuncExit(ThreadState *thr) {
}
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -966,7 +983,7 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -978,17 +995,25 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
}
+#if !SANITIZER_GO
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __tsan_testonly_shadow_stack_current_size() {
+ ThreadState *thr = cur_thread();
+ return thr->shadow_stack_pos - thr->shadow_stack;
+}
+#endif
+
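A hedged sketch of how a unit test might use this new test-only hook to check that FuncEntry/FuncExit stay balanced (uptr is spelled as unsigned long here to keep the sketch standalone; the test scenario is illustrative):

  #include "gtest/gtest.h"

  extern "C" unsigned long __tsan_testonly_shadow_stack_current_size();

  TEST(ShadowStack, BalancedAcrossCalls) {
    unsigned long before = __tsan_testonly_shadow_stack_current_size();
    // ... run instrumented code that enters and leaves functions ...
    EXPECT_EQ(before, __tsan_testonly_shadow_stack_current_size());
  }
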
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -998,7 +1023,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#endif
@@ -1022,7 +1047,7 @@ void build_consistency_nostats() {}
} // namespace __tsan
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index ff69015660b6..7fcb9d48e038 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -52,7 +52,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorSpace = 0;
@@ -66,9 +66,15 @@ typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
MapUnmapCallback> PrimaryAllocator;
#else
-typedef SizeClassAllocator64<Mapping::kHeapMemBeg,
- Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0,
- DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+ static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
+ static const uptr kMetadataSize = 0;
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
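
SizeClassAllocator64 is now parameterized by a single traits struct rather than a long template argument list. A hedged sketch of configuring another 64-bit allocator in the same style (the address range is hypothetical; NoOpMapUnmapCallback comes from sanitizer_common):

  struct MyAP64 {  // Allocator64 parameters for some other tool.
    static const uptr kSpaceBeg = 0x600000000000ull;
    static const uptr kSpaceSize = 0x040000000000ull;
    static const uptr kMetadataSize = 0;
    typedef DefaultSizeClassMap SizeClassMap;
    typedef NoOpMapUnmapCallback MapUnmapCallback;
    static const uptr kFlags = 0;
  };
  typedef SizeClassAllocator64<MyAP64> MyPrimaryAllocator;
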
@@ -335,7 +341,7 @@ struct JmpBuf {
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
ThreadState *thr; // currently wired thread, or nullptr
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
#endif
@@ -345,7 +351,7 @@ struct Processor {
DDPhysicalThread *dd_pt;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very end of a thread's life, after its processor has been destroyed.
@@ -376,7 +382,7 @@ struct ThreadState {
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -389,7 +395,7 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
@@ -417,7 +423,7 @@ struct ThreadState {
// Current wired Processor, or nullptr. Required to handle any events.
Processor *proc1;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Processor *proc() { return proc1; }
#else
Processor *proc();
@@ -426,7 +432,7 @@ struct ThreadState {
atomic_uintptr_t in_signal_handler;
ThreadSignalContext *signal_ctx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -443,7 +449,7 @@ struct ThreadState {
uptr tls_addr, uptr tls_size);
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();
@@ -541,13 +547,13 @@ extern Context *ctx; // The one and the only global runtime context.
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors++;
#endif
}
~ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors--;
#endif
}
@@ -584,6 +590,7 @@ class ScopedReport {
void operator = (const ScopedReport&);
};
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
MutexSet *mset);
@@ -787,7 +794,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
@@ -799,7 +806,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
*evp = ev;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
diff --git a/lib/tsan/rtl/tsan_rtl_aarch64.S b/lib/tsan/rtl/tsan_rtl_aarch64.S
index 9cea3cf02800..ef06f0444ae4 100644
--- a/lib/tsan/rtl/tsan_rtl_aarch64.S
+++ b/lib/tsan/rtl/tsan_rtl_aarch64.S
@@ -1,6 +1,62 @@
#include "sanitizer_common/sanitizer_asm.h"
+
+.section .bss
+.type __tsan_pointer_chk_guard, %object
+.size __tsan_pointer_chk_guard, 8
+__tsan_pointer_chk_guard:
+.zero 8
+
.section .text
+// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
+// functions) by XORing them with a random guard pointer. For AArch64 it is a
+// global variable rather than a TCB one (as for x86_64/powerpc), and although
+// its value is exported by the loader, it lies within a private GLIBC
+// namespace (meaning it should only be used by GLIBC itself and the ABI is
+// not stable). So InitializeGuardPtr obtains the pointer guard value by
+// issuing a setjmp and checking the resulting pointer values against the
+// original ones.
+.hidden _Z18InitializeGuardPtrv
+.global _Z18InitializeGuardPtrv
+.type _Z18InitializeGuardPtrv, @function
+_Z18InitializeGuardPtrv:
+ CFI_STARTPROC
+ // Allocates a jmp_buf for the setjmp call.
+ stp x29, x30, [sp, -336]!
+ CFI_DEF_CFA_OFFSET (336)
+ CFI_OFFSET (29, -336)
+ CFI_OFFSET (30, -328)
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+ add x0, x29, 24
+
+ // Call libc setjmp, which mangles the stack pointer value
+ adrp x1, :got:_ZN14__interception12real__setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
+ ldr x1, [x1]
+ blr x1
+
+ // glibc setjmp mangles both the return address (LR, i.e. pc+4 after blr)
+ // and the stack pointer (SP). LR is placed at ((uintptr*)jmp_buf)[11] and
+ // SP at ((uintptr*)jmp_buf)[13].
+ // The mangle operation is just 'value' xor 'pointer guard value', so
+ // knowing both the original value (SP) and its mangled form lets us
+ // derive the guard pointer value.
+ mov x0, sp
+
+ // Loads the mangled SP pointer.
+ ldr x1, [x29, 128]
+ eor x0, x0, x1
+ adrp x2, __tsan_pointer_chk_guard
+ str x0, [x2, #:lo12:__tsan_pointer_chk_guard]
+ ldp x29, x30, [sp], 336
+ CFI_RESTORE (30)
+ CFI_RESTORE (19)
+ CFI_DEF_CFA (31, 0)
+ ret
+ CFI_ENDPROC
+.size _Z18InitializeGuardPtrv, .-_Z18InitializeGuardPtrv
+
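The same derivation in C, as a hedged sketch: the jmp_buf slot index (13) and the use of the frame address as a stand-in for the saved SP are glibc- and AArch64-specific assumptions.

  #include <setjmp.h>
  #include <stdint.h>

  static uintptr_t RecoverPointerGuard() {
    jmp_buf env;
    setjmp(env);  // glibc stores mangled = SP ^ guard into the buffer
    uintptr_t sp = (uintptr_t)__builtin_frame_address(0);  // approximates SP
    uintptr_t mangled = ((uintptr_t *)env)[13];  // SP slot on AArch64 glibc
    return mangled ^ sp;  // XOR is self-inverse, so this yields the guard
  }
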
.hidden __tsan_setjmp
.comm _ZN14__interception11real_setjmpE,8,8
.type setjmp, @function
@@ -23,10 +79,9 @@ setjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
@@ -71,10 +126,9 @@ _setjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
@@ -121,10 +175,9 @@ sigsetjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
@@ -173,10 +226,9 @@ __sigsetjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
diff --git a/lib/tsan/rtl/tsan_rtl_mips64.S b/lib/tsan/rtl/tsan_rtl_mips64.S
new file mode 100644
index 000000000000..d0f7a3f9af98
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_mips64.S
@@ -0,0 +1,214 @@
+.section .text
+.set noreorder
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+
+ // save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load the pointer to libc setjmp into $t9
+ dla $t9,(_ZN14__interception11real_setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size setjmp, .-setjmp
+
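All four trampolines in this file share one shape, summarized here as hedged pseudo-C (a genuine C wrapper is not possible, since the real setjmp must record the caller's environment, hence the frame teardown and tail jump; the two arguments are the stack pointers set up in the delay slot above):

  // int setjmp(jmp_buf env) {
  //   __tsan_setjmp(current_sp, caller_sp);  // notify tsan of the jump target
  //   return real_setjmp(env);               // tail jump; no wrapper frame
  // }
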
+.hidden __tsan_setjmp
+.globl _setjmp
+.comm _ZN14__interception12real__setjmpE,8,8
+.type _setjmp, @function
+_setjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(_setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(_setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load the pointer to libc _setjmp into $t9
+ dla $t9,(_ZN14__interception12real__setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc _setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size _setjmp, .-_setjmp
+
+.hidden __tsan_setjmp
+.globl sigsetjmp
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.type sigsetjmp, @function
+sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load the pointer to libc sigsetjmp into $t9
+ dla $t9,(_ZN14__interception14real_sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size sigsetjmp, .-sigsetjmp
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(__sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(__sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load the pointer to libc __sigsetjmp into $t9
+ dla $t9,(_ZN14__interception16real___sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc __sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size __sigsetjmp, .-__sigsetjmp
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 1806acf063bc..f3b51c30faff 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -50,7 +50,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
uptr addr, u64 mid) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
- if (kGoMode)
+ if (SANITIZER_GO)
return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
@@ -76,7 +76,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
- if (kCppMode && s->creation_stack_id == 0)
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
@@ -195,7 +195,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
bool report_bad_unlock = false;
- if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
report_bad_unlock = true;
@@ -412,7 +412,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
diff --git a/lib/tsan/rtl/tsan_rtl_proc.cc b/lib/tsan/rtl/tsan_rtl_proc.cc
index 0c838a1388f5..efccdb590ab3 100644
--- a/lib/tsan/rtl/tsan_rtl_proc.cc
+++ b/lib/tsan/rtl/tsan_rtl_proc.cc
@@ -23,7 +23,7 @@ Processor *ProcCreate() {
internal_memset(mem, 0, sizeof(Processor));
Processor *proc = new(mem) Processor;
proc->thr = nullptr;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorProcStart(proc);
#endif
if (common_flags()->detect_deadlocks)
@@ -33,7 +33,7 @@ Processor *ProcCreate() {
void ProcDestroy(Processor *proc) {
CHECK_EQ(proc->thr, nullptr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorProcFinish(proc);
#endif
ctx->clock_alloc.FlushCache(&proc->clock_cache);
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 810119b93a7a..bc8944fbfb58 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -38,6 +38,10 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
// on the other hand there is no sense in processing interceptors
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
@@ -71,7 +75,7 @@ static void StackStripMain(SymbolizedStack *frames) {
if (last_frame2 == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const char *last = last_frame->info.function;
const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
@@ -204,7 +208,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
rt->stack->suppressable = suppressable;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
int unique_id = *(int *)arg;
return tctx->unique_id == (u32)unique_id;
@@ -249,7 +253,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
#endif
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
AddThread(tctx, suppressable);
#endif
@@ -305,7 +309,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
@@ -355,7 +359,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
}
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -660,7 +664,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -689,7 +693,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace *ptrace =
new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
BufferedStackTrace();
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index ab8f3c38a960..5b17dc60bcbe 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -30,7 +30,7 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
@@ -68,8 +68,9 @@ void ThreadContext::OnCreated(void *arg) {
void ThreadContext::OnReset() {
CHECK_EQ(sync.size(), 0);
- FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
- //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
+ uptr trace_p = GetThreadTrace(tid);
+ ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
+ //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}
void ThreadContext::OnDetached(void *arg) {
@@ -94,7 +95,7 @@ void ThreadContext::OnStarted(void *arg) {
epoch1 = (u64)-1;
new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -125,6 +126,12 @@ void ThreadContext::OnStarted(void *arg) {
}
void ThreadContext::OnFinished() {
+#if SANITIZER_GO
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+#endif
if (!detached) {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
@@ -142,7 +149,7 @@ void ThreadContext::OnFinished() {
thr = 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -164,7 +171,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -196,7 +203,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
@@ -234,7 +241,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
uptr stk_size = 0;
uptr tls_addr = 0;
uptr tls_size = 0;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
@@ -265,7 +272,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;
ThreadIgnoreBegin(thr, 0);
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index aea3cb978cb1..bfb64e0018fb 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -21,7 +21,7 @@
#include "tsan_mman.h"
#include "tsan_platform.h"
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
static const char *const std_suppressions =
// Libstdc++ 4.4 has data races in std::string.
@@ -54,7 +54,7 @@ void InitializeSuppressions() {
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
suppression_ctx->Parse(__tsan_default_suppressions());
suppression_ctx->Parse(std_suppressions);
#endif
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 58b26802b0e7..44c6a26a1e8e 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -30,7 +30,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
this->next = 0;
creation_stack_id = 0;
- if (kCppMode) // Go does not use them
+ if (!SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
@@ -120,7 +120,7 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
- if (kGoMode) {
+ if (SANITIZER_GO) {
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do the optimization only for C/C++.
FreeRange(proc, p, sz);
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index 2bc2f41fbe26..86e6bbd55bac 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -50,16 +50,16 @@ struct SyncVar {
void Reset(Processor *proc);
u64 GetId() const {
- // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
- return GetLsb((u64)addr | (uid << 47), 61);
+ // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+ return GetLsb((u64)addr | (uid << 48), 60);
}
bool CheckId(u64 uid) const {
CHECK_EQ(uid, GetLsb(uid, 14));
return GetLsb(this->uid, 14) == uid;
}
static uptr SplitId(u64 id, u64 *uid) {
- *uid = id >> 47;
- return (uptr)GetLsb(id, 47);
+ *uid = id >> 48;
+ return (uptr)GetLsb(id, 48);
}
};
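
A worked round-trip of the widened packing, using the runtime's own GetLsb(x, n) (keep the n low bits); the values are hypothetical:

  u64 addr = 0x00007f1234560000ull;           // user address, fits in 48 bits
  u64 uid  = 0x2a;                            // low 14 bits identify the var
  u64 id   = GetLsb(addr | (uid << 48), 60);  // as in GetId()
  u64 got_uid   = id >> 48;                   // == 0x2a
  uptr got_addr = (uptr)GetLsb(id, 48);       // == 0x00007f1234560000
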
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 2569c7e42a47..96a18ac4101a 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -42,7 +42,7 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
#else
VarSizeStackTrace stack0;
@@ -55,7 +55,7 @@ struct TraceHeader {
struct Trace {
Mutex mtx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index d1b1e9611a78..4587e47ba904 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -12,6 +12,10 @@ set(TSAN_UNITTEST_CFLAGS
-I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
-DGTEST_HAS_RTTI=0)
+if(APPLE)
+ list(APPEND TSAN_UNITTEST_CFLAGS ${DARWIN_osx_CFLAGS})
+endif()
+
set(TSAN_RTL_HEADERS)
foreach (header ${TSAN_HEADERS})
list(APPEND TSAN_RTL_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
@@ -78,7 +82,7 @@ macro(add_tsan_unittest testname)
add_compiler_rt_test(TsanUnitTests "${testname}-${arch}-Test"
OBJECTS ${TEST_OBJECTS}
DEPS ${TEST_DEPS}
- LINK_FLAGS ${TARGET_LINK_FLAGS}
+ LINK_FLAGS ${TARGET_LINK_FLAGS} ${DARWIN_osx_LINKFLAGS}
-lc++)
endif()
endforeach()
diff --git a/lib/tsan/tests/rtl/tsan_posix.cc b/lib/tsan/tests/rtl/tsan_posix.cc
index e1a61b5e43f1..9c0e013e5735 100644
--- a/lib/tsan/tests/rtl/tsan_posix.cc
+++ b/lib/tsan/tests/rtl/tsan_posix.cc
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
+#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "gtest/gtest.h"
#include <pthread.h>
@@ -30,10 +31,10 @@ struct thread_key {
static void thread_secific_dtor(void *v) {
thread_key *k = (thread_key *)v;
- EXPECT_EQ(pthread_mutex_lock(k->mtx), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(k->mtx), 0);
(*k->cnt)++;
__tsan_write4(&k->cnt);
- EXPECT_EQ(pthread_mutex_unlock(k->mtx), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(k->mtx), 0);
if (k->val == 42) {
// Okay.
} else if (k->val == 43 || k->val == 44) {
@@ -55,22 +56,22 @@ TEST(Posix, ThreadSpecificDtors) {
pthread_key_t key;
EXPECT_EQ(pthread_key_create(&key, thread_secific_dtor), 0);
pthread_mutex_t mtx;
- EXPECT_EQ(pthread_mutex_init(&mtx, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_init(&mtx, 0), 0);
pthread_t th[3];
thread_key k1 = thread_key(key, &mtx, 42, &cnt);
thread_key k2 = thread_key(key, &mtx, 43, &cnt);
thread_key k3 = thread_key(key, &mtx, 44, &cnt);
- EXPECT_EQ(pthread_create(&th[0], 0, dtors_thread, &k1), 0);
- EXPECT_EQ(pthread_create(&th[1], 0, dtors_thread, &k2), 0);
- EXPECT_EQ(pthread_join(th[0], 0), 0);
- EXPECT_EQ(pthread_create(&th[2], 0, dtors_thread, &k3), 0);
- EXPECT_EQ(pthread_join(th[1], 0), 0);
- EXPECT_EQ(pthread_join(th[2], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th[0], 0, dtors_thread, &k1), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th[1], 0, dtors_thread, &k2), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[0], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th[2], 0, dtors_thread, &k3), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[1], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[2], 0), 0);
EXPECT_EQ(pthread_key_delete(key), 0);
EXPECT_EQ(6, cnt);
}
-#ifndef __aarch64__
+#if !defined(__aarch64__) && !defined(__APPLE__)
static __thread int local_var;
static void *local_thread(void *p) {
@@ -81,10 +82,10 @@ static void *local_thread(void *p) {
const int kThreads = 4;
pthread_t th[kThreads];
for (int i = 0; i < kThreads; i++)
- EXPECT_EQ(pthread_create(&th[i], 0, local_thread,
+ EXPECT_EQ(__interceptor_pthread_create(&th[i], 0, local_thread,
(void*)((long)p - 1)), 0); // NOLINT
for (int i = 0; i < kThreads; i++)
- EXPECT_EQ(pthread_join(th[i], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[i], 0), 0);
return 0;
}
#endif
@@ -92,7 +93,9 @@ static void *local_thread(void *p) {
TEST(Posix, ThreadLocalAccesses) {
// The test is failing with high thread count for aarch64.
// FIXME: track down the issue and re-enable the test.
-#ifndef __aarch64__
+// On Darwin, we run the unit tests without interceptors, and __thread storage
+// is allocated with malloc and free, which causes false data race reports.
+#if !defined(__aarch64__) && !defined(__APPLE__)
local_thread((void*)2);
#endif
}
@@ -106,46 +109,46 @@ struct CondContext {
static void *cond_thread(void *p) {
CondContext &ctx = *static_cast<CondContext*>(p);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
EXPECT_EQ(ctx.data, 0);
ctx.data = 1;
- EXPECT_EQ(pthread_cond_signal(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_signal(&ctx.c), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 2)
- EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
ctx.data = 3;
EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
return 0;
}
TEST(Posix, CondBasic) {
CondContext ctx;
- EXPECT_EQ(pthread_mutex_init(&ctx.m, 0), 0);
- EXPECT_EQ(pthread_cond_init(&ctx.c, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_init(&ctx.m, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_init(&ctx.c, 0), 0);
ctx.data = 0;
pthread_t th;
- EXPECT_EQ(pthread_create(&th, 0, cond_thread, &ctx), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th, 0, cond_thread, &ctx), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 1)
- EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
ctx.data = 2;
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 3)
- EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
- EXPECT_EQ(pthread_join(th, 0), 0);
- EXPECT_EQ(pthread_cond_destroy(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_destroy(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_destroy(&ctx.c), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_destroy(&ctx.m), 0);
}
diff --git a/lib/tsan/tests/rtl/tsan_posix_util.h b/lib/tsan/tests/rtl/tsan_posix_util.h
new file mode 100644
index 000000000000..340693ebb8a0
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_posix_util.h
@@ -0,0 +1,77 @@
+//===-- tsan_posix_util.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Test POSIX utils.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_POSIX_UTIL_H
+#define TSAN_POSIX_UTIL_H
+
+#include <pthread.h>
+
+#ifdef __APPLE__
+#define __interceptor_memcpy wrap_memcpy
+#define __interceptor_memset wrap_memset
+#define __interceptor_pthread_create wrap_pthread_create
+#define __interceptor_pthread_join wrap_pthread_join
+#define __interceptor_pthread_detach wrap_pthread_detach
+#define __interceptor_pthread_mutex_init wrap_pthread_mutex_init
+#define __interceptor_pthread_mutex_lock wrap_pthread_mutex_lock
+#define __interceptor_pthread_mutex_unlock wrap_pthread_mutex_unlock
+#define __interceptor_pthread_mutex_destroy wrap_pthread_mutex_destroy
+#define __interceptor_pthread_mutex_trylock wrap_pthread_mutex_trylock
+#define __interceptor_pthread_rwlock_init wrap_pthread_rwlock_init
+#define __interceptor_pthread_rwlock_destroy wrap_pthread_rwlock_destroy
+#define __interceptor_pthread_rwlock_trywrlock wrap_pthread_rwlock_trywrlock
+#define __interceptor_pthread_rwlock_wrlock wrap_pthread_rwlock_wrlock
+#define __interceptor_pthread_rwlock_unlock wrap_pthread_rwlock_unlock
+#define __interceptor_pthread_rwlock_rdlock wrap_pthread_rwlock_rdlock
+#define __interceptor_pthread_rwlock_tryrdlock wrap_pthread_rwlock_tryrdlock
+#define __interceptor_pthread_cond_init wrap_pthread_cond_init
+#define __interceptor_pthread_cond_signal wrap_pthread_cond_signal
+#define __interceptor_pthread_cond_broadcast wrap_pthread_cond_broadcast
+#define __interceptor_pthread_cond_wait wrap_pthread_cond_wait
+#define __interceptor_pthread_cond_destroy wrap_pthread_cond_destroy
+#endif
+
+extern "C" void *__interceptor_memcpy(void *, const void *, uptr);
+extern "C" void *__interceptor_memset(void *, int, uptr);
+extern "C" int __interceptor_pthread_create(pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg);
+extern "C" int __interceptor_pthread_join(pthread_t thread, void **value_ptr);
+extern "C" int __interceptor_pthread_detach(pthread_t thread);
+
+extern "C" int __interceptor_pthread_mutex_init(
+ pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
+extern "C" int __interceptor_pthread_mutex_lock(pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_mutex_unlock(pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_mutex_destroy(pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_mutex_trylock(pthread_mutex_t *mutex);
+
+extern "C" int __interceptor_pthread_rwlock_init(
+ pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
+extern "C" int __interceptor_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
+
+extern "C" int __interceptor_pthread_cond_init(pthread_cond_t *cond,
+ const pthread_condattr_t *attr);
+extern "C" int __interceptor_pthread_cond_signal(pthread_cond_t *cond);
+extern "C" int __interceptor_pthread_cond_broadcast(pthread_cond_t *cond);
+extern "C" int __interceptor_pthread_cond_wait(pthread_cond_t *cond,
+ pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_cond_destroy(pthread_cond_t *cond);
+
+#endif // #ifndef TSAN_POSIX_UTIL_H
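
A hedged usage sketch: test code always spells the __interceptor_ name, and on Darwin the macros above retarget it to the wrap_ symbol that the runtime actually exports:

  #include <pthread.h>
  #include "tsan_posix_util.h"

  static int LockedIncrement(pthread_mutex_t *m, int *counter) {
    if (__interceptor_pthread_mutex_lock(m) != 0)  // wrap_... on Darwin
      return -1;
    ++*counter;
    return __interceptor_pthread_mutex_unlock(m);
  }
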
diff --git a/lib/tsan/tests/rtl/tsan_test_util_posix.cc b/lib/tsan/tests/rtl/tsan_test_util_posix.cc
index c8be088d266f..834a271aa8c0 100644
--- a/lib/tsan/tests/rtl/tsan_test_util_posix.cc
+++ b/lib/tsan/tests/rtl/tsan_test_util_posix.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
+#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "tsan_report.h"
@@ -33,52 +34,6 @@ static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;
-#ifdef __APPLE__
-#define __interceptor_memcpy wrap_memcpy
-#define __interceptor_memset wrap_memset
-#define __interceptor_pthread_create wrap_pthread_create
-#define __interceptor_pthread_join wrap_pthread_join
-#define __interceptor_pthread_detach wrap_pthread_detach
-#define __interceptor_pthread_mutex_init wrap_pthread_mutex_init
-#define __interceptor_pthread_mutex_lock wrap_pthread_mutex_lock
-#define __interceptor_pthread_mutex_unlock wrap_pthread_mutex_unlock
-#define __interceptor_pthread_mutex_destroy wrap_pthread_mutex_destroy
-#define __interceptor_pthread_mutex_trylock wrap_pthread_mutex_trylock
-#define __interceptor_pthread_rwlock_init wrap_pthread_rwlock_init
-#define __interceptor_pthread_rwlock_destroy wrap_pthread_rwlock_destroy
-#define __interceptor_pthread_rwlock_trywrlock wrap_pthread_rwlock_trywrlock
-#define __interceptor_pthread_rwlock_wrlock wrap_pthread_rwlock_wrlock
-#define __interceptor_pthread_rwlock_unlock wrap_pthread_rwlock_unlock
-#define __interceptor_pthread_rwlock_rdlock wrap_pthread_rwlock_rdlock
-#define __interceptor_pthread_rwlock_tryrdlock wrap_pthread_rwlock_tryrdlock
-#endif
-
-extern "C" void *__interceptor_memcpy(void *, const void *, uptr);
-extern "C" void *__interceptor_memset(void *, int, uptr);
-extern "C" int __interceptor_pthread_create(pthread_t *thread,
- const pthread_attr_t *attr,
- void *(*start_routine)(void *),
- void *arg);
-extern "C" int __interceptor_pthread_join(pthread_t thread, void **value_ptr);
-extern "C" int __interceptor_pthread_detach(pthread_t thread);
-
-extern "C" int __interceptor_pthread_mutex_init(
- pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
-extern "C" int __interceptor_pthread_mutex_lock(pthread_mutex_t *mutex);
-extern "C" int __interceptor_pthread_mutex_unlock(pthread_mutex_t *mutex);
-extern "C" int __interceptor_pthread_mutex_destroy(pthread_mutex_t *mutex);
-extern "C" int __interceptor_pthread_mutex_trylock(pthread_mutex_t *mutex);
-
-extern "C" int __interceptor_pthread_rwlock_init(
- pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
-extern "C" int __interceptor_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
-
-
static void *BeforeInitThread(void *param) {
(void)param;
return 0;
@@ -105,11 +60,11 @@ bool OnReport(const ReportDesc *rep, bool suppressed) {
if (rep->typ != expect_report_type) {
printf("Expected report of type %d, got type %d\n",
(int)expect_report_type, (int)rep->typ);
- EXPECT_FALSE("Wrong report type");
+ EXPECT_TRUE(false) << "Wrong report type";
return false;
}
} else {
- EXPECT_FALSE("Unexpected report");
+ EXPECT_TRUE(false) << "Unexpected report";
return false;
}
expect_report_reported = true;
@@ -368,7 +323,7 @@ void ScopedThread::Impl::HandleEvent(Event *ev) {
}
if (expect_report && !expect_report_reported) {
printf("Missed expected report of type %d\n", (int)ev->report_type);
- EXPECT_FALSE("Missed expected race");
+ EXPECT_TRUE(false) << "Missed expected race";
}
expect_report = false;
}
diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc
index 609141c59294..60dea3d43240 100644
--- a/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -53,9 +53,9 @@ TEST(Mman, UserRealloc) {
uptr pc = 0;
{
void *p = user_realloc(thr, pc, 0, 0);
- // Strictly saying this is incorrect, realloc(NULL, N) is equivalent to
- // malloc(N), thus must return non-NULL pointer.
- EXPECT_EQ(p, (void*)0);
+ // Realloc(NULL, N) is equivalent to malloc(N) and thus must return a
+ // non-NULL pointer.
+ EXPECT_NE(p, (void*)0);
}
{
void *p = user_realloc(thr, pc, 0, 100);
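
For reference, the libc contract these updated expectations mirror, as a hedged standalone sketch (realloc(p, 0) is implementation-defined; tsan's allocator now returns a valid pointer rather than NULL):

  #include <cstdlib>

  int main() {
    void *a = std::realloc(nullptr, 16);  // equivalent to malloc(16)
    void *b = std::realloc(a, 0);         // implementation-defined result
    std::free(b);                         // free(NULL) would also be a no-op
    return 0;
  }
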
@@ -68,7 +68,7 @@ TEST(Mman, UserRealloc) {
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
void *p2 = user_realloc(thr, pc, p, 0);
- EXPECT_EQ(p2, (void*)0);
+ EXPECT_NE(p2, (void*)0);
}
{
void *p = user_realloc(thr, pc, 0, 100);