diff options
Diffstat (limited to 'lib/tsan/rtl')
49 files changed, 3907 insertions, 1038 deletions
diff --git a/lib/tsan/rtl/CMakeLists.txt b/lib/tsan/rtl/CMakeLists.txt new file mode 100644 index 000000000000..d91e2e43ca4c --- /dev/null +++ b/lib/tsan/rtl/CMakeLists.txt @@ -0,0 +1,58 @@ +set(TSAN_SOURCES + tsan_clock.cc + tsan_flags.cc + tsan_fd.cc + tsan_interceptors.cc + tsan_interface_ann.cc + tsan_interface_atomic.cc + tsan_interface.cc + tsan_interface_java.cc + tsan_md5.cc + tsan_mman.cc + tsan_mutex.cc + tsan_mutexset.cc + tsan_report.cc + tsan_rtl.cc + tsan_rtl_mutex.cc + tsan_rtl_report.cc + tsan_rtl_thread.cc + tsan_stat.cc + tsan_suppressions.cc + tsan_symbolize.cc + tsan_sync.cc + ) + +if(APPLE) + list(APPEND TSAN_SOURCES tsan_platform_mac.cc) +elseif(UNIX) + # Assume Linux + list(APPEND TSAN_SOURCES + tsan_platform_linux.cc + tsan_symbolize_addr2line_linux.cc) +endif() + +set(TSAN_RUNTIME_LIBRARIES) +# TSan is currently supported on 64-bit Linux only. +if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE) + set(TSAN_ASM_SOURCES tsan_rtl_amd64.S) + # Pass ASM file directly to the C++ compiler. + set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES + LANGUAGE C + ) + add_library(clang_rt.tsan-x86_64 STATIC + ${TSAN_SOURCES} + ${TSAN_ASM_SOURCES} + $<TARGET_OBJECTS:RTInterception.x86_64> + $<TARGET_OBJECTS:RTSanitizerCommon.x86_64> + ) + set_target_compile_flags(clang_rt.tsan-x86_64 + ${TSAN_CFLAGS} ${TARGET_x86_64_CFLAGS} + ) + list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-x86_64) +endif() + +if(TSAN_RUNTIME_LIBRARIES) + set_property(TARGET ${TSAN_RUNTIME_LIBRARIES} APPEND PROPERTY + COMPILE_DEFINITIONS ${TSAN_COMMON_DEFINITIONS}) + add_clang_compiler_rt_libraries(${TSAN_RUNTIME_LIBRARIES}) +endif() diff --git a/lib/tsan/rtl/Makefile.mk b/lib/tsan/rtl/Makefile.mk index d5d6327b5435..a6a7fc8b86e8 100644 --- a/lib/tsan/rtl/Makefile.mk +++ b/lib/tsan/rtl/Makefile.mk @@ -18,6 +18,8 @@ Implementation := Generic # FIXME: use automatic dependencies? 
Dependencies := $(wildcard $(Dir)/*.h) +Dependencies += $(wildcard $(Dir)/../../interception/*.h) +Dependencies += $(wildcard $(Dir)/../../interception/mach_override/*.h) # Define a convenience variable for all the tsan functions. TsanFunctions += $(Sources:%.cc=%) $(AsmSources:%.S=%) diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old index 9b79f576d4aa..f522ec6b47d7 100644 --- a/lib/tsan/rtl/Makefile.old +++ b/lib/tsan/rtl/Makefile.old @@ -1,12 +1,15 @@ -CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG) +CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG) ifeq ($(DEBUG), 0) - CXXFLAGS += -O3 + CXXFLAGS += -O3 +endif +ifeq ($(CXX), clang++) + CXXFLAGS+= -Wgnu endif # For interception. FIXME: move interception one level higher. INTERCEPTION=../../interception COMMON=../../sanitizer_common -INCLUDES= -I../.. +INCLUDES= -I../.. -I../../../include EXTRA_CXXFLAGS=-fno-exceptions NO_SYSROOT=--sysroot=. CXXFLAGS+=$(EXTRA_CXXFLAGS) diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc index 32ed91dc2103..f8745ec3200c 100644 --- a/lib/tsan/rtl/tsan_clock.cc +++ b/lib/tsan/rtl/tsan_clock.cc @@ -105,13 +105,6 @@ void ThreadClock::acq_rel(SyncClock *dst) { release(dst); } -void ThreadClock::Disable(unsigned tid) { - u64 c0 = clk_[tid]; - for (uptr i = 0; i < kMaxTidInClock; i++) - clk_[i] = (u64)-1; - clk_[tid] = c0; -} - SyncClock::SyncClock() : clk_(MBlockClock) { } diff --git a/lib/tsan/rtl/tsan_clock.h b/lib/tsan/rtl/tsan_clock.h index 02ddb9abdb30..0ee93749b881 100644 --- a/lib/tsan/rtl/tsan_clock.h +++ b/lib/tsan/rtl/tsan_clock.h @@ -61,8 +61,6 @@ struct ThreadClock { nclk_ = tid + 1; } - void Disable(unsigned tid); - uptr size() const { return nclk_; } diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h index ca8f0aecc83a..e0c04733f0a3 100644 --- a/lib/tsan/rtl/tsan_defs.h +++ b/lib/tsan/rtl/tsan_defs.h @@ -24,31 +24,45 @@ namespace __tsan { +#ifdef TSAN_GO +const 
bool kGoMode = true; +const bool kCppMode = false; +const char *const kTsanOptionsEnv = "GORACE"; +#else +const bool kGoMode = false; +const bool kCppMode = true; +const char *const kTsanOptionsEnv = "TSAN_OPTIONS"; +#endif + const int kTidBits = 13; const unsigned kMaxTid = 1 << kTidBits; const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. const int kClkBits = 43; #ifndef TSAN_GO -const int kShadowStackSize = 1024; +const int kShadowStackSize = 4 * 1024; +const int kTraceStackSize = 256; #endif #ifdef TSAN_SHADOW_COUNT # if TSAN_SHADOW_COUNT == 2 \ || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8 -const unsigned kShadowCnt = TSAN_SHADOW_COUNT; +const uptr kShadowCnt = TSAN_SHADOW_COUNT; # else # error "TSAN_SHADOW_COUNT must be one of 2,4,8" # endif #else // Count of shadow values in a shadow cell. -const unsigned kShadowCnt = 8; +const uptr kShadowCnt = 4; #endif // That many user bytes are mapped onto a single shadow cell. -const unsigned kShadowCell = 8; +const uptr kShadowCell = 8; // Size of a single shadow value (u64). -const unsigned kShadowSize = 8; +const uptr kShadowSize = 8; + +// Shadow memory is kShadowMultiplier times larger than user memory. +const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell; #if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS const bool kCollectStats = true; @@ -114,11 +128,23 @@ T max(T a, T b) { } template<typename T> -T RoundUp(T p, int align) { +T RoundUp(T p, u64 align) { DCHECK_EQ(align & (align - 1), 0); return (T)(((u64)p + align - 1) & ~(align - 1)); } +template<typename T> +T RoundDown(T p, u64 align) { + DCHECK_EQ(align & (align - 1), 0); + return (T)((u64)p & ~(align - 1)); +} + +// Zeroizes high part, returns 'bits' lsb bits. 
+template<typename T> +T GetLsb(T v, int bits) { + return (T)((u64)v & ((1ull << bits) - 1)); +} + struct MD5Hash { u64 hash[2]; bool operator==(const MD5Hash &other) const; @@ -133,6 +159,7 @@ struct ReportStack; class ReportDesc; class RegionAlloc; class StackTrace; +struct MBlock; } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc new file mode 100644 index 000000000000..ef375a4d98f6 --- /dev/null +++ b/lib/tsan/rtl/tsan_fd.cc @@ -0,0 +1,265 @@ +//===-- tsan_fd.cc --------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "tsan_fd.h" +#include "tsan_rtl.h" +#include <sanitizer_common/sanitizer_atomic.h> + +namespace __tsan { + +const int kTableSizeL1 = 1024; +const int kTableSizeL2 = 1024; +const int kTableSize = kTableSizeL1 * kTableSizeL2; + +struct FdSync { + atomic_uint64_t rc; +}; + +struct FdDesc { + FdSync *sync; + int creation_tid; + u32 creation_stack; +}; + +struct FdContext { + atomic_uintptr_t tab[kTableSizeL1]; + // Addresses used for synchronization. 
+ FdSync globsync; + FdSync filesync; + FdSync socksync; + u64 connectsync; +}; + +static FdContext fdctx; + +static FdSync *allocsync() { + FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync)); + atomic_store(&s->rc, 1, memory_order_relaxed); + return s; +} + +static FdSync *ref(FdSync *s) { + if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) + atomic_fetch_add(&s->rc, 1, memory_order_relaxed); + return s; +} + +static void unref(ThreadState *thr, uptr pc, FdSync *s) { + if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) { + if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) { + CHECK_NE(s, &fdctx.globsync); + CHECK_NE(s, &fdctx.filesync); + CHECK_NE(s, &fdctx.socksync); + SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s); + if (v) + DestroyAndFree(v); + internal_free(s); + } + } +} + +static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) { + CHECK_LT(fd, kTableSize); + atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2]; + uptr l1 = atomic_load(pl1, memory_order_consume); + if (l1 == 0) { + uptr size = kTableSizeL2 * sizeof(FdDesc); + void *p = internal_alloc(MBlockFD, size); + internal_memset(p, 0, size); + MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size); + if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel)) + l1 = (uptr)p; + else + internal_free(p); + } + return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT +} + +// pd must be already ref'ed. +static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) { + FdDesc *d = fddesc(thr, pc, fd); + // As a matter of fact, we don't intercept all close calls. + // See e.g. libc __res_iclose(). 
+ if (d->sync) { + unref(thr, pc, d->sync); + d->sync = 0; + } + if (flags()->io_sync == 0) { + unref(thr, pc, s); + } else if (flags()->io_sync == 1) { + d->sync = s; + } else if (flags()->io_sync == 2) { + unref(thr, pc, s); + d->sync = &fdctx.globsync; + } + d->creation_tid = thr->tid; + d->creation_stack = CurrentStackId(thr, pc); + // To catch races between fd usage and open. + MemoryRangeImitateWrite(thr, pc, (uptr)d, 8); +} + +void FdInit() { + atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed); + atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed); + atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed); +} + +void FdOnFork(ThreadState *thr, uptr pc) { + // On fork() we need to reset all fd's, because the child is going + // close all them, and that will cause races between previous read/write + // and the close. + for (int l1 = 0; l1 < kTableSizeL1; l1++) { + FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed); + if (tab == 0) + break; + for (int l2 = 0; l2 < kTableSizeL2; l2++) { + FdDesc *d = &tab[l2]; + MemoryResetRange(thr, pc, (uptr)d, 8); + } + } +} + +bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) { + for (int l1 = 0; l1 < kTableSizeL1; l1++) { + FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed); + if (tab == 0) + break; + if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) { + int l2 = (addr - (uptr)tab) / sizeof(FdDesc); + FdDesc *d = &tab[l2]; + *fd = l1 * kTableSizeL1 + l2; + *tid = d->creation_tid; + *stack = d->creation_stack; + return true; + } + } + return false; +} + +void FdAcquire(ThreadState *thr, uptr pc, int fd) { + FdDesc *d = fddesc(thr, pc, fd); + FdSync *s = d->sync; + DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s); + MemoryRead8Byte(thr, pc, (uptr)d); + if (s) + Acquire(thr, pc, (uptr)s); +} + +void FdRelease(ThreadState *thr, uptr pc, int fd) { + FdDesc *d = fddesc(thr, pc, fd); + FdSync *s = d->sync; + DPrintf("#%d: 
FdRelease(%d) -> %p\n", thr->tid, fd, s); + if (s) + Release(thr, pc, (uptr)s); + MemoryRead8Byte(thr, pc, (uptr)d); +} + +void FdAccess(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd); + FdDesc *d = fddesc(thr, pc, fd); + MemoryRead8Byte(thr, pc, (uptr)d); +} + +void FdClose(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdClose(%d)\n", thr->tid, fd); + FdDesc *d = fddesc(thr, pc, fd); + // To catch races between fd usage and close. + MemoryWrite8Byte(thr, pc, (uptr)d); + // We need to clear it, because if we do not intercept any call out there + // that creates fd, we will hit false positives. + MemoryResetRange(thr, pc, (uptr)d, 8); + unref(thr, pc, d->sync); + d->sync = 0; + d->creation_tid = 0; + d->creation_stack = 0; +} + +void FdFileCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd); + init(thr, pc, fd, &fdctx.filesync); +} + +void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) { + DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd); + // Ignore the case when user dups not yet connected socket. 
+ FdDesc *od = fddesc(thr, pc, oldfd); + MemoryRead8Byte(thr, pc, (uptr)od); + FdClose(thr, pc, newfd); + init(thr, pc, newfd, ref(od->sync)); +} + +void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) { + DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd); + FdSync *s = allocsync(); + init(thr, pc, rfd, ref(s)); + init(thr, pc, wfd, ref(s)); + unref(thr, pc, s); +} + +void FdEventCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd); + init(thr, pc, fd, allocsync()); +} + +void FdSignalCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd); + init(thr, pc, fd, 0); +} + +void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd); + init(thr, pc, fd, 0); +} + +void FdPollCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd); + init(thr, pc, fd, allocsync()); +} + +void FdSocketCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd); + // It can be a UDP socket. + init(thr, pc, fd, &fdctx.socksync); +} + +void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) { + DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd); + // Synchronize connect->accept. + Acquire(thr, pc, (uptr)&fdctx.connectsync); + init(thr, pc, newfd, &fdctx.socksync); +} + +void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd); + // Synchronize connect->accept. 
+ Release(thr, pc, (uptr)&fdctx.connectsync); +} + +void FdSocketConnect(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd); + init(thr, pc, fd, &fdctx.socksync); +} + +uptr File2addr(char *path) { + (void)path; + static u64 addr; + return (uptr)&addr; +} + +uptr Dir2addr(char *path) { + (void)path; + static u64 addr; + return (uptr)&addr; +} + +} // namespace __tsan diff --git a/lib/tsan/rtl/tsan_fd.h b/lib/tsan/rtl/tsan_fd.h new file mode 100644 index 000000000000..979198e2e74d --- /dev/null +++ b/lib/tsan/rtl/tsan_fd.h @@ -0,0 +1,65 @@ +//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// This file handles synchronization via IO. +// People use IO for synchronization along the lines of: +// +// int X; +// int client_socket; // initialized elsewhere +// int server_socket; // initialized elsewhere +// +// Thread 1: +// X = 42; +// send(client_socket, ...); +// +// Thread 2: +// if (recv(server_socket, ...) > 0) +// assert(X == 42); +// +// This file determines the scope of the file descriptor (pipe, socket, +// all local files, etc) and executes acquire and release operations on +// the scope as necessary. Some scopes are very fine grained (e.g. pipe +// operations synchronize only with operations on the same pipe), while +// others are corse-grained (e.g. all operations on local files synchronize +// with each other). 
+//===----------------------------------------------------------------------===// +#ifndef TSAN_FD_H +#define TSAN_FD_H + +#include "tsan_rtl.h" + +namespace __tsan { + +void FdInit(); +void FdAcquire(ThreadState *thr, uptr pc, int fd); +void FdRelease(ThreadState *thr, uptr pc, int fd); +void FdAccess(ThreadState *thr, uptr pc, int fd); +void FdClose(ThreadState *thr, uptr pc, int fd); +void FdFileCreate(ThreadState *thr, uptr pc, int fd); +void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd); +void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd); +void FdEventCreate(ThreadState *thr, uptr pc, int fd); +void FdSignalCreate(ThreadState *thr, uptr pc, int fd); +void FdInotifyCreate(ThreadState *thr, uptr pc, int fd); +void FdPollCreate(ThreadState *thr, uptr pc, int fd); +void FdSocketCreate(ThreadState *thr, uptr pc, int fd); +void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd); +void FdSocketConnecting(ThreadState *thr, uptr pc, int fd); +void FdSocketConnect(ThreadState *thr, uptr pc, int fd); +bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack); +void FdOnFork(ThreadState *thr, uptr pc); + +uptr File2addr(char *path); +uptr Dir2addr(char *path); + +} // namespace __tsan + +#endif // TSAN_INTERFACE_H diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc index 8f91939db1de..88c4bb6a2e44 100644 --- a/lib/tsan/rtl/tsan_flags.cc +++ b/lib/tsan/rtl/tsan_flags.cc @@ -27,6 +27,7 @@ Flags *flags() { #ifdef TSAN_EXTERNAL_HOOKS void OverrideFlags(Flags *f); #else +SANITIZER_INTERFACE_ATTRIBUTE void WEAK OverrideFlags(Flags *f) { (void)f; } @@ -39,20 +40,25 @@ void InitializeFlags(Flags *f, const char *env) { f->enable_annotations = true; f->suppress_equal_stacks = true; f->suppress_equal_addresses = true; + f->suppress_java = false; + f->report_bugs = true; f->report_thread_leaks = true; + f->report_destroy_locked = true; f->report_signal_unsafe = true; f->force_seq_cst_atomics = false; f->strip_path_prefix = ""; 
f->suppressions = ""; f->exitcode = 66; - f->log_fileno = 2; + f->log_path = "stderr"; f->atexit_sleep_ms = 1000; f->verbosity = 0; f->profile_memory = ""; f->flush_memory_ms = 0; f->stop_on_start = false; f->running_on_valgrind = false; - f->use_internal_symbolizer = false; + f->external_symbolizer_path = ""; + f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go. + f->io_sync = 1; // Let a frontend override. OverrideFlags(f); @@ -61,19 +67,42 @@ void InitializeFlags(Flags *f, const char *env) { ParseFlag(env, &f->enable_annotations, "enable_annotations"); ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks"); ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses"); + ParseFlag(env, &f->suppress_java, "suppress_java"); + ParseFlag(env, &f->report_bugs, "report_bugs"); ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks"); + ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked"); ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe"); ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics"); ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix"); ParseFlag(env, &f->suppressions, "suppressions"); ParseFlag(env, &f->exitcode, "exitcode"); - ParseFlag(env, &f->log_fileno, "log_fileno"); + ParseFlag(env, &f->log_path, "log_path"); ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms"); ParseFlag(env, &f->verbosity, "verbosity"); ParseFlag(env, &f->profile_memory, "profile_memory"); ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms"); ParseFlag(env, &f->stop_on_start, "stop_on_start"); - ParseFlag(env, &f->use_internal_symbolizer, "use_internal_symbolizer"); + ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path"); + ParseFlag(env, &f->history_size, "history_size"); + ParseFlag(env, &f->io_sync, "io_sync"); + + if (!f->report_bugs) { + f->report_thread_leaks = false; + f->report_destroy_locked = false; + f->report_signal_unsafe = false; + } + + if 
(f->history_size < 0 || f->history_size > 7) { + Printf("ThreadSanitizer: incorrect value for history_size" + " (must be [0..7])\n"); + Die(); + } + + if (f->io_sync < 0 || f->io_sync > 2) { + Printf("ThreadSanitizer: incorrect value for io_sync" + " (must be [0..2])\n"); + Die(); + } } } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h index c22132f2d32f..6547911ec7a3 100644 --- a/lib/tsan/rtl/tsan_flags.h +++ b/lib/tsan/rtl/tsan_flags.h @@ -31,8 +31,15 @@ struct Flags { // Supress a race report if we've already output another race report // on the same address. bool suppress_equal_addresses; + // Suppress weird race reports that can be seen if JVM is embed + // into the process. + bool suppress_java; + // Turns off bug reporting entirely (useful for benchmarking). + bool report_bugs; // Report thread leaks at exit? bool report_thread_leaks; + // Report destruction of a locked mutex? + bool report_destroy_locked; // Report violations of async signal-safety // (e.g. malloc() call from a signal handler). bool report_signal_unsafe; @@ -45,8 +52,10 @@ struct Flags { const char *suppressions; // Override exit status if something was reported. int exitcode; - // Log fileno (1 - stdout, 2 - stderr). - int log_fileno; + // Write logs to "log_path.pid". + // The special values are "stdout" and "stderr". + // The default is "stderr". + const char *log_path; // Sleep in main thread before exiting for that many ms // (useful to catch "at exit" races). int atexit_sleep_ms; @@ -60,8 +69,19 @@ struct Flags { bool stop_on_start; // Controls whether RunningOnValgrind() returns true or false. bool running_on_valgrind; - // If set, uses in-process symbolizer from common sanitizer runtime. - bool use_internal_symbolizer; + // Path to external symbolizer. + const char *external_symbolizer_path; + // Per-thread history size, controls how many previous memory accesses + // are remembered per thread. Possible values are [0..7]. 
+ // history_size=0 amounts to 32K memory accesses. Each next value doubles + // the amount of memory accesses, up to history_size=7 that amounts to + // 4M memory accesses. The default value is 2 (128K memory accesses). + int history_size; + // Controls level of synchronization implied by IO operations. + // 0 - no synchronization + // 1 - reasonable level of synchronization (write->read) + // 2 - global synchronization of all IO operations + int io_sync; }; Flags *flags(); diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc index a962250568b7..be58ca92cf91 100644 --- a/lib/tsan/rtl/tsan_interceptors.cc +++ b/lib/tsan/rtl/tsan_interceptors.cc @@ -1,4 +1,4 @@ -//===-- tsan_interceptors_linux.cc ----------------------------------------===// +//===-- tsan_interceptors.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -9,16 +9,20 @@ // // This file is a part of ThreadSanitizer (TSan), a race detector. // +// FIXME: move as many interceptors as possible into +// sanitizer_common/sanitizer_common_interceptors.h //===----------------------------------------------------------------------===// -#include "interception/interception.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_placement_new.h" -#include "tsan_rtl.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "interception/interception.h" #include "tsan_interface.h" #include "tsan_platform.h" +#include "tsan_rtl.h" #include "tsan_mman.h" +#include "tsan_fd.h" using namespace __tsan; // NOLINT @@ -51,7 +55,7 @@ extern "C" void *pthread_self(); extern "C" void _exit(int status); extern "C" int __cxa_atexit(void (*func)(void *arg), void *arg, void *dso); extern "C" int *__errno_location(); -extern "C" int usleep(unsigned usec); +extern "C" int fileno_unlocked(void *stream); const int PTHREAD_MUTEX_RECURSIVE = 1; const int 
PTHREAD_MUTEX_RECURSIVE_NP = 1; const int kPthreadAttrSize = 56; @@ -69,6 +73,12 @@ const int PTHREAD_BARRIER_SERIAL_THREAD = -1; const int MAP_FIXED = 0x10; typedef long long_t; // NOLINT +// From /usr/include/unistd.h +# define F_ULOCK 0 /* Unlock a previously locked region. */ +# define F_LOCK 1 /* Lock a region for exclusive use. */ +# define F_TLOCK 2 /* Test and lock a region for exclusive use. */ +# define F_TEST 3 /* Test a region for other processes locks. */ + typedef void (*sighandler_t)(int sig); #define errno (*__errno_location()) @@ -94,6 +104,10 @@ const sighandler_t SIG_ERR = (sighandler_t)-1; const int SA_SIGINFO = 4; const int SIG_SETMASK = 2; +namespace std { +struct nothrow_t {}; +} // namespace std + static sigaction_t sigactions[kSigCount]; namespace __tsan { @@ -105,6 +119,7 @@ struct SignalDesc { }; struct SignalContext { + int in_blocking_func; int int_signal_send; int pending_signal_count; SignalDesc pending_signals[kSigCount]; @@ -115,10 +130,8 @@ static SignalContext *SigCtx(ThreadState *thr) { SignalContext *ctx = (SignalContext*)thr->signal_ctx; if (ctx == 0 && thr->is_alive) { ScopedInRtl in_rtl; - ctx = (SignalContext*)internal_alloc( - MBlockSignal, sizeof(*ctx)); - MemoryResetRange(thr, 0, (uptr)ctx, sizeof(*ctx)); - internal_memset(ctx, 0, sizeof(*ctx)); + ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext"); + MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx)); thr->signal_ctx = ctx; } return ctx; @@ -126,60 +139,55 @@ static SignalContext *SigCtx(ThreadState *thr) { static unsigned g_thread_finalize_key; -static void process_pending_signals(ThreadState *thr); - class ScopedInterceptor { public: - ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc) - : thr_(thr) - , in_rtl_(thr->in_rtl) { - if (thr_->in_rtl == 0) { - Initialize(thr); - FuncEntry(thr, pc); - thr_->in_rtl++; - DPrintf("#%d: intercept %s()\n", thr_->tid, fname); - } else { - thr_->in_rtl++; - } - } - - ~ScopedInterceptor() { - 
thr_->in_rtl--; - if (thr_->in_rtl == 0) { - FuncExit(thr_); - process_pending_signals(thr_); - } - CHECK_EQ(in_rtl_, thr_->in_rtl); - } - + ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc); + ~ScopedInterceptor(); private: ThreadState *const thr_; const int in_rtl_; }; +ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname, + uptr pc) + : thr_(thr) + , in_rtl_(thr->in_rtl) { + if (thr_->in_rtl == 0) { + Initialize(thr); + FuncEntry(thr, pc); + thr_->in_rtl++; + DPrintf("#%d: intercept %s()\n", thr_->tid, fname); + } else { + thr_->in_rtl++; + } +} + +ScopedInterceptor::~ScopedInterceptor() { + thr_->in_rtl--; + if (thr_->in_rtl == 0) { + FuncExit(thr_); + ProcessPendingSignals(thr_); + } + CHECK_EQ(in_rtl_, thr_->in_rtl); +} + #define SCOPED_INTERCEPTOR_RAW(func, ...) \ ThreadState *thr = cur_thread(); \ StatInc(thr, StatInterceptor); \ StatInc(thr, StatInt_##func); \ - ScopedInterceptor si(thr, #func, \ - (__sanitizer::uptr)__builtin_return_address(0)); \ - const uptr pc = (uptr)&func; \ + const uptr caller_pc = GET_CALLER_PC(); \ + ScopedInterceptor si(thr, #func, caller_pc); \ + const uptr pc = __sanitizer::StackTrace::GetPreviousInstructionPc( \ + __sanitizer::StackTrace::GetCurrentPc()); \ (void)pc; \ /**/ #define SCOPED_TSAN_INTERCEPTOR(func, ...) \ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ - if (thr->in_rtl > 1) \ - return REAL(func)(__VA_ARGS__); \ -/**/ - -#define SCOPED_INTERCEPTOR_LIBC(func, ...) \ - ThreadState *thr = cur_thread(); \ - StatInc(thr, StatInterceptor); \ - StatInc(thr, StatInt_##func); \ - ScopedInterceptor si(thr, #func, callpc); \ - const uptr pc = (uptr)&func; \ - (void)pc; \ + if (REAL(func) == 0) { \ + Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \ + Die(); \ + } \ if (thr->in_rtl > 1) \ return REAL(func)(__VA_ARGS__); \ /**/ @@ -187,30 +195,40 @@ class ScopedInterceptor { #define TSAN_INTERCEPTOR(ret, func, ...) 
INTERCEPTOR(ret, func, __VA_ARGS__) #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) -// May be overriden by front-end. -extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} +#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name)) -extern "C" void WEAK __tsan_free_hook(void *ptr) { - (void)ptr; +struct BlockingCall { + explicit BlockingCall(ThreadState *thr) + : ctx(SigCtx(thr)) { + ctx->in_blocking_func++; + } + + ~BlockingCall() { + ctx->in_blocking_func--; + } + + SignalContext *ctx; +}; + +TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) { + SCOPED_TSAN_INTERCEPTOR(sleep, sec); + unsigned res = BLOCK_REAL(sleep)(sec); + AfterSleep(thr, pc); + return res; } -static void invoke_malloc_hook(void *ptr, uptr size) { - Context *ctx = CTX(); - ThreadState *thr = cur_thread(); - if (ctx == 0 || !ctx->initialized || thr->in_rtl) - return; - __tsan_malloc_hook(ptr, size); +TSAN_INTERCEPTOR(int, usleep, long_t usec) { + SCOPED_TSAN_INTERCEPTOR(usleep, usec); + int res = BLOCK_REAL(usleep)(usec); + AfterSleep(thr, pc); + return res; } -static void invoke_free_hook(void *ptr) { - Context *ctx = CTX(); - ThreadState *thr = cur_thread(); - if (ctx == 0 || !ctx->initialized || thr->in_rtl) - return; - __tsan_free_hook(ptr); +TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) { + SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem); + int res = BLOCK_REAL(nanosleep)(req, rem); + AfterSleep(thr, pc); + return res; } class AtExitContext { @@ -269,7 +287,6 @@ static void finalize(void *arg) { { ScopedInRtl in_rtl; DestroyAndFree(atexit_ctx); - usleep(flags()->atexit_sleep_ms * 1000); } int status = Finalize(cur_thread()); if (status) @@ -279,45 +296,20 @@ static void finalize(void *arg) { TSAN_INTERCEPTOR(int, atexit, void (*f)()) { SCOPED_TSAN_INTERCEPTOR(atexit, f); return atexit_ctx->atexit(thr, pc, f); - return 0; } TSAN_INTERCEPTOR(void, longjmp, void *env, int val) { SCOPED_TSAN_INTERCEPTOR(longjmp, env, val); - 
TsanPrintf("ThreadSanitizer: longjmp() is not supported\n"); + Printf("ThreadSanitizer: longjmp() is not supported\n"); Die(); } TSAN_INTERCEPTOR(void, siglongjmp, void *env, int val) { SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val); - TsanPrintf("ThreadSanitizer: siglongjmp() is not supported\n"); + Printf("ThreadSanitizer: siglongjmp() is not supported\n"); Die(); } -static uptr fd2addr(int fd) { - (void)fd; - static u64 addr; - return (uptr)&addr; -} - -static uptr epollfd2addr(int fd) { - (void)fd; - static u64 addr; - return (uptr)&addr; -} - -static uptr file2addr(char *path) { - (void)path; - static u64 addr; - return (uptr)&addr; -} - -static uptr dir2addr(char *path) { - (void)path; - static u64 addr; - return (uptr)&addr; -} - TSAN_INTERCEPTOR(void*, malloc, uptr size) { void *p = 0; { @@ -328,12 +320,17 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) { return p; } +TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { + SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz); + return user_alloc(thr, pc, sz, align); +} + TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { void *p = 0; { SCOPED_INTERCEPTOR_RAW(calloc, size, n); p = user_alloc(thr, pc, n * size); - internal_memset(p, 0, n * size); + if (p) internal_memset(p, 0, n * size); } invoke_malloc_hook(p, n * size); return p; @@ -366,6 +363,47 @@ TSAN_INTERCEPTOR(void, cfree, void *p) { user_free(thr, pc, p); } +#define OPERATOR_NEW_BODY(mangled_name) \ + void *p = 0; \ + { \ + SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ + p = user_alloc(thr, pc, size); \ + } \ + invoke_malloc_hook(p, size); \ + return p; + +void *operator new(__sanitizer::uptr size) { + OPERATOR_NEW_BODY(_Znwm); +} +void *operator new[](__sanitizer::uptr size) { + OPERATOR_NEW_BODY(_Znam); +} +void *operator new(__sanitizer::uptr size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t); +} +void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t); +} + 
+#define OPERATOR_DELETE_BODY(mangled_name) \ + if (ptr == 0) return; \ + invoke_free_hook(ptr); \ + SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \ + user_free(thr, pc, ptr); + +void operator delete(void *ptr) { + OPERATOR_DELETE_BODY(_ZdlPv); +} +void operator delete[](void *ptr) { + OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t); +} +void operator delete(void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(_ZdaPv); +} +void operator delete[](void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t); +} + TSAN_INTERCEPTOR(uptr, strlen, const char *s) { SCOPED_TSAN_INTERCEPTOR(strlen, s); uptr len = internal_strlen(s); @@ -536,137 +574,61 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { return res; } -#ifdef __LP64__ - -// void *operator new(size_t) -TSAN_INTERCEPTOR(void*, _Znwm, uptr sz) { - void *p = 0; - { - SCOPED_TSAN_INTERCEPTOR(_Znwm, sz); - p = user_alloc(thr, pc, sz); - } - invoke_malloc_hook(p, sz); - return p; -} - -// void *operator new(size_t, nothrow_t) -TSAN_INTERCEPTOR(void*, _ZnwmRKSt9nothrow_t, uptr sz) { - void *p = 0; - { - SCOPED_TSAN_INTERCEPTOR(_ZnwmRKSt9nothrow_t, sz); - p = user_alloc(thr, pc, sz); - } - invoke_malloc_hook(p, sz); - return p; -} - -// void *operator new[](size_t) -TSAN_INTERCEPTOR(void*, _Znam, uptr sz) { - void *p = 0; - { - SCOPED_TSAN_INTERCEPTOR(_Znam, sz); - p = user_alloc(thr, pc, sz); - } - invoke_malloc_hook(p, sz); - return p; -} - -// void *operator new[](size_t, nothrow_t) -TSAN_INTERCEPTOR(void*, _ZnamRKSt9nothrow_t, uptr sz) { - void *p = 0; - { - SCOPED_TSAN_INTERCEPTOR(_ZnamRKSt9nothrow_t, sz); - p = user_alloc(thr, pc, sz); - } - invoke_malloc_hook(p, sz); - return p; -} - -#else -#error "Not implemented" -#endif - -// void operator delete(void*) -TSAN_INTERCEPTOR(void, _ZdlPv, void *p) { - if (p == 0) - return; - invoke_free_hook(p); - SCOPED_TSAN_INTERCEPTOR(_ZdlPv, p); - user_free(thr, pc, p); -} - -// void operator delete(void*, nothrow_t) -TSAN_INTERCEPTOR(void, 
_ZdlPvRKSt9nothrow_t, void *p) { - if (p == 0) - return; - invoke_free_hook(p); - SCOPED_TSAN_INTERCEPTOR(_ZdlPvRKSt9nothrow_t, p); - user_free(thr, pc, p); -} - -// void operator delete[](void*) -TSAN_INTERCEPTOR(void, _ZdaPv, void *p) { - if (p == 0) - return; - invoke_free_hook(p); - SCOPED_TSAN_INTERCEPTOR(_ZdaPv, p); - user_free(thr, pc, p); -} - -// void operator delete[](void*, nothrow_t) -TSAN_INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *p) { - if (p == 0) - return; - invoke_free_hook(p); - SCOPED_TSAN_INTERCEPTOR(_ZdaPvRKSt9nothrow_t, p); - user_free(thr, pc, p); -} - TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) { SCOPED_TSAN_INTERCEPTOR(memalign, align, sz); - return user_alloc_aligned(thr, pc, sz, align); + return user_alloc(thr, pc, sz, align); } TSAN_INTERCEPTOR(void*, valloc, uptr sz) { SCOPED_TSAN_INTERCEPTOR(valloc, sz); - return user_alloc_aligned(thr, pc, sz, kPageSize); + return user_alloc(thr, pc, sz, GetPageSizeCached()); } TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) { SCOPED_TSAN_INTERCEPTOR(pvalloc, sz); - sz = RoundUp(sz, kPageSize); - return user_alloc_aligned(thr, pc, sz, kPageSize); + sz = RoundUp(sz, GetPageSizeCached()); + return user_alloc(thr, pc, sz, GetPageSizeCached()); } TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) { SCOPED_TSAN_INTERCEPTOR(posix_memalign, memptr, align, sz); - *memptr = user_alloc_aligned(thr, pc, sz, align); + *memptr = user_alloc(thr, pc, sz, align); return 0; } // Used in thread-safe function static initialization. -TSAN_INTERCEPTOR(int, __cxa_guard_acquire, char *m) { - SCOPED_TSAN_INTERCEPTOR(__cxa_guard_acquire, m); - int res = REAL(__cxa_guard_acquire)(m); - if (res) { - // This thread does the init. 
- } else { - Acquire(thr, pc, (uptr)m); +extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) { + SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g); + for (;;) { + u32 cmp = atomic_load(g, memory_order_acquire); + if (cmp == 0) { + if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed)) + return 1; + } else if (cmp == 1) { + Acquire(thr, pc, (uptr)g); + return 0; + } else { + internal_sched_yield(); + } } - return res; } -TSAN_INTERCEPTOR(void, __cxa_guard_release, char *m) { - SCOPED_TSAN_INTERCEPTOR(__cxa_guard_release, m); - Release(thr, pc, (uptr)m); - REAL(__cxa_guard_release)(m); +extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_release(atomic_uint32_t *g) { + SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g); + Release(thr, pc, (uptr)g); + atomic_store(g, 1, memory_order_release); +} + +extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) { + SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g); + atomic_store(g, 0, memory_order_relaxed); } static void thread_finalize(void *v) { uptr iter = (uptr)v; if (iter > 1) { if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { - TsanPrintf("ThreadSanitizer: failed to set thread key\n"); + Printf("ThreadSanitizer: failed to set thread key\n"); Die(); } return; @@ -678,7 +640,7 @@ static void thread_finalize(void *v) { SignalContext *sctx = thr->signal_ctx; if (sctx) { thr->signal_ctx = 0; - internal_free(sctx); + UnmapOrDie(sctx, sizeof(*sctx)); } } } @@ -699,13 +661,13 @@ extern "C" void *__tsan_thread_start_func(void *arg) { ThreadState *thr = cur_thread(); ScopedInRtl in_rtl; if (pthread_setspecific(g_thread_finalize_key, (void*)4)) { - TsanPrintf("ThreadSanitizer: failed to set thread key\n"); + Printf("ThreadSanitizer: failed to set thread key\n"); Die(); } while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) pthread_yield(); atomic_store(&p->tid, 0, memory_order_release); - ThreadStart(thr, tid); + ThreadStart(thr, tid, GetTid()); 
CHECK_EQ(thr->in_rtl, 1); } void *res = callback(param); @@ -740,7 +702,7 @@ TSAN_INTERCEPTOR(int, pthread_create, atomic_store(&p.tid, 0, memory_order_relaxed); int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p); if (res == 0) { - int tid = ThreadCreate(cur_thread(), pc, *(uptr*)th, detached); + int tid = ThreadCreate(thr, pc, *(uptr*)th, detached); CHECK_NE(tid, 0); atomic_store(&p.tid, tid, memory_order_release); while (atomic_load(&p.tid, memory_order_acquire) != 0) @@ -754,9 +716,9 @@ TSAN_INTERCEPTOR(int, pthread_create, TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) { SCOPED_TSAN_INTERCEPTOR(pthread_join, th, ret); int tid = ThreadTid(thr, pc, (uptr)th); - int res = REAL(pthread_join)(th, ret); + int res = BLOCK_REAL(pthread_join)(th, ret); if (res == 0) { - ThreadJoin(cur_thread(), pc, tid); + ThreadJoin(thr, pc, tid); } return res; } @@ -766,7 +728,7 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) { int tid = ThreadTid(thr, pc, (uptr)th); int res = REAL(pthread_detach)(th); if (res == 0) { - ThreadDetach(cur_thread(), pc, tid); + ThreadDetach(thr, pc, tid); } return res; } @@ -782,7 +744,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) { recursive = (type == PTHREAD_MUTEX_RECURSIVE || type == PTHREAD_MUTEX_RECURSIVE_NP); } - MutexCreate(cur_thread(), pc, (uptr)m, false, recursive); + MutexCreate(thr, pc, (uptr)m, false, recursive, false); } return res; } @@ -791,7 +753,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m); int res = REAL(pthread_mutex_destroy)(m); if (res == 0 || res == EBUSY) { - MutexDestroy(cur_thread(), pc, (uptr)m); + MutexDestroy(thr, pc, (uptr)m); } return res; } @@ -800,7 +762,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m); int res = REAL(pthread_mutex_lock)(m); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } @@ 
-809,7 +771,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m); int res = REAL(pthread_mutex_trylock)(m); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } @@ -818,14 +780,14 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime); int res = REAL(pthread_mutex_timedlock)(m, abstime); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m); - MutexUnlock(cur_thread(), pc, (uptr)m); + MutexUnlock(thr, pc, (uptr)m); int res = REAL(pthread_mutex_unlock)(m); return res; } @@ -834,7 +796,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared); int res = REAL(pthread_spin_init)(m, pshared); if (res == 0) { - MutexCreate(cur_thread(), pc, (uptr)m, false, false); + MutexCreate(thr, pc, (uptr)m, false, false, false); } return res; } @@ -843,7 +805,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m); int res = REAL(pthread_spin_destroy)(m); if (res == 0) { - MutexDestroy(cur_thread(), pc, (uptr)m); + MutexDestroy(thr, pc, (uptr)m); } return res; } @@ -852,7 +814,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m); int res = REAL(pthread_spin_lock)(m); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } @@ -861,14 +823,14 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m); int res = REAL(pthread_spin_trylock)(m); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_spin_unlock, 
void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m); - MutexUnlock(cur_thread(), pc, (uptr)m); + MutexUnlock(thr, pc, (uptr)m); int res = REAL(pthread_spin_unlock)(m); return res; } @@ -877,7 +839,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a); int res = REAL(pthread_rwlock_init)(m, a); if (res == 0) { - MutexCreate(cur_thread(), pc, (uptr)m, true, false); + MutexCreate(thr, pc, (uptr)m, true, false, false); } return res; } @@ -886,7 +848,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m); int res = REAL(pthread_rwlock_destroy)(m); if (res == 0) { - MutexDestroy(cur_thread(), pc, (uptr)m); + MutexDestroy(thr, pc, (uptr)m); } return res; } @@ -895,7 +857,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m); int res = REAL(pthread_rwlock_rdlock)(m); if (res == 0) { - MutexReadLock(cur_thread(), pc, (uptr)m); + MutexReadLock(thr, pc, (uptr)m); } return res; } @@ -904,7 +866,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m); int res = REAL(pthread_rwlock_tryrdlock)(m); if (res == 0) { - MutexReadLock(cur_thread(), pc, (uptr)m); + MutexReadLock(thr, pc, (uptr)m); } return res; } @@ -913,7 +875,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime); int res = REAL(pthread_rwlock_timedrdlock)(m, abstime); if (res == 0) { - MutexReadLock(cur_thread(), pc, (uptr)m); + MutexReadLock(thr, pc, (uptr)m); } return res; } @@ -922,7 +884,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m); int res = REAL(pthread_rwlock_wrlock)(m); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } @@ -931,7 +893,7 @@ TSAN_INTERCEPTOR(int, 
pthread_rwlock_trywrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m); int res = REAL(pthread_rwlock_trywrlock)(m); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } @@ -940,23 +902,27 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime); int res = REAL(pthread_rwlock_timedwrlock)(m, abstime); if (res == 0) { - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); } return res; } TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m); - MutexReadOrWriteUnlock(cur_thread(), pc, (uptr)m); + MutexReadOrWriteUnlock(thr, pc, (uptr)m); int res = REAL(pthread_rwlock_unlock)(m); return res; } +// libpthread.so contains several versions of pthread_cond_init symbol. +// When we just dlsym() it, we get the wrong (old) version. +/* TSAN_INTERCEPTOR(int, pthread_cond_init, void *c, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, c, a); int res = REAL(pthread_cond_init)(c, a); return res; } +*/ TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) { SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c); @@ -978,17 +944,17 @@ TSAN_INTERCEPTOR(int, pthread_cond_broadcast, void *c) { TSAN_INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, c, m); - MutexUnlock(cur_thread(), pc, (uptr)m); + MutexUnlock(thr, pc, (uptr)m); int res = REAL(pthread_cond_wait)(c, m); - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); return res; } TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime); - MutexUnlock(cur_thread(), pc, (uptr)m); + MutexUnlock(thr, pc, (uptr)m); int res = REAL(pthread_cond_timedwait)(c, m, abstime); - MutexLock(cur_thread(), pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); return res; } @@ -1008,12 
+974,12 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) { TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b); - Release(cur_thread(), pc, (uptr)b); + Release(thr, pc, (uptr)b); MemoryRead1Byte(thr, pc, (uptr)b); int res = REAL(pthread_barrier_wait)(b); MemoryRead1Byte(thr, pc, (uptr)b); if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) { - Acquire(cur_thread(), pc, (uptr)b); + Acquire(thr, pc, (uptr)b); } return res; } @@ -1031,14 +997,14 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { (*f)(); CHECK_EQ(thr->in_rtl, 0); thr->in_rtl = old_in_rtl; - Release(cur_thread(), pc, (uptr)o); + Release(thr, pc, (uptr)o); atomic_store(a, 2, memory_order_release); } else { while (v != 2) { pthread_yield(); v = atomic_load(a, memory_order_acquire); } - Acquire(cur_thread(), pc, (uptr)o); + Acquire(thr, pc, (uptr)o); } return 0; } @@ -1057,34 +1023,34 @@ TSAN_INTERCEPTOR(int, sem_destroy, void *s) { TSAN_INTERCEPTOR(int, sem_wait, void *s) { SCOPED_TSAN_INTERCEPTOR(sem_wait, s); - int res = REAL(sem_wait)(s); + int res = BLOCK_REAL(sem_wait)(s); if (res == 0) { - Acquire(cur_thread(), pc, (uptr)s); + Acquire(thr, pc, (uptr)s); } return res; } TSAN_INTERCEPTOR(int, sem_trywait, void *s) { SCOPED_TSAN_INTERCEPTOR(sem_trywait, s); - int res = REAL(sem_trywait)(s); + int res = BLOCK_REAL(sem_trywait)(s); if (res == 0) { - Acquire(cur_thread(), pc, (uptr)s); + Acquire(thr, pc, (uptr)s); } return res; } TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) { SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime); - int res = REAL(sem_timedwait)(s, abstime); + int res = BLOCK_REAL(sem_timedwait)(s, abstime); if (res == 0) { - Acquire(cur_thread(), pc, (uptr)s); + Acquire(thr, pc, (uptr)s); } return res; } TSAN_INTERCEPTOR(int, sem_post, void *s) { SCOPED_TSAN_INTERCEPTOR(sem_post, s); - Release(cur_thread(), pc, (uptr)s); + Release(thr, pc, (uptr)s); int res = REAL(sem_post)(s); return res; } @@ 
-1093,43 +1059,193 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) { SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval); int res = REAL(sem_getvalue)(s, sval); if (res == 0) { - Acquire(cur_thread(), pc, (uptr)s); + Acquire(thr, pc, (uptr)s); } return res; } -TSAN_INTERCEPTOR(long_t, read, int fd, void *buf, long_t sz) { - SCOPED_TSAN_INTERCEPTOR(read, fd, buf, sz); - int res = REAL(read)(fd, buf, sz); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); - } +TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { + SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode); + int fd = REAL(open)(name, flags, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) { + SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode); + int fd = REAL(open64)(name, flags, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, creat, const char *name, int mode) { + SCOPED_TSAN_INTERCEPTOR(creat, name, mode); + int fd = REAL(creat)(name, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) { + SCOPED_TSAN_INTERCEPTOR(creat64, name, mode); + int fd = REAL(creat64)(name, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, dup, int oldfd) { + SCOPED_TSAN_INTERCEPTOR(dup, oldfd); + int newfd = REAL(dup)(oldfd); + if (oldfd >= 0 && newfd >= 0 && newfd != oldfd) + FdDup(thr, pc, oldfd, newfd); + return newfd; +} + +TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) { + SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd); + int newfd2 = REAL(dup2)(oldfd, newfd); + if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) + FdDup(thr, pc, oldfd, newfd2); + return newfd2; +} + +TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) { + SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags); + int newfd2 = REAL(dup3)(oldfd, newfd, flags); + if (oldfd >= 0 && 
newfd2 >= 0 && newfd2 != oldfd) + FdDup(thr, pc, oldfd, newfd2); + return newfd2; +} + +TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) { + SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags); + int fd = REAL(eventfd)(initval, flags); + if (fd >= 0) + FdEventCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) { + SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags); + if (fd >= 0) + FdClose(thr, pc, fd); + fd = REAL(signalfd)(fd, mask, flags); + if (fd >= 0) + FdSignalCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, inotify_init, int fake) { + SCOPED_TSAN_INTERCEPTOR(inotify_init, fake); + int fd = REAL(inotify_init)(fake); + if (fd >= 0) + FdInotifyCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, inotify_init1, int flags) { + SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags); + int fd = REAL(inotify_init1)(flags); + if (fd >= 0) + FdInotifyCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) { + SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol); + int fd = REAL(socket)(domain, type, protocol); + if (fd >= 0) + FdSocketCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) { + SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd); + int res = REAL(socketpair)(domain, type, protocol, fd); + if (res == 0 && fd[0] >= 0 && fd[1] >= 0) + FdPipeCreate(thr, pc, fd[0], fd[1]); return res; } -TSAN_INTERCEPTOR(long_t, pread, int fd, void *buf, long_t sz, unsigned off) { - SCOPED_TSAN_INTERCEPTOR(pread, fd, buf, sz, off); - int res = REAL(pread)(fd, buf, sz, off); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); - } +TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) { + SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen); + FdSocketConnecting(thr, pc, fd); + int res = REAL(connect)(fd, addr, addrlen); + if (res == 0 && fd >= 0) + 
FdSocketConnect(thr, pc, fd); return res; } -TSAN_INTERCEPTOR(long_t, pread64, int fd, void *buf, long_t sz, u64 off) { - SCOPED_TSAN_INTERCEPTOR(pread64, fd, buf, sz, off); - int res = REAL(pread64)(fd, buf, sz, off); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); - } +TSAN_INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) { + SCOPED_TSAN_INTERCEPTOR(accept, fd, addr, addrlen); + int fd2 = REAL(accept)(fd, addr, addrlen); + if (fd >= 0 && fd2 >= 0) + FdSocketAccept(thr, pc, fd, fd2); + return fd2; +} + +TSAN_INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) { + SCOPED_TSAN_INTERCEPTOR(accept4, fd, addr, addrlen, f); + int fd2 = REAL(accept4)(fd, addr, addrlen, f); + if (fd >= 0 && fd2 >= 0) + FdSocketAccept(thr, pc, fd, fd2); + return fd2; +} + +TSAN_INTERCEPTOR(int, epoll_create, int size) { + SCOPED_TSAN_INTERCEPTOR(epoll_create, size); + int fd = REAL(epoll_create)(size); + if (fd >= 0) + FdPollCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, epoll_create1, int flags) { + SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags); + int fd = REAL(epoll_create1)(flags); + if (fd >= 0) + FdPollCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, close, int fd) { + SCOPED_TSAN_INTERCEPTOR(close, fd); + if (fd >= 0) + FdClose(thr, pc, fd); + return REAL(close)(fd); +} + +TSAN_INTERCEPTOR(int, __close, int fd) { + SCOPED_TSAN_INTERCEPTOR(__close, fd); + if (fd >= 0) + FdClose(thr, pc, fd); + return REAL(__close)(fd); +} + +TSAN_INTERCEPTOR(int, pipe, int *pipefd) { + SCOPED_TSAN_INTERCEPTOR(pipe, pipefd); + int res = REAL(pipe)(pipefd); + if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) + FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); + return res; +} + +TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) { + SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags); + int res = REAL(pipe2)(pipefd, flags); + if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) + FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); return res; } 
TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) { SCOPED_TSAN_INTERCEPTOR(readv, fd, vec, cnt); int res = REAL(readv)(fd, vec, cnt); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); + if (res >= 0 && fd >= 0) { + FdAcquire(thr, pc, fd); } return res; } @@ -1137,57 +1253,40 @@ TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) { TSAN_INTERCEPTOR(long_t, preadv64, int fd, void *vec, int cnt, u64 off) { SCOPED_TSAN_INTERCEPTOR(preadv64, fd, vec, cnt, off); int res = REAL(preadv64)(fd, vec, cnt, off); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); + if (res >= 0 && fd >= 0) { + FdAcquire(thr, pc, fd); } return res; } -TSAN_INTERCEPTOR(long_t, write, int fd, void *buf, long_t sz) { - SCOPED_TSAN_INTERCEPTOR(write, fd, buf, sz); - Release(cur_thread(), pc, fd2addr(fd)); - int res = REAL(write)(fd, buf, sz); - return res; -} - -TSAN_INTERCEPTOR(long_t, pwrite, int fd, void *buf, long_t sz, unsigned off) { - SCOPED_TSAN_INTERCEPTOR(pwrite, fd, buf, sz, off); - Release(cur_thread(), pc, fd2addr(fd)); - int res = REAL(pwrite)(fd, buf, sz, off); - return res; -} - -TSAN_INTERCEPTOR(long_t, pwrite64, int fd, void *buf, long_t sz, unsigned off) { - SCOPED_TSAN_INTERCEPTOR(pwrite64, fd, buf, sz, off); - Release(cur_thread(), pc, fd2addr(fd)); - int res = REAL(pwrite64)(fd, buf, sz, off); - return res; -} - TSAN_INTERCEPTOR(long_t, writev, int fd, void *vec, int cnt) { SCOPED_TSAN_INTERCEPTOR(writev, fd, vec, cnt); - Release(cur_thread(), pc, fd2addr(fd)); + if (fd >= 0) + FdRelease(thr, pc, fd); int res = REAL(writev)(fd, vec, cnt); return res; } TSAN_INTERCEPTOR(long_t, pwritev64, int fd, void *vec, int cnt, u64 off) { SCOPED_TSAN_INTERCEPTOR(pwritev64, fd, vec, cnt, off); - Release(cur_thread(), pc, fd2addr(fd)); + if (fd >= 0) + FdRelease(thr, pc, fd); int res = REAL(pwritev64)(fd, vec, cnt, off); return res; } TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) { SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, 
flags); - Release(cur_thread(), pc, fd2addr(fd)); + if (fd >= 0) + FdRelease(thr, pc, fd); int res = REAL(send)(fd, buf, len, flags); return res; } TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) { SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags); - Release(cur_thread(), pc, fd2addr(fd)); + if (fd >= 0) + FdRelease(thr, pc, fd); int res = REAL(sendmsg)(fd, msg, flags); return res; } @@ -1195,8 +1294,8 @@ TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) { TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) { SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags); int res = REAL(recv)(fd, buf, len, flags); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); + if (res >= 0 && fd >= 0) { + FdAcquire(thr, pc, fd); } return res; } @@ -1204,15 +1303,15 @@ TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) { TSAN_INTERCEPTOR(long_t, recvmsg, int fd, void *msg, int flags) { SCOPED_TSAN_INTERCEPTOR(recvmsg, fd, msg, flags); int res = REAL(recvmsg)(fd, msg, flags); - if (res >= 0) { - Acquire(cur_thread(), pc, fd2addr(fd)); + if (res >= 0 && fd >= 0) { + FdAcquire(thr, pc, fd); } return res; } TSAN_INTERCEPTOR(int, unlink, char *path) { SCOPED_TSAN_INTERCEPTOR(unlink, path); - Release(cur_thread(), pc, file2addr(path)); + Release(thr, pc, File2addr(path)); int res = REAL(unlink)(path); return res; } @@ -1220,19 +1319,57 @@ TSAN_INTERCEPTOR(int, unlink, char *path) { TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) { SCOPED_TSAN_INTERCEPTOR(fopen, path, mode); void *res = REAL(fopen)(path, mode); - Acquire(cur_thread(), pc, file2addr(path)); + Acquire(thr, pc, File2addr(path)); + if (res) { + int fd = fileno_unlocked(res); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + } return res; } +TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) { + SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream); + if (stream) { + int fd = fileno_unlocked(stream); + if (fd >= 0) + FdClose(thr, 
pc, fd); + } + void *res = REAL(freopen)(path, mode, stream); + Acquire(thr, pc, File2addr(path)); + if (res) { + int fd = fileno_unlocked(res); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + } + return res; +} + +TSAN_INTERCEPTOR(int, fclose, void *stream) { + { + SCOPED_TSAN_INTERCEPTOR(fclose, stream); + if (stream) { + int fd = fileno_unlocked(stream); + if (fd >= 0) + FdClose(thr, pc, fd); + } + } + return REAL(fclose)(stream); +} + TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) { - SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f); - MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true); + { + SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f); + MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true); + } return REAL(fread)(ptr, size, nmemb, f); } TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) { - SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f); - MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false); + { + SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f); + MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false); + } return REAL(fwrite)(p, size, nmemb, f); } @@ -1244,7 +1381,7 @@ TSAN_INTERCEPTOR(int, puts, const char *s) { TSAN_INTERCEPTOR(int, rmdir, char *path) { SCOPED_TSAN_INTERCEPTOR(rmdir, path); - Release(cur_thread(), pc, dir2addr(path)); + Release(thr, pc, Dir2addr(path)); int res = REAL(rmdir)(path); return res; } @@ -1252,28 +1389,37 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) { TSAN_INTERCEPTOR(void*, opendir, char *path) { SCOPED_TSAN_INTERCEPTOR(opendir, path); void *res = REAL(opendir)(path); - Acquire(cur_thread(), pc, dir2addr(path)); + if (res != 0) + Acquire(thr, pc, Dir2addr(path)); return res; } TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev); - if (op == EPOLL_CTL_ADD) { - Release(cur_thread(), pc, epollfd2addr(epfd)); + if (op == EPOLL_CTL_ADD && epfd >= 0) { + FdRelease(thr, pc, epfd); 
} int res = REAL(epoll_ctl)(epfd, op, fd, ev); + if (fd >= 0) + FdAccess(thr, pc, fd); return res; } TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) { SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout); - int res = REAL(epoll_wait)(epfd, ev, cnt, timeout); - if (res > 0) { - Acquire(cur_thread(), pc, epollfd2addr(epfd)); + int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout); + if (res > 0 && epfd >= 0) { + FdAcquire(thr, pc, epfd); } return res; } +TSAN_INTERCEPTOR(int, poll, void *fds, long_t nfds, int timeout) { + SCOPED_TSAN_INTERCEPTOR(poll, fds, nfds, timeout); + int res = BLOCK_REAL(poll)(fds, nfds, timeout); + return res; +} + static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, my_siginfo_t *info, void *ctx) { ThreadState *thr = cur_thread(); @@ -1281,7 +1427,12 @@ static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, // Don't mess with synchronous signals. if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || - (sctx && sig == sctx->int_signal_send)) { + // If we are sending signal to ourselves, we must process it now. + (sctx && sig == sctx->int_signal_send) || + // If we are in blocking function, we can safely process it now + // (but check if we are in a recursive interceptor, + // i.e. pthread_join()->munmap()). 
+ (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) { CHECK(thr->in_rtl == 0 || thr->in_rtl == 1); int in_rtl = thr->in_rtl; thr->in_rtl = 0; @@ -1340,11 +1491,11 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) { } TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) { - sigaction_t act = {}; + sigaction_t act; act.sa_handler = h; REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask)); act.sa_flags = 0; - sigaction_t old = {}; + sigaction_t old; int res = sigaction(sig, &act, &old); if (res) return SIG_ERR; @@ -1395,11 +1546,89 @@ TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { return res; } -static void process_pending_signals(ThreadState *thr) { +TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { + SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz); + // It's intercepted merely to process pending signals. + return REAL(gettimeofday)(tv, tz); +} + +// Linux kernel has a bug that leads to kernel deadlock if a process +// maps TBs of memory and then calls mlock(). +static void MlockIsUnsupported() { + static atomic_uint8_t printed; + if (atomic_exchange(&printed, 1, memory_order_relaxed)) + return; + Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n"); +} + +TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) { + MlockIsUnsupported(); + return 0; +} + +TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) { + MlockIsUnsupported(); + return 0; +} + +TSAN_INTERCEPTOR(int, mlockall, int flags) { + MlockIsUnsupported(); + return 0; +} + +TSAN_INTERCEPTOR(int, munlockall, void) { + MlockIsUnsupported(); + return 0; +} + +TSAN_INTERCEPTOR(int, fork, int fake) { + SCOPED_TSAN_INTERCEPTOR(fork, fake); + // It's intercepted merely to process pending signals. 
+ int pid = REAL(fork)(fake); + if (pid == 0) { + // child + FdOnFork(thr, pc); + } else if (pid > 0) { + // parent + } + return pid; +} + +struct TsanInterceptorContext { + ThreadState *thr; + const uptr caller_pc; + const uptr pc; +}; + +#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ + MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \ + ((TsanInterceptorContext*)ctx)->pc, \ + (uptr)ptr, size, true) +#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ + MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \ + ((TsanInterceptorContext*)ctx)->pc, \ + (uptr)ptr, size, false) +#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ + SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__) \ + TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ + ctx = (void*)&_ctx; \ + (void)ctx; +#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ + FdAcquire(((TsanInterceptorContext*)ctx)->thr, pc, fd) +#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ + FdRelease(((TsanInterceptorContext*)ctx)->thr, pc, fd) +#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ + ThreadSetName(((TsanInterceptorContext*)ctx)->thr, name) +#include "sanitizer_common/sanitizer_common_interceptors.inc" + +namespace __tsan { + +void ProcessPendingSignals(ThreadState *thr) { CHECK_EQ(thr->in_rtl, 0); SignalContext *sctx = SigCtx(thr); if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler) return; + Context *ctx = CTX(); thr->in_signal_handler = true; sctx->pending_signal_count = 0; // These are too big for stack. @@ -1419,16 +1648,19 @@ static void process_pending_signals(ThreadState *thr) { sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx); else sigactions[sig].sa_handler(sig); - if (errno != 0) { + if (flags()->report_bugs && errno != 0) { ScopedInRtl in_rtl; - StackTrace stack; + __tsan::StackTrace stack; uptr pc = signal->sigaction ? 
(uptr)sigactions[sig].sa_sigaction : (uptr)sigactions[sig].sa_handler; stack.Init(&pc, 1); + Lock l(&ctx->thread_mtx); ScopedReport rep(ReportTypeErrnoInSignal); - rep.AddStack(&stack); - OutputReport(rep, rep.GetReport()->stacks[0]); + if (!IsFiredSuppression(ctx, rep, stack)) { + rep.AddStack(&stack); + OutputReport(ctx, rep, rep.GetReport()->stacks[0]); + } } errno = saved_errno; } @@ -1439,7 +1671,10 @@ static void process_pending_signals(ThreadState *thr) { thr->in_signal_handler = false; } -namespace __tsan { +static void unreachable() { + Printf("FATAL: ThreadSanitizer: unreachable called\n"); + Die(); +} void InitializeInterceptors() { CHECK_GT(cur_thread()->in_rtl, 0); @@ -1449,10 +1684,13 @@ void InitializeInterceptors() { REAL(memcpy) = internal_memcpy; REAL(memcmp) = internal_memcmp; + SANITIZER_COMMON_INTERCEPTORS_INIT; + TSAN_INTERCEPT(longjmp); TSAN_INTERCEPT(siglongjmp); TSAN_INTERCEPT(malloc); + TSAN_INTERCEPT(__libc_memalign); TSAN_INTERCEPT(calloc); TSAN_INTERCEPT(realloc); TSAN_INTERCEPT(free); @@ -1465,15 +1703,6 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pvalloc); TSAN_INTERCEPT(posix_memalign); - TSAN_INTERCEPT(_Znwm); - TSAN_INTERCEPT(_ZnwmRKSt9nothrow_t); - TSAN_INTERCEPT(_Znam); - TSAN_INTERCEPT(_ZnamRKSt9nothrow_t); - TSAN_INTERCEPT(_ZdlPv); - TSAN_INTERCEPT(_ZdlPvRKSt9nothrow_t); - TSAN_INTERCEPT(_ZdaPv); - TSAN_INTERCEPT(_ZdaPvRKSt9nothrow_t); - TSAN_INTERCEPT(strlen); TSAN_INTERCEPT(memset); TSAN_INTERCEPT(memcpy); @@ -1490,9 +1719,6 @@ void InitializeInterceptors() { TSAN_INTERCEPT(strncpy); TSAN_INTERCEPT(strstr); - TSAN_INTERCEPT(__cxa_guard_acquire); - TSAN_INTERCEPT(__cxa_guard_release); - TSAN_INTERCEPT(pthread_create); TSAN_INTERCEPT(pthread_join); TSAN_INTERCEPT(pthread_detach); @@ -1520,7 +1746,7 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pthread_rwlock_timedwrlock); TSAN_INTERCEPT(pthread_rwlock_unlock); - TSAN_INTERCEPT(pthread_cond_init); + // TSAN_INTERCEPT(pthread_cond_init); 
TSAN_INTERCEPT(pthread_cond_destroy); TSAN_INTERCEPT(pthread_cond_signal); TSAN_INTERCEPT(pthread_cond_broadcast); @@ -1541,14 +1767,30 @@ void InitializeInterceptors() { TSAN_INTERCEPT(sem_post); TSAN_INTERCEPT(sem_getvalue); - TSAN_INTERCEPT(read); - TSAN_INTERCEPT(pread); - TSAN_INTERCEPT(pread64); + TSAN_INTERCEPT(open); + TSAN_INTERCEPT(open64); + TSAN_INTERCEPT(creat); + TSAN_INTERCEPT(creat64); + TSAN_INTERCEPT(dup); + TSAN_INTERCEPT(dup2); + TSAN_INTERCEPT(dup3); + TSAN_INTERCEPT(eventfd); + TSAN_INTERCEPT(signalfd); + TSAN_INTERCEPT(inotify_init); + TSAN_INTERCEPT(inotify_init1); + TSAN_INTERCEPT(socket); + TSAN_INTERCEPT(socketpair); + TSAN_INTERCEPT(connect); + TSAN_INTERCEPT(accept); + TSAN_INTERCEPT(accept4); + TSAN_INTERCEPT(epoll_create); + TSAN_INTERCEPT(epoll_create1); + TSAN_INTERCEPT(close); + TSAN_INTERCEPT(pipe); + TSAN_INTERCEPT(pipe2); + TSAN_INTERCEPT(readv); TSAN_INTERCEPT(preadv64); - TSAN_INTERCEPT(write); - TSAN_INTERCEPT(pwrite); - TSAN_INTERCEPT(pwrite64); TSAN_INTERCEPT(writev); TSAN_INTERCEPT(pwritev64); TSAN_INTERCEPT(send); @@ -1558,6 +1800,8 @@ void InitializeInterceptors() { TSAN_INTERCEPT(unlink); TSAN_INTERCEPT(fopen); + TSAN_INTERCEPT(freopen); + TSAN_INTERCEPT(fclose); TSAN_INTERCEPT(fread); TSAN_INTERCEPT(fwrite); TSAN_INTERCEPT(puts); @@ -1566,25 +1810,42 @@ void InitializeInterceptors() { TSAN_INTERCEPT(epoll_ctl); TSAN_INTERCEPT(epoll_wait); + TSAN_INTERCEPT(poll); TSAN_INTERCEPT(sigaction); TSAN_INTERCEPT(signal); TSAN_INTERCEPT(raise); TSAN_INTERCEPT(kill); TSAN_INTERCEPT(pthread_kill); + TSAN_INTERCEPT(sleep); + TSAN_INTERCEPT(usleep); + TSAN_INTERCEPT(nanosleep); + TSAN_INTERCEPT(gettimeofday); + TSAN_INTERCEPT(mlock); + TSAN_INTERCEPT(munlock); + TSAN_INTERCEPT(mlockall); + TSAN_INTERCEPT(munlockall); + + TSAN_INTERCEPT(fork); + + // Need to setup it, because interceptors check that the function is resolved. + // But atexit is emitted directly into the module, so can't be resolved. 
+ REAL(atexit) = (int(*)(void(*)()))unreachable; atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext))) AtExitContext(); if (__cxa_atexit(&finalize, 0, 0)) { - TsanPrintf("ThreadSanitizer: failed to setup atexit callback\n"); + Printf("ThreadSanitizer: failed to setup atexit callback\n"); Die(); } if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { - TsanPrintf("ThreadSanitizer: failed to create thread key\n"); + Printf("ThreadSanitizer: failed to create thread key\n"); Die(); } + + FdInit(); } void internal_start_thread(void(*func)(void *arg), void *arg) { diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h index ed21ec60544e..7480fc893f2d 100644 --- a/lib/tsan/rtl/tsan_interface.h +++ b/lib/tsan/rtl/tsan_interface.h @@ -16,6 +16,8 @@ #ifndef TSAN_INTERFACE_H #define TSAN_INTERFACE_H +#include <sanitizer/common_interface_defs.h> + // This header should NOT include any other headers. // All functions in this header are extern "C" and start with __tsan_. @@ -25,24 +27,30 @@ extern "C" { // This function should be called at the very beginning of the process, // before any instrumented code is executed and before any call to malloc. 
-void __tsan_init(); - -void __tsan_read1(void *addr); -void __tsan_read2(void *addr); -void __tsan_read4(void *addr); -void __tsan_read8(void *addr); -void __tsan_read16(void *addr); - -void __tsan_write1(void *addr); -void __tsan_write2(void *addr); -void __tsan_write4(void *addr); -void __tsan_write8(void *addr); -void __tsan_write16(void *addr); - -void __tsan_vptr_update(void **vptr_p, void *new_val); - -void __tsan_func_entry(void *call_pc); -void __tsan_func_exit(); +void __tsan_init() SANITIZER_INTERFACE_ATTRIBUTE; + +void __tsan_read1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_read2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_read4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_read8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_read16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; + +void __tsan_write1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_write2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_write4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_write8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_write16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; + +void __tsan_vptr_update(void **vptr_p, void *new_val) + SANITIZER_INTERFACE_ATTRIBUTE; + +void __tsan_func_entry(void *call_pc) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_func_exit() SANITIZER_INTERFACE_ATTRIBUTE; + +void __tsan_read_range(void *addr, unsigned long size) // NOLINT + SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_write_range(void *addr, unsigned long size) // NOLINT + SANITIZER_INTERFACE_ATTRIBUTE; #ifdef __cplusplus } // extern "C" diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc index a605b6c9d3f0..51ebbf2266dd 100644 --- a/lib/tsan/rtl/tsan_interface_ann.cc +++ b/lib/tsan/rtl/tsan_interface_ann.cc @@ -11,6 +11,7 @@ // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_libc.h" +#include 
"sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_interface_ann.h" #include "tsan_mutex.h" @@ -52,11 +53,11 @@ class ScopedAnnotation { if (!flags()->enable_annotations) \ return; \ ThreadState *thr = cur_thread(); \ + const uptr pc = (uptr)__builtin_return_address(0); \ StatInc(thr, StatAnnotation); \ StatInc(thr, Stat##typ); \ ScopedAnnotation sa(thr, __FUNCTION__, f, l, \ (uptr)__builtin_return_address(0)); \ - const uptr pc = (uptr)&__FUNCTION__; \ (void)pc; \ /**/ @@ -159,73 +160,92 @@ bool IsExpectedReport(uptr addr, uptr size) { using namespace __tsan; // NOLINT extern "C" { -void AnnotateHappensBefore(char *f, int l, uptr addr) { +void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensBefore); Release(cur_thread(), CALLERPC, addr); } -void AnnotateHappensAfter(char *f, int l, uptr addr) { +void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensAfter); Acquire(cur_thread(), CALLERPC, addr); } -void AnnotateCondVarSignal(char *f, int l, uptr cv) { +void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) { SCOPED_ANNOTATION(AnnotateCondVarSignal); } -void AnnotateCondVarSignalAll(char *f, int l, uptr cv) { +void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) { SCOPED_ANNOTATION(AnnotateCondVarSignalAll); } -void AnnotateMutexIsNotPHB(char *f, int l, uptr mu) { +void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) { SCOPED_ANNOTATION(AnnotateMutexIsNotPHB); } -void AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) { +void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv, + uptr lock) { SCOPED_ANNOTATION(AnnotateCondVarWait); } -void AnnotateRWLockCreate(char *f, int l, uptr lock) { +void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreate); + MutexCreate(thr, pc, 
m, true, true, false); } -void AnnotateRWLockDestroy(char *f, int l, uptr lock) { +void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) { + SCOPED_ANNOTATION(AnnotateRWLockCreateStatic); + MutexCreate(thr, pc, m, true, true, true); +} + +void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockDestroy); + MutexDestroy(thr, pc, m); } -void AnnotateRWLockAcquired(char *f, int l, uptr lock, uptr is_w) { +void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m, + uptr is_w) { SCOPED_ANNOTATION(AnnotateRWLockAcquired); + if (is_w) + MutexLock(thr, pc, m); + else + MutexReadLock(thr, pc, m); } -void AnnotateRWLockReleased(char *f, int l, uptr lock, uptr is_w) { +void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m, + uptr is_w) { SCOPED_ANNOTATION(AnnotateRWLockReleased); + if (is_w) + MutexUnlock(thr, pc, m); + else + MutexReadUnlock(thr, pc, m); } -void AnnotateTraceMemory(char *f, int l, uptr mem) { +void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateTraceMemory); } -void AnnotateFlushState(char *f, int l) { +void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushState); } -void AnnotateNewMemory(char *f, int l, uptr mem, uptr size) { +void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem, + uptr size) { SCOPED_ANNOTATION(AnnotateNewMemory); } -void AnnotateNoOp(char *f, int l, uptr mem) { +void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateNoOp); } static void ReportMissedExpectedRace(ExpectRace *race) { - TsanPrintf("==================\n"); - TsanPrintf("WARNING: ThreadSanitizer: missed expected data race\n"); - TsanPrintf(" %s addr=%zx %s:%d\n", + Printf("==================\n"); + Printf("WARNING: ThreadSanitizer: missed expected data race\n"); + Printf(" %s addr=%zx %s:%d\n", race->desc, race->addr, race->file, race->line); 
- TsanPrintf("==================\n"); + Printf("==================\n"); } -void AnnotateFlushExpectedRaces(char *f, int l) { +void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushExpectedRaces); Lock lock(&dyn_ann_ctx->mtx); while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) { @@ -240,32 +260,39 @@ void AnnotateFlushExpectedRaces(char *f, int l) { } } -void AnnotateEnableRaceDetection(char *f, int l, int enable) { +void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection( + char *f, int l, int enable) { SCOPED_ANNOTATION(AnnotateEnableRaceDetection); // FIXME: Reconsider this functionality later. It may be irrelevant. } -void AnnotateMutexIsUsedAsCondVar(char *f, int l, uptr mu) { +void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar( + char *f, int l, uptr mu) { SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar); } -void AnnotatePCQGet(char *f, int l, uptr pcq) { +void INTERFACE_ATTRIBUTE AnnotatePCQGet( + char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQGet); } -void AnnotatePCQPut(char *f, int l, uptr pcq) { +void INTERFACE_ATTRIBUTE AnnotatePCQPut( + char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQPut); } -void AnnotatePCQDestroy(char *f, int l, uptr pcq) { +void INTERFACE_ATTRIBUTE AnnotatePCQDestroy( + char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQDestroy); } -void AnnotatePCQCreate(char *f, int l, uptr pcq) { +void INTERFACE_ATTRIBUTE AnnotatePCQCreate( + char *f, int l, uptr pcq) { SCOPED_ANNOTATION(AnnotatePCQCreate); } -void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) { +void INTERFACE_ATTRIBUTE AnnotateExpectRace( + char *f, int l, uptr mem, char *desc) { SCOPED_ANNOTATION(AnnotateExpectRace); Lock lock(&dyn_ann_ctx->mtx); AddExpectRace(&dyn_ann_ctx->expect, @@ -273,7 +300,8 @@ void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) { DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l); } -static void BenignRaceImpl(char *f, int l, uptr mem, uptr 
size, char *desc) { +static void BenignRaceImpl( + char *f, int l, uptr mem, uptr size, char *desc) { Lock lock(&dyn_ann_ctx->mtx); AddExpectRace(&dyn_ann_ctx->benign, f, l, mem, size, desc); @@ -281,69 +309,76 @@ static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) { } // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm. -void AnnotateBenignRaceSized(char *f, int l, uptr mem, uptr size, char *desc) { +void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized( + char *f, int l, uptr mem, uptr size, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); BenignRaceImpl(f, l, mem, size, desc); } -void AnnotateBenignRace(char *f, int l, uptr mem, char *desc) { +void INTERFACE_ATTRIBUTE AnnotateBenignRace( + char *f, int l, uptr mem, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRace); BenignRaceImpl(f, l, mem, 1, desc); } -void AnnotateIgnoreReadsBegin(char *f, int l) { +void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin); IgnoreCtl(cur_thread(), false, true); } -void AnnotateIgnoreReadsEnd(char *f, int l) { +void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd); IgnoreCtl(cur_thread(), false, false); } -void AnnotateIgnoreWritesBegin(char *f, int l) { +void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin); IgnoreCtl(cur_thread(), true, true); } -void AnnotateIgnoreWritesEnd(char *f, int l) { +void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) { SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd); - IgnoreCtl(cur_thread(), true, false); + IgnoreCtl(thr, true, false); } -void AnnotatePublishMemoryRange(char *f, int l, uptr addr, uptr size) { +void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange( + char *f, int l, uptr addr, uptr size) { SCOPED_ANNOTATION(AnnotatePublishMemoryRange); } -void AnnotateUnpublishMemoryRange(char *f, int l, uptr addr, uptr size) 
{ +void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange( + char *f, int l, uptr addr, uptr size) { SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange); } -void AnnotateThreadName(char *f, int l, char *name) { +void INTERFACE_ATTRIBUTE AnnotateThreadName( + char *f, int l, char *name) { SCOPED_ANNOTATION(AnnotateThreadName); + ThreadSetName(thr, name); } -void WTFAnnotateHappensBefore(char *f, int l, uptr addr) { +void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensBefore); } -void WTFAnnotateHappensAfter(char *f, int l, uptr addr) { +void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensAfter); } -void WTFAnnotateBenignRaceSized(char *f, int l, uptr mem, uptr sz, char *desc) { +void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized( + char *f, int l, uptr mem, uptr sz, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); } -int RunningOnValgrind() { +int INTERFACE_ATTRIBUTE RunningOnValgrind() { return flags()->running_on_valgrind; } -double __attribute__((weak)) ValgrindSlowdown(void) { +double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) { return 10.0; } -const char *ThreadSanitizerQuery(const char *query) { +const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) { if (internal_strcmp(query, "pure_happens_before") == 0) return "1"; else diff --git a/lib/tsan/rtl/tsan_interface_ann.h b/lib/tsan/rtl/tsan_interface_ann.h index 09e807a0087d..ed809073327e 100644 --- a/lib/tsan/rtl/tsan_interface_ann.h +++ b/lib/tsan/rtl/tsan_interface_ann.h @@ -14,6 +14,8 @@ #ifndef TSAN_INTERFACE_ANN_H #define TSAN_INTERFACE_ANN_H +#include <sanitizer/common_interface_defs.h> + // This header should NOT include any other headers. // All functions in this header are extern "C" and start with __tsan_. 
@@ -21,8 +23,8 @@ extern "C" { #endif -void __tsan_acquire(void *addr); -void __tsan_release(void *addr); +void __tsan_acquire(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_release(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; #ifdef __cplusplus } // extern "C" diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc index a3982a161b9b..a9d75e5bf76c 100644 --- a/lib/tsan/rtl/tsan_interface_atomic.cc +++ b/lib/tsan/rtl/tsan_interface_atomic.cc @@ -11,6 +11,14 @@ // //===----------------------------------------------------------------------===// +// ThreadSanitizer atomic operations are based on C++11/C1x standards. +// For background see C++11 standard. A slightly older, publically +// available draft of the standard (not entirely up-to-date, but close enough +// for casual browsing) is available here: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf +// The following page contains more background information: +// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/ + #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_interface_atomic.h" #include "tsan_flags.h" @@ -39,12 +47,13 @@ typedef __tsan_atomic8 a8; typedef __tsan_atomic16 a16; typedef __tsan_atomic32 a32; typedef __tsan_atomic64 a64; -const int mo_relaxed = __tsan_memory_order_relaxed; -const int mo_consume = __tsan_memory_order_consume; -const int mo_acquire = __tsan_memory_order_acquire; -const int mo_release = __tsan_memory_order_release; -const int mo_acq_rel = __tsan_memory_order_acq_rel; -const int mo_seq_cst = __tsan_memory_order_seq_cst; +typedef __tsan_atomic128 a128; +const morder mo_relaxed = __tsan_memory_order_relaxed; +const morder mo_consume = __tsan_memory_order_consume; +const morder mo_acquire = __tsan_memory_order_acquire; +const morder mo_release = __tsan_memory_order_release; +const morder mo_acq_rel = __tsan_memory_order_acq_rel; +const morder mo_seq_cst = __tsan_memory_order_seq_cst; static void 
AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { StatInc(thr, StatAtomic); @@ -52,7 +61,8 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { StatInc(thr, size == 1 ? StatAtomic1 : size == 2 ? StatAtomic2 : size == 4 ? StatAtomic4 - : StatAtomic8); + : size == 8 ? StatAtomic8 + : StatAtomic16); StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed : mo == mo_consume ? StatAtomicConsume : mo == mo_acquire ? StatAtomicAcquire @@ -61,9 +71,152 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { : StatAtomicSeq_Cst); } +static bool IsLoadOrder(morder mo) { + return mo == mo_relaxed || mo == mo_consume + || mo == mo_acquire || mo == mo_seq_cst; +} + +static bool IsStoreOrder(morder mo) { + return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst; +} + +static bool IsReleaseOrder(morder mo) { + return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst; +} + +static bool IsAcquireOrder(morder mo) { + return mo == mo_consume || mo == mo_acquire + || mo == mo_acq_rel || mo == mo_seq_cst; +} + +static bool IsAcqRelOrder(morder mo) { + return mo == mo_acq_rel || mo == mo_seq_cst; +} + +static morder ConvertOrder(morder mo) { + if (mo > (morder)100500) { + mo = morder(mo - 100500); + if (mo == morder(1 << 0)) + mo = mo_relaxed; + else if (mo == morder(1 << 1)) + mo = mo_consume; + else if (mo == morder(1 << 2)) + mo = mo_acquire; + else if (mo == morder(1 << 3)) + mo = mo_release; + else if (mo == morder(1 << 4)) + mo = mo_acq_rel; + else if (mo == morder(1 << 5)) + mo = mo_seq_cst; + } + CHECK_GE(mo, mo_relaxed); + CHECK_LE(mo, mo_seq_cst); + return mo; +} + +template<typename T> T func_xchg(volatile T *v, T op) { + T res = __sync_lock_test_and_set(v, op); + // __sync_lock_test_and_set does not contain full barrier. 
+ __sync_synchronize(); + return res; +} + +template<typename T> T func_add(volatile T *v, T op) { + return __sync_fetch_and_add(v, op); +} + +template<typename T> T func_sub(volatile T *v, T op) { + return __sync_fetch_and_sub(v, op); +} + +template<typename T> T func_and(volatile T *v, T op) { + return __sync_fetch_and_and(v, op); +} + +template<typename T> T func_or(volatile T *v, T op) { + return __sync_fetch_and_or(v, op); +} + +template<typename T> T func_xor(volatile T *v, T op) { + return __sync_fetch_and_xor(v, op); +} + +template<typename T> T func_nand(volatile T *v, T op) { + // clang does not support __sync_fetch_and_nand. + T cmp = *v; + for (;;) { + T newv = ~(cmp & op); + T cur = __sync_val_compare_and_swap(v, cmp, newv); + if (cmp == cur) + return cmp; + cmp = cur; + } +} + +template<typename T> T func_cas(volatile T *v, T cmp, T xch) { + return __sync_val_compare_and_swap(v, cmp, xch); +} + +// clang does not support 128-bit atomic ops. +// Atomic ops are executed under tsan internal mutex, +// here we assume that the atomic variables are not accessed +// from non-instrumented code. 
+#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 +a128 func_xchg(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = op; + return cmp; +} + +a128 func_add(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = cmp + op; + return cmp; +} + +a128 func_sub(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = cmp - op; + return cmp; +} + +a128 func_and(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = cmp & op; + return cmp; +} + +a128 func_or(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = cmp | op; + return cmp; +} + +a128 func_xor(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = cmp ^ op; + return cmp; +} + +a128 func_nand(volatile a128 *v, a128 op) { + a128 cmp = *v; + *v = ~(cmp & op); + return cmp; +} + +a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) { + a128 cur = *v; + if (cur == cmp) + *v = xch; + return cur; +} +#endif + #define SCOPED_ATOMIC(func, ...) \ + mo = ConvertOrder(mo); \ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \ ThreadState *const thr = cur_thread(); \ + ProcessPendingSignals(thr); \ const uptr pc = (uptr)__builtin_return_address(0); \ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \ ScopedAtomic sa(thr, pc, __FUNCTION__); \ @@ -73,93 +226,130 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { template<typename T> static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) { - CHECK(mo & (mo_relaxed | mo_consume | mo_acquire | mo_seq_cst)); + CHECK(IsLoadOrder(mo)); + // This fast-path is critical for performance. + // Assume the access is atomic. 
+ if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) + return *a; + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + thr->clock.acquire(&s->clock); T v = *a; - if (mo & (mo_consume | mo_acquire | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); + s->mtx.ReadUnlock(); + __sync_synchronize(); return v; } template<typename T> static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - CHECK(mo & (mo_relaxed | mo_release | mo_seq_cst)); - if (mo & (mo_release | mo_seq_cst)) - Release(thr, pc, (uptr)a); + CHECK(IsStoreOrder(mo)); + // This fast-path is critical for performance. + // Assume the access is atomic. + // Strictly saying even relaxed store cuts off release sequence, + // so must reset the clock. + if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) { + *a = v; + return; + } + __sync_synchronize(); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + thr->clock.ReleaseStore(&s->clock); *a = v; + s->mtx.Unlock(); + // Trainling memory barrier to provide sequential consistency + // for Dekker-like store-load synchronization. 
+ __sync_synchronize(); +} + +template<typename T, T (*F)(volatile T *v, T op)> +static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + if (IsAcqRelOrder(mo)) + thr->clock.acq_rel(&s->clock); + else if (IsReleaseOrder(mo)) + thr->clock.release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.acquire(&s->clock); + v = F(a, v); + s->mtx.Unlock(); + return v; } template<typename T> static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - if (mo & (mo_release | mo_acq_rel | mo_seq_cst)) - Release(thr, pc, (uptr)a); - v = __sync_lock_test_and_set(a, v); - if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); - return v; + return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo); } template<typename T> static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - if (mo & (mo_release | mo_acq_rel | mo_seq_cst)) - Release(thr, pc, (uptr)a); - v = __sync_fetch_and_add(a, v); - if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); - return v; + return AtomicRMW<T, func_add>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_sub>(thr, pc, a, v, mo); } template<typename T> static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - if (mo & (mo_release | mo_acq_rel | mo_seq_cst)) - Release(thr, pc, (uptr)a); - v = __sync_fetch_and_and(a, v); - if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); - return v; + return AtomicRMW<T, func_and>(thr, pc, a, v, mo); } template<typename T> static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - if (mo & (mo_release | mo_acq_rel | mo_seq_cst)) - Release(thr, pc, 
(uptr)a); - v = __sync_fetch_and_or(a, v); - if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); - return v; + return AtomicRMW<T, func_or>(thr, pc, a, v, mo); } template<typename T> static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - if (mo & (mo_release | mo_acq_rel | mo_seq_cst)) - Release(thr, pc, (uptr)a); - v = __sync_fetch_and_xor(a, v); - if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); - return v; + return AtomicRMW<T, func_xor>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_nand>(thr, pc, a, v, mo); } template<typename T> static bool AtomicCAS(ThreadState *thr, uptr pc, - volatile T *a, T *c, T v, morder mo) { - if (mo & (mo_release | mo_acq_rel | mo_seq_cst)) - Release(thr, pc, (uptr)a); + volatile T *a, T *c, T v, morder mo, morder fmo) { + (void)fmo; // Unused because llvm does not pass it yet. + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + if (IsAcqRelOrder(mo)) + thr->clock.acq_rel(&s->clock); + else if (IsReleaseOrder(mo)) + thr->clock.release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.acquire(&s->clock); T cc = *c; - T pr = __sync_val_compare_and_swap(a, cc, v); - if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst)) - Acquire(thr, pc, (uptr)a); + T pr = func_cas(a, cc, v); + s->mtx.Unlock(); if (pr == cc) return true; *c = pr; return false; } +template<typename T> +static T AtomicCAS(ThreadState *thr, uptr pc, + volatile T *a, T c, T v, morder mo, morder fmo) { + AtomicCAS(thr, pc, a, &c, v, mo, fmo); + return c; +} + static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { + // FIXME(dvyukov): not implemented. 
__sync_synchronize(); } @@ -179,6 +369,12 @@ a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) { SCOPED_ATOMIC(Load, a, mo); } +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) { + SCOPED_ATOMIC(Load, a, mo); +} +#endif + void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } @@ -195,6 +391,12 @@ void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(Store, a, v, mo); } +#if __TSAN_HAS_INT128 +void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(Store, a, v, mo); +} +#endif + a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } @@ -211,6 +413,12 @@ a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(Exchange, a, v, mo); } +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(Exchange, a, v, mo); +} +#endif + a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } @@ -227,6 +435,34 @@ a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchAdd, a, v, mo); } +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchAdd, a, v, mo); +} +#endif + +a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} +#endif + a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) { 
SCOPED_ATOMIC(FetchAnd, a, v, mo); } @@ -243,6 +479,12 @@ a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchAnd, a, v, mo); } +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchAnd, a, v, mo); +} +#endif + a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } @@ -259,6 +501,12 @@ a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchOr, a, v, mo); } +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchOr, a, v, mo); +} +#endif + a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } @@ -275,47 +523,118 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) { SCOPED_ATOMIC(FetchXor, a, v, mo); } +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchXor, a, v, mo); +} +#endif + +a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +#if __TSAN_HAS_INT128 +a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} +#endif + int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, 
a, c, v, mo, fmo); } int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +#if __TSAN_HAS_INT128 +int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } +#endif int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, - morder mo) { - SCOPED_ATOMIC(CAS, a, c, v, mo); + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +#if __TSAN_HAS_INT128 +int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} +#endif + +a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} +a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); } +a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +a64 
__tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +#if __TSAN_HAS_INT128 +a128 __tsan_atomic64_compare_exchange_val(volatile a128 *a, a128 c, a128 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} +#endif + void __tsan_atomic_thread_fence(morder mo) { char* a; SCOPED_ATOMIC(Fence, mo); } + +void __tsan_atomic_signal_fence(morder mo) { +} diff --git a/lib/tsan/rtl/tsan_interface_atomic.h b/lib/tsan/rtl/tsan_interface_atomic.h index dff32b1473f4..5352d56679f7 100644 --- a/lib/tsan/rtl/tsan_interface_atomic.h +++ b/lib/tsan/rtl/tsan_interface_atomic.h @@ -13,109 +13,193 @@ #ifndef TSAN_INTERFACE_ATOMIC_H #define TSAN_INTERFACE_ATOMIC_H +#ifndef INTERFACE_ATTRIBUTE +# define INTERFACE_ATTRIBUTE __attribute__((visibility("default"))) +#endif + #ifdef __cplusplus extern "C" { #endif -typedef char __tsan_atomic8; -typedef short __tsan_atomic16; // NOLINT -typedef int __tsan_atomic32; -typedef long __tsan_atomic64; // NOLINT +typedef char __tsan_atomic8; +typedef short __tsan_atomic16; // NOLINT +typedef int __tsan_atomic32; +typedef long __tsan_atomic64; // NOLINT + +#if defined(__SIZEOF_INT128__) \ + || (__clang_major__ * 100 + __clang_minor__ >= 302) +__extension__ typedef __int128 __tsan_atomic128; +#define __TSAN_HAS_INT128 1 +#else +typedef char __tsan_atomic128; +#define __TSAN_HAS_INT128 0 +#endif +// Part of ABI, do not change. 
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup typedef enum { - __tsan_memory_order_relaxed = 1 << 0, - __tsan_memory_order_consume = 1 << 1, - __tsan_memory_order_acquire = 1 << 2, - __tsan_memory_order_release = 1 << 3, - __tsan_memory_order_acq_rel = 1 << 4, - __tsan_memory_order_seq_cst = 1 << 5, + __tsan_memory_order_relaxed, + __tsan_memory_order_consume, + __tsan_memory_order_acquire, + __tsan_memory_order_release, + __tsan_memory_order_acq_rel, + __tsan_memory_order_seq_cst } __tsan_memory_order; __tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a, + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v, - __tsan_memory_order mo); + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v, + __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic8 
__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, - __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a, - __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a, - __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a, - __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, - __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a, - __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a, - __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a, - __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; + +__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a, + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; 
+__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a, + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a, + __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, - __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a, - __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a, - __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a, - __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, - __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a, - __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a, - __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a, - __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 v, __tsan_memory_order mo) 
INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, - __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a, - __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a, - __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a, - __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; + +__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, + __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a, + __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a, + __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a, + __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE; int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a, - __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int 
__tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a, - __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a, - __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a, - __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; +int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a, + __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a, - __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo); + __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a, - __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo); + __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a, - __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo); + __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a, - __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo); + __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; +int 
__tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a, + __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; -void __tsan_atomic_thread_fence(__tsan_memory_order mo); +__tsan_atomic8 __tsan_atomic8_compare_exchange_val( + volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; +__tsan_atomic16 __tsan_atomic16_compare_exchange_val( + volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; +__tsan_atomic32 __tsan_atomic32_compare_exchange_val( + volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; +__tsan_atomic64 __tsan_atomic64_compare_exchange_val( + volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; +__tsan_atomic128 __tsan_atomic128_compare_exchange_val( + volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v, + __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE; + +void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE; +void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE; #ifdef __cplusplus } // extern "C" #endif +#undef INTERFACE_ATTRIBUTE + #endif // #ifndef TSAN_INTERFACE_ATOMIC_H diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h index 233f9028a63b..8a92155d57ef 100644 --- a/lib/tsan/rtl/tsan_interface_inl.h +++ b/lib/tsan/rtl/tsan_interface_inl.h @@ -63,3 +63,11 @@ void __tsan_func_entry(void *pc) { void __tsan_func_exit() { FuncExit(cur_thread()); } + +void __tsan_read_range(void *addr, uptr size) { + MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false); +} + +void __tsan_write_range(void *addr, uptr 
size) { + MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true); +} diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc new file mode 100644 index 000000000000..e425c75800be --- /dev/null +++ b/lib/tsan/rtl/tsan_interface_java.cc @@ -0,0 +1,305 @@ +//===-- tsan_interface_java.cc --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "tsan_interface_java.h" +#include "tsan_rtl.h" +#include "tsan_mutex.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" + +using namespace __tsan; // NOLINT + +namespace __tsan { + +const uptr kHeapShadow = 0x300000000000ull; +const uptr kHeapAlignment = 8; + +struct BlockDesc { + bool begin; + Mutex mtx; + SyncVar *head; + + BlockDesc() + : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock) + , head() { + CHECK_EQ(begin, false); + begin = true; + } + + ~BlockDesc() { + CHECK_EQ(begin, true); + begin = false; + ThreadState *thr = cur_thread(); + SyncVar *s = head; + while (s) { + SyncVar *s1 = s->next; + StatInc(thr, StatSyncDestroyed); + s->mtx.Lock(); + s->mtx.Unlock(); + thr->mset.Remove(s->GetId()); + DestroyAndFree(s); + s = s1; + } + } +}; + +struct JavaContext { + const uptr heap_begin; + const uptr heap_size; + BlockDesc *heap_shadow; + + JavaContext(jptr heap_begin, jptr heap_size) + : heap_begin(heap_begin) + , heap_size(heap_size) { + uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc); + heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size); + if 
((uptr)heap_shadow != kHeapShadow) { + Printf("ThreadSanitizer: failed to mmap Java heap shadow\n"); + Die(); + } + } +}; + +class ScopedJavaFunc { + public: + ScopedJavaFunc(ThreadState *thr, uptr pc) + : thr_(thr) { + Initialize(thr_); + FuncEntry(thr, pc); + CHECK_EQ(thr_->in_rtl, 0); + thr_->in_rtl++; + } + + ~ScopedJavaFunc() { + thr_->in_rtl--; + CHECK_EQ(thr_->in_rtl, 0); + FuncExit(thr_); + // FIXME(dvyukov): process pending signals. + } + + private: + ThreadState *thr_; +}; + +static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1]; +static JavaContext *jctx; + +static BlockDesc *getblock(uptr addr) { + uptr i = (addr - jctx->heap_begin) / kHeapAlignment; + return &jctx->heap_shadow[i]; +} + +static uptr USED getmem(BlockDesc *b) { + uptr i = b - jctx->heap_shadow; + uptr p = jctx->heap_begin + i * kHeapAlignment; + CHECK_GE(p, jctx->heap_begin); + CHECK_LT(p, jctx->heap_begin + jctx->heap_size); + return p; +} + +static BlockDesc *getblockbegin(uptr addr) { + for (BlockDesc *b = getblock(addr);; b--) { + CHECK_GE(b, jctx->heap_shadow); + if (b->begin) + return b; + } + return 0; +} + +SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr, + bool write_lock, bool create) { + if (jctx == 0 || addr < jctx->heap_begin + || addr >= jctx->heap_begin + jctx->heap_size) + return 0; + BlockDesc *b = getblockbegin(addr); + DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b); + Lock l(&b->mtx); + SyncVar *s = b->head; + for (; s; s = s->next) { + if (s->addr == addr) { + DPrintf("#%d: found existing sync for %p\n", thr->tid, addr); + break; + } + } + if (s == 0 && create) { + DPrintf("#%d: creating new sync for %p\n", thr->tid, addr); + s = CTX()->synctab.Create(thr, pc, addr); + s->next = b->head; + b->head = s; + } + if (s) { + if (write_lock) + s->mtx.Lock(); + else + s->mtx.ReadLock(); + } + return s; +} + +SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) { + // We do not destroy Java mutexes other than in __tsan_java_free(). 
+ return 0; +} + +} // namespace __tsan { + +#define SCOPED_JAVA_FUNC(func) \ + ThreadState *thr = cur_thread(); \ + const uptr caller_pc = GET_CALLER_PC(); \ + const uptr pc = (uptr)&func; \ + (void)pc; \ + ScopedJavaFunc scoped(thr, caller_pc); \ +/**/ + +void __tsan_java_init(jptr heap_begin, jptr heap_size) { + SCOPED_JAVA_FUNC(__tsan_java_init); + DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size); + CHECK_EQ(jctx, 0); + CHECK_GT(heap_begin, 0); + CHECK_GT(heap_size, 0); + CHECK_EQ(heap_begin % kHeapAlignment, 0); + CHECK_EQ(heap_size % kHeapAlignment, 0); + CHECK_LT(heap_begin, heap_begin + heap_size); + jctx = new(jctx_buf) JavaContext(heap_begin, heap_size); +} + +int __tsan_java_fini() { + SCOPED_JAVA_FUNC(__tsan_java_fini); + DPrintf("#%d: java_fini()\n", thr->tid); + CHECK_NE(jctx, 0); + // FIXME(dvyukov): this does not call atexit() callbacks. + int status = Finalize(thr); + DPrintf("#%d: java_fini() = %d\n", thr->tid, status); + return status; +} + +void __tsan_java_alloc(jptr ptr, jptr size) { + SCOPED_JAVA_FUNC(__tsan_java_alloc); + DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size); + CHECK_NE(jctx, 0); + CHECK_NE(size, 0); + CHECK_EQ(ptr % kHeapAlignment, 0); + CHECK_EQ(size % kHeapAlignment, 0); + CHECK_GE(ptr, jctx->heap_begin); + CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); + + BlockDesc *b = getblock(ptr); + new(b) BlockDesc(); +} + +void __tsan_java_free(jptr ptr, jptr size) { + SCOPED_JAVA_FUNC(__tsan_java_free); + DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size); + CHECK_NE(jctx, 0); + CHECK_NE(size, 0); + CHECK_EQ(ptr % kHeapAlignment, 0); + CHECK_EQ(size % kHeapAlignment, 0); + CHECK_GE(ptr, jctx->heap_begin); + CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); + + BlockDesc *beg = getblock(ptr); + BlockDesc *end = getblock(ptr + size); + for (BlockDesc *b = beg; b != end; b++) { + if (b->begin) + b->~BlockDesc(); + } +} + +void __tsan_java_move(jptr src, jptr dst, jptr size) { + 
SCOPED_JAVA_FUNC(__tsan_java_move); + DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size); + CHECK_NE(jctx, 0); + CHECK_NE(size, 0); + CHECK_EQ(src % kHeapAlignment, 0); + CHECK_EQ(dst % kHeapAlignment, 0); + CHECK_EQ(size % kHeapAlignment, 0); + CHECK_GE(src, jctx->heap_begin); + CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size); + CHECK_GE(dst, jctx->heap_begin); + CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size); + CHECK(dst >= src + size || src >= dst + size); + + // Assuming it's not running concurrently with threads that do + // memory accesses and mutex operations (stop-the-world phase). + { // NOLINT + BlockDesc *s = getblock(src); + BlockDesc *d = getblock(dst); + BlockDesc *send = getblock(src + size); + for (; s != send; s++, d++) { + CHECK_EQ(d->begin, false); + if (s->begin) { + DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d)); + new(d) BlockDesc; + d->head = s->head; + for (SyncVar *sync = d->head; sync; sync = sync->next) { + uptr newaddr = sync->addr - src + dst; + DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr); + sync->addr = newaddr; + } + s->head = 0; + s->~BlockDesc(); + } + } + } + + { // NOLINT + u64 *s = (u64*)MemToShadow(src); + u64 *d = (u64*)MemToShadow(dst); + u64 *send = (u64*)MemToShadow(src + size); + for (; s != send; s++, d++) { + *d = *s; + *s = 0; + } + } +} + +void __tsan_java_mutex_lock(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_mutex_lock); + DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + MutexLock(thr, pc, addr); +} + +void __tsan_java_mutex_unlock(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock); + DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + MutexUnlock(thr, pc, addr); +} + +void 
__tsan_java_mutex_read_lock(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock); + DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + MutexReadLock(thr, pc, addr); +} + +void __tsan_java_mutex_read_unlock(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock); + DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + MutexReadUnlock(thr, pc, addr); +} diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h new file mode 100644 index 000000000000..241483aaa015 --- /dev/null +++ b/lib/tsan/rtl/tsan_interface_java.h @@ -0,0 +1,74 @@ +//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Interface for verification of Java or mixed Java/C++ programs. +// The interface is intended to be used from within a JVM and notify TSan +// about such events like Java locks and GC memory compaction. +// +// For plain memory accesses and function entry/exit a JVM is intended to use +// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit. +// +// For volatile memory accesses and atomic operations JVM is intended to use +// standard atomics API: __tsan_atomicN_load/store/etc. 
+// +// For usage examples see lit_tests/java_*.cc +//===----------------------------------------------------------------------===// +#ifndef TSAN_INTERFACE_JAVA_H +#define TSAN_INTERFACE_JAVA_H + +#ifndef INTERFACE_ATTRIBUTE +# define INTERFACE_ATTRIBUTE __attribute__((visibility("default"))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef unsigned long jptr; // NOLINT + +// Must be called before any other callback from Java. +void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE; +// Must be called when the application exits. +// Not necessary the last callback (concurrently running threads are OK). +// Returns exit status or 0 if tsan does not want to override it. +int __tsan_java_fini() INTERFACE_ATTRIBUTE; + +// Callback for memory allocations. +// May be omitted for allocations that are not subject to data races +// nor contain synchronization objects (e.g. String). +void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE; +// Callback for memory free. +// Can be aggregated for several objects (preferably). +void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE; +// Callback for memory move by GC. +// Can be aggregated for several objects (preferably). +// The ranges must not overlap. +void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE; + +// Mutex lock. +// Addr is any unique address associated with the mutex. +// Must not be called on recursive reentry. +// Object.wait() is handled as a pair of unlock/lock. +void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE; +// Mutex unlock. +void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE; +// Mutex read lock. +void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE; +// Mutex read unlock. 
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE; + +#ifdef __cplusplus +} // extern "C" +#endif + +#undef INTERFACE_ATTRIBUTE + +#endif // #ifndef TSAN_INTERFACE_JAVA_H diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc index 7f956dfe26bf..82f7105d60db 100644 --- a/lib/tsan/rtl/tsan_mman.cc +++ b/lib/tsan/rtl/tsan_mman.cc @@ -11,34 +11,63 @@ // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_mman.h" #include "tsan_rtl.h" #include "tsan_report.h" #include "tsan_flags.h" +// May be overriden by front-end. +extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) { + (void)ptr; + (void)size; +} + +extern "C" void WEAK __tsan_free_hook(void *ptr) { + (void)ptr; +} + namespace __tsan { +static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64); +Allocator *allocator() { + return reinterpret_cast<Allocator*>(&allocator_placeholder); +} + +void InitializeAllocator() { + allocator()->Init(); +} + +void AlloctorThreadFinish(ThreadState *thr) { + allocator()->SwallowCache(&thr->alloc_cache); +} + static void SignalUnsafeCall(ThreadState *thr, uptr pc) { if (!thr->in_signal_handler || !flags()->report_signal_unsafe) return; + Context *ctx = CTX(); StackTrace stack; stack.ObtainCurrent(thr, pc); + Lock l(&ctx->thread_mtx); ScopedReport rep(ReportTypeSignalUnsafe); - rep.AddStack(&stack); - OutputReport(rep, rep.GetReport()->stacks[0]); + if (!IsFiredSuppression(ctx, rep, stack)) { + rep.AddStack(&stack); + OutputReport(ctx, rep, rep.GetReport()->stacks[0]); + } } -void *user_alloc(ThreadState *thr, uptr pc, uptr sz) { +void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) { CHECK_GT(thr->in_rtl, 0); - if (sz + sizeof(MBlock) < sz) - return 0; - MBlock *b = (MBlock*)InternalAlloc(sz + sizeof(MBlock)); - if (b == 0) + void *p = allocator()->Allocate(&thr->alloc_cache, sz, 
align); + if (p == 0) return 0; + MBlock *b = new(allocator()->GetMetaData(p)) MBlock; b->size = sz; - void *p = b + 1; + b->head = 0; + b->alloc_tid = thr->unique_id; + b->alloc_stack_id = CurrentStackId(thr, pc); if (CTX() && CTX()->initialized) { - MemoryResetRange(thr, pc, (uptr)p, sz); + MemoryRangeImitateWrite(thr, pc, (uptr)p, sz); } DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p); SignalUnsafeCall(thr, pc); @@ -49,12 +78,24 @@ void user_free(ThreadState *thr, uptr pc, void *p) { CHECK_GT(thr->in_rtl, 0); CHECK_NE(p, (void*)0); DPrintf("#%d: free(%p)\n", thr->tid, p); - MBlock *b = user_mblock(thr, p); - p = b + 1; + MBlock *b = (MBlock*)allocator()->GetMetaData(p); + if (b->head) { + Lock l(&b->mtx); + for (SyncVar *s = b->head; s;) { + SyncVar *res = s; + s = s->next; + StatInc(thr, StatSyncDestroyed); + res->mtx.Lock(); + res->mtx.Unlock(); + DestroyAndFree(res); + } + b->head = 0; + } if (CTX() && CTX()->initialized && thr->in_rtl == 1) { MemoryRangeFreed(thr, pc, (uptr)p, b->size); } - InternalFree(b); + b->~MBlock(); + allocator()->Deallocate(&thr->alloc_cache, p); SignalUnsafeCall(thr, pc); } @@ -78,26 +119,28 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { return p2; } -void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align) { - CHECK_GT(thr->in_rtl, 0); - void *p = user_alloc(thr, pc, sz + align); - void *pa = RoundUp(p, align); - DCHECK_LE((uptr)pa + sz, (uptr)p + sz + align); - return pa; -} - MBlock *user_mblock(ThreadState *thr, void *p) { - CHECK_GT(thr->in_rtl, 0); CHECK_NE(p, (void*)0); - MBlock *b = (MBlock*)InternalAllocBlock(p); - // FIXME: Output a warning, it's a user error. 
- if (p < (char*)(b + 1) || p > (char*)(b + 1) + b->size) { - TsanPrintf("user_mblock p=%p b=%p size=%zu beg=%p end=%p\n", - p, b, b->size, (char*)(b + 1), (char*)(b + 1) + b->size); - CHECK_GE(p, (char*)(b + 1)); - CHECK_LE(p, (char*)(b + 1) + b->size); - } - return b; + Allocator *a = allocator(); + void *b = a->GetBlockBegin(p); + CHECK_NE(b, 0); + return (MBlock*)a->GetMetaData(b); +} + +void invoke_malloc_hook(void *ptr, uptr size) { + Context *ctx = CTX(); + ThreadState *thr = cur_thread(); + if (ctx == 0 || !ctx->initialized || thr->in_rtl) + return; + __tsan_malloc_hook(ptr, size); +} + +void invoke_free_hook(void *ptr) { + Context *ctx = CTX(); + ThreadState *thr = cur_thread(); + if (ctx == 0 || !ctx->initialized || thr->in_rtl) + return; + __tsan_free_hook(ptr); } void *internal_alloc(MBlockType typ, uptr sz) { diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h index 53f147e40cea..5cf00eac8d03 100644 --- a/lib/tsan/rtl/tsan_mman.h +++ b/lib/tsan/rtl/tsan_mman.h @@ -17,13 +17,14 @@ namespace __tsan { -// Descriptor of user's memory block. -struct MBlock { - uptr size; -}; +const uptr kDefaultAlignment = 16; + +void InitializeAllocator(); +void AlloctorThreadFinish(ThreadState *thr); // For user allocations. -void *user_alloc(ThreadState *thr, uptr pc, uptr sz); +void *user_alloc(ThreadState *thr, uptr pc, uptr sz, + uptr align = kDefaultAlignment); // Does not accept NULL. void user_free(ThreadState *thr, uptr pc, void *p); void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz); @@ -32,6 +33,10 @@ void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align); // returns the descriptor of the block. MBlock *user_mblock(ThreadState *thr, void *p); +// Invoking malloc/free hooks that may be installed by the user. 
+void invoke_malloc_hook(void *ptr, uptr size); +void invoke_free_hook(void *ptr); + enum MBlockType { MBlockScopedBuf, MBlockString, @@ -54,9 +59,10 @@ enum MBlockType { MBlockSuppression, MBlockExpectRace, MBlockSignal, + MBlockFD, // This must be the last. - MBlockTypeCount, + MBlockTypeCount }; // For internal data structures. @@ -70,45 +76,5 @@ void DestroyAndFree(T *&p) { p = 0; } -template<typename T> -class InternalScopedBuf { - public: - explicit InternalScopedBuf(uptr cnt) { - cnt_ = cnt; - ptr_ = (T*)internal_alloc(MBlockScopedBuf, cnt * sizeof(T)); - } - - ~InternalScopedBuf() { - internal_free(ptr_); - } - - operator T *() { - return ptr_; - } - - T &operator[](uptr i) { - return ptr_[i]; - } - - T *Ptr() { - return ptr_; - } - - uptr Count() { - return cnt_; - } - - uptr Size() { - return cnt_ * sizeof(T); - } - - private: - T *ptr_; - uptr cnt_; - - InternalScopedBuf(const InternalScopedBuf&); - void operator = (const InternalScopedBuf&); -}; - } // namespace __tsan #endif // TSAN_MMAN_H diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc index 1a70f8fe4430..335ca2211d13 100644 --- a/lib/tsan/rtl/tsan_mutex.cc +++ b/lib/tsan/rtl/tsan_mutex.cc @@ -25,22 +25,28 @@ namespace __tsan { // then Report mutex can be locked while under Threads mutex. // The leaf mutexes can be locked under any other mutexes. // Recursive locking is not supported. 
+#if TSAN_DEBUG && !TSAN_GO const MutexType MutexTypeLeaf = (MutexType)-1; static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { - /*0 MutexTypeInvalid*/ {}, - /*1 MutexTypeTrace*/ {MutexTypeLeaf}, - /*2 MutexTypeThreads*/ {MutexTypeReport}, - /*3 MutexTypeReport*/ {}, - /*4 MutexTypeSyncVar*/ {}, - /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar}, - /*6 MutexTypeSlab*/ {MutexTypeLeaf}, - /*7 MutexTypeAnnotations*/ {}, - /*8 MutexTypeAtExit*/ {MutexTypeSyncTab}, + /*0 MutexTypeInvalid*/ {}, + /*1 MutexTypeTrace*/ {MutexTypeLeaf}, + /*2 MutexTypeThreads*/ {MutexTypeReport}, + /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeMBlock, + MutexTypeJavaMBlock}, + /*4 MutexTypeSyncVar*/ {}, + /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar}, + /*6 MutexTypeSlab*/ {MutexTypeLeaf}, + /*7 MutexTypeAnnotations*/ {}, + /*8 MutexTypeAtExit*/ {MutexTypeSyncTab}, + /*9 MutexTypeMBlock*/ {MutexTypeSyncVar}, + /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar}, }; static bool CanLockAdj[MutexTypeCount][MutexTypeCount]; +#endif void InitializeMutex() { +#if TSAN_DEBUG && !TSAN_GO // Build the "can lock" adjacency matrix. // If [i][j]==true, then one can lock mutex j while under mutex i. 
const int N = MutexTypeCount; @@ -48,7 +54,7 @@ void InitializeMutex() { bool leaf[N] = {}; for (int i = 1; i < N; i++) { for (int j = 0; j < N; j++) { - int z = CanLockTab[i][j]; + MutexType z = CanLockTab[i][j]; if (z == MutexTypeInvalid) continue; if (z == MutexTypeLeaf) { @@ -56,8 +62,8 @@ void InitializeMutex() { leaf[i] = true; continue; } - CHECK(!CanLockAdj[i][z]); - CanLockAdj[i][z] = true; + CHECK(!CanLockAdj[i][(int)z]); + CanLockAdj[i][(int)z] = true; cnt[i]++; } } @@ -92,36 +98,40 @@ void InitializeMutex() { } } #if 0 - TsanPrintf("Can lock graph:\n"); + Printf("Can lock graph:\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { - TsanPrintf("%d ", CanLockAdj[i][j]); + Printf("%d ", CanLockAdj[i][j]); } - TsanPrintf("\n"); + Printf("\n"); } - TsanPrintf("Can lock graph closure:\n"); + Printf("Can lock graph closure:\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { - TsanPrintf("%d ", CanLockAdj2[i][j]); + Printf("%d ", CanLockAdj2[i][j]); } - TsanPrintf("\n"); + Printf("\n"); } #endif // Verify that the graph is acyclic. for (int i = 0; i < N; i++) { if (CanLockAdj2[i][i]) { - TsanPrintf("Mutex %d participates in a cycle\n", i); + Printf("Mutex %d participates in a cycle\n", i); Die(); } } +#endif } DeadlockDetector::DeadlockDetector() { // Rely on zero initialization because some mutexes can be locked before ctor. 
} +#if TSAN_DEBUG && !TSAN_GO void DeadlockDetector::Lock(MutexType t) { - // TsanPrintf("LOCK %d @%zu\n", t, seq_ + 1); + // Printf("LOCK %d @%zu\n", t, seq_ + 1); + CHECK_GT(t, MutexTypeInvalid); + CHECK_LT(t, MutexTypeCount); u64 max_seq = 0; u64 max_idx = MutexTypeInvalid; for (int i = 0; i != MutexTypeCount; i++) { @@ -136,20 +146,21 @@ void DeadlockDetector::Lock(MutexType t) { locked_[t] = ++seq_; if (max_idx == MutexTypeInvalid) return; - // TsanPrintf(" last %d @%zu\n", max_idx, max_seq); + // Printf(" last %d @%zu\n", max_idx, max_seq); if (!CanLockAdj[max_idx][t]) { - TsanPrintf("ThreadSanitizer: internal deadlock detected\n"); - TsanPrintf("ThreadSanitizer: can't lock %d while under %zu\n", + Printf("ThreadSanitizer: internal deadlock detected\n"); + Printf("ThreadSanitizer: can't lock %d while under %zu\n", t, (uptr)max_idx); - Die(); + CHECK(0); } } void DeadlockDetector::Unlock(MutexType t) { - // TsanPrintf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]); + // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]); CHECK(locked_[t]); locked_[t] = 0; } +#endif const uptr kUnlocked = 0; const uptr kWriteLock = 1; @@ -256,4 +267,8 @@ void Mutex::ReadUnlock() { #endif } +void Mutex::CheckLocked() { + CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0); +} + } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h index 5b22a4145185..a2b489107a98 100644 --- a/lib/tsan/rtl/tsan_mutex.h +++ b/lib/tsan/rtl/tsan_mutex.h @@ -29,9 +29,11 @@ enum MutexType { MutexTypeSlab, MutexTypeAnnotations, MutexTypeAtExit, + MutexTypeMBlock, + MutexTypeJavaMBlock, // This must be the last. 
- MutexTypeCount, + MutexTypeCount }; class Mutex { @@ -45,6 +47,8 @@ class Mutex { void ReadLock(); void ReadUnlock(); + void CheckLocked(); + private: atomic_uintptr_t state_; #if TSAN_DEBUG diff --git a/lib/tsan/rtl/tsan_mutexset.cc b/lib/tsan/rtl/tsan_mutexset.cc new file mode 100644 index 000000000000..21587770f687 --- /dev/null +++ b/lib/tsan/rtl/tsan_mutexset.cc @@ -0,0 +1,89 @@ +//===-- tsan_mutexset.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_mutexset.h" +#include "tsan_rtl.h" + +namespace __tsan { + +const uptr MutexSet::kMaxSize; + +MutexSet::MutexSet() { + size_ = 0; + internal_memset(&descs_, 0, sizeof(descs_)); +} + +void MutexSet::Add(u64 id, bool write, u64 epoch) { + // Look up existing mutex with the same id. + for (uptr i = 0; i < size_; i++) { + if (descs_[i].id == id) { + descs_[i].count++; + descs_[i].epoch = epoch; + return; + } + } + // On overflow, find the oldest mutex and drop it. + if (size_ == kMaxSize) { + u64 minepoch = (u64)-1; + u64 mini = (u64)-1; + for (uptr i = 0; i < size_; i++) { + if (descs_[i].epoch < minepoch) { + minepoch = descs_[i].epoch; + mini = i; + } + } + RemovePos(mini); + CHECK_EQ(size_, kMaxSize - 1); + } + // Add new mutex descriptor. 
+ descs_[size_].id = id; + descs_[size_].write = write; + descs_[size_].epoch = epoch; + descs_[size_].count = 1; + size_++; +} + +void MutexSet::Del(u64 id, bool write) { + for (uptr i = 0; i < size_; i++) { + if (descs_[i].id == id) { + if (--descs_[i].count == 0) + RemovePos(i); + return; + } + } +} + +void MutexSet::Remove(u64 id) { + for (uptr i = 0; i < size_; i++) { + if (descs_[i].id == id) { + RemovePos(i); + return; + } + } +} + +void MutexSet::RemovePos(uptr i) { + CHECK_LT(i, size_); + descs_[i] = descs_[size_ - 1]; + size_--; +} + +uptr MutexSet::Size() const { + return size_; +} + +MutexSet::Desc MutexSet::Get(uptr i) const { + CHECK_LT(i, size_); + return descs_[i]; +} + +} // namespace __tsan diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h new file mode 100644 index 000000000000..09223ff6cc48 --- /dev/null +++ b/lib/tsan/rtl/tsan_mutexset.h @@ -0,0 +1,65 @@ +//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// MutexSet holds the set of mutexes currently held by a thread. +//===----------------------------------------------------------------------===// +#ifndef TSAN_MUTEXSET_H +#define TSAN_MUTEXSET_H + +#include "tsan_defs.h" + +namespace __tsan { + +class MutexSet { + public: + // Holds limited number of mutexes. + // The oldest mutexes are discarded on overflow. + static const uptr kMaxSize = 64; + struct Desc { + u64 id; + u64 epoch; + int count; + bool write; + }; + + MutexSet(); + // The 'id' is obtained from SyncVar::GetId(). 
+ void Add(u64 id, bool write, u64 epoch); + void Del(u64 id, bool write); + void Remove(u64 id); // Removes the mutex completely (if it's destroyed). + uptr Size() const; + Desc Get(uptr i) const; + + private: +#ifndef TSAN_GO + uptr size_; + Desc descs_[kMaxSize]; +#endif + + void RemovePos(uptr i); +}; + +// Go does not have mutexes, so do not spend memory and time. +// (Go sync.Mutex is actually a semaphore -- can be unlocked +// in different goroutine). +#ifdef TSAN_GO +MutexSet::MutexSet() {} +void MutexSet::Add(u64 id, bool write, u64 epoch) {} +void MutexSet::Del(u64 id, bool write) {} +void MutexSet::Remove(u64 id) {} +void MutexSet::RemovePos(uptr i) {} +uptr MutexSet::Size() const { return 0; } +MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); } +#endif + +} // namespace __tsan + +#endif // TSAN_REPORT_H diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h index b557fa1caec5..c859c3e85b19 100644 --- a/lib/tsan/rtl/tsan_platform.h +++ b/lib/tsan/rtl/tsan_platform.h @@ -12,31 +12,85 @@ // Platform-specific code. 
//===----------------------------------------------------------------------===// +/* +C++ linux memory layout: +0000 0000 0000 - 03c0 0000 0000: protected +03c0 0000 0000 - 1000 0000 0000: shadow +1000 0000 0000 - 6000 0000 0000: protected +6000 0000 0000 - 6200 0000 0000: traces +6200 0000 0000 - 7d00 0000 0000: - +7d00 0000 0000 - 7e00 0000 0000: heap +7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack + +C++ COMPAT linux memory layout: +0000 0000 0000 - 0400 0000 0000: protected +0400 0000 0000 - 1000 0000 0000: shadow +1000 0000 0000 - 2900 0000 0000: protected +2900 0000 0000 - 2c00 0000 0000: modules +2c00 0000 0000 - 6000 0000 0000: - +6000 0000 0000 - 6200 0000 0000: traces +6200 0000 0000 - 7d00 0000 0000: - +7d00 0000 0000 - 7e00 0000 0000: heap +7e00 0000 0000 - 7f00 0000 0000: - +7f00 0000 0000 - 7fff ffff ffff: main thread stack + +Go linux and darwin memory layout: +0000 0000 0000 - 0000 1000 0000: executable +0000 1000 0000 - 00f8 0000 0000: - +00f8 0000 0000 - 0118 0000 0000: heap +0118 0000 0000 - 1000 0000 0000: - +1000 0000 0000 - 1460 0000 0000: shadow +1460 0000 0000 - 6000 0000 0000: - +6000 0000 0000 - 6200 0000 0000: traces +6200 0000 0000 - 7fff ffff ffff: - + +Go windows memory layout: +0000 0000 0000 - 0000 1000 0000: executable +0000 1000 0000 - 00f8 0000 0000: - +00f8 0000 0000 - 0118 0000 0000: heap +0118 0000 0000 - 0100 0000 0000: - +0100 0000 0000 - 0560 0000 0000: shadow +0560 0000 0000 - 0760 0000 0000: traces +0760 0000 0000 - 07ff ffff ffff: - +*/ + #ifndef TSAN_PLATFORM_H #define TSAN_PLATFORM_H -#include "tsan_rtl.h" +#include "tsan_defs.h" +#include "tsan_trace.h" -#if __LP64__ +#if defined(__LP64__) || defined(_WIN64) namespace __tsan { #if defined(TSAN_GO) static const uptr kLinuxAppMemBeg = 0x000000000000ULL; static const uptr kLinuxAppMemEnd = 0x00fcffffffffULL; +# if defined(_WIN32) +static const uptr kLinuxShadowMsk = 0x010000000000ULL; +# else static const uptr kLinuxShadowMsk = 0x100000000000ULL; +# endif 
// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout, // when memory addresses are of the 0x2axxxxxxxxxx form. // The option is enabled with 'setarch x86_64 -L'. #elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW -static const uptr kLinuxAppMemBeg = 0x2a0000000000ULL; +static const uptr kLinuxAppMemBeg = 0x290000000000ULL; static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL; #else -static const uptr kLinuxAppMemBeg = 0x7ef000000000ULL; +static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL; static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL; #endif static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL; +#if defined(_WIN32) +const uptr kTraceMemBegin = 0x056000000000ULL; +#else +const uptr kTraceMemBegin = 0x600000000000ULL; +#endif +const uptr kTraceMemSize = 0x020000000000ULL; + // This has to be a macro to allow constant initialization of constants below. #ifndef TSAN_GO #define MemToShadow(addr) \ @@ -48,7 +102,7 @@ static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL; static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg); static const uptr kLinuxShadowEnd = - MemToShadow(kLinuxAppMemEnd) | (kPageSize - 1); + MemToShadow(kLinuxAppMemEnd) | 0xff; static inline bool IsAppMem(uptr mem) { return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd; @@ -62,9 +116,6 @@ static inline uptr ShadowToMem(uptr shadow) { CHECK(IsShadowMem(shadow)); #ifdef TSAN_GO return (shadow & ~kLinuxShadowMsk) / kShadowCnt; -#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW - // COMPAT mapping is not quite one-to-one. - return (shadow / kShadowCnt) | 0x280000000000ULL; #else return (shadow / kShadowCnt) | kLinuxAppMemMsk; #endif @@ -72,9 +123,10 @@ static inline uptr ShadowToMem(uptr shadow) { // For COMPAT mapping returns an alternative address // that mapped to the same shadow address. +// COMPAT mapping is not quite one-to-one. 
static inline uptr AlternativeAddress(uptr addr) { #if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW - return addr | kLinuxAppMemMsk; + return (addr & ~kLinuxAppMemMsk) | 0x280000000000ULL; #else return 0; #endif @@ -85,16 +137,24 @@ void FlushShadowMemory(); const char *InitializePlatform(); void FinalizePlatform(); +uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) { + uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event); + DCHECK_LT(p, kTraceMemBegin + kTraceMemSize); + return p; +} void internal_start_thread(void(*func)(void*), void *arg); +// Says whether the addr relates to a global var. +// Guesses with high probability, may yield both false positives and negatives. +bool IsGlobalVar(uptr addr); uptr GetTlsSize(); void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size); } // namespace __tsan -#else // __LP64__ +#else // defined(__LP64__) || defined(_WIN64) # error "Only 64-bit is supported" #endif diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc index c791c96c14ac..6cc424975125 100644 --- a/lib/tsan/rtl/tsan_platform_linux.cc +++ b/lib/tsan/rtl/tsan_platform_linux.cc @@ -43,14 +43,6 @@ extern "C" int arch_prctl(int code, __sanitizer::uptr *addr); -namespace __sanitizer { - -void Die() { - _exit(1); -} - -} // namespace __sanitizer - namespace __tsan { #ifndef TSAN_GO @@ -79,9 +71,7 @@ uptr GetShadowMemoryConsumption() { } void FlushShadowMemory() { - madvise((void*)kLinuxShadowBeg, - kLinuxShadowEnd - kLinuxShadowBeg, - MADV_DONTNEED); + FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg); } #ifndef TSAN_GO @@ -91,65 +81,88 @@ static void ProtectRange(uptr beg, uptr end) { if (beg == end) return; if (beg != (uptr)Mprotect(beg, end - beg)) { - TsanPrintf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end); - TsanPrintf("FATAL: Make sure you are not using unlimited stack\n"); + Printf("FATAL: ThreadSanitizer can not protect 
[%zx,%zx]\n", beg, end); + Printf("FATAL: Make sure you are not using unlimited stack\n"); Die(); } } #endif +#ifndef TSAN_GO void InitializeShadowMemory() { uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg); if (shadow != kLinuxShadowBeg) { - TsanPrintf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - TsanPrintf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie.\n"); + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg); Die(); } -#ifndef TSAN_GO const uptr kClosedLowBeg = 0x200000; const uptr kClosedLowEnd = kLinuxShadowBeg - 1; const uptr kClosedMidBeg = kLinuxShadowEnd + 1; - const uptr kClosedMidEnd = kLinuxAppMemBeg - 1; + const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin); ProtectRange(kClosedLowBeg, kClosedLowEnd); ProtectRange(kClosedMidBeg, kClosedMidEnd); -#endif -#ifndef TSAN_GO DPrintf("kClosedLow %zx-%zx (%zuGB)\n", kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30); -#endif DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n", kLinuxShadowBeg, kLinuxShadowEnd, (kLinuxShadowEnd - kLinuxShadowBeg) >> 30); -#ifndef TSAN_GO DPrintf("kClosedMid %zx-%zx (%zuGB)\n", kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30); -#endif DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n", kLinuxAppMemBeg, kLinuxAppMemEnd, (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30); DPrintf("stack %zx\n", (uptr)&shadow); } +#endif + +static uptr g_data_start; +static uptr g_data_end; #ifndef TSAN_GO static void CheckPIE() { // Ensure that the binary is indeed compiled with -pie. 
- ProcessMaps proc_maps; + MemoryMappingLayout proc_maps; uptr start, end; if (proc_maps.Next(&start, &end, /*offset*/0, /*filename*/0, /*filename_size*/0)) { if ((u64)start < kLinuxAppMemBeg) { - TsanPrintf("FATAL: ThreadSanitizer can not mmap the shadow memory (" + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory (" "something is mapped at 0x%zx < 0x%zx)\n", start, kLinuxAppMemBeg); - TsanPrintf("FATAL: Make sure to compile with -fPIE" + Printf("FATAL: Make sure to compile with -fPIE" " and to link with -pie.\n"); Die(); } } } +static void InitDataSeg() { + MemoryMappingLayout proc_maps; + uptr start, end, offset; + char name[128]; + bool prev_is_data = false; + while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name))) { + DPrintf("%p-%p %p %s\n", start, end, offset, name); + bool is_data = offset != 0 && name[0] != 0; + // BSS may get merged with [heap] in /proc/self/maps. This is not very + // reliable. + bool is_bss = offset == 0 && + (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data; + if (g_data_start == 0 && is_data) + g_data_start = start; + if (is_bss) + g_data_end = end; + prev_is_data = is_data; + } + DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end); + CHECK_LT(g_data_start, g_data_end); + CHECK_GE((uptr)&g_data_start, g_data_start); + CHECK_LT((uptr)&g_data_start, g_data_end); +} + static uptr g_tls_size; #ifdef __i386__ @@ -157,14 +170,14 @@ static uptr g_tls_size; #else # define INTERNAL_FUNCTION #endif -extern "C" void _dl_get_tls_static_info(size_t*, size_t*) - __attribute__((weak)) INTERNAL_FUNCTION; static int InitTlsSize() { typedef void (*get_tls_func)(size_t*, size_t*) INTERNAL_FUNCTION; - get_tls_func get_tls = &_dl_get_tls_static_info; - if (get_tls == 0) - get_tls = (get_tls_func)dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); + get_tls_func get_tls; + void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); + CHECK_EQ(sizeof(get_tls), 
sizeof(get_tls_static_info_ptr)); + internal_memcpy(&get_tls, &get_tls_static_info_ptr, + sizeof(get_tls_static_info_ptr)); CHECK_NE(get_tls, 0); size_t tls_size = 0; size_t tls_align = 0; @@ -173,22 +186,62 @@ static int InitTlsSize() { } #endif // #ifndef TSAN_GO +static rlim_t getlim(int res) { + rlimit rlim; + CHECK_EQ(0, getrlimit(res, &rlim)); + return rlim.rlim_cur; +} + +static void setlim(int res, rlim_t lim) { + // The following magic is to prevent clang from replacing it with memset. + volatile rlimit rlim; + rlim.rlim_cur = lim; + rlim.rlim_max = lim; + setrlimit(res, (rlimit*)&rlim); +} + const char *InitializePlatform() { void *p = 0; if (sizeof(p) == 8) { // Disable core dumps, dumping of 16TB usually takes a bit long. - // The following magic is to prevent clang from replacing it with memset. - volatile rlimit lim; - lim.rlim_cur = 0; - lim.rlim_max = 0; - setrlimit(RLIMIT_CORE, (rlimit*)&lim); + setlim(RLIMIT_CORE, 0); + } + + // Go maps shadow memory lazily and works fine with limited address space. + // Unlimited stack is not a problem as well, because the executable + // is not compiled with -pie. + if (kCppMode) { + bool reexec = false; + // TSan doesn't play well with unlimited stack size (as stack + // overlaps with shadow memory). If we detect unlimited stack size, + // we re-exec the program with limited stack size as a best effort. 
+ if (getlim(RLIMIT_STACK) == (rlim_t)-1) { + const uptr kMaxStackSize = 32 * 1024 * 1024; + Report("WARNING: Program is run with unlimited stack size, which " + "wouldn't work with ThreadSanitizer.\n"); + Report("Re-execing with stack size limited to %zd bytes.\n", + kMaxStackSize); + SetStackSizeLimitInBytes(kMaxStackSize); + reexec = true; + } + + if (getlim(RLIMIT_AS) != (rlim_t)-1) { + Report("WARNING: Program is run with limited virtual address space," + " which wouldn't work with ThreadSanitizer.\n"); + Report("Re-execing with unlimited virtual address space.\n"); + setlim(RLIMIT_AS, -1); + reexec = true; + } + if (reexec) + ReExec(); } #ifndef TSAN_GO CheckPIE(); g_tls_size = (uptr)InitTlsSize(); + InitDataSeg(); #endif - return getenv("TSAN_OPTIONS"); + return getenv(kTsanOptionsEnv); } void FinalizePlatform() { @@ -232,6 +285,9 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, #endif } +bool IsGlobalVar(uptr addr) { + return g_data_start && addr >= g_data_start && addr < g_data_end; +} } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc index 7451492744f2..183061d14638 100644 --- a/lib/tsan/rtl/tsan_platform_mac.cc +++ b/lib/tsan/rtl/tsan_platform_mac.cc @@ -37,14 +37,6 @@ #include <errno.h> #include <sched.h> -namespace __sanitizer { - -void Die() { - _exit(1); -} - -} // namespace __sanitizer - namespace __tsan { ScopedInRtl::ScopedInRtl() { @@ -60,13 +52,14 @@ uptr GetShadowMemoryConsumption() { void FlushShadowMemory() { } +#ifndef TSAN_GO void InitializeShadowMemory() { uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg); if (shadow != kLinuxShadowBeg) { - TsanPrintf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - TsanPrintf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie.\n"); + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to 
link with -pie.\n"); Die(); } DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n", @@ -76,6 +69,7 @@ void InitializeShadowMemory() { kLinuxAppMemBeg, kLinuxAppMemEnd, (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30); } +#endif const char *InitializePlatform() { void *p = 0; @@ -88,7 +82,7 @@ const char *InitializePlatform() { setrlimit(RLIMIT_CORE, (rlimit*)&lim); } - return getenv("TSAN_OPTIONS"); + return getenv(kTsanOptionsEnv); } void FinalizePlatform() { diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc new file mode 100644 index 000000000000..f23e84e7875d --- /dev/null +++ b/lib/tsan/rtl/tsan_platform_windows.cc @@ -0,0 +1,58 @@ +//===-- tsan_platform_windows.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Windows-specific code. 
+//===----------------------------------------------------------------------===// + +#ifdef _WIN32 + +#include "tsan_platform.h" + +#include <stdlib.h> + +namespace __tsan { + +ScopedInRtl::ScopedInRtl() { +} + +ScopedInRtl::~ScopedInRtl() { +} + +uptr GetShadowMemoryConsumption() { + return 0; +} + +void FlushShadowMemory() { +} + +const char *InitializePlatform() { + return getenv(kTsanOptionsEnv); +} + +void FinalizePlatform() { + fflush(0); +} + +uptr GetTlsSize() { + return 0; +} + +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { + *stk_addr = 0; + *stk_size = 0; + *tls_addr = 0; + *tls_size = 0; +} + +} // namespace __tsan + +#endif // #ifdef _WIN32 diff --git a/lib/tsan/rtl/tsan_printf.cc b/lib/tsan/rtl/tsan_printf.cc deleted file mode 100644 index 6f41440fb929..000000000000 --- a/lib/tsan/rtl/tsan_printf.cc +++ /dev/null @@ -1,39 +0,0 @@ -//===-- tsan_printf.cc ----------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_libc.h" -#include "tsan_defs.h" -#include "tsan_mman.h" -#include "tsan_platform.h" - -#include <stdarg.h> // va_list - -namespace __sanitizer { -int VSNPrintf(char *buff, int buff_length, const char *format, va_list args); -} // namespace __sanitizer - -namespace __tsan { - -void TsanPrintf(const char *format, ...) 
{ - ScopedInRtl in_rtl; - const uptr kMaxLen = 16 * 1024; - InternalScopedBuf<char> buffer(kMaxLen); - va_list args; - va_start(args, format); - uptr len = VSNPrintf(buffer, buffer.Size(), format, args); - va_end(args); - internal_write(CTX() ? flags()->log_fileno : 2, - buffer, len < buffer.Size() ? len : buffer.Size() - 1); -} - -} // namespace __tsan diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc index c841a9827f4c..056dc97387b9 100644 --- a/lib/tsan/rtl/tsan_report.cc +++ b/lib/tsan/rtl/tsan_report.cc @@ -21,100 +21,153 @@ ReportDesc::ReportDesc() , mops(MBlockReportMop) , locs(MBlockReportLoc) , mutexes(MBlockReportMutex) - , threads(MBlockReportThread) { + , threads(MBlockReportThread) + , sleep() { +} + +ReportMop::ReportMop() + : mset(MBlockReportMutex) { } ReportDesc::~ReportDesc() { + // FIXME(dvyukov): it must be leaking a lot of memory. } #ifndef TSAN_GO +const int kThreadBufSize = 32; +const char *thread_name(char *buf, int tid) { + if (tid == 0) + return "main thread"; + internal_snprintf(buf, kThreadBufSize, "thread T%d", tid); + return buf; +} + static void PrintHeader(ReportType typ) { - TsanPrintf("WARNING: ThreadSanitizer: "); + Printf("WARNING: ThreadSanitizer: "); if (typ == ReportTypeRace) - TsanPrintf("data race"); + Printf("data race"); else if (typ == ReportTypeUseAfterFree) - TsanPrintf("heap-use-after-free"); + Printf("heap-use-after-free"); else if (typ == ReportTypeThreadLeak) - TsanPrintf("thread leak"); + Printf("thread leak"); else if (typ == ReportTypeMutexDestroyLocked) - TsanPrintf("destroy of a locked mutex"); + Printf("destroy of a locked mutex"); else if (typ == ReportTypeSignalUnsafe) - TsanPrintf("signal-unsafe call inside of a signal"); + Printf("signal-unsafe call inside of a signal"); else if (typ == ReportTypeErrnoInSignal) - TsanPrintf("signal handler spoils errno"); + Printf("signal handler spoils errno"); - TsanPrintf(" (pid=%d)\n", GetPid()); + Printf(" (pid=%d)\n", GetPid()); } -static void 
PrintStack(const ReportStack *ent) { +void PrintStack(const ReportStack *ent) { + if (ent == 0) { + Printf(" [failed to restore the stack]\n\n"); + return; + } for (int i = 0; ent; ent = ent->next, i++) { - TsanPrintf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line); + Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line); if (ent->col) - TsanPrintf(":%d", ent->col); + Printf(":%d", ent->col); if (ent->module && ent->offset) - TsanPrintf(" (%s+%p)\n", ent->module, (void*)ent->offset); + Printf(" (%s+%p)\n", ent->module, (void*)ent->offset); else - TsanPrintf(" (%p)\n", (void*)ent->pc); + Printf(" (%p)\n", (void*)ent->pc); + } + Printf("\n"); +} + +static void PrintMutexSet(Vector<ReportMopMutex> const& mset) { + for (uptr i = 0; i < mset.Size(); i++) { + if (i == 0) + Printf(" (mutexes:"); + const ReportMopMutex m = mset[i]; + Printf(" %s M%llu", m.write ? "write" : "read", m.id); + Printf(i == mset.Size() - 1 ? ")" : ","); } } static void PrintMop(const ReportMop *mop, bool first) { - TsanPrintf(" %s of size %d at %p", + char thrbuf[kThreadBufSize]; + Printf(" %s of size %d at %p by %s", (first ? (mop->write ? "Write" : "Read") : (mop->write ? 
"Previous write" : "Previous read")), - mop->size, (void*)mop->addr); - if (mop->tid == 0) - TsanPrintf(" by main thread:\n"); - else - TsanPrintf(" by thread %d:\n", mop->tid); + mop->size, (void*)mop->addr, + thread_name(thrbuf, mop->tid)); + PrintMutexSet(mop->mset); + Printf(":\n"); PrintStack(mop->stack); } static void PrintLocation(const ReportLocation *loc) { + char thrbuf[kThreadBufSize]; if (loc->type == ReportLocationGlobal) { - TsanPrintf(" Location is global '%s' of size %zu at %zx %s:%d\n", - loc->name, loc->size, loc->addr, loc->file, loc->line); + Printf(" Location is global '%s' of size %zu at %zx (%s+%p)\n\n", + loc->name, loc->size, loc->addr, loc->module, loc->offset); } else if (loc->type == ReportLocationHeap) { - TsanPrintf(" Location is heap of size %zu at %zx allocated " - "by thread %d:\n", loc->size, loc->addr, loc->tid); + char thrbuf[kThreadBufSize]; + Printf(" Location is heap block of size %zu at %p allocated by %s:\n", + loc->size, loc->addr, thread_name(thrbuf, loc->tid)); PrintStack(loc->stack); } else if (loc->type == ReportLocationStack) { - TsanPrintf(" Location is stack of thread %d:\n", loc->tid); + Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid)); + } else if (loc->type == ReportLocationTLS) { + Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid)); + } else if (loc->type == ReportLocationFD) { + Printf(" Location is file descriptor %d created by %s at:\n", + loc->fd, thread_name(thrbuf, loc->tid)); + PrintStack(loc->stack); } } static void PrintMutex(const ReportMutex *rm) { - if (rm->stack == 0) - return; - TsanPrintf(" Mutex %d created at:\n", rm->id); - PrintStack(rm->stack); + if (rm->destroyed) { + Printf(" Mutex M%llu is already destroyed.\n\n", rm->id); + } else { + Printf(" Mutex M%llu created at:\n", rm->id); + PrintStack(rm->stack); + } } static void PrintThread(const ReportThread *rt) { if (rt->id == 0) // Little sense in describing the main thread. 
return; - TsanPrintf(" Thread %d", rt->id); + Printf(" Thread T%d", rt->id); if (rt->name) - TsanPrintf(" '%s'", rt->name); - TsanPrintf(" (%s)", rt->running ? "running" : "finished"); + Printf(" '%s'", rt->name); + char thrbuf[kThreadBufSize]; + Printf(" (tid=%zu, %s) created by %s", + rt->pid, rt->running ? "running" : "finished", + thread_name(thrbuf, rt->parent_tid)); if (rt->stack) - TsanPrintf(" created at:"); - TsanPrintf("\n"); + Printf(" at:"); + Printf("\n"); PrintStack(rt->stack); } +static void PrintSleep(const ReportStack *s) { + Printf(" As if synchronized via sleep:\n"); + PrintStack(s); +} + void PrintReport(const ReportDesc *rep) { - TsanPrintf("==================\n"); + Printf("==================\n"); PrintHeader(rep->typ); - for (uptr i = 0; i < rep->stacks.Size(); i++) + for (uptr i = 0; i < rep->stacks.Size(); i++) { + if (i) + Printf(" and:\n"); PrintStack(rep->stacks[i]); + } for (uptr i = 0; i < rep->mops.Size(); i++) PrintMop(rep->mops[i], i == 0); + if (rep->sleep) + PrintSleep(rep->sleep); + for (uptr i = 0; i < rep->locs.Size(); i++) PrintLocation(rep->locs[i]); @@ -124,20 +177,25 @@ void PrintReport(const ReportDesc *rep) { for (uptr i = 0; i < rep->threads.Size(); i++) PrintThread(rep->threads[i]); - TsanPrintf("==================\n"); + Printf("==================\n"); } #else -static void PrintStack(const ReportStack *ent) { +void PrintStack(const ReportStack *ent) { + if (ent == 0) { + Printf(" [failed to restore the stack]\n\n"); + return; + } for (int i = 0; ent; ent = ent->next, i++) { - TsanPrintf(" %s()\n %s:%d +0x%zx\n", + Printf(" %s()\n %s:%d +0x%zx\n", ent->func, ent->file, ent->line, (void*)ent->offset); } + Printf("\n"); } static void PrintMop(const ReportMop *mop, bool first) { - TsanPrintf("%s by goroutine %d:\n", + Printf("%s by goroutine %d:\n", (first ? (mop->write ? "Write" : "Read") : (mop->write ? 
"Previous write" : "Previous read")), mop->tid); @@ -147,19 +205,19 @@ static void PrintMop(const ReportMop *mop, bool first) { static void PrintThread(const ReportThread *rt) { if (rt->id == 0) // Little sense in describing the main thread. return; - TsanPrintf("Goroutine %d (%s) created at:\n", + Printf("Goroutine %d (%s) created at:\n", rt->id, rt->running ? "running" : "finished"); PrintStack(rt->stack); } void PrintReport(const ReportDesc *rep) { - TsanPrintf("==================\n"); - TsanPrintf("WARNING: DATA RACE at %p\n", (void*)rep->mops[0]->addr); + Printf("==================\n"); + Printf("WARNING: DATA RACE\n"); for (uptr i = 0; i < rep->mops.Size(); i++) PrintMop(rep->mops[i], i == 0); for (uptr i = 0; i < rep->threads.Size(); i++) PrintThread(rep->threads[i]); - TsanPrintf("==================\n"); + Printf("==================\n"); } #endif diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h index d139296d54c0..f6715d1aae9b 100644 --- a/lib/tsan/rtl/tsan_report.h +++ b/lib/tsan/rtl/tsan_report.h @@ -24,7 +24,7 @@ enum ReportType { ReportTypeThreadLeak, ReportTypeMutexDestroyLocked, ReportTypeSignalUnsafe, - ReportTypeErrnoInSignal, + ReportTypeErrnoInSignal }; struct ReportStack { @@ -38,27 +38,38 @@ struct ReportStack { int col; }; +struct ReportMopMutex { + u64 id; + bool write; +}; + struct ReportMop { int tid; uptr addr; int size; bool write; - int nmutex; - int *mutex; + Vector<ReportMopMutex> mset; ReportStack *stack; + + ReportMop(); }; enum ReportLocationType { ReportLocationGlobal, ReportLocationHeap, ReportLocationStack, + ReportLocationTLS, + ReportLocationFD }; struct ReportLocation { ReportLocationType type; uptr addr; uptr size; + char *module; + uptr offset; int tid; + int fd; char *name; char *file; int line; @@ -67,13 +78,16 @@ struct ReportLocation { struct ReportThread { int id; + uptr pid; bool running; char *name; + int parent_tid; ReportStack *stack; }; struct ReportMutex { - int id; + u64 id; + bool destroyed; 
ReportStack *stack; }; @@ -85,6 +99,7 @@ class ReportDesc { Vector<ReportLocation*> locs; Vector<ReportMutex*> mutexes; Vector<ReportThread*> threads; + ReportStack *sleep; ReportDesc(); ~ReportDesc(); @@ -96,6 +111,7 @@ class ReportDesc { // Format and output the report to the console/log. No additional logic. void PrintReport(const ReportDesc *rep); +void PrintStack(const ReportStack *stack); } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc index 0ceb26c90e67..493ed2055dfc 100644 --- a/lib/tsan/rtl/tsan_rtl.cc +++ b/lib/tsan/rtl/tsan_rtl.cc @@ -15,7 +15,9 @@ #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_symbolizer.h" #include "tsan_defs.h" #include "tsan_platform.h" #include "tsan_rtl.h" @@ -47,11 +49,12 @@ Context::Context() , nmissed_expected() , thread_mtx(MutexTypeThreads, StatMtxThreads) , racy_stacks(MBlockRacyStacks) - , racy_addresses(MBlockRacyAddresses) { + , racy_addresses(MBlockRacyAddresses) + , fired_suppressions(MBlockRacyAddresses) { } // The objects are allocated in TLS, so one may rely on zero-initialization. 
-ThreadState::ThreadState(Context *ctx, int tid, u64 epoch, +ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size) : fast_state(tid, epoch) @@ -62,6 +65,7 @@ ThreadState::ThreadState(Context *ctx, int tid, u64 epoch, // , in_rtl() , shadow_stack_pos(&shadow_stack[0]) , tid(tid) + , unique_id(unique_id) , stk_addr(stk_addr) , stk_size(stk_size) , tls_addr(tls_addr) @@ -71,6 +75,7 @@ ThreadState::ThreadState(Context *ctx, int tid, u64 epoch, ThreadContext::ThreadContext(int tid) : tid(tid) , unique_id() + , os_id() , user_id() , thr() , status(ThreadStatusInvalid) @@ -79,7 +84,8 @@ ThreadContext::ThreadContext(int tid) , epoch0() , epoch1() , dead_info() - , dead_next() { + , dead_next() + , name() { } static void WriteMemoryProfile(char *buf, uptr buf_size, int num) { @@ -119,9 +125,9 @@ static void MemoryProfileThread(void *arg) { ScopedInRtl in_rtl; fd_t fd = (fd_t)(uptr)arg; for (int i = 0; ; i++) { - InternalScopedBuf<char> buf(4096); - WriteMemoryProfile(buf.Ptr(), buf.Size(), i); - internal_write(fd, buf.Ptr(), internal_strlen(buf.Ptr())); + InternalScopedBuffer<char> buf(4096); + WriteMemoryProfile(buf.data(), buf.size(), i); + internal_write(fd, buf.data(), internal_strlen(buf.data())); SleepForSeconds(1); } } @@ -129,12 +135,12 @@ static void MemoryProfileThread(void *arg) { static void InitializeMemoryProfile() { if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0) return; - InternalScopedBuf<char> filename(4096); - internal_snprintf(filename.Ptr(), filename.Size(), "%s.%d", + InternalScopedBuffer<char> filename(4096); + internal_snprintf(filename.data(), filename.size(), "%s.%d", flags()->profile_memory, GetPid()); - fd_t fd = internal_open(filename.Ptr(), true); + fd_t fd = internal_open(filename.data(), true); if (fd == kInvalidFd) { - TsanPrintf("Failed to open memory profile file '%s'\n", &filename[0]); + Printf("Failed to open memory profile file '%s'\n", 
&filename[0]); Die(); } internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd); @@ -156,41 +162,81 @@ static void InitializeMemoryFlush() { internal_start_thread(&MemoryFlushThread, 0); } +void MapShadow(uptr addr, uptr size) { + MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier); +} + +void MapThreadTrace(uptr addr, uptr size) { + DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); + CHECK_GE(addr, kTraceMemBegin); + CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize); + if (addr != (uptr)MmapFixedNoReserve(addr, size)) { + Printf("FATAL: ThreadSanitizer can not mmap thread trace\n"); + Die(); + } +} + void Initialize(ThreadState *thr) { // Thread safe because done before all threads exist. static bool is_initialized = false; if (is_initialized) return; is_initialized = true; + // Install tool-specific callbacks in sanitizer_common. + SetCheckFailedCallback(TsanCheckFailed); + ScopedInRtl in_rtl; +#ifndef TSAN_GO + InitializeAllocator(); +#endif InitializeInterceptors(); const char *env = InitializePlatform(); InitializeMutex(); InitializeDynamicAnnotations(); ctx = new(ctx_placeholder) Context; +#ifndef TSAN_GO InitializeShadowMemory(); +#endif ctx->dead_list_size = 0; ctx->dead_list_head = 0; ctx->dead_list_tail = 0; InitializeFlags(&ctx->flags, env); + // Setup correct file descriptor for error reports. + if (internal_strcmp(flags()->log_path, "stdout") == 0) + __sanitizer_set_report_fd(kStdoutFd); + else if (internal_strcmp(flags()->log_path, "stderr") == 0) + __sanitizer_set_report_fd(kStderrFd); + else + __sanitizer_set_report_path(flags()->log_path); InitializeSuppressions(); +#ifndef TSAN_GO + // Initialize external symbolizer before internal threads are started. 
+ const char *external_symbolizer = flags()->external_symbolizer_path; + if (external_symbolizer != 0 && external_symbolizer[0] != '\0') { + if (!InitializeExternalSymbolizer(external_symbolizer)) { + Printf("Failed to start external symbolizer: '%s'\n", + external_symbolizer); + Die(); + } + } +#endif InitializeMemoryProfile(); InitializeMemoryFlush(); if (ctx->flags.verbosity) - TsanPrintf("***** Running under ThreadSanitizer v2 (pid %d) *****\n", + Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n", GetPid()); // Initialize thread 0. ctx->thread_seq = 0; int tid = ThreadCreate(thr, 0, 0, true); CHECK_EQ(tid, 0); - ThreadStart(thr, tid); + ThreadStart(thr, tid, GetPid()); CHECK_EQ(thr->in_rtl, 1); ctx->initialized = true; if (flags()->stop_on_start) { - TsanPrintf("ThreadSanitizer is suspended at startup (pid %d)." + Printf("ThreadSanitizer is suspended at startup (pid %d)." " Call __tsan_resume().\n", GetPid()); while (__tsan_resumed == 0); @@ -202,34 +248,77 @@ int Finalize(ThreadState *thr) { Context *ctx = __tsan::ctx; bool failed = false; + if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1) + SleepForMillis(flags()->atexit_sleep_ms); + + // Wait for pending reports. + ctx->report_mtx.Lock(); + ctx->report_mtx.Unlock(); + ThreadFinalize(thr); if (ctx->nreported) { failed = true; - TsanPrintf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); +#ifndef TSAN_GO + Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); +#else + Printf("Found %d data race(s)\n", ctx->nreported); +#endif } if (ctx->nmissed_expected) { failed = true; - TsanPrintf("ThreadSanitizer: missed %d expected races\n", + Printf("ThreadSanitizer: missed %d expected races\n", ctx->nmissed_expected); } + StatAggregate(ctx->stat, thr->stat); StatOutput(ctx->stat); return failed ? flags()->exitcode : 0; } +#ifndef TSAN_GO +u32 CurrentStackId(ThreadState *thr, uptr pc) { + if (thr->shadow_stack_pos == 0) // May happen during bootstrap. 
+ return 0; + if (pc) { + thr->shadow_stack_pos[0] = pc; + thr->shadow_stack_pos++; + } + u32 id = StackDepotPut(thr->shadow_stack, + thr->shadow_stack_pos - thr->shadow_stack); + if (pc) + thr->shadow_stack_pos--; + return id; +} +#endif + void TraceSwitch(ThreadState *thr) { thr->nomalloc++; ScopedInRtl in_rtl; Lock l(&thr->trace.mtx); - unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts; + unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); TraceHeader *hdr = &thr->trace.headers[trace]; hdr->epoch0 = thr->fast_state.epoch(); hdr->stack0.ObtainCurrent(thr, 0); + hdr->mset0 = thr->mset; thr->nomalloc--; } +uptr TraceTopPC(ThreadState *thr) { + Event *events = (Event*)GetThreadTrace(thr->tid); + uptr pc = events[thr->fast_state.GetTracePos()]; + return pc; +} + +uptr TraceSize() { + return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); +} + +uptr TraceParts() { + return TraceSize() / kTracePartSize; +} + #ifndef TSAN_GO extern "C" void __tsan_trace_switch() { TraceSwitch(cur_thread()); @@ -273,11 +362,11 @@ static inline bool BothReads(Shadow s, int kAccessIsWrite) { return !kAccessIsWrite && !s.is_write(); } -static inline bool OldIsRWStronger(Shadow old, int kAccessIsWrite) { +static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) { return old.is_write() || !kAccessIsWrite; } -static inline bool OldIsRWWeaker(Shadow old, int kAccessIsWrite) { +static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) { return !old.is_write() || kAccessIsWrite; } @@ -286,12 +375,12 @@ static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) { } static inline bool HappensBefore(Shadow old, ThreadState *thr) { - return thr->clock.get(old.tid()) >= old.epoch(); + return thr->clock.get(old.TidWithIgnore()) >= old.epoch(); } ALWAYS_INLINE void MemoryAccessImpl(ThreadState *thr, uptr addr, - int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state, + int kAccessSizeLog, bool 
kAccessIsWrite, u64 *shadow_mem, Shadow cur) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); @@ -367,7 +456,7 @@ ALWAYS_INLINE void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite) { u64 *shadow_mem = (u64*)MemToShadow(addr); - DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d" + DPrintf2("#%d: MemoryAccess: @%p %p size=%d" " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", (int)thr->fast_state.tid(), (void*)pc, (void*)addr, (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem, @@ -375,11 +464,11 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, (uptr)shadow_mem[2], (uptr)shadow_mem[3]); #if TSAN_DEBUG if (!IsAppMem(addr)) { - TsanPrintf("Access to non app mem %zx\n", addr); + Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); } if (!IsShadowMem((uptr)shadow_mem)) { - TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); + Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); DCHECK(IsShadowMem((uptr)shadow_mem)); } #endif @@ -395,9 +484,9 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, // We must not store to the trace if we do not store to the shadow. // That is, this call must be moved somewhere below. - TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc); + TraceAddEvent(thr, fast_state, EventTypeMop, pc); - MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, fast_state, + MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, shadow_mem, cur); } @@ -414,24 +503,29 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, addr += offset; size -= offset; } - CHECK_EQ(addr % 8, 0); - CHECK(IsAppMem(addr)); - CHECK(IsAppMem(addr + size - 1)); + DCHECK_EQ(addr % 8, 0); + // If a user passes some insane arguments (memset(0)), + // let it just crash as usual. 
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1)) + return; (void)thr; (void)pc; // Some programs mmap like hundreds of GBs but actually used a small part. // So, it's better to report a false positive on the memory // then to hang here senselessly. - const uptr kMaxResetSize = 1024*1024*1024; + const uptr kMaxResetSize = 4ull*1024*1024*1024; if (size > kMaxResetSize) size = kMaxResetSize; - size = (size + 7) & ~7; + size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1); u64 *p = (u64*)MemToShadow(addr); CHECK(IsShadowMem((uptr)p)); CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1))); // FIXME: may overwrite a part outside the region - for (uptr i = 0; i < size * kShadowCnt / kShadowCell; i++) - p[i] = val; + for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) { + p[i++] = val; + for (uptr j = 1; j < kShadowCnt; j++) + p[i++] = 0; + } } void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) { @@ -441,18 +535,28 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) { void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) { MemoryAccessRange(thr, pc, addr, size, true); Shadow s(thr->fast_state); + s.ClearIgnoreBit(); s.MarkAsFreed(); s.SetWrite(true); s.SetAddr0AndSizeLog(0, 3); MemoryRangeSet(thr, pc, addr, size, s.raw()); } +void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { + Shadow s(thr->fast_state); + s.ClearIgnoreBit(); + s.SetWrite(true); + s.SetAddr0AndSizeLog(0, 3); + MemoryRangeSet(thr, pc, addr, size, s.raw()); +} + +ALWAYS_INLINE void FuncEntry(ThreadState *thr, uptr pc) { DCHECK_EQ(thr->in_rtl, 0); StatInc(thr, StatFuncEnter); DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc); + TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc); // Shadow stack maintenance can be replaced with // stack unwinding during trace switch 
(which presumably must be faster). @@ -476,12 +580,13 @@ void FuncEntry(ThreadState *thr, uptr pc) { thr->shadow_stack_pos++; } +ALWAYS_INLINE void FuncExit(ThreadState *thr) { DCHECK_EQ(thr->in_rtl, 0); StatInc(thr, StatFuncExit); DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid()); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0); + TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0); DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]); #ifndef TSAN_GO diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h index c559cb2f080c..6b0ab0d385ef 100644 --- a/lib/tsan/rtl/tsan_rtl.h +++ b/lib/tsan/rtl/tsan_rtl.h @@ -27,6 +27,7 @@ #define TSAN_RTL_H #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_allocator.h" #include "tsan_clock.h" #include "tsan_defs.h" #include "tsan_flags.h" @@ -34,31 +35,90 @@ #include "tsan_trace.h" #include "tsan_vector.h" #include "tsan_report.h" +#include "tsan_platform.h" +#include "tsan_mutexset.h" + +#if SANITIZER_WORDSIZE != 64 +# error "ThreadSanitizer is supported only on 64-bit platforms" +#endif namespace __tsan { -void TsanPrintf(const char *format, ...); +// Descriptor of user's memory block. +struct MBlock { + Mutex mtx; + uptr size; + u32 alloc_tid; + u32 alloc_stack_id; + SyncVar *head; + + MBlock() + : mtx(MutexTypeMBlock, StatMtxMBlock) { + } +}; + +#ifndef TSAN_GO +#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW +const uptr kAllocatorSpace = 0x7d0000000000ULL; +#else +const uptr kAllocatorSpace = 0x7d0000000000ULL; +#endif +const uptr kAllocatorSize = 0x10000000000ULL; // 1T. + +struct TsanMapUnmapCallback { + void OnMap(uptr p, uptr size) const { } + void OnUnmap(uptr p, uptr size) const { + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. 
+ uptr shadow_beg = MemToShadow(p); + uptr shadow_end = MemToShadow(p + size); + CHECK(IsAligned(shadow_end|shadow_beg, GetPageSizeCached())); + FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); + } +}; + +typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock), + DefaultSizeClassMap> PrimaryAllocator; +typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; +typedef LargeMmapAllocator<TsanMapUnmapCallback> SecondaryAllocator; +typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, + SecondaryAllocator> Allocator; +Allocator *allocator(); +#endif + +void TsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2); // FastState (from most significant bit): -// unused : 1 +// ignore : 1 // tid : kTidBits // epoch : kClkBits // unused : - -// ignore_bit : 1 +// history_size : 3 class FastState { public: FastState(u64 tid, u64 epoch) { x_ = tid << kTidShift; x_ |= epoch << kClkShift; - DCHECK(tid == this->tid()); - DCHECK(epoch == this->epoch()); + DCHECK_EQ(tid, this->tid()); + DCHECK_EQ(epoch, this->epoch()); + DCHECK_EQ(GetIgnoreBit(), false); } explicit FastState(u64 x) : x_(x) { } + u64 raw() const { + return x_; + } + u64 tid() const { + u64 res = (x_ & ~kIgnoreBit) >> kTidShift; + return res; + } + + u64 TidWithIgnore() const { u64 res = x_ >> kTidShift; return res; } @@ -77,13 +137,34 @@ class FastState { void SetIgnoreBit() { x_ |= kIgnoreBit; } void ClearIgnoreBit() { x_ &= ~kIgnoreBit; } - bool GetIgnoreBit() const { return x_ & kIgnoreBit; } + bool GetIgnoreBit() const { return (s64)x_ < 0; } + + void SetHistorySize(int hs) { + CHECK_GE(hs, 0); + CHECK_LE(hs, 7); + x_ = (x_ & ~7) | hs; + } + + int GetHistorySize() const { + return (int)(x_ & 7); + } + + void ClearHistorySize() { + x_ &= ~7; + } + + u64 GetTracePos() const { + const int hs = GetHistorySize(); + // When hs == 0, the trace consists of 2 parts. 
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1; + return epoch() & mask; + } private: friend class Shadow; static const int kTidShift = 64 - kTidBits - 1; static const int kClkShift = kTidShift - kClkBits; - static const u64 kIgnoreBit = 1ull; + static const u64 kIgnoreBit = 1ull << 63; static const u64 kFreedBit = 1ull << 63; u64 x_; }; @@ -97,9 +178,14 @@ class FastState { // addr0 : 3 class Shadow : public FastState { public: - explicit Shadow(u64 x) : FastState(x) { } + explicit Shadow(u64 x) + : FastState(x) { + } - explicit Shadow(const FastState &s) : FastState(s.x_) { } + explicit Shadow(const FastState &s) + : FastState(s.x_) { + ClearHistorySize(); + } void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) { DCHECK_EQ(x_ & 31, 0); @@ -118,11 +204,10 @@ class Shadow : public FastState { } bool IsZero() const { return x_ == 0; } - u64 raw() const { return x_; } static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) { u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift; - DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid()); + DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore()); return shifted_xor == 0; } @@ -199,10 +284,6 @@ class Shadow : public FastState { } }; -// Freed memory. -// As if 8-byte write by thread 0xff..f at epoch 0xff..f, races with everything. -const u64 kShadowFreed = 0xfffffffffffffff8ull; - struct SignalContext; // This struct is stored in TLS. @@ -236,9 +317,14 @@ struct ThreadState { uptr *shadow_stack; uptr *shadow_stack_end; #endif + MutexSet mset; ThreadClock clock; +#ifndef TSAN_GO + AllocatorCache alloc_cache; +#endif u64 stat[StatCnt]; const int tid; + const int unique_id; int in_rtl; bool is_alive; const uptr stk_addr; @@ -251,11 +337,16 @@ struct ThreadState { bool in_signal_handler; SignalContext *signal_ctx; +#ifndef TSAN_GO + u32 last_sleep_stack_id; + ThreadClock last_sleep_clock; +#endif + // Set in regions of runtime that must be signal-safe and fork-safe. 
// If set, malloc must not be called. int nomalloc; - explicit ThreadState(Context *ctx, int tid, u64 epoch, + explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size); }; @@ -274,7 +365,7 @@ enum ThreadStatus { ThreadStatusCreated, // Created but not yet running. ThreadStatusRunning, // The thread is currently running. ThreadStatusFinished, // Joinable thread is finished but not yet joined. - ThreadStatusDead, // Joined, but some info (trace) is still alive. + ThreadStatusDead // Joined, but some info (trace) is still alive. }; // An info about a thread that is hold for some time after its termination. @@ -285,6 +376,7 @@ struct ThreadDeadInfo { struct ThreadContext { const int tid; int unique_id; // Non-rolling thread id. + uptr os_id; // pid uptr user_id; // Some opaque user thread id (e.g. pthread_t). ThreadState *thr; ThreadStatus status; @@ -297,8 +389,10 @@ struct ThreadContext { u64 epoch0; u64 epoch1; StackTrace creation_stack; + int creation_tid; ThreadDeadInfo *dead_info; ThreadContext *dead_next; // In dead thread list. + char *name; // As annotated by user. 
explicit ThreadContext(int tid); }; @@ -319,6 +413,11 @@ struct RacyAddress { uptr addr_max; }; +struct FiredSuppression { + ReportType type; + uptr pc; +}; + struct Context { Context(); @@ -342,6 +441,7 @@ struct Context { Vector<RacyStacks> racy_stacks; Vector<RacyAddress> racy_addresses; + Vector<FiredSuppression> fired_suppressions; Flags flags; @@ -366,10 +466,12 @@ class ScopedReport { ~ScopedReport(); void AddStack(const StackTrace *stack); - void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack); + void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack, + const MutexSet *mset); void AddThread(const ThreadContext *tctx); void AddMutex(const SyncVar *s); void AddLocation(uptr addr, uptr size); + void AddSleep(u32 stack_id); const ReportDesc *GetReport() const; @@ -377,10 +479,14 @@ class ScopedReport { Context *ctx_; ReportDesc *rep_; + void AddMutex(u64 id); + ScopedReport(const ScopedReport&); void operator = (const ScopedReport&); }; +void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset); + void StatAggregate(u64 *dst, u64 *src); void StatOutput(u64 *stat); void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { @@ -388,34 +494,47 @@ void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { thr->stat[typ] += n; } +void MapShadow(uptr addr, uptr size); +void MapThreadTrace(uptr addr, uptr size); void InitializeShadowMemory(); void InitializeInterceptors(); void InitializeDynamicAnnotations(); void ReportRace(ThreadState *thr); -bool OutputReport(const ScopedReport &srep, +bool OutputReport(Context *ctx, + const ScopedReport &srep, const ReportStack *suppress_stack = 0); +bool IsFiredSuppression(Context *ctx, + const ScopedReport &srep, + const StackTrace &trace); bool IsExpectedReport(uptr addr, uptr size); #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1 -# define DPrintf TsanPrintf +# define DPrintf Printf #else # define DPrintf(...) 
#endif #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2 -# define DPrintf2 TsanPrintf +# define DPrintf2 Printf #else # define DPrintf2(...) #endif +u32 CurrentStackId(ThreadState *thr, uptr pc); +void PrintCurrentStack(ThreadState *thr, uptr pc); + void Initialize(ThreadState *thr); int Finalize(ThreadState *thr); +SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr, + bool write_lock, bool create); +SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr); + void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite); void MemoryAccessImpl(ThreadState *thr, uptr addr, - int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state, + int kAccessSizeLog, bool kAccessIsWrite, u64 *shadow_mem, Shadow cur); void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr); void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr); @@ -425,21 +544,25 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size, bool is_write); void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); +void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); void IgnoreCtl(ThreadState *thr, bool write, bool begin); void FuncEntry(ThreadState *thr, uptr pc); void FuncExit(ThreadState *thr); int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); -void ThreadStart(ThreadState *thr, int tid); +void ThreadStart(ThreadState *thr, int tid, uptr os_id); void ThreadFinish(ThreadState *thr); int ThreadTid(ThreadState *thr, uptr pc, uptr uid); void ThreadJoin(ThreadState *thr, uptr pc, int tid); void ThreadDetach(ThreadState *thr, uptr pc, int tid); void ThreadFinalize(ThreadState *thr); -void ThreadFinalizerGoroutine(ThreadState *thr); +void ThreadSetName(ThreadState *thr, const char *name); +int ThreadCount(ThreadState *thr); +void ProcessPendingSignals(ThreadState *thr); -void MutexCreate(ThreadState 
*thr, uptr pc, uptr addr, bool rw, bool recursive); +void MutexCreate(ThreadState *thr, uptr pc, uptr addr, + bool rw, bool recursive, bool linker_init); void MutexDestroy(ThreadState *thr, uptr pc, uptr addr); void MutexLock(ThreadState *thr, uptr pc, uptr addr); void MutexUnlock(ThreadState *thr, uptr pc, uptr addr); @@ -448,8 +571,10 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); void Acquire(ThreadState *thr, uptr pc, uptr addr); +void AcquireGlobal(ThreadState *thr, uptr pc); void Release(ThreadState *thr, uptr pc, uptr addr); void ReleaseStore(ThreadState *thr, uptr pc, uptr addr); +void AfterSleep(ThreadState *thr, uptr pc); // The hacky call uses custom calling convention and an assembly thunk. // It is considerably faster that a normal call for the caller @@ -461,27 +586,39 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr); // The caller may not create the stack frame for itself at all, // so we create a reserve stack frame for it (1024b must be enough). 
#define HACKY_CALL(f) \ - __asm__ __volatile__("sub $0x400, %%rsp;" \ + __asm__ __volatile__("sub $1024, %%rsp;" \ + "/*.cfi_adjust_cfa_offset 1024;*/" \ + ".hidden " #f "_thunk;" \ "call " #f "_thunk;" \ - "add $0x400, %%rsp;" ::: "memory"); + "add $1024, %%rsp;" \ + "/*.cfi_adjust_cfa_offset -1024;*/" \ + ::: "memory", "cc"); #else #define HACKY_CALL(f) f() #endif void TraceSwitch(ThreadState *thr); +uptr TraceTopPC(ThreadState *thr); +uptr TraceSize(); +uptr TraceParts(); extern "C" void __tsan_trace_switch(); -void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch, - EventType typ, uptr addr) { +void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs, + EventType typ, u64 addr) { + DCHECK_GE((int)typ, 0); + DCHECK_LE((int)typ, 7); + DCHECK_EQ(GetLsb(addr, 61), addr); StatInc(thr, StatEvents); - if (UNLIKELY((epoch % kTracePartSize) == 0)) { + u64 pos = fs.GetTracePos(); + if (UNLIKELY((pos % kTracePartSize) == 0)) { #ifndef TSAN_GO HACKY_CALL(__tsan_trace_switch); #else TraceSwitch(thr); #endif } - Event *evp = &thr->trace.events[epoch % kTraceSize]; + Event *trace = (Event*)GetThreadTrace(fs.tid()); + Event *evp = &trace[pos]; Event ev = (u64)addr | ((u64)typ << 61); *evp = ev; } diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S index 2028ec508611..af878563573e 100644 --- a/lib/tsan/rtl/tsan_rtl_amd64.S +++ b/lib/tsan/rtl/tsan_rtl_amd64.S @@ -1,20 +1,43 @@ .section .text +.hidden __tsan_trace_switch .globl __tsan_trace_switch_thunk __tsan_trace_switch_thunk: + .cfi_startproc # Save scratch registers. 
push %rax + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rax, 0 push %rcx + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rcx, 0 push %rdx + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdx, 0 push %rsi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rsi, 0 push %rdi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdi, 0 push %r8 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r8, 0 push %r9 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r9, 0 push %r10 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r10, 0 push %r11 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r11, 0 # Align stack frame. push %rbx # non-scratch + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rbx, 0 mov %rsp, %rbx # save current rsp + .cfi_def_cfa_register %rbx shr $4, %rsp # clear 4 lsb, align to 16 shl $4, %rsp @@ -22,34 +45,79 @@ __tsan_trace_switch_thunk: # Unalign stack frame back. mov %rbx, %rsp # restore the original rsp + .cfi_def_cfa_register %rsp pop %rbx + .cfi_adjust_cfa_offset -8 # Restore scratch registers. pop %r11 + .cfi_adjust_cfa_offset -8 pop %r10 + .cfi_adjust_cfa_offset -8 pop %r9 + .cfi_adjust_cfa_offset -8 pop %r8 + .cfi_adjust_cfa_offset -8 pop %rdi + .cfi_adjust_cfa_offset -8 pop %rsi + .cfi_adjust_cfa_offset -8 pop %rdx + .cfi_adjust_cfa_offset -8 pop %rcx + .cfi_adjust_cfa_offset -8 pop %rax + .cfi_adjust_cfa_offset -8 + .cfi_restore %rax + .cfi_restore %rbx + .cfi_restore %rcx + .cfi_restore %rdx + .cfi_restore %rsi + .cfi_restore %rdi + .cfi_restore %r8 + .cfi_restore %r9 + .cfi_restore %r10 + .cfi_restore %r11 ret + .cfi_endproc +.hidden __tsan_report_race .globl __tsan_report_race_thunk __tsan_report_race_thunk: + .cfi_startproc # Save scratch registers. 
push %rax + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rax, 0 push %rcx + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rcx, 0 push %rdx + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdx, 0 push %rsi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rsi, 0 push %rdi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdi, 0 push %r8 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r8, 0 push %r9 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r9, 0 push %r10 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r10, 0 push %r11 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r11, 0 # Align stack frame. push %rbx # non-scratch + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rbx, 0 mov %rsp, %rbx # save current rsp + .cfi_def_cfa_register %rbx shr $4, %rsp # clear 4 lsb, align to 16 shl $4, %rsp @@ -57,15 +125,42 @@ __tsan_report_race_thunk: # Unalign stack frame back. mov %rbx, %rsp # restore the original rsp + .cfi_def_cfa_register %rsp pop %rbx + .cfi_adjust_cfa_offset -8 # Restore scratch registers. pop %r11 + .cfi_adjust_cfa_offset -8 pop %r10 + .cfi_adjust_cfa_offset -8 pop %r9 + .cfi_adjust_cfa_offset -8 pop %r8 + .cfi_adjust_cfa_offset -8 pop %rdi + .cfi_adjust_cfa_offset -8 pop %rsi + .cfi_adjust_cfa_offset -8 pop %rdx + .cfi_adjust_cfa_offset -8 pop %rcx + .cfi_adjust_cfa_offset -8 pop %rax + .cfi_adjust_cfa_offset -8 + .cfi_restore %rax + .cfi_restore %rbx + .cfi_restore %rcx + .cfi_restore %rdx + .cfi_restore %rsi + .cfi_restore %rdi + .cfi_restore %r8 + .cfi_restore %r9 + .cfi_restore %r10 + .cfi_restore %r11 ret + .cfi_endproc + +#ifdef __linux__ +/* We do not need executable stack. 
*/ +.section .note.GNU-stack,"",@progbits +#endif diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc index 882def835658..d812f12be560 100644 --- a/lib/tsan/rtl/tsan_rtl_mutex.cc +++ b/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -12,22 +12,26 @@ //===----------------------------------------------------------------------===// #include "tsan_rtl.h" +#include "tsan_flags.h" #include "tsan_sync.h" #include "tsan_report.h" #include "tsan_symbolize.h" +#include "tsan_platform.h" namespace __tsan { void MutexCreate(ThreadState *thr, uptr pc, uptr addr, - bool rw, bool recursive) { + bool rw, bool recursive, bool linker_init) { Context *ctx = CTX(); CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr); StatInc(thr, StatMutexCreate); - MemoryWrite1Byte(thr, pc, addr); - SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, true); + if (!linker_init && IsAppMem(addr)) + MemoryWrite1Byte(thr, pc, addr); + SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); s->is_rw = rw; s->is_recursive = recursive; + s->is_linker_init = linker_init; s->mtx.Unlock(); } @@ -36,34 +40,54 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr); StatInc(thr, StatMutexDestroy); - MemoryWrite1Byte(thr, pc, addr); +#ifndef TSAN_GO + // Global mutexes not marked as LINKER_INITIALIZED + // cause tons of not interesting reports, so just ignore it. 
+ if (IsGlobalVar(addr)) + return; +#endif SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr); if (s == 0) return; - if (s->owner_tid != SyncVar::kInvalidTid && !s->is_broken) { + if (IsAppMem(addr)) + MemoryWrite1Byte(thr, pc, addr); + if (flags()->report_destroy_locked + && s->owner_tid != SyncVar::kInvalidTid + && !s->is_broken) { s->is_broken = true; + Lock l(&ctx->thread_mtx); ScopedReport rep(ReportTypeMutexDestroyLocked); rep.AddMutex(s); + StackTrace trace; + trace.ObtainCurrent(thr, pc); + rep.AddStack(&trace); + FastState last(s->last_lock); + RestoreStack(last.tid(), last.epoch(), &trace, 0); + rep.AddStack(&trace); rep.AddLocation(s->addr, 1); - OutputReport(rep); + OutputReport(ctx, rep); } + thr->mset.Remove(s->GetId()); DestroyAndFree(s); } void MutexLock(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexLock %zx\n", thr->tid, addr); - MemoryRead1Byte(thr, pc, addr); + if (IsAppMem(addr)) + MemoryRead1Byte(thr, pc, addr); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); + TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); if (s->owner_tid == SyncVar::kInvalidTid) { CHECK_EQ(s->recursion, 0); s->owner_tid = thr->tid; + s->last_lock = thr->fast_state.raw(); } else if (s->owner_tid == thr->tid) { CHECK_GT(s->recursion, 0); } else { - TsanPrintf("ThreadSanitizer WARNING: double lock\n"); + Printf("ThreadSanitizer WARNING: double lock\n"); + PrintCurrentStack(thr, pc); } if (s->recursion == 0) { StatInc(thr, StatMutexLock); @@ -76,25 +100,29 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) { StatInc(thr, StatMutexRecLock); } s->recursion++; + thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); s->mtx.Unlock(); } void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: 
MutexUnlock %zx\n", thr->tid, addr); - MemoryRead1Byte(thr, pc, addr); + if (IsAppMem(addr)) + MemoryRead1Byte(thr, pc, addr); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); + TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); if (s->recursion == 0) { if (!s->is_broken) { s->is_broken = true; - TsanPrintf("ThreadSanitizer WARNING: unlock of unlocked mutex\n"); + Printf("ThreadSanitizer WARNING: unlock of unlocked mutex\n"); + PrintCurrentStack(thr, pc); } } else if (s->owner_tid != thr->tid) { if (!s->is_broken) { s->is_broken = true; - TsanPrintf("ThreadSanitizer WARNING: mutex unlock by another thread\n"); + Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n"); + PrintCurrentStack(thr, pc); } } else { s->recursion--; @@ -103,12 +131,13 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) { s->owner_tid = SyncVar::kInvalidTid; thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); - thr->clock.release(&s->clock); + thr->clock.ReleaseStore(&s->clock); StatInc(thr, StatSyncRelease); } else { StatInc(thr, StatMutexRecUnlock); } } + thr->mset.Del(s->GetId(), true); s->mtx.Unlock(); } @@ -116,15 +145,20 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr); StatInc(thr, StatMutexReadLock); - MemoryRead1Byte(thr, pc, addr); + if (IsAppMem(addr)) + MemoryRead1Byte(thr, pc, addr); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false); - if (s->owner_tid != SyncVar::kInvalidTid) - TsanPrintf("ThreadSanitizer WARNING: read lock of a write 
locked mutex\n"); + TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); + if (s->owner_tid != SyncVar::kInvalidTid) { + Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n"); + PrintCurrentStack(thr, pc); + } thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.acquire(&s->clock); + s->last_lock = thr->fast_state.raw(); StatInc(thr, StatSyncAcquire); + thr->mset.Add(s->GetId(), false, thr->fast_state.epoch()); s->mtx.ReadUnlock(); } @@ -132,36 +166,45 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr); StatInc(thr, StatMutexReadUnlock); - MemoryRead1Byte(thr, pc, addr); + if (IsAppMem(addr)) + MemoryRead1Byte(thr, pc, addr); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); - if (s->owner_tid != SyncVar::kInvalidTid) - TsanPrintf("ThreadSanitizer WARNING: read unlock of a write " + TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); + if (s->owner_tid != SyncVar::kInvalidTid) { + Printf("ThreadSanitizer WARNING: read unlock of a write " "locked mutex\n"); + PrintCurrentStack(thr, pc); + } thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.release(&s->read_clock); StatInc(thr, StatSyncRelease); s->mtx.Unlock(); + thr->mset.Del(s->GetId(), false); } void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); - MemoryRead1Byte(thr, pc, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); + if (IsAppMem(addr)) + MemoryRead1Byte(thr, pc, addr); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); + bool write = true; if (s->owner_tid == 
SyncVar::kInvalidTid) { // Seems to be read unlock. + write = false; StatInc(thr, StatMutexReadUnlock); thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr); + TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.release(&s->read_clock); StatInc(thr, StatSyncRelease); } else if (s->owner_tid == thr->tid) { // Seems to be write unlock. + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); CHECK_GT(s->recursion, 0); s->recursion--; if (s->recursion == 0) { @@ -171,36 +214,50 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { // The sequence of events is quite tricky and doubled in several places. // First, it's a bug to increment the epoch w/o writing to the trace. // Then, the acquire/release logic can be factored out as well. - thr->fast_state.IncrementEpoch(); - TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); - thr->clock.release(&s->clock); + thr->clock.ReleaseStore(&s->clock); StatInc(thr, StatSyncRelease); } else { StatInc(thr, StatMutexRecUnlock); } } else if (!s->is_broken) { s->is_broken = true; - TsanPrintf("ThreadSanitizer WARNING: mutex unlock by another thread\n"); + Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n"); + PrintCurrentStack(thr, pc); } + thr->mset.Del(s->GetId(), write); s->mtx.Unlock(); } void Acquire(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: Acquire %zx\n", thr->tid, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.acquire(&s->clock); StatInc(thr, StatSyncAcquire); 
s->mtx.ReadUnlock(); } +void AcquireGlobal(ThreadState *thr, uptr pc) { + Context *ctx = CTX(); + Lock l(&ctx->thread_mtx); + for (unsigned i = 0; i < kMaxTid; i++) { + ThreadContext *tctx = ctx->threads[i]; + if (tctx == 0) + continue; + if (tctx->status == ThreadStatusRunning) + thr->clock.set(i, tctx->thr->fast_state.epoch()); + else + thr->clock.set(i, tctx->epoch1); + } +} + void Release(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: Release %zx\n", thr->tid, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.release(&s->clock); StatInc(thr, StatSyncRelease); @@ -210,11 +267,28 @@ void Release(ThreadState *thr, uptr pc, uptr addr) { void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); - SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); + SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.ReleaseStore(&s->clock); StatInc(thr, StatSyncRelease); s->mtx.Unlock(); } +#ifndef TSAN_GO +void AfterSleep(ThreadState *thr, uptr pc) { + Context *ctx = CTX(); + thr->last_sleep_stack_id = CurrentStackId(thr, pc); + Lock l(&ctx->thread_mtx); + for (unsigned i = 0; i < kMaxTid; i++) { + ThreadContext *tctx = ctx->threads[i]; + if (tctx == 0) + continue; + if (tctx->status == ThreadStatusRunning) + thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch()); + else + thr->last_sleep_clock.set(i, tctx->epoch1); + } +} +#endif + } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc index f66e17e4815c..1a780e4b8070 100644 --- a/lib/tsan/rtl/tsan_rtl_report.cc +++ b/lib/tsan/rtl/tsan_rtl_report.cc @@ -13,6 +13,8 @@ #include "sanitizer_common/sanitizer_libc.h" #include 
"sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_common.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_suppressions.h" @@ -21,26 +23,26 @@ #include "tsan_sync.h" #include "tsan_mman.h" #include "tsan_flags.h" +#include "tsan_fd.h" -namespace __sanitizer { -using namespace __tsan; +namespace __tsan { + +using namespace __sanitizer; // NOLINT -void CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) { +void TsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { ScopedInRtl in_rtl; - TsanPrintf("FATAL: ThreadSanitizer CHECK failed: " - "%s:%d \"%s\" (0x%zx, 0x%zx)\n", - file, line, cond, (uptr)v1, (uptr)v2); + Printf("FATAL: ThreadSanitizer CHECK failed: " + "%s:%d \"%s\" (0x%zx, 0x%zx)\n", + file, line, cond, (uptr)v1, (uptr)v2); Die(); } -} // namespace __sanitizer - -namespace __tsan { - // Can be overriden by an application/test to intercept reports. #ifdef TSAN_EXTERNAL_HOOKS bool OnReport(const ReportDesc *rep, bool suppressed); #else +SANITIZER_INTERFACE_ATTRIBUTE bool WEAK OnReport(const ReportDesc *rep, bool suppressed) { (void)rep; return suppressed; @@ -84,9 +86,9 @@ static void StackStripMain(ReportStack *stack) { } else if (last || last2) { // Ensure that we recovered stack completely. Trimmed stack // can actually happen if we do not instrument some code, - // so it's only a DCHECK. However we must try hard to not miss it + // so it's only a debug print. However we must try hard to not miss it // due to our fault. 
- TsanPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc); + DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc); } #else if (last && 0 == internal_strcmp(last, "schedunlock")) @@ -119,6 +121,7 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) { ScopedReport::ScopedReport(ReportType typ) { ctx_ = CTX(); + ctx_->thread_mtx.CheckLocked(); void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc)); rep_ = new(mem) ReportDesc; rep_->typ = typ; @@ -127,8 +130,7 @@ ScopedReport::ScopedReport(ReportType typ) { ScopedReport::~ScopedReport() { ctx_->report_mtx.Unlock(); - rep_->~ReportDesc(); - internal_free(rep_); + DestroyAndFree(rep_); } void ScopedReport::AddStack(const StackTrace *stack) { @@ -137,7 +139,7 @@ void ScopedReport::AddStack(const StackTrace *stack) { } void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, - const StackTrace *stack) { + const StackTrace *stack, const MutexSet *mset) { void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); ReportMop *mop = new(mem) ReportMop; rep_->mops.PushBack(mop); @@ -145,50 +147,195 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, mop->addr = addr + s.addr0(); mop->size = s.size(); mop->write = s.is_write(); - mop->nmutex = 0; mop->stack = SymbolizeStack(*stack); + for (uptr i = 0; i < mset->Size(); i++) { + MutexSet::Desc d = mset->Get(i); + u64 uid = 0; + uptr addr = SyncVar::SplitId(d.id, &uid); + SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false); + // Check that the mutex is still alive. + // Another mutex can be created at the same address, + // so check uid as well. 
+ if (s && s->CheckId(uid)) { + ReportMopMutex mtx = {s->uid, d.write}; + mop->mset.PushBack(mtx); + AddMutex(s); + } else { + ReportMopMutex mtx = {d.id, d.write}; + mop->mset.PushBack(mtx); + AddMutex(d.id); + } + if (s) + s->mtx.ReadUnlock(); + } } void ScopedReport::AddThread(const ThreadContext *tctx) { + for (uptr i = 0; i < rep_->threads.Size(); i++) { + if (rep_->threads[i]->id == tctx->tid) + return; + } void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); ReportThread *rt = new(mem) ReportThread(); rep_->threads.PushBack(rt); rt->id = tctx->tid; + rt->pid = tctx->os_id; rt->running = (tctx->status == ThreadStatusRunning); + rt->name = tctx->name ? internal_strdup(tctx->name) : 0; + rt->parent_tid = tctx->creation_tid; rt->stack = SymbolizeStack(tctx->creation_stack); } +#ifndef TSAN_GO +static ThreadContext *FindThread(int unique_id) { + Context *ctx = CTX(); + ctx->thread_mtx.CheckLocked(); + for (unsigned i = 0; i < kMaxTid; i++) { + ThreadContext *tctx = ctx->threads[i]; + if (tctx && tctx->unique_id == unique_id) { + return tctx; + } + } + return 0; +} + +ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { + Context *ctx = CTX(); + ctx->thread_mtx.CheckLocked(); + for (unsigned i = 0; i < kMaxTid; i++) { + ThreadContext *tctx = ctx->threads[i]; + if (tctx == 0 || tctx->status != ThreadStatusRunning) + continue; + ThreadState *thr = tctx->thr; + CHECK(thr); + if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) { + *is_stack = true; + return tctx; + } + if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) { + *is_stack = false; + return tctx; + } + } + return 0; +} +#endif + void ScopedReport::AddMutex(const SyncVar *s) { + for (uptr i = 0; i < rep_->mutexes.Size(); i++) { + if (rep_->mutexes[i]->id == s->uid) + return; + } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); ReportMutex *rm = new(mem) ReportMutex(); rep_->mutexes.PushBack(rm); - rm->id = 42; + rm->id = s->uid; + 
rm->destroyed = false; rm->stack = SymbolizeStack(s->creation_stack); } +void ScopedReport::AddMutex(u64 id) { + for (uptr i = 0; i < rep_->mutexes.Size(); i++) { + if (rep_->mutexes[i]->id == id) + return; + } + void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); + ReportMutex *rm = new(mem) ReportMutex(); + rep_->mutexes.PushBack(rm); + rm->id = id; + rm->destroyed = true; + rm->stack = 0; +} + void ScopedReport::AddLocation(uptr addr, uptr size) { - ReportStack *symb = SymbolizeData(addr); - if (symb) { + if (addr == 0) + return; +#ifndef TSAN_GO + int fd = -1; + int creat_tid = -1; + u32 creat_stack = 0; + if (FdLocation(addr, &fd, &creat_tid, &creat_stack) + || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) { + void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); + ReportLocation *loc = new(mem) ReportLocation(); + rep_->locs.PushBack(loc); + loc->type = ReportLocationFD; + loc->fd = fd; + loc->tid = creat_tid; + uptr ssz = 0; + const uptr *stack = StackDepotGet(creat_stack, &ssz); + if (stack) { + StackTrace trace; + trace.Init(stack, ssz); + loc->stack = SymbolizeStack(trace); + } + ThreadContext *tctx = FindThread(creat_tid); + if (tctx) + AddThread(tctx); + return; + } + if (allocator()->PointerIsMine((void*)addr)) { + MBlock *b = user_mblock(0, (void*)addr); + ThreadContext *tctx = FindThread(b->alloc_tid); void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); ReportLocation *loc = new(mem) ReportLocation(); rep_->locs.PushBack(loc); - loc->type = ReportLocationGlobal; - loc->addr = addr; - loc->size = size; - loc->tid = 0; - loc->name = symb->func; - loc->file = symb->file; - loc->line = symb->line; + loc->type = ReportLocationHeap; + loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr); + loc->size = b->size; + loc->tid = tctx ? 
tctx->tid : b->alloc_tid; + loc->name = 0; + loc->file = 0; + loc->line = 0; loc->stack = 0; - internal_free(symb); + uptr ssz = 0; + const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz); + if (stack) { + StackTrace trace; + trace.Init(stack, ssz); + loc->stack = SymbolizeStack(trace); + } + if (tctx) + AddThread(tctx); + return; } + bool is_stack = false; + if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) { + void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); + ReportLocation *loc = new(mem) ReportLocation(); + rep_->locs.PushBack(loc); + loc->type = is_stack ? ReportLocationStack : ReportLocationTLS; + loc->tid = tctx->tid; + AddThread(tctx); + } + ReportLocation *loc = SymbolizeData(addr); + if (loc) { + rep_->locs.PushBack(loc); + return; + } +#endif } +#ifndef TSAN_GO +void ScopedReport::AddSleep(u32 stack_id) { + uptr ssz = 0; + const uptr *stack = StackDepotGet(stack_id, &ssz); + if (stack) { + StackTrace trace; + trace.Init(stack, ssz); + rep_->sleep = SymbolizeStack(trace); + } +} +#endif + const ReportDesc *ScopedReport::GetReport() const { return rep_; } -static void RestoreStack(int tid, const u64 epoch, StackTrace *stk) { +void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) { + // This function restores stack trace and mutex set for the thread/epoch. + // It does so by getting stack trace and mutex set at the beginning of + // trace part, and then replaying the trace till the given epoch. 
ThreadContext *tctx = CTX()->threads[tid]; if (tctx == 0) return; @@ -205,49 +352,62 @@ static void RestoreStack(int tid, const u64 epoch, StackTrace *stk) { return; } Lock l(&trace->mtx); - const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts; + const int partidx = (epoch / kTracePartSize) % TraceParts(); TraceHeader* hdr = &trace->headers[partidx]; if (epoch < hdr->epoch0) return; - const u64 eend = epoch % kTraceSize; - const u64 ebegin = eend / kTracePartSize * kTracePartSize; + const u64 epoch0 = RoundDown(epoch, TraceSize()); + const u64 eend = epoch % TraceSize(); + const u64 ebegin = RoundDown(eend, kTracePartSize); DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); - InternalScopedBuf<uptr> stack(1024); // FIXME: de-hardcode 1024 + InternalScopedBuffer<uptr> stack(1024); // FIXME: de-hardcode 1024 for (uptr i = 0; i < hdr->stack0.Size(); i++) { stack[i] = hdr->stack0.Get(i); DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]); } + if (mset) + *mset = hdr->mset0; uptr pos = hdr->stack0.Size(); + Event *events = (Event*)GetThreadTrace(tid); for (uptr i = ebegin; i <= eend; i++) { - Event ev = trace->events[i]; + Event ev = events[i]; EventType typ = (EventType)(ev >> 61); - uptr pc = (uptr)(ev & 0xffffffffffffull); + uptr pc = (uptr)(ev & ((1ull << 61) - 1)); DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); if (typ == EventTypeMop) { stack[pos] = pc; } else if (typ == EventTypeFuncEnter) { stack[pos++] = pc; } else if (typ == EventTypeFuncExit) { - // Since we have full stacks, this should never happen. 
- DCHECK_GT(pos, 0); if (pos > 0) pos--; } + if (mset) { + if (typ == EventTypeLock) { + mset->Add(pc, true, epoch0 + i); + } else if (typ == EventTypeUnlock) { + mset->Del(pc, true); + } else if (typ == EventTypeRLock) { + mset->Add(pc, false, epoch0 + i); + } else if (typ == EventTypeRUnlock) { + mset->Del(pc, false); + } + } for (uptr j = 0; j <= pos; j++) DPrintf2(" #%zu: %zx\n", j, stack[j]); } if (pos == 0 && stack[0] == 0) return; pos++; - stk->Init(stack, pos); + stk->Init(stack.data(), pos); } static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], uptr addr_min, uptr addr_max) { Context *ctx = CTX(); bool equal_stack = false; - RacyStacks hash = {}; + RacyStacks hash; if (flags()->suppress_equal_stacks) { hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr)); hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr)); @@ -298,20 +458,81 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], } } -bool OutputReport(const ScopedReport &srep, const ReportStack *suppress_stack) { +bool OutputReport(Context *ctx, + const ScopedReport &srep, + const ReportStack *suppress_stack) { const ReportDesc *rep = srep.GetReport(); - bool suppressed = IsSuppressed(rep->typ, suppress_stack); - suppressed = OnReport(rep, suppressed); - if (suppressed) + const uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack); + if (suppress_pc != 0) { + FiredSuppression supp = {srep.GetReport()->typ, suppress_pc}; + ctx->fired_suppressions.PushBack(supp); + } + if (OnReport(rep, suppress_pc != 0)) return false; PrintReport(rep); CTX()->nreported++; return true; } +bool IsFiredSuppression(Context *ctx, + const ScopedReport &srep, + const StackTrace &trace) { + for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) { + if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + continue; + for (uptr j = 0; j < trace.Size(); j++) { + if (trace.Get(j) == ctx->fired_suppressions[k].pc) + return 
true; + } + } + return false; +} + +// On programs that use Java we see weird reports like: +// WARNING: ThreadSanitizer: data race (pid=22512) +// Read of size 8 at 0x7d2b00084318 by thread 100: +// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3) +// #1 <null> <null>:0 (0x7f7ad9b40193) +// Previous write of size 8 at 0x7d2b00084318 by thread 105: +// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919) +// #1 <null> <null>:0 (0x7f7ad9b42707) +static bool IsJavaNonsense(const ReportDesc *rep) { + for (uptr i = 0; i < rep->mops.Size(); i++) { + ReportMop *mop = rep->mops[i]; + ReportStack *frame = mop->stack; + if (frame != 0 && frame->func != 0 + && (internal_strcmp(frame->func, "memset") == 0 + || internal_strcmp(frame->func, "memcpy") == 0 + || internal_strcmp(frame->func, "memmove") == 0 + || internal_strcmp(frame->func, "strcmp") == 0 + || internal_strcmp(frame->func, "strncpy") == 0 + || internal_strcmp(frame->func, "strlen") == 0 + || internal_strcmp(frame->func, "free") == 0 + || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) { + frame = frame->next; + if (frame == 0 + || (frame->func == 0 && frame->file == 0 && frame->line == 0 + && frame->module == 0)) { + if (frame) { + FiredSuppression supp = {rep->typ, frame->pc}; + CTX()->fired_suppressions.PushBack(supp); + } + return true; + } + } + } + return false; +} + void ReportRace(ThreadState *thr) { + if (!flags()->report_bugs) + return; ScopedInRtl in_rtl; + if (thr->in_signal_handler) + Printf("ThreadSanitizer: printing report from signal handler." + " Can crash or hang.\n"); + bool freed = false; { Shadow s(thr->racy_state[1]); @@ -339,21 +560,26 @@ void ReportRace(ThreadState *thr) { ScopedReport rep(freed ? 
ReportTypeUseAfterFree : ReportTypeRace); const uptr kMop = 2; StackTrace traces[kMop]; - for (uptr i = 0; i < kMop; i++) { - Shadow s(thr->racy_state[i]); - RestoreStack(s.tid(), s.epoch(), &traces[i]); - } + const uptr toppc = TraceTopPC(thr); + traces[0].ObtainCurrent(thr, toppc); + if (IsFiredSuppression(ctx, rep, traces[0])) + return; + InternalScopedBuffer<MutexSet> mset2(1); + new(mset2.data()) MutexSet(); + Shadow s2(thr->racy_state[1]); + RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data()); if (HandleRacyStacks(thr, traces, addr_min, addr_max)) return; for (uptr i = 0; i < kMop; i++) { Shadow s(thr->racy_state[i]); - rep.AddMemoryAccess(addr, s, &traces[i]); + rep.AddMemoryAccess(addr, s, &traces[i], + i == 0 ? &thr->mset : mset2.data()); } - // Ensure that we have at least something for the current thread. - CHECK_EQ(traces[0].IsEmpty(), false); + if (flags()->suppress_java && IsJavaNonsense(rep.GetReport())) + return; for (uptr i = 0; i < kMop; i++) { FastState s(thr->racy_state[i]); @@ -363,10 +589,26 @@ void ReportRace(ThreadState *thr) { rep.AddThread(tctx); } - if (!OutputReport(rep, rep.GetReport()->mops[0]->stack)) + rep.AddLocation(addr_min, addr_max - addr_min); + +#ifndef TSAN_GO + { // NOLINT + Shadow s(thr->racy_state[1]); + if (s.epoch() <= thr->last_sleep_clock.get(s.tid())) + rep.AddSleep(thr->last_sleep_stack_id); + } +#endif + + if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack)) return; AddRacyStacks(thr, traces, addr_min, addr_max); } +void PrintCurrentStack(ThreadState *thr, uptr pc) { + StackTrace trace; + trace.ObtainCurrent(thr, pc); + PrintStack(SymbolizeStack(trace)); +} + } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc index f7d5f13dca78..359775927834 100644 --- a/lib/tsan/rtl/tsan_rtl_thread.cc +++ b/lib/tsan/rtl/tsan_rtl_thread.cc @@ -35,7 +35,7 @@ static void MaybeReportThreadLeak(ThreadContext *tctx) { return; ScopedReport rep(ReportTypeThreadLeak); 
rep.AddThread(tctx); - OutputReport(rep); + OutputReport(CTX(), rep); } void ThreadFinalize(ThreadState *thr) { @@ -52,6 +52,23 @@ void ThreadFinalize(ThreadState *thr) { } } +int ThreadCount(ThreadState *thr) { + CHECK_GT(thr->in_rtl, 0); + Context *ctx = CTX(); + Lock l(&ctx->thread_mtx); + int cnt = 0; + for (unsigned i = 0; i < kMaxTid; i++) { + ThreadContext *tctx = ctx->threads[i]; + if (tctx == 0) + continue; + if (tctx->status != ThreadStatusCreated + && tctx->status != ThreadStatusRunning) + continue; + cnt++; + } + return cnt; +} + static void ThreadDead(ThreadState *thr, ThreadContext *tctx) { Context *ctx = CTX(); CHECK_GT(thr->in_rtl, 0); @@ -81,8 +98,9 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { ThreadContext *tctx = 0; if (ctx->dead_list_size > kThreadQuarantineSize || ctx->thread_seq >= kMaxTid) { + // Reusing old thread descriptor and tid. if (ctx->dead_list_size == 0) { - TsanPrintf("ThreadSanitizer: %d thread limit exceeded. Dying.\n", + Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n", kMaxTid); Die(); } @@ -100,12 +118,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { tctx->sync.Reset(); tid = tctx->tid; DestroyAndFree(tctx->dead_info); + if (tctx->name) { + internal_free(tctx->name); + tctx->name = 0; + } } else { + // Allocating new thread descriptor and tid. StatInc(thr, StatThreadMaxTid); tid = ctx->thread_seq++; void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); tctx = new(mem) ThreadContext(tid); ctx->threads[tid] = tctx; + MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event)); } CHECK_NE(tctx, 0); CHECK_GE(tid, 0); @@ -126,18 +150,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { if (tid) { thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. 
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0); + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.release(&tctx->sync); StatInc(thr, StatSyncRelease); - tctx->creation_stack.ObtainCurrent(thr, pc); + tctx->creation_tid = thr->tid; } return tid; } -void ThreadStart(ThreadState *thr, int tid) { +void ThreadStart(ThreadState *thr, int tid, uptr os_id) { CHECK_GT(thr->in_rtl, 0); uptr stk_addr = 0; uptr stk_size = 0; @@ -169,10 +193,14 @@ void ThreadStart(ThreadState *thr, int tid) { CHECK_NE(tctx, 0); CHECK_EQ(tctx->status, ThreadStatusCreated); tctx->status = ThreadStatusRunning; - tctx->epoch0 = tctx->epoch1 + 1; + tctx->os_id = os_id; + // RoundUp so that one trace part does not contain events + // from different threads. + tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize); tctx->epoch1 = (u64)-1; - new(thr) ThreadState(CTX(), tid, tctx->epoch0, stk_addr, stk_size, - tls_addr, tls_size); + new(thr) ThreadState(CTX(), tid, tctx->unique_id, + tctx->epoch0, stk_addr, stk_size, + tls_addr, tls_size); #ifdef TSAN_GO // Setup dynamic shadow stack. const int kInitStackSize = 8; @@ -185,6 +213,9 @@ void ThreadStart(ThreadState *thr, int tid) { thr->fast_synch_epoch = tctx->epoch0; thr->clock.set(tid, tctx->epoch0); thr->clock.acquire(&tctx->sync); + thr->fast_state.SetHistorySize(flags()->history_size); + const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts(); + thr->trace.headers[trace].epoch0 = tctx->epoch0; StatInc(thr, StatSyncAcquire); DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " "tls_addr=%zx tls_size=%zx\n", @@ -219,7 +250,7 @@ void ThreadFinish(ThreadState *thr) { } else { thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. 
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0); + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->fast_synch_epoch = thr->fast_state.epoch(); thr->clock.release(&tctx->sync); @@ -230,14 +261,16 @@ void ThreadFinish(ThreadState *thr) { // Save from info about the thread. tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo))) ThreadDeadInfo(); - internal_memcpy(&tctx->dead_info->trace.events[0], - &thr->trace.events[0], sizeof(thr->trace.events)); - for (int i = 0; i < kTraceParts; i++) { + for (uptr i = 0; i < TraceParts(); i++) { + tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0; tctx->dead_info->trace.headers[i].stack0.CopyFrom( thr->trace.headers[i].stack0); } tctx->epoch1 = thr->fast_state.epoch(); +#ifndef TSAN_GO + AlloctorThreadFinish(thr); +#endif thr->~ThreadState(); StatAggregate(ctx->stat, thr->stat); tctx->thr = 0; @@ -270,9 +303,10 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) { Lock l(&ctx->thread_mtx); ThreadContext *tctx = ctx->threads[tid]; if (tctx->status == ThreadStatusInvalid) { - TsanPrintf("ThreadSanitizer: join of non-existent thread\n"); + Printf("ThreadSanitizer: join of non-existent thread\n"); return; } + // FIXME(dvyukov): print message and continue (it's user error). 
CHECK_EQ(tctx->detached, false); CHECK_EQ(tctx->status, ThreadStatusFinished); thr->clock.acquire(&tctx->sync); @@ -288,7 +322,7 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) { Lock l(&ctx->thread_mtx); ThreadContext *tctx = ctx->threads[tid]; if (tctx->status == ThreadStatusInvalid) { - TsanPrintf("ThreadSanitizer: detach of non-existent thread\n"); + Printf("ThreadSanitizer: detach of non-existent thread\n"); return; } if (tctx->status == ThreadStatusFinished) { @@ -298,8 +332,18 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) { } } -void ThreadFinalizerGoroutine(ThreadState *thr) { - thr->clock.Disable(thr->tid); +void ThreadSetName(ThreadState *thr, const char *name) { + Context *ctx = CTX(); + Lock l(&ctx->thread_mtx); + ThreadContext *tctx = ctx->threads[thr->tid]; + CHECK_NE(tctx, 0); + CHECK_EQ(tctx->status, ThreadStatusRunning); + if (tctx->name) { + internal_free(tctx->name); + tctx->name = 0; + } + if (name) + tctx->name = internal_strdup(name); } void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, @@ -314,19 +358,19 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, #if TSAN_DEBUG if (!IsAppMem(addr)) { - TsanPrintf("Access to non app mem %zx\n", addr); + Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); } if (!IsAppMem(addr + size - 1)) { - TsanPrintf("Access to non app mem %zx\n", addr + size - 1); + Printf("Access to non app mem %zx\n", addr + size - 1); DCHECK(IsAppMem(addr + size - 1)); } if (!IsShadowMem((uptr)shadow_mem)) { - TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); + Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); DCHECK(IsShadowMem((uptr)shadow_mem)); } if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) { - TsanPrintf("Bad shadow addr %p (%zx)\n", + Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1); DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))); } @@ -340,7 +384,7 @@ void 
MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, fast_state.IncrementEpoch(); thr->fast_state = fast_state; - TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc); + TraceAddEvent(thr, fast_state, EventTypeMop, pc); bool unaligned = (addr % kShadowCell) != 0; @@ -350,7 +394,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); - MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state, + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, shadow_mem, cur); } if (unaligned) @@ -361,7 +405,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(0, kAccessSizeLog); - MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state, + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, shadow_mem, cur); shadow_mem += kShadowCnt; } @@ -371,7 +415,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); - MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state, + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, shadow_mem, cur); } } diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc index a7c33a5de760..82f1d6b5620f 100644 --- a/lib/tsan/rtl/tsan_stat.cc +++ b/lib/tsan/rtl/tsan_stat.cc @@ -77,6 +77,11 @@ void StatOutput(u64 *stat) { name[StatAtomicStore] = " store "; name[StatAtomicExchange] = " exchange "; name[StatAtomicFetchAdd] = " fetch_add "; + name[StatAtomicFetchSub] = " fetch_sub "; + name[StatAtomicFetchAnd] = " fetch_and "; + name[StatAtomicFetchOr] = " fetch_or "; + name[StatAtomicFetchXor] = " fetch_xor "; + name[StatAtomicFetchNand] = " fetch_nand "; name[StatAtomicCAS] = " compare_exchange "; name[StatAtomicFence] = " fence "; name[StatAtomicRelaxed] = " Including relaxed "; @@ 
-89,11 +94,13 @@ void StatOutput(u64 *stat) { name[StatAtomic2] = " size 2 "; name[StatAtomic4] = " size 4 "; name[StatAtomic8] = " size 8 "; + name[StatAtomic16] = " size 16 "; name[StatInterceptor] = "Interceptors "; name[StatInt_longjmp] = " longjmp "; name[StatInt_siglongjmp] = " siglongjmp "; name[StatInt_malloc] = " malloc "; + name[StatInt___libc_memalign] = " __libc_memalign "; name[StatInt_calloc] = " calloc "; name[StatInt_realloc] = " realloc "; name[StatInt_free] = " free "; @@ -131,6 +138,7 @@ void StatOutput(u64 *stat) { name[StatInt_atexit] = " atexit "; name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire "; name[StatInt___cxa_guard_release] = " __cxa_guard_release "; + name[StatInt___cxa_guard_abort] = " __cxa_guard_abort "; name[StatInt_pthread_create] = " pthread_create "; name[StatInt_pthread_join] = " pthread_join "; name[StatInt_pthread_detach] = " pthread_detach "; @@ -173,7 +181,30 @@ void StatOutput(u64 *stat) { name[StatInt_sem_timedwait] = " sem_timedwait "; name[StatInt_sem_post] = " sem_post "; name[StatInt_sem_getvalue] = " sem_getvalue "; + name[StatInt_open] = " open "; + name[StatInt_open64] = " open64 "; + name[StatInt_creat] = " creat "; + name[StatInt_creat64] = " creat64 "; + name[StatInt_dup] = " dup "; + name[StatInt_dup2] = " dup2 "; + name[StatInt_dup3] = " dup3 "; + name[StatInt_eventfd] = " eventfd "; + name[StatInt_signalfd] = " signalfd "; + name[StatInt_inotify_init] = " inotify_init "; + name[StatInt_inotify_init1] = " inotify_init1 "; + name[StatInt_socket] = " socket "; + name[StatInt_socketpair] = " socketpair "; + name[StatInt_connect] = " connect "; + name[StatInt_accept] = " accept "; + name[StatInt_accept4] = " accept4 "; + name[StatInt_epoll_create] = " epoll_create "; + name[StatInt_epoll_create1] = " epoll_create1 "; + name[StatInt_close] = " close "; + name[StatInt___close] = " __close "; + name[StatInt_pipe] = " pipe "; + name[StatInt_pipe2] = " pipe2 "; name[StatInt_read] = " read "; + 
name[StatInt_prctl] = " prctl "; name[StatInt_pread] = " pread "; name[StatInt_pread64] = " pread64 "; name[StatInt_readv] = " readv "; @@ -189,6 +220,8 @@ void StatOutput(u64 *stat) { name[StatInt_recvmsg] = " recvmsg "; name[StatInt_unlink] = " unlink "; name[StatInt_fopen] = " fopen "; + name[StatInt_freopen] = " freopen "; + name[StatInt_fclose] = " fclose "; name[StatInt_fread] = " fread "; name[StatInt_fwrite] = " fwrite "; name[StatInt_puts] = " puts "; @@ -196,7 +229,19 @@ void StatOutput(u64 *stat) { name[StatInt_opendir] = " opendir "; name[StatInt_epoll_ctl] = " epoll_ctl "; name[StatInt_epoll_wait] = " epoll_wait "; + name[StatInt_poll] = " poll "; name[StatInt_sigaction] = " sigaction "; + name[StatInt_sleep] = " sleep "; + name[StatInt_usleep] = " usleep "; + name[StatInt_nanosleep] = " nanosleep "; + name[StatInt_gettimeofday] = " gettimeofday "; + name[StatInt_fork] = " fork "; + name[StatInt_vscanf] = " vscanf "; + name[StatInt_vsscanf] = " vsscanf "; + name[StatInt_vfscanf] = " vfscanf "; + name[StatInt_scanf] = " scanf "; + name[StatInt_sscanf] = " sscanf "; + name[StatInt_fscanf] = " fscanf "; name[StatAnnotation] = "Dynamic annotations "; name[StatAnnotateHappensBefore] = " HappensBefore "; @@ -240,10 +285,12 @@ void StatOutput(u64 *stat) { name[StatMtxSlab] = " Slab "; name[StatMtxAtExit] = " Atexit "; name[StatMtxAnnotations] = " Annotations "; + name[StatMtxMBlock] = " MBlock "; + name[StatMtxJavaMBlock] = " JavaMBlock "; - TsanPrintf("Statistics:\n"); + Printf("Statistics:\n"); for (int i = 0; i < StatCnt; i++) - TsanPrintf("%s: %zu\n", name[i], (uptr)stat[i]); + Printf("%s: %zu\n", name[i], (uptr)stat[i]); } } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h index 71b1b13c0876..58c5f23af40b 100644 --- a/lib/tsan/rtl/tsan_stat.h +++ b/lib/tsan/rtl/tsan_stat.h @@ -73,9 +73,11 @@ enum StatType { StatAtomicStore, StatAtomicExchange, StatAtomicFetchAdd, + StatAtomicFetchSub, StatAtomicFetchAnd, 
StatAtomicFetchOr, StatAtomicFetchXor, + StatAtomicFetchNand, StatAtomicCAS, StatAtomicFence, StatAtomicRelaxed, @@ -88,12 +90,14 @@ enum StatType { StatAtomic2, StatAtomic4, StatAtomic8, + StatAtomic16, // Interceptors. StatInterceptor, StatInt_longjmp, StatInt_siglongjmp, StatInt_malloc, + StatInt___libc_memalign, StatInt_calloc, StatInt_realloc, StatInt_free, @@ -131,6 +135,7 @@ enum StatType { StatInt_atexit, StatInt___cxa_guard_acquire, StatInt___cxa_guard_release, + StatInt___cxa_guard_abort, StatInt_pthread_create, StatInt_pthread_join, StatInt_pthread_detach, @@ -171,7 +176,30 @@ enum StatType { StatInt_sem_timedwait, StatInt_sem_post, StatInt_sem_getvalue, + StatInt_open, + StatInt_open64, + StatInt_creat, + StatInt_creat64, + StatInt_dup, + StatInt_dup2, + StatInt_dup3, + StatInt_eventfd, + StatInt_signalfd, + StatInt_inotify_init, + StatInt_inotify_init1, + StatInt_socket, + StatInt_socketpair, + StatInt_connect, + StatInt_accept, + StatInt_accept4, + StatInt_epoll_create, + StatInt_epoll_create1, + StatInt_close, + StatInt___close, + StatInt_pipe, + StatInt_pipe2, StatInt_read, + StatInt_prctl, StatInt_pread, StatInt_pread64, StatInt_readv, @@ -187,6 +215,8 @@ enum StatType { StatInt_recvmsg, StatInt_unlink, StatInt_fopen, + StatInt_freopen, + StatInt_fclose, StatInt_fread, StatInt_fwrite, StatInt_puts, @@ -194,11 +224,23 @@ enum StatType { StatInt_opendir, StatInt_epoll_ctl, StatInt_epoll_wait, + StatInt_poll, StatInt_sigaction, StatInt_signal, StatInt_raise, StatInt_kill, StatInt_pthread_kill, + StatInt_sleep, + StatInt_usleep, + StatInt_nanosleep, + StatInt_gettimeofday, + StatInt_fork, + StatInt_vscanf, + StatInt_vsscanf, + StatInt_vfscanf, + StatInt_scanf, + StatInt_sscanf, + StatInt_fscanf, // Dynamic annotations. 
StatAnnotation, @@ -209,6 +251,7 @@ enum StatType { StatAnnotateMutexIsNotPHB, StatAnnotateCondVarWait, StatAnnotateRWLockCreate, + StatAnnotateRWLockCreateStatic, StatAnnotateRWLockDestroy, StatAnnotateRWLockAcquired, StatAnnotateRWLockReleased, @@ -244,9 +287,11 @@ enum StatType { StatMtxSlab, StatMtxAnnotations, StatMtxAtExit, + StatMtxMBlock, + StatMtxJavaMBlock, // This must be the last. - StatCnt, + StatCnt }; } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc index 7549a4f8ba83..5316f6db6a0a 100644 --- a/lib/tsan/rtl/tsan_suppressions.cc +++ b/lib/tsan/rtl/tsan_suppressions.cc @@ -26,27 +26,27 @@ static Suppression *g_suppressions; static char *ReadFile(const char *filename) { if (filename == 0 || filename[0] == 0) return 0; - InternalScopedBuf<char> tmp(4*1024); - if (filename[0] == '/') - internal_snprintf(tmp, tmp.Size(), "%s", filename); + InternalScopedBuffer<char> tmp(4*1024); + if (filename[0] == '/' || GetPwd() == 0) + internal_snprintf(tmp.data(), tmp.size(), "%s", filename); else - internal_snprintf(tmp, tmp.Size(), "%s/%s", GetPwd(), filename); - fd_t fd = internal_open(tmp, false); + internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename); + fd_t fd = internal_open(tmp.data(), false); if (fd == kInvalidFd) { - TsanPrintf("ThreadSanitizer: failed to open suppressions file '%s'\n", - tmp.Ptr()); + Printf("ThreadSanitizer: failed to open suppressions file '%s'\n", + tmp.data()); Die(); } const uptr fsize = internal_filesize(fd); if (fsize == (uptr)-1) { - TsanPrintf("ThreadSanitizer: failed to stat suppressions file '%s'\n", - tmp.Ptr()); + Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n", + tmp.data()); Die(); } char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1); if (fsize != internal_read(fd, buf, fsize)) { - TsanPrintf("ThreadSanitizer: failed to read suppressions file '%s'\n", - tmp.Ptr()); + Printf("ThreadSanitizer: failed to read suppressions 
file '%s'\n", + tmp.data()); Die(); } internal_close(fd); @@ -110,7 +110,7 @@ Suppression *SuppressionParse(const char* supp) { stype = SuppressionSignal; line += sizeof("signal:") - 1; } else { - TsanPrintf("ThreadSanitizer: failed to parse suppressions file\n"); + Printf("ThreadSanitizer: failed to parse suppressions file\n"); Die(); } Suppression *s = (Suppression*)internal_alloc(MBlockSuppression, @@ -134,9 +134,9 @@ void InitializeSuppressions() { g_suppressions = SuppressionParse(supp); } -bool IsSuppressed(ReportType typ, const ReportStack *stack) { +uptr IsSuppressed(ReportType typ, const ReportStack *stack) { if (g_suppressions == 0 || stack == 0) - return false; + return 0; SuppressionType stype; if (typ == ReportTypeRace) stype = SuppressionRace; @@ -147,17 +147,17 @@ bool IsSuppressed(ReportType typ, const ReportStack *stack) { else if (typ == ReportTypeSignalUnsafe) stype = SuppressionSignal; else - return false; + return 0; for (const ReportStack *frame = stack; frame; frame = frame->next) { for (Suppression *supp = g_suppressions; supp; supp = supp->next) { if (stype == supp->type && (SuppressionMatch(supp->templ, frame->func) || SuppressionMatch(supp->templ, frame->file))) { DPrintf("ThreadSanitizer: matched suppression '%s'\n", supp->templ); - return true; + return frame->pc; } } } - return false; + return 0; } } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_suppressions.h b/lib/tsan/rtl/tsan_suppressions.h index 29311c123d6d..61a4cca9d17a 100644 --- a/lib/tsan/rtl/tsan_suppressions.h +++ b/lib/tsan/rtl/tsan_suppressions.h @@ -19,14 +19,14 @@ namespace __tsan { void InitializeSuppressions(); void FinalizeSuppressions(); -bool IsSuppressed(ReportType typ, const ReportStack *stack); +uptr IsSuppressed(ReportType typ, const ReportStack *stack); // Exposed for testing. 
enum SuppressionType { SuppressionRace, SuppressionMutex, SuppressionThread, - SuppressionSignal, + SuppressionSignal }; struct Suppression { diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc index f757d070e169..29dfe237ffd9 100644 --- a/lib/tsan/rtl/tsan_symbolize.cc +++ b/lib/tsan/rtl/tsan_symbolize.cc @@ -29,14 +29,24 @@ ReportStack *NewReportStackEntry(uptr addr) { return ent; } +// Strip module path to make output shorter. +static char *StripModuleName(const char *module) { + if (module == 0) + return 0; + const char *short_module_name = internal_strrchr(module, '/'); + if (short_module_name) + short_module_name += 1; + else + short_module_name = module; + return internal_strdup(short_module_name); +} + static ReportStack *NewReportStackEntry(const AddressInfo &info) { ReportStack *ent = NewReportStackEntry(info.address); - if (info.module) - ent->module = internal_strdup(info.module); + ent->module = StripModuleName(info.module); ent->offset = info.module_offset; - if (info.function) { + if (info.function) ent->func = internal_strdup(info.function); - } if (info.file) ent->file = internal_strdup(info.file); ent->line = info.line; @@ -45,12 +55,12 @@ static ReportStack *NewReportStackEntry(const AddressInfo &info) { } ReportStack *SymbolizeCode(uptr addr) { - if (flags()->use_internal_symbolizer) { + if (flags()->external_symbolizer_path[0]) { static const uptr kMaxAddrFrames = 16; - InternalScopedBuf<AddressInfo> addr_frames(kMaxAddrFrames); + InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); for (uptr i = 0; i < kMaxAddrFrames; i++) new(&addr_frames[i]) AddressInfo(); - uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames, + uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames); if (addr_frames_num == 0) return NewReportStackEntry(addr); @@ -71,8 +81,23 @@ ReportStack *SymbolizeCode(uptr addr) { return SymbolizeCodeAddr2Line(addr); } -ReportStack *SymbolizeData(uptr 
addr) { - return SymbolizeDataAddr2Line(addr); +ReportLocation *SymbolizeData(uptr addr) { + if (flags()->external_symbolizer_path[0] == 0) + return 0; + DataInfo info; + if (!__sanitizer::SymbolizeData(addr, &info)) + return 0; + ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack, + sizeof(ReportLocation)); + internal_memset(ent, 0, sizeof(*ent)); + ent->type = ReportLocationGlobal; + ent->module = StripModuleName(info.module); + ent->offset = info.module_offset; + if (info.name) + ent->name = internal_strdup(info.name); + ent->addr = info.start; + ent->size = info.size; + return ent; } } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_symbolize.h b/lib/tsan/rtl/tsan_symbolize.h index 115339be38a9..29193043cd70 100644 --- a/lib/tsan/rtl/tsan_symbolize.h +++ b/lib/tsan/rtl/tsan_symbolize.h @@ -19,10 +19,9 @@ namespace __tsan { ReportStack *SymbolizeCode(uptr addr); -ReportStack *SymbolizeData(uptr addr); +ReportLocation *SymbolizeData(uptr addr); ReportStack *SymbolizeCodeAddr2Line(uptr addr); -ReportStack *SymbolizeDataAddr2Line(uptr addr); ReportStack *NewReportStackEntry(uptr addr); diff --git a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc index 5eed977ec278..76926e2b5aaf 100644 --- a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc +++ b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc @@ -50,17 +50,17 @@ struct DlIteratePhdrCtx { static void NOINLINE InitModule(ModuleDesc *m) { int outfd[2] = {}; if (pipe(&outfd[0])) { - TsanPrintf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno); + Printf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno); Die(); } int infd[2] = {}; if (pipe(&infd[0])) { - TsanPrintf("ThreadSanitizer: infd pipe() failed (%d)\n", errno); + Printf("ThreadSanitizer: infd pipe() failed (%d)\n", errno); Die(); } int pid = fork(); if (pid == 0) { - flags()->log_fileno = STDERR_FILENO; + __sanitizer_set_report_fd(STDERR_FILENO); internal_close(STDOUT_FILENO); 
internal_close(STDIN_FILENO); internal_dup2(outfd[0], STDIN_FILENO); @@ -74,7 +74,7 @@ static void NOINLINE InitModule(ModuleDesc *m) { execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname, 0); _exit(0); } else if (pid < 0) { - TsanPrintf("ThreadSanitizer: failed to fork symbolizer\n"); + Printf("ThreadSanitizer: failed to fork symbolizer\n"); Die(); } internal_close(outfd[0]); @@ -85,10 +85,10 @@ static void NOINLINE InitModule(ModuleDesc *m) { static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg; - InternalScopedBuf<char> tmp(128); + InternalScopedBuffer<char> tmp(128); if (ctx->is_first) { - internal_snprintf(tmp.Ptr(), tmp.Size(), "/proc/%d/exe", GetPid()); - info->dlpi_name = tmp.Ptr(); + internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe", GetPid()); + info->dlpi_name = tmp.data(); } ctx->is_first = false; if (info->dlpi_name == 0 || info->dlpi_name[0] == 0) @@ -104,11 +104,11 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { m->base = (uptr)info->dlpi_addr; m->inp_fd = -1; m->out_fd = -1; - DPrintf("Module %s %zx\n", m->name, m->base); + DPrintf2("Module %s %zx\n", m->name, m->base); for (int i = 0; i < info->dlpi_phnum; i++) { const Elf64_Phdr *s = &info->dlpi_phdr[i]; - DPrintf(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx" - " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n", + DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx" + " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n", (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr, (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz, (uptr)s->p_flags, (uptr)s->p_align); @@ -121,7 +121,7 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { sec->end = sec->base + s->p_memsz; sec->next = ctx->sections; ctx->sections = sec; - DPrintf(" Section %zx-%zx\n", sec->base, sec->end); + DPrintf2(" Section %zx-%zx\n", sec->base, 
sec->end); } return 0; } @@ -155,26 +155,26 @@ ReportStack *SymbolizeCodeAddr2Line(uptr addr) { char addrstr[32]; internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset); if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) { - TsanPrintf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n", + Printf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n", m->out_fd, errno); Die(); } - InternalScopedBuf<char> func(1024); - ssize_t len = internal_read(m->inp_fd, func, func.Size() - 1); + InternalScopedBuffer<char> func(1024); + ssize_t len = internal_read(m->inp_fd, func.data(), func.size() - 1); if (len <= 0) { - TsanPrintf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n", + Printf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n", m->inp_fd, errno); Die(); } - func.Ptr()[len] = 0; + func.data()[len] = 0; ReportStack *res = NewReportStackEntry(addr); res->module = internal_strdup(m->name); res->offset = offset; - char *pos = (char*)internal_strchr(func, '\n'); + char *pos = (char*)internal_strchr(func.data(), '\n'); if (pos && func[0] != '?') { - res->func = (char*)internal_alloc(MBlockReportStack, pos - func + 1); - internal_memcpy(res->func, func, pos - func); - res->func[pos - func] = 0; + res->func = (char*)internal_alloc(MBlockReportStack, pos - func.data() + 1); + internal_memcpy(res->func, func.data(), pos - func.data()); + res->func[pos - func.data()] = 0; char *pos2 = (char*)internal_strchr(pos, ':'); if (pos2) { res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1); diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc index abb5a2ad298f..b25346ef344f 100644 --- a/lib/tsan/rtl/tsan_sync.cc +++ b/lib/tsan/rtl/tsan_sync.cc @@ -17,14 +17,17 @@ namespace __tsan { -SyncVar::SyncVar(uptr addr) +SyncVar::SyncVar(uptr addr, u64 uid) : mtx(MutexTypeSyncVar, StatMtxSyncVar) , addr(addr) + , uid(uid) , owner_tid(kInvalidTid) + , last_lock() , recursion() , is_rw() , is_recursive() - , 
is_broken() { + , is_broken() + , is_linker_init() { } SyncTab::Part::Part() @@ -45,8 +48,61 @@ SyncTab::~SyncTab() { } } +SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock) { + return GetAndLock(thr, pc, addr, write_lock, true); +} + +SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) { + return GetAndLock(0, 0, addr, write_lock, false); +} + +SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) { + StatInc(thr, StatSyncCreated); + void *mem = internal_alloc(MBlockSync, sizeof(SyncVar)); + const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); + SyncVar *res = new(mem) SyncVar(addr, uid); +#ifndef TSAN_GO + res->creation_stack.ObtainCurrent(thr, pc); +#endif + return res; +} + SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock) { + uptr addr, bool write_lock, bool create) { +#ifndef TSAN_GO + { // NOLINT + SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create); + if (res) + return res; + } + + // Here we ask only PrimaryAllocator, because + // SecondaryAllocator::PointerIsMine() is slow and we have fallback on + // the hashmap anyway. 
+ if (PrimaryAllocator::PointerIsMine((void*)addr)) { + MBlock *b = user_mblock(thr, (void*)addr); + Lock l(&b->mtx); + SyncVar *res = 0; + for (res = b->head; res; res = res->next) { + if (res->addr == addr) + break; + } + if (res == 0) { + if (!create) + return 0; + res = Create(thr, pc, addr); + res->next = b->head; + b->head = res; + } + if (write_lock) + res->mtx.Lock(); + else + res->mtx.ReadLock(); + return res; + } +#endif + Part *p = &tab_[PartIdx(addr)]; { ReadLock l(&p->mtx); @@ -60,6 +116,8 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, } } } + if (!create) + return 0; { Lock l(&p->mtx); SyncVar *res = p->val; @@ -68,12 +126,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, break; } if (res == 0) { - StatInc(thr, StatSyncCreated); - void *mem = internal_alloc(MBlockSync, sizeof(SyncVar)); - res = new(mem) SyncVar(addr); -#ifndef TSAN_GO - res->creation_stack.ObtainCurrent(thr, pc); -#endif + res = Create(thr, pc, addr); res->next = p->val; p->val = res; } @@ -86,6 +139,39 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, } SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) { +#ifndef TSAN_GO + { // NOLINT + SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr); + if (res) + return res; + } + if (PrimaryAllocator::PointerIsMine((void*)addr)) { + MBlock *b = user_mblock(thr, (void*)addr); + SyncVar *res = 0; + { + Lock l(&b->mtx); + SyncVar **prev = &b->head; + res = *prev; + while (res) { + if (res->addr == addr) { + if (res->is_linker_init) + return 0; + *prev = res->next; + break; + } + prev = &res->next; + res = *prev; + } + } + if (res) { + StatInc(thr, StatSyncDestroyed); + res->mtx.Lock(); + res->mtx.Unlock(); + } + return res; + } +#endif + Part *p = &tab_[PartIdx(addr)]; SyncVar *res = 0; { @@ -94,6 +180,8 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) { res = *prev; while (res) { if (res->addr == addr) { + if (res->is_linker_init) + return 0; *prev = res->next; break; } @@ 
-179,15 +267,19 @@ void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) { n_ = thr->shadow_stack_pos - thr->shadow_stack; if (n_ + !!toppc == 0) return; + uptr start = 0; if (c_) { CHECK_NE(s_, 0); - CHECK_LE(n_ + !!toppc, c_); + if (n_ + !!toppc > c_) { + start = n_ - c_ + !!toppc; + n_ = c_ - !!toppc; + } } else { s_ = (uptr*)internal_alloc(MBlockStackTrace, (n_ + !!toppc) * sizeof(s_[0])); } for (uptr i = 0; i < n_; i++) - s_[i] = thr->shadow_stack[i]; + s_[i] = thr->shadow_stack[start + i]; if (toppc) { s_[n_] = toppc; n_++; diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h index 34d3e0b2132f..77749e22ffc2 100644 --- a/lib/tsan/rtl/tsan_sync.h +++ b/lib/tsan/rtl/tsan_sync.h @@ -50,23 +50,38 @@ class StackTrace { }; struct SyncVar { - explicit SyncVar(uptr addr); + explicit SyncVar(uptr addr, u64 uid); static const int kInvalidTid = -1; Mutex mtx; - const uptr addr; + uptr addr; + const u64 uid; // Globally unique id. SyncClock clock; SyncClock read_clock; // Used for rw mutexes only. StackTrace creation_stack; int owner_tid; // Set only by exclusive owners. + u64 last_lock; int recursion; bool is_rw; bool is_recursive; bool is_broken; + bool is_linker_init; SyncVar *next; // In SyncTab hashtable. uptr GetMemoryConsumption(); + u64 GetId() const { + // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits. + return GetLsb((u64)addr | (uid << 47), 61); + } + bool CheckId(u64 uid) const { + CHECK_EQ(uid, GetLsb(uid, 14)); + return GetLsb(this->uid, 14) == uid; + } + static uptr SplitId(u64 id, u64 *uid) { + *uid = id >> 47; + return (uptr)GetLsb(id, 47); + } }; class SyncTab { @@ -74,13 +89,15 @@ class SyncTab { SyncTab(); ~SyncTab(); - // If the SyncVar does not exist yet, it is created. 
- SyncVar* GetAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock); + SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock); + SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock); // If the SyncVar does not exist, returns 0. SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr); + SyncVar* Create(ThreadState *thr, uptr pc, uptr addr); + uptr GetMemoryConsumption(uptr *nsync); private: @@ -94,9 +111,13 @@ class SyncTab { // FIXME: Implement something more sane. static const int kPartCount = 1009; Part tab_[kPartCount]; + atomic_uint64_t uid_gen_; int PartIdx(uptr addr); + SyncVar* GetAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock, bool create); + SyncTab(const SyncTab&); // Not implemented. void operator = (const SyncTab&); // Not implemented. }; diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h index bf15bf5cc239..7df716046567 100644 --- a/lib/tsan/rtl/tsan_trace.h +++ b/lib/tsan/rtl/tsan_trace.h @@ -16,12 +16,14 @@ #include "tsan_defs.h" #include "tsan_mutex.h" #include "tsan_sync.h" +#include "tsan_mutexset.h" namespace __tsan { -const int kTraceParts = 8; -const int kTraceSize = 128*1024; -const int kTracePartSize = kTraceSize / kTraceParts; +const int kTracePartSizeBits = 14; +const int kTracePartSize = 1 << kTracePartSizeBits; +const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize; +const int kTraceSize = kTracePartSize * kTraceParts; // Must fit into 3 bits. enum EventType { @@ -31,7 +33,7 @@ enum EventType { EventTypeLock, EventTypeUnlock, EventTypeRLock, - EventTypeRUnlock, + EventTypeRUnlock }; // Represents a thread event (from most significant bit): @@ -42,13 +44,14 @@ typedef u64 Event; struct TraceHeader { StackTrace stack0; // Start stack for the trace. u64 epoch0; // Start epoch for the trace. 
+ MutexSet mset0; #ifndef TSAN_GO - uptr stack0buf[kShadowStackSize]; + uptr stack0buf[kTraceStackSize]; #endif TraceHeader() #ifndef TSAN_GO - : stack0(stack0buf, kShadowStackSize) + : stack0(stack0buf, kTraceStackSize) #else : stack0() #endif @@ -57,7 +60,6 @@ struct TraceHeader { }; struct Trace { - Event events[kTraceSize]; TraceHeader headers[kTraceParts]; Mutex mtx; diff --git a/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/lib/tsan/rtl/tsan_update_shadow_word_inl.h index c7864ce00af3..2c435556abb2 100644 --- a/lib/tsan/rtl/tsan_update_shadow_word_inl.h +++ b/lib/tsan/rtl/tsan_update_shadow_word_inl.h @@ -34,7 +34,7 @@ do { if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); if (OldIsInSameSynchEpoch(old, thr)) { - if (OldIsRWStronger(old, kAccessIsWrite)) { + if (OldIsRWNotWeaker(old, kAccessIsWrite)) { // found a slot that holds effectively the same info // (that is, same tid, same sync epoch and same size) StatInc(thr, StatMopSame); @@ -43,7 +43,7 @@ do { StoreIfNotYetStored(sp, &store_word); break; } - if (OldIsRWWeaker(old, kAccessIsWrite)) + if (OldIsRWWeakerOrEqual(old, kAccessIsWrite)) StoreIfNotYetStored(sp, &store_word); break; } |