aboutsummaryrefslogtreecommitdiff
path: root/lib/tsan/tests/rtl
diff options
context:
space:
mode:
Diffstat (limited to 'lib/tsan/tests/rtl')
-rw-r--r--lib/tsan/tests/rtl/CMakeLists.txt15
-rw-r--r--lib/tsan/tests/rtl/tsan_bench.cc105
-rw-r--r--lib/tsan/tests/rtl/tsan_mop.cc233
-rw-r--r--lib/tsan/tests/rtl/tsan_mutex.cc221
-rw-r--r--lib/tsan/tests/rtl/tsan_posix.cc146
-rw-r--r--lib/tsan/tests/rtl/tsan_string.cc86
-rw-r--r--lib/tsan/tests/rtl/tsan_test.cc50
-rw-r--r--lib/tsan/tests/rtl/tsan_test_util.h122
-rw-r--r--lib/tsan/tests/rtl/tsan_test_util_linux.cc465
-rw-r--r--lib/tsan/tests/rtl/tsan_thread.cc59
10 files changed, 1502 insertions, 0 deletions
diff --git a/lib/tsan/tests/rtl/CMakeLists.txt b/lib/tsan/tests/rtl/CMakeLists.txt
new file mode 100644
index 000000000000..b585660e8b4a
--- /dev/null
+++ b/lib/tsan/tests/rtl/CMakeLists.txt
@@ -0,0 +1,15 @@
+set(TSAN_RTL_TESTS
+ tsan_bench.cc
+ tsan_mop.cc
+ tsan_mutex.cc
+ tsan_posix.cc
+ tsan_string.cc
+ tsan_test.cc
+ tsan_thread.cc
+ )
+
+if(UNIX AND NOT APPLE)
+ list(APPEND TSAN_RTL_TESTS tsan_test_util_linux.cc)
+endif()
+
+add_tsan_unittest(TsanRtlTest ${TSAN_RTL_TESTS})
diff --git a/lib/tsan/tests/rtl/tsan_bench.cc b/lib/tsan/tests/rtl/tsan_bench.cc
new file mode 100644
index 000000000000..a3cf22f2c626
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_bench.cc
@@ -0,0 +1,105 @@
+//===-- tsan_bench.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_test_util.h"
+#include "tsan_interface.h"
+#include "tsan_defs.h"
+#include "gtest/gtest.h"
+#include <stdint.h>
+
+const int kSize = 128;
+const int kRepeat = 2*1024*1024;
+
+void noinstr(void *p) {}
+
+template<typename T, void(*__tsan_mop)(void *p)>
+static void Benchmark() {
+ volatile T data[kSize];
+ for (int i = 0; i < kRepeat; i++) {
+ for (int j = 0; j < kSize; j++) {
+ __tsan_mop((void*)&data[j]);
+ data[j]++;
+ }
+ }
+}
+
+TEST(DISABLED_BENCH, Mop1) {
+ Benchmark<uint8_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop1Read) {
+ Benchmark<uint8_t, __tsan_read1>();
+}
+
+TEST(DISABLED_BENCH, Mop1Write) {
+ Benchmark<uint8_t, __tsan_write1>();
+}
+
+TEST(DISABLED_BENCH, Mop2) {
+ Benchmark<uint16_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop2Read) {
+ Benchmark<uint16_t, __tsan_read2>();
+}
+
+TEST(DISABLED_BENCH, Mop2Write) {
+ Benchmark<uint16_t, __tsan_write2>();
+}
+
+TEST(DISABLED_BENCH, Mop4) {
+ Benchmark<uint32_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop4Read) {
+ Benchmark<uint32_t, __tsan_read4>();
+}
+
+TEST(DISABLED_BENCH, Mop4Write) {
+ Benchmark<uint32_t, __tsan_write4>();
+}
+
+TEST(DISABLED_BENCH, Mop8) {
+ Benchmark<uint8_t, noinstr>();
+}
+
+TEST(DISABLED_BENCH, Mop8Read) {
+ Benchmark<uint64_t, __tsan_read8>();
+}
+
+TEST(DISABLED_BENCH, Mop8Write) {
+ Benchmark<uint64_t, __tsan_write8>();
+}
+
+TEST(DISABLED_BENCH, FuncCall) {
+ for (int i = 0; i < kRepeat; i++) {
+ for (int j = 0; j < kSize; j++)
+ __tsan_func_entry((void*)(uintptr_t)j);
+ for (int j = 0; j < kSize; j++)
+ __tsan_func_exit();
+ }
+}
+
+TEST(DISABLED_BENCH, MutexLocal) {
+ Mutex m;
+ ScopedThread().Create(m);
+ for (int i = 0; i < 50; i++) {
+ ScopedThread t;
+ t.Lock(m);
+ t.Unlock(m);
+ }
+ for (int i = 0; i < 16*1024*1024; i++) {
+ m.Lock();
+ m.Unlock();
+ }
+ ScopedThread().Destroy(m);
+}
diff --git a/lib/tsan/tests/rtl/tsan_mop.cc b/lib/tsan/tests/rtl/tsan_mop.cc
new file mode 100644
index 000000000000..f21742825050
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_mop.cc
@@ -0,0 +1,233 @@
+//===-- tsan_mop.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <stddef.h>
+#include <stdint.h>
+
+TEST(ThreadSanitizer, SimpleWrite) {
+ ScopedThread t;
+ MemLoc l;
+ t.Write1(l);
+}
+
+TEST(ThreadSanitizer, SimpleWriteWrite) {
+ ScopedThread t1, t2;
+ MemLoc l1, l2;
+ t1.Write1(l1);
+ t2.Write1(l2);
+}
+
+TEST(ThreadSanitizer, WriteWriteRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Write1(l);
+ t2.Write1(l, true);
+}
+
+TEST(ThreadSanitizer, ReadWriteRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Read1(l);
+ t2.Write1(l, true);
+}
+
+TEST(ThreadSanitizer, WriteReadRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Write1(l);
+ t2.Read1(l, true);
+}
+
+TEST(ThreadSanitizer, ReadReadNoRace) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Read1(l);
+ t2.Read1(l);
+}
+
+TEST(ThreadSanitizer, WriteThenRead) {
+ MemLoc l;
+ ScopedThread t1, t2;
+ t1.Write1(l);
+ t1.Read1(l);
+ t2.Read1(l, true);
+}
+
+TEST(ThreadSanitizer, WriteThenLockedRead) {
+ Mutex m(Mutex::RW);
+ MainThread t0;
+ t0.Create(m);
+ MemLoc l;
+ {
+ ScopedThread t1, t2;
+
+ t1.Write8(l);
+
+ t1.Lock(m);
+ t1.Read8(l);
+ t1.Unlock(m);
+
+ t2.Read8(l, true);
+ }
+ t0.Destroy(m);
+}
+
+TEST(ThreadSanitizer, LockedWriteThenRead) {
+ Mutex m(Mutex::RW);
+ MainThread t0;
+ t0.Create(m);
+ MemLoc l;
+ {
+ ScopedThread t1, t2;
+
+ t1.Lock(m);
+ t1.Write8(l);
+ t1.Unlock(m);
+
+ t1.Read8(l);
+
+ t2.Read8(l, true);
+ }
+ t0.Destroy(m);
+}
+
+
+TEST(ThreadSanitizer, RaceWithOffset) {
+ ScopedThread t1, t2;
+ {
+ MemLoc l;
+ t1.Access(l.loc(), true, 8, false);
+ t2.Access((char*)l.loc() + 4, true, 4, true);
+ }
+ {
+ MemLoc l;
+ t1.Access(l.loc(), true, 8, false);
+ t2.Access((char*)l.loc() + 7, true, 1, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 4, true, 4, false);
+ t2.Access((char*)l.loc() + 4, true, 2, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 4, true, 4, false);
+ t2.Access((char*)l.loc() + 6, true, 2, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 3, true, 2, false);
+ t2.Access((char*)l.loc() + 4, true, 1, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 1, true, 8, false);
+ t2.Access((char*)l.loc() + 3, true, 1, true);
+ }
+}
+
+TEST(ThreadSanitizer, RaceWithOffset2) {
+ ScopedThread t1, t2;
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc(), true, 4, false);
+ t2.Access((char*)l.loc() + 2, true, 1, true);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 2, true, 1, false);
+ t2.Access((char*)l.loc(), true, 4, true);
+ }
+}
+
+TEST(ThreadSanitizer, NoRaceWithOffset) {
+ ScopedThread t1, t2;
+ {
+ MemLoc l;
+ t1.Access(l.loc(), true, 4, false);
+ t2.Access((char*)l.loc() + 4, true, 4, false);
+ }
+ {
+ MemLoc l;
+ t1.Access((char*)l.loc() + 3, true, 2, false);
+ t2.Access((char*)l.loc() + 1, true, 2, false);
+ t2.Access((char*)l.loc() + 5, true, 2, false);
+ }
+}
+
+TEST(ThreadSanitizer, RaceWithDeadThread) {
+ MemLoc l;
+ ScopedThread t;
+ ScopedThread().Write1(l);
+ t.Write1(l, true);
+}
+
+TEST(ThreadSanitizer, BenignRaceOnVptr) {
+ void *vptr_storage;
+ MemLoc vptr(&vptr_storage), val;
+ vptr_storage = val.loc();
+ ScopedThread t1, t2;
+ t1.VptrUpdate(vptr, val);
+ t2.Read8(vptr);
+}
+
+TEST(ThreadSanitizer, HarmfulRaceOnVptr) {
+ void *vptr_storage;
+ MemLoc vptr(&vptr_storage), val1, val2;
+ vptr_storage = val1.loc();
+ ScopedThread t1, t2;
+ t1.VptrUpdate(vptr, val2);
+ t2.Read8(vptr, true);
+}
+
+static void foo() {
+ volatile int x = 42;
+ int x2 = x;
+ (void)x2;
+}
+
+static void bar() {
+ volatile int x = 43;
+ int x2 = x;
+ (void)x2;
+}
+
+TEST(ThreadSanitizer, ReportDeadThread) {
+ MemLoc l;
+ ScopedThread t1;
+ {
+ ScopedThread t2;
+ t2.Call(&foo);
+ t2.Call(&bar);
+ t2.Write1(l);
+ }
+ t1.Write1(l, true);
+}
+
+struct ClassWithStatic {
+ static int Data[4];
+};
+
+int ClassWithStatic::Data[4];
+
+static void foobarbaz() {}
+
+TEST(ThreadSanitizer, ReportRace) {
+ ScopedThread t1;
+ MainThread().Access(&ClassWithStatic::Data, true, 4, false);
+ t1.Call(&foobarbaz);
+ t1.Access(&ClassWithStatic::Data, true, 2, true);
+ t1.Return();
+}
diff --git a/lib/tsan/tests/rtl/tsan_mutex.cc b/lib/tsan/tests/rtl/tsan_mutex.cc
new file mode 100644
index 000000000000..4d9c77961818
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_mutex.cc
@@ -0,0 +1,221 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <stdint.h>
+
+namespace __tsan {
+
+TEST(ThreadSanitizer, BasicMutex) {
+ ScopedThread t;
+ Mutex m;
+ t.Create(m);
+
+ t.Lock(m);
+ t.Unlock(m);
+
+ CHECK(t.TryLock(m));
+ t.Unlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryLock(m));
+ t.Unlock(m);
+
+ t.Destroy(m);
+}
+
+TEST(ThreadSanitizer, BasicSpinMutex) {
+ ScopedThread t;
+ Mutex m(Mutex::Spin);
+ t.Create(m);
+
+ t.Lock(m);
+ t.Unlock(m);
+
+ CHECK(t.TryLock(m));
+ t.Unlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryLock(m));
+ t.Unlock(m);
+
+ t.Destroy(m);
+}
+
+TEST(ThreadSanitizer, BasicRwMutex) {
+ ScopedThread t;
+ Mutex m(Mutex::RW);
+ t.Create(m);
+
+ t.Lock(m);
+ t.Unlock(m);
+
+ CHECK(t.TryLock(m));
+ t.Unlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryLock(m));
+ t.Unlock(m);
+
+ t.ReadLock(m);
+ t.ReadUnlock(m);
+
+ CHECK(t.TryReadLock(m));
+ t.ReadUnlock(m);
+
+ t.Lock(m);
+ CHECK(!t.TryReadLock(m));
+ t.Unlock(m);
+
+ t.ReadLock(m);
+ CHECK(!t.TryLock(m));
+ t.ReadUnlock(m);
+
+ t.ReadLock(m);
+ CHECK(t.TryReadLock(m));
+ t.ReadUnlock(m);
+ t.ReadUnlock(m);
+
+ t.Destroy(m);
+}
+
+TEST(ThreadSanitizer, Mutex) {
+ Mutex m;
+ MainThread t0;
+ t0.Create(m);
+
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Lock(m);
+ t1.Write1(l);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t2.Destroy(m);
+}
+
+TEST(ThreadSanitizer, SpinMutex) {
+ Mutex m(Mutex::Spin);
+ MainThread t0;
+ t0.Create(m);
+
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Lock(m);
+ t1.Write1(l);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t2.Destroy(m);
+}
+
+TEST(ThreadSanitizer, RwMutex) {
+ Mutex m(Mutex::RW);
+ MainThread t0;
+ t0.Create(m);
+
+ ScopedThread t1, t2, t3;
+ MemLoc l;
+ t1.Lock(m);
+ t1.Write1(l);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t1.ReadLock(m);
+ t3.ReadLock(m);
+ t1.Read1(l);
+ t3.Read1(l);
+ t1.ReadUnlock(m);
+ t3.ReadUnlock(m);
+ t2.Lock(m);
+ t2.Write1(l);
+ t2.Unlock(m);
+ t2.Destroy(m);
+}
+
+TEST(ThreadSanitizer, StaticMutex) {
+ // Emulates statically initialized mutex.
+ Mutex m;
+ m.StaticInit();
+ {
+ ScopedThread t1, t2;
+ t1.Lock(m);
+ t1.Unlock(m);
+ t2.Lock(m);
+ t2.Unlock(m);
+ }
+ MainThread().Destroy(m);
+}
+
+static void *singleton_thread(void *param) {
+ atomic_uintptr_t *singleton = (atomic_uintptr_t *)param;
+ for (int i = 0; i < 4*1024*1024; i++) {
+ int *val = (int *)atomic_load(singleton, memory_order_acquire);
+ __tsan_acquire(singleton);
+ __tsan_read4(val);
+ CHECK_EQ(*val, 42);
+ }
+ return 0;
+}
+
+TEST(DISABLED_BENCH_ThreadSanitizer, Singleton) {
+ const int kClockSize = 100;
+ const int kThreadCount = 8;
+
+  // Pump up this thread's vector clock (by creating/joining threads).
+ for (int i = 0; i < kClockSize; i++) {
+ ScopedThread t1;
+ (void)t1;
+ }
+ // Create the singleton.
+ int val = 42;
+ __tsan_write4(&val);
+ atomic_uintptr_t singleton;
+ __tsan_release(&singleton);
+ atomic_store(&singleton, (uintptr_t)&val, memory_order_release);
+ // Create reader threads.
+ pthread_t threads[kThreadCount];
+ for (int t = 0; t < kThreadCount; t++)
+ pthread_create(&threads[t], 0, singleton_thread, &singleton);
+ for (int t = 0; t < kThreadCount; t++)
+ pthread_join(threads[t], 0);
+}
+
+TEST(DISABLED_BENCH_ThreadSanitizer, StopFlag) {
+ const int kClockSize = 100;
+ const int kIters = 16*1024*1024;
+
+  // Pump up this thread's vector clock (by creating/joining threads).
+ for (int i = 0; i < kClockSize; i++) {
+ ScopedThread t1;
+ (void)t1;
+ }
+ // Create the stop flag.
+ atomic_uintptr_t flag;
+ __tsan_release(&flag);
+ atomic_store(&flag, 0, memory_order_release);
+ // Read it a lot.
+ for (int i = 0; i < kIters; i++) {
+ uptr v = atomic_load(&flag, memory_order_acquire);
+ __tsan_acquire(&flag);
+ CHECK_EQ(v, 0);
+ }
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tests/rtl/tsan_posix.cc b/lib/tsan/tests/rtl/tsan_posix.cc
new file mode 100644
index 000000000000..0caedd7207e6
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_posix.cc
@@ -0,0 +1,146 @@
+//===-- tsan_posix.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <pthread.h>
+
+struct thread_key {
+ pthread_key_t key;
+ pthread_mutex_t *mtx;
+ int val;
+ int *cnt;
+ thread_key(pthread_key_t key, pthread_mutex_t *mtx, int val, int *cnt)
+ : key(key)
+ , mtx(mtx)
+ , val(val)
+ , cnt(cnt) {
+ }
+};
+
+static void thread_secific_dtor(void *v) {
+ thread_key *k = (thread_key *)v;
+ EXPECT_EQ(pthread_mutex_lock(k->mtx), 0);
+ (*k->cnt)++;
+ __tsan_write4(&k->cnt);
+ EXPECT_EQ(pthread_mutex_unlock(k->mtx), 0);
+ if (k->val == 42) {
+ delete k;
+ } else if (k->val == 43 || k->val == 44) {
+ k->val--;
+ EXPECT_EQ(pthread_setspecific(k->key, k), 0);
+ } else {
+ ASSERT_TRUE(false);
+ }
+}
+
+static void *dtors_thread(void *p) {
+ thread_key *k = (thread_key *)p;
+ EXPECT_EQ(pthread_setspecific(k->key, k), 0);
+ return 0;
+}
+
+TEST(Posix, ThreadSpecificDtors) {
+ int cnt = 0;
+ pthread_key_t key;
+ EXPECT_EQ(pthread_key_create(&key, thread_secific_dtor), 0);
+ pthread_mutex_t mtx;
+ EXPECT_EQ(pthread_mutex_init(&mtx, 0), 0);
+ pthread_t th[3];
+ thread_key *k[3];
+ k[0] = new thread_key(key, &mtx, 42, &cnt);
+ k[1] = new thread_key(key, &mtx, 43, &cnt);
+ k[2] = new thread_key(key, &mtx, 44, &cnt);
+ EXPECT_EQ(pthread_create(&th[0], 0, dtors_thread, k[0]), 0);
+ EXPECT_EQ(pthread_create(&th[1], 0, dtors_thread, k[1]), 0);
+ EXPECT_EQ(pthread_join(th[0], 0), 0);
+ EXPECT_EQ(pthread_create(&th[2], 0, dtors_thread, k[2]), 0);
+ EXPECT_EQ(pthread_join(th[1], 0), 0);
+ EXPECT_EQ(pthread_join(th[2], 0), 0);
+ EXPECT_EQ(pthread_key_delete(key), 0);
+ EXPECT_EQ(6, cnt);
+}
+
+static __thread int local_var;
+
+static void *local_thread(void *p) {
+ __tsan_write1(&local_var);
+ __tsan_write1(&p);
+ if (p == 0)
+ return 0;
+ const int kThreads = 4;
+ pthread_t th[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ EXPECT_EQ(pthread_create(&th[i], 0, local_thread,
+ (void*)((long)p - 1)), 0); // NOLINT
+ for (int i = 0; i < kThreads; i++)
+ EXPECT_EQ(pthread_join(th[i], 0), 0);
+ return 0;
+}
+
+TEST(Posix, ThreadLocalAccesses) {
+ local_thread((void*)2);
+}
+
+struct CondContext {
+ pthread_mutex_t m;
+ pthread_cond_t c;
+ int data;
+};
+
+static void *cond_thread(void *p) {
+ CondContext &ctx = *static_cast<CondContext*>(p);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(ctx.data, 0);
+ ctx.data = 1;
+ EXPECT_EQ(pthread_cond_signal(&ctx.c), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ while (ctx.data != 2)
+ EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ ctx.data = 3;
+ EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ return 0;
+}
+
+TEST(Posix, CondBasic) {
+ CondContext ctx;
+ EXPECT_EQ(pthread_mutex_init(&ctx.m, 0), 0);
+ EXPECT_EQ(pthread_cond_init(&ctx.c, 0), 0);
+ ctx.data = 0;
+ pthread_t th;
+ EXPECT_EQ(pthread_create(&th, 0, cond_thread, &ctx), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ while (ctx.data != 1)
+ EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ ctx.data = 2;
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
+
+ EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ while (ctx.data != 3)
+ EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+
+ EXPECT_EQ(pthread_join(th, 0), 0);
+ EXPECT_EQ(pthread_cond_destroy(&ctx.c), 0);
+ EXPECT_EQ(pthread_mutex_destroy(&ctx.m), 0);
+}
diff --git a/lib/tsan/tests/rtl/tsan_string.cc b/lib/tsan/tests/rtl/tsan_string.cc
new file mode 100644
index 000000000000..c402f7cbd679
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_string.cc
@@ -0,0 +1,86 @@
+//===-- tsan_string.cc ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+#include <string.h>
+
+namespace __tsan {
+
+TEST(ThreadSanitizer, Memcpy) {
+ char data0[7] = {1, 2, 3, 4, 5, 6, 7};
+ char data[7] = {42, 42, 42, 42, 42, 42, 42};
+ MainThread().Memcpy(data+1, data0+1, 5);
+ EXPECT_EQ(data[0], 42);
+ EXPECT_EQ(data[1], 2);
+ EXPECT_EQ(data[2], 3);
+ EXPECT_EQ(data[3], 4);
+ EXPECT_EQ(data[4], 5);
+ EXPECT_EQ(data[5], 6);
+ EXPECT_EQ(data[6], 42);
+ MainThread().Memset(data+1, 13, 5);
+ EXPECT_EQ(data[0], 42);
+ EXPECT_EQ(data[1], 13);
+ EXPECT_EQ(data[2], 13);
+ EXPECT_EQ(data[3], 13);
+ EXPECT_EQ(data[4], 13);
+ EXPECT_EQ(data[5], 13);
+ EXPECT_EQ(data[6], 42);
+}
+
+TEST(ThreadSanitizer, MemcpyRace1) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ char *data2 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data, data1, 10);
+ t2.Memcpy(data, data2, 10, true);
+}
+
+// The test fails with TSAN_SHADOW_COUNT=2,
+// because the old racy access is evicted.
+#if defined(TSAN_SHADOW_COUNT) && TSAN_SHADOW_COUNT >= 4
+TEST(ThreadSanitizer, MemcpyRace2) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ char *data2 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data+5, data1, 1);
+ t2.Memcpy(data+3, data2, 4, true);
+}
+#endif
+
+TEST(ThreadSanitizer, MemcpyRace3) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ char *data2 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data, data1, 10);
+ t2.Memcpy(data1, data2, 10, true);
+}
+
+TEST(ThreadSanitizer, MemcpyStack) {
+ char *data = new char[10];
+ char *data1 = new char[10];
+ ScopedThread t1, t2;
+ t1.Memcpy(data, data1, 10);
+ t2.Memcpy(data, data1, 10, true);
+}
+
+TEST(ThreadSanitizer, MemsetRace1) {
+ char *data = new char[10];
+ ScopedThread t1, t2;
+ t1.Memset(data, 1, 10);
+ t2.Memset(data, 2, 10, true);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tests/rtl/tsan_test.cc b/lib/tsan/tests/rtl/tsan_test.cc
new file mode 100644
index 000000000000..2184284d39ce
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_test.cc
@@ -0,0 +1,50 @@
+//===-- tsan_test.cc ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+
+static void foo() {}
+static void bar() {}
+
+TEST(ThreadSanitizer, FuncCall) {
+ ScopedThread t1, t2;
+ MemLoc l;
+ t1.Write1(l);
+ t2.Call(foo);
+ t2.Call(bar);
+ t2.Write1(l, true);
+ t2.Return();
+ t2.Return();
+}
+
+// We use this function instead of main, as ISO C++ forbids taking the address
+// of main, which we need to pass inside __tsan_func_entry.
+int run_tests(int argc, char **argv) {
+ TestMutexBeforeInit(); // Mutexes must be usable before __tsan_init();
+ __tsan_init();
+ __tsan_func_entry(__builtin_return_address(0));
+ __tsan_func_entry((void*)((intptr_t)&run_tests + 1));
+
+ testing::GTEST_FLAG(death_test_style) = "threadsafe";
+ testing::InitGoogleTest(&argc, argv);
+ int res = RUN_ALL_TESTS();
+
+ __tsan_func_exit();
+ __tsan_func_exit();
+ return res;
+}
+
+int main(int argc, char **argv) {
+ return run_tests(argc, argv);
+}
diff --git a/lib/tsan/tests/rtl/tsan_test_util.h b/lib/tsan/tests/rtl/tsan_test_util.h
new file mode 100644
index 000000000000..483a564c8475
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_test_util.h
@@ -0,0 +1,122 @@
+//===-- tsan_test_util.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Test utils.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TEST_UTIL_H
+#define TSAN_TEST_UTIL_H
+
+void TestMutexBeforeInit();
+
+// A location of memory on which a race may be detected.
+class MemLoc {
+ public:
+ explicit MemLoc(int offset_from_aligned = 0);
+ explicit MemLoc(void *const real_addr) : loc_(real_addr) { }
+ ~MemLoc();
+ void *loc() const { return loc_; }
+ private:
+ void *const loc_;
+ MemLoc(const MemLoc&);
+ void operator = (const MemLoc&);
+};
+
+class Mutex {
+ public:
+ enum Type { Normal, Spin, RW };
+
+ explicit Mutex(Type type = Normal);
+ ~Mutex();
+
+ void Init();
+  void StaticInit();  // Emulates static initialization (tsan invisible).
+ void Destroy();
+ void Lock();
+ bool TryLock();
+ void Unlock();
+ void ReadLock();
+ bool TryReadLock();
+ void ReadUnlock();
+
+ private:
+ // Placeholder for pthread_mutex_t, CRITICAL_SECTION or whatever.
+ void *mtx_[128];
+ bool alive_;
+ const Type type_;
+
+ Mutex(const Mutex&);
+ void operator = (const Mutex&);
+};
+
+// A thread is started in CTOR and joined in DTOR.
+class ScopedThread {
+ public:
+ explicit ScopedThread(bool detached = false, bool main = false);
+ ~ScopedThread();
+ void Detach();
+
+ void Access(void *addr, bool is_write, int size, bool expect_race);
+ void Read(const MemLoc &ml, int size, bool expect_race = false) {
+ Access(ml.loc(), false, size, expect_race);
+ }
+ void Write(const MemLoc &ml, int size, bool expect_race = false) {
+ Access(ml.loc(), true, size, expect_race);
+ }
+ void Read1(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 1, expect_race); }
+ void Read2(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 2, expect_race); }
+ void Read4(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 4, expect_race); }
+ void Read8(const MemLoc &ml, bool expect_race = false) {
+ Read(ml, 8, expect_race); }
+ void Write1(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 1, expect_race); }
+ void Write2(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 2, expect_race); }
+ void Write4(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 4, expect_race); }
+ void Write8(const MemLoc &ml, bool expect_race = false) {
+ Write(ml, 8, expect_race); }
+
+ void VptrUpdate(const MemLoc &vptr, const MemLoc &new_val,
+ bool expect_race = false);
+
+ void Call(void(*pc)());
+ void Return();
+
+ void Create(const Mutex &m);
+ void Destroy(const Mutex &m);
+ void Lock(const Mutex &m);
+ bool TryLock(const Mutex &m);
+ void Unlock(const Mutex &m);
+ void ReadLock(const Mutex &m);
+ bool TryReadLock(const Mutex &m);
+ void ReadUnlock(const Mutex &m);
+
+ void Memcpy(void *dst, const void *src, int size, bool expect_race = false);
+ void Memset(void *dst, int val, int size, bool expect_race = false);
+
+ private:
+ struct Impl;
+ Impl *impl_;
+ ScopedThread(const ScopedThread&); // Not implemented.
+ void operator = (const ScopedThread&); // Not implemented.
+};
+
+class MainThread : public ScopedThread {
+ public:
+ MainThread()
+ : ScopedThread(false, true) {
+ }
+};
+
+#endif // #ifndef TSAN_TEST_UTIL_H
diff --git a/lib/tsan/tests/rtl/tsan_test_util_linux.cc b/lib/tsan/tests/rtl/tsan_test_util_linux.cc
new file mode 100644
index 000000000000..dce8db90de70
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_test_util_linux.cc
@@ -0,0 +1,465 @@
+
+//===-- tsan_test_util_linux.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Test utils, linux implementation.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "tsan_interface.h"
+#include "tsan_test_util.h"
+#include "tsan_report.h"
+
+#include "gtest/gtest.h"
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+using namespace __tsan; // NOLINT
+
+static __thread bool expect_report;
+static __thread bool expect_report_reported;
+static __thread ReportType expect_report_type;
+
+extern "C" void *__interceptor_memcpy(void*, const void*, uptr);
+extern "C" void *__interceptor_memset(void*, int, uptr);
+
+static void *BeforeInitThread(void *param) {
+ (void)param;
+ return 0;
+}
+
+static void AtExit() {
+}
+
+void TestMutexBeforeInit() {
+ // Mutexes must be usable before __tsan_init();
+ pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+ pthread_mutex_lock(&mtx);
+ pthread_mutex_unlock(&mtx);
+ pthread_mutex_destroy(&mtx);
+ pthread_t thr;
+ pthread_create(&thr, 0, BeforeInitThread, 0);
+ pthread_join(thr, 0);
+ atexit(AtExit);
+}
+
+namespace __tsan {
+bool OnReport(const ReportDesc *rep, bool suppressed) {
+ if (expect_report) {
+ if (rep->typ != expect_report_type) {
+ printf("Expected report of type %d, got type %d\n",
+ (int)expect_report_type, (int)rep->typ);
+ EXPECT_FALSE("Wrong report type");
+ return false;
+ }
+ } else {
+ EXPECT_FALSE("Unexpected report");
+ return false;
+ }
+ expect_report_reported = true;
+ return true;
+}
+}
+
+static void* allocate_addr(int size, int offset_from_aligned = 0) {
+ static uintptr_t foo;
+ static atomic_uintptr_t uniq = {(uintptr_t)&foo}; // Some real address.
+ const int kAlign = 16;
+ CHECK(offset_from_aligned < kAlign);
+ size = (size + 2 * kAlign) & ~(kAlign - 1);
+ uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
+ return (void*)(addr + offset_from_aligned);
+}
+
+MemLoc::MemLoc(int offset_from_aligned)
+ : loc_(allocate_addr(16, offset_from_aligned)) {
+}
+
+MemLoc::~MemLoc() {
+}
+
+Mutex::Mutex(Type type)
+ : alive_()
+ , type_(type) {
+}
+
+Mutex::~Mutex() {
+ CHECK(!alive_);
+}
+
+void Mutex::Init() {
+ CHECK(!alive_);
+ alive_ = true;
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
+ else
+ CHECK(0);
+}
+
+void Mutex::StaticInit() {
+ CHECK(!alive_);
+ CHECK(type_ == Normal);
+ alive_ = true;
+ pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
+ memcpy(mtx_, &tmp, sizeof(tmp));
+}
+
+void Mutex::Destroy() {
+ CHECK(alive_);
+ alive_ = false;
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
+}
+
+void Mutex::Lock() {
+ CHECK(alive_);
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+bool Mutex::TryLock() {
+ CHECK(alive_);
+ if (type_ == Normal)
+ return pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
+ else if (type_ == Spin)
+ return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
+ else if (type_ == RW)
+ return pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
+ return false;
+}
+
+void Mutex::Unlock() {
+ CHECK(alive_);
+ if (type_ == Normal)
+ CHECK_EQ(pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
+ else if (type_ == Spin)
+ CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
+ else if (type_ == RW)
+ CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+void Mutex::ReadLock() {
+ CHECK(alive_);
+ CHECK(type_ == RW);
+ CHECK_EQ(pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+bool Mutex::TryReadLock() {
+ CHECK(alive_);
+ CHECK(type_ == RW);
+ return pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
+}
+
+void Mutex::ReadUnlock() {
+ CHECK(alive_);
+ CHECK(type_ == RW);
+ CHECK_EQ(pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
+}
+
+struct Event {
+ enum Type {
+ SHUTDOWN,
+ READ,
+ WRITE,
+ VPTR_UPDATE,
+ CALL,
+ RETURN,
+ MUTEX_CREATE,
+ MUTEX_DESTROY,
+ MUTEX_LOCK,
+ MUTEX_TRYLOCK,
+ MUTEX_UNLOCK,
+ MUTEX_READLOCK,
+ MUTEX_TRYREADLOCK,
+ MUTEX_READUNLOCK,
+ MEMCPY,
+ MEMSET
+ };
+ Type type;
+ void *ptr;
+ uptr arg;
+ uptr arg2;
+ bool res;
+ bool expect_report;
+ ReportType report_type;
+
+ Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
+ : type(type)
+ , ptr(const_cast<void*>(ptr))
+ , arg(arg)
+ , arg2(arg2)
+ , res()
+ , expect_report()
+ , report_type() {
+ }
+
+ void ExpectReport(ReportType type) {
+ expect_report = true;
+ report_type = type;
+ }
+};
+
+struct ScopedThread::Impl {
+ pthread_t thread;
+ bool main;
+ bool detached;
+ atomic_uintptr_t event; // Event*
+
+ static void *ScopedThreadCallback(void *arg);
+ void send(Event *ev);
+ void HandleEvent(Event *ev);
+};
+
+// Executes a single event on the calling thread. Before running the
+// operation it arms the expected-report bookkeeping (expect_report /
+// expect_report_reported / expect_report_type are globals defined elsewhere
+// in this file — presumably consulted by the report callback; confirm
+// against the rest of tsan_test_util_linux.cc), and afterwards fails the
+// test if an expected report did not materialize.
+void ScopedThread::Impl::HandleEvent(Event *ev) {
+  CHECK_EQ(expect_report, false);
+  expect_report = ev->expect_report;
+  expect_report_reported = false;
+  expect_report_type = ev->report_type;
+  switch (ev->type) {
+  case Event::READ:
+  case Event::WRITE: {
+    // Dispatch to the size-specific tsan memory-access entry point;
+    // ev->arg carries the access size in bytes.
+    void (*tsan_mop)(void *addr) = 0;
+    if (ev->type == Event::READ) {
+      switch (ev->arg /*size*/) {
+      case 1: tsan_mop = __tsan_read1; break;
+      case 2: tsan_mop = __tsan_read2; break;
+      case 4: tsan_mop = __tsan_read4; break;
+      case 8: tsan_mop = __tsan_read8; break;
+      case 16: tsan_mop = __tsan_read16; break;
+      }
+    } else {
+      switch (ev->arg /*size*/) {
+      case 1: tsan_mop = __tsan_write1; break;
+      case 2: tsan_mop = __tsan_write2; break;
+      case 4: tsan_mop = __tsan_write4; break;
+      case 8: tsan_mop = __tsan_write8; break;
+      case 16: tsan_mop = __tsan_write16; break;
+      }
+    }
+    CHECK_NE(tsan_mop, 0);
+    // tsan entry points must preserve errno: seed it with a rarely-used
+    // value and check it survives the call.
+    errno = ECHRNG;
+    tsan_mop(ev->ptr);
+    CHECK_EQ(errno, ECHRNG);  // In no case must errno be changed.
+    break;
+  }
+  case Event::VPTR_UPDATE:
+    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
+    break;
+  case Event::CALL:
+    __tsan_func_entry((void*)((uptr)ev->ptr));
+    break;
+  case Event::RETURN:
+    __tsan_func_exit();
+    break;
+  case Event::MUTEX_CREATE:
+    static_cast<Mutex*>(ev->ptr)->Init();
+    break;
+  case Event::MUTEX_DESTROY:
+    static_cast<Mutex*>(ev->ptr)->Destroy();
+    break;
+  case Event::MUTEX_LOCK:
+    static_cast<Mutex*>(ev->ptr)->Lock();
+    break;
+  case Event::MUTEX_TRYLOCK:
+    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
+    break;
+  case Event::MUTEX_UNLOCK:
+    static_cast<Mutex*>(ev->ptr)->Unlock();
+    break;
+  case Event::MUTEX_READLOCK:
+    static_cast<Mutex*>(ev->ptr)->ReadLock();
+    break;
+  case Event::MUTEX_TRYREADLOCK:
+    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
+    break;
+  case Event::MUTEX_READUNLOCK:
+    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
+    break;
+  case Event::MEMCPY:
+    // Call through the interceptor so tsan instruments the accesses.
+    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
+    break;
+  case Event::MEMSET:
+    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
+    break;
+  default: CHECK(0);
+  }
+  // The event ran to completion; verify the promised report was seen.
+  if (expect_report && !expect_report_reported) {
+    printf("Missed expected report of type %d\n", (int)ev->report_type);
+    EXPECT_FALSE("Missed expected race");
+  }
+  expect_report = false;
+}
+
+// Worker thread body: spin on the event mailbox, execute events, and exit
+// when a SHUTDOWN event arrives. The __tsan_func_entry/exit pair gives the
+// thread a synthetic call frame so reports can attribute its accesses.
+void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
+  __tsan_func_entry(__builtin_return_address(0));
+  Impl *impl = (Impl*)arg;
+  for (;;) {
+    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
+    if (ev == 0) {
+      pthread_yield();  // mailbox empty; busy-wait politely
+      continue;
+    }
+    if (ev->type == Event::SHUTDOWN) {
+      // Ack the shutdown so the destructor's send() returns, then exit.
+      atomic_store(&impl->event, 0, memory_order_release);
+      break;
+    }
+    impl->HandleEvent(ev);
+    // Clearing the mailbox signals completion back to send().
+    atomic_store(&impl->event, 0, memory_order_release);
+  }
+  __tsan_func_exit();
+  return 0;
+}
+
+// Hands an event to the worker and blocks until it has been processed.
+// For the main thread there is no worker — execute synchronously instead.
+void ScopedThread::Impl::send(Event *e) {
+  if (main) {
+    HandleEvent(e);
+  } else {
+    // Mailbox must be empty: events are strictly one-at-a-time.
+    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
+    atomic_store(&event, (uintptr_t)e, memory_order_release);
+    // Spin until the worker clears the slot, signalling completion.
+    while (atomic_load(&event, memory_order_acquire) != 0)
+      pthread_yield();
+  }
+}
+
+// Creates the worker thread (unless `main`, in which case events will run
+// on the caller's thread). The worker starts spinning on the mailbox
+// immediately; a small stack keeps the massive-thread tests cheap.
+ScopedThread::ScopedThread(bool detached, bool main) {
+  impl_ = new Impl;
+  impl_->main = main;
+  impl_->detached = detached;
+  atomic_store(&impl_->event, 0, memory_order_relaxed);
+  if (!main) {
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    // NOTE(review): passes a bool where PTHREAD_CREATE_DETACHED/JOINABLE is
+    // expected — works because the constants are 1/0 on Linux; confirm.
+    pthread_attr_setdetachstate(&attr, detached);
+    pthread_attr_setstacksize(&attr, 64*1024);
+    pthread_create(&impl_->thread, &attr,
+        ScopedThread::Impl::ScopedThreadCallback, impl_);
+    // NOTE(review): attr is never pthread_attr_destroy()ed — harmless on
+    // Linux but formally required by POSIX.
+  }
+}
+
+// Shuts the worker down (if any) and joins it unless it was detached.
+ScopedThread::~ScopedThread() {
+  if (!impl_->main) {
+    Event event(Event::SHUTDOWN);
+    impl_->send(&event);  // blocks until the worker acks the shutdown
+    if (!impl_->detached)
+      pthread_join(impl_->thread, 0);
+  }
+  delete impl_;
+}
+
+// Detaches a previously joinable worker thread; the destructor will then
+// skip the join. Not valid for the main pseudo-thread.
+void ScopedThread::Detach() {
+  CHECK(!impl_->main);
+  CHECK(!impl_->detached);
+  impl_->detached = true;
+  pthread_detach(impl_->thread);
+}
+
+// Performs a `size`-byte read or write of `addr` on this thread,
+// optionally expecting a race report from tsan.
+void ScopedThread::Access(void *addr, bool is_write,
+                          int size, bool expect_race) {
+  Event event(is_write ? Event::WRITE : Event::READ, addr, size);
+  if (expect_race)
+    event.ExpectReport(ReportTypeRace);
+  impl_->send(&event);
+}
+
+// Simulates a vtable-pointer store (as emitted by constructors/destructors)
+// at `vptr` with the new value `new_val`.
+void ScopedThread::VptrUpdate(const MemLoc &vptr,
+                              const MemLoc &new_val,
+                              bool expect_race) {
+  Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
+  if (expect_race)
+    event.ExpectReport(ReportTypeRace);
+  impl_->send(&event);
+}
+
+// Pushes a synthetic call frame (__tsan_func_entry) with `pc` as the PC.
+void ScopedThread::Call(void(*pc)()) {
+  Event event(Event::CALL, (void*)((uintptr_t)pc));
+  impl_->send(&event);
+}
+
+// Pops the synthetic call frame pushed by Call() (__tsan_func_exit).
+void ScopedThread::Return() {
+  Event event(Event::RETURN);
+  impl_->send(&event);
+}
+
+// Initializes mutex `m` on this thread.
+void ScopedThread::Create(const Mutex &m) {
+  Event event(Event::MUTEX_CREATE, &m);
+  impl_->send(&event);
+}
+
+// Destroys mutex `m` on this thread.
+void ScopedThread::Destroy(const Mutex &m) {
+  Event event(Event::MUTEX_DESTROY, &m);
+  impl_->send(&event);
+}
+
+// Acquires mutex `m` (write/exclusive lock) on this thread.
+void ScopedThread::Lock(const Mutex &m) {
+  Event event(Event::MUTEX_LOCK, &m);
+  impl_->send(&event);
+}
+
+// Attempts to acquire mutex `m` on this thread; returns whether it
+// succeeded.
+bool ScopedThread::TryLock(const Mutex &m) {
+  Event event(Event::MUTEX_TRYLOCK, &m);
+  impl_->send(&event);
+  return event.res;
+}
+
+// Releases mutex `m` (write/exclusive lock) on this thread.
+void ScopedThread::Unlock(const Mutex &m) {
+  Event event(Event::MUTEX_UNLOCK, &m);
+  impl_->send(&event);
+}
+
+// Acquires a read (shared) lock on rwlock `m` on this thread.
+void ScopedThread::ReadLock(const Mutex &m) {
+  Event event(Event::MUTEX_READLOCK, &m);
+  impl_->send(&event);
+}
+
+// Attempts a read (shared) lock on rwlock `m` on this thread; returns
+// whether it succeeded.
+bool ScopedThread::TryReadLock(const Mutex &m) {
+  Event event(Event::MUTEX_TRYREADLOCK, &m);
+  impl_->send(&event);
+  return event.res;
+}
+
+// Releases a read (shared) lock on rwlock `m` on this thread.
+void ScopedThread::ReadUnlock(const Mutex &m) {
+  Event event(Event::MUTEX_READUNLOCK, &m);
+  impl_->send(&event);
+}
+
+// Runs an instrumented memcpy(dst, src, size) on this thread (via the
+// tsan interceptor), optionally expecting a race report.
+void ScopedThread::Memcpy(void *dst, const void *src, int size,
+                          bool expect_race) {
+  Event event(Event::MEMCPY, dst, (uptr)src, size);
+  if (expect_race)
+    event.ExpectReport(ReportTypeRace);
+  impl_->send(&event);
+}
+
+// Runs an instrumented memset(dst, val, size) on this thread (via the
+// tsan interceptor), optionally expecting a race report.
+void ScopedThread::Memset(void *dst, int val, int size,
+                          bool expect_race) {
+  Event event(Event::MEMSET, dst, val, size);
+  if (expect_race)
+    event.ExpectReport(ReportTypeRace);
+  impl_->send(&event);
+}
diff --git a/lib/tsan/tests/rtl/tsan_thread.cc b/lib/tsan/tests/rtl/tsan_thread.cc
new file mode 100644
index 000000000000..5646415a79b8
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_thread.cc
@@ -0,0 +1,59 @@
+//===-- tsan_thread.cc ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_test_util.h"
+#include "gtest/gtest.h"
+
+// Writes from the main thread before and after a child thread's lifetime
+// must not race with the child's write: thread creation and join (the
+// ScopedThread destructor) act as synchronization points.
+TEST(ThreadSanitizer, ThreadSync) {
+  MainThread t0;
+  MemLoc l;
+  t0.Write1(l);
+  {
+    ScopedThread t1;
+    t1.Write1(l);
+  }
+  t0.Write1(l);
+}
+
+// A thread created detached (ctor arg) must not produce spurious reports.
+TEST(ThreadSanitizer, ThreadDetach1) {
+  ScopedThread t1(true);
+  MemLoc l;
+  t1.Write1(l);
+}
+
+// Detaching a thread after it has done work must also be report-free.
+TEST(ThreadSanitizer, ThreadDetach2) {
+  ScopedThread t1;
+  MemLoc l;
+  t1.Write1(l);
+  t1.Detach();
+}
+
+// Thread body for the ThreadALot stress test: sleep briefly and exit.
+static void *thread_alot_func(void *arg) {
+  (void)arg;
+  // Local extern declaration avoids pulling in <unistd.h> — presumably to
+  // keep this TU's includes minimal; confirm against the file's headers.
+  int usleep(unsigned);
+  usleep(50);
+  return 0;
+}
+
+// Stress test (disabled by default): spawn kThreads short-lived threads
+// while keeping at most kAlive alive, joining a slot's previous occupant
+// before reusing it, then join the final kAlive survivors.
+TEST(DISABLED_SLOW_ThreadSanitizer, ThreadALot) {
+  const int kThreads = 70000;
+  const int kAlive = 1000;
+  // NOTE(review): zero-initialized pthread_t is used as an "empty slot"
+  // sentinel; POSIX does not guarantee 0 is never a valid thread id —
+  // fine on Linux, where pthread_t is an address-like value.
+  pthread_t threads[kAlive] = {};
+  for (int i = 0; i < kThreads; i++) {
+    if (threads[i % kAlive])
+      pthread_join(threads[i % kAlive], 0);
+    pthread_create(&threads[i % kAlive], 0, thread_alot_func, 0);
+  }
+  for (int i = 0; i < kAlive; i++) {
+    pthread_join(threads[i], 0);
+  }
+}