Diffstat (limited to 'lib/tsan/rtl/tsan_sync.cc')
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc  114
1 file changed, 103 insertions, 11 deletions
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index abb5a2ad298f..b25346ef344f 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -17,14 +17,17 @@
 
 namespace __tsan {
 
-SyncVar::SyncVar(uptr addr)
+SyncVar::SyncVar(uptr addr, u64 uid)
   : mtx(MutexTypeSyncVar, StatMtxSyncVar)
   , addr(addr)
+  , uid(uid)
   , owner_tid(kInvalidTid)
+  , last_lock()
   , recursion()
   , is_rw()
   , is_recursive()
-  , is_broken() {
+  , is_broken()
+  , is_linker_init() {
 }
 
 SyncTab::Part::Part()
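The new uid field is worth a note before the next hunk: Create() below draws it from a relaxed atomic counter (uid_gen_), which gives every SyncVar ever created a distinct identity; presumably this lets the runtime tell apart two sync objects that occupy the same address at different times. Relaxed ordering is enough because only uniqueness is needed, not any ordering between threads. A minimal sketch of the pattern, with illustrative types rather than the tsan ones:

#include <atomic>
#include <cstdint>

// Sketch only: a process-wide relaxed counter yields unique ids.
static std::atomic<uint64_t> uid_gen{0};

struct Sync {
  uint64_t addr;  // user address this object shadows
  uint64_t uid;   // globally unique, never reused
  explicit Sync(uint64_t a)
      : addr(a), uid(uid_gen.fetch_add(1, std::memory_order_relaxed)) {}
};

// Two objects created at the same reused address stay distinguishable:
//   Sync *s1 = new Sync(0x1000);  /* ... */  delete s1;
//   Sync *s2 = new Sync(0x1000);  // s2->uid != s1->uid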
@@ -45,8 +48,61 @@ SyncTab::~SyncTab() {
   }
 }
 
+SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+                                     uptr addr, bool write_lock) {
+  return GetAndLock(thr, pc, addr, write_lock, true);
+}
+
+SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
+  return GetAndLock(0, 0, addr, write_lock, false);
+}
+
+SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
+  StatInc(thr, StatSyncCreated);
+  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
+  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+  SyncVar *res = new(mem) SyncVar(addr, uid);
+#ifndef TSAN_GO
+  res->creation_stack.ObtainCurrent(thr, pc);
+#endif
+  return res;
+}
+
 SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
-                             uptr addr, bool write_lock) {
+                             uptr addr, bool write_lock, bool create) {
+#ifndef TSAN_GO
+  {  // NOLINT
+    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
+    if (res)
+      return res;
+  }
+
+  // Here we ask only PrimaryAllocator, because
+  // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
+  // the hashmap anyway.
+  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
+    MBlock *b = user_mblock(thr, (void*)addr);
+    Lock l(&b->mtx);
+    SyncVar *res = 0;
+    for (res = b->head; res; res = res->next) {
+      if (res->addr == addr)
+        break;
+    }
+    if (res == 0) {
+      if (!create)
+        return 0;
+      res = Create(thr, pc, addr);
+      res->next = b->head;
+      b->head = res;
+    }
+    if (write_lock)
+      res->mtx.Lock();
+    else
+      res->mtx.ReadLock();
+    return res;
+  }
+#endif
+
   Part *p = &tab_[PartIdx(addr)];
   {
     ReadLock l(&p->mtx);
@@ -60,6 +116,8 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
       }
     }
   }
+  if (!create)
+    return 0;
   {
     Lock l(&p->mtx);
     SyncVar *res = p->val;
@@ -68,12 +126,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
         break;
     }
     if (res == 0) {
-      StatInc(thr, StatSyncCreated);
-      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
-      res = new(mem) SyncVar(addr);
-#ifndef TSAN_GO
-      res->creation_stack.ObtainCurrent(thr, pc);
-#endif
+      res = Create(thr, pc, addr);
       res->next = p->val;
       p->val = res;
     }
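Taken together, the two preceding hunks form a double-checked lookup: the part is first probed under ReadLock (the common fast path), and only on a miss, and only when create is set, is the exclusive Lock taken and the list re-scanned before inserting, since another thread may have created the same addr between the two lock acquisitions. A condensed sketch of that shape, using std::shared_mutex as a stand-in for the tsan mutex:

#include <cstdint>
#include <mutex>
#include <shared_mutex>

struct Sync { uint64_t addr; Sync *next; };

struct Part {
  std::shared_mutex mtx;
  Sync *head = nullptr;
};

static Sync *Find(Sync *s, uint64_t addr) {
  for (; s; s = s->next)
    if (s->addr == addr) return s;
  return nullptr;
}

Sync *Lookup(Part *p, uint64_t addr, bool create) {
  {
    std::shared_lock<std::shared_mutex> l(p->mtx);  // fast path: shared
    if (Sync *res = Find(p->head, addr)) return res;
  }
  if (!create) return nullptr;
  std::unique_lock<std::shared_mutex> l(p->mtx);    // slow path: exclusive
  Sync *res = Find(p->head, addr);  // re-check: a racer may have inserted
  if (!res) {
    res = new Sync{addr, p->head};  // push onto the part's list head
    p->head = res;
  }
  return res;
}

One difference from the real code: GetAndLock additionally returns with the found SyncVar's own mutex held (read- or write-locked), which the sketch omits.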
@@ -86,6 +139,39 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
 }
 
 SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
+#ifndef TSAN_GO
+  {  // NOLINT
+    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
+    if (res)
+      return res;
+  }
+  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
+    MBlock *b = user_mblock(thr, (void*)addr);
+    SyncVar *res = 0;
+    {
+      Lock l(&b->mtx);
+      SyncVar **prev = &b->head;
+      res = *prev;
+      while (res) {
+        if (res->addr == addr) {
+          if (res->is_linker_init)
+            return 0;
+          *prev = res->next;
+          break;
+        }
+        prev = &res->next;
+        res = *prev;
+      }
+    }
+    if (res) {
+      StatInc(thr, StatSyncDestroyed);
+      res->mtx.Lock();
+      res->mtx.Unlock();
+    }
+    return res;
+  }
+#endif
+
   Part *p = &tab_[PartIdx(addr)];
   SyncVar *res = 0;
   {
@@ -94,6 +180,8 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
     res = *prev;
     while (res) {
       if (res->addr == addr) {
+        if (res->is_linker_init)
+          return 0;
         *prev = res->next;
         break;
       }
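Two details of the removal paths above are easy to miss. First, a SyncVar whose is_linker_init flag is set is never unlinked: returning 0 turns the destroy into a no-op, apparently so that statically (linker-) initialized mutexes keep their state even if the program destroys and keeps using them. Second, the Lock()/Unlock() pair on the removed variable lets any concurrent critical section on it drain before the caller reclaims the memory. The walk itself keeps a SyncVar** to the incoming link, so removing the head and removing an interior node are the same code path; a self-contained sketch of that unlink idiom (Node is illustrative, not a tsan type):

struct Node {
  unsigned long addr;
  Node *next;
};

// Walk the pointer-to-pointer chain; *prev is the link to patch.
Node *Unlink(Node **head, unsigned long addr) {
  for (Node **prev = head; *prev; prev = &(*prev)->next) {
    if ((*prev)->addr == addr) {
      Node *res = *prev;
      *prev = res->next;  // same statement handles head and interior nodes
      return res;
    }
  }
  return nullptr;
}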
@@ -179,15 +267,19 @@ void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
   n_ = thr->shadow_stack_pos - thr->shadow_stack;
   if (n_ + !!toppc == 0)
     return;
+  uptr start = 0;
   if (c_) {
     CHECK_NE(s_, 0);
-    CHECK_LE(n_ + !!toppc, c_);
+    if (n_ + !!toppc > c_) {
+      start = n_ - c_ + !!toppc;
+      n_ = c_ - !!toppc;
+    }
   } else {
     s_ = (uptr*)internal_alloc(MBlockStackTrace,
                                (n_ + !!toppc) * sizeof(s_[0]));
   }
   for (uptr i = 0; i < n_; i++)
-    s_[i] = thr->shadow_stack[i];
+    s_[i] = thr->shadow_stack[start + i];
   if (toppc) {
     s_[n_] = toppc;
     n_++;
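The final hunk relaxes StackTrace::ObtainCurrent(): where it used to CHECK-fail when the shadow stack held more frames than the preallocated capacity c_, it now truncates, copying only the innermost frames starting at start = n_ - c_ + !!toppc and shrinking n_ so the optional top pc still fits. A standalone check of that arithmetic (not tsan code):

#include <cassert>

int main() {
  unsigned c = 4;      // trace capacity (c_)
  unsigned n = 10;     // shadow-stack depth (n_)
  unsigned toppc = 1;  // !!toppc: one extra pc will be appended
  unsigned start = n - c + toppc;  // 7: the outermost frames are skipped
  n = c - toppc;                   // 3 frames get copied
  assert(start + n == 10);         // the copy ends at the innermost frame
  assert(n + toppc == c);          // copied frames + top pc fill capacity
  return 0;
}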