path: root/lib/tsan/rtl/tsan_sync.cc
author     Andrew Turner <andrew@FreeBSD.org>  2012-07-30 10:58:13 +0000
committer  Andrew Turner <andrew@FreeBSD.org>  2012-07-30 10:58:13 +0000
commit     37dfff057418e02f8e5322da12684dd927e3d881 (patch)
tree       40cc44a3d02ed86de24f2117a55680e4f0eb01a0 /lib/tsan/rtl/tsan_sync.cc
parent     864a7b98b54e1f984c248f3be83dfcc082a382ea (diff)
download   src-37dfff057418e02f8e5322da12684dd927e3d881.tar.gz
           src-37dfff057418e02f8e5322da12684dd927e3d881.zip

Import compiler-rt r160957. (tag: vendor/compiler-rt/compiler-rt-r160957)

Notes:
    svn path=/vendor/compiler-rt/dist/; revision=238901
    svn path=/vendor/compiler-rt/compiler-rt-r160957/; revision=238902; tag=vendor/compiler-rt/compiler-rt-r160957
Diffstat (limited to 'lib/tsan/rtl/tsan_sync.cc')
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc | 219
1 file changed, 219 insertions(+), 0 deletions(-)
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
new file mode 100644
index 000000000000..abb5a2ad298f
--- /dev/null
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -0,0 +1,219 @@
+//===-- tsan_sync.cc ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+SyncVar::SyncVar(uptr addr)
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar)
+ , addr(addr)
+ , owner_tid(kInvalidTid)
+ , recursion()
+ , is_rw()
+ , is_recursive()
+ , is_broken() {
+}
+
+SyncTab::Part::Part()
+ : mtx(MutexTypeSyncTab, StatMtxSyncTab)
+ , val() {
+}
+
+SyncTab::SyncTab() {
+}
+
+SyncTab::~SyncTab() {
+ for (int i = 0; i < kPartCount; i++) {
+ while (tab_[i].val) {
+ SyncVar *tmp = tab_[i].val;
+ tab_[i].val = tmp->next;
+ DestroyAndFree(tmp);
+ }
+ }
+}
+
+SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ Part *p = &tab_[PartIdx(addr)];
+ {
+ ReadLock l(&p->mtx);
+ for (SyncVar *res = p->val; res; res = res->next) {
+ if (res->addr == addr) {
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+ }
+ }
+ {
+ Lock l(&p->mtx);
+ SyncVar *res = p->val;
+ for (; res; res = res->next) {
+ if (res->addr == addr)
+ break;
+ }
+ if (res == 0) {
+ StatInc(thr, StatSyncCreated);
+ void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
+ res = new(mem) SyncVar(addr);
+#ifndef TSAN_GO
+ res->creation_stack.ObtainCurrent(thr, pc);
+#endif
+ res->next = p->val;
+ p->val = res;
+ }
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+}
+
+SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
+ Part *p = &tab_[PartIdx(addr)];
+ SyncVar *res = 0;
+ {
+ Lock l(&p->mtx);
+ SyncVar **prev = &p->val;
+ res = *prev;
+ while (res) {
+ if (res->addr == addr) {
+ *prev = res->next;
+ break;
+ }
+ prev = &res->next;
+ res = *prev;
+ }
+ }
+ if (res) {
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
+ }
+ return res;
+}
+
+uptr SyncVar::GetMemoryConsumption() {
+ return sizeof(*this)
+ + clock.size() * sizeof(u64)
+ + read_clock.size() * sizeof(u64)
+ + creation_stack.Size() * sizeof(uptr);
+}
+
+uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
+ uptr mem = 0;
+ for (int i = 0; i < kPartCount; i++) {
+ Part *p = &tab_[i];
+ Lock l(&p->mtx);
+ for (SyncVar *s = p->val; s; s = s->next) {
+ *nsync += 1;
+ mem += s->GetMemoryConsumption();
+ }
+ }
+ return mem;
+}
+
+int SyncTab::PartIdx(uptr addr) {
+ return (addr >> 3) % kPartCount;
+}
+
+StackTrace::StackTrace()
+ : n_()
+ , s_()
+ , c_() {
+}
+
+StackTrace::StackTrace(uptr *buf, uptr cnt)
+ : n_()
+ , s_(buf)
+ , c_(cnt) {
+ CHECK_NE(buf, 0);
+ CHECK_NE(cnt, 0);
+}
+
+StackTrace::~StackTrace() {
+ Reset();
+}
+
+void StackTrace::Reset() {
+ if (s_ && !c_) {
+ CHECK_NE(n_, 0);
+ internal_free(s_);
+ s_ = 0;
+ }
+ n_ = 0;
+}
+
+void StackTrace::Init(const uptr *pcs, uptr cnt) {
+ Reset();
+ if (cnt == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(cnt, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+ }
+ n_ = cnt;
+ internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+}
+
+void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
+ Reset();
+ n_ = thr->shadow_stack_pos - thr->shadow_stack;
+ if (n_ + !!toppc == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(n_ + !!toppc, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace,
+ (n_ + !!toppc) * sizeof(s_[0]));
+ }
+ for (uptr i = 0; i < n_; i++)
+ s_[i] = thr->shadow_stack[i];
+ if (toppc) {
+ s_[n_] = toppc;
+ n_++;
+ }
+}
+
+void StackTrace::CopyFrom(const StackTrace& other) {
+ Reset();
+ Init(other.Begin(), other.Size());
+}
+
+bool StackTrace::IsEmpty() const {
+ return n_ == 0;
+}
+
+uptr StackTrace::Size() const {
+ return n_;
+}
+
+uptr StackTrace::Get(uptr i) const {
+ CHECK_LT(i, n_);
+ return s_[i];
+}
+
+const uptr *StackTrace::Begin() const {
+ return s_;
+}
+
+} // namespace __tsan
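
The core of the new file is SyncTab::GetAndLock: the table is split into kPartCount singly linked lists, each guarded by its own reader-writer mutex, and a lookup first scans the list under a read lock, retaking the part mutex exclusively only when the SyncVar has to be created. Below is a minimal standalone C++ sketch of that pattern; it is not part of the imported file, the names ToySyncTab/ToySyncVar and the bucket count are invented for illustration, and it omits the per-SyncVar locking, creation-stack capture, and statistics that the real code performs.

```cpp
// Sketch of a partitioned lookup table with per-part reader-writer locking,
// modeled loosely on SyncTab::GetAndLock (assumptions noted above).
#include <cstdint>
#include <mutex>
#include <shared_mutex>

struct ToySyncVar {
  std::uintptr_t addr;
  ToySyncVar *next;
};

class ToySyncTab {
  static const int kPartCount = 1009;  // bucket count chosen arbitrarily for the sketch
  struct Part {
    std::shared_mutex mtx;
    ToySyncVar *val = nullptr;  // head of a singly linked list of entries
  };
  Part tab_[kPartCount];

  static int PartIdx(std::uintptr_t addr) { return (addr >> 3) % kPartCount; }

 public:
  // Returns the entry for 'addr', creating it if it does not exist yet.
  ToySyncVar *GetOrCreate(std::uintptr_t addr) {
    Part *p = &tab_[PartIdx(addr)];
    {
      // Fast path: most lookups hit an existing entry, so a shared lock suffices.
      std::shared_lock<std::shared_mutex> l(p->mtx);
      for (ToySyncVar *s = p->val; s; s = s->next)
        if (s->addr == addr)
          return s;
    }
    // Slow path: retake the part lock exclusively and re-scan, because another
    // thread may have inserted the entry between the two critical sections.
    std::unique_lock<std::shared_mutex> l(p->mtx);
    for (ToySyncVar *s = p->val; s; s = s->next)
      if (s->addr == addr)
        return s;
    ToySyncVar *s = new ToySyncVar{addr, p->val};
    p->val = s;
    return s;
  }
};
```

The real implementation additionally locks the returned SyncVar's own mutex (in read or write mode, per the write_lock argument) before releasing the part lock, which is what lets GetAndRemove later serialize against in-flight users by briefly locking and unlocking the removed entry.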