author    Dimitry Andric <dim@FreeBSD.org>    2017-07-13 19:25:48 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2017-07-13 19:25:48 +0000
commit    1992b790c2c12b7850bdf86662b67302052ec2fe (patch)
tree      623c69b5fbf527bba17ecb9431ae5189871cecd4 /lib/tsan/rtl/tsan_clock.cc
parent    50aa32eff79f252ab05a0c0a589cf2ca37cd9923 (diff)
download  src-1992b790c2c12b7850bdf86662b67302052ec2fe.tar.gz
          src-1992b790c2c12b7850bdf86662b67302052ec2fe.zip
Vendor import of compiler-rt trunk r307894 (tag: vendor/compiler-rt/compiler-rt-trunk-r307894)
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=320961
    svn path=/vendor/compiler-rt/compiler-rt-trunk-r307894/; revision=320962; tag=vendor/compiler-rt/compiler-rt-trunk-r307894
Diffstat (limited to 'lib/tsan/rtl/tsan_clock.cc')
-rw-r--r--  lib/tsan/rtl/tsan_clock.cc  119
1 file changed, 59 insertions(+), 60 deletions(-)
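
A note before the diff: the largest refactor below replaces SyncClock's long constructor member-initializer list with a ResetImpl() helper shared by the constructor and Reset(). The following is a minimal standalone sketch of that pattern, not the real code; the type aliases, constants, and field types are simplified stand-ins for the sanitizer definitions.

    #include <cstdint>

    using u32  = std::uint32_t;
    using uptr = std::uintptr_t;

    constexpr unsigned kDirtyTids  = 2;    // illustrative; mirrors the constant used in the diff
    constexpr unsigned kInvalidTid = ~0u;  // illustrative sentinel value

    struct ClockBlock;  // opaque here; the real type holds clock elements / block indices

    class SyncClock {
     public:
      SyncClock() { ResetImpl(); }  // was: a long member-initializer list before this commit

      // Reset() in the real code first frees tab_ through the clock allocator,
      // then falls through into this shared state-clearing helper.
      void ResetImpl() {
        tab_ = nullptr;
        tab_idx_ = 0;
        size_ = 0;
        release_store_tid_ = kInvalidTid;
        release_store_reused_ = 0;
        for (unsigned i = 0; i < kDirtyTids; i++)
          dirty_tids_[i] = kInvalidTid;
      }

     private:
      ClockBlock *tab_ = nullptr;
      u32 tab_idx_ = 0;
      uptr size_ = 0;
      unsigned release_store_tid_ = 0;
      unsigned release_store_reused_ = 0;
      unsigned dirty_tids_[kDirtyTids] = {};
    };
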
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 32435adfdf33..9ee9104283f8 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -101,6 +101,9 @@ ThreadClock::ThreadClock(unsigned tid, unsigned reused)
clk_[tid_].reused = reused_;
}
+void ThreadClock::ResetCached(ClockCache *c) {
+}
+
void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(src->size_, kMaxTid);
@@ -116,9 +119,7 @@ void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
// Check if we've already acquired src after the last release operation on src
bool acquired = false;
if (nclk > tid_) {
- CPP_STAT_INC(StatClockAcquireLarge);
if (src->elem(tid_).reused == reused_) {
- CPP_STAT_INC(StatClockAcquireRepeat);
for (unsigned i = 0; i < kDirtyTids; i++) {
unsigned tid = src->dirty_tids_[i];
if (tid != kInvalidTid) {
@@ -266,11 +267,11 @@ void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
for (unsigned i = 0; i < kDirtyTids; i++) {
if (dst->dirty_tids_[i] == tid_) {
- CPP_STAT_INC(StatClockReleaseFast1);
+ CPP_STAT_INC(StatClockReleaseFast);
return;
}
if (dst->dirty_tids_[i] == kInvalidTid) {
- CPP_STAT_INC(StatClockReleaseFast2);
+ CPP_STAT_INC(StatClockReleaseFast);
dst->dirty_tids_[i] = tid_;
return;
}
@@ -297,56 +298,9 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
return true;
}
-void SyncClock::Resize(ClockCache *c, uptr nclk) {
- CPP_STAT_INC(StatClockReleaseResize);
- if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
- RoundUpTo(size_, ClockBlock::kClockCount)) {
- // Growing within the same block.
- // Memory is already allocated, just increase the size.
- size_ = nclk;
- return;
- }
- if (nclk <= ClockBlock::kClockCount) {
- // Grow from 0 to one-level table.
- CHECK_EQ(size_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
- size_ = nclk;
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- return;
- }
- // Growing two-level table.
- if (size_ == 0) {
- // Allocate first level table.
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- } else if (size_ <= ClockBlock::kClockCount) {
- // Transform one-level table to two-level table.
- u32 old = tab_idx_;
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- tab_->table[0] = old;
- }
- // At this point we have first level table allocated.
- // Add second level tables as necessary.
- for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
- i < nclk; i += ClockBlock::kClockCount) {
- u32 idx = ctx->clock_alloc.Alloc(c);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- internal_memset(cb, 0, sizeof(*cb));
- CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
- tab_->table[i/ClockBlock::kClockCount] = idx;
- }
- size_ = nclk;
-}
-
// Sets a single element in the vector clock.
// This function is called only from weird places like AcquireGlobal.
-void ThreadClock::set(unsigned tid, u64 v) {
+void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
DCHECK_LT(tid, kMaxTid);
DCHECK_GE(v, clk_[tid].epoch);
clk_[tid].epoch = v;
@@ -366,14 +320,8 @@ void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
tid_, reused_, last_acquire_);
}
-SyncClock::SyncClock()
- : release_store_tid_(kInvalidTid)
- , release_store_reused_()
- , tab_()
- , tab_idx_()
- , size_() {
- for (uptr i = 0; i < kDirtyTids; i++)
- dirty_tids_[i] = kInvalidTid;
+SyncClock::SyncClock() {
+ ResetImpl();
}
SyncClock::~SyncClock() {
@@ -395,6 +343,10 @@ void SyncClock::Reset(ClockCache *c) {
ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
ctx->clock_alloc.Free(c, tab_idx_);
}
+ ResetImpl();
+}
+
+void SyncClock::ResetImpl() {
tab_ = 0;
tab_idx_ = 0;
size_ = 0;
@@ -404,6 +356,53 @@ void SyncClock::Reset(ClockCache *c) {
dirty_tids_[i] = kInvalidTid;
}
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+ RoundUpTo(size_, ClockBlock::kClockCount)) {
+ // Growing within the same block.
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (nclk <= ClockBlock::kClockCount) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ size_ = nclk;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ return;
+ }
+ // Growing two-level table.
+ if (size_ == 0) {
+ // Allocate first level table.
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // Transform one-level table to two-level table.
+ u32 old = tab_idx_;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ tab_->table[0] = old;
+ }
+ // At this point we have first level table allocated.
+ // Add second level tables as necessary.
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
+ i < nclk; i += ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
+ tab_->table[i/ClockBlock::kClockCount] = idx;
+ }
+ size_ = nclk;
+}
+
ClockElem &SyncClock::elem(unsigned tid) const {
DCHECK_LT(tid, size_);
if (size_ <= ClockBlock::kClockCount)
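
For context on the Resize() hunk above: it grows a flat one-level table into a two-level table addressed as block tid / kClockCount, slot tid % kClockCount, which is the same arithmetic elem() uses. Below is a minimal sketch of that layout under simplified assumptions: std::vector stands in for the clock-block allocator, and kClockCount is an illustrative value, not the real constant.

    #include <cstdint>
    #include <vector>

    using u64  = std::uint64_t;
    using uptr = std::uintptr_t;

    constexpr uptr kClockCount = 128;  // illustrative; the real value derives from the block size

    struct ClockElem {
      u64 epoch;
      u64 reused;
    };

    // Simplified model of the two-level layout Resize() builds: a first-level
    // table whose entries refer to fixed-size second-level blocks of elements.
    // (The real code also keeps small clocks in a single one-level block.)
    struct TwoLevelClock {
      std::vector<std::vector<ClockElem>> level1;
      uptr size = 0;

      ClockElem &elem(unsigned tid) {
        // Same index arithmetic as tab_->table[i / ClockBlock::kClockCount] above.
        return level1[tid / kClockCount][tid % kClockCount];
      }

      void Resize(uptr nclk) {
        // Round up to whole blocks, adding zeroed second-level blocks as needed.
        while (level1.size() * kClockCount < nclk)
          level1.emplace_back(kClockCount, ClockElem{});
        size = nclk;
      }
    };

The real code stores u32 allocator indices in tab_->table[] rather than pointers, so blocks can be returned through the ClockCache argument; the sketch trades that machinery for brevity.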