Diffstat (limited to 'lib/tsan/rtl/tsan_interface_atomic.cc')
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc | 73
1 file changed, 39 insertions(+), 34 deletions(-)
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index 80266969849a..d9f8cdf5b106 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -30,23 +30,37 @@ using namespace __tsan; // NOLINT
#define SCOPED_ATOMIC(func, ...) \
const uptr callpc = (uptr)__builtin_return_address(0); \
uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc); \
mo = ConvertOrder(mo); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
ThreadState *const thr = cur_thread(); \
AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, callpc, __FUNCTION__); \
+ ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
+// Some shortcuts.
+typedef __tsan_memory_order morder;
+typedef __tsan_atomic8 a8;
+typedef __tsan_atomic16 a16;
+typedef __tsan_atomic32 a32;
+typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
+const morder mo_relaxed = __tsan_memory_order_relaxed;
+const morder mo_consume = __tsan_memory_order_consume;
+const morder mo_acquire = __tsan_memory_order_acquire;
+const morder mo_release = __tsan_memory_order_release;
+const morder mo_acq_rel = __tsan_memory_order_acq_rel;
+const morder mo_seq_cst = __tsan_memory_order_seq_cst;
+
class ScopedAtomic {
public:
- ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
: thr_(thr) {
CHECK_EQ(thr_->in_rtl, 0);
ProcessPendingSignals(thr);
FuncEntry(thr_, pc);
- DPrintf("#%d: %s\n", thr_->tid, func);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
thr_->in_rtl++;
}
~ScopedAtomic() {
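For context, the public entry points defined later in this file are what expand the SCOPED_ATOMIC macro above; their bodies fall outside the hunks shown here, but the pattern is roughly the sketch below (the entry-point names are the real __tsan_atomic* interface, the exact bodies are assumed).

// Sketch of how an entry point uses SCOPED_ATOMIC; the concrete
// definitions live further down in this file, outside the shown hunks.
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}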
@@ -58,20 +72,6 @@ class ScopedAtomic {
ThreadState *thr_;
};
-// Some shortcuts.
-typedef __tsan_memory_order morder;
-typedef __tsan_atomic8 a8;
-typedef __tsan_atomic16 a16;
-typedef __tsan_atomic32 a32;
-typedef __tsan_atomic64 a64;
-typedef __tsan_atomic128 a128;
-const morder mo_relaxed = __tsan_memory_order_relaxed;
-const morder mo_consume = __tsan_memory_order_consume;
-const morder mo_acquire = __tsan_memory_order_acquire;
-const morder mo_release = __tsan_memory_order_release;
-const morder mo_acq_rel = __tsan_memory_order_acq_rel;
-const morder mo_seq_cst = __tsan_memory_order_seq_cst;
-
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
StatInc(thr, StatAtomic);
StatInc(thr, t);
@@ -251,11 +251,10 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
// Assume the access is atomic.
if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
- return *a;
+ return *a; // as if atomic
}
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.acquire(&s->clock);
+ AcquireImpl(thr, pc, &s->clock);
T v = *a;
s->mtx.ReadUnlock();
__sync_synchronize();
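The hunk above leaves the fast path intact: a load that needs no acquire ordering and fits in a machine word is performed directly and only recorded via MemoryReadAtomic, while an acquire load locks the SyncVar and pulls its clock in through AcquireImpl. A caller-side illustration of how such a load reaches AtomicLoad through the public interface (the header and entry point are the documented TSan atomic interface; the wrapper function here is hypothetical):

// Illustration only: under -fsanitize=thread an acquire load of a
// 32-bit atomic is routed through this public entry point and ends
// up in AtomicLoad() with mo == mo_acquire, i.e. on the SyncVar path.
#include <sanitizer/tsan_interface_atomic.h>

int load_flag(const volatile __tsan_atomic32 *flag) {  // hypothetical helper
  return __tsan_atomic32_load(flag, __tsan_memory_order_acquire);
}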
@@ -273,13 +272,15 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
// Strictly speaking, even a relaxed store cuts off the release sequence,
// so we must reset the clock.
if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
- *a = v;
+ *a = v; // as if atomic
return;
}
__sync_synchronize();
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.ReleaseStore(&s->clock);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
*a = v;
s->mtx.Unlock();
// Trailing memory barrier to provide sequential consistency
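The comment about relaxed stores cutting off the release sequence is easy to miss; a small worked example (an illustration, assuming the pre-C++20 release-sequence rules this runtime targets) shows why the clock must be reset even on the relaxed path:

// Run t1, t2 and t3 on separate threads. Pre-C++20, t2's plain relaxed
// store is not part of the release sequence headed by t1's store, so a
// t3 that observes the value 2 does not synchronize with t1, and its
// read of 'data' is a data race that tsan should still report.
#include <atomic>

std::atomic<int> g(0);
int data = 0;

void t1() { data = 1; g.store(1, std::memory_order_release); }
void t2() { g.store(2, std::memory_order_relaxed); }
void t3() { if (g.load(std::memory_order_acquire) == 2) (void)data; }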
@@ -293,13 +294,15 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
SyncVar *s = 0;
if (mo != mo_relaxed) {
s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
if (IsAcqRelOrder(mo))
- thr->clock.acq_rel(&s->clock);
+ AcquireReleaseImpl(thr, pc, &s->clock);
else if (IsReleaseOrder(mo))
- thr->clock.release(&s->clock);
+ ReleaseImpl(thr, pc, &s->clock);
else if (IsAcquireOrder(mo))
- thr->clock.acquire(&s->clock);
+ AcquireImpl(thr, pc, &s->clock);
}
v = F(a, v);
if (s)
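The AcquireImpl/ReleaseImpl/AcquireReleaseImpl helpers replace the direct vector-clock calls this hunk removes; judging only from those removed lines, they presumably wrap the same clock operations, roughly as sketched below (inferred, not the real definitions, which live elsewhere in the runtime and may also update stats or fast_synch_epoch):

// Inferred from the removed lines only; not the actual rtl definitions.
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(c);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.release(c);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acq_rel(c);
}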
@@ -357,13 +360,15 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
SyncVar *s = 0;
if (mo != mo_relaxed) {
s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
if (IsAcqRelOrder(mo))
- thr->clock.acq_rel(&s->clock);
+ AcquireReleaseImpl(thr, pc, &s->clock);
else if (IsReleaseOrder(mo))
- thr->clock.release(&s->clock);
+ ReleaseImpl(thr, pc, &s->clock);
else if (IsAcquireOrder(mo))
- thr->clock.acquire(&s->clock);
+ AcquireImpl(thr, pc, &s->clock);
}
T cc = *c;
T pr = func_cas(a, cc, v);
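AtomicCAS is reached from the compare-exchange entry points of the public interface; a caller-side sketch (the wrapper function is hypothetical, the entry point and its signature follow the a64/a128 declarations in the next hunk):

// Illustration: the _val flavour returns the previous value (the
// _strong flavour returns whether the exchange happened); both funnel
// into AtomicCAS() above with success and failure orders mo and fmo.
#include <sanitizer/tsan_interface_atomic.h>

__tsan_atomic32 cas_val(volatile __tsan_atomic32 *a,   // hypothetical helper
                        __tsan_atomic32 expected,
                        __tsan_atomic32 desired) {
  return __tsan_atomic32_compare_exchange_val(
      a, expected, desired,
      __tsan_memory_order_acq_rel, __tsan_memory_order_acquire);
}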
@@ -659,14 +664,14 @@ a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
}
#if __TSAN_HAS_INT128
-a128 __tsan_atomic64_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
void __tsan_atomic_thread_fence(morder mo) {
- char* a;
+ char* a = 0;
SCOPED_ATOMIC(Fence, mo);
}
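The fence entry point has no memory operand, so it passes a dummy pointer to SCOPED_ATOMIC; initializing it to 0 avoids handing an uninitialized pointer to the new ScopedAtomic(thr, callpc, a, mo, ...) which now prints the address. A caller-side illustration (the wrapper is hypothetical; the entry point is the documented interface that std::atomic_thread_fence is instrumented into under -fsanitize=thread):

// Illustration only: a seq_cst fence in instrumented code ends up here.
#include <sanitizer/tsan_interface_atomic.h>

void full_fence() {  // hypothetical helper
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}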