Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h')
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h  |  85
1 file changed, 59 insertions, 26 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
index e596bde36662..82f6c70a03f8 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
@@ -47,8 +47,8 @@
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
-
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
namespace __hwasan {
@@ -71,7 +71,7 @@ struct ThreadStats {
uptr total_stack_size;
};
-class HwasanThreadList {
+class SANITIZER_MUTEX HwasanThreadList {
public:
HwasanThreadList(uptr storage, uptr size)
: free_space_(storage), free_space_end_(storage + size) {
@@ -85,21 +85,27 @@ class HwasanThreadList {
RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
}
- Thread *CreateCurrentThread() {
- Thread *t;
+ Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
+ SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
+ Thread *t = nullptr;
{
- SpinMutexLock l(&list_mutex_);
+ SpinMutexLock l(&free_list_mutex_);
if (!free_list_.empty()) {
t = free_list_.back();
free_list_.pop_back();
- uptr start = (uptr)t - ring_buffer_size_;
- internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
- } else {
- t = AllocThread();
}
+ }
+ if (t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
+ } else {
+ t = AllocThread();
+ }
+ {
+ SpinMutexLock l(&live_list_mutex_);
live_list_.push_back(t);
}
- t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
+ t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
AddThreadStats(t);
return t;
}
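The reworked CreateCurrentThread() above replaces the single list_mutex_ with two short, independent critical sections and moves the ring-buffer memset out from under any lock. A minimal standalone sketch of the same pattern, using std::mutex and std::vector in place of the sanitizer primitives (ThreadSlot and SlotPool are made-up names for illustration, not HWASan types):

#include <cstring>
#include <mutex>
#include <vector>

struct ThreadSlot {
  char ring_buffer[4096];  // stand-in for the per-thread ring buffer
};

class SlotPool {
 public:
  ThreadSlot *Acquire() {
    ThreadSlot *slot = nullptr;
    {
      // First critical section: only the free list is touched.
      std::lock_guard<std::mutex> l(free_mutex_);
      if (!free_.empty()) {
        slot = free_.back();
        free_.pop_back();
      }
    }
    // The expensive reset runs with no lock held, so concurrent
    // Acquire()/Release() calls are not serialized behind it.
    if (slot)
      std::memset(slot, 0, sizeof(*slot));
    else
      slot = new ThreadSlot();
    {
      // Second, independent critical section: publish to the live list.
      std::lock_guard<std::mutex> l(live_mutex_);
      live_.push_back(slot);
    }
    return slot;
  }

  void Release(ThreadSlot *slot);  // sketched after the next hunk

 private:
  std::mutex free_mutex_;
  std::vector<ThreadSlot *> free_;
  std::mutex live_mutex_;
  std::vector<ThreadSlot *> live_;
};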
@@ -109,7 +115,9 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
- void RemoveThreadFromLiveList(Thread *t) {
+ void RemoveThreadFromLiveList(Thread *t)
+ SANITIZER_EXCLUDES(live_list_mutex_) {
+ SpinMutexLock l(&live_list_mutex_);
for (Thread *&t2 : live_list_)
if (t2 == t) {
// To remove t2, copy the last element of the list in t2's position, and
@@ -121,13 +129,13 @@ class HwasanThreadList {
CHECK(0 && "thread not found in live list");
}
- void ReleaseThread(Thread *t) {
+ void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
RemoveThreadStats(t);
- t->Destroy();
- SpinMutexLock l(&list_mutex_);
RemoveThreadFromLiveList(t);
- free_list_.push_back(t);
+ t->Destroy();
DontNeedThread(t);
+ SpinMutexLock l(&free_list_mutex_);
+ free_list_.push_back(t);
}
Thread *GetThreadByBufferAddress(uptr p) {
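Continuing the standalone SlotPool sketch above (same hypothetical names), the release path mirrors the reordered ReleaseThread() in this hunk: each mutex is taken on its own, so the free-list and live-list locks are never held at the same time.

#include <algorithm>

void SlotPool::Release(ThreadSlot *slot) {
  {
    // Unlink from the live list; only live_mutex_ is held here,
    // like RemoveThreadFromLiveList() taking live_list_mutex_.
    std::lock_guard<std::mutex> l(live_mutex_);
    auto it = std::find(live_.begin(), live_.end(), slot);
    if (it != live_.end())
      live_.erase(it);
  }
  // Per-slot tear-down (the analogue of Destroy()/DontNeedThread())
  // would happen here, with no lock held.
  {
    // Recycle under free_mutex_. The two mutexes are taken one after
    // the other, never nested, so their ordering cannot deadlock.
    std::lock_guard<std::mutex> l(free_mutex_);
    free_.push_back(slot);
  }
}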
@@ -143,52 +151,77 @@ class HwasanThreadList {
}
template <class CB>
- void VisitAllLiveThreads(CB cb) {
- SpinMutexLock l(&list_mutex_);
+ void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
+ SpinMutexLock l(&live_list_mutex_);
for (Thread *t : live_list_) cb(t);
}
- void AddThreadStats(Thread *t) {
+ template <class CB>
+ Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ CheckLocked();
+ for (Thread *t : live_list_)
+ if (cb(t))
+ return t;
+ return nullptr;
+ }
+
+ void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads++;
stats_.total_stack_size += t->stack_size();
}
- void RemoveThreadStats(Thread *t) {
+ void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads--;
stats_.total_stack_size -= t->stack_size();
}
- ThreadStats GetThreadStats() {
+ ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
return stats_;
}
+ uptr GetRingBufferSize() const { return ring_buffer_size_; }
+
+ void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ live_list_mutex_.CheckLocked();
+ }
+ void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
+ live_list_mutex_.Unlock();
+ }
+
private:
Thread *AllocThread() {
+ SpinMutexLock l(&free_space_mutex_);
uptr align = ring_buffer_size_ * 2;
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
free_space_ += thread_alloc_size_;
- CHECK(free_space_ <= free_space_end_ && "out of thread memory");
+ CHECK_LE(free_space_, free_space_end_);
return t;
}
+ SpinMutex free_space_mutex_;
uptr free_space_;
uptr free_space_end_;
uptr ring_buffer_size_;
uptr thread_alloc_size_;
- InternalMmapVector<Thread *> free_list_;
- InternalMmapVector<Thread *> live_list_;
- SpinMutex list_mutex_;
+ SpinMutex free_list_mutex_;
+ InternalMmapVector<Thread *> free_list_
+ SANITIZER_GUARDED_BY(free_list_mutex_);
+ SpinMutex live_list_mutex_;
+ InternalMmapVector<Thread *> live_list_
+ SANITIZER_GUARDED_BY(live_list_mutex_);
- ThreadStats stats_;
SpinMutex stats_mutex_;
+ ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
+ThreadArgRetval &hwasanThreadArgRetval();
-} // namespace
+} // namespace __hwasan
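The SANITIZER_MUTEX, SANITIZER_GUARDED_BY, SANITIZER_EXCLUDES, SANITIZER_ACQUIRE, SANITIZER_RELEASE and SANITIZER_CHECK_LOCKED macros used throughout this header wrap Clang's thread safety analysis attributes. A minimal standalone sketch of the same idea, with locally defined macros rather than the sanitizer_common ones (compile with clang -Wthread-safety; Mutex and Registry are illustrative names only):

#if defined(__clang__)
#  define CAPABILITY(x) __attribute__((capability(x)))
#  define GUARDED_BY(x) __attribute__((guarded_by(x)))
#  define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
#  define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))
#  define EXCLUDES(...) __attribute__((locks_excluded(__VA_ARGS__)))
#else
#  define CAPABILITY(x)
#  define GUARDED_BY(x)
#  define ACQUIRE(...)
#  define RELEASE(...)
#  define EXCLUDES(...)
#endif

#include <mutex>
#include <vector>

// A lock type annotated as a capability, analogous to SpinMutex.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

class Registry {
 public:
  // EXCLUDES documents that the caller must not already hold mu_,
  // mirroring SANITIZER_EXCLUDES on the methods in the diff above.
  void Add(int id) EXCLUDES(mu_) {
    mu_.Lock();
    items_.push_back(id);  // ok: the analysis sees mu_ as held here
    mu_.Unlock();
  }

 private:
  Mutex mu_;
  std::vector<int> items_ GUARDED_BY(mu_);  // like SANITIZER_GUARDED_BY
};

With the annotations in place, clang can flag call sites that touch items_ without holding mu_, which is the kind of compile-time checking this change enables for free_list_, live_list_ and stats_.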