Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib/lsan')
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp                   116
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan.h                      57
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp         403
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h           140
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp           1107
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h              350
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp    168
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp      147
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp        239
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_flags.inc              46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp           131
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h              35
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp      592
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp              33
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp               190
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp         59
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp             128
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h                49
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_preinit.cpp            21
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp            116
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h               66
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/weak_symbols.txt             3
22 files changed, 4196 insertions, 0 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
new file mode 100644
index 000000000000..7a27b600f203
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
@@ -0,0 +1,116 @@
+//=-- lsan.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
+
+bool lsan_inited;
+bool lsan_init_is_running;
+
+namespace __lsan {
+
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
+}
+
+} // namespace __lsan
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ using namespace __lsan;
+ uptr stack_top = 0, stack_bottom = 0;
+ if (ThreadContextLsanBase *t = GetCurrentThread()) {
+ stack_top = t->stack_end();
+ stack_bottom = t->stack_begin();
+ }
+ if (SANITIZER_MIPS && !IsValidFrame(bp, stack_top, stack_bottom))
+ return;
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+}
+
+using namespace __lsan;
+
+static void InitializeFlags() {
+ // Set all the default values.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 30;
+ cf.intercept_tls_get_addr = true;
+ cf.detect_leaks = true;
+ cf.exitcode = 23;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterLsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ // Override from user-specified string.
+ const char *lsan_default_options = __lsan_default_options();
+ parser.ParseString(lsan_default_options);
+ parser.ParseStringFromEnv("LSAN_OPTIONS");
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+}
+
+extern "C" void __lsan_init() {
+ CHECK(!lsan_init_is_running);
+ if (lsan_inited)
+ return;
+ lsan_init_is_running = true;
+ SanitizerToolName = "LeakSanitizer";
+ CacheBinaryName();
+ AvoidCVE_2016_2143();
+ InitializeFlags();
+ InitCommonLsan();
+ InitializeAllocator();
+ ReplaceSystemMalloc();
+ InitTlsSize();
+ InitializeInterceptors();
+ InitializeThreads();
+ InstallDeadlySignalHandlers(LsanOnDeadlySignal);
+ InitializeMainThread();
+ InstallAtExitCheckLeaks();
+ InstallAtForkHandler();
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ lsan_inited = true;
+ lsan_init_is_running = false;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_STACK_TRACE_FATAL;
+ stack.Print();
+}
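
As InitializeFlags() above shows, standalone LSan reads its configuration in a fixed order: hardcoded defaults, then a __lsan_default_options() definition in the instrumented program, then the LSAN_OPTIONS environment variable. A minimal sketch of the program-side override (the flag values here are illustrative, not recommendations):

    // In the instrumented program; parsed before LSAN_OPTIONS, so the
    // environment still wins for any flag set in both places.
    extern "C" const char *__lsan_default_options() {
      return "log_threads=1:max_leaks=5";
    }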
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h
new file mode 100644
index 000000000000..0074ad530878
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h
@@ -0,0 +1,57 @@
+//=-- lsan.h --------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private header for standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+#if SANITIZER_POSIX
+# include "lsan_posix.h"
+#elif SANITIZER_FUCHSIA
+# include "lsan_fuchsia.h"
+#endif
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, fast, \
+ max_size);
+
+#define GET_STACK_TRACE_FATAL \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+ common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
+namespace __lsan {
+
+void InitializeInterceptors();
+void ReplaceSystemMalloc();
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context);
+void InstallAtExitCheckLeaks();
+void InstallAtForkHandler();
+
+#define ENSURE_LSAN_INITED \
+ do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+ } while (0)
+
+} // namespace __lsan
+
+extern bool lsan_inited;
+extern bool lsan_init_is_running;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __lsan_init();
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
new file mode 100644
index 000000000000..493bf5f9efc5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -0,0 +1,403 @@
+//=-- lsan_allocator.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_allocator.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_allocator.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_common.h"
+
+extern "C" void *memset(void *ptr, int value, uptr num);
+
+namespace __lsan {
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1ULL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 4ULL << 30;
+#else
+static const uptr kMaxAllowedMallocSize = 1ULL << 40;
+#endif
+
+static Allocator allocator;
+
+static uptr max_malloc_size;
+
+void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms);
+ if (common_flags()->max_allocation_size_mb)
+ max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+ kMaxAllowedMallocSize);
+ else
+ max_malloc_size = kMaxAllowedMallocSize;
+}
+
+void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }
+
+void AllocatorThreadFinish() {
+ allocator.SwallowCache(GetAllocatorCache());
+ allocator.DestroyCache(GetAllocatorCache());
+}
+
+static ChunkMetadata *Metadata(const void *p) {
+ return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
+ if (!p) return;
+ ChunkMetadata *m = Metadata(p);
+ CHECK(m);
+ m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
+ m->stack_trace_id = StackDepotPut(stack);
+ m->requested_size = size;
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+ RunMallocHooks(p, size);
+}
+
+static void RegisterDeallocation(void *p) {
+ if (!p) return;
+ ChunkMetadata *m = Metadata(p);
+ CHECK(m);
+ RunFreeHooks(p);
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
+}
+
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+}
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+ bool cleared) {
+ if (size == 0)
+ size = 1;
+ if (size > max_malloc_size)
+ return ReportAllocationSizeTooBig(size, stack);
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(&stack);
+ }
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, &stack);
+ }
+ // Do not rely on the allocator to clear the memory (it's slow).
+ if (cleared && allocator.FromPrimary(p))
+ memset(p, 0, size);
+ RegisterAllocation(stack, p, size);
+ return p;
+}
+
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, &stack);
+ }
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
+
+void Deallocate(void *p) {
+ RegisterDeallocation(p);
+ allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+ uptr alignment) {
+ if (new_size > max_malloc_size) {
+ ReportAllocationSizeTooBig(new_size, stack);
+ return nullptr;
+ }
+ RegisterDeallocation(p);
+ void *new_p =
+ allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+ if (new_p)
+ RegisterAllocation(stack, new_p, new_size);
+ else if (new_size != 0)
+ RegisterAllocation(stack, p, new_size);
+ return new_p;
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
+}
+
+static const void *GetMallocBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ ChunkMetadata *m = Metadata(beg);
+ if (!m)
+ return nullptr;
+ if (!m->allocated)
+ return nullptr;
+ if (m->requested_size == 0)
+ return nullptr;
+ return (const void *)beg;
+}
+
+uptr GetMallocUsableSize(const void *p) {
+ if (!p)
+ return 0;
+ ChunkMetadata *m = Metadata(p);
+ if (!m) return 0;
+ return m->requested_size;
+}
+
+uptr GetMallocUsableSizeFast(const void *p) {
+ return Metadata(p)->requested_size;
+}
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, &stack);
+ }
+ void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by Allocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, &stack);
+ }
+ return lsan_realloc(ptr, nmemb * size, stack);
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(
+ Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
+///// Interface to the common LSan module. /////
+
+void LockAllocator() {
+ allocator.ForceLock();
+}
+
+void UnlockAllocator() {
+ allocator.ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&allocator;
+ *end = *begin + sizeof(allocator);
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+ if (!chunk) return 0;
+ // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
+ // valid, but we don't want that.
+ if (addr < chunk) return 0;
+ ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
+ CHECK(m);
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ return chunk;
+}
+
+uptr GetUserAddr(uptr chunk) {
+ return chunk;
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = Metadata(reinterpret_cast<void *>(chunk));
+ CHECK(metadata_);
+}
+
+bool LsanMetadata::allocated() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObject(const void *p) {
+ void *chunk = allocator.GetBlockBegin(p);
+ if (!chunk || p < chunk) return kIgnoreObjectInvalid;
+ ChunkMetadata *m = Metadata(chunk);
+ CHECK(m);
+ if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
+ if (m->tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->tag = kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+
+} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) {
+ return GetMallocBegin(p) != nullptr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const void * __sanitizer_get_allocated_begin(const void *p) {
+ return GetMallocBegin(p);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = GetMallocUsableSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
+
+} // extern "C"
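
The __sanitizer_get_* functions above are standalone LSan's implementation of the public allocator interface declared in <sanitizer/allocator_interface.h>. A small usage sketch from instrumented user code:

    #include <sanitizer/allocator_interface.h>
    #include <cstdio>

    int main() {
      char *p = new char[64];
      if (__sanitizer_get_ownership(p))  // true: p came from LSan's allocator
        std::printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));
      delete[] p;
      return 0;
    }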
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
new file mode 100644
index 000000000000..5eed0cbdb309
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
@@ -0,0 +1,140 @@
+//=-- lsan_allocator.h ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Allocator for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_ALLOCATOR_H
+#define LSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+ bool cleared);
+void Deallocate(void *p);
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+ uptr alignment);
+uptr GetMallocUsableSize(const void *p);
+
+template<typename Callable>
+void ForEachChunk(const Callable &callback);
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end);
+void AllocatorThreadStart();
+void AllocatorThreadFinish();
+void InitializeAllocator();
+
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
+
+#if !SANITIZER_CAN_USE_ALLOCATOR64
+template <typename AddressSpaceViewTy>
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = AddressSpaceViewTy;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#else
+# if SANITIZER_FUCHSIA || defined(__powerpc64__)
+const uptr kAllocatorSpace = ~(uptr)0;
+# if SANITIZER_RISCV64
+// See the comments in compiler-rt/lib/asan/asan_allocator.h for why these
+// values were chosen.
+const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
+using LSanSizeClassMap = SizeClassMap</*kNumBits=*/2,
+ /*kMinSizeLog=*/5,
+ /*kMidSizeLog=*/8,
+ /*kMaxSizeLog=*/18,
+ /*kNumCachedHintT=*/8,
+ /*kMaxBytesCachedLog=*/10>;
+static_assert(LSanSizeClassMap::kNumClassesRounded <= 32,
+ "32 size classes is the optimal number to ensure tests run "
+              "efficiently on Fuchsia.");
+# else
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# endif
+# elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# elif SANITIZER_APPLE
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# else
+const uptr kAllocatorSpace = 0x500000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# endif
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ using SizeClassMap = LSanSizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#endif
+
+template <typename AddressSpaceView>
+using AllocatorASVT = CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
+using Allocator = AllocatorASVT<LocalAddressSpaceView>;
+using AllocatorCache = Allocator::AllocatorCache;
+
+Allocator::AllocatorCache *GetAllocatorCache();
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack);
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_reallocarray(void *p, uptr nmemb, uptr size,
+ const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+void *lsan_pvalloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
+} // namespace __lsan
+
+#endif // LSAN_ALLOCATOR_H
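
A quick check of the ChunkMetadata bit budget above: on 64-bit targets the bit-fields sum to 8 (allocated) + 2 (tag) + 54 (requested_size) = 64 bits, and on 32-bit targets to 8 + 2 + 32 + 22 = 64 bits, so together with the trailing u32 stack_trace_id one chunk's metadata is expected to occupy at most 16 bytes (12 on 32-bit, before tail padding). A compile-time sanity check, assuming the compiler packs the bit-fields into a single 64-bit unit as the layout intends:

    static_assert(sizeof(__lsan::ChunkMetadata) <= 16,
                  "one ChunkMetadata is kept per heap chunk; keep it small");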
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
new file mode 100644
index 000000000000..183df6e5ca14
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
@@ -0,0 +1,1107 @@
+//=-- lsan_common.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+#if CAN_SANITIZE_LEAKS
+
+# if SANITIZER_APPLE
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
+# define OBJC_DATA_MASK 0x0000007ffffffff8UL
+# else
+# define OBJC_DATA_MASK 0x00007ffffffffff8UL
+# endif
+# endif
+
+namespace __lsan {
+
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
+static Mutex global_mutex;
+
+void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
+void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }
+
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
+ Report("Unmatched call to __lsan_enable().\n");
+ Die();
+ }
+}
+
+void Flags::SetDefaults() {
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
+}
+
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
+}
+
+# define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) \
+ Report(__VA_ARGS__); \
+ } while (0)
+
+# define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) \
+ Report(__VA_ARGS__); \
+ } while (0)
+
+class LeakSuppressionContext {
+ bool parsed = false;
+ SuppressionContext context;
+ bool suppressed_stacks_sorted = true;
+ InternalMmapVector<u32> suppressed_stacks;
+ const LoadedModule *suppress_module = nullptr;
+
+ void LazyInit();
+ Suppression *GetSuppressionForAddr(uptr addr);
+ bool SuppressInvalid(const StackTrace &stack);
+ bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
+
+ public:
+  LeakSuppressionContext(const char *suppression_types[],
+                         int suppression_types_num)
+      : context(suppression_types, suppression_types_num) {}
+
+ bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
+
+ const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
+ if (!suppressed_stacks_sorted) {
+ suppressed_stacks_sorted = true;
+ SortAndDedup(suppressed_stacks);
+ }
+ return suppressed_stacks;
+ }
+ void PrintMatchedSuppressions();
+};
+
+alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
+static LeakSuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = {kSuppressionLeak};
+static const char kStdSuppressions[] =
+# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
+# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+# if SANITIZER_APPLE
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
+# endif
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder)
+ LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+}
+
+void LeakSuppressionContext::LazyInit() {
+ if (!parsed) {
+ parsed = true;
+ context.ParseFromFile(flags()->suppressions);
+ if (&__lsan_default_suppressions)
+ context.Parse(__lsan_default_suppressions());
+ context.Parse(kStdSuppressions);
+ if (flags()->use_tls && flags()->use_ld_allocations)
+ suppress_module = GetLinker();
+ }
+}
+
+Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
+ Suppression *s = nullptr;
+
+ // Suppress by module name.
+ const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
+ if (!module_name)
+ module_name = "<unknown module>";
+ if (context.Match(module_name, kSuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
+ SymbolizedStackHolder symbolized_stack(
+ Symbolizer::GetOrInit()->SymbolizePC(addr));
+ const SymbolizedStack *frames = symbolized_stack.get();
+ for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
+ context.Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
+ }
+ return s;
+}
+
+static uptr GetCallerPC(const StackTrace &stack) {
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+# if SANITIZER_APPLE
+// Several pointers in the Objective-C runtime (method cache and class_rw_t,
+// for example) are tagged with additional bits we need to strip.
+static inline void *TransformPointer(void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
+}
+# endif
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, the dynamic loader's internal
+// bookkeeping for loaded modules, etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+ uptr caller_pc = GetCallerPC(stack);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ return !caller_pc ||
+ (suppress_module && suppress_module->containsAddress(caller_pc));
+}
+
+bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
+ uptr hit_count, uptr total_size) {
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
+ if (s) {
+ s->weight += total_size;
+ atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
+ uptr total_size) {
+ LazyInit();
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
+ return false;
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+ return true;
+}
+
+static LeakSuppressionContext *GetSuppressionContext() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+void InitCommonLsan() {
+ if (common_flags()->detect_leaks) {
+ // Initialization which can fail or print warnings should only be done if
+ // LSan is actually enabled.
+ InitializeSuppressions();
+ InitializePlatformSpecificModules();
+ }
+}
+
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Error() { return Red(); }
+ const char *Leak() { return Blue(); }
+};
+
+static inline bool MaybeUserPointer(uptr p) {
+ // Since our heap is located in mmap-ed memory, we can assume a sensible lower
+ // bound on heap addresses.
+ const uptr kMinAddress = 4 * 4096;
+ if (p < kMinAddress)
+ return false;
+# if defined(__x86_64__)
+ // TODO: support LAM48 and 5 level page tables.
+ // LAM_U57 mask format
+ // * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
+ // * top-1 byte: 0xff because it should be 0
+ // * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
+ constexpr uptr kLAM_U57Mask = 0x81ff80;
+ constexpr uptr kPointerMask = kLAM_U57Mask << 40;
+ return ((p & kPointerMask) == 0);
+# elif defined(__mips64)
+ return ((p >> 40) == 0);
+# elif defined(__aarch64__)
+ // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
+ // address translation and can be used to store a tag.
+ constexpr uptr kPointerMask = 255ULL << 48;
+ // Accept up to 48 bit VMA.
+ return ((p & kPointerMask) == 0);
+# elif defined(__loongarch_lp64)
+  // Currently allow a 47-bit user-space VMA.
+ return ((p >> 47) == 0);
+# else
+ return true;
+# endif
+}
+
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
+void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
+ const char *region_type, ChunkTag tag) {
+ CHECK(tag == kReachable || tag == kIndirectlyLeaked);
+ const uptr alignment = flags()->pointer_alignment();
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
+ (void *)end);
+ uptr pp = begin;
+ if (pp % alignment)
+ pp = pp + alignment - pp % alignment;
+ for (; pp + sizeof(void *) <= end; pp += alignment) {
+ void *p = *reinterpret_cast<void **>(pp);
+# if SANITIZER_APPLE
+ p = TransformPointer(p);
+# endif
+ if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
+ continue;
+ uptr chunk = PointsIntoChunk(p);
+ if (!chunk)
+ continue;
+ // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
+ if (chunk == begin)
+ continue;
+ LsanMetadata m(chunk);
+ if (m.tag() == kReachable || m.tag() == kIgnored)
+ continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
+ m.requested_size());
+ continue;
+ }
+
+ m.set_tag(tag);
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
+ (void *)pp, p, (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
+ if (frontier)
+ frontier->push_back(chunk);
+ }
+}
+
+// Scans a global range for pointers
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+ Frontier *frontier) {
+ for (uptr i = 0; i < ranges.size(); i++) {
+ ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
+ kReachable);
+ }
+}
+
+# if SANITIZER_FUCHSIA
+
+// Fuchsia handles all threads together with its own callback.
+static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
+ uptr) {}
+
+# else
+
+# if SANITIZER_ANDROID
+// FIXME: Move this out into *libcdep.cpp
+extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
+ pid_t, void (*cb)(void *, void *, uptr, void *), void *);
+# endif
+
+static void ProcessThreadRegistry(Frontier *frontier) {
+ InternalMmapVector<uptr> ptrs;
+ GetAdditionalThreadContextPtrsLocked(&ptrs);
+
+ for (uptr i = 0; i < ptrs.size(); ++i) {
+ void *ptr = reinterpret_cast<void *>(ptrs[i]);
+ uptr chunk = PointsIntoChunk(ptr);
+ if (!chunk)
+ continue;
+ LsanMetadata m(chunk);
+ if (!m.allocated())
+ continue;
+
+ // Mark as reachable and add to frontier.
+ LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
+ m.set_tag(kReachable);
+ frontier->push_back(chunk);
+ }
+}
+
+// Scans thread data (stacks and TLS) for heap pointers.
+static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
+ Frontier *frontier, tid_t caller_tid,
+ uptr caller_sp) {
+ InternalMmapVector<uptr> registers;
+ InternalMmapVector<Range> extra_ranges;
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+ tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
+ LOG_THREADS("Processing thread %llu.\n", os_id);
+ uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
+ DTLS *dtls;
+ bool thread_found =
+ GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
+ &tls_end, &cache_begin, &cache_end, &dtls);
+ if (!thread_found) {
+ // If a thread can't be found in the thread registry, it's probably in the
+ // process of destruction. Log this event and move on.
+ LOG_THREADS("Thread %llu not found in registry.\n", os_id);
+ continue;
+ }
+ uptr sp;
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, &registers, &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %llu.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
+ continue;
+ sp = stack_begin;
+ }
+ if (suspended_threads.GetThreadID(i) == caller_tid) {
+ sp = caller_sp;
+ }
+
+ if (flags()->use_registers && have_registers) {
+ uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+ uptr registers_end =
+ reinterpret_cast<uptr>(registers.data() + registers.size());
+ ScanRangeForPointers(registers_begin, registers_end, frontier,
+ "REGISTERS", kReachable);
+ }
+
+ if (flags()->use_stacks) {
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
+ (void *)stack_end, (void *)sp);
+ if (sp < stack_begin || sp >= stack_end) {
+ // SP is outside the recorded stack range (e.g. the thread is running a
+ // signal handler on alternate stack, or swapcontext was used).
+ // Again, consider the entire stack range to be reachable.
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
+ uptr page_size = GetPageSizeCached();
+ int skipped = 0;
+ while (stack_begin < stack_end &&
+ !IsAccessibleMemoryRange(stack_begin, 1)) {
+ skipped++;
+ stack_begin += page_size;
+ }
+ LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
+ skipped, (void *)stack_begin, (void *)stack_end);
+ } else {
+ // Shrink the stack range to ignore out-of-scope values.
+ stack_begin = sp;
+ }
+ ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
+ kReachable);
+ extra_ranges.clear();
+ GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
+ ScanExtraStackRanges(extra_ranges, frontier);
+ }
+
+ if (flags()->use_tls) {
+ if (tls_begin) {
+ LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
+        // If the TLS and cache ranges don't overlap, scan the full TLS
+        // range; otherwise, only scan the non-overlapping portions.
+ if (cache_begin == cache_end || tls_end < cache_begin ||
+ tls_begin > cache_end) {
+ ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+ } else {
+ if (tls_begin < cache_begin)
+ ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+ kReachable);
+ if (tls_end > cache_end)
+ ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+ kReachable);
+ }
+ }
+# if SANITIZER_ANDROID
+      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
+ void *arg) -> void {
+ ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
+ reinterpret_cast<uptr>(dtls_end),
+ reinterpret_cast<Frontier *>(arg), "DTLS",
+ kReachable);
+ };
+
+      // FIXME: There might be a race condition here (and in Bionic) if the
+      // thread is suspended in the middle of updating its DTLS. In other
+      // words, we could scan already-freed memory (probably fine for now).
+ __libc_iterate_dynamic_tls(os_id, cb, frontier);
+# else
+ if (dtls && !DTLSInDestruction(dtls)) {
+ ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
+ uptr dtls_beg = dtv.beg;
+ uptr dtls_end = dtls_beg + dtv.size;
+ if (dtls_beg < dtls_end) {
+ LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
+ (void *)dtls_end);
+ ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
+ kReachable);
+ }
+ });
+ } else {
+ // We are handling a thread with DTLS under destruction. Log about
+ // this and continue.
+ LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
+ }
+# endif
+ }
+ }
+
+ // Add pointers reachable from ThreadContexts
+ ProcessThreadRegistry(frontier);
+}
+
+# endif // SANITIZER_FUCHSIA
+
+// A map that contains [region_begin, region_end) pairs.
+using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
+
+static RootRegions &GetRootRegionsLocked() {
+ global_mutex.CheckLocked();
+ static RootRegions *regions = nullptr;
+ alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
+ if (!regions)
+ regions = new (placeholder) RootRegions();
+ return *regions;
+}
+
+bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
+
+void ScanRootRegions(Frontier *frontier,
+ const InternalMmapVectorNoCtor<Region> &mapped_regions) {
+ if (!flags()->use_root_regions)
+ return;
+
+ InternalMmapVector<Region> regions;
+ GetRootRegionsLocked().forEach([&](const auto &kv) {
+ regions.push_back({kv.first.first, kv.first.second});
+ return true;
+ });
+
+ InternalMmapVector<Region> intersection;
+ Intersect(mapped_regions, regions, intersection);
+
+ for (const Region &r : intersection) {
+ LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
+ (void *)r.begin, (void *)r.end);
+ ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions || !HasRootRegions())
+ return;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ InternalMmapVector<Region> mapped_regions;
+ while (proc_maps.Next(&segment))
+ if (segment.IsReadable())
+ mapped_regions.push_back({segment.start, segment.end});
+ ScanRootRegions(frontier, mapped_regions);
+}
+
+static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
+ while (frontier->size()) {
+ uptr next_chunk = frontier->back();
+ frontier->pop_back();
+ LsanMetadata m(next_chunk);
+ ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
+ "HEAP", tag);
+ }
+}
+
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable) {
+ ScanRangeForPointers(chunk, chunk + m.requested_size(),
+ /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
+ }
+}
+
+static void IgnoredSuppressedCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated() || m.tag() == kIgnored)
+ return;
+
+ const InternalMmapVector<u32> &suppressed =
+ *static_cast<const InternalMmapVector<u32> *>(arg);
+ uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
+ if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
+ return;
+
+ LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
+ m.set_tag(kIgnored);
+}
+
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() == kIgnored) {
+ LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
+ reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+ }
+}
+
+// Sets the appropriate tag on each chunk.
+static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+ Frontier *frontier, tid_t caller_tid,
+ uptr caller_sp) {
+ const InternalMmapVector<u32> &suppressed_stacks =
+ GetSuppressionContext()->GetSortedSuppressedStacks();
+ if (!suppressed_stacks.empty()) {
+ ForEachChunk(IgnoredSuppressedCb,
+ const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
+ }
+ ForEachChunk(CollectIgnoredCb, frontier);
+ ProcessGlobalRegions(frontier);
+ ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
+ ProcessRootRegions(frontier);
+ FloodFillTag(frontier, kReachable);
+
+ // The check here is relatively expensive, so we do this in a separate flood
+ // fill. That way we can skip the check for chunks that are reachable
+ // otherwise.
+ LOG_POINTERS("Processing platform-specific allocations.\n");
+ ProcessPlatformSpecificAllocations(frontier);
+ FloodFillTag(frontier, kReachable);
+
+ // Iterate over leaked chunks and mark those that are reachable from other
+ // leaked chunks.
+ LOG_POINTERS("Scanning leaked chunks.\n");
+ ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+ (void)arg;
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kIgnored)
+ m.set_tag(kDirectlyLeaked);
+}
+
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated())
+ return;
+ if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
+ leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
+}
+
+void LeakSuppressionContext::PrintMatchedSuppressions() {
+ InternalMmapVector<Suppression *> matched;
+ context.GetMatched(&matched);
+ if (!matched.size())
+ return;
+ const char *line = "-----------------------------------------------------";
+ Printf("%s\n", line);
+ Printf("Suppressions used:\n");
+ Printf(" count bytes template\n");
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%7zu %10zu %s\n",
+ static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
+ matched[i]->weight, matched[i]->templ);
+ }
+ Printf("%s\n\n", line);
+}
+
+# if SANITIZER_FUCHSIA
+
+// Fuchsia provides a libc interface that guarantees all threads are
+// covered, and SuspendedThreadList is never really used.
+static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
+
+# else // !SANITIZER_FUCHSIA
+
+static void ReportUnsuspendedThreads(
+ const SuspendedThreadsList &suspended_threads) {
+ InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+ threads[i] = suspended_threads.GetThreadID(i);
+
+ Sort(threads.data(), threads.size());
+
+ InternalMmapVector<tid_t> unsuspended;
+ GetRunningThreadsLocked(&unsuspended);
+
+ for (auto os_id : unsuspended) {
+ uptr i = InternalLowerBound(threads, os_id);
+ if (i >= threads.size() || threads[i] != os_id)
+      Report(
+          "Running thread %llu was not suspended. False leaks are possible.\n",
+          os_id);
+ }
+}
+
+# endif // !SANITIZER_FUCHSIA
+
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+ void *arg) {
+ CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
+ CHECK(param);
+ CHECK(!param->success);
+ ReportUnsuspendedThreads(suspended_threads);
+ ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
+ param->caller_sp);
+ ForEachChunk(CollectLeaksCb, &param->leaks);
+ // Clean up for subsequent leak checks. This assumes we did not overwrite any
+ // kIgnored tags.
+ ForEachChunk(ResetTagsCb, nullptr);
+ param->success = true;
+}
+
+static bool PrintResults(LeakReport &report) {
+ uptr unsuppressed_count = report.UnsuppressedLeakCount();
+ if (unsuppressed_count) {
+ Decorator d;
+ Printf(
+ "\n"
+ "================================================================="
+ "\n");
+ Printf("%s", d.Error());
+ Report("ERROR: LeakSanitizer: detected memory leaks\n");
+ Printf("%s", d.Default());
+ report.ReportTopLeaks(flags()->max_leaks);
+ }
+ if (common_flags()->print_suppressions)
+ GetSuppressionContext()->PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
+ report.PrintSummary();
+ return true;
+ }
+ return false;
+}
+
+static bool CheckForLeaks() {
+ if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
+ VReport(1, "LeakSanitizer is disabled");
+ return false;
+ }
+ VReport(1, "LeakSanitizer: checking for leaks");
+  // Inside LockStuffAndStopTheWorld we can't run the symbolizer, so we can't
+  // match suppressions. However, if a stack id was previously suppressed, it
+  // should be suppressed in future checks as well.
+ for (int i = 0;; ++i) {
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
+    // Capture the calling thread's stack pointer early, to avoid false
+    // negatives: an old frame containing dead pointers might otherwise be
+    // overlapped by a new frame created inside CheckForLeaks before the
+    // threads are suspended and their stack pointers captured.
+ param.caller_tid = GetTid();
+ param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
+ LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
+ if (!param.success) {
+ Report("LeakSanitizer has encountered a fatal error.\n");
+ Report(
+ "HINT: For debugging, try setting environment variable "
+ "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
+ "etc)\n");
+ Die();
+ }
+ LeakReport leak_report;
+ leak_report.AddLeakedChunks(param.leaks);
+
+    // No new suppressed stacks, so rerunning will not help and we can report.
+ if (!leak_report.ApplySuppressions())
+ return PrintResults(leak_report);
+
+ // No indirect leaks to report, so we are done here.
+ if (!leak_report.IndirectUnsuppressedLeakCount())
+ return PrintResults(leak_report);
+
+ if (i >= 8) {
+      Report("WARNING: LeakSanitizer gave up on suppressing indirect leaks.\n");
+ return PrintResults(leak_report);
+ }
+
+ // We found a new previously unseen suppressed call stack. Rerun to make
+ // sure it does not hold indirect leaks.
+ VReport(1, "Rerun with %zu suppressed stacks.",
+ GetSuppressionContext()->GetSortedSuppressedStacks().size());
+ }
+}
+
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
+void DoLeakCheck() {
+ Lock l(&global_mutex);
+ static bool already_done;
+ if (already_done)
+ return;
+ already_done = true;
+ has_reported_leaks = CheckForLeaks();
+ if (has_reported_leaks)
+ HandleLeaks();
+}
+
+static int DoRecoverableLeakCheck() {
+ Lock l(&global_mutex);
+ bool have_leaks = CheckForLeaks();
+ return have_leaks ? 1 : 0;
+}
+
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
+///// LeakReport implementation. /////
+
+// A hard limit on the number of distinct leaks, to avoid quadratic complexity
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
+// FIXME: Get rid of this limit by moving logic into DedupLeaks.
+const uptr kMaxLeaksConsidered = 5000;
+
+void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
+ for (const LeakedChunk &leak : chunks) {
+ uptr chunk = leak.chunk;
+ u32 stack_trace_id = leak.stack_trace_id;
+ uptr leaked_size = leak.leaked_size;
+ ChunkTag tag = leak.tag;
+ CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+
+ if (u32 resolution = flags()->resolution) {
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
+ }
+
+ bool is_directly_leaked = (tag == kDirectlyLeaked);
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].stack_trace_id == stack_trace_id &&
+ leaks_[i].is_directly_leaked == is_directly_leaked) {
+ leaks_[i].hit_count++;
+ leaks_[i].total_size += leaked_size;
+ break;
+ }
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered)
+ return;
+ Leak leak = {next_id_++, /* hit_count */ 1,
+ leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false};
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
+ leaked_objects_.push_back(obj);
+ }
+ }
+}
+
+static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
+ if (leak1.is_directly_leaked == leak2.is_directly_leaked)
+ return leak1.total_size > leak2.total_size;
+ else
+ return leak1.is_directly_leaked;
+}
+
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
+ CHECK(leaks_.size() <= kMaxLeaksConsidered);
+ Printf("\n");
+ if (leaks_.size() == kMaxLeaksConsidered)
+ Printf(
+ "Too many leaks! Only the first %zu leaks encountered will be "
+ "reported.\n",
+ kMaxLeaksConsidered);
+
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
+ Sort(leaks_.data(), leaks_.size(), &LeakComparator);
+ uptr leaks_reported = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].is_suppressed)
+ continue;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report)
+ break;
+ }
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
+ Printf("Omitting %zu more leak(s).\n", remaining);
+ }
+}
+
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.Default());
+
+ CHECK(leaks_[index].stack_trace_id);
+ StackDepotGet(leaks_[index].stack_trace_id).Print();
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
+void LeakReport::PrintSummary() {
+ CHECK(leaks_.size() <= kMaxLeaksConsidered);
+ uptr bytes = 0, allocations = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].is_suppressed)
+ continue;
+ bytes += leaks_[i].total_size;
+ allocations += leaks_[i].hit_count;
+ }
+ InternalScopedString summary;
+ summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
+ ReportErrorSummary(summary.data());
+}
+
+uptr LeakReport::ApplySuppressions() {
+ LeakSuppressionContext *suppressions = GetSuppressionContext();
+ uptr new_suppressions = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
+ leaks_[i].total_size)) {
+ leaks_[i].is_suppressed = true;
+ ++new_suppressions;
+ }
+ }
+ return new_suppressions;
+}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed)
+ result++;
+ return result;
+}
+
+uptr LeakReport::IndirectUnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
+ result++;
+ return result;
+}
+
+} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
+namespace __lsan {
+void InitCommonLsan() {}
+void DoLeakCheck() {}
+void DoRecoverableLeakCheckVoid() {}
+void DisableInThisThread() {}
+void EnableInThisThread() {}
+} // namespace __lsan
+#endif // CAN_SANITIZE_LEAKS
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_ignore_object(const void *p) {
+#if CAN_SANITIZE_LEAKS
+ if (!common_flags()->detect_leaks)
+ return;
+ // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
+ // locked.
+ Lock l(&global_mutex);
+ IgnoreObjectResult res = IgnoreObject(p);
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1,
+ "__lsan_ignore_object(): "
+ "heap object at %p is already being ignored\n",
+ p);
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
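+
+// Illustrative client usage (not part of this file): an intentionally leaked
+// object can be excluded from reports:
+//   void *p = malloc(16);
+//   __lsan_ignore_object(p);  // LSan will no longer report p as leaked.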
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ VReport(1, "Registered root region at %p of size %zu\n", begin, size);
+ uptr b = reinterpret_cast<uptr>(begin);
+ uptr e = b + size;
+ CHECK_LT(b, e);
+
+ Lock l(&global_mutex);
+ ++GetRootRegionsLocked()[{b, e}];
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ uptr b = reinterpret_cast<uptr>(begin);
+ uptr e = b + size;
+ CHECK_LT(b, e);
+ VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
+
+ {
+ Lock l(&global_mutex);
+ if (auto *f = GetRootRegionsLocked().find({b, e})) {
+ if (--(f->second) == 0)
+ GetRootRegionsLocked().erase(f);
+ return;
+ }
+ }
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %zu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+#endif // CAN_SANITIZE_LEAKS
+}
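+
+// Illustrative client usage (not part of this file): pin a custom region into
+// the root set so pointers stored there keep their targets reachable:
+//   static char arena[1 << 20];
+//   __lsan_register_root_region(arena, sizeof(arena));
+//   ... store heap pointers in arena ...
+//   __lsan_unregister_root_region(arena, sizeof(arena));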
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_disable() {
+#if CAN_SANITIZE_LEAKS
+ __lsan::DisableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_enable() {
+#if CAN_SANITIZE_LEAKS
+ __lsan::EnableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_do_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ __lsan::DoLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+ return 0;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
+ return "";
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
+ return 0;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
+ return "";
+}
+#endif
+} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
new file mode 100644
index 000000000000..c598b6210587
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
@@ -0,0 +1,350 @@
+//=-- lsan_common.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private LSan header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_COMMON_H
+#define LSAN_COMMON_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_range.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+// LeakSanitizer relies on some of glibc's internals (e.g. the TLS machinery)
+// on Linux. Also, LSan copes poorly with 32-bit architectures, because their
+// "small" (4-byte) pointer size leads to a high false-negative ratio on large
+// leaks. We still want to support it on some 32-bit arches (e.g. x86), see
+// https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function as well as (probably) adjust the TLS machinery for
+// the new architecture inside the sanitizer library.
+// Exclude leak detection on 32-bit ARM for Android because `__aeabi_read_tp`
+// is missing there, which caused a link error.
+#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
+# define CAN_SANITIZE_LEAKS 0
+#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390x__))
+# define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
+# define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_RISCV64 && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
+# define CAN_SANITIZE_LEAKS 1
+#else
+# define CAN_SANITIZE_LEAKS 0
+#endif
+
+namespace __sanitizer {
+class FlagParser;
+class ThreadRegistry;
+class ThreadContextBase;
+struct DTLS;
+}
+
+// This section defines function and class prototypes which must be implemented
+// by the parent tool linking in LSan. There are implementations provided by the
+// LSan library which will be linked in when LSan is used as a standalone tool.
+namespace __lsan {
+
+// Chunk tags.
+enum ChunkTag {
+ kDirectlyLeaked = 0, // default
+ kIndirectlyLeaked = 1,
+ kReachable = 2,
+ kIgnored = 3
+};
+
+enum IgnoreObjectResult {
+ kIgnoreObjectSuccess,
+ kIgnoreObjectAlreadyIgnored,
+ kIgnoreObjectInvalid
+};
+
+//// --------------------------------------------------------------------------
+//// Poisoning prototypes.
+//// --------------------------------------------------------------------------
+
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
+
+//// --------------------------------------------------------------------------
+//// Thread prototypes.
+//// --------------------------------------------------------------------------
+
+// Wrappers for ThreadRegistry access.
+void LockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+// If called from the main thread, updates the main thread's TID in the thread
+// registry. We need this to handle processes that fork() without a subsequent
+// exec(), which invalidates the recorded TID. To update it, we must call
+// gettid() from the main thread. Our solution is to call this function before
+// leak checking and also before every call to pthread_create() (to handle cases
+// where leak checking is initiated from a non-main thread).
+void EnsureMainThreadIDIsCorrect();
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls);
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
+
+//// --------------------------------------------------------------------------
+//// Allocator prototypes.
+//// --------------------------------------------------------------------------
+
+// Wrappers for allocator's ForceLock()/ForceUnlock().
+void LockAllocator();
+void UnlockAllocator();
+
+// Lock/unlock the global mutex.
+void LockGlobal();
+void UnlockGlobal();
+
+// Returns the address range occupied by the global allocator object.
+void GetAllocatorGlobalRange(uptr *begin, uptr *end);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
+// Returns user-visible address for chunk. If memory tagging is used this
+// function will return the tagged address.
+uptr GetUserAddr(uptr chunk);
+
+// Wrapper for chunk metadata operations.
+class LsanMetadata {
+ public:
+ // Constructor accepts address of user-visible chunk.
+ explicit LsanMetadata(uptr chunk);
+ bool allocated() const;
+ ChunkTag tag() const;
+ void set_tag(ChunkTag value);
+ uptr requested_size() const;
+ u32 stack_trace_id() const;
+
+ private:
+ void *metadata_;
+};
+
+// Iterate over all existing chunks. Allocator must be locked.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+
+// Helper for __lsan_ignore_object().
+IgnoreObjectResult IgnoreObject(const void *p);
+
+// The rest of the LSan interface, which is implemented by the LSan library.
+
+struct ScopedStopTheWorldLock {
+ ScopedStopTheWorldLock() {
+ LockThreads();
+ LockAllocator();
+ }
+
+ ~ScopedStopTheWorldLock() {
+ UnlockAllocator();
+ UnlockThreads();
+ }
+
+ ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
+ ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
+};
+
+struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+ void SetDefaults();
+ uptr pointer_alignment() const {
+ return use_unaligned ? 1 : sizeof(uptr);
+ }
+};
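+
+// As an illustration, the LSAN_FLAG(bool, use_unaligned, false, ...) entry in
+// lsan_flags.inc expands to a `bool use_unaligned;` member of this struct.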
+
+extern Flags lsan_flags;
+inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
+
+struct LeakedChunk {
+ uptr chunk;
+ u32 stack_trace_id;
+ uptr leaked_size;
+ ChunkTag tag;
+};
+
+using LeakedChunks = InternalMmapVector<LeakedChunk>;
+
+struct Leak {
+ u32 id;
+ uptr hit_count;
+ uptr total_size;
+ u32 stack_trace_id;
+ bool is_directly_leaked;
+ bool is_suppressed;
+};
+
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
+// Aggregates leaks by stack trace prefix.
+class LeakReport {
+ public:
+ LeakReport() {}
+ void AddLeakedChunks(const LeakedChunks &chunks);
+ void ReportTopLeaks(uptr max_leaks);
+ void PrintSummary();
+ uptr ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+ uptr IndirectUnsuppressedLeakCount();
+
+ private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_ = 0;
+ InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
+};
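+
+// Typical flow (illustrative; the exact sequence lives in the leak checker):
+// fill the report from the chunks found unreachable, apply suppressions, then
+// print whatever remains:
+//   LeakReport report;
+//   report.AddLeakedChunks(leaks);
+//   report.ApplySuppressions();
+//   if (report.UnsuppressedLeakCount()) {
+//     report.ReportTopLeaks(flags()->max_leaks);
+//     report.PrintSummary();
+//   }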
+
+typedef InternalMmapVector<uptr> Frontier;
+
+// Platform-specific functions.
+void InitializePlatformSpecificModules();
+void ProcessGlobalRegions(Frontier *frontier);
+void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
+// this Frontier vector before the StopTheWorldCallback actually runs.
+// This is used when the OS has a unified callback API for suspending
+// threads and enumerating roots.
+struct CheckForLeaksParam {
+ Frontier frontier;
+ LeakedChunks leaks;
+ tid_t caller_tid;
+ uptr caller_sp;
+ bool success = false;
+};
+
+using Region = Range;
+
+bool HasRootRegions();
+void ScanRootRegions(Frontier *frontier,
+ const InternalMmapVectorNoCtor<Region> &region);
+// Run stoptheworld while holding any platform-specific locks, as well as the
+// allocator and thread registry locks.
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam* argument);
+
+void ScanRangeForPointers(uptr begin, uptr end,
+ Frontier *frontier,
+ const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+ Frontier *frontier);
+
+// Functions called from the parent tool.
+const char *MaybeCallLsanDefaultOptions();
+void InitCommonLsan();
+void DoLeakCheck();
+void DoRecoverableLeakCheckVoid();
+void DisableCounterUnderflow();
+bool DisabledInThisThread();
+
+// Used to implement __lsan::ScopedDisabler.
+void DisableInThisThread();
+void EnableInThisThread();
+// Can be used to ignore memory allocated by an intercepted
+// function.
+struct ScopedInterceptorDisabler {
+ ScopedInterceptorDisabler() { DisableInThisThread(); }
+ ~ScopedInterceptorDisabler() { EnableInThisThread(); }
+};
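+
+// Illustrative use (mirroring the strerror interceptor in
+// lsan_interceptors.cpp): hide allocations made inside an intercepted call
+// from leak reports:
+//   INTERCEPTOR(char *, strerror, int errnum) {
+//     __lsan::ScopedInterceptorDisabler disabler;
+//     return REAL(strerror)(errnum);
+//   }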
+
+// According to the Itanium C++ ABI, an array cookie is a single word
+// containing the size of the allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to the ARM C++ ABI, an array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
+// Special case for "new T[0]" where T is a type with a destructor.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and hand out a pointer to the end of the allocated chunk. The actual cookie
+// layout varies between platforms according to their C++ ABI implementation.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
+}
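+
+// Illustrative layout (Itanium ABI, 64-bit): for `new T[0]` where T has a
+// destructor, the chunk is a single word holding the element count (0), and
+// the pointer handed back to the program points one word past the chunk
+// start:
+//
+//   chunk_beg -> [ 0 ]      addr == chunk_beg + sizeof(uptr)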
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
+// Return true if LSan has finished leak checking and reported leaks.
+bool HasReportedLeaks();
+
+// Run platform-specific leak handlers.
+void HandleLeaks();
+
+} // namespace __lsan
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *p, __lsan::uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *p, __lsan::uptr size);
+
+} // extern "C"
+
+#endif // LSAN_COMMON_H
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
new file mode 100644
index 000000000000..cb3fe1f859f7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -0,0 +1,168 @@
+//=-- lsan_common_fuchsia.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Fuchsia-specific code.
+//
+//===---------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
+#include <zircon/sanitizer.h>
+
+#include "lsan_allocator.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stoptheworld_fuchsia.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+// Ensure that the Zircon system ABI is linked in.
+#pragma comment(lib, "zircon")
+
+namespace __lsan {
+
+void InitializePlatformSpecificModules() {}
+
+LoadedModule *GetLinker() { return nullptr; }
+
+__attribute__((tls_model("initial-exec"))) THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
+// There is nothing left to do after the globals callbacks.
+void ProcessGlobalRegions(Frontier *frontier) {}
+
+// Nothing to do here.
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+// On Fuchsia, we can intercept _Exit gracefully, and return a failing exit
+// code if required at that point. Calling Die() here is undefined
+// behavior and causes rare race conditions.
+void HandleLeaks() {}
+
+// This is defined differently in asan_fuchsia.cpp and lsan_fuchsia.cpp.
+bool UseExitcodeOnLeak();
+
+int ExitHook(int status) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (UseExitcodeOnLeak())
+ DoLeakCheck();
+ else
+ DoRecoverableLeakCheckVoid();
+ }
+ return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
+}
+
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam *argument) {
+ ScopedStopTheWorldLock lock;
+
+ struct Params {
+ InternalMmapVector<uptr> allocator_caches;
+ StopTheWorldCallback callback;
+ CheckForLeaksParam *argument;
+ } params = {{}, callback, argument};
+
+ // Callback from libc for globals (data/bss modulo relro), when enabled.
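+ // (The unary '+' converts each capture-less lambda below to a plain
+ // function pointer suitable for passing to __sanitizer_memory_snapshot.)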
+ auto globals = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ ScanGlobalRange(begin, end, &params->argument->frontier);
+ };
+
+ // Callback from libc for thread stacks.
+ auto stacks = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ ScanRangeForPointers(begin, end, &params->argument->frontier, "STACK",
+ kReachable);
+ };
+
+ // Callback from libc for thread registers.
+ auto registers = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ ScanRangeForPointers(begin, end, &params->argument->frontier, "REGISTERS",
+ kReachable);
+ };
+
+ if (flags()->use_tls) {
+ // Collect the allocator cache range from each thread so these
+ // can all be excluded from the reported TLS ranges.
+ GetAllThreadAllocatorCachesLocked(&params.allocator_caches);
+ __sanitizer::Sort(params.allocator_caches.data(),
+ params.allocator_caches.size());
+ }
+
+ // Callback from libc for TLS regions. This includes thread_local
+ // variables as well as C11 tss_set and POSIX pthread_setspecific.
+ auto tls = +[](void *chunk, size_t size, void *data) {
+ auto params = static_cast<const Params *>(data);
+ uptr begin = reinterpret_cast<uptr>(chunk);
+ uptr end = begin + size;
+ auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
+ if (i < params->allocator_caches.size() &&
+ params->allocator_caches[i] >= begin &&
+ params->allocator_caches[i] <= end &&
+ end - params->allocator_caches[i] >= sizeof(AllocatorCache)) {
+ // Split the range in two and omit the allocator cache within.
+ ScanRangeForPointers(begin, params->allocator_caches[i],
+ &params->argument->frontier, "TLS", kReachable);
+ uptr begin2 = params->allocator_caches[i] + sizeof(AllocatorCache);
+ ScanRangeForPointers(begin2, end, &params->argument->frontier, "TLS",
+ kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, &params->argument->frontier, "TLS",
+ kReachable);
+ }
+ };
+
+ // This stops the world and then makes callbacks for various memory regions.
+ // The final callback is the last thing before the world starts up again.
+ __sanitizer_memory_snapshot(
+ flags()->use_globals ? globals : nullptr,
+ flags()->use_stacks ? stacks : nullptr,
+ flags()->use_registers ? registers : nullptr,
+ flags()->use_tls ? tls : nullptr,
+ [](zx_status_t, void *data) {
+ auto params = static_cast<const Params *>(data);
+
+ // We don't use the thread registry at all for enumerating the threads
+ // and their stacks, registers, and TLS regions. So use it separately
+ // just for the allocator cache, and to call ScanExtraStackRanges,
+ // which ASan needs.
+ if (flags()->use_stacks) {
+ InternalMmapVector<Range> ranges;
+ GetThreadExtraStackRangesLocked(&ranges);
+ ScanExtraStackRanges(ranges, &params->argument->frontier);
+ }
+ params->callback(SuspendedThreadsListFuchsia(), params->argument);
+ },
+ &params);
+}
+
+} // namespace __lsan
+
+// This is declared (in extern "C") by <zircon/sanitizer.h>.
+// _Exit calls this directly to intercept and change the status value.
+int __sanitizer_process_exit_hook(int status) {
+ return __lsan::ExitHook(status);
+}
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
new file mode 100644
index 000000000000..7a0b2f038be0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
@@ -0,0 +1,147 @@
+//=-- lsan_common_linux.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Linux/NetBSD-specific
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
+#include <link.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_getauxval.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __lsan {
+
+static const char kLinkerName[] = "ld";
+
+alignas(64) static char linker_placeholder[sizeof(LoadedModule)];
+static LoadedModule *linker = nullptr;
+
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+ return module.base_address() == getauxval(AT_BASE);
+#else
+ return LibraryNameIs(module.full_name(), kLinkerName);
+#endif // SANITIZER_USE_GETAUXVAL
+}
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
+void InitializePlatformSpecificModules() {
+ ListOfModules modules;
+ modules.init();
+ for (LoadedModule &module : modules) {
+ if (!IsLinker(module))
+ continue;
+ if (linker == nullptr) {
+ linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
+ *linker = module;
+ module = LoadedModule();
+ } else {
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS and other allocations originating from linker might be "
+ "falsely reported as leaks.\n", kLinkerName);
+ linker->clear();
+ linker = nullptr;
+ return;
+ }
+ }
+ if (linker == nullptr) {
+ VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
+ "allocations originating from linker might be falsely reported "
+ "as leaks.\n");
+ }
+}
+
+static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ Frontier *frontier = reinterpret_cast<Frontier *>(data);
+ for (uptr j = 0; j < info->dlpi_phnum; j++) {
+ const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
+ // We're looking for .data and .bss sections, which reside in writeable,
+ // loadable segments.
+ if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
+ (phdr->p_memsz == 0))
+ continue;
+ uptr begin = info->dlpi_addr + phdr->p_vaddr;
+ uptr end = begin + phdr->p_memsz;
+ ScanGlobalRange(begin, end, frontier);
+ }
+ return 0;
+}
+
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ if (!flags()->use_globals) return;
+ dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
+}
+
+LoadedModule *GetLinker() { return linker; }
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+struct DoStopTheWorldParam {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on Linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+ if (common_flags()->exitcode) Die();
+}
+
+static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info,
+ size_t size, void *data) {
+ ScopedStopTheWorldLock lock;
+ DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+ StopTheWorld(param->callback, param->argument);
+ return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam *argument) {
+ DoStopTheWorldParam param = {callback, argument};
+ dl_iterate_phdr(LockStuffAndStopTheWorldCallback, &param);
+}
+
+} // namespace __lsan
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
new file mode 100644
index 000000000000..4e5198979b95
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -0,0 +1,239 @@
+//=-- lsan_common_mac.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_APPLE
+
+# include <mach/mach.h>
+# include <mach/vm_statistics.h>
+# include <pthread.h>
+
+# include "lsan_allocator.h"
+# include "sanitizer_common/sanitizer_allocator_internal.h"
+namespace __lsan {
+
+class ThreadContextLsanBase;
+
+enum class SeenRegion {
+ None = 0,
+ AllocOnce = 1 << 0,
+ LibDispatch = 1 << 1,
+ Foundation = 1 << 2,
+ All = AllocOnce | LibDispatch | Foundation
+};
+
+inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
+ return static_cast<SeenRegion>(static_cast<int>(left) |
+ static_cast<int>(right));
+}
+
+inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
+ left = left | right;
+ return left;
+}
+
+struct RegionScanState {
+ SeenRegion seen_regions = SeenRegion::None;
+ bool in_libdispatch = false;
+};
+
+typedef struct {
+ int disable_counter;
+ ThreadContextLsanBase *current_thread;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread,
+// so we can't destroy it until it's been used and reset.
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread)
+ pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread = nullptr;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+ThreadContextLsanBase *GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->current_thread : nullptr;
+}
+
+void SetCurrentThread(ThreadContextLsanBase *tctx) {
+ get_tls_val(true)->current_thread = tctx;
+}
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+LoadedModule *GetLinker() { return nullptr; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {}
+
+// Sections which can't contain global pointers. This list errs on the
+// side of caution to avoid false positives, at the expense of performance.
+//
+// Other potentially safe sections include:
+// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
+//
+// Sections which definitely cannot be included here are:
+// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
+// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
+static const char *kSkippedSecNames[] = {
+ "__cfstring", "__la_symbol_ptr", "__mod_init_func",
+ "__mod_term_func", "__nl_symbol_ptr", "__objc_classlist",
+ "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
+ "__objc_protolist", "__objc_selrefs", "__objc_superrefs"};
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ for (auto name : kSkippedSecNames)
+ CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
+
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ // Even when global scanning is disabled, we still need to scan
+ // system libraries for stashed pointers
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+ // Sections storing global variables are writable and non-executable
+ if (range.executable || !range.writable) continue;
+
+ // Skip sections known not to contain global pointers (see list above).
+ bool is_skipped = false;
+ for (auto name : kSkippedSecNames)
+ is_skipped = is_skipped || !internal_strcmp(range.name, name);
+ if (is_skipped) continue;
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+
+ InternalMmapVector<Region> mapped_regions;
+ bool use_root_regions = flags()->use_root_regions && HasRootRegions();
+
+ RegionScanState scan_state;
+ while (err == KERN_SUCCESS) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
+ struct vm_region_submap_info_64 info;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+
+ uptr end_address = address + size;
+ if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+ // libxpc stashes some pointers in the Kernel Alloc Once page,
+ // so make sure not to report those as leaks.
+ scan_state.seen_regions |= SeenRegion::AllocOnce;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
+ // Objective-C block trampolines use the Foundation region.
+ scan_state.seen_regions |= SeenRegion::Foundation;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
+ // Dispatch continuations use the libdispatch region. Empirically, there
+ // can be more than one region with this tag, so we'll optimistically
+ // assume that they're contiguous. Otherwise, we would need to scan every
+ // region to ensure we find them all.
+ scan_state.in_libdispatch = true;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (scan_state.in_libdispatch) {
+ scan_state.seen_regions |= SeenRegion::LibDispatch;
+ scan_state.in_libdispatch = false;
+ }
+
+ // Recursing over the full memory map is very slow, break out
+ // early if we don't need the full iteration.
+ if (scan_state.seen_regions == SeenRegion::All && !use_root_regions) {
+ break;
+ }
+
+ // This additional root region scan is required on Darwin in order to
+ // detect root regions contained within mmap'd memory regions, because
+ // the Darwin implementation of sanitizer_procmaps traverses images
+ // as loaded by dyld, and not the complete set of all memory regions.
+ //
+ // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+ // behavior as sanitizer_procmaps_linux and traverses all memory regions
+ if (use_root_regions && (info.protection & kProtectionRead))
+ mapped_regions.push_back({address, end_address});
+
+ address = end_address;
+ }
+ ScanRootRegions(frontier, mapped_regions);
+}
+
+// On darwin, we can intercept _exit gracefully, and return a failing exit code
+// if required at that point. Calling Die() here is undefined behavior and
+// causes rare race conditions.
+void HandleLeaks() {}
+
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+ CheckForLeaksParam *argument) {
+ ScopedStopTheWorldLock lock;
+ StopTheWorld(callback, argument);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_flags.inc b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_flags.inc
new file mode 100644
index 000000000000..9350f4bcdc34
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_flags.inc
@@ -0,0 +1,46 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+ "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+ int, resolution, 0,
+ "Aggregate two objects into one leak if this many stack frames match. If "
+ "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0,
+ "The maximum number of leaks to report. If zero, all leaks are reported.")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+ "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+ "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+ "Root set: include regions added via __lsan_register_root_region().")
+LSAN_FLAG(bool, use_ld_allocations, true,
+ "Root set: mark as reachable all allocations made from dynamic "
+ "linker. This was the old way to handle dynamic TLS, and will "
+ "be removed soon. Do not use this flag.")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+ "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging")
+LSAN_FLAG(bool, log_threads, false, "Debug logging")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
new file mode 100644
index 000000000000..ba59bc9b71e3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
@@ -0,0 +1,131 @@
+//=-- lsan_fuchsia.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code specific to Fuchsia.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_FUCHSIA
+#include <zircon/sanitizer.h>
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+
+using namespace __lsan;
+
+namespace __lsan {
+
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {}
+
+ThreadContext::ThreadContext(int tid) : ThreadContextLsanBase(tid) {}
+
+struct OnCreatedArgs {
+ uptr stack_begin, stack_end;
+};
+
+// On Fuchsia, the stack bounds of a new thread are available before
+// the thread itself has started running.
+void ThreadContext::OnCreated(void *arg) {
+ // Stack bounds passed through from __sanitizer_before_thread_create_hook
+ // or InitializeMainThread.
+ auto args = reinterpret_cast<const OnCreatedArgs *>(arg);
+ stack_begin_ = args->stack_begin;
+ stack_end_ = args->stack_end;
+}
+
+struct OnStartedArgs {
+ uptr cache_begin, cache_end;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ ThreadContextLsanBase::OnStarted(arg);
+ auto args = reinterpret_cast<const OnStartedArgs *>(arg);
+ cache_begin_ = args->cache_begin;
+ cache_end_ = args->cache_end;
+}
+
+void ThreadStart(u32 tid) {
+ OnStartedArgs args;
+ GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+ CHECK_EQ(args.cache_end - args.cache_begin, sizeof(AllocatorCache));
+ ThreadContextLsanBase::ThreadStart(tid, GetTid(), ThreadType::Regular, &args);
+}
+
+void InitializeMainThread() {
+ OnCreatedArgs args;
+ __sanitizer::GetThreadStackTopAndBottom(true, &args.stack_end,
+ &args.stack_begin);
+ u32 tid = ThreadCreate(kMainTid, true, &args);
+ CHECK_EQ(tid, 0);
+ ThreadStart(tid);
+}
+
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *arg) {
+ auto ctx = static_cast<ThreadContext *>(tctx);
+ static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin());
+ },
+ caches);
+}
+
+// On Fuchsia, leak detection is done by a special hook after atexit hooks.
+// So this doesn't install any atexit hook like on other platforms.
+void InstallAtExitCheckLeaks() {}
+void InstallAtForkHandler() {}
+
+// ASan defines this to check its `halt_on_error` flag.
+bool UseExitcodeOnLeak() { return true; }
+
+} // namespace __lsan
+
+// These are declared (in extern "C") by <zircon/sanitizer.h>.
+// The system runtime will call our definitions directly.
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+ const char *name, void *stack_base,
+ size_t stack_size) {
+ ENSURE_LSAN_INITED;
+ EnsureMainThreadIDIsCorrect();
+ OnCreatedArgs args;
+ args.stack_begin = reinterpret_cast<uptr>(stack_base);
+ args.stack_end = args.stack_begin + stack_size;
+ u32 parent_tid = GetCurrentThreadId();
+ u32 tid = ThreadCreate(parent_tid, detached, &args);
+ return reinterpret_cast<void *>(static_cast<uptr>(tid));
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by __sanitizer_before_thread_create_hook (above).
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+ u32 tid = static_cast<u32>(reinterpret_cast<uptr>(hook));
+ // On success, there is nothing to do here.
+ if (error != thrd_success) {
+ // Clean up the thread registry for the thread creation that didn't happen.
+ GetLsanThreadRegistryLocked()->FinishThread(tid);
+ }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by __sanitizer_before_thread_create_hook (above).
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+ u32 tid = static_cast<u32>(reinterpret_cast<uptr>(hook));
+ ThreadStart(tid);
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by __sanitizer_before_thread_create_hook (above).
+// All per-thread destructors have already been called.
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) { ThreadFinish(); }
+
+#endif // SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h
new file mode 100644
index 000000000000..e730d8f25f21
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h
@@ -0,0 +1,35 @@
+//=-- lsan_fuchsia.h ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code specific to Fuchsia.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LSAN_FUCHSIA_H
+#define LSAN_FUCHSIA_H
+
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+#error "lsan_fuchsia.h is used only on Fuchsia systems (SANITIZER_FUCHSIA)"
+#endif
+
+namespace __lsan {
+
+class ThreadContext final : public ThreadContextLsanBase {
+ public:
+ explicit ThreadContext(int tid);
+ void OnCreated(void *arg) override;
+ void OnStarted(void *arg) override;
+};
+
+} // namespace __lsan
+
+#endif // LSAN_FUCHSIA_H
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
new file mode 100644
index 000000000000..efbf2fdfb0ab
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -0,0 +1,592 @@
+//=-- lsan_interceptors.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Interceptors for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+#include <stddef.h>
+
+using namespace __lsan;
+
+extern "C" {
+int pthread_attr_init(void *attr);
+int pthread_attr_destroy(void *attr);
+int pthread_attr_getdetachstate(void *attr, int *v);
+int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+int pthread_setspecific(unsigned key, const void *v);
+}
+
+struct DlsymAlloc : DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return lsan_init_is_running; }
+ static void OnAllocate(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ // Suppress leaks from dlerror(). Previously, a dlsym hack on a global array
+ // was used by LeakSanitizer as a root region.
+ __lsan_register_root_region(ptr, size);
+#endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+#endif
+ }
+};
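+
+// Net effect (an illustrative reading of the hooks above): a malloc() that
+// arrives while lsan_init is still running is served by DlsymAlloc, and the
+// returned block is registered as a root region, so pointers stashed in it
+// (e.g. by dlerror()) are treated as reachable rather than leaked.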
+
+///// Malloc/free interceptors. /////
+
+namespace std {
+ struct nothrow_t;
+ enum class align_val_t: size_t;
+}
+
+#if !SANITIZER_APPLE
+INTERCEPTOR(void*, malloc, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_malloc(size, stack);
+}
+
+INTERCEPTOR(void, free, void *p) {
+ if (UNLIKELY(!p))
+ return;
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
+ ENSURE_LSAN_INITED;
+ lsan_free(p);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_calloc(nmemb, size, stack);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_realloc(ptr, size, stack);
+}
+
+INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_reallocarray(q, nmemb, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_posix_memalign(memptr, alignment, size, stack);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_valloc(size, stack);
+}
+#endif // !SANITIZER_APPLE
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_memalign(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT___LIBC_MEMALIGN
+INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ void *res = lsan_memalign(alignment, size, stack);
+ DTLS_on_libc_memalign(res, size);
+ return res;
+}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT___LIBC_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_aligned_alloc(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ ENSURE_LSAN_INITED;
+ return GetMallocUsableSize(ptr);
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+struct fake_mallinfo {
+ int x[10];
+};
+
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+ struct fake_mallinfo res;
+ internal_memset(&res, 0, sizeof(res));
+ return res;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+ return 0;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLINFO
+#define LSAN_MAYBE_INTERCEPT_MALLOPT
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR(void*, pvalloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_pvalloc(size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_PVALLOC
+#endif // SANITIZER_INTERCEPT_PVALLOC
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAP(free));
+#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define LSAN_MAYBE_INTERCEPT_CFREE
+#endif // SANITIZER_INTERCEPT_CFREE
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_malloc(size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_memalign((uptr)align, size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+
+#define OPERATOR_DELETE_BODY\
+ ENSURE_LSAN_INITED;\
+ lsan_free(ptr);
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the runtime
+// dylib, and the main executable will depend on both the runtime dylib and
+// libstdc++, each of which has its own implementation of new and delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_APPLE
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+#else // SANITIZER_APPLE
+
+INTERCEPTOR(void *, _Znwm, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _Znam, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+
+#endif // !SANITIZER_APPLE
+
+
+///// Thread initialization and finalization. /////
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD && !SANITIZER_FUCHSIA
+static unsigned g_thread_finalize_key;
+
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+ Report("LeakSanitizer: failed to set thread key.\n");
+ Die();
+ }
+ return;
+ }
+ ThreadFinish();
+}
+#endif
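+// Illustrative sketch (not part of the runtime): the re-arming above relies
+// on POSIX re-running a key's destructor on thread exit whenever it leaves a
+// non-NULL value behind, up to PTHREAD_DESTRUCTOR_ITERATIONS passes. A
+// standalone equivalent, with a hypothetical cleanup() in place of
+// ThreadFinish() and the key armed the way ThreadStartFunc() does below:
+//
+//   static pthread_key_t key;
+//   static void dtor(void *v) {
+//     uintptr_t left = (uintptr_t)v;
+//     if (left > 1) {
+//       pthread_setspecific(key, (void *)(left - 1));  // run once more later
+//       return;
+//     }
+//     cleanup();  // last pass: do the real work
+//   }
+//   // At startup: pthread_key_create(&key, dtor); per thread:
+//   // pthread_setspecific(key, (void *)PTHREAD_DESTRUCTOR_ITERATIONS);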
+
+#if SANITIZER_NETBSD
+INTERCEPTOR(void, _lwp_exit) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(_lwp_exit)();
+}
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_THR_EXIT
+INTERCEPTOR(void, thr_exit, tid_t *state) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(thr_exit)(state);
+}
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT___CXA_ATEXIT
+INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
+ void *dso_handle) {
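+  // Registering a handler can allocate inside libc; perform the registration
+  // with the disabler held so that memory is not flagged as leaked.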
+ __lsan::ScopedInterceptorDisabler disabler;
+ return REAL(__cxa_atexit)(func, arg, dso_handle);
+}
+#define LSAN_MAYBE_INTERCEPT___CXA_ATEXIT INTERCEPT_FUNCTION(__cxa_atexit)
+#else
+#define LSAN_MAYBE_INTERCEPT___CXA_ATEXIT
+#endif
+
+#if SANITIZER_INTERCEPT_ATEXIT
+INTERCEPTOR(int, atexit, void (*f)()) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ return REAL(__cxa_atexit)((void (*)(void *a))f, 0, 0);
+}
+#define LSAN_MAYBE_INTERCEPT_ATEXIT INTERCEPT_FUNCTION(atexit)
+#else
+#define LSAN_MAYBE_INTERCEPT_ATEXIT
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATFORK
+extern "C" {
+extern int _pthread_atfork(void (*prepare)(), void (*parent)(),
+ void (*child)());
+}
+
+INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(),
+ void (*child)()) {
+ __lsan::ScopedInterceptorDisabler disabler;
+  // REAL(pthread_atfork) cannot be called here because of symbol
+  // indirections, at least on NetBSD.
+ return _pthread_atfork(prepare, parent, child);
+}
+#define LSAN_MAYBE_INTERCEPT_PTHREAD_ATFORK INTERCEPT_FUNCTION(pthread_atfork)
+#else
+#define LSAN_MAYBE_INTERCEPT_PTHREAD_ATFORK
+#endif
+
+#if SANITIZER_INTERCEPT_STRERROR
+INTERCEPTOR(char *, strerror, int errnum) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ return REAL(strerror)(errnum);
+}
+#define LSAN_MAYBE_INTERCEPT_STRERROR INTERCEPT_FUNCTION(strerror)
+#else
+#define LSAN_MAYBE_INTERCEPT_STRERROR
+#endif
+
+#if SANITIZER_POSIX
+
+template <bool Detached>
+static void *ThreadStartFunc(void *arg) {
+ u32 parent_tid = (uptr)arg;
+ uptr tid = ThreadCreate(parent_tid, Detached);
+ // Wait until the last iteration to maximize the chance that we are the last
+ // destructor to run.
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_setspecific(g_thread_finalize_key,
+ (void*)GetPthreadDestructorIterations())) {
+ Report("LeakSanitizer: failed to set thread key.\n");
+ Die();
+ }
+#endif
+ ThreadStart(tid, GetTid());
+ auto self = GetThreadSelf();
+ auto args = GetThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ GetThreadArgRetval().Finish(self, retval);
+ return retval;
+}
+
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+ void *(*callback)(void *), void *param) {
+ ENSURE_LSAN_INITED;
+ EnsureMainThreadIDIsCorrect();
+
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+
+ __sanitizer_pthread_attr_t myattr;
+ if (!attr) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ AdjustStackSize(attr);
+ uptr this_tid = GetCurrentThreadId();
+ int result;
+ {
+ // Ignore all allocations made by pthread_create: thread stack/TLS may be
+ // stored by pthread for future reuse even after thread destruction, and
+ // the linked list it's stored in doesn't even hold valid pointers to the
+ // objects, the latter are calculated by obscure pointer arithmetic.
+ ScopedInterceptorDisabler disabler;
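+    // ThreadArgRetval::Create records {callback, param} keyed by the new
+    // thread's handle; the lambda reports that handle back, or 0 on failure.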
+ GetThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(
+ th, attr, detached ? ThreadStartFunc<true> : ThreadStartFunc<false>,
+ (void *)this_tid);
+ return result ? 0 : *(uptr *)(th);
+ });
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ GetThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(void, pthread_exit, void *retval) {
+ GetThreadArgRetval().Finish(GetThreadSelf(), retval);
+ REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_INTERCEPT_TRYJOIN
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+# define LSAN_MAYBE_INTERCEPT_TRYJOIN INTERCEPT_FUNCTION(pthread_tryjoin_np)
+# else
+# define LSAN_MAYBE_INTERCEPT_TRYJOIN
+# endif // SANITIZER_INTERCEPT_TRYJOIN
+
+# if SANITIZER_INTERCEPT_TIMEDJOIN
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
+}
+# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN \
+ INTERCEPT_FUNCTION(pthread_timedjoin_np)
+# else
+# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN
+# endif // SANITIZER_INTERCEPT_TIMEDJOIN
+
+DEFINE_INTERNAL_PTHREAD_FUNCTIONS
+
+INTERCEPTOR(void, _exit, int status) {
+ if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
+ REAL(_exit)(status);
+}
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_LSAN_INITED
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+#endif // SANITIZER_POSIX
+
+namespace __lsan {
+
+void InitializeInterceptors() {
+ // Fuchsia doesn't use interceptors that require any setup.
+#if !SANITIZER_FUCHSIA
+ __interception::DoesNotSupportStaticLinking();
+ InitializeSignalInterceptors();
+
+ INTERCEPT_FUNCTION(malloc);
+ INTERCEPT_FUNCTION(free);
+ LSAN_MAYBE_INTERCEPT_CFREE;
+ INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(realloc);
+ LSAN_MAYBE_INTERCEPT_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
+ INTERCEPT_FUNCTION(posix_memalign);
+ INTERCEPT_FUNCTION(valloc);
+ LSAN_MAYBE_INTERCEPT_PVALLOC;
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+ LSAN_MAYBE_INTERCEPT_MALLINFO;
+ LSAN_MAYBE_INTERCEPT_MALLOPT;
+ INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+ LSAN_MAYBE_INTERCEPT_TIMEDJOIN;
+ LSAN_MAYBE_INTERCEPT_TRYJOIN;
+ INTERCEPT_FUNCTION(_exit);
+
+ LSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ LSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+ LSAN_MAYBE_INTERCEPT___CXA_ATEXIT;
+ LSAN_MAYBE_INTERCEPT_ATEXIT;
+ LSAN_MAYBE_INTERCEPT_PTHREAD_ATFORK;
+
+ LSAN_MAYBE_INTERCEPT_STRERROR;
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+ Report("LeakSanitizer: failed to create thread key.\n");
+ Die();
+ }
+#endif
+
+#endif // !SANITIZER_FUCHSIA
+}
+
+} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
new file mode 100644
index 000000000000..5074cee1296a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
@@ -0,0 +1,33 @@
+//=-- lsan_linux.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux/NetBSD/Fuchsia-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA
+
+# include "lsan_allocator.h"
+# include "lsan_thread.h"
+
+namespace __lsan {
+
+static THREADLOCAL ThreadContextLsanBase *current_thread = nullptr;
+ThreadContextLsanBase *GetCurrentThread() { return current_thread; }
+void SetCurrentThread(ThreadContextLsanBase *tctx) { current_thread = tctx; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
new file mode 100644
index 000000000000..990954a8b687
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
@@ -0,0 +1,190 @@
+//===-- lsan_mac.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer, a memory leak checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_APPLE
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+// dispatch_async_f()
+// dispatch_async()
+// dispatch_sync_f()
+// dispatch_sync()
+// dispatch_after_f()
+// dispatch_after()
+// dispatch_group_async_f()
+// dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
+// they can cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+// dispatch_source_set_cancel_handler_f()
+// dispatch_source_set_cancel_handler()
+// dispatch_source_set_event_handler_f()
+// dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+ if (GetCurrentThreadId() == kInvalidTid) {
+ u32 tid = ThreadCreate(parent_tid, true);
+ ThreadStart(tid, GetTid());
+ }
+}
+
+// Only for use by functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+ lsan_block_context_t *context = (lsan_block_context_t *)block;
+ VReport(2,
+ "lsan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, (void*)pthread_self());
+ lsan_register_worker_thread(context->parent_tid);
+ // Call the original dispatcher for the block.
+ context->func(context->block);
+ lsan_free(context);
+}
+
+} // namespace __lsan
+
+using namespace __lsan;
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ lsan_block_context_t *lsan_ctxt =
+ (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+ lsan_ctxt->block = ctxt;
+ lsan_ctxt->func = func;
+ lsan_ctxt->parent_tid = GetCurrentThreadId();
+ return lsan_ctxt;
+}
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
+ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
+ dispatch_function_t func) { \
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
+ return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
+ lsan_dispatch_call_block_and_release); \
+ }
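+// For illustration, INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) expands to:
+//
+//   INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
+//               dispatch_function_t func) {
+//     lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+//     return REAL(dispatch_async_f)(dq, (void *)lsan_ctxt,
+//                                   lsan_dispatch_call_block_and_release);
+//   }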
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
+ void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
+ lsan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ REAL(dispatch_group_async_f)
+ (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void (^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+ void (^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+ void (^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds,
+ void (^work)(void));
+}
+
+# define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThreadId(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
+
+INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_async)(dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
+ dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_group_async)(dg, dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_after)(when, queue, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_event_handler)(ds, lsan_block);
+}
+#endif
+
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp
new file mode 100644
index 000000000000..525c30272ccc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp
@@ -0,0 +1,59 @@
+//===-- lsan_malloc_mac.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_APPLE
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = lsan_posix_memalign(memptr, alignment, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
new file mode 100644
index 000000000000..422c29acca69
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
@@ -0,0 +1,128 @@
+//=-- lsan_posix.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code common to POSIX-like systems.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_POSIX
+# include <pthread.h>
+
+# include "lsan.h"
+# include "lsan_allocator.h"
+# include "lsan_thread.h"
+# include "sanitizer_common/sanitizer_stacktrace.h"
+# include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __lsan {
+
+ThreadContext::ThreadContext(int tid) : ThreadContextLsanBase(tid) {}
+
+struct OnStartedArgs {
+ uptr stack_begin;
+ uptr stack_end;
+ uptr cache_begin;
+ uptr cache_end;
+ uptr tls_begin;
+ uptr tls_end;
+ DTLS *dtls;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ ThreadContextLsanBase::OnStarted(arg);
+ auto args = reinterpret_cast<const OnStartedArgs *>(arg);
+ stack_begin_ = args->stack_begin;
+ stack_end_ = args->stack_end;
+ tls_begin_ = args->tls_begin;
+ tls_end_ = args->tls_end;
+ cache_begin_ = args->cache_begin;
+ cache_end_ = args->cache_end;
+ dtls_ = args->dtls;
+}
+
+void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
+ OnStartedArgs args;
+ uptr stack_size = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size,
+ &args.tls_begin, &tls_size);
+ args.stack_end = args.stack_begin + stack_size;
+ args.tls_end = args.tls_begin + tls_size;
+ GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+ args.dtls = DTLS_Get();
+ ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args);
+}
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ ThreadContext *context = static_cast<ThreadContext *>(
+ GetLsanThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id));
+ if (!context)
+ return false;
+ *stack_begin = context->stack_begin();
+ *stack_end = context->stack_end();
+ *tls_begin = context->tls_begin();
+ *tls_end = context->tls_end();
+ *cache_begin = context->cache_begin();
+ *cache_end = context->cache_end();
+ *dtls = context->dtls();
+ return true;
+}
+
+void InitializeMainThread() {
+ u32 tid = ThreadCreate(kMainTid, true);
+ CHECK_EQ(tid, kMainTid);
+ ThreadStart(tid, GetTid());
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetCurrentThreadId(), &OnStackUnwind,
+ nullptr);
+}
+
+void InstallAtExitCheckLeaks() {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
+ Atexit(DoLeakCheck);
+}
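+// Both knobs above are runtime flags, so the at-exit check can be toggled
+// without rebuilding, e.g. (illustrative shell invocation):
+//   LSAN_OPTIONS=leak_check_at_exit=0 ./a.out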
+
+static void BeforeFork() {
+ LockGlobal();
+ LockThreads();
+ LockAllocator();
+ StackDepotLockBeforeFork();
+}
+
+static void AfterFork(bool fork_child) {
+ StackDepotUnlockAfterFork(fork_child);
+ UnlockAllocator();
+ UnlockThreads();
+ UnlockGlobal();
+}
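+// The release order in AfterFork() is the exact inverse of the acquisition
+// order in BeforeFork(), keeping the lock hierarchy consistent across fork().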
+
+void InstallAtForkHandler() {
+# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE
+ return; // FIXME: Implement FutexWait.
+# endif
+ pthread_atfork(
+ &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
+ []() { AfterFork(/* fork_child= */ true); });
+}
+
+} // namespace __lsan
+
+#endif // SANITIZER_POSIX
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h
new file mode 100644
index 000000000000..b1265f233f36
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h
@@ -0,0 +1,49 @@
+//=-- lsan_posix.h -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL code common to POSIX-like systems.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LSAN_POSIX_H
+#define LSAN_POSIX_H
+
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if !SANITIZER_POSIX
+#error "lsan_posix.h is used only on POSIX-like systems (SANITIZER_POSIX)"
+#endif
+
+namespace __sanitizer {
+struct DTLS;
+}
+
+namespace __lsan {
+
+class ThreadContext final : public ThreadContextLsanBase {
+ public:
+ explicit ThreadContext(int tid);
+ void OnStarted(void *arg) override;
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
+
+ private:
+ uptr tls_begin_ = 0;
+ uptr tls_end_ = 0;
+ DTLS *dtls_ = nullptr;
+};
+
+void ThreadStart(u32 tid, tid_t os_id,
+ ThreadType thread_type = ThreadType::Regular);
+
+} // namespace __lsan
+
+#endif // LSAN_POSIX_H
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_preinit.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_preinit.cpp
new file mode 100644
index 000000000000..30e90f728649
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_preinit.cpp
@@ -0,0 +1,21 @@
+//===-- lsan_preinit.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Call __lsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+// This section is linked into the main executable when -fsanitize=leak is
+// specified to perform initialization at a very early stage.
+__attribute__((section(".preinit_array"), used)) static auto preinit =
+ __lsan_init;
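+// Entries in .preinit_array run before shared-object initializers and C++
+// constructors, so __lsan_init runs before any user constructor can allocate.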
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
new file mode 100644
index 000000000000..07c7b923623f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
@@ -0,0 +1,116 @@
+//=-- lsan_thread.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_thread.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __lsan {
+
+static ThreadRegistry *thread_registry;
+static ThreadArgRetval *thread_arg_retval;
+
+static Mutex mu_for_thread_context;
+static LowLevelAllocator allocator_for_thread_context;
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ Lock lock(&mu_for_thread_context);
+ return new (allocator_for_thread_context) ThreadContext(tid);
+}
+
+void InitializeThreads() {
+ alignas(alignof(ThreadRegistry)) static char
+ thread_registry_placeholder[sizeof(ThreadRegistry)];
+ thread_registry =
+ new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
+
+ alignas(alignof(ThreadArgRetval)) static char
+ thread_arg_retval_placeholder[sizeof(ThreadArgRetval)];
+ thread_arg_retval = new (thread_arg_retval_placeholder) ThreadArgRetval();
+}
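+// Both registries are constructed with placement new into static buffers:
+// this avoids C++ dynamic initialization order issues and keeps the objects
+// off the heap that LSan itself tracks.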
+
+ThreadArgRetval &GetThreadArgRetval() { return *thread_arg_retval; }
+
+ThreadContextLsanBase::ThreadContextLsanBase(int tid)
+ : ThreadContextBase(tid) {}
+
+void ThreadContextLsanBase::OnStarted(void *arg) {
+ SetCurrentThread(this);
+ AllocatorThreadStart();
+}
+
+void ThreadContextLsanBase::OnFinished() {
+ AllocatorThreadFinish();
+ DTLS_Destroy();
+ SetCurrentThread(nullptr);
+}
+
+u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
+ return thread_registry->CreateThread(0, detached, parent_tid, arg);
+}
+
+void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
+ ThreadType thread_type, void *arg) {
+ thread_registry->StartThread(tid, os_id, thread_type, arg);
+}
+
+void ThreadFinish() { thread_registry->FinishThread(GetCurrentThreadId()); }
+
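+// The os_id recorded for the main thread can go stale, most notably in a
+// forked child, where the thread keeps tid kMainTid but gets a new OS id.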
+void EnsureMainThreadIDIsCorrect() {
+ if (GetCurrentThreadId() == kMainTid)
+ GetCurrentThread()->os_id = GetTid();
+}
+
+///// Interface to the common LSan module. /////
+
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
+
+void LockThreads() {
+ thread_registry->Lock();
+ thread_arg_retval->Lock();
+}
+
+void UnlockThreads() {
+ thread_arg_retval->Unlock();
+ thread_registry->Unlock();
+}
+
+ThreadRegistry *GetLsanThreadRegistryLocked() {
+ thread_registry->CheckLocked();
+ return thread_registry;
+}
+
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *threads) {
+ if (tctx->status == ThreadStatusRunning) {
+ reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ tctx->os_id);
+ }
+ },
+ threads);
+}
+
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ GetThreadArgRetval().GetAllPtrsLocked(ptrs);
+}
+
+} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
new file mode 100644
index 000000000000..222066ee93cd
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
@@ -0,0 +1,66 @@
+//=-- lsan_thread.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Thread registry for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_THREAD_H
+#define LSAN_THREAD_H
+
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __lsan {
+
+class ThreadContextLsanBase : public ThreadContextBase {
+ public:
+ explicit ThreadContextLsanBase(int tid);
+ void OnStarted(void *arg) override;
+ void OnFinished() override;
+ uptr stack_begin() { return stack_begin_; }
+ uptr stack_end() { return stack_end_; }
+ uptr cache_begin() { return cache_begin_; }
+ uptr cache_end() { return cache_end_; }
+
+ // The argument is passed on to the subclass's OnStarted member function.
+ static void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type,
+ void *onstarted_arg);
+
+ protected:
+ ~ThreadContextLsanBase() {}
+ uptr stack_begin_ = 0;
+ uptr stack_end_ = 0;
+ uptr cache_begin_ = 0;
+ uptr cache_end_ = 0;
+};
+
+// This subclass of ThreadContextLsanBase is declared in an OS-specific header.
+class ThreadContext;
+
+void InitializeThreads();
+void InitializeMainThread();
+
+ThreadRegistry *GetLsanThreadRegistryLocked();
+ThreadArgRetval &GetThreadArgRetval();
+
+u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
+void ThreadFinish();
+
+ThreadContextLsanBase *GetCurrentThread();
+inline u32 GetCurrentThreadId() {
+ ThreadContextLsanBase *ctx = GetCurrentThread();
+ return ctx ? ctx->tid : kInvalidTid;
+}
+void SetCurrentThread(ThreadContextLsanBase *tctx);
+void EnsureMainThreadIDIsCorrect();
+
+} // namespace __lsan
+
+#endif // LSAN_THREAD_H
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/weak_symbols.txt b/contrib/llvm-project/compiler-rt/lib/lsan/weak_symbols.txt
new file mode 100644
index 000000000000..692255679bee
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/weak_symbols.txt
@@ -0,0 +1,3 @@
+___lsan_default_options
+___lsan_default_suppressions
+___lsan_is_turned_off