Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib/hwasan')
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp | 766
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h | 235
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.syms.extra | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp | 191
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp | 696
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h | 137
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h | 194
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp | 148
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.h | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp | 70
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.h | 31
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc | 93
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp | 241
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp | 93
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h | 50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_ignorelist.txt | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp | 549
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S | 14
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h | 252
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp | 596
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h | 50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h | 75
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp | 74
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp | 162
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h | 1001
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp | 36
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.h | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp | 21
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp | 1138
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h | 35
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S | 102
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S | 92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S | 79
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S | 160
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S | 132
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp | 223
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h | 123
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp | 29
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h | 229
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp | 25
41 files changed, 8258 insertions, 0 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
new file mode 100644
index 000000000000..ccdc0b4bc21b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
@@ -0,0 +1,766 @@
+//===-- hwasan.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+
+#include "hwasan_checks.h"
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_globals.h"
+#include "hwasan_mapping.h"
+#include "hwasan_poisoning.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "ubsan/ubsan_flags.h"
+#include "ubsan/ubsan_init.h"
+
+// WARNING! No system header includes in this file.
+
+using namespace __sanitizer;
+
+namespace __hwasan {
+
+static Flags hwasan_flags;
+
+Flags *flags() {
+ return &hwasan_flags;
+}
+
+int hwasan_inited = 0;
+int hwasan_instrumentation_inited = 0;
+bool hwasan_init_is_running;
+
+int hwasan_report_count = 0;
+
+uptr kLowShadowStart;
+uptr kLowShadowEnd;
+uptr kHighShadowStart;
+uptr kHighShadowEnd;
+
+void Flags::SetDefaults() {
+#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "hwasan_flags.inc"
+#undef HWASAN_FLAG
+}
+
+static void RegisterHwasanFlags(FlagParser *parser, Flags *f) {
+#define HWASAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "hwasan_flags.inc"
+#undef HWASAN_FLAG
+}
+
+static void InitializeFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("HWASAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 20;
+ cf.handle_ioctl = true;
+ // FIXME: test and enable.
+ cf.check_printf = false;
+ cf.intercept_tls_get_addr = true;
+ cf.exitcode = 99;
+ // 8 shadow pages ~512kB, small enough to cover common stack sizes.
+ cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
+ // Sigtrap is used in error reporting.
+ cf.handle_sigtrap = kHandleSignalExclusive;
+ // For now only tested on Linux and Fuchsia. Other platforms can be turned
+ // on as they become ready.
+ constexpr bool can_detect_leaks =
+ (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA;
+ cf.detect_leaks = cf.detect_leaks && can_detect_leaks;
+
+#if SANITIZER_ANDROID
+ // Let the platform handle other signals. It is better at reporting them
+ // than we are.
+ cf.handle_segv = kHandleSignalNo;
+ cf.handle_sigbus = kHandleSignalNo;
+ cf.handle_abort = kHandleSignalNo;
+ cf.handle_sigill = kHandleSignalNo;
+ cf.handle_sigfpe = kHandleSignalNo;
+#endif
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterHwasanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if CAN_SANITIZE_LEAKS
+ __lsan::Flags *lf = __lsan::flags();
+ lf->SetDefaults();
+
+ FlagParser lsan_parser;
+ __lsan::RegisterLsanFlags(&lsan_parser, lf);
+ RegisterCommonFlags(&lsan_parser);
+#endif
+
+#if HWASAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Override from user-specified string.
+ if (__hwasan_default_options)
+ parser.ParseString(__hwasan_default_options());
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseString(__lsan_default_options());
+#endif
+#if HWASAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan_default_options();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+
+ parser.ParseStringFromEnv("HWASAN_OPTIONS");
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseStringFromEnv("LSAN_OPTIONS");
+#endif
+#if HWASAN_CONTAINS_UBSAN
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
+}
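+
+// Precedence sketch for the parsing order above (illustrative values): if
+// __hwasan_default_options() returns "verbosity=1" while the environment
+// sets HWASAN_OPTIONS=verbosity=2, the environment setting wins, because
+// ParseStringFromEnv runs after the default-options string is parsed.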
+
+static void CheckUnwind() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
+}
+
+static void HwasanFormatMemoryUsage(InternalScopedString &s) {
+ HwasanThreadList &thread_list = hwasanThreadList();
+ auto thread_stats = thread_list.GetThreadStats();
+ auto sds = StackDepotGetStats();
+ AllocatorStatCounters asc;
+ GetAllocatorStats(asc);
+ s.AppendF(
+ "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
+ " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
+ " heap: %zd",
+ internal_getpid(), GetRSS(), thread_stats.n_live_threads,
+ thread_stats.total_stack_size,
+ thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
+ sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]);
+}
+
+#if SANITIZER_ANDROID
+static constexpr uptr kMemoryUsageBufferSize = 4096;
+
+static char *memory_usage_buffer = nullptr;
+
+static void InitMemoryUsage() {
+ memory_usage_buffer =
+ (char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string");
+ CHECK(memory_usage_buffer);
+ memory_usage_buffer[0] = '\0';
+ DecorateMapping((uptr)memory_usage_buffer, kMemoryUsageBufferSize,
+ memory_usage_buffer);
+}
+
+void UpdateMemoryUsage() {
+ if (!flags()->export_memory_stats)
+ return;
+ if (!memory_usage_buffer)
+ InitMemoryUsage();
+ InternalScopedString s;
+ HwasanFormatMemoryUsage(s);
+ internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
+ memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
+}
+#else
+void UpdateMemoryUsage() {}
+#endif
+
+void HwasanAtExit() {
+ if (common_flags()->print_module_map)
+ DumpProcessMap();
+ if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
+ ReportStats();
+ if (hwasan_report_count > 0) {
+ // ReportAtExitStatistics();
+ if (common_flags()->exitcode)
+ internal__exit(common_flags()->exitcode);
+ }
+}
+
+void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
+ uptr *registers_frame) {
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
+
+ // The second stack frame contains the failing __hwasan_check function, as
+ // we have a stack frame for the registers saved in __hwasan_tag_mismatch
+ // that we wish to ignore. This (currently) only occurs on AArch64, as x64
+ // implementations use SIGTRAP to implement the failure, and thus do not go
+ // through the stack saver.
+ if (registers_frame && stack->trace && stack->size > 0) {
+ stack->trace++;
+ stack->size--;
+ }
+
+ bool fatal = flags()->halt_on_error || !ai.recover;
+ ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
+ registers_frame);
+}
+
+void HwasanTagMismatch(uptr addr, uptr pc, uptr frame, uptr access_info,
+ uptr *registers_frame, size_t outsize) {
+ __hwasan::AccessInfo ai;
+ ai.is_store = access_info & 0x10;
+ ai.is_load = !ai.is_store;
+ ai.recover = access_info & 0x20;
+ ai.addr = addr;
+ if ((access_info & 0xf) == 0xf)
+ ai.size = outsize;
+ else
+ ai.size = 1 << (access_info & 0xf);
+
+ HandleTagMismatch(ai, pc, frame, nullptr, registers_frame);
+}
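+
+// Worked example of the access_info encoding above (illustrative): for
+// access_info == 0x32, bit 0x10 is set, so the access is a store; bit 0x20
+// is set, so the failure is recoverable; the low nibble is 2, so ai.size ==
+// 1 << 2 == 4 bytes. A low nibble of 0xf denotes a variable-sized access,
+// whose size is passed separately in `outsize`.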
+
+Thread *GetCurrentThread() {
+ uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
+ if (UNLIKELY(*ThreadLongPtr == 0))
+ return nullptr;
+ auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
+ return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
+}
+
+} // namespace __hwasan
+
+using namespace __hwasan;
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ Thread *t = GetCurrentThread();
+ if (!t) {
+ // The thread is still being created, or has already been destroyed.
+ size = 0;
+ return;
+ }
+ Unwind(max_depth, pc, bp, context, t->stack_top(), t->stack_bottom(),
+ request_fast);
+}
+
+static bool InitializeSingleGlobal(const hwasan_global &global) {
+ uptr full_granule_size = RoundDownTo(global.size(), 16);
+ TagMemoryAligned(global.addr(), full_granule_size, global.tag());
+ if (global.size() % 16)
+ TagMemoryAligned(global.addr() + full_granule_size, 16, global.size() % 16);
+ return false;
+}
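+
+// Short-granule example for the tagging above (illustrative, assuming
+// 16-byte granules): a global of size 40 with tag 0x2a gets its first 32
+// bytes tagged 0x2a, and the shadow of the last granule stores 8 (40 % 16),
+// so only the first 8 bytes of that granule are addressable.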
+
+static void InitLoadedGlobals() {
+ // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
+ // the startup path which calls into __hwasan_library_loaded on all
+ // initially loaded modules, so explicitly registering the globals here
+ // isn't needed.
+ if constexpr (!SANITIZER_FUCHSIA) {
+ dl_iterate_phdr(
+ [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
+ for (const hwasan_global &global : HwasanGlobalsFor(
+ info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
+ InitializeSingleGlobal(global);
+ return 0;
+ },
+ nullptr);
+ }
+}
+
+// Prepare to run instrumented code on the main thread.
+static void InitInstrumentation() {
+ if (hwasan_instrumentation_inited) return;
+
+ InitializeOsSupport();
+
+ if (!InitShadow()) {
+ Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
+ DumpProcessMap();
+ Die();
+ }
+
+ InitThreads();
+
+ hwasan_instrumentation_inited = 1;
+}
+
+// Interface.
+
+uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol.
+
+// This function was used by the old frame descriptor mechanism. We keep it
+// around to avoid breaking ABI.
+void __hwasan_init_frames(uptr beg, uptr end) {}
+
+void __hwasan_init_static() {
+ InitShadowGOT();
+ InitInstrumentation();
+
+ // In the non-static code path we call dl_iterate_phdr here. But at this point
+ // libc might not have been initialized enough for dl_iterate_phdr to work.
+ // Fortunately, since this is a statically linked executable we can use the
+ // linker-defined symbol __ehdr_start to find the only relevant set of phdrs.
+ extern ElfW(Ehdr) __ehdr_start;
+ for (const hwasan_global &global : HwasanGlobalsFor(
+ /* base */ 0,
+ reinterpret_cast<const ElfW(Phdr) *>(
+ reinterpret_cast<const char *>(&__ehdr_start) +
+ __ehdr_start.e_phoff),
+ __ehdr_start.e_phnum))
+ InitializeSingleGlobal(global);
+}
+
+__attribute__((constructor(0))) void __hwasan_init() {
+ CHECK(!hwasan_init_is_running);
+ if (hwasan_inited) return;
+ hwasan_init_is_running = 1;
+ SanitizerToolName = "HWAddressSanitizer";
+
+ InitTlsSize();
+
+ CacheBinaryName();
+ InitializeFlags();
+
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckUnwindCallback(CheckUnwind);
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+
+ AndroidTestTlsSlot();
+
+ DisableCoreDumperIfNecessary();
+
+ InitInstrumentation();
+ InitLoadedGlobals();
+
+ // Needs to be called here because flags()->random_tags might not have been
+ // initialized when InitInstrumentation() was called.
+ GetCurrentThread()->EnsureRandomStateInited();
+
+ SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
+ // This may call libc -> needs initialized shadow.
+ AndroidLogInit();
+
+ InitializeInterceptors();
+ InstallDeadlySignalHandlers(HwasanOnDeadlySignal);
+ InstallAtExitHandler(); // Needs __cxa_atexit interceptor.
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ HwasanTSDInit();
+ HwasanTSDThreadInit();
+
+ HwasanAllocatorInit();
+ HwasanInstallAtForkHandler();
+
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::InitCommonLsan();
+ InstallAtExitCheckLeaks();
+ }
+
+#if HWASAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
+
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ Symbolizer::LateInitialize();
+ }
+
+ VPrintf(1, "HWAddressSanitizer init done\n");
+
+ hwasan_init_is_running = 0;
+ hwasan_inited = 1;
+}
+
+void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum) {
+ for (const hwasan_global &global : HwasanGlobalsFor(base, phdr, phnum))
+ InitializeSingleGlobal(global);
+}
+
+void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum) {
+ for (; phnum != 0; ++phdr, --phnum)
+ if (phdr->p_type == PT_LOAD)
+ TagMemory(base + phdr->p_vaddr, phdr->p_memsz, 0);
+}
+
+void __hwasan_print_shadow(const void *p, uptr sz) {
+ uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr shadow_first = MemToShadow(ptr_raw);
+ uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
+ Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw,
+ ptr_raw + sz, GetTagFromPointer((uptr)p));
+ for (uptr s = shadow_first; s <= shadow_last; ++s) {
+ tag_t mem_tag = *reinterpret_cast<tag_t *>(s);
+ uptr granule_addr = ShadowToMem(s);
+ if (mem_tag && mem_tag < kShadowAlignment)
+ Printf(" %zx: %02x(%02x)\n", granule_addr, mem_tag,
+ *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1));
+ else
+ Printf(" %zx: %02x\n", granule_addr, mem_tag);
+ }
+}
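+
+// Output sketch for the format above (illustrative values): a short granule
+// prints the shadow byte plus the real tag kept in the granule's last byte,
+// e.g. " 1000: 08(2a)"; a fully tagged granule prints only the shadow byte,
+// e.g. " 1010: 2a".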
+
+sptr __hwasan_test_shadow(const void *p, uptr sz) {
+ if (sz == 0)
+ return -1;
+ uptr ptr = reinterpret_cast<uptr>(p);
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ uptr ptr_raw = UntagAddr(ptr);
+ uptr shadow_first = MemToShadow(ptr_raw);
+ uptr shadow_last = MemToShadow(ptr_raw + sz);
+ for (uptr s = shadow_first; s < shadow_last; ++s) {
+ if (UNLIKELY(*(tag_t *)s != ptr_tag)) {
+ uptr short_size =
+ ShortTagSize(*(tag_t *)s, AddTagToPointer(ShadowToMem(s), ptr_tag));
+ sptr offset = ShadowToMem(s) - ptr_raw + short_size;
+ return offset < 0 ? 0 : offset;
+ }
+ }
+
+ uptr end = ptr + sz;
+ uptr tail_sz = end & (kShadowAlignment - 1);
+ if (!tail_sz)
+ return -1;
+
+ uptr short_size =
+ ShortTagSize(*(tag_t *)shadow_last, end & ~(kShadowAlignment - 1));
+ if (LIKELY(tail_sz <= short_size))
+ return -1;
+
+ sptr offset = sz - tail_sz + short_size;
+ return offset < 0 ? 0 : offset;
+}
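+
+// Usage sketch (illustrative, hypothetical 20-byte allocation): a return
+// value of -1 means the whole range is accessible with p's pointer tag;
+// otherwise the offset of the first inaccessible byte is returned.
+//   void *p = malloc(20);
+//   __hwasan_test_shadow(p, 20);  // -1: all 20 bytes accessible
+//   __hwasan_test_shadow(p, 21);  // 20: byte 20 is past the short granule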
+
+u16 __sanitizer_unaligned_load16(const uu16 *p) {
+ return *p;
+}
+u32 __sanitizer_unaligned_load32(const uu32 *p) {
+ return *p;
+}
+u64 __sanitizer_unaligned_load64(const uu64 *p) {
+ return *p;
+}
+void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
+ *p = x;
+}
+void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
+ *p = x;
+}
+void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
+ *p = x;
+}
+
+void __hwasan_loadN(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
+}
+void __hwasan_load1(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
+}
+void __hwasan_load2(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
+}
+void __hwasan_load4(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
+}
+void __hwasan_load8(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
+}
+void __hwasan_load16(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_noabort(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
+}
+
+void __hwasan_storeN(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
+}
+void __hwasan_store1(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
+}
+void __hwasan_store2(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
+}
+void __hwasan_store4(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
+}
+void __hwasan_store8(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
+}
+void __hwasan_store16(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_noabort(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
+}
+
+void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
+ TagMemoryAligned(UntagAddr(p), sz, tag);
+}
+
+uptr __hwasan_tag_pointer(uptr p, u8 tag) {
+ return AddTagToPointer(p, tag);
+}
+
+u8 __hwasan_get_tag_from_pointer(uptr p) { return GetTagFromPointer(p); }
+
+void __hwasan_handle_longjmp(const void *sp_dst) {
+ uptr dst = (uptr)sp_dst;
+ // HWASan does not support tagged SP.
+ CHECK_EQ(GetTagFromPointer(dst), 0);
+
+ uptr sp = (uptr)__builtin_frame_address(0);
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (dst < sp || dst - sp > kMaxExpectedCleanupSize) {
+ Report(
+ "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
+ "stack top: %p; target %p; distance: %p (%zd)\n"
+ "False positive error reports may follow\n",
+ (void *)sp, (void *)dst, dst - sp, dst - sp);
+ return;
+ }
+ TagMemory(sp, dst - sp, 0);
+}
+
+void __hwasan_handle_vfork(const void *sp_dst) {
+ uptr sp = (uptr)sp_dst;
+ Thread *t = GetCurrentThread();
+ CHECK(t);
+ uptr top = t->stack_top();
+ uptr bottom = t->stack_bottom();
+ if (top == 0 || bottom == 0 || sp < bottom || sp >= top) {
+ Report(
+ "WARNING: HWASan is ignoring requested __hwasan_handle_vfork: "
+ "stack top: %zx; current %zx; bottom: %zx \n"
+ "False positive error reports may follow\n",
+ top, sp, bottom);
+ return;
+ }
+ TagMemory(bottom, sp - bottom, 0);
+}
+
+extern "C" void *__hwasan_extra_spill_area() {
+ Thread *t = GetCurrentThread();
+ return &t->vfork_spill();
+}
+
+void __hwasan_print_memory_usage() {
+ InternalScopedString s;
+ HwasanFormatMemoryUsage(s);
+ Printf("%s\n", s.data());
+}
+
+static const u8 kFallbackTag = 0xBB & kTagMask;
+
+u8 __hwasan_generate_tag() {
+ Thread *t = GetCurrentThread();
+ if (!t) return kFallbackTag;
+ return t->GenerateRandomTag();
+}
+
+void __hwasan_add_frame_record(u64 frame_record_info) {
+ Thread *t = GetCurrentThread();
+ if (t)
+ t->stack_allocations()->push(frame_record_info);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __hwasan_default_options() { return ""; }
+} // extern "C"
+#endif
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
+}
+
+// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
+// rest of the mismatch handling code (C++).
+void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize) {
+ __hwasan::HwasanTagMismatch(addr, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), access_info,
+ registers_frame, outsize);
+}
+
+} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
new file mode 100644
index 000000000000..df21375e8167
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
@@ -0,0 +1,235 @@
+//===-- hwasan.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Private Hwasan header.
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_H
+#define HWASAN_H
+
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "ubsan/ubsan_platform.h"
+
+#ifndef HWASAN_CONTAINS_UBSAN
+# define HWASAN_CONTAINS_UBSAN CAN_SANITIZE_UB
+#endif
+
+#ifndef HWASAN_WITH_INTERCEPTORS
+#define HWASAN_WITH_INTERCEPTORS 0
+#endif
+
+#ifndef HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
+#define HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE HWASAN_WITH_INTERCEPTORS
+#endif
+
+typedef u8 tag_t;
+
+#if defined(HWASAN_ALIASING_MODE)
+# if !defined(__x86_64__)
+# error Aliasing mode is only supported on x86_64
+# endif
+// Tags are done in middle bits using userspace aliasing.
+constexpr unsigned kAddressTagShift = 39;
+constexpr unsigned kTagBits = 3;
+
+// The alias region is placed next to the shadow so the upper bits of all
+// taggable addresses match the upper bits of the shadow base. This shift
+// value determines which upper bits must match. It has a floor of 44 since the
+// shadow is always 8TB.
+// TODO(morehouse): In alias mode we can shrink the shadow and use a
+// simpler/faster shadow calculation.
+constexpr unsigned kTaggableRegionCheckShift =
+ __sanitizer::Max(kAddressTagShift + kTagBits + 1U, 44U);
+#elif defined(__x86_64__)
+// Tags are done in upper bits using Intel LAM.
+constexpr unsigned kAddressTagShift = 57;
+constexpr unsigned kTagBits = 6;
+#else
+// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
+// translation and can be used to store a tag.
+constexpr unsigned kAddressTagShift = 56;
+constexpr unsigned kTagBits = 8;
+#endif // defined(HWASAN_ALIASING_MODE)
+
+// Mask for extracting tag bits from the lower 8 bits.
+constexpr uptr kTagMask = (1UL << kTagBits) - 1;
+
+// Mask for extracting tag bits from full pointers.
+constexpr uptr kAddressTagMask = kTagMask << kAddressTagShift;
+
+// Minimal alignment of the shadow base address. Determines the space available
+// for threads and stack histories. This is an ABI constant.
+const unsigned kShadowBaseAlignment = 32;
+
+const unsigned kRecordAddrBaseTagShift = 3;
+const unsigned kRecordFPShift = 48;
+const unsigned kRecordFPLShift = 4;
+const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
+
+static inline bool InTaggableRegion(uptr addr) {
+#if defined(HWASAN_ALIASING_MODE)
+ // Aliases are mapped next to shadow so that the upper bits match the shadow
+ // base.
+ return (addr >> kTaggableRegionCheckShift) ==
+ (__hwasan::GetShadowOffset() >> kTaggableRegionCheckShift);
+#endif
+ return true;
+}
+
+static inline tag_t GetTagFromPointer(uptr p) {
+ return InTaggableRegion(p) ? ((p >> kAddressTagShift) & kTagMask) : 0;
+}
+
+static inline uptr UntagAddr(uptr tagged_addr) {
+ return InTaggableRegion(tagged_addr) ? (tagged_addr & ~kAddressTagMask)
+ : tagged_addr;
+}
+
+static inline void *UntagPtr(const void *tagged_ptr) {
+ return reinterpret_cast<void *>(
+ UntagAddr(reinterpret_cast<uptr>(tagged_ptr)));
+}
+
+static inline uptr AddTagToPointer(uptr p, tag_t tag) {
+ return InTaggableRegion(p) ? ((p & ~kAddressTagMask) |
+ ((uptr)(tag & kTagMask) << kAddressTagShift))
+ : p;
+}
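+
+// Round-trip example for the helpers above (illustrative, AArch64 TBI
+// layout with kAddressTagShift == 56 and kTagMask == 0xff): for
+// p == 0x0000ffff12345678 and tag == 0x2a, AddTagToPointer(p, tag) yields
+// 0x2a00ffff12345678; GetTagFromPointer() on that value returns 0x2a, and
+// UntagAddr() restores the original address.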
+
+namespace __hwasan {
+
+extern int hwasan_inited;
+extern bool hwasan_init_is_running;
+extern int hwasan_report_count;
+
+bool InitShadow();
+void InitializeOsSupport();
+void InitThreads();
+void InitializeInterceptors();
+
+void HwasanAllocatorInit();
+void HwasanAllocatorLock();
+void HwasanAllocatorUnlock();
+
+void *hwasan_malloc(uptr size, StackTrace *stack);
+void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack);
+void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack);
+void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack);
+void *hwasan_valloc(uptr size, StackTrace *stack);
+void *hwasan_pvalloc(uptr size, StackTrace *stack);
+void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
+void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack);
+int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack);
+void hwasan_free(void *ptr, StackTrace *stack);
+
+void InstallAtExitHandler();
+
+#define GET_MALLOC_STACK_TRACE \
+ BufferedStackTrace stack; \
+ if (hwasan_inited) \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ nullptr, common_flags()->fast_unwind_on_malloc, \
+ common_flags()->malloc_context_size)
+
+#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
+ BufferedStackTrace stack; \
+ if (hwasan_inited) \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
+
+void HwasanTSDInit();
+void HwasanTSDThreadInit();
+void HwasanAtExit();
+
+void HwasanOnDeadlySignal(int signo, void *info, void *context);
+
+void HwasanInstallAtForkHandler();
+
+void InstallAtExitCheckLeaks();
+
+void UpdateMemoryUsage();
+
+void AppendToErrorMessageBuffer(const char *buffer);
+
+void AndroidTestTlsSlot();
+
+// This is a compiler-generated struct that can be shared between hwasan
+// implementations.
+struct AccessInfo {
+ uptr addr;
+ uptr size;
+ bool is_store;
+ bool is_load;
+ bool recover;
+};
+
+// Given access info and frame information, unwind the stack and report the tag
+// mismatch.
+void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
+ uptr *registers_frame = nullptr);
+
+// This dispatches to HandleTagMismatch but sets up the AccessInfo, program
+// counter, and frame pointer.
+void HwasanTagMismatch(uptr addr, uptr pc, uptr frame, uptr access_info,
+ uptr *registers_frame, size_t outsize);
+
+} // namespace __hwasan
+
+#if HWASAN_WITH_INTERCEPTORS
+// For both bionic and glibc __sigset_t is an unsigned long.
+typedef unsigned long __hw_sigset_t;
+// Setjmp and longjmp implementations are platform specific, and hence the
+// interception code is platform specific too.
+# if defined(__aarch64__)
+constexpr size_t kHwRegisterBufSize = 22;
+# elif defined(__x86_64__)
+constexpr size_t kHwRegisterBufSize = 8;
+# elif SANITIZER_RISCV64
+// saving PC, 12 int regs, sp, 12 fp regs
+# ifndef __riscv_float_abi_soft
+constexpr size_t kHwRegisterBufSize = 1 + 12 + 1 + 12;
+# else
+constexpr size_t kHwRegisterBufSize = 1 + 12 + 1;
+# endif
+# endif
+typedef unsigned long long __hw_register_buf[kHwRegisterBufSize];
+struct __hw_jmp_buf_struct {
+ // NOTE: The machine-dependent definition of `__sigsetjmp'
+ // assumes that a `__hw_jmp_buf' begins with a `__hw_register_buf' and that
+ // `__mask_was_saved' follows it. Do not move these members or add others
+ // before them.
+ //
+ // We add a __magic field to our struct to catch cases where libc's setjmp
+ // populated the jmp_buf instead of our interceptor.
+ __hw_register_buf __jmpbuf; // Calling environment.
+ unsigned __mask_was_saved : 1; // Saved the signal mask?
+ unsigned __magic : 31; // Used to distinguish __hw_jmp_buf from jmp_buf.
+ __hw_sigset_t __saved_mask; // Saved signal mask.
+};
+typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
+typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
+constexpr unsigned kHwJmpBufMagic = 0x248ACE77;
+#endif // HWASAN_WITH_INTERCEPTORS
+
+#define ENSURE_HWASAN_INITED() \
+ do { \
+ CHECK(!hwasan_init_is_running); \
+ if (!hwasan_inited) { \
+ __hwasan_init(); \
+ } \
+ } while (0)
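+
+// Usage sketch (illustrative): early allocation entry points such as
+// __sanitizer_malloc() invoke ENSURE_HWASAN_INITED() so that a malloc()
+// reached from a static initializer runs __hwasan_init() before touching
+// the allocator.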
+
+#endif // HWASAN_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.syms.extra b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.syms.extra
new file mode 100644
index 000000000000..8738627e06e8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.syms.extra
@@ -0,0 +1,2 @@
+__hwasan_*
+__ubsan_*
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
new file mode 100644
index 000000000000..9af09e2a4bed
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
@@ -0,0 +1,191 @@
+//===-- hwasan_allocation_functions.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Definitions for __sanitizer allocation functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_mallinfo.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+using namespace __hwasan;
+
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !hwasan_inited; }
+ static void OnAllocate(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ // Suppress leaks from dlerror(). Previously, the dlsym hack's global
+ // array was registered with the leak sanitizer as a root region.
+ __lsan_register_root_region(ptr, size);
+# endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+# endif
+ }
+};
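+
+// Bootstrap sketch (illustrative): before __hwasan_init() completes, calls
+// such as dlerror() may allocate through calloc(); UseImpl() routes those
+// requests to the internal dlsym allocator instead of the hwasan heap, and
+// frees of such pointers are recognized later via
+// DlsymAlloc::PointerIsMine().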
+
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ CHECK_NE(memptr, 0);
+ int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
+ return res;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_memalign(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_memalign(alignment, size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_aligned_alloc(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_aligned_alloc(alignment, size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ void *ptr = hwasan_memalign(alignment, size, &stack);
+ if (ptr)
+ DTLS_on_libc_memalign(ptr, size);
+ return ptr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_valloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_valloc(size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_pvalloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_pvalloc(size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_free(void *ptr) {
+ if (!ptr)
+ return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
+ GET_MALLOC_STACK_TRACE;
+ hwasan_free(ptr, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_cfree(void *ptr) {
+ if (!ptr)
+ return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
+ GET_MALLOC_STACK_TRACE;
+ hwasan_free(ptr, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_malloc_usable_size(const void *ptr) {
+ return __sanitizer_get_allocated_size(ptr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
+ __sanitizer_struct_mallinfo sret;
+ internal_memset(&sret, 0, sizeof(sret));
+ return sret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_mallopt(int cmd, int value) { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_malloc_stats(void) {
+ // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_calloc(uptr nmemb, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_calloc(nmemb, size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_realloc(void *ptr, uptr size) {
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_realloc(ptr, size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_reallocarray(ptr, nmemb, size, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_malloc(uptr size) {
+ if (UNLIKELY(!hwasan_init_is_running))
+ ENSURE_HWASAN_INITED();
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_malloc(size, &stack);
+}
+
+} // extern "C"
+
+#if HWASAN_WITH_INTERCEPTORS || SANITIZER_FUCHSIA
+#if SANITIZER_FUCHSIA
+// Fuchsia does not use the WRAP/wrapper machinery of the interceptor infrastructure.
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS(__sanitizer_##FN)
+#else
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
+ ALIAS(__sanitizer_##FN); \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS(__sanitizer_##FN)
+#endif
+
+INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
+ SIZE_T size);
+INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, free, void *ptr);
+INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
+INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
+
+# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, cfree, void *ptr);
+INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo,);
+INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
+INTERCEPTOR_ALIAS(void, malloc_stats, void);
+# endif
+#endif // HWASAN_WITH_INTERCEPTORS || SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
new file mode 100644
index 000000000000..75dbb336e344
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -0,0 +1,696 @@
+//===-- hwasan_allocator.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer allocator.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_checks.h"
+#include "hwasan_mapping.h"
+#include "hwasan_malloc_bisect.h"
+#include "hwasan_thread.h"
+#include "hwasan_report.h"
+#include "lsan/lsan_common.h"
+
+namespace __hwasan {
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;
+static atomic_uint8_t hwasan_allocator_tagging_enabled;
+
+static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
+static constexpr tag_t kFallbackFreeTag = 0xBC;
+
+enum {
+ // Either just allocated by the underlying allocator, but the chunk
+ // metadata is not yet ready, or almost returned to the underlying
+ // allocator and the chunk metadata is already meaningless.
+ CHUNK_INVALID = 0,
+ // The chunk is allocated and not yet freed.
+ CHUNK_ALLOCATED = 1,
+};
+
+
+// Initialized in HwasanAllocatorInit, and never changed.
+alignas(16) static u8 tail_magic[kShadowAlignment - 1];
+static uptr max_malloc_size;
+
+bool HwasanChunkView::IsAllocated() const {
+ return metadata_ && metadata_->IsAllocated();
+}
+
+uptr HwasanChunkView::Beg() const {
+ return block_;
+}
+uptr HwasanChunkView::End() const {
+ return Beg() + UsedSize();
+}
+uptr HwasanChunkView::UsedSize() const {
+ return metadata_->GetRequestedSize();
+}
+u32 HwasanChunkView::GetAllocStackId() const {
+ return metadata_->GetAllocStackId();
+}
+
+u32 HwasanChunkView::GetAllocThreadId() const {
+ return metadata_->GetAllocThreadId();
+}
+
+uptr HwasanChunkView::ActualSize() const {
+ return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
+}
+
+bool HwasanChunkView::FromSmallHeap() const {
+ return allocator.FromPrimary(reinterpret_cast<void *>(block_));
+}
+
+bool HwasanChunkView::AddrIsInside(uptr addr) const {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
+}
+
+inline void Metadata::SetAllocated(u32 stack, u64 size) {
+ Thread *t = GetCurrentThread();
+ u64 context = t ? t->unique_id() : kMainTid;
+ context <<= 32;
+ context += stack;
+ requested_size_low = size & ((1ul << 32) - 1);
+ requested_size_high = size >> 32;
+ atomic_store(&alloc_context_id, context, memory_order_relaxed);
+ atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
+}
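+
+// Packing example for alloc_context_id above (illustrative): a thread with
+// unique_id 7 storing stack id 0x1234 produces context == (7ull << 32) |
+// 0x1234; GetAllocThreadId() recovers 7 and GetAllocStackId() truncates
+// back to 0x1234.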
+
+inline void Metadata::SetUnallocated() {
+ atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
+ requested_size_low = 0;
+ requested_size_high = 0;
+ atomic_store(&alloc_context_id, 0, memory_order_relaxed);
+}
+
+inline bool Metadata::IsAllocated() const {
+ return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
+}
+
+inline u64 Metadata::GetRequestedSize() const {
+ return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
+}
+
+inline u32 Metadata::GetAllocStackId() const {
+ return atomic_load(&alloc_context_id, memory_order_relaxed);
+}
+
+inline u32 Metadata::GetAllocThreadId() const {
+ u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
+ u32 tid = context >> 32;
+ return tid;
+}
+
+void GetAllocatorStats(AllocatorStatCounters s) {
+ allocator.GetStats(s);
+}
+
+inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
+ lsan_tag = tag;
+}
+
+inline __lsan::ChunkTag Metadata::GetLsanTag() const {
+ return static_cast<__lsan::ChunkTag>(lsan_tag);
+}
+
+uptr GetAliasRegionStart() {
+#if defined(HWASAN_ALIASING_MODE)
+ constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
+ uptr AliasRegionStart =
+ __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
+
+ CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+ CHECK_EQ(
+ (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+ return AliasRegionStart;
+#else
+ return 0;
+#endif
+}
+
+void HwasanAllocatorInit() {
+ atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
+ !flags()->disable_allocator_tagging);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms,
+ GetAliasRegionStart());
+ for (uptr i = 0; i < sizeof(tail_magic); i++)
+ tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+ if (common_flags()->max_allocation_size_mb) {
+ max_malloc_size = common_flags()->max_allocation_size_mb << 20;
+ max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
+ } else {
+ max_malloc_size = kMaxAllowedMallocSize;
+ }
+}
+
+void HwasanAllocatorLock() { allocator.ForceLock(); }
+
+void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
+
+void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }
+
+void AllocatorThreadFinish(AllocatorCache *cache) {
+ allocator.SwallowCache(cache);
+ allocator.DestroyCache(cache);
+}
+
+static uptr TaggedSize(uptr size) {
+ if (!size) size = 1;
+ uptr new_size = RoundUpTo(size, kShadowAlignment);
+ CHECK_GE(new_size, size);
+ return new_size;
+}
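+
+// Examples (illustrative, with 16-byte shadow granules): TaggedSize(1) ==
+// 16, TaggedSize(16) == 16, TaggedSize(17) == 32, and TaggedSize(0) is
+// treated as TaggedSize(1).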
+
+static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
+ bool zeroise) {
+ // Keep this consistent with LSAN and ASAN behavior.
+ if (UNLIKELY(orig_size == 0))
+ orig_size = 1;
+ if (UNLIKELY(orig_size > max_malloc_size)) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
+ orig_size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
+ }
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(stack);
+ }
+
+ alignment = Max(alignment, kShadowAlignment);
+ uptr size = TaggedSize(orig_size);
+ Thread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, size, alignment);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, stack);
+ }
+ if (zeroise) {
+ // The secondary allocator mmaps memory, which should be zero-inited so we
+ // don't need to explicitly clear it.
+ if (allocator.FromPrimary(allocated))
+ internal_memset(allocated, 0, size);
+ } else if (flags()->max_malloc_fill_size > 0) {
+ uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
+ internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
+ }
+ if (size != orig_size) {
+ u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
+ uptr tail_length = size - orig_size;
+ internal_memcpy(tail, tail_magic, tail_length - 1);
+ // Short granule is excluded from magic tail, so we explicitly untag.
+ tail[tail_length - 1] = 0;
+ }
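+  // Tail layout example (illustrative, 16-byte granules): for orig_size ==
+  // 20 and size == 32, bytes [20, 31) receive tail_magic and byte 31 is
+  // zeroed here; the tagging path below later stores the granule tag in
+  // that last byte.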
+
+ void *user_ptr = allocated;
+ if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
+ tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+ uptr tag_size = orig_size ? orig_size : 1;
+ uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
+ if (full_granule_size != tag_size) {
+ u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
+ TagMemoryAligned((uptr)short_granule, kShadowAlignment,
+ tag_size % kShadowAlignment);
+ short_granule[kShadowAlignment - 1] = tag;
+ }
+ } else {
+ // Tagging cannot be completely skipped. If it's disabled, we need to tag
+ // with zeros.
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
+ }
+
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+#if CAN_SANITIZE_LEAKS
+ meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked);
+#endif
+ meta->SetAllocated(StackDepotPut(*stack), orig_size);
+ RunMallocHooks(user_ptr, orig_size);
+ return user_ptr;
+}
+
+static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
+ CHECK(tagged_ptr);
+ uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
+ if (!InTaggableRegion(tagged_uptr))
+ return true;
+ tag_t mem_tag = *reinterpret_cast<tag_t *>(
+ MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
+ return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
+}
+
+static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
+ void *tagged_ptr) {
+ // This function can return true if halt_on_error is false.
+ if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
+ !PointerAndMemoryTagsMatch(tagged_ptr)) {
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+ return true;
+ }
+ return false;
+}
+
+static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
+ CHECK(tagged_ptr);
+ void *untagged_ptr = UntagPtr(tagged_ptr);
+
+ if (RunFreeHooks(tagged_ptr))
+ return;
+
+ if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
+ return;
+
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ if (!meta) {
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+ return;
+ }
+
+ uptr orig_size = meta->GetRequestedSize();
+ u32 free_context_id = StackDepotPut(*stack);
+ u32 alloc_context_id = meta->GetAllocStackId();
+ u32 alloc_thread_id = meta->GetAllocThreadId();
+
+ bool in_taggable_region =
+ InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
+
+ // Check tail magic.
+ uptr tagged_size = TaggedSize(orig_size);
+ if (flags()->free_checks_tail_magic && orig_size &&
+ tagged_size != orig_size) {
+ uptr tail_size = tagged_size - orig_size - 1;
+ CHECK_LT(tail_size, kShadowAlignment);
+ void *tail_beg = reinterpret_cast<void *>(
+ reinterpret_cast<uptr>(aligned_ptr) + orig_size);
+ tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
+ reinterpret_cast<uptr>(tail_beg) + tail_size));
+ if (tail_size &&
+ (internal_memcmp(tail_beg, tail_magic, tail_size) ||
+ (in_taggable_region && pointer_tag != short_granule_memtag)))
+ ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
+ orig_size, tail_magic);
+ }
+
+ // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
+ meta->SetUnallocated();
+ // This memory will not be reused by anyone else, so we are free to keep it
+ // poisoned.
+ Thread *t = GetCurrentThread();
+ if (flags()->max_free_fill_size > 0) {
+ uptr fill_size =
+ Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
+ internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
+ }
+ if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
+ // Always store full 8-bit tags on free to maximize UAF detection.
+ tag_t tag;
+ if (t) {
+ // Make sure we are not using a short granule tag as a poison tag. This
+ // would make us attempt to read the memory on a UaF.
+ // The tag can be zero if tagging is disabled on this thread.
+ do {
+ tag = t->GenerateRandomTag(/*num_bits=*/8);
+ } while (
+ UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
+ } else {
+ static_assert(kFallbackFreeTag >= kShadowAlignment,
+ "fallback tag must not be a short granule tag.");
+ tag = kFallbackFreeTag;
+ }
+ TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
+ tag);
+ }
+ if (t) {
+ allocator.Deallocate(t->allocator_cache(), aligned_ptr);
+ if (auto *ha = t->heap_allocations())
+ ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
+ alloc_context_id, free_context_id,
+ static_cast<u32>(orig_size)});
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, aligned_ptr);
+ }
+}
+
+static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
+ uptr new_size, uptr alignment) {
+ void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
+ if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
+ return nullptr;
+ void *tagged_ptr_new =
+ HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
+ if (tagged_ptr_old && tagged_ptr_new) {
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
+ void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
+ internal_memcpy(untagged_ptr_new, untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
+ HwasanDeallocate(stack, tagged_ptr_old);
+ }
+ return tagged_ptr_new;
+}
+
+static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, stack);
+ }
+ return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
+}
+
+HwasanChunkView FindHeapChunkByAddress(uptr address) {
+ if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
+ return HwasanChunkView();
+ void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
+ if (!block)
+ return HwasanChunkView();
+ Metadata *metadata =
+ reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
+ return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
+}
+
+static const void *AllocationBegin(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ if (!untagged_ptr)
+ return nullptr;
+
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ if (!beg)
+ return nullptr;
+
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (b->GetRequestedSize() == 0)
+ return nullptr;
+
+ tag_t tag = GetTagFromPointer((uptr)p);
+ return (const void *)AddTagToPointer((uptr)beg, tag);
+}
+
+static uptr AllocationSize(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ if (!untagged_ptr) return 0;
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ if (!beg)
+ return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ return b->GetRequestedSize();
+}
+
+static uptr AllocationSizeFast(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ return meta->GetRequestedSize();
+}
+
+void *hwasan_malloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+}
+
+void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
+}
+
+void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
+ if (!ptr)
+ return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+ if (size == 0) {
+ HwasanDeallocate(stack, ptr);
+ return nullptr;
+ }
+ return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, stack);
+ }
+ return hwasan_realloc(ptr, nmemb * size, stack);
+}
+
+void *hwasan_valloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(
+ HwasanAllocate(stack, size, GetPageSizeCached(), false));
+}
+
+void *hwasan_pvalloc(uptr size, StackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
+}
+
+void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, stack);
+ }
+ return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
+}
+
+void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
+ }
+ return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
+}
+
+int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, stack);
+ }
+ void *ptr = HwasanAllocate(stack, size, alignment, false);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by HwasanAllocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+void hwasan_free(void *ptr, StackTrace *stack) {
+ return HwasanDeallocate(stack, ptr);
+}
+
+} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+void LockAllocator() {
+ __hwasan::HwasanAllocatorLock();
+}
+
+void UnlockAllocator() {
+ __hwasan::HwasanAllocatorUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__hwasan::allocator;
+ *end = *begin + sizeof(__hwasan::allocator);
+}
+
+uptr PointsIntoChunk(void *p) {
+ p = UntagPtr(p);
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk =
+ reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
+ if (!chunk)
+ return 0;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
+ if (!metadata || !metadata->IsAllocated())
+ return 0;
+ if (addr < chunk + metadata->GetRequestedSize())
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ CHECK_EQ(UntagAddr(chunk), chunk);
+ void *block = __hwasan::allocator.GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(chunk));
+ if (!block)
+ return 0;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(block));
+ if (!metadata || !metadata->IsAllocated())
+ return 0;
+
+ return reinterpret_cast<uptr>(block);
+}
+
+uptr GetUserAddr(uptr chunk) {
+ if (!InTaggableRegion(chunk))
+ return chunk;
+ tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
+ return AddTagToPointer(chunk, mem_tag);
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ CHECK_EQ(UntagAddr(chunk), chunk);
+ metadata_ =
+ chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
+ : nullptr;
+}
+
+bool LsanMetadata::allocated() const {
+ if (!metadata_)
+ return false;
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->IsAllocated();
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetLsanTag();
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ m->SetLsanTag(value);
+}
+
+uptr LsanMetadata::requested_size() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetRequestedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetAllocStackId();
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __hwasan::allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObject(const void *p) {
+ p = UntagPtr(p);
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
+ if (!chunk)
+ return kIgnoreObjectInvalid;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
+ if (!metadata || !metadata->IsAllocated())
+ return kIgnoreObjectInvalid;
+ if (addr >= chunk + metadata->GetRequestedSize())
+ return kIgnoreObjectInvalid;
+ if (metadata->GetLsanTag() == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+
+ metadata->SetLsanTag(kIgnored);
+ return kIgnoreObjectSuccess;
+}
+
+} // namespace __lsan
+
+using namespace __hwasan;
+
+void __hwasan_enable_allocator_tagging() {
+ atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
+}
+
+void __hwasan_disable_allocator_tagging() {
+ atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
+}
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
new file mode 100644
index 000000000000..2ada2a0b1851
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -0,0 +1,137 @@
+//===-- hwasan_allocator.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_ALLOCATOR_H
+#define HWASAN_ALLOCATOR_H
+
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
+#include "hwasan_poisoning.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+
+#if !defined(__aarch64__) && !defined(__x86_64__) && !(SANITIZER_RISCV64)
+# error Unsupported platform
+#endif
+
+namespace __hwasan {
+
+struct Metadata {
+ private:
+ atomic_uint64_t alloc_context_id;
+ u32 requested_size_low;
+ u16 requested_size_high;
+ atomic_uint8_t chunk_state;
+ u8 lsan_tag;
+
+ public:
+ inline void SetAllocated(u32 stack, u64 size);
+ inline void SetUnallocated();
+
+ inline bool IsAllocated() const;
+ inline u64 GetRequestedSize() const;
+ inline u32 GetAllocStackId() const;
+ inline u32 GetAllocThreadId() const;
+ inline void SetLsanTag(__lsan::ChunkTag tag);
+ inline __lsan::ChunkTag GetLsanTag() const;
+};
+static_assert(sizeof(Metadata) == 16);
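+
+// A note on the packing above (a reading aid, not upstream documentation):
+// the requested size is kept as a 48-bit value split across
+// requested_size_low and requested_size_high, which comfortably covers
+// kMaxAllowedMallocSize (1 << 40) defined below.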
+
+struct HwasanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ UpdateMemoryUsage();
+ }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+    // It may later be returned to the user by mmap() or reused as another
+    // thread's stack. Make it accessible with a zero-tagged pointer.
+ TagMemory(p, size, 0);
+ }
+};
+
+static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
+
+struct AP64 {
+ static const uptr kSpaceBeg = ~0ULL;
+
+#if defined(HWASAN_ALIASING_MODE)
+ static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_LINUX && !SANITIZER_ANDROID
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
+#else
+ static const uptr kSpaceSize = 0x2000000000ULL; // 128G.
+ typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
+#endif
+
+ static const uptr kMetadataSize = sizeof(Metadata);
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef HwasanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+
+void AllocatorThreadStart(AllocatorCache *cache);
+void AllocatorThreadFinish(AllocatorCache *cache);
+
+class HwasanChunkView {
+ public:
+ HwasanChunkView() : block_(0), metadata_(nullptr) {}
+ HwasanChunkView(uptr block, Metadata *metadata)
+ : block_(block), metadata_(metadata) {}
+ bool IsAllocated() const; // Checks if the memory is currently allocated
+ uptr Beg() const; // First byte of user memory
+ uptr End() const; // Last byte of user memory
+ uptr UsedSize() const; // Size requested by the user
+ uptr ActualSize() const; // Size allocated by the allocator.
+ u32 GetAllocStackId() const;
+ u32 GetAllocThreadId() const;
+ bool FromSmallHeap() const;
+ bool AddrIsInside(uptr addr) const;
+
+ private:
+ friend class __lsan::LsanMetadata;
+ uptr block_;
+ Metadata *const metadata_;
+};
+
+HwasanChunkView FindHeapChunkByAddress(uptr address);
+
+// Information about one (de)allocation that happened in the past.
+// These are recorded in a thread-local ring buffer.
+struct HeapAllocationRecord {
+ uptr tagged_addr;
+ u32 alloc_thread_id;
+ u32 alloc_context_id;
+ u32 free_context_id;
+ u32 requested_size;
+};
+
+typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
+
+void GetAllocatorStats(AllocatorStatCounters s);
+
+} // namespace __hwasan
+
+#endif // HWASAN_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
new file mode 100644
index 000000000000..735d21a5db77
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
@@ -0,0 +1,194 @@
+//===-- hwasan_checks.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_CHECKS_H
+#define HWASAN_CHECKS_H
+
+#include "hwasan_allocator.h"
+#include "hwasan_mapping.h"
+#include "hwasan_registers.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __hwasan {
+
+enum class ErrorAction { Abort, Recover };
+enum class AccessType { Load, Store };
+
+// Used when the access size is known.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT,
+ unsigned LogSize) {
+ return 0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize;
+}
+
+// Used when the access size varies at runtime.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT) {
+ return SigTrapEncoding(EA, AT, 0xf);
+}
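+
+// A worked example of the encoding (an illustrative check, not part of the
+// upstream sources): a recoverable 8-byte store has LogSize == 3, so it
+// encodes to 0x20 + 0x10 + 3 == 0x33; on aarch64 this becomes the brk
+// immediate 0x900 + 0x33.
+static_assert(SigTrapEncoding(ErrorAction::Recover, AccessType::Store, 3) ==
+                  0x33,
+              "SigTrapEncoding example");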
+
+template <ErrorAction EA, AccessType AT, size_t LogSize>
+__attribute__((always_inline)) static void SigTrap(uptr p) {
+  // Other platforms, like Linux, use signals to intercept an exception and
+  // dispatch to HandleTagMismatch. The Fuchsia implementation doesn't use
+  // signals, so we can call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ size_t size = 2 << LogSize;
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
+ (void)p;
+  // 0x900 is added so as not to interfere with the kernel's use of lower brk
+  // immediate values.
+ register uptr x0 asm("x0") = p;
+ asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + SigTrapEncoding(EA, AT, LogSize)));
+#elif defined(__x86_64__)
+ // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
+ // total. The pointer is passed via rdi.
+  // 0x40 is added as a safeguard, to help distinguish our trap from others
+  // and to avoid a zero offset in the instruction (otherwise it would be
+  // encoded as a different, three-byte NOP).
+ asm volatile(
+ "int3\n"
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT, LogSize)),
+ "D"(p));
+#elif SANITIZER_RISCV64
+  // Put the pointer into x10. The addiw carries an immediate of 0x40 + X,
+  // where 0x40 is a magic number and X encodes the access size.
+ register uptr x10 asm("x10") = p;
+ asm volatile(
+ "ebreak\n"
+ "addiw x0, x0, %1\n" ::"r"(x10),
+ "I"(0x40 + SigTrapEncoding(EA, AT, LogSize)));
+#else
+ // FIXME: not always sigill.
+ __builtin_trap();
+#endif
+ // __builtin_unreachable();
+}
+
+// Version with access size which is not power of 2
+template <ErrorAction EA, AccessType AT>
+__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
+  // Other platforms, like Linux, use signals to intercept an exception and
+  // dispatch to HandleTagMismatch. The Fuchsia implementation doesn't use
+  // signals, so we can call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
+ register uptr x0 asm("x0") = p;
+ register uptr x1 asm("x1") = size;
+ asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + SigTrapEncoding(EA, AT)));
+#elif defined(__x86_64__)
+ // Size is stored in rsi.
+ asm volatile(
+ "int3\n"
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT)),
+ "D"(p), "S"(size));
+#elif SANITIZER_RISCV64
+ // Put access size into x11
+ register uptr x10 asm("x10") = p;
+ register uptr x11 asm("x11") = size;
+ asm volatile(
+ "ebreak\n"
+ "addiw x0, x0, %2\n" ::"r"(x10),
+ "r"(x11), "I"(0x40 + SigTrapEncoding(EA, AT)));
+#else
+ __builtin_trap();
+#endif
+ // __builtin_unreachable();
+}
+
+__attribute__((always_inline, nodebug)) static inline uptr ShortTagSize(
+ tag_t mem_tag, uptr ptr) {
+ DCHECK(IsAligned(ptr, kShadowAlignment));
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ if (ptr_tag == mem_tag)
+ return kShadowAlignment;
+ if (!mem_tag || mem_tag >= kShadowAlignment)
+ return 0;
+ if (*(u8 *)(ptr | (kShadowAlignment - 1)) != ptr_tag)
+ return 0;
+ return mem_tag;
+}
+
+__attribute__((always_inline, nodebug)) static inline bool
+PossiblyShortTagMatches(tag_t mem_tag, uptr ptr, uptr sz) {
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ if (ptr_tag == mem_tag)
+ return true;
+ if (mem_tag >= kShadowAlignment)
+ return false;
+ if ((ptr & (kShadowAlignment - 1)) + sz > mem_tag)
+ return false;
+ return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
+}
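+
+// Worked example for the short granule check above (assuming 16-byte
+// granules): a 5-byte allocation stores 5 in its shadow byte and the real
+// pointer tag in the granule's last byte. A 4-byte access at offset 0 gives
+// (0 + 4 > 5) == false, so the check falls through to comparing the pointer
+// tag against that last byte; a 4-byte access at offset 2 gives
+// (2 + 4 > 5) == true and traps as out of bounds.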
+
+template <ErrorAction EA, AccessType AT, unsigned LogSize>
+__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
+ if (!InTaggableRegion(p))
+ return;
+ uptr ptr_raw = p & ~kAddressTagMask;
+ tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
+ if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
+ SigTrap<EA, AT, LogSize>(p);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+}
+
+template <ErrorAction EA, AccessType AT>
+__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
+ uptr sz) {
+ if (sz == 0 || !InTaggableRegion(p))
+ return;
+ tag_t ptr_tag = GetTagFromPointer(p);
+ uptr ptr_raw = p & ~kAddressTagMask;
+ tag_t *shadow_first = (tag_t *)MemToShadow(ptr_raw);
+ tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
+ for (tag_t *t = shadow_first; t < shadow_last; ++t)
+ if (UNLIKELY(ptr_tag != *t)) {
+ SigTrap<EA, AT>(p, sz);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+ uptr end = p + sz;
+ uptr tail_sz = end & (kShadowAlignment - 1);
+ if (UNLIKELY(tail_sz != 0 &&
+ !PossiblyShortTagMatches(
+ *shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
+ SigTrap<EA, AT>(p, sz);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+}
+
+} // end namespace __hwasan
+
+#endif // HWASAN_CHECKS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
new file mode 100644
index 000000000000..48bc3b631ac0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
@@ -0,0 +1,148 @@
+//===-- hwasan_dynamic_shadow.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region and handles ifunc resolver case, when necessary.
+///
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_dynamic_shadow.h"
+
+#include <elf.h>
+#include <link.h>
+
+#include "hwasan.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
+// The code in this file needs to run in an unrelocated binary. It should not
+// access any external symbol, including its own non-hidden globals.
+
+#if SANITIZER_ANDROID
+extern "C" {
+
+INTERFACE_ATTRIBUTE void __hwasan_shadow();
+decltype(__hwasan_shadow)* __hwasan_premap_shadow();
+
+} // extern "C"
+
+namespace __hwasan {
+
+// We cannot call anything in libc here (see comment above), so we need to
+// assume the biggest allowed page size.
+// Android max page size is defined as 16k here:
+// https://android.googlesource.com/platform/bionic/+/main/libc/platform/bionic/page.h#41
+static constexpr uptr kMaxGranularity = 16384;
+
+// Conservative upper limit.
+static uptr PremapShadowSize() {
+ return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale, kMaxGranularity);
+}
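+
+// For scale (an illustrative calculation, not a guarantee): with a 48-bit
+// address space and kShadowScale == 4, this premaps roughly
+// 2^48 / 16 = 2^44 bytes (16 TiB) of address range for the shadow.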
+
+static uptr PremapShadow() {
+ return MapDynamicShadow(PremapShadowSize(), kShadowScale,
+ kShadowBaseAlignment, kHighMemEnd, kMaxGranularity);
+}
+
+static bool IsPremapShadowAvailable() {
+ const uptr shadow = reinterpret_cast<uptr>(&__hwasan_shadow);
+ const uptr resolver = reinterpret_cast<uptr>(&__hwasan_premap_shadow);
+  // shadow == resolver is how Android KitKat and older handle ifuncs.
+ // shadow == 0 just in case.
+ return shadow != 0 && shadow != resolver;
+}
+
+static uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
+ const uptr granularity = kMaxGranularity;
+ const uptr shadow_start = reinterpret_cast<uptr>(&__hwasan_shadow);
+ const uptr premap_shadow_size = PremapShadowSize();
+ const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
+
+ // We may have mapped too much. Release extra memory.
+ UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
+ return shadow_start;
+}
+
+} // namespace __hwasan
+
+extern "C" {
+
+decltype(__hwasan_shadow)* __hwasan_premap_shadow() {
+ // The resolver might be called multiple times. Map the shadow just once.
+ static __sanitizer::uptr shadow = 0;
+ if (!shadow)
+ shadow = __hwasan::PremapShadow();
+ return reinterpret_cast<decltype(__hwasan_shadow)*>(shadow);
+}
+
+// __hwasan_shadow is a "function" that has the same address as the first byte
+// of the shadow mapping.
+INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow")))
+void __hwasan_shadow();
+
+extern __attribute((weak, visibility("hidden"))) ElfW(Rela) __rela_iplt_start[],
+ __rela_iplt_end[];
+
+} // extern "C"
+
+namespace __hwasan {
+
+void InitShadowGOT() {
+ // Call the ifunc resolver for __hwasan_shadow and fill in its GOT entry. This
+ // needs to be done before other ifunc resolvers (which are handled by libc)
+ // because a resolver might read __hwasan_shadow.
+ typedef ElfW(Addr) (*ifunc_resolver_t)(void);
+ for (ElfW(Rela) *r = __rela_iplt_start; r != __rela_iplt_end; ++r) {
+ ElfW(Addr)* offset = reinterpret_cast<ElfW(Addr)*>(r->r_offset);
+ ElfW(Addr) resolver = r->r_addend;
+ if (resolver == reinterpret_cast<ElfW(Addr)>(&__hwasan_premap_shadow)) {
+ *offset = reinterpret_cast<ifunc_resolver_t>(resolver)();
+ break;
+ }
+ }
+}
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+ if (IsPremapShadowAvailable())
+ return FindPremappedShadowStart(shadow_size_bytes);
+ return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+ kHighMemEnd, kMaxGranularity);
+}
+
+} // namespace __hwasan
+
+#elif SANITIZER_FUCHSIA
+
+namespace __hwasan {
+
+void InitShadowGOT() {}
+
+} // namespace __hwasan
+
+#else
+namespace __hwasan {
+
+void InitShadowGOT() {}
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+# if defined(HWASAN_ALIASING_MODE)
+ constexpr uptr kAliasSize = 1ULL << kAddressTagShift;
+ constexpr uptr kNumAliases = 1ULL << kTagBits;
+ return MapDynamicShadowAndAliases(shadow_size_bytes, kAliasSize, kNumAliases,
+ RingBufferSize());
+# endif
+ return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+ kHighMemEnd, GetMmapGranularity());
+}
+
+} // namespace __hwasan
+
+#endif // SANITIZER_ANDROID
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.h
new file mode 100644
index 000000000000..3c2e7c716a34
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.h
@@ -0,0 +1,27 @@
+//===-- hwasan_dynamic_shadow.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_PREMAP_SHADOW_H
+#define HWASAN_PREMAP_SHADOW_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __hwasan {
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes);
+void InitShadowGOT();
+
+} // namespace __hwasan
+
+#endif // HWASAN_PREMAP_SHADOW_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
new file mode 100644
index 000000000000..bf700bf56838
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
@@ -0,0 +1,70 @@
+//===-- hwasan_exceptions.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_poisoning.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <unwind.h>
+
+using namespace __hwasan;
+using namespace __sanitizer;
+
+typedef _Unwind_Reason_Code PersonalityFn(int version, _Unwind_Action actions,
+ uint64_t exception_class,
+ _Unwind_Exception* unwind_exception,
+ _Unwind_Context* context);
+
+// Pointers to the _Unwind_GetGR and _Unwind_GetCFA functions are passed in
+// instead of being called directly. This is to handle cases where the unwinder
+// is statically linked and the sanitizer runtime and the program are linked
+// against different unwinders. The _Unwind_Context data structure is opaque so
+// it may be incompatible between unwinders.
+typedef uintptr_t GetGRFn(_Unwind_Context* context, int index);
+typedef uintptr_t GetCFAFn(_Unwind_Context* context);
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE _Unwind_Reason_Code
+__hwasan_personality_wrapper(int version, _Unwind_Action actions,
+ uint64_t exception_class,
+ _Unwind_Exception* unwind_exception,
+ _Unwind_Context* context,
+ PersonalityFn* real_personality, GetGRFn* get_gr,
+ GetCFAFn* get_cfa) {
+ _Unwind_Reason_Code rc;
+ if (real_personality)
+ rc = real_personality(version, actions, exception_class, unwind_exception,
+ context);
+ else
+ rc = _URC_CONTINUE_UNWIND;
+
+ // We only untag frames without a landing pad because landing pads are
+ // responsible for untagging the stack themselves if they resume.
+ //
+ // Here we assume that the frame record appears after any locals. This is not
+ // required by AAPCS but is a requirement for HWASAN instrumented functions.
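+  //
+  // Illustrative frame (made-up addresses): with cfa == 0x7f0000001000 and
+  // fp == 0x7f0000001080, the 0x80 bytes of dead stack in between are
+  // retagged below with the CFA pointer's tag, making them accessible again
+  // for future frames.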
+ if ((actions & _UA_CLEANUP_PHASE) && rc == _URC_CONTINUE_UNWIND) {
+#if defined(__x86_64__)
+ uptr fp = get_gr(context, 6); // rbp
+#elif defined(__aarch64__)
+ uptr fp = get_gr(context, 29); // x29
+#elif SANITIZER_RISCV64
+ uptr fp = get_gr(context, 8); // x8
+#else
+#error Unsupported architecture
+#endif
+ uptr sp = get_cfa(context);
+ TagMemory(UntagAddr(sp), UntagAddr(fp) - UntagAddr(sp),
+ GetTagFromPointer(sp));
+ }
+
+ return rc;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.h
new file mode 100644
index 000000000000..b17750158d02
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.h
@@ -0,0 +1,31 @@
+//===-- hwasan_flags.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef HWASAN_FLAGS_H
+#define HWASAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __hwasan {
+
+struct Flags {
+#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "hwasan_flags.inc"
+#undef HWASAN_FLAG
+
+ void SetDefaults();
+};
+
+Flags *flags();
+
+} // namespace __hwasan
+
+#endif // HWASAN_FLAGS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
new file mode 100644
index 000000000000..058a0457b9e7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
@@ -0,0 +1,93 @@
+//===-- hwasan_flags.inc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hwasan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef HWASAN_FLAG
+# error "Define HWASAN_FLAG prior to including this file!"
+#endif
+
+// HWASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+HWASAN_FLAG(bool, verbose_threads, false,
+ "inform on thread creation/destruction")
+HWASAN_FLAG(bool, tag_in_malloc, true, "")
+HWASAN_FLAG(bool, tag_in_free, true, "")
+HWASAN_FLAG(bool, print_stats, false, "")
+HWASAN_FLAG(bool, halt_on_error, true, "")
+HWASAN_FLAG(bool, atexit, false, "")
+HWASAN_FLAG(
+ bool, print_live_threads_info, true,
+ "If set, prints the remaining threads in report as an extra information.")
+
+// Test only flag to disable malloc/realloc/free memory tagging on startup.
+// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
+HWASAN_FLAG(bool, disable_allocator_tagging, false, "")
+
+// If false, use simple increment of a thread local counter to generate new
+// tags.
+HWASAN_FLAG(bool, random_tags, true, "")
+
+HWASAN_FLAG(
+ int, max_malloc_fill_size, 0,
+ "HWASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+
+HWASAN_FLAG(bool, free_checks_tail_magic, true,
+    "If set, free() will check the magic values "
+    "after the allocated object "
+    "if the allocation size is not a multiple of the granule size.")
+HWASAN_FLAG(
+ int, max_free_fill_size, 0,
+ "HWASan allocator flag. max_free_fill_size is the maximal amount of "
+ "bytes that will be filled with free_fill_byte during free.")
+HWASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+HWASAN_FLAG(int, free_fill_byte, 0x55,
+ "Value used to fill deallocated memory.")
+HWASAN_FLAG(int, heap_history_size, 1023,
+ "The number of heap (de)allocations remembered per thread. "
+ "Affects the quality of heap-related reports, but not the ability "
+ "to find bugs.")
+HWASAN_FLAG(bool, export_memory_stats, true,
+ "Export up-to-date memory stats through /proc")
+HWASAN_FLAG(int, stack_history_size, 1024,
+ "The number of stack frames remembered per thread. "
+ "Affects the quality of stack-related reports, but not the ability "
+ "to find bugs.")
+
+// Malloc / free bisection. Only tag malloc and free calls when a hash of
+// allocation size and stack trace is between malloc_bisect_left and
+// malloc_bisect_right (both inclusive). [0, 0] range is special and disables
+// bisection (i.e. everything is tagged). Once the range is narrowed down
+// enough, use malloc_bisect_dump to see interesting allocations.
+HWASAN_FLAG(uptr, malloc_bisect_left, 0,
+ "Left bound of malloc bisection, inclusive.")
+HWASAN_FLAG(uptr, malloc_bisect_right, 0,
+ "Right bound of malloc bisection, inclusive.")
+HWASAN_FLAG(bool, malloc_bisect_dump, false,
+ "Print all allocations within [malloc_bisect_left, "
+ "malloc_bisect_right] range ")
+
+
+// Exit if we fail to enable the AArch64 kernel ABI relaxation which allows
+// tagged pointers in syscalls. This is the default, but being able to disable
+// that behaviour is useful for running the testsuite on more platforms (the
+// testsuite can run since we manually ensure any pointer arguments to syscalls
+// are untagged before the call).
+HWASAN_FLAG(bool, fail_without_syscall_abi, true,
+ "Exit if fail to request relaxed syscall ABI.")
+
+HWASAN_FLAG(
+ uptr, fixed_shadow_base, -1,
+ "If not -1, HWASan will attempt to allocate the shadow at this address, "
+ "instead of choosing one dynamically."
+ "Tip: this can be combined with the compiler option, "
+ "-hwasan-mapping-offset, to optimize the instrumentation.")
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
new file mode 100644
index 000000000000..d1696f8aa796
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
@@ -0,0 +1,241 @@
+//===-- hwasan_fuchsia.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains Fuchsia-specific
+/// code.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+
+// This TLS variable contains the location of the stack ring buffer and can be
+// used to always find the hwasan thread object associated with the current
+// running thread.
+[[gnu::tls_model("initial-exec")]]
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL uptr __hwasan_tls;
+
+namespace __hwasan {
+
+bool InitShadow() {
+ __sanitizer::InitShadowBounds();
+ CHECK_NE(__sanitizer::ShadowBounds.shadow_limit, 0);
+
+ // These variables are used by MemIsShadow for asserting we have a correct
+ // shadow address. On Fuchsia, we only have one region of shadow, so the
+ // bounds of Low shadow can be zero while High shadow represents the true
+ // bounds. Note that these are inclusive ranges.
+ kLowShadowStart = 0;
+ kLowShadowEnd = 0;
+ kHighShadowStart = __sanitizer::ShadowBounds.shadow_base;
+ kHighShadowEnd = __sanitizer::ShadowBounds.shadow_limit - 1;
+
+ return true;
+}
+
+bool MemIsApp(uptr p) {
+ CHECK(GetTagFromPointer(p) == 0);
+ return __sanitizer::ShadowBounds.shadow_limit <= p &&
+ p <= (__sanitizer::ShadowBounds.memory_limit - 1);
+}
+
+// These are known parameters passed to the hwasan runtime on thread creation.
+struct Thread::InitState {
+ uptr stack_bottom, stack_top;
+};
+
+static void FinishThreadInitialization(Thread *thread);
+
+void InitThreads() {
+ // This is the minimal alignment needed for the storage where hwasan threads
+ // and their stack ring buffers are placed. This alignment is necessary so the
+ // stack ring buffer can perform a simple calculation to get the next element
+ // in the RB. The instructions for this calculation are emitted by the
+ // compiler. (Full explanation in hwasan_thread_list.h.)
+ uptr alloc_size = UINT64_C(1) << kShadowBaseAlignment;
+ uptr thread_start = reinterpret_cast<uptr>(
+ MmapAlignedOrDieOnFatalError(alloc_size, alloc_size, __func__));
+
+ InitThreadList(thread_start, alloc_size);
+
+ // Create the hwasan thread object for the current (main) thread. Stack info
+ // for this thread is known from information passed via
+ // __sanitizer_startup_hook.
+ const Thread::InitState state = {
+ .stack_bottom = __sanitizer::MainThreadStackBase,
+ .stack_top =
+ __sanitizer::MainThreadStackBase + __sanitizer::MainThreadStackSize,
+ };
+ FinishThreadInitialization(hwasanThreadList().CreateCurrentThread(&state));
+}
+
+uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
+
+// This is called from the parent thread before the new thread is created. Here
+// we can propagate known info like the stack bounds to Thread::Init before
+// jumping into the thread. We cannot initialize the stack ring buffer yet since
+// we have not entered the new thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+ const char *name, uptr stack_bottom,
+ uptr stack_size) {
+ const Thread::InitState state = {
+ .stack_bottom = stack_bottom,
+ .stack_top = stack_bottom + stack_size,
+ };
+ return hwasanThreadList().CreateCurrentThread(&state);
+}
+
+// This sets the stack top and bottom according to the InitState passed to
+// CreateCurrentThread above.
+void Thread::InitStackAndTls(const InitState *state) {
+ CHECK_NE(state->stack_bottom, 0);
+ CHECK_NE(state->stack_top, 0);
+ stack_bottom_ = state->stack_bottom;
+ stack_top_ = state->stack_top;
+ tls_end_ = tls_begin_ = 0;
+}
+
+// This is called after creating a new thread with the pointer returned by
+// BeforeThreadCreateHook. We are still in the creating thread and should check
+// if it was actually created correctly.
+static void ThreadCreateHook(void *hook, bool aborted) {
+ Thread *thread = static_cast<Thread *>(hook);
+ if (!aborted) {
+ // The thread was created successfully.
+ // ThreadStartHook can already be running in the new thread.
+ } else {
+ // The thread wasn't created after all.
+ // Clean up everything we set up in BeforeThreadCreateHook.
+ atomic_signal_fence(memory_order_seq_cst);
+ hwasanThreadList().ReleaseThread(thread);
+ }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by BeforeThreadCreateHook (above). Here we can
+// setup the stack ring buffer.
+static void ThreadStartHook(void *hook, thrd_t self) {
+ Thread *thread = static_cast<Thread *>(hook);
+ FinishThreadInitialization(thread);
+ thread->EnsureRandomStateInited();
+}
+
+// This is the function that sets up the stack ring buffer and enables us to use
+// GetCurrentThread. This function should only be called while IN the thread
+// that we want to create the hwasan thread object for so __hwasan_tls can be
+// properly referenced.
+static void FinishThreadInitialization(Thread *thread) {
+ CHECK_NE(thread, nullptr);
+
+ // The ring buffer is located immediately before the thread object.
+ uptr stack_buffer_size = hwasanThreadList().GetRingBufferSize();
+ uptr stack_buffer_start = reinterpret_cast<uptr>(thread) - stack_buffer_size;
+ thread->InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
+}
+
+static void ThreadExitHook(void *hook, thrd_t self) {
+ Thread *thread = static_cast<Thread *>(hook);
+ atomic_signal_fence(memory_order_seq_cst);
+ hwasanThreadList().ReleaseThread(thread);
+}
+
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+ CHECK(IsAligned(p, kShadowAlignment));
+ CHECK(IsAligned(size, kShadowAlignment));
+ __sanitizer_fill_shadow(p, size, tag,
+ common_flags()->clear_shadow_mmap_threshold);
+ return AddTagToPointer(p, tag);
+}
+
+// Not implemented because Fuchsia does not use signal handlers.
+void HwasanOnDeadlySignal(int signo, void *info, void *context) {}
+
+// Not implemented because Fuchsia does not use interceptors.
+void InitializeInterceptors() {}
+
+// Not implemented because this is only relevant for Android.
+void AndroidTestTlsSlot() {}
+
+// On Linux, TSD is used to run the hwasan thread-exit handler passed to
+// pthread_key_create. This is not needed on Fuchsia because we use
+// __sanitizer_thread_exit_hook instead.
+void HwasanTSDInit() {}
+void HwasanTSDThreadInit() {}
+
+// On Linux, this would just call `atexit(HwasanAtExit)`. The functions in
+// HwasanAtExit are unimplemented for Fuchsia and effectively no-ops, so this
+// function is unneeded.
+void InstallAtExitHandler() {}
+
+void HwasanInstallAtForkHandler() {}
+
+void InstallAtExitCheckLeaks() {}
+
+void InitializeOsSupport() {
+#ifdef __aarch64__
+ uint32_t features = 0;
+ CHECK_EQ(zx_system_get_features(ZX_FEATURE_KIND_ADDRESS_TAGGING, &features),
+ ZX_OK);
+ if (!(features & ZX_ARM64_FEATURE_ADDRESS_TAGGING_TBI) &&
+ flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: HWAddressSanitizer requires "
+ "ZX_ARM64_FEATURE_ADDRESS_TAGGING_TBI.\n");
+ Die();
+ }
+#endif
+}
+
+} // namespace __hwasan
+
+namespace __lsan {
+
+bool UseExitcodeOnLeak() { return __hwasan::flags()->halt_on_error; }
+
+} // namespace __lsan
+
+extern "C" {
+
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+ const char *name, void *stack_base,
+ size_t stack_size) {
+ return __hwasan::BeforeThreadCreateHook(
+ reinterpret_cast<uptr>(thread), detached, name,
+ reinterpret_cast<uptr>(stack_base), stack_size);
+}
+
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+ __hwasan::ThreadCreateHook(hook, error != thrd_success);
+}
+
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+ __hwasan::ThreadStartHook(hook, reinterpret_cast<uptr>(self));
+}
+
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
+ __hwasan::ThreadExitHook(hook, self);
+}
+
+void __sanitizer_module_loaded(const struct dl_phdr_info *info, size_t) {
+ __hwasan_library_loaded(info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum);
+}
+
+} // extern "C"
+
+#endif // SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
new file mode 100644
index 000000000000..7e0f3df20dd0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
@@ -0,0 +1,93 @@
+//===-- hwasan_globals.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer globals-specific runtime.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_globals.h"
+
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __hwasan {
+
+enum { NT_LLVM_HWASAN_GLOBALS = 3 };
+struct hwasan_global_note {
+ s32 begin_relptr;
+ s32 end_relptr;
+};
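+
+// For orientation (an illustrative layout, matching the parsing below): in
+// the PT_NOTE the descriptor is preceded by a standard ELF note header and
+// name:
+//
+//   ElfW(Nhdr){n_namesz = 5, n_descsz = 8, n_type = NT_LLVM_HWASAN_GLOBALS}
+//   "LLVM\0" padded to a 4-byte boundary
+//   hwasan_global_note{begin_relptr, end_relptr}  // relative to the note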
+
+// Check that the given library meets the code model requirements for tagged
+// globals. These properties are not checked at link time so they need to be
+// checked at runtime.
+static void CheckCodeModel(ElfW(Addr) base, const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum) {
+ ElfW(Addr) min_addr = -1ull, max_addr = 0;
+ for (unsigned i = 0; i != phnum; ++i) {
+ if (phdr[i].p_type != PT_LOAD)
+ continue;
+ ElfW(Addr) lo = base + phdr[i].p_vaddr, hi = lo + phdr[i].p_memsz;
+ if (min_addr > lo)
+ min_addr = lo;
+ if (max_addr < hi)
+ max_addr = hi;
+ }
+
+ if (max_addr - min_addr > 1ull << 32) {
+ Report("FATAL: HWAddressSanitizer: library size exceeds 2^32\n");
+ Die();
+ }
+ if (max_addr > 1ull << 48) {
+ Report("FATAL: HWAddressSanitizer: library loaded above address 2^48\n");
+ Die();
+ }
+}
+
+ArrayRef<const hwasan_global> HwasanGlobalsFor(ElfW(Addr) base,
+ const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum) {
+ // Read the phdrs from this DSO.
+ for (unsigned i = 0; i != phnum; ++i) {
+ if (phdr[i].p_type != PT_NOTE)
+ continue;
+
+ const char *note = reinterpret_cast<const char *>(base + phdr[i].p_vaddr);
+ const char *nend = note + phdr[i].p_memsz;
+
+ // Traverse all the notes until we find a HWASan note.
+ while (note < nend) {
+ auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(note);
+ const char *name = note + sizeof(ElfW(Nhdr));
+ const char *desc = name + RoundUpTo(nhdr->n_namesz, 4);
+
+ // Discard non-HWASan-Globals notes.
+ if (nhdr->n_type != NT_LLVM_HWASAN_GLOBALS ||
+ internal_strcmp(name, "LLVM") != 0) {
+ note = desc + RoundUpTo(nhdr->n_descsz, 4);
+ continue;
+ }
+
+ // Only libraries with instrumented globals need to be checked against the
+ // code model since they use relocations that aren't checked at link time.
+ CheckCodeModel(base, phdr, phnum);
+
+ auto *global_note = reinterpret_cast<const hwasan_global_note *>(desc);
+ auto *globals_begin = reinterpret_cast<const hwasan_global *>(
+ note + global_note->begin_relptr);
+ auto *globals_end = reinterpret_cast<const hwasan_global *>(
+ note + global_note->end_relptr);
+
+ return {globals_begin, globals_end};
+ }
+ }
+
+ return {};
+}
+
+} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
new file mode 100644
index 000000000000..94cd53e1888c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
@@ -0,0 +1,50 @@
+//===-- hwasan_globals.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Private Hwasan header.
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_GLOBALS_H
+#define HWASAN_GLOBALS_H
+
+#include <link.h>
+
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __hwasan {
+// This object should only ever be cast over the global descriptor in the ELF
+// PT_NOTE (i.e. never constructed) in order for `addr()` to work correctly.
+struct hwasan_global {
+ // The size of this global variable. Note that the size in the descriptor is
+ // max 1 << 24. Larger globals have multiple descriptors.
+ uptr size() const { return info & 0xffffff; }
+ // The fully-relocated address of this global.
+ uptr addr() const { return reinterpret_cast<uintptr_t>(this) + gv_relptr; }
+ // The static tag of this global.
+ u8 tag() const { return info >> 24; };
+
+ // The relative address between the start of the descriptor for the HWASan
+ // global (in the PT_NOTE), and the fully relocated address of the global.
+ s32 gv_relptr;
+ u32 info;
+};
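+
+// Example with hypothetical values: a 16-byte global tagged 0x2b is described
+// by info == 0x2b000010, so size() == 16 and tag() == 0x2b; gv_relptr is the
+// signed offset from this descriptor to the global itself.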
+
+// Walk through the specific DSO (as specified by the base, phdr, and phnum),
+// and return the range of the [beginning, end) of the HWASan globals descriptor
+// array.
+ArrayRef<const hwasan_global> HwasanGlobalsFor(ElfW(Addr) base,
+ const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum);
+
+} // namespace __hwasan
+
+#endif // HWASAN_GLOBALS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_ignorelist.txt b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_ignorelist.txt
new file mode 100644
index 000000000000..70590c970f55
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_ignorelist.txt
@@ -0,0 +1,7 @@
+# Ignorelist for HWAddressSanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set location of ignorelist
+# at compile-time using -fsanitize-ignorelist=<path> flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
new file mode 100644
index 000000000000..c10b5c158548
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -0,0 +1,549 @@
+//===-- hwasan_interceptors.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Interceptors for standard library functions.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.h
+//===----------------------------------------------------------------------===//
+
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_checks.h"
+#include "hwasan_mapping.h"
+#include "hwasan_platform_interceptors.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+#if !SANITIZER_FUCHSIA
+
+using namespace __hwasan;
+
+struct HWAsanInterceptorContext {
+ const char *interceptor_name;
+};
+
+# define ACCESS_MEMORY_RANGE(offset, size, access) \
+ do { \
+ __hwasan::CheckAddressSized<ErrorAction::Recover, access>((uptr)offset, \
+ size); \
+ } while (0)
+
+# define HWASAN_READ_RANGE(offset, size) \
+ ACCESS_MEMORY_RANGE(offset, size, AccessType::Load)
+# define HWASAN_WRITE_RANGE(offset, size) \
+ ACCESS_MEMORY_RANGE(offset, size, AccessType::Store)
+
+# if !SANITIZER_APPLE
+# define HWASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s'\n", #name); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s@@%s'\n", \
+ #name, ver); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport( \
+ 1, "HWAddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, ver, #name); \
+ } while (0)
+
+# else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+# define HWASAN_INTERCEPT_FUNC(name)
+# endif // SANITIZER_APPLE
+
+# if HWASAN_WITH_INTERCEPTORS
+
+# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) HWASAN_READ_RANGE(p, s)
+# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) HWASAN_WRITE_RANGE(p, s)
+# define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+# define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+# include "sanitizer_common/sanitizer_common_syscalls.inc"
+# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ HWASAN_WRITE_RANGE(ptr, size)
+
+# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ HWASAN_READ_RANGE(ptr, size)
+
+# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ HWAsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ do { \
+ (void)(ctx); \
+ (void)(func); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ (void)(ctx); \
+ (void)(path); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ (void)(newfd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ do { \
+ (void)(ctx); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ (void)(ctx); \
+ (void)(thread); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_BLOCK_REAL(name) \
+ do { \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (MemIsApp(UntagAddr(reinterpret_cast<uptr>(dst))) && \
+ common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+
+# define COMMON_INTERCEPTOR_STRERROR() \
+ do { \
+ } while (false)
+
+# define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)
+
+# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!hwasan_inited)
+
+// The main purpose of the mmap interceptor is to prevent the user from
+// allocating on top of shadow pages.
+//
+// For compatibility, it does not tag pointers, nor does it allow
+// MAP_FIXED in combination with a tagged pointer. (Since mmap itself
+// will not return a tagged pointer, the tagged pointer must have come
+// from elsewhere, such as the secondary allocator, which makes it a
+// very odd use case.)
+template <class Mmap>
+static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ if (addr) {
+ if (flags & map_fixed) CHECK_EQ(addr, UntagPtr(addr));
+
+ addr = UntagPtr(addr);
+ }
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ void *end_addr = (char *)addr + (rounded_length - 1);
+ if (addr && length &&
+ (!MemIsApp(reinterpret_cast<uptr>(addr)) ||
+ !MemIsApp(reinterpret_cast<uptr>(end_addr)))) {
+ // User requested an address that is incompatible with HWASan's
+ // memory layout. Use a different address if allowed, else fail.
+ if (flags & map_fixed) {
+ errno = errno_EINVAL;
+ return (void *)-1;
+ } else {
+ addr = nullptr;
+ }
+ }
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (length && res != (void *)-1) {
+ uptr beg = reinterpret_cast<uptr>(res);
+ DCHECK(IsAligned(beg, GetPageSize()));
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ // Application has attempted to map more memory than is supported by
+ // HWASan. Act as if we ran out of memory.
+ internal_munmap(res, length);
+ errno = errno_ENOMEM;
+ return (void *)-1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+
+ return res;
+}
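
A standalone sketch of the validate-then-retry pattern used by `mmap_interceptor` above. `mem_is_app()` is a hypothetical placeholder for `MemIsApp()`, and the sketch omits the untagging of the hint and the retagging of the result that the real interceptor performs.

```cpp
#include <sys/mman.h>
#include <cerrno>
#include <cstdint>

static bool mem_is_app(uintptr_t) { return true; }  // placeholder predicate

void *checked_mmap(void *addr, size_t len, int prot, int flags, int fd,
                   off_t off) {
  uintptr_t beg = reinterpret_cast<uintptr_t>(addr);
  if (addr && len && (!mem_is_app(beg) || !mem_is_app(beg + len - 1))) {
    if (flags & MAP_FIXED) {  // the caller insisted on a bad address
      errno = EINVAL;
      return MAP_FAILED;
    }
    addr = nullptr;  // otherwise, let the kernel pick a compatible address
  }
  return mmap(addr, len, prot, flags, fd, off);
}
```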
+
+template <class Munmap>
+static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
+  // We should not tag if munmap fails, but it is too late to tag after
+  // real_munmap, as the pages could be mmapped by another thread.
+ uptr beg = reinterpret_cast<uptr>(addr);
+ if (length && IsAligned(beg, GetPageSize())) {
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ // Protect from unmapping the shadow.
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ errno = errno_EINVAL;
+ return -1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+ return real_munmap(addr, length);
+}
+
+# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
+ fd, offset) \
+ do { \
+ (void)(ctx); \
+    return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd,   \
+                            offset);                                     \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
+ do { \
+ (void)(ctx); \
+    return munmap_interceptor(REAL(munmap), addr, length);               \
+ } while (false)
+
+# include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+# include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+struct ThreadStartArg {
+ __sanitizer_sigset_t starting_sigset_;
+};
+
+static void *HwasanThreadStartFunc(void *arg) {
+ __hwasan_thread_enter();
+ SetSigProcMask(&reinterpret_cast<ThreadStartArg *>(arg)->starting_sigset_,
+ nullptr);
+ InternalFree(arg);
+ auto self = GetThreadSelf();
+ auto args = hwasanThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ hwasanThreadArgRetval().Finish(self, retval);
+ return retval;
+}
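
A hedged sketch of the handshake above: the parent registers the user's routine via Create() before the thread runs, the child fetches it with GetArgs() on entry, and Finish() records the return value for a later join. The real registry is ThreadArgRetval in sanitizer_common; `ToyRegistry` and its members here are illustrative only.

```cpp
#include <cstdint>
#include <map>

struct Args {
  void *(*routine)(void *);
  void *arg_retval;
};

class ToyRegistry {
  std::map<uint64_t, Args> args_;
  std::map<uint64_t, void *> retvals_;

 public:
  void Create(uint64_t tid, Args a) { args_[tid] = a; }        // parent side
  Args GetArgs(uint64_t tid) { return args_[tid]; }            // child entry
  void Finish(uint64_t tid, void *rv) { retvals_[tid] = rv; }  // child exit
};
```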
+
+extern "C" {
+int pthread_attr_getdetachstate(void *attr, int *v);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*callback)(void *), void *param) {
+ EnsureMainThreadIDIsCorrect();
+ ScopedTaggingDisabler tagging_disabler;
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+ ThreadStartArg *A = (ThreadStartArg *)InternalAlloc(sizeof(ThreadStartArg));
+ ScopedBlockSignals block(&A->starting_sigset_);
+ // ASAN uses the same approach to disable leaks from pthread_create.
+# if CAN_SANITIZE_LEAKS
+ __lsan::ScopedInterceptorDisabler lsan_disabler;
+# endif
+
+ int result;
+ hwasanThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(thread, attr, &HwasanThreadStartFunc, A);
+ return result ? 0 : *(uptr *)(thread);
+ });
+ if (result != 0)
+ InternalFree(A);
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ hwasanThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(void, pthread_exit, void *retval) {
+ hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
+ REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_GLIBC
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
+}
+# endif
+
+DEFINE_INTERNAL_PTHREAD_FUNCTIONS
+
+DEFINE_REAL(int, vfork,)
+DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork,)
+
+// Get and/or change the set of blocked signals.
+extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
+ __hw_sigset_t *__restrict __oset);
+# define SIG_BLOCK 0
+# define SIG_SETMASK 2
+extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
+ env[0].__magic = kHwJmpBufMagic;
+ env[0].__mask_was_saved =
+ (savemask &&
+ sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0, &env[0].__saved_mask) == 0);
+ return 0;
+}
+
+static void __attribute__((always_inline))
+InternalLongjmp(__hw_register_buf env, int retval) {
+# if defined(__aarch64__)
+ constexpr size_t kSpIndex = 13;
+# elif defined(__x86_64__)
+ constexpr size_t kSpIndex = 6;
+# elif SANITIZER_RISCV64
+ constexpr size_t kSpIndex = 13;
+# endif
+
+ // Clear all memory tags on the stack between here and where we're going.
+ unsigned long long stack_pointer = env[kSpIndex];
+ // The stack pointer should never be tagged, so we don't need to clear the
+ // tag for this function call.
+ __hwasan_handle_longjmp((void *)stack_pointer);
+
+  // Run code for handling a longjmp.
+  // We need a register that will not be loaded from the environment buffer,
+  // hence the explicit register annotations below.
+  // We must implement this ourselves, since we don't know the order of
+  // registers in different libc implementations, and many implementations
+  // mangle the stack pointer, so we can't use it without knowing the
+  // demangling scheme.
+# if defined(__aarch64__)
+ register long int retval_tmp asm("x1") = retval;
+ register void *env_address asm("x0") = &env[0];
+ asm volatile(
+ "ldp x19, x20, [%0, #0<<3];"
+ "ldp x21, x22, [%0, #2<<3];"
+ "ldp x23, x24, [%0, #4<<3];"
+ "ldp x25, x26, [%0, #6<<3];"
+ "ldp x27, x28, [%0, #8<<3];"
+ "ldp x29, x30, [%0, #10<<3];"
+ "ldp d8, d9, [%0, #14<<3];"
+ "ldp d10, d11, [%0, #16<<3];"
+ "ldp d12, d13, [%0, #18<<3];"
+ "ldp d14, d15, [%0, #20<<3];"
+ "ldr x5, [%0, #13<<3];"
+ "mov sp, x5;"
+ // Return the value requested to return through arguments.
+ // This should be in x1 given what we requested above.
+ "cmp %1, #0;"
+ "mov x0, #1;"
+ "csel x0, %1, x0, ne;"
+ "br x30;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
+# elif defined(__x86_64__)
+ register long int retval_tmp asm("%rsi") = retval;
+ register void *env_address asm("%rdi") = &env[0];
+ asm volatile(
+ // Restore registers.
+ "mov (0*8)(%0),%%rbx;"
+ "mov (1*8)(%0),%%rbp;"
+ "mov (2*8)(%0),%%r12;"
+ "mov (3*8)(%0),%%r13;"
+ "mov (4*8)(%0),%%r14;"
+ "mov (5*8)(%0),%%r15;"
+ "mov (6*8)(%0),%%rsp;"
+ "mov (7*8)(%0),%%rdx;"
+ // Return 1 if retval is 0.
+ "mov $1,%%rax;"
+ "test %1,%1;"
+ "cmovnz %1,%%rax;"
+ "jmp *%%rdx;" ::"r"(env_address),
+ "r"(retval_tmp));
+# elif SANITIZER_RISCV64
+ register long int retval_tmp asm("x11") = retval;
+ register void *env_address asm("x10") = &env[0];
+ asm volatile(
+ "ld ra, 0<<3(%0);"
+ "ld s0, 1<<3(%0);"
+ "ld s1, 2<<3(%0);"
+ "ld s2, 3<<3(%0);"
+ "ld s3, 4<<3(%0);"
+ "ld s4, 5<<3(%0);"
+ "ld s5, 6<<3(%0);"
+ "ld s6, 7<<3(%0);"
+ "ld s7, 8<<3(%0);"
+ "ld s8, 9<<3(%0);"
+ "ld s9, 10<<3(%0);"
+ "ld s10, 11<<3(%0);"
+ "ld s11, 12<<3(%0);"
+# if __riscv_float_abi_double
+ "fld fs0, 14<<3(%0);"
+ "fld fs1, 15<<3(%0);"
+ "fld fs2, 16<<3(%0);"
+ "fld fs3, 17<<3(%0);"
+ "fld fs4, 18<<3(%0);"
+ "fld fs5, 19<<3(%0);"
+ "fld fs6, 20<<3(%0);"
+ "fld fs7, 21<<3(%0);"
+ "fld fs8, 22<<3(%0);"
+ "fld fs9, 23<<3(%0);"
+ "fld fs10, 24<<3(%0);"
+ "fld fs11, 25<<3(%0);"
+# elif __riscv_float_abi_soft
+# else
+# error "Unsupported case"
+# endif
+ "ld a4, 13<<3(%0);"
+ "mv sp, a4;"
+ // Return the value requested to return through arguments.
+ // This should be in x11 given what we requested above.
+ "seqz a0, %1;"
+ "add a0, a0, %1;"
+ "ret;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
+# endif
+}
+
+INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
+ if (env[0].__magic != kHwJmpBufMagic) {
+ Printf(
+ "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
+ "there is a bug in HWASan.\n");
+ return REAL(siglongjmp)(env, val);
+ }
+
+ if (env[0].__mask_was_saved)
+ // Restore the saved signal mask.
+ (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask, (__hw_sigset_t *)0);
+ InternalLongjmp(env[0].__jmpbuf, val);
+}
+
+// Required since glibc libpthread calls __libc_longjmp on pthread_exit, and
+// _setjmp on start_thread. Hence we have to intercept the longjmp on
+// pthread_exit so the __hw_jmp_buf layout matches what our setjmp wrote.
+INTERCEPTOR(void, __libc_longjmp, __hw_jmp_buf env, int val) {
+ if (env[0].__magic != kHwJmpBufMagic)
+ return REAL(__libc_longjmp)(env, val);
+ InternalLongjmp(env[0].__jmpbuf, val);
+}
+
+INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
+ if (env[0].__magic != kHwJmpBufMagic) {
+ Printf(
+ "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
+ "there is a bug in HWASan.\n");
+ return REAL(longjmp)(env, val);
+ }
+ InternalLongjmp(env[0].__jmpbuf, val);
+}
+# undef SIG_BLOCK
+# undef SIG_SETMASK
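
A hedged illustration (not part of the runtime) of why the longjmp interception above matters: a longjmp skips the epilogues of the frames it unwinds past, so their stack tags would otherwise be left stale. The interceptor calls __hwasan_handle_longjmp to retag the skipped region.

```cpp
#include <csetjmp>

static std::jmp_buf buf;

static void deep() {
  char local[64];        // this frame's locals may be stack-tagged
  (void)local;
  std::longjmp(buf, 1);  // skips deep()'s epilogue and its untagging
}

int main() {
  if (setjmp(buf) == 0)
    deep();
  return 0;  // tags between the two stack pointers were cleared for us
}
```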
+
+# endif // HWASAN_WITH_INTERCEPTORS
+
+namespace __hwasan {
+
+int OnExit() {
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+ __lsan::HasReportedLeaks()) {
+ return common_flags()->exitcode;
+ }
+ // FIXME: ask frontend whether we need to return failure.
+ return 0;
+}
+
+} // namespace __hwasan
+
+namespace __hwasan {
+
+void InitializeInterceptors() {
+ static int inited = 0;
+ CHECK_EQ(inited, 0);
+
+# if HWASAN_WITH_INTERCEPTORS
+ __interception::DoesNotSupportStaticLinking();
+ InitializeCommonInterceptors();
+
+ (void)(read_iovec);
+ (void)(write_iovec);
+
+# if defined(__linux__)
+ INTERCEPT_FUNCTION(__libc_longjmp);
+ INTERCEPT_FUNCTION(longjmp);
+ INTERCEPT_FUNCTION(siglongjmp);
+ INTERCEPT_FUNCTION(vfork);
+# endif // __linux__
+ INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+# if SANITIZER_GLIBC
+ INTERCEPT_FUNCTION(pthread_tryjoin_np);
+ INTERCEPT_FUNCTION(pthread_timedjoin_np);
+# endif
+# endif
+
+ inited = 1;
+}
+} // namespace __hwasan
+
+#endif // #if !SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
new file mode 100644
index 000000000000..fd20825e3dac
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
@@ -0,0 +1,14 @@
+#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
+
+#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
+#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
+#define COMMON_INTERCEPTOR_HANDLE_VFORK __hwasan_handle_vfork
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
new file mode 100644
index 000000000000..8f2f77dad917
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
@@ -0,0 +1,252 @@
+//===-- hwasan_interface_internal.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Private Hwasan interface header.
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_INTERFACE_INTERNAL_H
+#define HWASAN_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include <link.h>
+
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init_static();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
+ ElfW(Half) phnum);
+
+using __sanitizer::uptr;
+using __sanitizer::sptr;
+using __sanitizer::uu64;
+using __sanitizer::uu32;
+using __sanitizer::uu16;
+using __sanitizer::u64;
+using __sanitizer::u32;
+using __sanitizer::u16;
+using __sanitizer::u8;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init_frames(uptr, uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+extern uptr __hwasan_shadow_memory_dynamic_address;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_noabort(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_noabort(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_noabort(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_noabort(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __hwasan_tag_pointer(uptr p, u8 tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u8 __hwasan_get_tag_from_pointer(uptr p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_mismatch(uptr addr, u8 ts);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u8 __hwasan_generate_tag();
+
+// Returns the offset of the first tag mismatch or -1 if the whole range is
+// good.
+SANITIZER_INTERFACE_ATTRIBUTE
+sptr __hwasan_test_shadow(const void *x, uptr size);
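
A minimal usage sketch (compiled with -fsanitize=hwaddress and linked against the runtime), probing a heap buffer with the public declaration of this function from sanitizer/hwasan_interface.h:

```cpp
#include <sanitizer/hwasan_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  char *p = static_cast<char *>(malloc(32));
  // -1 means every granule in [p, p+32) matches the pointer's tag.
  if (__hwasan_test_shadow(p, 32) == -1)
    std::printf("range is addressable\n");
  free(p);
  // free() retags the memory, so a non-negative mismatch offset is the
  // typical result here (tag reuse can occasionally make it -1 again).
  std::printf("first bad offset: %ld\n", (long)__hwasan_test_shadow(p, 32));
}
```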
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+/* OPTIONAL */ const char* __hwasan_default_options();
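
A minimal sketch: user code can provide this weak hook (analogous to __asan_default_options) to change runtime defaults without setting HWASAN_OPTIONS in the environment. halt_on_error is a flag referenced elsewhere in this patch; other flag names would need checking against hwasan_flags.inc.

```cpp
extern "C" const char *__hwasan_default_options() {
  return "halt_on_error=0";
}
```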
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_print_shadow(const void *x, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_handle_longjmp(const void *sp_dst);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_handle_vfork(const void *sp_dst);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *p, u16 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *p, u32 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *p, u64 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_enable_allocator_tagging();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_disable_allocator_tagging();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_thread_enter();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_thread_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_print_memory_usage();
+
+// The compiler will generate calls to this function when
+// `-hwasan-record-stack-history-with-calls` is passed as a flag, adding frame
+// record information to the stack ring buffer. This is an alternative to
+// having the compiler emit instructions in the prologue that access the ring
+// buffer directly.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_add_frame_record(u64 frame_record_info);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memcpy(void *dst, const void *src, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memset(void *s, int c, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memmove(void *dest, const void *src, uptr n);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memcpy_match_all(void *dst, const void *src, uptr size, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memset_match_all(void *s, int c, uptr n, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_set_error_report_callback(void (*callback)(const char *));
+} // extern "C"
+
+#endif // HWASAN_INTERFACE_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
new file mode 100644
index 000000000000..68294b596256
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -0,0 +1,596 @@
+//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
+/// FreeBSD-specific code.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+
+# include <dlfcn.h>
+# include <elf.h>
+# include <errno.h>
+# include <link.h>
+# include <pthread.h>
+# include <signal.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <sys/prctl.h>
+# include <sys/resource.h>
+# include <sys/time.h>
+# include <unistd.h>
+# include <unwind.h>
+
+# include "hwasan.h"
+# include "hwasan_dynamic_shadow.h"
+# include "hwasan_interface_internal.h"
+# include "hwasan_mapping.h"
+# include "hwasan_report.h"
+# include "hwasan_thread.h"
+# include "hwasan_thread_list.h"
+# include "sanitizer_common/sanitizer_common.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+# include "sanitizer_common/sanitizer_stackdepot.h"
+
+// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
+//
+// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
+// Not currently tested.
+// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
+// Integration tests downstream exist.
+// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
+// Tested with check-hwasan on x86_64-linux.
+// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
+// Tested with check-hwasan on aarch64-linux-android.
+# if !SANITIZER_ANDROID
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL uptr __hwasan_tls;
+# endif
+
+namespace __hwasan {
+
+// With a zero shadow base we cannot actually map pages starting from 0.
+// This constant is somewhat arbitrary.
+constexpr uptr kZeroBaseShadowStart = 0;
+constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;
+
+static void ProtectGap(uptr addr, uptr size) {
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
+}
+
+uptr kLowMemStart;
+uptr kLowMemEnd;
+uptr kHighMemStart;
+uptr kHighMemEnd;
+
+static void PrintRange(uptr start, uptr end, const char *name) {
+ Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
+}
+
+static void PrintAddressSpaceLayout() {
+ PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
+ if (kHighShadowEnd + 1 < kHighMemStart)
+ PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
+ PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
+ else
+    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
+ PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
+ PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
+ CHECK_EQ(0, kLowMemStart);
+}
+
+static uptr GetHighMemEnd() {
+ // HighMem covers the upper part of the address space.
+ uptr max_address = GetMaxUserVirtualAddress();
+ // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
+ // properly aligned:
+ max_address |= (GetMmapGranularity() << kShadowScale) - 1;
+ return max_address;
+}
+
+static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
+ // FIXME: Android should init flags before shadow.
+ if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
+ __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
+ uptr beg = __hwasan_shadow_memory_dynamic_address;
+ uptr end = beg + shadow_size_bytes;
+ if (!MemoryRangeIsAvailable(beg, end)) {
+ Report(
+ "FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
+ (void *)beg, (void *)end);
+ DumpProcessMap();
+ CHECK(MemoryRangeIsAvailable(beg, end));
+ }
+ } else {
+ __hwasan_shadow_memory_dynamic_address =
+ FindDynamicShadowStart(shadow_size_bytes);
+ }
+}
+
+static void MaybeDieIfNoTaggingAbi(const char *message) {
+ if (!flags()->fail_without_syscall_abi)
+ return;
+ Printf("FATAL: %s\n", message);
+ Die();
+}
+
+# define PR_SET_TAGGED_ADDR_CTRL 55
+# define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+# define ARCH_GET_UNTAG_MASK 0x4001
+# define ARCH_ENABLE_TAGGED_ADDR 0x4002
+# define ARCH_GET_MAX_TAG_BITS 0x4003
+
+static bool CanUseTaggingAbi() {
+# if defined(__x86_64__)
+ unsigned long num_bits = 0;
+ // Check for x86 LAM support. This API is based on a currently unsubmitted
+ // patch to the Linux kernel (as of August 2022) and is thus subject to
+ // change. The patch is here:
+ // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
+ //
+ // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
+ // bits the user can request, or zero if LAM is not supported by the hardware.
+ if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
+ reinterpret_cast<uptr>(&num_bits))))
+ return false;
+ // The platform must provide enough bits for HWASan tags.
+ if (num_bits < kTagBits)
+ return false;
+ return true;
+# else
+ // Check for ARM TBI support.
+ return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
+# endif // __x86_64__
+}
+
+static bool EnableTaggingAbi() {
+# if defined(__x86_64__)
+ // Enable x86 LAM tagging for the process.
+ //
+ // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
+ // tag bits requested by the user does not exceed that provided by the system.
+ // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
+ // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
+ // is not supported by the hardware.
+ if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
+ return false;
+ unsigned long mask = 0;
+ // Make sure the tag bits are where we expect them to be.
+ if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
+ reinterpret_cast<uptr>(&mask))))
+ return false;
+ // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
+ // bits. Therefore these masks must not overlap.
+ if (mask & kAddressTagMask)
+ return false;
+ return true;
+# else
+ // Enable ARM TBI tagging for the process. If for some reason tagging is not
+ // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
+ // -EINVAL.
+ if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
+ return false;
+ // Ensure that TBI is enabled.
+ if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
+ PR_TAGGED_ADDR_ENABLE)
+ return false;
+ return true;
+# endif // __x86_64__
+}
+
+void InitializeOsSupport() {
+ // Check we're running on a kernel that can use the tagged address ABI.
+ bool has_abi = CanUseTaggingAbi();
+
+ if (!has_abi) {
+# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
+    // Some older Android kernels have the tagged pointer ABI enabled
+    // unconditionally, and hence lack the tagged-addr prctl while still
+    // allowing the ABI.
+    // If we are targeting Android and the prctl is not available, we assume
+    // this is the case.
+ return;
+# else
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer requires a kernel with tagged address ABI.");
+# endif
+ }
+
+ if (EnableTaggingAbi())
+ return;
+
+# if SANITIZER_ANDROID
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
+ "Check the `sysctl abi.tagged_addr_disabled` configuration.");
+# else
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
+# endif
+}
+
+bool InitShadow() {
+ // Define the entire memory range.
+ kHighMemEnd = GetHighMemEnd();
+
+ // Determine shadow memory base offset.
+ InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));
+
+ // Place the low memory first.
+ kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
+ kLowMemStart = 0;
+
+ // Define the low shadow based on the already placed low memory.
+ kLowShadowEnd = MemToShadow(kLowMemEnd);
+ kLowShadowStart = __hwasan_shadow_memory_dynamic_address;
+
+ // High shadow takes whatever memory is left up there (making sure it is not
+ // interfering with low memory in the fixed case).
+ kHighShadowEnd = MemToShadow(kHighMemEnd);
+ kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;
+
+ // High memory starts where allocated shadow allows.
+ kHighMemStart = ShadowToMem(kHighShadowStart);
+
+ // Check the sanity of the defined memory ranges (there might be gaps).
+ CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
+ CHECK_GT(kHighMemStart, kHighShadowEnd);
+ CHECK_GT(kHighShadowEnd, kHighShadowStart);
+ CHECK_GT(kHighShadowStart, kLowMemEnd);
+ CHECK_GT(kLowMemEnd, kLowMemStart);
+ CHECK_GT(kLowShadowEnd, kLowShadowStart);
+ CHECK_GT(kLowShadowStart, kLowMemEnd);
+
+ // Reserve shadow memory.
+ ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
+ ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");
+
+ // Protect all the gaps.
+ ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
+ if (kHighShadowEnd + 1 < kHighMemStart)
+ ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
+
+ if (Verbosity())
+ PrintAddressSpaceLayout();
+
+ return true;
+}
+
+void InitThreads() {
+ CHECK(__hwasan_shadow_memory_dynamic_address);
+ uptr guard_page_size = GetMmapGranularity();
+ uptr thread_space_start =
+ __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
+ uptr thread_space_end =
+ __hwasan_shadow_memory_dynamic_address - guard_page_size;
+ ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
+ "hwasan threads", /*madvise_shadow*/ false);
+ ProtectGap(thread_space_end,
+ __hwasan_shadow_memory_dynamic_address - thread_space_end);
+ InitThreadList(thread_space_start, thread_space_end - thread_space_start);
+ hwasanThreadList().CreateCurrentThread();
+}
+
+bool MemIsApp(uptr p) {
+// Memory outside the alias range has non-zero tags.
+# if !defined(HWASAN_ALIASING_MODE)
+ CHECK_EQ(GetTagFromPointer(p), 0);
+# endif
+
+ return (p >= kHighMemStart && p <= kHighMemEnd) ||
+ (p >= kLowMemStart && p <= kLowMemEnd);
+}
+
+void InstallAtExitHandler() { atexit(HwasanAtExit); }
+
+// ---------------------- TSD ---------------- {{{1
+
+# if HWASAN_WITH_INTERCEPTORS
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+
+void HwasanTSDThreadInit() {
+ if (tsd_key_inited)
+ CHECK_EQ(0, pthread_setspecific(tsd_key,
+ (void *)GetPthreadDestructorIterations()));
+}
+
+void HwasanTSDDtor(void *tsd) {
+ uptr iterations = (uptr)tsd;
+ if (iterations > 1) {
+ CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
+ return;
+ }
+ __hwasan_thread_exit();
+}
+
+void HwasanTSDInit() {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
+}
+# else
+void HwasanTSDInit() {}
+void HwasanTSDThreadInit() {}
+# endif
+
+# if SANITIZER_ANDROID
+uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
+# else
+uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
+# endif
+
+# if SANITIZER_ANDROID
+void AndroidTestTlsSlot() {
+ uptr kMagicValue = 0x010203040A0B0C0D;
+ uptr *tls_ptr = GetCurrentThreadLongPtr();
+ uptr old_value = *tls_ptr;
+ *tls_ptr = kMagicValue;
+ dlerror();
+ if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
+ Printf(
+ "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
+ "for dlerror().\n");
+ Die();
+ }
+ *tls_ptr = old_value;
+}
+# else
+void AndroidTestTlsSlot() {}
+# endif
+
+static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
+ // Access type is passed in a platform dependent way (see below) and encoded
+ // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
+ // recoverable. Valid values of Y are 0 to 4, which are interpreted as
+ // log2(access_size), and 0xF, which means that access size is passed via
+ // platform dependent register (see below).
+# if defined(__aarch64__)
+ // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
+ // access size is stored in X1 register. Access address is always in X0
+ // register.
+ uptr pc = (uptr)info->si_addr;
+ const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
+ if ((code & 0xff00) != 0x900)
+ return AccessInfo{}; // Not ours.
+
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const uptr addr = uc->uc_mcontext.regs[0];
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not ours.
+ const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
+
+# elif defined(__x86_64__)
+ // Access type is encoded in the instruction following INT3 as
+ // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
+ // RSI register. Access address is always in RDI register.
+ uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
+ uint8_t *nop = (uint8_t *)pc;
+ if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
+ *(nop + 3) < 0x40)
+ return AccessInfo{}; // Not ours.
+ const unsigned code = *(nop + 3);
+
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not ours.
+ const uptr size =
+ size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
+
+# elif SANITIZER_RISCV64
+ // Access type is encoded in the instruction following EBREAK as
+ // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
+ // X11 register. Access address is always in X10 register.
+ uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
+ uint8_t byte1 = *((u8 *)(pc + 0));
+ uint8_t byte2 = *((u8 *)(pc + 1));
+ uint8_t byte3 = *((u8 *)(pc + 2));
+ uint8_t byte4 = *((u8 *)(pc + 3));
+ uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ bool isFaultShort = false;
+ bool isEbreak = (ebreak == 0x100073);
+ bool isShortEbreak = false;
+# if defined(__riscv_compressed)
+ isFaultShort = ((ebreak & 0x3) != 0x3);
+ isShortEbreak = ((ebreak & 0xffff) == 0x9002);
+# endif
+  // The faulting instruction is not an EBREAK, so this is not our case.
+ if (!(isEbreak || isShortEbreak))
+ return AccessInfo{};
+  // Advance pc past the EBREAK and reconstruct the following ADDI.
+ pc += isFaultShort ? 2 : 4;
+ byte1 = *((u8 *)(pc + 0));
+ byte2 = *((u8 *)(pc + 1));
+ byte3 = *((u8 *)(pc + 2));
+ byte4 = *((u8 *)(pc + 3));
+  // Reconstruct the instruction.
+  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+  // The access code is encoded in the top 12 bits, i.e. in the immediate
+  // field of the reconstructed ADDI instruction.
+  const unsigned code = (instr >> 20) & 0xffff;
+ const uptr addr = uc->uc_mcontext.__gregs[10];
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not our case
+ const uptr size =
+ size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;
+
+# else
+# error Unsupported architecture
+# endif
+
+ return AccessInfo{addr, size, is_store, !is_store, recover};
+}
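
A decoding sketch for the 0xXY access-info byte described at the top of this function (illustrative only): X&1 selects store vs. load, X&2 marks a recoverable access, and Y is log2(access size) or 0xF for "size in a register".

```cpp
#include <cstdio>

int main() {
  const unsigned code = 0x24;         // hypothetical: recoverable 16-byte load
  const bool is_store = code & 0x10;  // false: X&1 == 0
  const bool recover = code & 0x20;   // true: X&2 != 0
  const unsigned size_log = code & 0xf;
  if (size_log <= 4)
    std::printf("%s of %u bytes, recover=%d\n", is_store ? "store" : "load",
                1u << size_log, recover);
  return 0;
}
```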
+
+static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
+ AccessInfo ai = GetAccessInfo(info, uc);
+ if (!ai.is_store && !ai.is_load)
+ return false;
+
+ SignalContext sig{info, uc};
+ HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);
+
+# if defined(__aarch64__)
+ uc->uc_mcontext.pc += 4;
+# elif defined(__x86_64__)
+# elif SANITIZER_RISCV64
+ // pc points to EBREAK which is 2 bytes long
+ uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
+ uint8_t byte1 = (uint8_t)(*(exception_source + 0));
+ uint8_t byte2 = (uint8_t)(*(exception_source + 1));
+ uint8_t byte3 = (uint8_t)(*(exception_source + 2));
+ uint8_t byte4 = (uint8_t)(*(exception_source + 3));
+ uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ bool isFaultShort = false;
+# if defined(__riscv_compressed)
+ isFaultShort = ((faulted & 0x3) != 0x3);
+# endif
+ uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
+# else
+# error Unsupported architecture
+# endif
+ return true;
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+void HwasanOnDeadlySignal(int signo, void *info, void *context) {
+ // Probably a tag mismatch.
+ if (signo == SIGTRAP)
+ if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
+ return;
+
+ HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
+}
+
+void Thread::InitStackAndTls(const InitState *) {
+ uptr tls_size;
+ uptr stack_size;
+ GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+}
+
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+ CHECK(IsAligned(p, kShadowAlignment));
+ CHECK(IsAligned(size, kShadowAlignment));
+ uptr shadow_start = MemToShadow(p);
+ uptr shadow_size = MemToShadowSize(size);
+
+ uptr page_size = GetPageSizeCached();
+ uptr page_start = RoundUpTo(shadow_start, page_size);
+ uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
+ uptr threshold = common_flags()->clear_shadow_mmap_threshold;
+ if (SANITIZER_LINUX &&
+ UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
+ internal_memset((void *)shadow_start, tag, page_start - shadow_start);
+ internal_memset((void *)page_end, tag,
+ shadow_start + shadow_size - page_end);
+ // For an anonymous private mapping MADV_DONTNEED will return a zero page on
+ // Linux.
+ ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
+ } else {
+ internal_memset((void *)shadow_start, tag, shadow_size);
+ }
+ return AddTagToPointer(p, tag);
+}
+
+static void BeforeFork() {
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::LockGlobal();
+ }
+  // The `__lsan` locking functions are defined regardless of
+  // `CAN_SANITIZE_LEAKS` and lock what we need.
+ __lsan::LockThreads();
+ __lsan::LockAllocator();
+ StackDepotLockBeforeFork();
+}
+
+static void AfterFork(bool fork_child) {
+ StackDepotUnlockAfterFork(fork_child);
+  // The `__lsan` unlocking functions are defined regardless of
+  // `CAN_SANITIZE_LEAKS` and unlock what we need.
+ __lsan::UnlockAllocator();
+ __lsan::UnlockThreads();
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::UnlockGlobal();
+ }
+}
+
+void HwasanInstallAtForkHandler() {
+ pthread_atfork(
+ &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
+ []() { AfterFork(/* fork_child= */ true); });
+}
+
+void InstallAtExitCheckLeaks() {
+ if (CAN_SANITIZE_LEAKS) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (flags()->halt_on_error)
+ Atexit(__lsan::DoLeakCheck);
+ else
+ Atexit(__lsan::DoRecoverableLeakCheckVoid);
+ }
+ }
+}
+
+} // namespace __hwasan
+
+using namespace __hwasan;
+
+extern "C" void __hwasan_thread_enter() {
+ hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
+}
+
+extern "C" void __hwasan_thread_exit() {
+ Thread *t = GetCurrentThread();
+  // Make sure that the signal handler cannot see a stale current thread
+  // pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ if (t) {
+ // Block async signals on the thread as the handler can be instrumented.
+ // After this point instrumented code can't access essential data from TLS
+ // and will crash.
+ // Bionic already calls __hwasan_thread_exit with blocked signals.
+ if (SANITIZER_GLIBC)
+ BlockSignals();
+ hwasanThreadList().ReleaseThread(t);
+ }
+}
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h
new file mode 100644
index 000000000000..7d134e8c4b7f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h
@@ -0,0 +1,50 @@
+//===-- hwasan_malloc_bisect.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_hash.h"
+#include "hwasan.h"
+
+namespace __hwasan {
+
+static u32 malloc_hash(StackTrace *stack, uptr orig_size) {
+ uptr len = Min(stack->size, (unsigned)7);
+ MurMur2HashBuilder H(len);
+ H.add(orig_size);
+ // Start with frame #1 to skip __sanitizer_malloc frame, which is
+ // (a) almost always the same (well, could be operator new or new[])
+ // (b) can change hashes when compiler-rt is rebuilt, invalidating previous
+ // bisection results.
+  // Because of ASLR, use only the offset within the page.
+ for (uptr i = 1; i < len; ++i) H.add(((u32)stack->trace[i]) & 0xFFF);
+ return H.get();
+}
+
+static inline bool malloc_bisect(StackTrace *stack, uptr orig_size) {
+ uptr left = flags()->malloc_bisect_left;
+ uptr right = flags()->malloc_bisect_right;
+ if (LIKELY(left == 0 && right == 0))
+ return true;
+ if (!stack)
+ return true;
+ // Allow malloc_bisect_right > (u32)(-1) to avoid spelling the latter in
+ // decimal.
+ uptr h = (uptr)malloc_hash(stack, orig_size);
+ if (h < left || h > right)
+ return false;
+ if (flags()->malloc_bisect_dump) {
+ Printf("[alloc] %u %zu\n", h, orig_size);
+ stack->Print();
+ }
+ return true;
+}
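
A hedged sketch of how the flags above are typically used: re-run the target with HWASAN_OPTIONS=malloc_bisect_left=L:malloc_bisect_right=R and halve the hash interval until the single allocation that triggers a report is isolated. The `fails` predicate here stands in for actually re-running the application; `culprit` is a made-up hash.

```cpp
#include <cstdint>
#include <cstdio>
#include <functional>

// Returns the single 32-bit hash for which fails(left, right) keeps failing.
uint64_t bisect(std::function<bool(uint64_t, uint64_t)> fails) {
  uint64_t left = 0, right = 0xffffffffull;
  while (left < right) {
    uint64_t mid = left + (right - left) / 2;
    if (fails(left, mid))  // re-run with malloc_bisect_left/right set
      right = mid;
    else
      left = mid + 1;
  }
  return left;
}

int main() {
  const uint64_t culprit = 0x1234;  // pretend this hash triggers the bug
  uint64_t found = bisect([&](uint64_t l, uint64_t r) {
    return l <= culprit && culprit <= r;  // stands in for re-running the app
  });
  std::printf("culprit hash: 0x%llx\n", (unsigned long long)found);
  return 0;
}
```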
+
+} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h
new file mode 100644
index 000000000000..79a143632f6a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h
@@ -0,0 +1,75 @@
+//===-- hwasan_mapping.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and defines memory mapping.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_MAPPING_H
+#define HWASAN_MAPPING_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "hwasan_interface_internal.h"
+
+// Typical mapping on Linux/x86_64:
+// with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]:
+// || [0x7f0d59f40000, 0x7fffffffffff] || HighMem ||
+// || [0x7efe2f934000, 0x7f0d59f3ffff] || HighShadow ||
+// || [0x7e7e2f934000, 0x7efe2f933fff] || ShadowGap ||
+// || [0x770d59f40000, 0x7e7e2f933fff] || LowShadow ||
+// || [0x000000000000, 0x770d59f3ffff] || LowMem ||
+
+// Typical mapping on Android/AArch64
+// with dynamic shadow mapped: [0x007477480000, 0x007c77480000]:
+// || [0x007c77480000, 0x007fffffffff] || HighMem ||
+// || [0x007c3ebc8000, 0x007c7747ffff] || HighShadow ||
+// || [0x007bbebc8000, 0x007c3ebc7fff] || ShadowGap ||
+// || [0x007477480000, 0x007bbebc7fff] || LowShadow ||
+// || [0x000000000000, 0x00747747ffff] || LowMem ||
+
+// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th).
+constexpr uptr kShadowScale = 4;
+constexpr uptr kShadowAlignment = 1ULL << kShadowScale;
+
+namespace __hwasan {
+
+extern uptr kLowMemStart;
+extern uptr kLowMemEnd;
+extern uptr kLowShadowEnd;
+extern uptr kLowShadowStart;
+extern uptr kHighShadowStart;
+extern uptr kHighShadowEnd;
+extern uptr kHighMemStart;
+extern uptr kHighMemEnd;
+
+inline uptr GetShadowOffset() {
+ return SANITIZER_FUCHSIA ? 0 : __hwasan_shadow_memory_dynamic_address;
+}
+inline uptr MemToShadow(uptr untagged_addr) {
+ return (untagged_addr >> kShadowScale) + GetShadowOffset();
+}
+inline uptr ShadowToMem(uptr shadow_addr) {
+ return (shadow_addr - GetShadowOffset()) << kShadowScale;
+}
+inline uptr MemToShadowSize(uptr size) {
+ return size >> kShadowScale;
+}
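
A toy consistency check of the mapping algebra above, assuming kShadowScale == 4 (one shadow byte per 16-byte granule) and a hypothetical dynamic shadow base in place of GetShadowOffset(). Not runtime code.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kBase = 0x770d59f40000;  // hypothetical GetShadowOffset()
  auto mem_to_shadow = [&](uint64_t a) { return (a >> 4) + kBase; };
  auto shadow_to_mem = [&](uint64_t s) { return (s - kBase) << 4; };
  assert(mem_to_shadow(0x1000) == kBase + 0x100);
  assert(shadow_to_mem(mem_to_shadow(0x1000)) == 0x1000);
  return 0;
}
```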
+
+bool MemIsApp(uptr p);
+
+inline bool MemIsShadow(uptr p) {
+ return (kLowShadowStart <= p && p <= kLowShadowEnd) ||
+ (kHighShadowStart <= p && p <= kHighShadowEnd);
+}
+
+uptr GetAliasRegionStart();
+
+} // namespace __hwasan
+
+#endif // HWASAN_MAPPING_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
new file mode 100644
index 000000000000..16d6f9085924
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
@@ -0,0 +1,74 @@
+//===-- hwasan_memintrinsics.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains HWASAN versions of
+/// memset, memcpy and memmove
+///
+//===----------------------------------------------------------------------===//
+
+#include <string.h>
+#include "hwasan.h"
+#include "hwasan_checks.h"
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+using namespace __hwasan;
+
+void *__hwasan_memset(void *block, int c, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(block, c, size);
+}
+
+void *__hwasan_memcpy(void *to, const void *from, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(to, from, size);
+}
+
+void *__hwasan_memmove(void *to, const void *from, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(to, from, size);
+}
+
+void *__hwasan_memset_match_all(void *block, int c, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(block)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(block, c, size);
+}
+
+void *__hwasan_memcpy_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(to, from, size);
+}
+
+void *__hwasan_memmove_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(to, from, size);
+}
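
A hedged sketch of the guard used in the *_match_all functions above: an operand whose pointer tag equals the match-all tag is exempt from checking. `tag_of()` assumes the AArch64 top-byte layout (tag in bits 56..63) and stands in for GetTagFromPointer; both names here are illustrative.

```cpp
#include <cstdint>

static inline uint8_t tag_of(const void *p) {
  return static_cast<uint8_t>(reinterpret_cast<uintptr_t>(p) >> 56);
}

// Mirrors the 'if' guards above: check only when the pointer's tag differs
// from the match-all tag.
bool should_check(const void *p, uint8_t match_all_tag) {
  return tag_of(p) != match_all_tag;
}
```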
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
new file mode 100644
index 000000000000..f0fd3726ef1b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
@@ -0,0 +1,162 @@
+//===-- hwasan_new_delete.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+# define OPERATOR_NEW_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_malloc(size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+# define OPERATOR_NEW_ALIGN_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_memalign(static_cast<uptr>(align), size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+
+# define OPERATOR_DELETE_BODY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+
+#elif defined(__ANDROID__)
+
+// We don't actually want to intercept operator new and delete on Android, but
+// since we previously released a runtime that intercepted these functions,
+// removing the interceptors would break ABI. Therefore we simply forward to
+// malloc and free.
+# define OPERATOR_NEW_BODY(nothrow) return malloc(size)
+# define OPERATOR_DELETE_BODY free(ptr)
+
+#endif
+
+#ifdef OPERATOR_NEW_BODY
+
+using namespace __hwasan;
+
+// Fake std::nothrow_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+} // namespace std
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(size_t size) {
+ OPERATOR_NEW_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size) {
+ OPERATOR_NEW_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+
+#endif // OPERATOR_NEW_BODY
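
An illustrative caller-side view of the two bodies selected above: the throwing form reports out-of-memory (see the TODO about throwing std::bad_alloc), while the nothrow form may return nullptr, which callers must check.

```cpp
#include <cstdio>
#include <new>

int main() {
  void *p = ::operator new(16, std::nothrow);  // takes the nothrow path
  if (!p)
    std::puts("allocation failed");
  ::operator delete(p, std::nothrow);
  return 0;
}
```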
+
+#ifdef OPERATOR_NEW_ALIGN_BODY
+
+namespace std {
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+
+#endif // OPERATOR_NEW_ALIGN_BODY
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
new file mode 100644
index 000000000000..d92b51052194
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
@@ -0,0 +1,1001 @@
+#ifndef HWASAN_PLATFORM_INTERCEPTORS_H
+#define HWASAN_PLATFORM_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// This file cancels out most of the sanitizer_common interception, thus
+// allowing HWASan to selectively reuse some of the interceptors.
+//
+// To re-enable sanitizer_common's interception of a function, comment out
+// the corresponding '#undef SANITIZER_INTERCEPT_fn' and
+// '#define SANITIZER_INTERCEPT_fn 0':
+// - We prefer to comment out rather than delete the lines, to show that
+// it is deliberate, rather than an accidental omission.
+// - We do not use '#define SANITIZER_INTERCEPT_fn 1', because
+// interception is usually conditional (e.g., based on SI_POSIX); we let
+// the condition in sanitizers_platform_interceptors.h take effect.
+
+// Originally generated with:
+// cat ../sanitizer_common/sanitizer_platform_interceptors.h | grep '^#define SANITIZER_INTERCEPT' | cut -d ' ' -f 2 | while read x; do echo "#undef $x"; echo "#define $x 0"; echo; done
+#undef SANITIZER_INTERCEPT_STRLEN
+#define SANITIZER_INTERCEPT_STRLEN 0
+
+#undef SANITIZER_INTERCEPT_STRNLEN
+#define SANITIZER_INTERCEPT_STRNLEN 0
+
+#undef SANITIZER_INTERCEPT_STRCMP
+#define SANITIZER_INTERCEPT_STRCMP 0
+
+#undef SANITIZER_INTERCEPT_STRSTR
+#define SANITIZER_INTERCEPT_STRSTR 0
+
+#undef SANITIZER_INTERCEPT_STRCASESTR
+#define SANITIZER_INTERCEPT_STRCASESTR 0
+
+#undef SANITIZER_INTERCEPT_STRTOK
+#define SANITIZER_INTERCEPT_STRTOK 0
+
+#undef SANITIZER_INTERCEPT_STRCHR
+#define SANITIZER_INTERCEPT_STRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRCHRNUL
+#define SANITIZER_INTERCEPT_STRCHRNUL 0
+
+#undef SANITIZER_INTERCEPT_STRRCHR
+#define SANITIZER_INTERCEPT_STRRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRSPN
+#define SANITIZER_INTERCEPT_STRSPN 0
+
+#undef SANITIZER_INTERCEPT_STRPBRK
+#define SANITIZER_INTERCEPT_STRPBRK 0
+
+#undef SANITIZER_INTERCEPT_TEXTDOMAIN
+#define SANITIZER_INTERCEPT_TEXTDOMAIN 0
+
+#undef SANITIZER_INTERCEPT_STRCASECMP
+#define SANITIZER_INTERCEPT_STRCASECMP 0
+
+// #undef SANITIZER_INTERCEPT_MEMSET
+// #define SANITIZER_INTERCEPT_MEMSET 0
+
+// #undef SANITIZER_INTERCEPT_MEMMOVE
+// #define SANITIZER_INTERCEPT_MEMMOVE 0
+
+// #undef SANITIZER_INTERCEPT_MEMCPY
+// #define SANITIZER_INTERCEPT_MEMCPY 0
+
+// #undef SANITIZER_INTERCEPT_MEMCMP
+// #define SANITIZER_INTERCEPT_MEMCMP 0
+
+// #undef SANITIZER_INTERCEPT_BCMP
+// #define SANITIZER_INTERCEPT_BCMP 0
+
+#undef SANITIZER_INTERCEPT_STRNDUP
+#define SANITIZER_INTERCEPT_STRNDUP 0
+
+#undef SANITIZER_INTERCEPT___STRNDUP
+#define SANITIZER_INTERCEPT___STRNDUP 0
+
+#undef SANITIZER_INTERCEPT_MEMMEM
+#define SANITIZER_INTERCEPT_MEMMEM 0
+
+#undef SANITIZER_INTERCEPT_MEMCHR
+#define SANITIZER_INTERCEPT_MEMCHR 0
+
+#undef SANITIZER_INTERCEPT_MEMRCHR
+#define SANITIZER_INTERCEPT_MEMRCHR 0
+
+#undef SANITIZER_INTERCEPT_READ
+#define SANITIZER_INTERCEPT_READ 0
+
+#undef SANITIZER_INTERCEPT_PREAD
+#define SANITIZER_INTERCEPT_PREAD 0
+
+#undef SANITIZER_INTERCEPT_WRITE
+#define SANITIZER_INTERCEPT_WRITE 0
+
+#undef SANITIZER_INTERCEPT_PWRITE
+#define SANITIZER_INTERCEPT_PWRITE 0
+
+#undef SANITIZER_INTERCEPT_FREAD
+#define SANITIZER_INTERCEPT_FREAD 0
+
+#undef SANITIZER_INTERCEPT_FWRITE
+#define SANITIZER_INTERCEPT_FWRITE 0
+
+#undef SANITIZER_INTERCEPT_FGETS
+#define SANITIZER_INTERCEPT_FGETS 0
+
+#undef SANITIZER_INTERCEPT_FPUTS
+#define SANITIZER_INTERCEPT_FPUTS 0
+
+#undef SANITIZER_INTERCEPT_PUTS
+#define SANITIZER_INTERCEPT_PUTS 0
+
+#undef SANITIZER_INTERCEPT_PREAD64
+#define SANITIZER_INTERCEPT_PREAD64 0
+
+#undef SANITIZER_INTERCEPT_PWRITE64
+#define SANITIZER_INTERCEPT_PWRITE64 0
+
+#undef SANITIZER_INTERCEPT_READV
+#define SANITIZER_INTERCEPT_READV 0
+
+#undef SANITIZER_INTERCEPT_WRITEV
+#define SANITIZER_INTERCEPT_WRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV
+#define SANITIZER_INTERCEPT_PREADV 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV
+#define SANITIZER_INTERCEPT_PWRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV64
+#define SANITIZER_INTERCEPT_PREADV64 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV64
+#define SANITIZER_INTERCEPT_PWRITEV64 0
+
+#undef SANITIZER_INTERCEPT_PRCTL
+#define SANITIZER_INTERCEPT_PRCTL 0
+
+#undef SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_STRPTIME
+#define SANITIZER_INTERCEPT_STRPTIME 0
+
+#undef SANITIZER_INTERCEPT_SCANF
+#define SANITIZER_INTERCEPT_SCANF 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_SCANF
+#define SANITIZER_INTERCEPT_ISOC99_SCANF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF
+#define SANITIZER_INTERCEPT_PRINTF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF_L
+#define SANITIZER_INTERCEPT_PRINTF_L 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define SANITIZER_INTERCEPT_ISOC99_PRINTF 0
+
+#undef SANITIZER_INTERCEPT___PRINTF_CHK
+#define SANITIZER_INTERCEPT___PRINTF_CHK 0
+
+#undef SANITIZER_INTERCEPT_FREXP
+#define SANITIZER_INTERCEPT_FREXP 0
+
+#undef SANITIZER_INTERCEPT_FREXPF_FREXPL
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT
+#define SANITIZER_INTERCEPT_GETPWENT 0
+
+#undef SANITIZER_INTERCEPT_FGETGRENT_R
+#define SANITIZER_INTERCEPT_FGETGRENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#define SANITIZER_INTERCEPT_FGETPWENT 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#define SANITIZER_INTERCEPT_GETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT_R
+#define SANITIZER_INTERCEPT_FGETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_SETPWENT
+#define SANITIZER_INTERCEPT_SETPWENT 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETTIME
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0
+
+#undef SANITIZER_INTERCEPT_GETITIMER
+#define SANITIZER_INTERCEPT_GETITIMER 0
+
+#undef SANITIZER_INTERCEPT_TIME
+#define SANITIZER_INTERCEPT_TIME 0
+
+#undef SANITIZER_INTERCEPT_GLOB
+#define SANITIZER_INTERCEPT_GLOB 0
+
+#undef SANITIZER_INTERCEPT_GLOB64
+#define SANITIZER_INTERCEPT_GLOB64 0
+
+#undef SANITIZER_INTERCEPT___B64_TO
+#define SANITIZER_INTERCEPT___B64_TO 0
+
+#undef SANITIZER_INTERCEPT_DN_COMP_EXPAND
+#define SANITIZER_INTERCEPT_DN_COMP_EXPAND 0
+
+#undef SANITIZER_INTERCEPT_POSIX_SPAWN
+#define SANITIZER_INTERCEPT_POSIX_SPAWN 0
+
+#undef SANITIZER_INTERCEPT_WAIT
+#define SANITIZER_INTERCEPT_WAIT 0
+
+#undef SANITIZER_INTERCEPT_INET
+#define SANITIZER_INTERCEPT_INET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM 0
+
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+#define SANITIZER_INTERCEPT_GETADDRINFO 0
+
+#undef SANITIZER_INTERCEPT_GETNAMEINFO
+#define SANITIZER_INTERCEPT_GETNAMEINFO 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKNAME
+#define SANITIZER_INTERCEPT_GETSOCKNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYADDR_R
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTENT_R
+#define SANITIZER_INTERCEPT_GETHOSTENT_R 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKOPT
+#define SANITIZER_INTERCEPT_GETSOCKOPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT
+#define SANITIZER_INTERCEPT_ACCEPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT4
+#define SANITIZER_INTERCEPT_ACCEPT4 0
+
+#undef SANITIZER_INTERCEPT_PACCEPT
+#define SANITIZER_INTERCEPT_PACCEPT 0
+
+#undef SANITIZER_INTERCEPT_MODF
+#define SANITIZER_INTERCEPT_MODF 0
+
+#undef SANITIZER_INTERCEPT_RECVMSG
+#define SANITIZER_INTERCEPT_RECVMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMSG
+#define SANITIZER_INTERCEPT_SENDMSG 0
+
+#undef SANITIZER_INTERCEPT_RECVMMSG
+#define SANITIZER_INTERCEPT_RECVMMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMMSG
+#define SANITIZER_INTERCEPT_SENDMMSG 0
+
+#undef SANITIZER_INTERCEPT_SYSMSG
+#define SANITIZER_INTERCEPT_SYSMSG 0
+
+#undef SANITIZER_INTERCEPT_GETPEERNAME
+#define SANITIZER_INTERCEPT_GETPEERNAME 0
+
+#undef SANITIZER_INTERCEPT_IOCTL
+#define SANITIZER_INTERCEPT_IOCTL 0
+
+#undef SANITIZER_INTERCEPT_INET_ATON
+#define SANITIZER_INTERCEPT_INET_ATON 0
+
+#undef SANITIZER_INTERCEPT_SYSINFO
+#define SANITIZER_INTERCEPT_SYSINFO 0
+
+#undef SANITIZER_INTERCEPT_READDIR
+#define SANITIZER_INTERCEPT_READDIR 0
+
+#undef SANITIZER_INTERCEPT_READDIR64
+#define SANITIZER_INTERCEPT_READDIR64 0
+
+#undef SANITIZER_INTERCEPT_PTRACE
+#define SANITIZER_INTERCEPT_PTRACE 0
+
+#undef SANITIZER_INTERCEPT_SETLOCALE
+#define SANITIZER_INTERCEPT_SETLOCALE 0
+
+#undef SANITIZER_INTERCEPT_GETCWD
+#define SANITIZER_INTERCEPT_GETCWD 0
+
+#undef SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME 0
+
+#undef SANITIZER_INTERCEPT_STRTOIMAX
+#define SANITIZER_INTERCEPT_STRTOIMAX 0
+
+#undef SANITIZER_INTERCEPT_MBSTOWCS
+#define SANITIZER_INTERCEPT_MBSTOWCS 0
+
+#undef SANITIZER_INTERCEPT_MBSNRTOWCS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS 0
+
+#undef SANITIZER_INTERCEPT_WCSTOMBS
+#define SANITIZER_INTERCEPT_WCSTOMBS 0
+
+#undef SANITIZER_INTERCEPT_STRXFRM
+#define SANITIZER_INTERCEPT_STRXFRM 0
+
+#undef SANITIZER_INTERCEPT___STRXFRM_L
+#define SANITIZER_INTERCEPT___STRXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSXFRM
+#define SANITIZER_INTERCEPT_WCSXFRM 0
+
+#undef SANITIZER_INTERCEPT___WCSXFRM_L
+#define SANITIZER_INTERCEPT___WCSXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSNRTOMBS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS 0
+
+#undef SANITIZER_INTERCEPT_WCRTOMB
+#define SANITIZER_INTERCEPT_WCRTOMB 0
+
+#undef SANITIZER_INTERCEPT_WCTOMB
+#define SANITIZER_INTERCEPT_WCTOMB 0
+
+#undef SANITIZER_INTERCEPT_TCGETATTR
+#define SANITIZER_INTERCEPT_TCGETATTR 0
+
+#undef SANITIZER_INTERCEPT_REALPATH
+#define SANITIZER_INTERCEPT_REALPATH 0
+
+#undef SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME 0
+
+#undef SANITIZER_INTERCEPT_CONFSTR
+#define SANITIZER_INTERCEPT_CONFSTR 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETAFFINITY
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETPARAM
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM 0
+
+#undef SANITIZER_INTERCEPT_STRERROR
+#define SANITIZER_INTERCEPT_STRERROR 0
+
+#undef SANITIZER_INTERCEPT_STRERROR_R
+#define SANITIZER_INTERCEPT_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_XPG_STRERROR_R
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR
+#define SANITIZER_INTERCEPT_SCANDIR 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR64
+#define SANITIZER_INTERCEPT_SCANDIR64 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPS
+#define SANITIZER_INTERCEPT_GETGROUPS 0
+
+#undef SANITIZER_INTERCEPT_POLL
+#define SANITIZER_INTERCEPT_POLL 0
+
+#undef SANITIZER_INTERCEPT_PPOLL
+#define SANITIZER_INTERCEPT_PPOLL 0
+
+#undef SANITIZER_INTERCEPT_WORDEXP
+#define SANITIZER_INTERCEPT_WORDEXP 0
+
+#undef SANITIZER_INTERCEPT_SIGWAIT
+#define SANITIZER_INTERCEPT_SIGWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGWAITINFO
+#define SANITIZER_INTERCEPT_SIGWAITINFO 0
+
+#undef SANITIZER_INTERCEPT_SIGTIMEDWAIT
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGSETOPS
+#define SANITIZER_INTERCEPT_SIGSETOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGSET_LOGICOPS
+#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGPENDING
+#define SANITIZER_INTERCEPT_SIGPENDING 0
+
+#undef SANITIZER_INTERCEPT_SIGPROCMASK
+#define SANITIZER_INTERCEPT_SIGPROCMASK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK 0
+
+#undef SANITIZER_INTERCEPT_BACKTRACE
+#define SANITIZER_INTERCEPT_BACKTRACE 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT
+#define SANITIZER_INTERCEPT_GETMNTENT 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT_R
+#define SANITIZER_INTERCEPT_GETMNTENT_R 0
+
+#undef SANITIZER_INTERCEPT_STATFS
+#define SANITIZER_INTERCEPT_STATFS 0
+
+#undef SANITIZER_INTERCEPT_STATFS64
+#define SANITIZER_INTERCEPT_STATFS64 0
+
+#undef SANITIZER_INTERCEPT_STATVFS
+#define SANITIZER_INTERCEPT_STATVFS 0
+
+#undef SANITIZER_INTERCEPT_STATVFS64
+#define SANITIZER_INTERCEPT_STATVFS64 0
+
+#undef SANITIZER_INTERCEPT_INITGROUPS
+#define SANITIZER_INTERCEPT_INITGROUPS 0
+
+#undef SANITIZER_INTERCEPT_ETHER_NTOA_ATON
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON 0
+
+#undef SANITIZER_INTERCEPT_ETHER_HOST
+#define SANITIZER_INTERCEPT_ETHER_HOST 0
+
+#undef SANITIZER_INTERCEPT_ETHER_R
+#define SANITIZER_INTERCEPT_ETHER_R 0
+
+#undef SANITIZER_INTERCEPT_SHMCTL
+#define SANITIZER_INTERCEPT_SHMCTL 0
+
+#undef SANITIZER_INTERCEPT_RANDOM_R
+#define SANITIZER_INTERCEPT_RANDOM_R 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_TRYJOIN
+#define SANITIZER_INTERCEPT_TRYJOIN 0
+
+#undef SANITIZER_INTERCEPT_TIMEDJOIN
+#define SANITIZER_INTERCEPT_TIMEDJOIN 0
+
+#undef SANITIZER_INTERCEPT_THR_EXIT
+#define SANITIZER_INTERCEPT_THR_EXIT 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM
+#define SANITIZER_INTERCEPT_TMPNAM 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM_R
+#define SANITIZER_INTERCEPT_TMPNAM_R 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME
+#define SANITIZER_INTERCEPT_PTSNAME 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME_R
+#define SANITIZER_INTERCEPT_PTSNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME
+#define SANITIZER_INTERCEPT_TTYNAME 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME_R
+#define SANITIZER_INTERCEPT_TTYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TEMPNAM
+#define SANITIZER_INTERCEPT_TEMPNAM 0
+
+#undef SANITIZER_INTERCEPT_SINCOS
+#define SANITIZER_INTERCEPT_SINCOS 0
+
+#undef SANITIZER_INTERCEPT_REMQUO
+#define SANITIZER_INTERCEPT_REMQUO 0
+
+#undef SANITIZER_INTERCEPT_REMQUOL
+#define SANITIZER_INTERCEPT_REMQUOL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA
+#define SANITIZER_INTERCEPT_LGAMMA 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL
+#define SANITIZER_INTERCEPT_LGAMMAL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA_R
+#define SANITIZER_INTERCEPT_LGAMMA_R 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL_R
+#define SANITIZER_INTERCEPT_LGAMMAL_R 0
+
+#undef SANITIZER_INTERCEPT_DRAND48_R
+#define SANITIZER_INTERCEPT_DRAND48_R 0
+
+#undef SANITIZER_INTERCEPT_RAND_R
+#define SANITIZER_INTERCEPT_RAND_R 0
+
+#undef SANITIZER_INTERCEPT_ICONV
+#define SANITIZER_INTERCEPT_ICONV 0
+
+#undef SANITIZER_INTERCEPT_TIMES
+#define SANITIZER_INTERCEPT_TIMES 0
+
+#undef SANITIZER_INTERCEPT_GETLINE
+#define SANITIZER_INTERCEPT_GETLINE 0
+
+#undef SANITIZER_INTERCEPT__EXIT
+#define SANITIZER_INTERCEPT__EXIT 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MUTEX
+#define SANITIZER_INTERCEPT___LIBC_MUTEX 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR 0
+
+#undef SANITIZER_INTERCEPT_LISTXATTR
+#define SANITIZER_INTERCEPT_LISTXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETXATTR
+#define SANITIZER_INTERCEPT_GETXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETRESID
+#define SANITIZER_INTERCEPT_GETRESID 0
+
+#undef SANITIZER_INTERCEPT_GETIFADDRS
+#define SANITIZER_INTERCEPT_GETIFADDRS 0
+
+#undef SANITIZER_INTERCEPT_IF_INDEXTONAME
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME 0
+
+#undef SANITIZER_INTERCEPT_CAPGET
+#define SANITIZER_INTERCEPT_CAPGET 0
+
+#undef SANITIZER_INTERCEPT_AEABI_MEM
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+
+#undef SANITIZER_INTERCEPT___BZERO
+#define SANITIZER_INTERCEPT___BZERO 0
+
+#undef SANITIZER_INTERCEPT_BZERO
+#define SANITIZER_INTERCEPT_BZERO 0
+
+#undef SANITIZER_INTERCEPT_FTIME
+#define SANITIZER_INTERCEPT_FTIME 0
+
+#undef SANITIZER_INTERCEPT_XDR
+#define SANITIZER_INTERCEPT_XDR 0
+
+#undef SANITIZER_INTERCEPT_XDRREC
+#define SANITIZER_INTERCEPT_XDRREC 0
+
+#undef SANITIZER_INTERCEPT_TSEARCH
+#define SANITIZER_INTERCEPT_TSEARCH 0
+
+#undef SANITIZER_INTERCEPT_LIBIO_INTERNALS
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS 0
+
+#undef SANITIZER_INTERCEPT_FOPEN
+#define SANITIZER_INTERCEPT_FOPEN 0
+
+#undef SANITIZER_INTERCEPT_FOPEN64
+#define SANITIZER_INTERCEPT_FOPEN64 0
+
+#undef SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM 0
+
+#undef SANITIZER_INTERCEPT_OBSTACK
+#define SANITIZER_INTERCEPT_OBSTACK 0
+
+#undef SANITIZER_INTERCEPT_FFLUSH
+#define SANITIZER_INTERCEPT_FFLUSH 0
+
+#undef SANITIZER_INTERCEPT_FCLOSE
+#define SANITIZER_INTERCEPT_FCLOSE 0
+
+#undef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE 0
+
+#undef SANITIZER_INTERCEPT_GETPASS
+#define SANITIZER_INTERCEPT_GETPASS 0
+
+#undef SANITIZER_INTERCEPT_TIMERFD
+#define SANITIZER_INTERCEPT_TIMERFD 0
+
+#undef SANITIZER_INTERCEPT_MLOCKX
+#define SANITIZER_INTERCEPT_MLOCKX 0
+
+#undef SANITIZER_INTERCEPT_FOPENCOOKIE
+#define SANITIZER_INTERCEPT_FOPENCOOKIE 0
+
+#undef SANITIZER_INTERCEPT_SEM
+#define SANITIZER_INTERCEPT_SEM 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL 0
+
+#undef SANITIZER_INTERCEPT_MINCORE
+#define SANITIZER_INTERCEPT_MINCORE 0
+
+#undef SANITIZER_INTERCEPT_PROCESS_VM_READV
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV 0
+
+#undef SANITIZER_INTERCEPT_CTERMID
+#define SANITIZER_INTERCEPT_CTERMID 0
+
+#undef SANITIZER_INTERCEPT_CTERMID_R
+#define SANITIZER_INTERCEPT_CTERMID_R 0
+
+#undef SANITIZER_INTERCEPTOR_HOOKS
+#define SANITIZER_INTERCEPTOR_HOOKS 0
+
+#undef SANITIZER_INTERCEPT_RECV_RECVFROM
+#define SANITIZER_INTERCEPT_RECV_RECVFROM 0
+
+#undef SANITIZER_INTERCEPT_SEND_SENDTO
+#define SANITIZER_INTERCEPT_SEND_SENDTO 0
+
+#undef SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE 0
+
+#undef SANITIZER_INTERCEPT_STAT
+#define SANITIZER_INTERCEPT_STAT 0
+
+#undef SANITIZER_INTERCEPT_STAT64
+#define SANITIZER_INTERCEPT_STAT64 0
+
+#undef SANITIZER_INTERCEPT_LSTAT
+#define SANITIZER_INTERCEPT_LSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___XSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT64
+#define SANITIZER_INTERCEPT___XSTAT64 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT
+#define SANITIZER_INTERCEPT___LXSTAT 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT64
+#define SANITIZER_INTERCEPT___LXSTAT64 0
+
+#undef SANITIZER_INTERCEPT_UTMP
+#define SANITIZER_INTERCEPT_UTMP 0
+
+#undef SANITIZER_INTERCEPT_UTMPX
+#define SANITIZER_INTERCEPT_UTMPX 0
+
+#undef SANITIZER_INTERCEPT_GETLOADAVG
+#define SANITIZER_INTERCEPT_GETLOADAVG 0
+
+// #undef SANITIZER_INTERCEPT_MMAP
+// #define SANITIZER_INTERCEPT_MMAP 0
+
+#undef SANITIZER_INTERCEPT_MMAP64
+#define SANITIZER_INTERCEPT_MMAP64 0
+
+#undef SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO 0
+
+#undef SANITIZER_INTERCEPT_MEMALIGN
+#define SANITIZER_INTERCEPT_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MEMALIGN
+#define SANITIZER_INTERCEPT___LIBC_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT_PVALLOC
+#define SANITIZER_INTERCEPT_PVALLOC 0
+
+#undef SANITIZER_INTERCEPT_CFREE
+#define SANITIZER_INTERCEPT_CFREE 0
+
+#undef SANITIZER_INTERCEPT_REALLOCARRAY
+#define SANITIZER_INTERCEPT_REALLOCARRAY 0
+
+#undef SANITIZER_INTERCEPT_ALIGNED_ALLOC
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC 0
+
+#undef SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE 0
+
+#undef SANITIZER_INTERCEPT_MCHECK_MPROBE
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE 0
+
+#undef SANITIZER_INTERCEPT_WCSLEN
+#define SANITIZER_INTERCEPT_WCSLEN 0
+
+#undef SANITIZER_INTERCEPT_WCSCAT
+#define SANITIZER_INTERCEPT_WCSCAT 0
+
+#undef SANITIZER_INTERCEPT_WCSDUP
+#define SANITIZER_INTERCEPT_WCSDUP 0
+
+#undef SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION 0
+
+#undef SANITIZER_INTERCEPT_BSD_SIGNAL
+#define SANITIZER_INTERCEPT_BSD_SIGNAL 0
+
+#undef SANITIZER_INTERCEPT_ACCT
+#define SANITIZER_INTERCEPT_ACCT 0
+
+#undef SANITIZER_INTERCEPT_USER_FROM_UID
+#define SANITIZER_INTERCEPT_USER_FROM_UID 0
+
+#undef SANITIZER_INTERCEPT_UID_FROM_USER
+#define SANITIZER_INTERCEPT_UID_FROM_USER 0
+
+#undef SANITIZER_INTERCEPT_GROUP_FROM_GID
+#define SANITIZER_INTERCEPT_GROUP_FROM_GID 0
+
+#undef SANITIZER_INTERCEPT_GID_FROM_GROUP
+#define SANITIZER_INTERCEPT_GID_FROM_GROUP 0
+
+#undef SANITIZER_INTERCEPT_ACCESS
+#define SANITIZER_INTERCEPT_ACCESS 0
+
+#undef SANITIZER_INTERCEPT_FACCESSAT
+#define SANITIZER_INTERCEPT_FACCESSAT 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPLIST
+#define SANITIZER_INTERCEPT_GETGROUPLIST 0
+
+#undef SANITIZER_INTERCEPT_STRLCPY
+#define SANITIZER_INTERCEPT_STRLCPY 0
+
+#undef SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT
+#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT
+#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_READLINK
+#define SANITIZER_INTERCEPT_READLINK 0
+
+#undef SANITIZER_INTERCEPT_READLINKAT
+#define SANITIZER_INTERCEPT_READLINKAT 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME
+#define SANITIZER_INTERCEPT_DEVNAME 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME_R
+#define SANITIZER_INTERCEPT_DEVNAME_R 0
+
+#undef SANITIZER_INTERCEPT_FGETLN
+#define SANITIZER_INTERCEPT_FGETLN 0
+
+#undef SANITIZER_INTERCEPT_STRMODE
+#define SANITIZER_INTERCEPT_STRMODE 0
+
+#undef SANITIZER_INTERCEPT_TTYENT
+#define SANITIZER_INTERCEPT_TTYENT 0
+
+#undef SANITIZER_INTERCEPT_TTYENTPATH
+#define SANITIZER_INTERCEPT_TTYENTPATH 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT
+#define SANITIZER_INTERCEPT_PROTOENT 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT_R
+#define SANITIZER_INTERCEPT_PROTOENT_R 0
+
+#undef SANITIZER_INTERCEPT_NETENT
+#define SANITIZER_INTERCEPT_NETENT 0
+
+#undef SANITIZER_INTERCEPT_SETVBUF
+#define SANITIZER_INTERCEPT_SETVBUF 0
+
+#undef SANITIZER_INTERCEPT_GETMNTINFO
+#define SANITIZER_INTERCEPT_GETMNTINFO 0
+
+#undef SANITIZER_INTERCEPT_MI_VECTOR_HASH
+#define SANITIZER_INTERCEPT_MI_VECTOR_HASH 0
+
+#undef SANITIZER_INTERCEPT_GETVFSSTAT
+#define SANITIZER_INTERCEPT_GETVFSSTAT 0
+
+#undef SANITIZER_INTERCEPT_REGEX
+#define SANITIZER_INTERCEPT_REGEX 0
+
+#undef SANITIZER_INTERCEPT_REGEXSUB
+#define SANITIZER_INTERCEPT_REGEXSUB 0
+
+#undef SANITIZER_INTERCEPT_FTS
+#define SANITIZER_INTERCEPT_FTS 0
+
+#undef SANITIZER_INTERCEPT_SYSCTL
+#define SANITIZER_INTERCEPT_SYSCTL 0
+
+#undef SANITIZER_INTERCEPT_ASYSCTL
+#define SANITIZER_INTERCEPT_ASYSCTL 0
+
+#undef SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
+#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO 0
+
+#undef SANITIZER_INTERCEPT_NL_LANGINFO
+#define SANITIZER_INTERCEPT_NL_LANGINFO 0
+
+#undef SANITIZER_INTERCEPT_MODCTL
+#define SANITIZER_INTERCEPT_MODCTL 0
+
+#undef SANITIZER_INTERCEPT_CAPSICUM
+#define SANITIZER_INTERCEPT_CAPSICUM 0
+
+#undef SANITIZER_INTERCEPT_STRTONUM
+#define SANITIZER_INTERCEPT_STRTONUM 0
+
+#undef SANITIZER_INTERCEPT_FPARSELN
+#define SANITIZER_INTERCEPT_FPARSELN 0
+
+#undef SANITIZER_INTERCEPT_STATVFS1
+#define SANITIZER_INTERCEPT_STATVFS1 0
+
+#undef SANITIZER_INTERCEPT_STRTOI
+#define SANITIZER_INTERCEPT_STRTOI 0
+
+#undef SANITIZER_INTERCEPT_SHA1
+#define SANITIZER_INTERCEPT_SHA1 0
+
+#undef SANITIZER_INTERCEPT_MD4
+#define SANITIZER_INTERCEPT_MD4 0
+
+#undef SANITIZER_INTERCEPT_RMD160
+#define SANITIZER_INTERCEPT_RMD160 0
+
+#undef SANITIZER_INTERCEPT_MD5
+#define SANITIZER_INTERCEPT_MD5 0
+
+#undef SANITIZER_INTERCEPT_FSEEK
+#define SANITIZER_INTERCEPT_FSEEK 0
+
+#undef SANITIZER_INTERCEPT_MD2
+#define SANITIZER_INTERCEPT_MD2 0
+
+#undef SANITIZER_INTERCEPT_SHA2
+#define SANITIZER_INTERCEPT_SHA2 0
+
+#undef SANITIZER_INTERCEPT_CDB
+#define SANITIZER_INTERCEPT_CDB 0
+
+#undef SANITIZER_INTERCEPT_VIS
+#define SANITIZER_INTERCEPT_VIS 0
+
+#undef SANITIZER_INTERCEPT_POPEN
+#define SANITIZER_INTERCEPT_POPEN 0
+
+#undef SANITIZER_INTERCEPT_POPENVE
+#define SANITIZER_INTERCEPT_POPENVE 0
+
+#undef SANITIZER_INTERCEPT_PCLOSE
+#define SANITIZER_INTERCEPT_PCLOSE 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN
+#define SANITIZER_INTERCEPT_FUNOPEN 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN2
+#define SANITIZER_INTERCEPT_FUNOPEN2 0
+
+#undef SANITIZER_INTERCEPT_GETFSENT
+#define SANITIZER_INTERCEPT_GETFSENT 0
+
+#undef SANITIZER_INTERCEPT_ARC4RANDOM
+#define SANITIZER_INTERCEPT_ARC4RANDOM 0
+
+#undef SANITIZER_INTERCEPT_FDEVNAME
+#define SANITIZER_INTERCEPT_FDEVNAME 0
+
+#undef SANITIZER_INTERCEPT_GETUSERSHELL
+#define SANITIZER_INTERCEPT_GETUSERSHELL 0
+
+#undef SANITIZER_INTERCEPT_SL_INIT
+#define SANITIZER_INTERCEPT_SL_INIT 0
+
+#undef SANITIZER_INTERCEPT_GETRANDOM
+#define SANITIZER_INTERCEPT_GETRANDOM 0
+
+#undef SANITIZER_INTERCEPT___CXA_ATEXIT
+#define SANITIZER_INTERCEPT___CXA_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_ATEXIT
+#define SANITIZER_INTERCEPT_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATFORK
+#define SANITIZER_INTERCEPT_PTHREAD_ATFORK 0
+
+#undef SANITIZER_INTERCEPT_GETENTROPY
+#define SANITIZER_INTERCEPT_GETENTROPY 0
+
+#undef SANITIZER_INTERCEPT_QSORT
+#define SANITIZER_INTERCEPT_QSORT 0
+
+#undef SANITIZER_INTERCEPT_QSORT_R
+#define SANITIZER_INTERCEPT_QSORT_R 0
+
+#undef SANITIZER_INTERCEPT_BSEARCH
+#define SANITIZER_INTERCEPT_BSEARCH 0
+
+#undef SANITIZER_INTERCEPT_SIGALTSTACK
+#define SANITIZER_INTERCEPT_SIGALTSTACK 0
+
+#undef SANITIZER_INTERCEPT_UNAME
+#define SANITIZER_INTERCEPT_UNAME 0
+
+#undef SANITIZER_INTERCEPT___XUNAME
+#define SANITIZER_INTERCEPT___XUNAME 0
+
+#undef SANITIZER_INTERCEPT_FLOPEN
+#define SANITIZER_INTERCEPT_FLOPEN 0
+
+#undef SANITIZER_INTERCEPT_PROCCTL
+#define SANITIZER_INTERCEPT_PROCCTL 0
+
+#undef SANITIZER_INTERCEPT_HEXDUMP
+#define SANITIZER_INTERCEPT_HEXDUMP 0
+
+#undef SANITIZER_INTERCEPT_ARGP_PARSE
+#define SANITIZER_INTERCEPT_ARGP_PARSE 0
+
+#endif // HWASAN_PLATFORM_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
new file mode 100644
index 000000000000..a4e5935754a8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
@@ -0,0 +1,36 @@
+//===-- hwasan_poisoning.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_poisoning.h"
+
+#include "hwasan_mapping.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_linux.h"
+
+namespace __hwasan {
+
+uptr TagMemory(uptr p, uptr size, tag_t tag) {
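+  // Callers may pass an arbitrary [p, p + size) range; round it outward to
+  // whole shadow granules (kShadowAlignment bytes) before delegating to the
+  // aligned tagging primitive.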
+ uptr start = RoundDownTo(p, kShadowAlignment);
+ uptr end = RoundUpTo(p + size, kShadowAlignment);
+ return TagMemoryAligned(start, end - start, tag);
+}
+
+} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+ // Fixme: implement actual tag checking.
+ return false;
+}
+} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.h
new file mode 100644
index 000000000000..61751f7d252e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.h
@@ -0,0 +1,24 @@
+//===-- hwasan_poisoning.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_POISONING_H
+#define HWASAN_POISONING_H
+
+#include "hwasan.h"
+
+namespace __hwasan {
+uptr TagMemory(uptr p, uptr size, tag_t tag);
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag);
+
+} // namespace __hwasan
+
+#endif // HWASAN_POISONING_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp
new file mode 100644
index 000000000000..8da47b5a2b78
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp
@@ -0,0 +1,21 @@
+//===-- hwasan_preinit.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer, an address sanity checker.
+//
+// Call __hwasan_init at a very early stage of process startup.
+//===----------------------------------------------------------------------===//
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+// This section is linked into the main executable when -fsanitize=hwaddress is
+// specified to perform initialization at a very early stage.
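+// Entries in .preinit_array run before any ELF constructors, including those
+// of shared library dependencies, so hwasan is initialized before
+// instrumented code executes.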
+__attribute__((section(".preinit_array"), used)) static auto preinit =
+ __hwasan_init;
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h
new file mode 100644
index 000000000000..48a140ffc923
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h
@@ -0,0 +1,56 @@
+//===-- hwasan_registers.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the register state retrieved by hwasan when reporting an
+// error.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REGISTERS_H
+#define HWASAN_REGISTERS_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if defined(__aarch64__)
+
+# define CAN_GET_REGISTERS 1
+
+struct Registers {
+ uptr x[32];
+};
+
+__attribute__((always_inline, unused)) static Registers GetRegisters() {
+ Registers regs;
+ __asm__ volatile(
+ "stp x0, x1, [%1, #(8 * 0)]\n"
+ "stp x2, x3, [%1, #(8 * 2)]\n"
+ "stp x4, x5, [%1, #(8 * 4)]\n"
+ "stp x6, x7, [%1, #(8 * 6)]\n"
+ "stp x8, x9, [%1, #(8 * 8)]\n"
+ "stp x10, x11, [%1, #(8 * 10)]\n"
+ "stp x12, x13, [%1, #(8 * 12)]\n"
+ "stp x14, x15, [%1, #(8 * 14)]\n"
+ "stp x16, x17, [%1, #(8 * 16)]\n"
+ "stp x18, x19, [%1, #(8 * 18)]\n"
+ "stp x20, x21, [%1, #(8 * 20)]\n"
+ "stp x22, x23, [%1, #(8 * 22)]\n"
+ "stp x24, x25, [%1, #(8 * 24)]\n"
+ "stp x26, x27, [%1, #(8 * 26)]\n"
+ "stp x28, x29, [%1, #(8 * 28)]\n"
+ : "=m"(regs)
+ : "r"(regs.x));
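+  // The stores above capture x0-x29 only; fill in the remaining slots with
+  // the caller's return address and this function's frame address via
+  // compiler builtins.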
+ regs.x[30] = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
+ regs.x[31] = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+ return regs;
+}
+
+#else
+# define CAN_GET_REGISTERS 0
+#endif
+
+#endif // HWASAN_REGISTERS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
new file mode 100644
index 000000000000..bc66e6e805c9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -0,0 +1,1138 @@
+//===-- hwasan_report.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_report.h"
+
+#include <dlfcn.h>
+
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_globals.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+namespace __hwasan {
+
+class ScopedReport {
+ public:
+ explicit ScopedReport(bool fatal) : fatal(fatal) {
+ Lock lock(&error_message_lock_);
+ error_message_ptr_ = &error_message_;
+ ++hwasan_report_count;
+ }
+
+ ~ScopedReport() {
+ void (*report_cb)(const char *);
+ {
+ Lock lock(&error_message_lock_);
+ report_cb = error_report_callback_;
+ error_message_ptr_ = nullptr;
+ }
+ if (report_cb)
+ report_cb(error_message_.data());
+ if (fatal)
+ SetAbortMessage(error_message_.data());
+ if (common_flags()->print_module_map >= 2 ||
+ (fatal && common_flags()->print_module_map))
+ DumpProcessMap();
+ if (fatal)
+ Die();
+ }
+
+ static void MaybeAppendToErrorMessage(const char *msg) {
+ Lock lock(&error_message_lock_);
+ if (!error_message_ptr_)
+ return;
+ error_message_ptr_->Append(msg);
+ }
+
+ static void SetErrorReportCallback(void (*callback)(const char *)) {
+ Lock lock(&error_message_lock_);
+ error_report_callback_ = callback;
+ }
+
+ private:
+ InternalScopedString error_message_;
+ bool fatal;
+
+ static Mutex error_message_lock_;
+ static InternalScopedString *error_message_ptr_
+ SANITIZER_GUARDED_BY(error_message_lock_);
+ static void (*error_report_callback_)(const char *);
+};
+
+Mutex ScopedReport::error_message_lock_;
+InternalScopedString *ScopedReport::error_message_ptr_;
+void (*ScopedReport::error_report_callback_)(const char *);
+
+// If there is an active ScopedReport, append to its error message.
+void AppendToErrorMessageBuffer(const char *buffer) {
+ ScopedReport::MaybeAppendToErrorMessage(buffer);
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+static void MaybePrintAndroidHelpUrl() {
+#if SANITIZER_ANDROID
+ Printf(
+ "Learn more about HWASan reports: "
+ "https://source.android.com/docs/security/test/memory-safety/"
+ "hwasan-reports\n");
+#endif
+}
+
+namespace {
+// A RAII object that holds a copy of the current thread stack ring buffer.
+// The actual stack buffer may change while we are iterating over it (for
+// example, Printf may call syslog() which can itself be built with hwasan).
+class SavedStackAllocations {
+ public:
+ SavedStackAllocations() = default;
+
+ explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }
+
+ void CopyFrom(Thread *t) {
+ StackAllocationsRingBuffer *rb = t->stack_allocations();
+ uptr size = rb->size() * sizeof(uptr);
+ void *storage =
+ MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
+ new (&rb_) StackAllocationsRingBuffer(*rb, storage);
+ thread_id_ = t->unique_id();
+ }
+
+ ~SavedStackAllocations() {
+ if (rb_) {
+ StackAllocationsRingBuffer *rb = get();
+ UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ }
+ }
+
+ const StackAllocationsRingBuffer *get() const {
+ return (const StackAllocationsRingBuffer *)&rb_;
+ }
+
+ StackAllocationsRingBuffer *get() {
+ return (StackAllocationsRingBuffer *)&rb_;
+ }
+
+ u32 thread_id() const { return thread_id_; }
+
+ private:
+ uptr rb_ = 0;
+ u32 thread_id_;
+};
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Access() { return Blue(); }
+ const char *Allocation() const { return Magenta(); }
+ const char *Origin() const { return Magenta(); }
+ const char *Name() const { return Green(); }
+ const char *Location() { return Green(); }
+ const char *Thread() { return Green(); }
+};
+} // namespace
+
+static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
+ HeapAllocationRecord *har, uptr *ring_index,
+ uptr *num_matching_addrs,
+ uptr *num_matching_addrs_4b) {
+ if (!rb) return false;
+
+ *num_matching_addrs = 0;
+ *num_matching_addrs_4b = 0;
+ for (uptr i = 0, size = rb->size(); i < size; i++) {
+ auto h = (*rb)[i];
+ if (h.tagged_addr <= tagged_addr &&
+ h.tagged_addr + h.requested_size > tagged_addr) {
+ *har = h;
+ *ring_index = i;
+ return true;
+ }
+
+ // Measure the number of heap ring buffer entries that would have matched
+ // if we had only one entry per address (e.g. if the ring buffer data was
+ // stored at the address itself). This will help us tune the allocator
+ // implementation for MTE.
+ if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
+ UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
+ ++*num_matching_addrs;
+ }
+
+ // Measure the number of heap ring buffer entries that would have matched
+ // if we only had 4 tag bits, which is the case for MTE.
+ auto untag_4b = [](uptr p) {
+ return p & ((1ULL << 60) - 1);
+ };
+ if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
+ untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
+ ++*num_matching_addrs_4b;
+ }
+ }
+ return false;
+}
+
+static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
+ tag_t addr_tag, uptr untagged_addr) {
+ uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
+ bool found_local = false;
+ InternalScopedString location;
+ for (uptr i = 0; i < frames; i++) {
+ const uptr *record_addr = &(*sa)[i];
+ uptr record = *record_addr;
+ if (!record)
+ break;
+ tag_t base_tag =
+ reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
+ const uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
+ CHECK_LT(fp, kRecordFPModulus);
+ uptr pc_mask = (1ULL << kRecordFPShift) - 1;
+ uptr pc = record & pc_mask;
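+    // Each ring-buffer record packs the PC into its low kRecordFPShift bits
+    // and FP >> kRecordFPLShift into the bits above; each local's tag is
+    // recomputed as (record address >> kRecordAddrBaseTagShift) ^ tag_offset.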
+ FrameInfo frame;
+ if (!Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame))
+ continue;
+ for (LocalInfo &local : frame.locals) {
+ if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
+ continue;
+ if (!(local.name && internal_strlen(local.name)) &&
+ !(local.function_name && internal_strlen(local.function_name)) &&
+ !(local.decl_file && internal_strlen(local.decl_file)))
+ continue;
+ tag_t obj_tag = base_tag ^ local.tag_offset;
+ if (obj_tag != addr_tag)
+ continue;
+
+ // We only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
+ // So we know only `FP % kRecordFPModulus`, and we can only calculate
+ // `local_beg % kRecordFPModulus`.
+ // Out of all possible `local_beg` we will only consider 2 candidates
+ // nearest to the `untagged_addr`.
+ uptr local_beg_mod = (fp + local.frame_offset) % kRecordFPModulus;
+ // Pick `local_beg` in the same 1 MiB block as `untagged_addr`.
+ uptr local_beg =
+ RoundDownTo(untagged_addr, kRecordFPModulus) + local_beg_mod;
+ // Pick the largest `local_beg <= untagged_addr`. It's either the current
+ // one or the one before.
+ if (local_beg > untagged_addr)
+ local_beg -= kRecordFPModulus;
+
+ uptr offset = -1ull;
+ const char *whence;
+ const char *cause = nullptr;
+ uptr best_beg;
+
+      // Try the two candidate 1 MiB blocks and pick the nearest one.
+ for (uptr i = 0; i < 2; ++i, local_beg += kRecordFPModulus) {
+ uptr local_end = local_beg + local.size;
+ if (local_beg > local_end)
+ continue; // This is a wraparound.
+ if (local_beg <= untagged_addr && untagged_addr < local_end) {
+ offset = untagged_addr - local_beg;
+ whence = "inside";
+ cause = "use-after-scope";
+ best_beg = local_beg;
+          break;  // This is as close as it can be.
+ }
+
+ if (untagged_addr >= local_end) {
+ uptr new_offset = untagged_addr - local_end;
+ if (new_offset < offset) {
+ offset = new_offset;
+ whence = "after";
+ cause = "stack-buffer-overflow";
+ best_beg = local_beg;
+ }
+ } else {
+ uptr new_offset = local_beg - untagged_addr;
+ if (new_offset < offset) {
+ offset = new_offset;
+ whence = "before";
+ cause = "stack-buffer-overflow";
+ best_beg = local_beg;
+ }
+ }
+ }
+
+      // For this to fail, `untagged_addr` would have to be near nullptr,
+      // which is impossible with the Linux user-space memory layout.
+ if (!cause)
+ continue;
+
+ if (!found_local) {
+ Printf("\nPotentially referenced stack objects:\n");
+ found_local = true;
+ }
+
+ Decorator d;
+ Printf("%s", d.Error());
+ Printf("Cause: %s\n", cause);
+ Printf("%s", d.Default());
+ Printf("%s", d.Location());
+ StackTracePrinter::GetOrInit()->RenderSourceLocation(
+ &location, local.decl_file, local.decl_line, /* column= */ 0,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ Printf(
+ "%p is located %zd bytes %s a %zd-byte local variable %s "
+ "[%p,%p) "
+ "in %s %s\n",
+ untagged_addr, offset, whence, local.size, local.name, best_beg,
+ best_beg + local.size, local.function_name, location.data());
+ location.clear();
+ Printf("%s\n", d.Default());
+ }
+ frame.Clear();
+ }
+
+ if (found_local)
+ return;
+
+ // We didn't find any locals. Most likely we don't have symbols, so dump
+ // the information that we have for offline analysis.
+ InternalScopedString frame_desc;
+ Printf("Previously allocated frames:\n");
+ for (uptr i = 0; i < frames; i++) {
+ const uptr *record_addr = &(*sa)[i];
+ uptr record = *record_addr;
+ if (!record)
+ break;
+ uptr pc_mask = (1ULL << 48) - 1;
+ uptr pc = record & pc_mask;
+ frame_desc.AppendF(" record_addr:%p record:0x%zx",
+ reinterpret_cast<const void *>(record_addr), record);
+ SymbolizedStackHolder symbolized_stack(
+ Symbolizer::GetOrInit()->SymbolizePC(pc));
+ const SymbolizedStack *frame = symbolized_stack.get();
+ if (frame) {
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ }
+ Printf("%s\n", frame_desc.data());
+ frame_desc.clear();
+ }
+}
+
+// Returns true if tag == *tag_ptr, reading tags from short granules if
+// necessary. This may return a false positive if tags 1-15 are used as a
+// regular tag rather than a short granule marker.
+static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
+ if (tag == *tag_ptr)
+ return true;
+ if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
+ return false;
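+  // A shadow value in [1, kShadowAlignment - 1] marks a short granule: the
+  // granule's real tag is stored in its last byte in application memory.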
+ uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
+ tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
+ return tag == inline_tag;
+}
+
+// HWASan globals store the size of the global in the descriptor. In cases where
+// we don't have a binary with symbols, we can't grab the size of the global
+// from the debug info - but we might be able to retrieve it from the
+// descriptor. Returns zero if the lookup failed.
+static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
+ // Find the ELF object that this global resides in.
+ Dl_info info;
+ if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
+ return 0;
+ auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
+ auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
+ reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);
+
+ // Get the load bias. This is normally the same as the dli_fbase address on
+ // position-independent code, but can be different on non-PIE executables,
+ // binaries using LLD's partitioning feature, or binaries compiled with a
+ // linker script.
+ ElfW(Addr) load_bias = 0;
+ for (const auto &phdr :
+ ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
+ if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
+ continue;
+ load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
+ break;
+ }
+
+ // Walk all globals in this ELF object, looking for the one we're interested
+ // in. Once we find it, we can stop iterating and return the size of the
+ // global we're interested in.
+ for (const hwasan_global &global :
+ HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
+ if (global.addr() <= ptr && ptr < global.addr() + global.size())
+ return global.size();
+
+ return 0;
+}
+
+void ReportStats() {}
+
+constexpr uptr kDumpWidth = 16;
+constexpr uptr kShadowLines = 17;
+constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;
+
+constexpr uptr kShortLines = 3;
+constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
+constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;
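+
+// The shadow dump is kShadowLines rows of kDumpWidth tags centered on the
+// faulting granule; short-granule tags are printed only for the middle
+// kShortLines rows, which start kShortDumpOffset tags into the dump.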
+
+static uptr GetPrintTagStart(uptr addr) {
+ addr = MemToShadow(addr);
+ addr = RoundDownTo(addr, kDumpWidth);
+ addr -= kDumpWidth * (kShadowLines / 2);
+ return addr;
+}
+
+template <typename PrintTag>
+static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
+ InternalScopedString &s,
+ PrintTag print_tag) {
+ uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
+ uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
+ uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
+ for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
+ s.Append(row == center_row_beg ? "=>" : " ");
+ s.AppendF("%p:", (void *)ShadowToMem(row));
+ for (uptr i = 0; i < kDumpWidth; i++) {
+ s.Append(row + i == addr ? "[" : " ");
+ print_tag(s, row + i);
+ s.Append(row + i == addr ? "]" : " ");
+ }
+ s.Append("\n");
+ }
+}
+
+template <typename GetTag, typename GetShortTag>
+static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
+ GetShortTag get_short_tag) {
+ InternalScopedString s;
+ addr = MemToShadow(addr);
+ s.AppendF(
+ "\nMemory tags around the buggy address (one tag corresponds to %zd "
+ "bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShadowLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ s.AppendF("%02x", tag);
+ });
+
+ s.AppendF(
+ "Tags for short granules around the buggy address (one tag corresponds "
+ "to %zd bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShortLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ if (tag >= 1 && tag <= kShadowAlignment) {
+ tag_t short_tag = get_short_tag(tag_addr);
+ s.AppendF("%02x", short_tag);
+ } else {
+ s.Append("..");
+ }
+ });
+ s.Append(
+ "See "
+ "https://clang.llvm.org/docs/"
+ "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
+ "description of short granule tags\n");
+ Printf("%s", s.data());
+}
+
+static uptr GetTopPc(const StackTrace *stack) {
+ return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
+ : 0;
+}
+
+namespace {
+class BaseReport {
+ public:
+ BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
+ : scoped_report(fatal),
+ stack(stack),
+ tagged_addr(tagged_addr),
+ access_size(access_size),
+ untagged_addr(UntagAddr(tagged_addr)),
+ ptr_tag(GetTagFromPointer(tagged_addr)),
+ mismatch_offset(FindMismatchOffset()),
+ heap(CopyHeapChunk()),
+ allocations(CopyAllocations()),
+ candidate(FindBufferOverflowCandidate()),
+ shadow(CopyShadow()) {}
+
+ protected:
+ struct OverflowCandidate {
+ uptr untagged_addr = 0;
+ bool after = false;
+ bool is_close = false;
+
+ struct {
+ uptr begin = 0;
+ uptr end = 0;
+ u32 thread_id = 0;
+ u32 stack_id = 0;
+ bool is_allocated = false;
+ } heap;
+ };
+
+ struct HeapAllocation {
+ HeapAllocationRecord har = {};
+ uptr ring_index = 0;
+ uptr num_matching_addrs = 0;
+ uptr num_matching_addrs_4b = 0;
+ u32 free_thread_id = 0;
+ };
+
+ struct Allocations {
+ ArrayRef<SavedStackAllocations> stack;
+ ArrayRef<HeapAllocation> heap;
+ };
+
+ struct HeapChunk {
+ uptr begin = 0;
+ uptr size = 0;
+ u32 stack_id = 0;
+ bool from_small_heap = false;
+ bool is_allocated = false;
+ };
+
+ struct Shadow {
+ uptr addr = 0;
+ tag_t tags[kShadowDumpSize] = {};
+ tag_t short_tags[kShortDumpSize] = {};
+ };
+
+ sptr FindMismatchOffset() const;
+ Shadow CopyShadow() const;
+ tag_t GetTagCopy(uptr addr) const;
+ tag_t GetShortTagCopy(uptr addr) const;
+ HeapChunk CopyHeapChunk() const;
+ Allocations CopyAllocations();
+ OverflowCandidate FindBufferOverflowCandidate() const;
+ void PrintAddressDescription() const;
+ void PrintHeapOrGlobalCandidate() const;
+ void PrintTags(uptr addr) const;
+
+ SavedStackAllocations stack_allocations_storage[16];
+ HeapAllocation heap_allocations_storage[256];
+
+ const ScopedReport scoped_report;
+ const StackTrace *stack = nullptr;
+ const uptr tagged_addr = 0;
+ const uptr access_size = 0;
+ const uptr untagged_addr = 0;
+ const tag_t ptr_tag = 0;
+ const sptr mismatch_offset = 0;
+
+ const HeapChunk heap;
+ const Allocations allocations;
+ const OverflowCandidate candidate;
+
+ const Shadow shadow;
+};
+
+sptr BaseReport::FindMismatchOffset() const {
+ if (!access_size)
+ return 0;
+ sptr offset =
+ __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
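+  // __hwasan_test_shadow reports the mismatch at granule granularity; the
+  // short-granule handling below refines it to the exact byte when possible.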
+ CHECK_GE(offset, 0);
+ CHECK_LT(offset, static_cast<sptr>(access_size));
+ tag_t *tag_ptr =
+ reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
+ tag_t mem_tag = *tag_ptr;
+
+ if (mem_tag && mem_tag < kShadowAlignment) {
+ tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
+ ~(kShadowAlignment - 1));
+    // If offset is 0, (untagged_addr + offset) may not be granule-aligned;
+    // in_granule_offset is the offset of the leftmost accessed byte within
+    // the bad granule.
+ u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
+ tag_t short_tag = granule_ptr[kShadowAlignment - 1];
+ // The first mismatch was a short granule that matched the ptr_tag.
+ if (short_tag == ptr_tag) {
+ // If the access starts after the end of the short granule, then the first
+ // bad byte is the first byte of the access; otherwise it is the first
+ // byte past the end of the short granule
+ if (mem_tag > in_granule_offset) {
+ offset += mem_tag - in_granule_offset;
+ }
+ }
+ }
+ return offset;
+}
+
+BaseReport::Shadow BaseReport::CopyShadow() const {
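+  // Snapshot the shadow tags (and, for short granules, the real tag stored
+  // in the granule's last byte) around the faulting address; the report is
+  // rendered from this copy.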
+ Shadow result;
+ if (!MemIsApp(untagged_addr))
+ return result;
+
+ result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
+ uptr tag_addr = result.addr;
+ uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
+ for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
+ if (!MemIsShadow(tag_addr))
+ continue;
+ result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
+ if (i < kShortDumpOffset || i >= short_end)
+ continue;
+ uptr granule_addr = ShadowToMem(tag_addr);
+ if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
+ IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
+ result.short_tags[i - kShortDumpOffset] =
+ *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
+ }
+ }
+ return result;
+}
+
+tag_t BaseReport::GetTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr);
+ uptr idx = addr - shadow.addr;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
+ return shadow.tags[idx];
+}
+
+tag_t BaseReport::GetShortTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr + kShortDumpOffset);
+ uptr idx = addr - shadow.addr - kShortDumpOffset;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
+ return shadow.short_tags[idx];
+}
+
+BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
+ HeapChunk result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ result.begin = chunk.Beg();
+ if (result.begin) {
+ result.size = chunk.ActualSize();
+ result.from_small_heap = chunk.FromSmallHeap();
+ result.is_allocated = chunk.IsAllocated();
+ result.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+BaseReport::Allocations BaseReport::CopyAllocations() {
+ if (MemIsShadow(untagged_addr))
+ return {};
+ uptr stack_allocations_count = 0;
+ uptr heap_allocations_count = 0;
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
+ t->AddrIsInStack(untagged_addr)) {
+ stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
+ }
+
+ if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
+ // Scan all threads' ring buffers to find if it's a heap-use-after-free.
+ HeapAllocationRecord har;
+ uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
+ if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
+ &ring_index, &num_matching_addrs,
+ &num_matching_addrs_4b)) {
+ auto &ha = heap_allocations_storage[heap_allocations_count++];
+ ha.har = har;
+ ha.ring_index = ring_index;
+ ha.num_matching_addrs = num_matching_addrs;
+ ha.num_matching_addrs_4b = num_matching_addrs_4b;
+ ha.free_thread_id = t->unique_id();
+ }
+ }
+ });
+
+ return {{stack_allocations_storage, stack_allocations_count},
+ {heap_allocations_storage, heap_allocations_count}};
+}
+
+BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
+ OverflowCandidate result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches ptr_tag,
+ // check the allocator if it has a live chunk there.
+ tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
+ tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
+ uptr candidate_distance = 0;
+ for (; candidate_distance < 1000; candidate_distance++) {
+ if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
+ candidate_tag_ptr = left;
+ break;
+ }
+ --left;
+ if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
+ TagsEqual(ptr_tag, right)) {
+ candidate_tag_ptr = right;
+ break;
+ }
+ ++right;
+ }
+
+ constexpr auto kCloseCandidateDistance = 1;
+ result.is_close = candidate_distance <= kCloseCandidateDistance;
+
+ result.after = candidate_tag_ptr == left;
+ result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
+ HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
+ if (chunk.IsAllocated()) {
+ result.heap.is_allocated = true;
+ result.heap.begin = chunk.Beg();
+ result.heap.end = chunk.End();
+ result.heap.thread_id = chunk.GetAllocThreadId();
+ result.heap.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+void BaseReport::PrintHeapOrGlobalCandidate() const {
+ Decorator d;
+ if (candidate.heap.is_allocated) {
+ uptr offset;
+ const char *whence;
+ if (candidate.heap.begin <= untagged_addr &&
+ untagged_addr < candidate.heap.end) {
+ offset = untagged_addr - candidate.heap.begin;
+ whence = "inside";
+ } else if (candidate.after) {
+ offset = untagged_addr - candidate.heap.end;
+ whence = "after";
+ } else {
+ offset = candidate.heap.begin - untagged_addr;
+ whence = "before";
+ }
+ Printf("%s", d.Error());
+ Printf("\nCause: heap-buffer-overflow\n");
+ Printf("%s", d.Default());
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
+ untagged_addr, offset, whence,
+ candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
+ candidate.heap.end);
+ Printf("%s", d.Allocation());
+ Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(candidate.heap.stack_id).Print();
+ return;
+ }
+ // Check whether the address points into a loaded library. If so, this is
+ // most likely a global variable.
+ const char *module_name;
+ uptr module_address;
+ Symbolizer *sym = Symbolizer::GetOrInit();
+ if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
+ &module_address)) {
+ Printf("%s", d.Error());
+ Printf("\nCause: global-overflow\n");
+ Printf("%s", d.Default());
+ DataInfo info;
+ Printf("%s", d.Location());
+ if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
+ Printf(
+ "%p is located %zd bytes %s a %zd-byte global variable "
+ "%s [%p,%p) in %s\n",
+ untagged_addr,
+ candidate.after ? untagged_addr - (info.start + info.size)
+ : info.start - untagged_addr,
+ candidate.after ? "after" : "before", info.size, info.name,
+ info.start, info.start + info.size, module_name);
+ } else {
+ uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
+ if (size == 0)
+ // We couldn't find the size of the global from the descriptors.
+ Printf(
+ "%p is located %s a global variable in "
+ "\n #0 0x%x (%s+0x%x)\n",
+ untagged_addr, candidate.after ? "after" : "before",
+ candidate.untagged_addr, module_name, module_address);
+ else
+ Printf(
+ "%p is located %s a %zd-byte global variable in "
+ "\n #0 0x%x (%s+0x%x)\n",
+ untagged_addr, candidate.after ? "after" : "before", size,
+ candidate.untagged_addr, module_name, module_address);
+ }
+ Printf("%s", d.Default());
+ }
+}
+
+void BaseReport::PrintAddressDescription() const {
+ Decorator d;
+ int num_descriptions_printed = 0;
+
+ if (MemIsShadow(untagged_addr)) {
+ Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
+ d.Default());
+ return;
+ }
+
+  // Print some very basic information about the address if it belongs to a
+  // heap chunk.
+ if (heap.begin) {
+ Printf(
+ "%s[%p,%p) is a %s %s heap chunk; "
+ "size: %zd offset: %zd\n%s",
+ d.Location(), heap.begin, heap.begin + heap.size,
+ heap.from_small_heap ? "small" : "large",
+ heap.is_allocated ? "allocated" : "unallocated", heap.size,
+ untagged_addr - heap.begin, d.Default());
+ }
+
+ auto announce_by_id = [](u32 thread_id) {
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (thread_id == t->unique_id())
+ t->Announce();
+ });
+ };
+
+ // Check stack first. If the address is on the stack of a live thread, we
+ // know it cannot be a heap / global overflow.
+ for (const auto &sa : allocations.stack) {
+ Printf("%s", d.Error());
+ Printf("\nCause: stack tag-mismatch\n");
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ sa.thread_id());
+ Printf("%s", d.Default());
+ announce_by_id(sa.thread_id());
+ PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
+ num_descriptions_printed++;
+ }
+
+ if (allocations.stack.empty() && candidate.untagged_addr &&
+ candidate.is_close) {
+ PrintHeapOrGlobalCandidate();
+ num_descriptions_printed++;
+ }
+
+ for (const auto &ha : allocations.heap) {
+ const HeapAllocationRecord har = ha.har;
+
+ Printf("%s", d.Error());
+ Printf("\nCause: use-after-free\n");
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
+ untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+ har.requested_size, UntagAddr(har.tagged_addr),
+ UntagAddr(har.tagged_addr) + har.requested_size);
+ Printf("%s", d.Allocation());
+ Printf("freed by thread T%u here:\n", ha.free_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.free_context_id).Print();
+
+ Printf("%s", d.Allocation());
+ Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.alloc_context_id).Print();
+
+ // Print a developer note: the index of this heap object
+ // in the thread's deallocation ring buffer.
+ Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
+ flags()->heap_history_size);
+ Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
+ Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
+ ha.num_matching_addrs_4b);
+
+ announce_by_id(ha.free_thread_id);
+ // TODO: announce_by_id(har.alloc_thread_id);
+ num_descriptions_printed++;
+ }
+
+ if (candidate.untagged_addr && num_descriptions_printed == 0) {
+ PrintHeapOrGlobalCandidate();
+ num_descriptions_printed++;
+ }
+
+  // Print the remaining threads as extra information, one line per thread.
+ if (flags()->print_live_threads_info) {
+ Printf("\n");
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+ }
+
+  if (!num_descriptions_printed)
+    // We exhausted our possibilities. Bail out.
+    Printf("HWAddressSanitizer cannot describe the address in more detail.\n");
+ if (num_descriptions_printed > 1) {
+ Printf(
+ "There are %d potential causes, printed above in order "
+ "of likeliness.\n",
+ num_descriptions_printed);
+ }
+}
+
+void BaseReport::PrintTags(uptr addr) const {
+ if (shadow.addr) {
+ PrintTagsAroundAddr(
+ addr, [&](uptr addr) { return GetTagCopy(addr); },
+ [&](uptr addr) { return GetShortTagCopy(addr); });
+ }
+}
+
+class InvalidFreeReport : public BaseReport {
+ public:
+ InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
+ ~InvalidFreeReport();
+};
+
+InvalidFreeReport::~InvalidFreeReport() {
+ Decorator d;
+ Printf("%s", d.Error());
+ uptr pc = GetTopPc(stack);
+ const char *bug_type = "invalid-free";
+ const Thread *thread = GetCurrentThread();
+ if (thread) {
+ Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
+ SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
+ } else {
+ Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
+ SanitizerToolName, bug_type, untagged_addr, pc);
+ }
+ Printf("%s", d.Access());
+ if (shadow.addr) {
+ Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
+ GetTagCopy(MemToShadow(untagged_addr)));
+ }
+ Printf("%s", d.Default());
+
+ stack->Print();
+
+ PrintAddressDescription();
+ PrintTags(untagged_addr);
+ MaybePrintAndroidHelpUrl();
+ ReportErrorSummary(bug_type, stack);
+}
+
+class TailOverwrittenReport : public BaseReport {
+ public:
+ explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
+ uptr orig_size, const u8 *expected)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
+ orig_size(orig_size),
+ tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
+ CHECK_GT(tail_size, 0U);
+ CHECK_LT(tail_size, kShadowAlignment);
+ internal_memcpy(tail_copy,
+ reinterpret_cast<u8 *>(untagged_addr + orig_size),
+ tail_size);
+ internal_memcpy(actual_expected, expected, tail_size);
+    // The short granule tag is stashed in the last byte of the magic string.
+    // To avoid confusion, make the expected magic string contain the short
+    // granule tag as well.
+ if (orig_size % kShadowAlignment != 0)
+ actual_expected[tail_size - 1] = ptr_tag;
+ }
+ ~TailOverwrittenReport();
+
+ private:
+ const uptr orig_size = 0;
+ const uptr tail_size = 0;
+ u8 actual_expected[kShadowAlignment] = {};
+ u8 tail_copy[kShadowAlignment] = {};
+};
+
+TailOverwrittenReport::~TailOverwrittenReport() {
+ Decorator d;
+ Printf("%s", d.Error());
+ const char *bug_type = "allocation-tail-overwritten";
+ Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
+ bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
+ Printf("\n%s", d.Default());
+ Printf(
+ "Stack of invalid access unknown. Issue detected at deallocation "
+ "time.\n");
+ Printf("%s", d.Allocation());
+ Printf("deallocated here:\n");
+ Printf("%s", d.Default());
+ stack->Print();
+ if (heap.begin) {
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n");
+ Printf("%s", d.Default());
+ GetStackTraceFromId(heap.stack_id).Print();
+ }
+
+ InternalScopedString s;
+ u8 *tail = tail_copy;
+ s.Append("Tail contains: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
+ s.Append("\n");
+ s.Append("Expected: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
+ s.Append("\n");
+ s.Append(" ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(" ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
+
+ s.AppendF(
+ "\nThis error occurs when a buffer overflow overwrites memory\n"
+ "after a heap object, but within the %zd-byte granule, e.g.\n"
+ " char *x = new char[20];\n"
+ " x[25] = 42;\n"
+ "%s does not detect such bugs in uninstrumented code at the time of "
+ "write,"
+ "\nbut can detect them at the time of free/delete.\n"
+ "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
+ kShadowAlignment, SanitizerToolName);
+ Printf("%s", s.data());
+ GetCurrentThread()->Announce();
+ PrintTags(untagged_addr);
+ MaybePrintAndroidHelpUrl();
+ ReportErrorSummary(bug_type, stack);
+}
+
+class TagMismatchReport : public BaseReport {
+ public:
+ explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
+ uptr access_size, bool is_store, bool fatal,
+ uptr *registers_frame)
+ : BaseReport(stack, fatal, tagged_addr, access_size),
+ is_store(is_store),
+ registers_frame(registers_frame) {}
+ ~TagMismatchReport();
+
+ private:
+ const bool is_store;
+ const uptr *registers_frame;
+};
+
+TagMismatchReport::~TagMismatchReport() {
+ Decorator d;
+ // TODO: when possible, try to print heap-use-after-free, etc.
+ const char *bug_type = "tag-mismatch";
+ uptr pc = GetTopPc(stack);
+ Printf("%s", d.Error());
+ Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
+ untagged_addr, pc);
+
+ Thread *t = GetCurrentThread();
+
+ tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));
+
+ Printf("%s", d.Access());
+ if (mem_tag && mem_tag < kShadowAlignment) {
+ tag_t short_tag =
+ GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
+ Printf(
+ "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
+ is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
+ mem_tag, short_tag, t->unique_id());
+ } else {
+ Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
+ is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
+ mem_tag, t->unique_id());
+ }
+ if (mismatch_offset)
+ Printf("Invalid access starting at offset %zu\n", mismatch_offset);
+ Printf("%s", d.Default());
+
+ stack->Print();
+
+ PrintAddressDescription();
+ t->Announce();
+
+ PrintTags(untagged_addr + mismatch_offset);
+
+ if (registers_frame)
+ ReportRegisters(registers_frame, pc);
+
+ MaybePrintAndroidHelpUrl();
+ ReportErrorSummary(bug_type, stack);
+}
+} // namespace
+
+void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
+ InvalidFreeReport R(stack, tagged_addr);
+}
+
+void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
+ const u8 *expected) {
+ TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
+}
+
+void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame) {
+ TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
+ registers_frame);
+}
+
+// See the frame breakdown defined in __hwasan_tag_mismatch (from
+// hwasan_tag_mismatch_{aarch64,riscv64}.S).
+void ReportRegisters(const uptr *frame, uptr pc) {
+ Printf("\nRegisters where the failure occurred (pc %p):\n", pc);
+
+ // We explicitly print a single line (4 registers/line) each iteration to
+ // reduce the amount of logcat error messages printed. Each Printf() will
+ // result in a new logcat line, irrespective of whether a newline is present,
+ // and so we wish to reduce the number of Printf() calls we have to make.
+#if defined(__aarch64__)
+ Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
+ frame[0], frame[1], frame[2], frame[3]);
+#elif SANITIZER_RISCV64
+ Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
+ reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
+ frame[3]);
+#endif
+ Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
+ frame[4], frame[5], frame[6], frame[7]);
+ Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
+ frame[8], frame[9], frame[10], frame[11]);
+ Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
+ frame[12], frame[13], frame[14], frame[15]);
+ Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
+ frame[16], frame[17], frame[18], frame[19]);
+ Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
+ frame[20], frame[21], frame[22], frame[23]);
+ Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
+ frame[24], frame[25], frame[26], frame[27]);
+ // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
+ // passes it to this function.
+#if defined(__aarch64__)
+ Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
+ frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
+#elif SANITIZER_RISCV64
+ Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
+ frame[29], frame[30], frame[31]);
+#else
+#endif
+}
+
+} // namespace __hwasan
+
+void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
+ __hwasan::ScopedReport::SetErrorReportCallback(callback);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h
new file mode 100644
index 000000000000..bb9492a18cf9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h
@@ -0,0 +1,35 @@
+//===-- hwasan_report.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. HWASan-private header for error
+/// reporting functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REPORT_H
+#define HWASAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __hwasan {
+
+void ReportStats();
+void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame);
+void ReportInvalidFree(StackTrace *stack, uptr addr);
+void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
+ const u8 *expected);
+void ReportRegisters(const uptr *registers_frame, uptr pc);
+void ReportAtExitStatistics();
+
+} // namespace __hwasan
+
+#endif // HWASAN_REPORT_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
new file mode 100644
index 000000000000..0c0abb6de861
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
@@ -0,0 +1,102 @@
+//===-- hwasan_setjmp_aarch64.S -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the link register by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) (no modification of any other saved register, but that's not really going
+// to occur, and hence isn't as much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+
+.section .text
+.file "hwasan_setjmp_aarch64.S"
+
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
+ CFI_STARTPROC
+ BTI_C
+ mov x1, #0
+ b ASM_WRAPPER_NAME(sigsetjmp)
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+
+#if SANITIZER_ANDROID
+// Bionic also defines a function `setjmp` that calls `sigsetjmp`, saving the
+// current signal mask.
+.global ASM_WRAPPER_NAME(setjmp_bionic)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp_bionic))
+ASM_WRAPPER_NAME(setjmp_bionic):
+ CFI_STARTPROC
+ BTI_C
+ mov x1, #1
+ b ASM_WRAPPER_NAME(sigsetjmp)
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp_bionic))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp_bionic)
+#endif
+
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
+ CFI_STARTPROC
+ BTI_C
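+  // The slot layout below (x19..x30 in slots 0-11, sp in slot 13, d8-d15 in
+  // slots 14-21) matches the internal __hw_jmp_buf layout that the
+  // intercepted longjmp restores from (see hwasan_interceptors.cpp).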
+ stp x19, x20, [x0, #0<<3]
+ stp x21, x22, [x0, #2<<3]
+ stp x23, x24, [x0, #4<<3]
+ stp x25, x26, [x0, #6<<3]
+ stp x27, x28, [x0, #8<<3]
+ stp x29, x30, [x0, #10<<3]
+ stp d8, d9, [x0, #14<<3]
+ stp d10, d11, [x0, #16<<3]
+ stp d12, d13, [x0, #18<<3]
+ stp d14, d15, [x0, #20<<3]
+ mov x2, sp
+ str x2, [x0, #13<<3]
+  // The second argument to __sigjmp_save (savemask) is always in place here:
+  // the setjmp wrappers above pass a constant, and direct sigsetjmp callers
+  // supply their own value. __sigjmp_save is defined in
+  // hwasan_interceptors.cpp.
+ b __sigjmp_save
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+
+
+#if SANITIZER_ANDROID
+ASM_TRAMPOLINE_ALIAS(sigsetjmp, sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(setjmp, setjmp_bionic)
+#else
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+#endif
+
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
new file mode 100644
index 000000000000..c01f4e25e8a4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
@@ -0,0 +1,92 @@
+//===-- hwasan_setjmp_riscv64.S -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+// setjmp interceptor for RISC-V.
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__riscv) && (__riscv_xlen == 64)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the link register by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) (no modification of any other saved register, but that's not really going
+// to occur, and hence isn't as much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+
+.section .text
+.file "hwasan_setjmp_riscv64.S"
+
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
+ CFI_STARTPROC
+ addi x11, x0, 0
+ tail ASM_WRAPPER_NAME(sigsetjmp)
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
+ CFI_STARTPROC
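+  // The slot layout below (ra in slot 0, s0-s11 in slots 1-12, sp in slot 13,
+  // fs0-fs11 in slots 14-25) matches the internal __hw_jmp_buf layout that
+  // the intercepted longjmp restores from (see hwasan_interceptors.cpp).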
+ sd ra, 0<<3(x10)
+ sd s0, 1<<3(x10)
+ sd s1, 2<<3(x10)
+ sd s2, 3<<3(x10)
+ sd s3, 4<<3(x10)
+ sd s4, 5<<3(x10)
+ sd s5, 6<<3(x10)
+ sd s6, 7<<3(x10)
+ sd s7, 8<<3(x10)
+ sd s8, 9<<3(x10)
+ sd s9, 10<<3(x10)
+ sd s10, 11<<3(x10)
+ sd s11, 12<<3(x10)
+ sd sp, 13<<3(x10)
+#if __riscv_float_abi_double
+ fsd fs0, 14<<3(x10)
+ fsd fs1, 15<<3(x10)
+ fsd fs2, 16<<3(x10)
+ fsd fs3, 17<<3(x10)
+ fsd fs4, 18<<3(x10)
+ fsd fs5, 19<<3(x10)
+ fsd fs6, 20<<3(x10)
+ fsd fs7, 21<<3(x10)
+ fsd fs8, 22<<3(x10)
+ fsd fs9, 23<<3(x10)
+ fsd fs10, 24<<3(x10)
+ fsd fs11, 25<<3(x10)
+#elif __riscv_float_abi_soft
+#else
+# error "Unsupported case"
+#endif
+  // The second argument to __sigjmp_save (savemask) is always in place here:
+  // the setjmp wrapper above passes 0 (false), and direct sigsetjmp callers
+  // supply their own value. __sigjmp_save is defined in
+  // hwasan_interceptors.cpp.
+ tail __sigjmp_save
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
new file mode 100644
index 000000000000..9804e8d7ceca
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
@@ -0,0 +1,79 @@
+//===-- hwasan_setjmp_x86_64.S --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// setjmp interceptor for x86_64.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__x86_64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the return address by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) (no modification of any other saved register, but that's not really going
+// to occur, and hence isn't as much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+//
+// TODO: Handle Intel CET.
+
+.section .text
+.file "hwasan_setjmp_x86_64.S"
+
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
+ CFI_STARTPROC
+ _CET_ENDBR
+ xorl %esi, %esi
+ jmp .Linterceptor_sigsetjmp
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
+.Linterceptor_sigsetjmp:
+ CFI_STARTPROC
+ _CET_ENDBR
+
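+  // Slot order (rbx, rbp, r12-r15, caller's rsp, return address) matches the
+  // internal __hw_jmp_buf layout consumed by the intercepted longjmp (see
+  // hwasan_interceptors.cpp).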
+ // Save callee save registers.
+ mov %rbx, (0*8)(%rdi)
+ mov %rbp, (1*8)(%rdi)
+ mov %r12, (2*8)(%rdi)
+ mov %r13, (3*8)(%rdi)
+ mov %r14, (4*8)(%rdi)
+ mov %r15, (5*8)(%rdi)
+
+ // Save SP as it was in caller's frame.
+ lea 8(%rsp), %rdx
+ mov %rdx, (6*8)(%rdi)
+
+ // Save return address.
+ mov (%rsp), %rax
+ mov %rax, (7*8)(%rdi)
+
+ jmp __sigjmp_save
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
new file mode 100644
index 000000000000..fd060c51cd8e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -0,0 +1,160 @@
+#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
+
+// The content of this file is AArch64-only:
+#if defined(__aarch64__)
+
+// The primary responsibility of the HWASan entry point in compiler-rt is to
+// readjust the stack from the callee and save the current register values to
+// the stack.
+// This entry point function should be called from a __hwasan_check_* symbol.
+// These are generated during a lowering pass in the backend, and are found in
+// AArch64AsmPrinter::EmitHwasanMemaccessSymbols(). Please look there for
+// further information.
+// The __hwasan_check_* caller of this function should have expanded the stack
+// and saved the previous values of x0, x1, x29, and x30. This function
+// "consumes" these saved values and treats them as part of its own stack
+// frame. In this sense, the __hwasan_check_* callee and this function "share"
+// a stack frame. This lets us omit the unwinding information (.cfi_*) from
+// every __hwasan_check_* function, thereby reducing binary size. This is
+// particularly important as hwasan_check_* instances are duplicated in every
+// translation unit where HWASan is enabled.
+// This function calls HwasanTagMismatch to step back into the C++ code that
+// completes the stack unwinding and error printing. This function is not
+// permitted to return.
+
+
+// Frame from __hwasan_check_:
+// | ... |
+// | ... |
+// | Previous stack frames... |
+// +=================================+
+// | Unused 8-bytes for maintaining |
+// | 16-byte SP alignment. |
+// +---------------------------------+
+// | Return address (x30) for caller |
+// | of __hwasan_check_*. |
+// +---------------------------------+
+// | Frame address (x29) for caller |
+// | of __hwasan_check_* |
+// +---------------------------------+ <-- [SP + 232]
+// | ... |
+// | |
+// | Stack frame space for x2 - x28. |
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 16]
+// | |
+// | Saved x1, as __hwasan_check_* |
+// | clobbers it. |
+// +---------------------------------+
+// | Saved x0, likewise above. |
+// +---------------------------------+ <-- [x30 / SP]
+
+// This function takes two arguments:
+// * x0: The data address.
+// * x1: The encoded access info for the failing access.
+
+// This function has two entry points. The first, __hwasan_tag_mismatch, is used
+// by clients that were compiled without short tag checks (i.e. binaries built
+// by older compilers and binaries targeting older runtimes). In this case the
+// outlined tag check will be missing the code handling short tags (which won't
+// be used in the binary's own stack variables but may be used on the heap
+// or stack variables in other binaries), so the check needs to be done here.
+//
+// The second, __hwasan_tag_mismatch_v2, is used by binaries targeting newer
+// runtimes. This entry point bypasses the short tag check since it will have
+// already been done as part of the outlined tag check. Since tag mismatches are
+// uncommon, there isn't a significant performance benefit to being able to
+// bypass the check; the main benefits are that we can sometimes avoid
+// clobbering the x17 register in error reports, and that the program will have
+// a runtime dependency on the __hwasan_tag_mismatch_v2 symbol, so it will
+// fail to start up given an older (i.e. incompatible) runtime.
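+//
+// Short granule illustration: for a 20-byte allocation, the second 16-byte
+// granule holds only 4 valid bytes, so its shadow byte is the short tag 4 and
+// the granule's real tag is stored in its last byte. A 1-byte access at
+// offset 19 computes the granule position (19 & 0xf) + 1 = 4 <= 4 and passes
+// on to the real-tag check below; an access at offset 20 computes 5 > 4 and
+// is reported as a mismatch.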
+.section .text
+.file "hwasan_tag_mismatch_aarch64.S"
+.global __hwasan_tag_mismatch
+.type __hwasan_tag_mismatch, %function
+__hwasan_tag_mismatch:
+ BTI_J
+
+ // Compute the granule position one past the end of the access.
+ mov x16, #1
+ and x17, x1, #0xf
+ lsl x16, x16, x17
+ and x17, x0, #0xf
+ add x17, x16, x17
+
+ // Load the shadow byte again and check whether it is a short tag within the
+ // range of the granule position computed above.
+ ubfx x16, x0, #4, #52
+ ldrb w16, [x9, x16]
+ cmp w16, #0xf
+ b.hi mismatch
+ cmp w16, w17
+ b.lo mismatch
+
+ // Load the real tag from the last byte of the granule and compare against
+ // the pointer tag.
+ orr x16, x0, #0xf
+ ldrb w16, [x16]
+ cmp x16, x0, lsr #56
+ b.ne mismatch
+
+ // Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch
+ // call and resume execution.
+ ldp x0, x1, [sp], #256
+ ret
+
+.global __hwasan_tag_mismatch_v2
+.type __hwasan_tag_mismatch_v2, %function
+__hwasan_tag_mismatch_v2:
+// Avoid using a global label here, to prevent "relocation out of range".
+mismatch:
+ CFI_STARTPROC
+ BTI_J
+
+  // Set the CFA to be the return address for the caller of __hwasan_check_*.
+  // Note that we do not emit CFI predicates to describe the contents of this
+  // stack frame, as this proxy entry point should never be debugged. The
+  // contents are static and are handled by the unwinder after calling
+  // __hwasan_tag_mismatch. The frame pointer is already correctly set up by
+  // __hwasan_check_*.
+ add x29, sp, #232
+ CFI_DEF_CFA(w29, 24)
+ CFI_OFFSET(w30, -16)
+ CFI_OFFSET(w29, -24)
+
+ // Save the rest of the registers into the preallocated space left by
+ // __hwasan_check.
+ str x28, [sp, #224]
+ stp x26, x27, [sp, #208]
+ stp x24, x25, [sp, #192]
+ stp x22, x23, [sp, #176]
+ stp x20, x21, [sp, #160]
+ stp x18, x19, [sp, #144]
+ stp x16, x17, [sp, #128]
+ stp x14, x15, [sp, #112]
+ stp x12, x13, [sp, #96]
+ stp x10, x11, [sp, #80]
+ stp x8, x9, [sp, #64]
+ stp x6, x7, [sp, #48]
+ stp x4, x5, [sp, #32]
+ stp x2, x3, [sp, #16]
+
+ // Pass the address of the frame to __hwasan_tag_mismatch4, so that it can
+ // extract the saved registers from this frame without having to worry about
+ // finding this frame.
+ mov x2, sp
+
+ bl __hwasan_tag_mismatch4
+ CFI_ENDPROC
+
+.Lfunc_end0:
+ .size __hwasan_tag_mismatch, .Lfunc_end0-__hwasan_tag_mismatch
+
+#endif // defined(__aarch64__)
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S
new file mode 100644
index 000000000000..487a042405b6
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S
@@ -0,0 +1,132 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+// The content of this file is RISCV64-only:
+#if defined(__riscv) && (__riscv_xlen == 64)
+
+// The primary responsibility of the HWASan entry point in compiler-rt is to
+// readjust the stack from the callee and save the current register values to
+// the stack.
+// This entry point function should be called from a __hwasan_check_* symbol.
+// These are generated during a lowering pass in the backend, and are found in
+// RISCVAsmPrinter::EmitHwasanMemaccessSymbols(). Please look there for
+// further information.
+// The __hwasan_check_* caller of this function should have expanded the stack
+// and saved the previous values of x10(arg0), x11(arg1), x1(ra), and x8(fp).
+// This function will "consume" these saved values and treats it as part of its
+// own stack frame. In this sense, the __hwasan_check_* callee and this function
+// "share" a stack frame. This allows us to omit having unwinding information
+// (.cfi_*) present in every __hwasan_check_* function, therefore reducing binary size.
+// This is particularly important as hwasan_check_* instances are duplicated in every
+// translation unit where HWASan is enabled.
+// This function calls HwasanTagMismatch to step back into the C++ code that
+// completes the stack unwinding and error printing. This function is not
+// permitted to return.
+
+
+// | ... |
+// | ... |
+// | Previous stack frames... |
+// +=================================+
+// | ... |
+// | |
+// | Stack frame space for x12 - x31.|
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 96]
+// | Saved x11(arg1), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 88]
+// | Saved x10(arg0), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 80]
+// | |
+// | Stack frame space for x9. |
+// +---------------------------------+ <-- [SP + 72]
+// | |
+// | Saved x8(fp), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 64]
+// | ... |
+// | |
+// | Stack frame space for x2 - x7. |
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 16]
+// | Return address (x1) for caller |
+// | of __hwasan_check_*. |
+// +---------------------------------+ <-- [SP + 8]
+// | Reserved place for x0, possibly |
+// | junk, since we don't save it. |
+// +---------------------------------+ <-- [x2 / SP]
+
+// This function takes two arguments:
+// * x10/a0: The data address.
+// * x11/a1: The encoded access info for the failing access.
+
+.section .text
+.file "hwasan_tag_mismatch_riscv64.S"
+
+.global __hwasan_tag_mismatch_v2
+ASM_TYPE_FUNCTION(__hwasan_tag_mismatch_v2)
+__hwasan_tag_mismatch_v2:
+ CFI_STARTPROC
+
+  // Set the CFA to be the return address for the caller of __hwasan_check_*.
+  // Note that we do not emit CFI predicates to describe the contents of this
+  // stack frame, as this proxy entry point should never be debugged. The
+  // contents are static and are handled by the unwinder after calling
+  // __hwasan_tag_mismatch. The frame pointer is already correctly set up by
+  // __hwasan_check_*.
+ addi fp, sp, 256
+ CFI_DEF_CFA(fp, 0)
+ CFI_OFFSET(ra, -248)
+ CFI_OFFSET(fp, -192)
+
+ // Save the rest of the registers into the preallocated space left by
+ // __hwasan_check.
+ sd x31, 248(sp)
+ sd x30, 240(sp)
+ sd x29, 232(sp)
+ sd x28, 224(sp)
+ sd x27, 216(sp)
+ sd x26, 208(sp)
+ sd x25, 200(sp)
+ sd x24, 192(sp)
+ sd x23, 184(sp)
+ sd x22, 176(sp)
+ sd x21, 168(sp)
+ sd x20, 160(sp)
+ sd x19, 152(sp)
+ sd x18, 144(sp)
+ sd x17, 136(sp)
+ sd x16, 128(sp)
+ sd x15, 120(sp)
+ sd x14, 112(sp)
+ sd x13, 104(sp)
+ sd x12, 96(sp)
+ // sd x11, 88(sp) ; already saved
+ // sd x10, 80(sp) ; already saved
+ sd x9, 72(sp)
+ // sd x8, 64(sp) ; already saved
+ sd x7, 56(sp)
+ sd x6, 48(sp)
+ sd x5, 40(sp)
+ sd x4, 32(sp)
+ sd x3, 24(sp)
+ sd x2, 16(sp)
+ // sd x1, 8(sp) ; already saved
+ // sd x0, 0(sp) ; don't store zero register
+
+ // Pass the address of the frame to __hwasan_tag_mismatch4, so that it can
+ // extract the saved registers from this frame without having to worry about
+ // finding this frame.
+ mv x12, sp
+
+ call __hwasan_tag_mismatch4
+ CFI_ENDPROC
+ASM_SIZE(__hwasan_tag_mismatch_v2)
+
+#endif // defined(__riscv) && (__riscv_xlen == 64)
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
new file mode 100644
index 000000000000..3e14a718513d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -0,0 +1,223 @@
+
+#include "hwasan_thread.h"
+
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
+#include "hwasan_poisoning.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __hwasan {
+
+static u32 RandomSeed() {
+ u32 seed;
+ do {
+ if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
+ /*blocking=*/false))) {
+ seed = static_cast<u32>(
+ (NanoTime() >> 12) ^
+ (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
+ }
+ } while (!seed);
+ return seed;
+}
+
+void Thread::InitRandomState() {
+ random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
+ random_state_inited_ = true;
+
+ // Push a random number of zeros onto the ring buffer so that the first stack
+ // tag base will be random.
+ for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
+ stack_allocations_->push(0);
+}
+
+void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
+ const InitState *state) {
+ CHECK_EQ(0, unique_id_); // try to catch bad stack reuse
+ CHECK_EQ(0, stack_top_);
+ CHECK_EQ(0, stack_bottom_);
+
+ static atomic_uint64_t unique_id;
+ unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
+ if (!IsMainThread())
+ os_id_ = GetTid();
+
+ if (auto sz = flags()->heap_history_size)
+ heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
+
+#if !SANITIZER_FUCHSIA
+  // On Fuchsia the stack ring buffer is not initialized here: threads are
+  // initialized before we enter the thread itself, so InitStackRingBuffer is
+  // instead called later.
+ InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
+#endif
+ InitStackAndTls(state);
+ dtls_ = DTLS_Get();
+ AllocatorThreadStart(allocator_cache());
+
+ if (flags()->verbose_threads) {
+ if (IsMainThread()) {
+ Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
+ sizeof(Thread), heap_allocations_->SizeInBytes(),
+ stack_allocations_->size() * sizeof(uptr));
+ }
+ Print("Creating : ");
+ }
+ ClearShadowForThreadStackAndTLS();
+}
+
+void Thread::InitStackRingBuffer(uptr stack_buffer_start,
+ uptr stack_buffer_size) {
+ HwasanTSDThreadInit(); // Only needed with interceptors.
+ uptr *ThreadLong = GetCurrentThreadLongPtr();
+ // The following implicitly sets (this) as the current thread.
+ stack_allocations_ = new (ThreadLong)
+ StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
+ // Check that it worked.
+ CHECK_EQ(GetCurrentThread(), this);
+
+  // ScopedTaggingDisabler needs GetCurrentThread to be set up.
+ ScopedTaggingDisabler disabler;
+
+ if (stack_bottom_) {
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+ CHECK(MemIsApp(stack_bottom_));
+ CHECK(MemIsApp(stack_top_ - 1));
+ }
+}
+
+void Thread::ClearShadowForThreadStackAndTLS() {
+ if (stack_top_ != stack_bottom_)
+ TagMemory(UntagAddr(stack_bottom_),
+ UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
+ GetTagFromPointer(stack_top_));
+ if (tls_begin_ != tls_end_)
+ TagMemory(UntagAddr(tls_begin_),
+ UntagAddr(tls_end_) - UntagAddr(tls_begin_),
+ GetTagFromPointer(tls_begin_));
+}
+
+void Thread::Destroy() {
+ if (flags()->verbose_threads)
+ Print("Destroying: ");
+ AllocatorThreadFinish(allocator_cache());
+ ClearShadowForThreadStackAndTLS();
+ if (heap_allocations_)
+ heap_allocations_->Delete();
+ DTLS_Destroy();
+ // Unregister this as the current thread.
+  // Instrumented code cannot run on this thread from this point onwards, but
+ // malloc/free can still be served. Glibc may call free() very late, after all
+ // TSD destructors are done.
+ CHECK_EQ(GetCurrentThread(), this);
+ *GetCurrentThreadLongPtr() = 0;
+}
+
+void Thread::Print(const char *Prefix) {
+ Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
+ (void *)this, stack_bottom(), stack_top(),
+ stack_top() - stack_bottom(), tls_begin(), tls_end());
+}
+
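+// Marsaglia's xorshift32: the (13, 17, 5) shift triple gives a full-period
+// generator over non-zero 32-bit states, so a non-zero seed never decays to
+// zero.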
+static u32 xorshift(u32 state) {
+ state ^= state << 13;
+ state ^= state >> 17;
+ state ^= state << 5;
+ return state;
+}
+
+// Generate a (pseudo-)random non-zero tag.
+tag_t Thread::GenerateRandomTag(uptr num_bits) {
+ DCHECK_GT(num_bits, 0);
+ if (tagging_disabled_)
+ return 0;
+ tag_t tag;
+ const uptr tag_mask = (1ULL << num_bits) - 1;
+ do {
+ if (flags()->random_tags) {
+ if (!random_buffer_) {
+ EnsureRandomStateInited();
+ random_buffer_ = random_state_ = xorshift(random_state_);
+ }
+ CHECK(random_buffer_);
+ tag = random_buffer_ & tag_mask;
+ random_buffer_ >>= num_bits;
+ } else {
+ EnsureRandomStateInited();
+ random_state_ += 1;
+ tag = random_state_ & tag_mask;
+ }
+ } while (!tag);
+ return tag;
+}
+
+void EnsureMainThreadIDIsCorrect() {
+ auto *t = __hwasan::GetCurrentThread();
+ if (t && (t->IsMainThread()))
+ t->set_os_id(GetTid());
+}
+
+} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
+ auto &tl = __hwasan::hwasanThreadList();
+ tl.CheckLocked();
+ return &tl;
+}
+
+static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
+ return GetHwasanThreadListLocked()->FindThreadLocked(
+ [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
+}
+
+void LockThreads() {
+ __hwasan::hwasanThreadList().Lock();
+ __hwasan::hwasanThreadArgRetval().Lock();
+}
+
+void UnlockThreads() {
+ __hwasan::hwasanThreadArgRetval().Unlock();
+ __hwasan::hwasanThreadList().Unlock();
+}
+
+void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ auto *t = GetThreadByOsIDLocked(os_id);
+ if (!t)
+ return false;
+ *stack_begin = t->stack_bottom();
+ *stack_end = t->stack_top();
+ *tls_begin = t->tls_begin();
+ *tls_end = t->tls_end();
+  // FIXME: is this correct for HWASan?
+ *cache_begin = 0;
+ *cache_end = 0;
+ *dtls = t->dtls();
+ return true;
+}
+
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
+
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
+
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
+}
+
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
+
+} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
new file mode 100644
index 000000000000..9e1b438e48f7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -0,0 +1,123 @@
+//===-- hwasan_thread.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_THREAD_H
+#define HWASAN_THREAD_H
+
+#include "hwasan_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+
+namespace __hwasan {
+
+typedef __sanitizer::CompactRingBuffer<uptr> StackAllocationsRingBuffer;
+
+class Thread {
+ public:
+ // These are optional parameters that can be passed to Init.
+ struct InitState;
+
+ void Init(uptr stack_buffer_start, uptr stack_buffer_size,
+ const InitState *state = nullptr);
+
+ void InitStackAndTls(const InitState *state = nullptr);
+
+ // Must be called from the thread itself.
+ void InitStackRingBuffer(uptr stack_buffer_start, uptr stack_buffer_size);
+
+ inline void EnsureRandomStateInited() {
+ if (UNLIKELY(!random_state_inited_))
+ InitRandomState();
+ }
+
+ void Destroy();
+
+ uptr stack_top() { return stack_top_; }
+ uptr stack_bottom() { return stack_bottom_; }
+ uptr stack_size() { return stack_top() - stack_bottom(); }
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
+ bool IsMainThread() { return unique_id_ == 0; }
+
+ bool AddrIsInStack(uptr addr) {
+ return addr >= stack_bottom_ && addr < stack_top_;
+ }
+
+ AllocatorCache *allocator_cache() { return &allocator_cache_; }
+ HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
+ StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
+
+ tag_t GenerateRandomTag(uptr num_bits = kTagBits);
+
+ void DisableTagging() { tagging_disabled_++; }
+ void EnableTagging() { tagging_disabled_--; }
+
+ u32 unique_id() const { return unique_id_; }
+ void Announce() {
+ if (announced_) return;
+ announced_ = true;
+ Print("Thread: ");
+ }
+
+ tid_t os_id() const { return os_id_; }
+ void set_os_id(tid_t os_id) { os_id_ = os_id; }
+
+ uptr &vfork_spill() { return vfork_spill_; }
+
+ private:
+ // NOTE: There is no Thread constructor. It is allocated
+ // via mmap() and *must* be valid in zero-initialized state.
+ void ClearShadowForThreadStackAndTLS();
+ void Print(const char *prefix);
+ void InitRandomState();
+ uptr vfork_spill_;
+ uptr stack_top_;
+ uptr stack_bottom_;
+ uptr tls_begin_;
+ uptr tls_end_;
+ DTLS *dtls_;
+
+ u32 random_state_;
+ u32 random_buffer_;
+
+ AllocatorCache allocator_cache_;
+ HeapAllocationsRingBuffer *heap_allocations_;
+ StackAllocationsRingBuffer *stack_allocations_;
+
+ u32 unique_id_; // counting from zero.
+
+ tid_t os_id_;
+
+  u32 tagging_disabled_;  // If non-zero, malloc uses the zero tag in this thread.
+
+ bool announced_;
+
+ bool random_state_inited_; // Whether InitRandomState() has been called.
+
+ friend struct ThreadListHead;
+};
+
+Thread *GetCurrentThread();
+uptr *GetCurrentThreadLongPtr();
+
+// Used to handle fork().
+void EnsureMainThreadIDIsCorrect();
+
+struct ScopedTaggingDisabler {
+ ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
+ ~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
+};
+
+} // namespace __hwasan
+
+#endif // HWASAN_THREAD_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
new file mode 100644
index 000000000000..794cfb7550d7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
@@ -0,0 +1,29 @@
+#include "hwasan_thread_list.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
+
+namespace __hwasan {
+
+static HwasanThreadList *hwasan_thread_list;
+static ThreadArgRetval *thread_data;
+
+HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
+ThreadArgRetval &hwasanThreadArgRetval() { return *thread_data; }
+
+void InitThreadList(uptr storage, uptr size) {
+ CHECK_EQ(hwasan_thread_list, nullptr);
+
+ alignas(alignof(HwasanThreadList)) static char
+ thread_list_placeholder[sizeof(HwasanThreadList)];
+ hwasan_thread_list =
+ new (thread_list_placeholder) HwasanThreadList(storage, size);
+
+ CHECK_EQ(thread_data, nullptr);
+
+ alignas(alignof(ThreadArgRetval)) static char
+ thread_data_placeholder[sizeof(ThreadArgRetval)];
+ thread_data = new (thread_data_placeholder) ThreadArgRetval();
+}
+
+} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
new file mode 100644
index 000000000000..369a1c3d6f5f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
@@ -0,0 +1,229 @@
+//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+// HwasanThreadList is a registry for live threads, as well as an allocator for
+// HwasanThread objects and their stack history ring buffers. There are
+// constraints on memory layout of the shadow region and CompactRingBuffer that
+// are part of the ABI contract between compiler-rt and llvm.
+//
+// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
+// * All stack ring buffers are located within a (2**kShadowBaseAlignment)
+//   sized region below and adjacent to the shadow region.
+// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 7), and is
+// aligned to twice its size. The value of N can be different for each buffer.
+//
+// These constraints guarantee that, given an address A of any element of the
+// ring buffer,
+//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
+// is the address of the next element of that ring buffer (with wrap-around):
+// the buffer is 2**(N+12) bytes large and aligned to 2**(N+13), so bit
+// (N+12) of an address can only become set when incrementing one past the
+// last element, and clearing it wraps back to the start of the buffer.
+// And, with K = kShadowBaseAlignment,
+// S = (A | ((1 << K) - 1)) + 1
+// (align up to kShadowBaseAlignment) is the start of the shadow region.
+//
+// These calculations are used in compiler instrumentation to update the ring
+// buffer and obtain the base address of shadow using only two inputs: address
+// of the current element of the ring buffer, and N (i.e. size of the ring
+// buffer). Since the value of N is very limited, we pack both inputs into a
+// single thread-local word as
+// (1 << (N + 56)) | A
+// See the implementation of class CompactRingBuffer, which is what is stored in
+// said thread-local word.
+//
+// Note the unusual way of aligning up the address of the shadow:
+// (A | ((1 << K) - 1)) + 1
+// It is only correct if A is not already equal to the shadow base address, but
+// it saves 2 instructions on AArch64.
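+//
+// For illustration (not part of the ABI contract): with N = 1 a ring buffer
+// is 2**13 = 8192 bytes, aligned to 16384. For a buffer at 0x4000, the last
+// element is at A = 0x5ff8; A + 8 = 0x6000 has bit 13 set, and masking with
+// ~(1 << 13) wraps A_next back to 0x4000, while for any interior element the
+// mask is a no-op.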
+
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_flags.h"
+#include "hwasan_thread.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
+
+namespace __hwasan {
+
+static uptr RingBufferSize() {
+ uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
+ // FIXME: increase the limit to 8 once this bug is fixed:
+ // https://bugs.llvm.org/show_bug.cgi?id=39030
+ // Note that we *cannot* do that on Android, as the runtime will indefinitely
+ // have to support code that is compiled with ashr, which only works with
+ // shifts up to 6.
+ for (int shift = 0; shift < 7; ++shift) {
+ uptr size = 4096 * (1ULL << shift);
+ if (size >= desired_bytes)
+ return size;
+ }
+ Printf("stack history size too large: %d\n", flags()->stack_history_size);
+ CHECK(0);
+ return 0;
+}
+
+struct ThreadStats {
+ uptr n_live_threads;
+ uptr total_stack_size;
+};
+
+class SANITIZER_MUTEX HwasanThreadList {
+ public:
+ HwasanThreadList(uptr storage, uptr size)
+ : free_space_(storage), free_space_end_(storage + size) {
+ // [storage, storage + size) is used as a vector of
+ // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
+ // Each element contains
+ // * a ring buffer at offset 0,
+ // * a Thread object at offset ring_buffer_size_.
+ ring_buffer_size_ = RingBufferSize();
+ thread_alloc_size_ =
+ RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
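+    // E.g. with an 8 KiB ring buffer this rounds up to 16 KiB per thread,
+    // preserving the 2 * ring_buffer_size_ alignment of free_space_ across
+    // consecutive AllocThread() calls.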
+ }
+
+ Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
+ SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
+ Thread *t = nullptr;
+ {
+ SpinMutexLock l(&free_list_mutex_);
+ if (!free_list_.empty()) {
+ t = free_list_.back();
+ free_list_.pop_back();
+ }
+ }
+ if (t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
+ } else {
+ t = AllocThread();
+ }
+ {
+ SpinMutexLock l(&live_list_mutex_);
+ live_list_.push_back(t);
+ }
+ t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
+ AddThreadStats(t);
+ return t;
+ }
+
+ void DontNeedThread(Thread *t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
+ }
+
+ void RemoveThreadFromLiveList(Thread *t)
+ SANITIZER_EXCLUDES(live_list_mutex_) {
+ SpinMutexLock l(&live_list_mutex_);
+ for (Thread *&t2 : live_list_)
+ if (t2 == t) {
+ // To remove t2, copy the last element of the list in t2's position, and
+ // pop_back(). This works even if t2 is itself the last element.
+ t2 = live_list_.back();
+ live_list_.pop_back();
+ return;
+ }
+ CHECK(0 && "thread not found in live list");
+ }
+
+ void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
+ RemoveThreadStats(t);
+ RemoveThreadFromLiveList(t);
+ t->Destroy();
+ DontNeedThread(t);
+ SpinMutexLock l(&free_list_mutex_);
+ free_list_.push_back(t);
+ }
+
+ Thread *GetThreadByBufferAddress(uptr p) {
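+    // Blocks are aligned to 2 * ring_buffer_size_ and hold the Thread object
+    // at offset ring_buffer_size_ (see AllocThread), so rounding any ring
+    // buffer address down recovers the Thread.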
+ return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
+ ring_buffer_size_);
+ }
+
+ uptr MemoryUsedPerThread() {
+ uptr res = sizeof(Thread) + ring_buffer_size_;
+ if (auto sz = flags()->heap_history_size)
+ res += HeapAllocationsRingBuffer::SizeInBytes(sz);
+ return res;
+ }
+
+ template <class CB>
+ void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
+ SpinMutexLock l(&live_list_mutex_);
+ for (Thread *t : live_list_) cb(t);
+ }
+
+ template <class CB>
+ Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ CheckLocked();
+ for (Thread *t : live_list_)
+ if (cb(t))
+ return t;
+ return nullptr;
+ }
+
+ void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
+ SpinMutexLock l(&stats_mutex_);
+ stats_.n_live_threads++;
+ stats_.total_stack_size += t->stack_size();
+ }
+
+ void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
+ SpinMutexLock l(&stats_mutex_);
+ stats_.n_live_threads--;
+ stats_.total_stack_size -= t->stack_size();
+ }
+
+ ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
+ SpinMutexLock l(&stats_mutex_);
+ return stats_;
+ }
+
+ uptr GetRingBufferSize() const { return ring_buffer_size_; }
+
+ void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ live_list_mutex_.CheckLocked();
+ }
+ void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
+ live_list_mutex_.Unlock();
+ }
+
+ private:
+ Thread *AllocThread() {
+ SpinMutexLock l(&free_space_mutex_);
+ uptr align = ring_buffer_size_ * 2;
+ CHECK(IsAligned(free_space_, align));
+ Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
+ free_space_ += thread_alloc_size_;
+ CHECK_LE(free_space_, free_space_end_);
+ return t;
+ }
+
+ SpinMutex free_space_mutex_;
+ uptr free_space_;
+ uptr free_space_end_;
+ uptr ring_buffer_size_;
+ uptr thread_alloc_size_;
+
+ SpinMutex free_list_mutex_;
+ InternalMmapVector<Thread *> free_list_
+ SANITIZER_GUARDED_BY(free_list_mutex_);
+ SpinMutex live_list_mutex_;
+ InternalMmapVector<Thread *> live_list_
+ SANITIZER_GUARDED_BY(live_list_mutex_);
+
+ SpinMutex stats_mutex_;
+ ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
+};
+
+void InitThreadList(uptr storage, uptr size);
+HwasanThreadList &hwasanThreadList();
+ThreadArgRetval &hwasanThreadArgRetval();
+
+} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp
new file mode 100644
index 000000000000..5307073fb40b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp
@@ -0,0 +1,25 @@
+//===-- hwasan_type_test.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Compile-time tests of the internal type definitions.
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "hwasan.h"
+#include <setjmp.h>
+
+#define CHECK_TYPE_SIZE_FITS(TYPE) \
+ COMPILER_CHECK(sizeof(__hw_##TYPE) <= sizeof(TYPE))
+
+#if HWASAN_WITH_INTERCEPTORS
+CHECK_TYPE_SIZE_FITS(jmp_buf);
+CHECK_TYPE_SIZE_FITS(sigjmp_buf);
+#endif