Diffstat (limited to 'compiler-rt/lib')
 compiler-rt/lib/asan/asan_allocator.cpp | 28
 compiler-rt/lib/asan/asan_interface.inc | 1
 compiler-rt/lib/asan/asan_interface_internal.h | 3
 compiler-rt/lib/asan/asan_malloc_win.cpp | 3
 compiler-rt/lib/builtins/clear_cache.c | 36
 compiler-rt/lib/builtins/cpu_model.c | 12
 compiler-rt/lib/builtins/i386/fp_mode.c | 39
 compiler-rt/lib/builtins/ppc/fixtfti.c | 38
 compiler-rt/lib/builtins/ppc/fixunstfti.c | 14
 compiler-rt/lib/crt/crtbegin.c | 48
 compiler-rt/lib/fuzzer/FuzzerDefs.h | 18
 compiler-rt/lib/fuzzer/FuzzerDriver.cpp | 7
 compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp | 2
 compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp | 2
 compiler-rt/lib/fuzzer/FuzzerFlags.def | 3
 compiler-rt/lib/fuzzer/FuzzerIO.cpp | 2
 compiler-rt/lib/fuzzer/FuzzerIO.h | 2
 compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp | 8
 compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp | 8
 compiler-rt/lib/fuzzer/FuzzerUtil.h | 2
 compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp | 9
 compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp | 77
 compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp | 11
 compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp | 5
 compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp | 9
 compiler-rt/lib/gwp_asan/definitions.h | 18
 compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp | 9
 compiler-rt/lib/gwp_asan/guarded_pool_allocator.h | 10
 compiler-rt/lib/hwasan/hwasan.h | 20
 compiler-rt/lib/hwasan/hwasan_interceptors.cpp | 102
 compiler-rt/lib/hwasan/hwasan_interface_internal.h | 4
 compiler-rt/lib/hwasan/hwasan_linux.cpp | 42
 compiler-rt/lib/hwasan/hwasan_report.cpp | 5
 compiler-rt/lib/hwasan/hwasan_setjmp.S | 100
 compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S | 4
 compiler-rt/lib/hwasan/hwasan_type_test.cpp | 25
 compiler-rt/lib/lsan/lsan_allocator.cpp | 13
 compiler-rt/lib/msan/msan.cpp | 4
 compiler-rt/lib/msan/msan_allocator.cpp | 11
 compiler-rt/lib/msan/msan_blacklist.txt | 3
 compiler-rt/lib/msan/msan_interceptors.cpp | 5
 compiler-rt/lib/profile/GCDAProfiling.c | 25
 compiler-rt/lib/profile/InstrProfData.inc | 752
 compiler-rt/lib/profile/InstrProfiling.c | 2
 compiler-rt/lib/profile/InstrProfiling.h | 58
 compiler-rt/lib/profile/InstrProfilingBuffer.c | 70
 compiler-rt/lib/profile/InstrProfilingFile.c | 336
 compiler-rt/lib/profile/InstrProfilingInternal.h | 7
 compiler-rt/lib/profile/InstrProfilingMerge.c | 2
 compiler-rt/lib/profile/InstrProfilingMergeFile.c | 2
 compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c | 2
 compiler-rt/lib/profile/InstrProfilingPort.h | 13
 compiler-rt/lib/profile/InstrProfilingRuntime.cpp | 3
 compiler-rt/lib/profile/InstrProfilingUtil.c | 9
 compiler-rt/lib/profile/InstrProfilingUtil.h | 4
 compiler-rt/lib/profile/InstrProfilingValue.c | 2
 compiler-rt/lib/profile/InstrProfilingWriter.c | 40
 compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp | 2
 compiler-rt/lib/sanitizer_common/sanitizer_common.h | 2
 compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc | 118
 compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc | 128
 compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp | 11
 compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h | 49
 compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp | 10
 compiler-rt/lib/sanitizer_common/sanitizer_flags.inc | 3
 compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc | 12
 compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h | 2
 compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp | 12
 compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp | 9
 compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp | 2
 compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp | 2
 compiler-rt/lib/sanitizer_common/sanitizer_platform.h | 8
 compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h | 21
 compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp | 2
 compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp | 60
 compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h | 23
 compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp | 10
 compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h | 18
 compiler-rt/lib/sanitizer_common/sanitizer_posix.h | 2
 compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp | 12
 compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp | 22
 compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc | 76
 compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp | 9
 compiler-rt/lib/scudo/standalone/allocator_config.h | 7
 compiler-rt/lib/scudo/standalone/atomic_helpers.h | 34
 compiler-rt/lib/scudo/standalone/bytemap.h | 6
 compiler-rt/lib/scudo/standalone/checksum.cpp | 4
 compiler-rt/lib/scudo/standalone/checksum.h | 2
 compiler-rt/lib/scudo/standalone/chunk.h | 24
 compiler-rt/lib/scudo/standalone/combined.h | 172
 compiler-rt/lib/scudo/standalone/common.h | 30
 compiler-rt/lib/scudo/standalone/flags.cpp | 15
 compiler-rt/lib/scudo/standalone/flags.h | 8
 compiler-rt/lib/scudo/standalone/flags_parser.cpp | 2
 compiler-rt/lib/scudo/standalone/flags_parser.h | 2
 compiler-rt/lib/scudo/standalone/fuchsia.cpp | 4
 compiler-rt/lib/scudo/standalone/internal_defs.h | 13
 compiler-rt/lib/scudo/standalone/list.h | 236
 compiler-rt/lib/scudo/standalone/platform.h | 6
 compiler-rt/lib/scudo/standalone/primary32.h | 38
 compiler-rt/lib/scudo/standalone/primary64.h | 40
 compiler-rt/lib/scudo/standalone/quarantine.h | 23
 compiler-rt/lib/scudo/standalone/release.h | 14
 compiler-rt/lib/scudo/standalone/report.cpp | 2
 compiler-rt/lib/scudo/standalone/secondary.cpp | 135
 compiler-rt/lib/scudo/standalone/secondary.h | 172
 compiler-rt/lib/scudo/standalone/size_class_map.h | 7
 compiler-rt/lib/scudo/standalone/stats.h | 35
 compiler-rt/lib/scudo/standalone/tsd.h | 9
 compiler-rt/lib/scudo/standalone/tsd_exclusive.h | 21
 compiler-rt/lib/scudo/standalone/tsd_shared.h | 25
 compiler-rt/lib/scudo/standalone/vector.h | 3
 compiler-rt/lib/scudo/standalone/wrappers_c.cpp | 19
 compiler-rt/lib/scudo/standalone/wrappers_c.inc | 21
 compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp | 30
 compiler-rt/lib/scudo/standalone/wrappers_c_checks.h | 10
 compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp | 3
 compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 15
 compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp | 7
119 files changed, 2429 insertions, 1479 deletions
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index c9e9f5a93d0d..65c51fbafdd0 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -246,6 +246,7 @@ struct Allocator {
AllocatorCache fallback_allocator_cache;
QuarantineCache fallback_quarantine_cache;
+ uptr max_user_defined_malloc_size;
atomic_uint8_t rss_limit_exceeded;
// ------------------- Options --------------------------
@@ -280,6 +281,10 @@ struct Allocator {
SetAllocatorMayReturnNull(options.may_return_null);
allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
SharedInitCode(options);
+ max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
+ ? common_flags()->max_allocation_size_mb
+ << 20
+ : kMaxAllowedMallocSize;
}
bool RssLimitExceeded() {
@@ -394,6 +399,16 @@ struct Allocator {
return right_chunk;
}
+ bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
+ AsanChunk *m = GetAsanChunkByAddr(addr);
+ if (!m) return false;
+ if (m->chunk_state != CHUNK_ALLOCATED) return false;
+ if (m->Beg() != addr) return false;
+ atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
+ memory_order_relaxed);
+ return true;
+ }
+
// -------------------- Allocation/Deallocation routines ---------------
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
@@ -435,14 +450,16 @@ struct Allocator {
using_primary_allocator = false;
}
CHECK(IsAligned(needed_size, min_alignment));
- if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
+ size > max_user_defined_malloc_size) {
if (AllocatorMayReturnNull()) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
return nullptr;
}
- ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
- stack);
+ uptr malloc_limit =
+ Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
+ ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
}
AsanThread *t = GetCurrentThread();
@@ -1105,6 +1122,11 @@ void __sanitizer_purge_allocator() {
instance.Purge(&stack);
}
+int __asan_update_allocation_context(void* addr) {
+ GET_STACK_TRACE_MALLOC;
+ return instance.UpdateAllocationStack((uptr)addr, &stack);
+}
+
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
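
Note: the cap added above is driven by the max_allocation_size_mb common flag; allocations larger than min(flag << 20, kMaxAllowedMallocSize) fail. A minimal sketch of exercising it, assuming the flag is consumed via ASAN_OPTIONS like other common flags:

// allocation_cap_demo.cpp -- illustrative sketch, not part of the patch.
// Build: clang++ -fsanitize=address allocation_cap_demo.cpp -o demo
// Run:   ASAN_OPTIONS=max_allocation_size_mb=128:allocator_may_return_null=1 ./demo
#include <cstdio>
#include <cstdlib>

int main() {
  // 256 MiB exceeds the 128 MiB cap, so ASan prints its "failed to allocate"
  // warning and returns null instead of aborting the process.
  void *p = malloc(256UL << 20);
  printf("p = %p\n", p);
  free(p);
  return 0;
}
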
diff --git a/compiler-rt/lib/asan/asan_interface.inc b/compiler-rt/lib/asan/asan_interface.inc
index 7c341f22e15f..948010439827 100644
--- a/compiler-rt/lib/asan/asan_interface.inc
+++ b/compiler-rt/lib/asan/asan_interface.inc
@@ -164,6 +164,7 @@ INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_FUNCTION(__asan_update_allocation_context)
INTERFACE_WEAK_FUNCTION(__asan_default_options)
INTERFACE_WEAK_FUNCTION(__asan_default_suppressions)
INTERFACE_WEAK_FUNCTION(__asan_on_error)
diff --git a/compiler-rt/lib/asan/asan_interface_internal.h b/compiler-rt/lib/asan/asan_interface_internal.h
index c83aa11d741a..f14cbbcb76a3 100644
--- a/compiler-rt/lib/asan/asan_interface_internal.h
+++ b/compiler-rt/lib/asan/asan_interface_internal.h
@@ -251,6 +251,9 @@ extern "C" {
const char* __asan_default_suppressions();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_vfork(void *sp);
+
+ SANITIZER_INTERFACE_ATTRIBUTE int __asan_update_allocation_context(
+ void *addr);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
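
Note: per the definition in asan_allocator.cpp above, the call returns 1 only when addr is the exact start of a currently allocated chunk, and re-stamps that chunk's allocation stack with the caller's current stack. A hedged usage sketch (the extern declaration mirrors the prototype in the hunk; whether a public header exposes it is not shown here):

// update_context_demo.cpp -- illustrative sketch.
#include <cstdlib>

extern "C" int __asan_update_allocation_context(void *addr);

int main() {
  void *p = malloc(32);
  // Re-stamp p's allocation stack with the current one, so a later
  // use-after-free report points here rather than at the original malloc.
  int ok = __asan_update_allocation_context(p);              // 1: chunk start
  int off = __asan_update_allocation_context((char *)p + 1); // 0: interior ptr
  (void)ok; (void)off;
  free(p);
  return 0;
}
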
diff --git a/compiler-rt/lib/asan/asan_malloc_win.cpp b/compiler-rt/lib/asan/asan_malloc_win.cpp
index 13c6f652119b..4b76d4ebd3eb 100644
--- a/compiler-rt/lib/asan/asan_malloc_win.cpp
+++ b/compiler-rt/lib/asan/asan_malloc_win.cpp
@@ -35,11 +35,8 @@ constexpr unsigned long HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010;
constexpr unsigned long HEAP_ALLOCATE_SUPPORTED_FLAGS = (HEAP_ZERO_MEMORY);
constexpr unsigned long HEAP_ALLOCATE_UNSUPPORTED_FLAGS =
(~HEAP_ALLOCATE_SUPPORTED_FLAGS);
-constexpr unsigned long HEAP_FREE_SUPPORTED_FLAGS = (0);
constexpr unsigned long HEAP_FREE_UNSUPPORTED_FLAGS =
(~HEAP_ALLOCATE_SUPPORTED_FLAGS);
-constexpr unsigned long HEAP_REALLOC_SUPPORTED_FLAGS =
- (HEAP_REALLOC_IN_PLACE_ONLY | HEAP_ZERO_MEMORY);
constexpr unsigned long HEAP_REALLOC_UNSUPPORTED_FLAGS =
(~HEAP_ALLOCATE_SUPPORTED_FLAGS);
diff --git a/compiler-rt/lib/builtins/clear_cache.c b/compiler-rt/lib/builtins/clear_cache.c
index 80d3b2f9f17d..e83e21254e85 100644
--- a/compiler-rt/lib/builtins/clear_cache.c
+++ b/compiler-rt/lib/builtins/clear_cache.c
@@ -93,24 +93,34 @@ void __clear_cache(void *start, void *end) {
#elif defined(__aarch64__) && !defined(__APPLE__)
uint64_t xstart = (uint64_t)(uintptr_t)start;
uint64_t xend = (uint64_t)(uintptr_t)end;
- uint64_t addr;
- // Get Cache Type Info
- uint64_t ctr_el0;
- __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
+ // Get Cache Type Info.
+ static uint64_t ctr_el0 = 0;
+ if (ctr_el0 == 0)
+ __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
- // dc & ic instructions must use 64bit registers so we don't use
+ // The DC and IC instructions must use 64-bit registers so we don't use
// uintptr_t in case this runs in an ILP32 environment.
- const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
- for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
- addr += dcache_line_size)
- __asm __volatile("dc cvau, %0" ::"r"(addr));
+ uint64_t addr;
+
+ // If CTR_EL0.IDC is set, data cache cleaning to the point of unification
+ // is not required for instruction to data coherence.
+ if (((ctr_el0 >> 28) & 0x1) == 0x0) {
+ const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
+ for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
+ addr += dcache_line_size)
+ __asm __volatile("dc cvau, %0" ::"r"(addr));
+ }
__asm __volatile("dsb ish");
- const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
- for (addr = xstart & ~(icache_line_size - 1); addr < xend;
- addr += icache_line_size)
- __asm __volatile("ic ivau, %0" ::"r"(addr));
+ // If CTR_EL0.DIC is set, instruction cache invalidation to the point of
+ // unification is not required for instruction to data coherence.
+ if (((ctr_el0 >> 29) & 0x1) == 0x0) {
+ const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
+ for (addr = xstart & ~(icache_line_size - 1); addr < xend;
+ addr += icache_line_size)
+ __asm __volatile("ic ivau, %0" ::"r"(addr));
+ }
__asm __volatile("isb sy");
#elif defined(__powerpc64__)
const size_t line_size = 32;
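
Note: the new guards read CTR_EL0.IDC (bit 28) and CTR_EL0.DIC (bit 29), which advertise that the data-cache clean loop and the instruction-cache invalidate loop, respectively, may be skipped. A standalone sketch of the same decoding, assuming an AArch64 Linux target where EL0 reads of CTR_EL0 are permitted or emulated:

// ctr_el0_demo.c -- AArch64 only; illustrative.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t ctr_el0;
  __asm__ __volatile__("mrs %0, ctr_el0" : "=r"(ctr_el0));
  // Line-size fields encode log2(words); 4 << field converts to bytes.
  printf("dcache line: %u bytes\n", 4u << ((ctr_el0 >> 16) & 15));
  printf("icache line: %u bytes\n", 4u << ((ctr_el0 >> 0) & 15));
  printf("IDC=%u DIC=%u\n", (unsigned)((ctr_el0 >> 28) & 1),
         (unsigned)((ctr_el0 >> 29) & 1));
  return 0;
}
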
diff --git a/compiler-rt/lib/builtins/cpu_model.c b/compiler-rt/lib/builtins/cpu_model.c
index cdeb03794ecc..fb619037d398 100644
--- a/compiler-rt/lib/builtins/cpu_model.c
+++ b/compiler-rt/lib/builtins/cpu_model.c
@@ -471,9 +471,9 @@ static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
break; // "btver2"
case 23:
*Type = AMDFAM17H;
- if (Model >= 0x30 && Model <= 0x3f) {
+ if ((Model >= 0x30 && Model <= 0x3f) || Model == 0x71) {
*Subtype = AMDFAM17H_ZNVER2;
- break; // "znver2"; 30h-3fh: Zen2
+ break; // "znver2"; 30h-3fh, 71h: Zen2
}
if (Model <= 0x0f) {
*Subtype = AMDFAM17H_ZNVER1;
@@ -532,7 +532,15 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
const unsigned AVXBits = (1 << 27) | (1 << 28);
bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
((EAX & 0x6) == 0x6);
+#if defined(__APPLE__)
+ // Darwin lazily saves the AVX512 context on first use: trust that the OS will
+ // save the AVX512 context if we use AVX512 instructions, even if the bit is
+ // not set right now.
+ bool HasAVX512Save = true;
+#else
+ // AVX512 requires additional context to be saved by the OS.
bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
+#endif
if (HasAVX)
setFeature(FEATURE_AVX);
diff --git a/compiler-rt/lib/builtins/i386/fp_mode.c b/compiler-rt/lib/builtins/i386/fp_mode.c
new file mode 100644
index 000000000000..62ab771222c0
--- /dev/null
+++ b/compiler-rt/lib/builtins/i386/fp_mode.c
@@ -0,0 +1,39 @@
+//===----- lib/i386/fp_mode.c - Floating-point mode utilities ----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../fp_mode.h"
+
+#define X87_TONEAREST 0x0000
+#define X87_DOWNWARD 0x0400
+#define X87_UPWARD 0x0800
+#define X87_TOWARDZERO 0x0c00
+#define X87_RMODE_MASK (X87_TONEAREST | X87_UPWARD | X87_DOWNWARD | X87_TOWARDZERO)
+
+FE_ROUND_MODE __fe_getround() {
+ // Assume that the rounding mode state for the fpu agrees with the SSE unit.
+ unsigned short cw;
+ __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
+
+ switch (cw & X87_RMODE_MASK) {
+ case X87_TONEAREST:
+ return FE_TONEAREST;
+ case X87_DOWNWARD:
+ return FE_DOWNWARD;
+ case X87_UPWARD:
+ return FE_UPWARD;
+ case X87_TOWARDZERO:
+ return FE_TOWARDZERO;
+ }
+ return FE_TONEAREST;
+}
+
+int __fe_raise_inexact() {
+ float f = 1.0f, g = 3.0f;
+ __asm__ __volatile__ ("fdivs %1" : "+t" (f) : "m" (g));
+ return 0;
+}
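
Note: the X87_* values are the standard rounding-control bits (bits 10-11) of the x87 control word. One way to observe the mapping __fe_getround relies on is to change the mode with the portable fesetround and re-read the control word; a small sketch for x86 hosts:

// fp_mode_demo.c -- x86 only; illustrative.
#include <fenv.h>
#include <stdio.h>

int main(void) {
  fesetround(FE_DOWNWARD); // updates both the x87 and SSE rounding state
  unsigned short cw;
  __asm__ __volatile__("fnstcw %0" : "=m"(cw));
  // Expect 0x0400, i.e. X87_DOWNWARD, in the rounding-control field.
  printf("x87 rounding bits: 0x%04x\n", cw & 0x0c00);
  return 0;
}
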
diff --git a/compiler-rt/lib/builtins/ppc/fixtfti.c b/compiler-rt/lib/builtins/ppc/fixtfti.c
new file mode 100644
index 000000000000..4180e7494d3b
--- /dev/null
+++ b/compiler-rt/lib/builtins/ppc/fixtfti.c
@@ -0,0 +1,38 @@
+//===--- lib/builtins/ppc/fixtfti.c - Convert long double->int128 -*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements converting the 128-bit IBM/PowerPC long double
+// (double-double) data type to a signed 128-bit integer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../int_math.h"
+
+// Convert long double into a signed 128-bit integer.
+__int128_t __fixtfti(long double input) {
+
+ // If we are trying to convert a NaN, return the NaN bit pattern.
+ if (crt_isnan(input)) {
+ return ((__uint128_t)0x7FF8000000000000ll) << 64 |
+ (__uint128_t)0x0000000000000000ll;
+ }
+
+ // Note: overflow is an undefined behavior for this conversion.
+ // For this reason, overflow is not checked here.
+
+ // If the long double is negative, use unsigned conversion from its absolute
+ // value.
+ if (input < 0.0) {
+ __uint128_t result = (__uint128_t)(-input);
+ return -((__int128_t)result);
+ }
+
+ // Otherwise, use unsigned conversion from the input value.
+ __uint128_t result = (__uint128_t)input;
+ return result;
+}
diff --git a/compiler-rt/lib/builtins/ppc/fixunstfti.c b/compiler-rt/lib/builtins/ppc/fixunstfti.c
index 1d19e01e3a91..2469585369c1 100644
--- a/compiler-rt/lib/builtins/ppc/fixunstfti.c
+++ b/compiler-rt/lib/builtins/ppc/fixunstfti.c
@@ -34,9 +34,9 @@ __uint128_t __fixunstfti(long double input) {
} ldUnion;
// If the long double is less than 1.0 or negative,
- // return 0.0.
+ // return 0.
if (input < 1.0)
- return 0.0;
+ return 0;
// Retrieve the 64-bit patterns of high and low doubles.
// Compute the unbiased exponent of both high and low doubles by
@@ -99,6 +99,16 @@ __uint128_t __fixunstfti(long double input) {
loResult <<= shift;
}
+ // If the low double is negative, it may change the integer value of the
+ // whole number if the absolute value of its fractional part is bigger than
+ // the fractional part of the high double. Because both doubles cannot
+ // overlap, this situation only occurs when the high double has no
+ // fractional part.
+ ldUnion.ld = input;
+ if ((ldUnion.d[0] == (double)hiResult) &&
+ (ldUnion.d[1] < (double)((__int128_t)loResult)))
+ loResult--;
+
// Add the high and low doublewords together to form a 128 bit integer.
result = loResult + hiResult;
return result;
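
Note: the new decrement handles double-double values whose low component is negative. As a worked instance (assuming long double is the IBM 128-bit double-double): x = 2^64 - 0.5 is stored as hi = 2^64, lo = -0.5; summing the two independent conversions would yield 2^64, while truncation toward zero requires 2^64 - 1, hence loResult--. A hedged check, meaningful only on targets using this format:

// fixunstfti_demo.c -- meaningful only where long double is IBM double-double
// (e.g. powerpc64 with 128-bit long double); illustrative.
#include <stdio.h>

int main(void) {
  long double x = 0x1p64L - 0.5L; // hi = 2^64, lo = -0.5
  unsigned __int128 r = (unsigned __int128)x;
  // Expect hi64=0x0, lo64=0xffffffffffffffff: truncation gives 2^64 - 1.
  printf("hi64=0x%llx lo64=0x%llx\n", (unsigned long long)(r >> 64),
         (unsigned long long)r);
  return 0;
}
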
diff --git a/compiler-rt/lib/crt/crtbegin.c b/compiler-rt/lib/crt/crtbegin.c
index 2450ce54e31b..24bea1a2c3a7 100644
--- a/compiler-rt/lib/crt/crtbegin.c
+++ b/compiler-rt/lib/crt/crtbegin.c
@@ -10,11 +10,13 @@
__attribute__((visibility("hidden"))) void *__dso_handle = &__dso_handle;
+#ifdef EH_USE_FRAME_REGISTRY
__extension__ static void *__EH_FRAME_LIST__[]
__attribute__((section(".eh_frame"), aligned(sizeof(void *)))) = {};
extern void __register_frame_info(const void *, void *) __attribute__((weak));
extern void *__deregister_frame_info(const void *) __attribute__((weak));
+#endif
#ifndef CRT_HAS_INITFINI_ARRAY
typedef void (*fp)(void);
@@ -32,10 +34,11 @@ static void __attribute__((used)) __do_init() {
return;
__initialized = 1;
+#ifdef EH_USE_FRAME_REGISTRY
static struct { void *p[8]; } __object;
if (__register_frame_info)
__register_frame_info(__EH_FRAME_LIST__, &__object);
-
+#endif
#ifndef CRT_HAS_INITFINI_ARRAY
const size_t n = __CTOR_LIST_END__ - __CTOR_LIST__ - 1;
for (size_t i = n; i >= 1; i--) __CTOR_LIST__[i]();
@@ -45,17 +48,26 @@ static void __attribute__((used)) __do_init() {
#ifdef CRT_HAS_INITFINI_ARRAY
__attribute__((section(".init_array"),
used)) static void (*__init)(void) = __do_init;
-#else // CRT_HAS_INITFINI_ARRAY
-#if defined(__i386__) || defined(__x86_64__)
+#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
"call " __USER_LABEL_PREFIX__ "__do_init\n\t"
".popsection");
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .init,\"ax\",%progbits\n\t"
"bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
".popsection");
-#endif // CRT_HAS_INITFINI_ARRAY
-#endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+__asm__(".pushsection .init,\"ax\",@progbits\n\t"
+ "bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ "nop\n\t"
+ ".popsection");
+#elif defined(__sparc__)
+__asm__(".pushsection .init,\"ax\",@progbits\n\t"
+ "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ ".popsection");
+#else
+#error "crtbegin without .init_fini array unimplemented for this architecture"
+#endif // CRT_HAS_INITFINI_ARRAY
#ifndef CRT_HAS_INITFINI_ARRAY
static fp __DTOR_LIST__[]
@@ -73,25 +85,35 @@ static void __attribute__((used)) __do_fini() {
__cxa_finalize(__dso_handle);
#ifndef CRT_HAS_INITFINI_ARRAY
- if (__deregister_frame_info)
- __deregister_frame_info(__EH_FRAME_LIST__);
-
const size_t n = __DTOR_LIST_END__ - __DTOR_LIST__ - 1;
for (size_t i = 1; i <= n; i++) __DTOR_LIST__[i]();
#endif
+#ifdef EH_USE_FRAME_REGISTRY
+ if (__deregister_frame_info)
+ __deregister_frame_info(__EH_FRAME_LIST__);
+#endif
}
#ifdef CRT_HAS_INITFINI_ARRAY
__attribute__((section(".fini_array"),
used)) static void (*__fini)(void) = __do_fini;
-#else // CRT_HAS_INITFINI_ARRAY
-#if defined(__i386__) || defined(__x86_64__)
+#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
"call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
".popsection");
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .fini,\"ax\",%progbits\n\t"
"bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
".popsection");
-#endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
+ "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ "nop\n\t"
+ ".popsection");
+#elif defined(__sparc__)
+__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
+ "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ ".popsection");
+#else
+#error "crtbegin without .init_fini array unimplemented for this architecture"
#endif // CRT_HAS_INIT_FINI_ARRAY
diff --git a/compiler-rt/lib/fuzzer/FuzzerDefs.h b/compiler-rt/lib/fuzzer/FuzzerDefs.h
index 5dc2d8e1ac09..5793e86aa804 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDefs.h
+++ b/compiler-rt/lib/fuzzer/FuzzerDefs.h
@@ -30,6 +30,7 @@
#define LIBFUZZER_FREEBSD 0
#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 0
#elif __APPLE__
#define LIBFUZZER_APPLE 1
#define LIBFUZZER_FUCHSIA 0
@@ -38,6 +39,7 @@
#define LIBFUZZER_FREEBSD 0
#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 0
#elif __NetBSD__
#define LIBFUZZER_APPLE 0
#define LIBFUZZER_FUCHSIA 0
@@ -46,6 +48,7 @@
#define LIBFUZZER_FREEBSD 0
#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 0
#elif __FreeBSD__
#define LIBFUZZER_APPLE 0
#define LIBFUZZER_FUCHSIA 0
@@ -54,6 +57,7 @@
#define LIBFUZZER_FREEBSD 1
#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 0
#elif __OpenBSD__
#define LIBFUZZER_APPLE 0
#define LIBFUZZER_FUCHSIA 0
@@ -62,6 +66,7 @@
#define LIBFUZZER_FREEBSD 0
#define LIBFUZZER_OPENBSD 1
#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 0
#elif _WIN32
#define LIBFUZZER_APPLE 0
#define LIBFUZZER_FUCHSIA 0
@@ -70,6 +75,7 @@
#define LIBFUZZER_FREEBSD 0
#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 1
+#define LIBFUZZER_EMSCRIPTEN 0
#elif __Fuchsia__
#define LIBFUZZER_APPLE 0
#define LIBFUZZER_FUCHSIA 1
@@ -78,6 +84,16 @@
#define LIBFUZZER_FREEBSD 0
#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 0
+#elif __EMSCRIPTEN__
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 0
+#define LIBFUZZER_EMSCRIPTEN 1
#else
#error "Support for your platform has not been implemented"
#endif
@@ -95,7 +111,7 @@
#define LIBFUZZER_POSIX \
(LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD || \
- LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD)
+ LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN)
#ifdef __x86_64
# if __has_attribute(target)
diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
index 44c90655b932..dd3cab0ee8d2 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
@@ -280,7 +280,8 @@ static void RssThread(Fuzzer *F, size_t RssLimitMb) {
}
static void StartRssThread(Fuzzer *F, size_t RssLimitMb) {
- if (!RssLimitMb) return;
+ if (!RssLimitMb)
+ return;
std::thread T(RssThread, F, RssLimitMb);
T.detach();
}
@@ -737,7 +738,11 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
if (U.size() <= Word::GetMaxSize())
MD->AddWordToManualDictionary(Word(U.data(), U.size()));
+ // Threads are only supported by Chrome. Don't use them with emscripten
+ // for now.
+#if !LIBFUZZER_EMSCRIPTEN
StartRssThread(F, Flags.rss_limit_mb);
+#endif // LIBFUZZER_EMSCRIPTEN
Options.HandleAbrt = Flags.handle_abrt;
Options.HandleBus = Flags.handle_bus;
diff --git a/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp b/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
index ea5b87bd5196..d56dab36c646 100644
--- a/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#include "FuzzerDefs.h"
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FUCHSIA || \
- LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD
+ LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN
#include "FuzzerExtFunctions.h"
#include "FuzzerIO.h"
diff --git a/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp b/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
index 3f38f4fb70c1..b2face778203 100644
--- a/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
@@ -11,7 +11,7 @@
#include "FuzzerDefs.h"
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
- LIBFUZZER_OPENBSD
+ LIBFUZZER_OPENBSD || LIBFUZZER_FUCHSIA || LIBFUZZER_EMSCRIPTEN
__attribute__((weak)) extern uint8_t __start___libfuzzer_extra_counters;
__attribute__((weak)) extern uint8_t __stop___libfuzzer_extra_counters;
diff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def
index 0e19a9cde6ca..a67415743032 100644
--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def
+++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def
@@ -56,7 +56,8 @@ FUZZER_FLAG_STRING(merge_control_file,
"Specify a control file used for the merge process. "
"If a merge process gets killed it tries to leave this file "
"in a state suitable for resuming the merge. "
- "By default a temporary file will be used.")
+ "By default a temporary file will be used."
+ "The same file can be used for multistep merge process.")
FUZZER_FLAG_INT(minimize_crash, 0, "If 1, minimizes the provided"
" crash input. Use with -runs=N or -max_total_time=N to limit "
"the number attempts."
diff --git a/compiler-rt/lib/fuzzer/FuzzerIO.cpp b/compiler-rt/lib/fuzzer/FuzzerIO.cpp
index 7e5ba30a2e7d..f0708164be87 100644
--- a/compiler-rt/lib/fuzzer/FuzzerIO.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerIO.cpp
@@ -111,7 +111,7 @@ std::string DirPlusFile(const std::string &DirPath,
void DupAndCloseStderr() {
int OutputFd = DuplicateFile(2);
- if (OutputFd > 0) {
+ if (OutputFd >= 0) {
FILE *NewOutputFile = OpenFile(OutputFd, "w");
if (NewOutputFile) {
OutputFile = NewOutputFile;
diff --git a/compiler-rt/lib/fuzzer/FuzzerIO.h b/compiler-rt/lib/fuzzer/FuzzerIO.h
index fe0d7b451758..ae8dd24e373c 100644
--- a/compiler-rt/lib/fuzzer/FuzzerIO.h
+++ b/compiler-rt/lib/fuzzer/FuzzerIO.h
@@ -94,8 +94,6 @@ int DuplicateFile(int Fd);
void RemoveFile(const std::string &Path);
void RenameFile(const std::string &OldPath, const std::string &NewPath);
-void DiscardOutput(int Fd);
-
intptr_t GetHandleFromFd(int fd);
void MkDir(const std::string &Path);
diff --git a/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp b/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
index cfd69bbc8111..fcd9b8d8b9c7 100644
--- a/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
@@ -124,14 +124,6 @@ void RenameFile(const std::string &OldPath, const std::string &NewPath) {
rename(OldPath.c_str(), NewPath.c_str());
}
-void DiscardOutput(int Fd) {
- FILE* Temp = fopen("/dev/null", "w");
- if (!Temp)
- return;
- dup2(fileno(Temp), Fd);
- fclose(Temp);
-}
-
intptr_t GetHandleFromFd(int fd) {
return static_cast<intptr_t>(fd);
}
diff --git a/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
index 510afebef738..56757aa09a37 100644
--- a/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
@@ -223,14 +223,6 @@ void RenameFile(const std::string &OldPath, const std::string &NewPath) {
rename(OldPath.c_str(), NewPath.c_str());
}
-void DiscardOutput(int Fd) {
- FILE* Temp = fopen("nul", "w");
- if (!Temp)
- return;
- _dup2(_fileno(Temp), Fd);
- fclose(Temp);
-}
-
intptr_t GetHandleFromFd(int fd) {
return _get_osfhandle(fd);
}
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtil.h b/compiler-rt/lib/fuzzer/FuzzerUtil.h
index 85c5571d684f..00ea6550646f 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtil.h
+++ b/compiler-rt/lib/fuzzer/FuzzerUtil.h
@@ -79,6 +79,8 @@ inline std::pair<std::string, std::string> SplitBefore(std::string X,
return std::make_pair(S.substr(0, Pos), S.substr(Pos));
}
+void DiscardOutput(int Fd);
+
std::string DisassembleCmd(const std::string &FileName);
std::string SearchRegexCmd(const std::string &Regex);
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
index 171db23570c4..d449bc248f09 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
@@ -17,6 +17,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
+#include <unistd.h>
// There is no header for this on macOS so declare here
extern "C" char **environ;
@@ -156,6 +157,14 @@ int ExecuteCommand(const Command &Cmd) {
return ProcessStatus;
}
+void DiscardOutput(int Fd) {
+ FILE* Temp = fopen("/dev/null", "w");
+ if (!Temp)
+ return;
+ dup2(fileno(Temp), Fd);
+ fclose(Temp);
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_APPLE
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
index 79fd950bbf97..bde9f68d62aa 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
@@ -18,6 +18,7 @@
#include <cinttypes>
#include <cstdint>
#include <fcntl.h>
+#include <lib/fdio/fdio.h>
#include <lib/fdio/spawn.h>
#include <string>
#include <sys/select.h>
@@ -76,6 +77,23 @@ void InterruptHandler() {
Fuzzer::StaticInterruptCallback();
}
+// CFAOffset is used to reference the stack pointer before entering the
+// trampoline (Stack Pointer + CFAOffset = prev Stack Pointer). Before jumping
+// to the trampoline we copy all the registers onto the stack. We need to make
+// sure that the new stack has enough space to store all the registers.
+//
+// The trampoline holds CFI information regarding the registers stored in the
+// stack, which is then used by the unwinder to restore them.
+#if defined(__x86_64__)
+// In x86_64 the crashing function might also be using the red zone (128 bytes
+// on top of its rsp).
+constexpr size_t CFAOffset = 128 + sizeof(zx_thread_state_general_regs_t);
+#elif defined(__aarch64__)
+// On AArch64 the stack pointer must always be 16-byte aligned, so we round
+// the offset up to preserve that alignment.
+constexpr size_t CFAOffset = (sizeof(zx_thread_state_general_regs_t) + 15) & -(uintptr_t)16;
+#endif
+
// For the crash handler, we need to call Fuzzer::StaticCrashSignalCallback
// without POSIX signal handlers. To achieve this, we use an assembly function
// to add the necessary CFI unwinding information and a C function to bridge
@@ -140,7 +158,6 @@ void InterruptHandler() {
OP_NUM(27) \
OP_NUM(28) \
OP_NUM(29) \
- OP_NUM(30) \
OP_REG(sp)
#else
@@ -148,14 +165,17 @@ void InterruptHandler() {
#endif
// Produces a CFI directive for the named or numbered register.
+// The value used refers to an assembler immediate operand with the same name
+// as the register (see ASM_OPERAND_REG).
#define CFI_OFFSET_REG(reg) ".cfi_offset " #reg ", %c[" #reg "]\n"
-#define CFI_OFFSET_NUM(num) CFI_OFFSET_REG(r##num)
+#define CFI_OFFSET_NUM(num) CFI_OFFSET_REG(x##num)
-// Produces an assembler input operand for the named or numbered register.
+// Produces an assembler immediate operand for the named or numbered register.
+// This operand contains the offset of the register relative to the CFA.
#define ASM_OPERAND_REG(reg) \
- [reg] "i"(offsetof(zx_thread_state_general_regs_t, reg)),
+ [reg] "i"(offsetof(zx_thread_state_general_regs_t, reg) - CFAOffset),
#define ASM_OPERAND_NUM(num) \
- [r##num] "i"(offsetof(zx_thread_state_general_regs_t, r[num])),
+ [x##num] "i"(offsetof(zx_thread_state_general_regs_t, r[num]) - CFAOffset),
// Trampoline to bridge from the assembly below to the static C++ crash
// callback.
@@ -168,7 +188,16 @@ static void StaticCrashHandler() {
}
// Creates the trampoline with the necessary CFI information to unwind through
-// to the crashing call stack. The attribute is necessary because the function
+// to the crashing call stack:
+// * Defining the CFA so that it points to the stack pointer at the point
+// of crash.
+// * Storing all registers at the point of crash in the stack and refer to them
+// via CFI information (relative to the CFA).
+// * Setting the return column so the unwinder knows how to continue unwinding.
+// * (x86_64) making sure rsp is aligned before calling StaticCrashHandler.
+// * Calling StaticCrashHandler that will trigger the unwinder.
+//
+// The __attribute__((used)) is necessary because the function
// is never called; it's just a container around the assembly to allow it to
// use operands for compile-time computed constants.
__attribute__((used))
@@ -181,16 +210,21 @@ void MakeTrampoline() {
".cfi_signal_frame\n"
#if defined(__x86_64__)
".cfi_return_column rip\n"
- ".cfi_def_cfa rsp, 0\n"
+ ".cfi_def_cfa rsp, %c[CFAOffset]\n"
FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ "mov %%rsp, %%rbp\n"
+ ".cfi_def_cfa_register rbp\n"
+ "andq $-16, %%rsp\n"
"call %c[StaticCrashHandler]\n"
"ud2\n"
#elif defined(__aarch64__)
".cfi_return_column 33\n"
- ".cfi_def_cfa sp, 0\n"
- ".cfi_offset 33, %c[pc]\n"
+ ".cfi_def_cfa sp, %c[CFAOffset]\n"
FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
- "bl %[StaticCrashHandler]\n"
+ ".cfi_offset 33, %c[pc]\n"
+ ".cfi_offset 30, %c[lr]\n"
+ "bl %c[StaticCrashHandler]\n"
+ "brk 1\n"
#else
#error "Unsupported architecture for fuzzing on Fuchsia"
#endif
@@ -202,8 +236,10 @@ void MakeTrampoline() {
: FOREACH_REGISTER(ASM_OPERAND_REG, ASM_OPERAND_NUM)
#if defined(__aarch64__)
ASM_OPERAND_REG(pc)
+ ASM_OPERAND_REG(lr)
#endif
- [StaticCrashHandler] "i" (StaticCrashHandler));
+ [StaticCrashHandler] "i" (StaticCrashHandler),
+ [CFAOffset] "i" (CFAOffset));
}
void CrashHandler(zx_handle_t *Event) {
@@ -269,17 +305,14 @@ void CrashHandler(zx_handle_t *Event) {
// onto the stack and jump into a trampoline with CFI instructions on how
// to restore it.
#if defined(__x86_64__)
- uintptr_t StackPtr =
- (GeneralRegisters.rsp - (128 + sizeof(GeneralRegisters))) &
- -(uintptr_t)16;
+ uintptr_t StackPtr = GeneralRegisters.rsp - CFAOffset;
__unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
sizeof(GeneralRegisters));
GeneralRegisters.rsp = StackPtr;
GeneralRegisters.rip = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm);
#elif defined(__aarch64__)
- uintptr_t StackPtr =
- (GeneralRegisters.sp - sizeof(GeneralRegisters)) & -(uintptr_t)16;
+ uintptr_t StackPtr = GeneralRegisters.sp - CFAOffset;
__unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
sizeof(GeneralRegisters));
GeneralRegisters.sp = StackPtr;
@@ -497,6 +530,18 @@ const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt,
return memmem(Data, DataLen, Patt, PattLen);
}
+// In Fuchsia, accessing /dev/null is not supported. There's nothing
+// similar to a file that discards everything that is written to it.
+// The way of doing something similar in Fuchsia is by using
+// fdio_null_create and binding that to a file descriptor.
+void DiscardOutput(int Fd) {
+ fdio_t *fdio_null = fdio_null_create();
+ if (fdio_null == nullptr) return;
+ int nullfd = fdio_bind_to_fd(fdio_null, -1, 0);
+ if (nullfd < 0) return;
+ dup2(nullfd, Fd);
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_FUCHSIA
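
Note: both CFAOffset definitions are round-ups of sizeof(zx_thread_state_general_regs_t): the x86_64 variant additionally skips the 128-byte red zone, and the AArch64 variant rounds up to 16 bytes with the (n + 15) & -16 idiom. A tiny worked check of that idiom (the 272-byte frame size is illustrative):

// align_demo.cpp -- illustrative.
#include <cstddef>
#include <cstdio>

constexpr size_t AlignUp16(size_t N) {
  return (N + 15) & ~static_cast<size_t>(15); // same as & -(uintptr_t)16
}

int main() {
  // A 272-byte register frame is already aligned; 273..288 round up to 288.
  printf("%zu %zu %zu\n", AlignUp16(272), AlignUp16(273), AlignUp16(288));
  return 0;
}
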
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
index d5a15d19f2a9..993023e70393 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
@@ -9,12 +9,13 @@
//===----------------------------------------------------------------------===//
#include "FuzzerDefs.h"
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
- LIBFUZZER_OPENBSD
+ LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN
#include "FuzzerCommand.h"
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <unistd.h>
namespace fuzzer {
@@ -27,6 +28,14 @@ int ExecuteCommand(const Command &Cmd) {
return exit_code;
}
+void DiscardOutput(int Fd) {
+ FILE* Temp = fopen("/dev/null", "w");
+ if (!Temp)
+ return;
+ dup2(fileno(Temp), Fd);
+ fclose(Temp);
+}
+
} // namespace fuzzer
#endif
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
index cefe7ae181e7..8048e6a8afd2 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
@@ -98,7 +98,8 @@ void SetTimer(int Seconds) {
}
void SetSignalHandler(const FuzzingOptions& Options) {
- if (Options.UnitTimeoutSec > 0)
+ // setitimer is not implemented in emscripten.
+ if (Options.UnitTimeoutSec > 0 && !LIBFUZZER_EMSCRIPTEN)
SetTimer(Options.UnitTimeoutSec / 2 + 1);
if (Options.HandleInt)
SetSigaction(SIGINT, InterruptHandler);
@@ -133,7 +134,7 @@ size_t GetPeakRSSMb() {
if (getrusage(RUSAGE_SELF, &usage))
return 0;
if (LIBFUZZER_LINUX || LIBFUZZER_FREEBSD || LIBFUZZER_NETBSD ||
- LIBFUZZER_OPENBSD) {
+ LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN) {
// ru_maxrss is in KiB
return usage.ru_maxrss >> 10;
} else if (LIBFUZZER_APPLE) {
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
index ed90044c3f83..527e7dbd1cf6 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
@@ -16,6 +16,7 @@
#include <chrono>
#include <cstring>
#include <errno.h>
+#include <io.h>
#include <iomanip>
#include <signal.h>
#include <stdio.h>
@@ -190,6 +191,14 @@ std::string SearchRegexCmd(const std::string &Regex) {
return "findstr /r \"" + Regex + "\"";
}
+void DiscardOutput(int Fd) {
+ FILE* Temp = fopen("nul", "w");
+ if (!Temp)
+ return;
+ _dup2(_fileno(Temp), Fd);
+ fclose(Temp);
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_WINDOWS
diff --git a/compiler-rt/lib/gwp_asan/definitions.h b/compiler-rt/lib/gwp_asan/definitions.h
index 1190adbd4f4f..bebe56c55a26 100644
--- a/compiler-rt/lib/gwp_asan/definitions.h
+++ b/compiler-rt/lib/gwp_asan/definitions.h
@@ -9,21 +9,9 @@
#ifndef GWP_ASAN_DEFINITIONS_H_
#define GWP_ASAN_DEFINITIONS_H_
-#define TLS_INITIAL_EXEC __thread __attribute__((tls_model("initial-exec")))
+#define GWP_ASAN_TLS_INITIAL_EXEC __thread __attribute__((tls_model("initial-exec")))
-#ifdef LIKELY
-# undef LIKELY
-#endif // defined(LIKELY)
-#define LIKELY(X) __builtin_expect(!!(X), 1)
-
-#ifdef UNLIKELY
-# undef UNLIKELY
-#endif // defined(UNLIKELY)
-#define UNLIKELY(X) __builtin_expect(!!(X), 0)
-
-#ifdef ALWAYS_INLINE
-# undef ALWAYS_INLINE
-#endif // defined(ALWAYS_INLINE)
-#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define GWP_ASAN_UNLIKELY(X) __builtin_expect(!!(X), 0)
+#define GWP_ASAN_ALWAYS_INLINE inline __attribute__((always_inline))
#endif // GWP_ASAN_DEFINITIONS_H_
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
index ef497336025f..df454772a231 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -110,13 +110,6 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
Opts.MaxSimultaneousAllocations == 0)
return;
- // TODO(hctim): Add a death unit test for this.
- if (SingletonPtr) {
- (*SingletonPtr->Printf)(
- "GWP-ASan Error: init() has already been called.\n");
- exit(EXIT_FAILURE);
- }
-
if (Opts.SampleRate < 0) {
Opts.Printf("GWP-ASan Error: SampleRate is < 0.\n");
exit(EXIT_FAILURE);
@@ -527,7 +520,7 @@ void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr, Error E) {
printAllocDeallocTraces(AccessPtr, Meta, Printf, PrintBacktrace);
}
-TLS_INITIAL_EXEC
+GWP_ASAN_TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index 57ad61e9cf4f..7e6e13769d32 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -100,19 +100,19 @@ public:
void init(const options::Options &Opts);
// Return whether the allocation should be randomly chosen for sampling.
- ALWAYS_INLINE bool shouldSample() {
+ GWP_ASAN_ALWAYS_INLINE bool shouldSample() {
// NextSampleCounter == 0 means we "should regenerate the counter".
// == 1 means we "should sample this allocation".
- if (UNLIKELY(ThreadLocals.NextSampleCounter == 0))
+ if (GWP_ASAN_UNLIKELY(ThreadLocals.NextSampleCounter == 0))
ThreadLocals.NextSampleCounter =
(getRandomUnsigned32() % AdjustedSampleRate) + 1;
- return UNLIKELY(--ThreadLocals.NextSampleCounter == 0);
+ return GWP_ASAN_UNLIKELY(--ThreadLocals.NextSampleCounter == 0);
}
// Returns whether the provided pointer is a current sampled allocation that
// is owned by this pool.
- ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
+ GWP_ASAN_ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
uintptr_t P = reinterpret_cast<uintptr_t>(Ptr);
return GuardedPagePool <= P && P < GuardedPagePoolEnd;
}
@@ -267,7 +267,7 @@ private:
// allocation.
bool RecursiveGuard = false;
};
- static TLS_INITIAL_EXEC ThreadLocalPackedVariables ThreadLocals;
+ static GWP_ASAN_TLS_INITIAL_EXEC ThreadLocalPackedVariables ThreadLocals;
};
} // namespace gwp_asan
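
Note: shouldSample above is countdown sampling: each thread draws NextSampleCounter uniformly from [1, AdjustedSampleRate] and samples the allocation that drives it to zero, so the mean gap between samples is about AdjustedSampleRate / 2 allocations. A minimal single-threaded sketch of the same scheme (names and RNG choice are illustrative):

// sampling_demo.cpp -- illustrative model of the countdown sampler.
#include <cstdio>
#include <random>

static unsigned NextSampleCounter = 0;
static const unsigned AdjustedSampleRate = 5000;
static std::mt19937 Rng(0);

static bool ShouldSample() {
  if (NextSampleCounter == 0)
    NextSampleCounter = (Rng() % AdjustedSampleRate) + 1;
  return --NextSampleCounter == 0;
}

int main() {
  unsigned Sampled = 0;
  const unsigned N = 1000000;
  for (unsigned I = 0; I < N; I++)
    Sampled += ShouldSample();
  // Mean counter is (R + 1) / 2, so expect roughly 2 * N / R ~ 400 samples.
  printf("%u sampled of %u allocations\n", Sampled, N);
  return 0;
}
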
diff --git a/compiler-rt/lib/hwasan/hwasan.h b/compiler-rt/lib/hwasan/hwasan.h
index 9e0ced93b55d..64cdcf30f5c7 100644
--- a/compiler-rt/lib/hwasan/hwasan.h
+++ b/compiler-rt/lib/hwasan/hwasan.h
@@ -172,4 +172,24 @@ void AndroidTestTlsSlot();
RunFreeHooks(ptr); \
} while (false)
+#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+// For both bionic and glibc __sigset_t is an unsigned long.
+typedef unsigned long __hw_sigset_t;
+// Setjmp and longjmp implementations are platform specific, and hence the
+// interception code is platform specific too. As yet we've only implemented
+// the interception for AArch64.
+typedef unsigned long long __hw_register_buf[22];
+struct __hw_jmp_buf_struct {
+ // NOTE: The machine-dependent definition of `__sigsetjmp'
+ // assumes that a `__hw_jmp_buf' begins with a `__hw_register_buf' and that
+ // `__mask_was_saved' follows it. Do not move these members or add others
+ // before it.
+ __hw_register_buf __jmpbuf; // Calling environment.
+ int __mask_was_saved; // Saved the signal mask?
+ __hw_sigset_t __saved_mask; // Saved signal mask.
+};
+typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
+typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
+#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
+
#endif // HWASAN_H
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index 95e2e865717d..44e569ee6d72 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -202,23 +202,107 @@ INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
#endif
-#endif // HWASAN_WITH_INTERCEPTORS
+struct ThreadStartArg {
+ thread_callback_t callback;
+ void *param;
+};
+
+static void *HwasanThreadStartFunc(void *arg) {
+ __hwasan_thread_enter();
+ ThreadStartArg A = *reinterpret_cast<ThreadStartArg*>(arg);
+ UnmapOrDie(arg, GetPageSizeCached());
+ return A.callback(A.param);
+}
-#if HWASAN_WITH_INTERCEPTORS && !defined(__aarch64__)
-INTERCEPTOR(int, pthread_create, void *th, void *attr,
- void *(*callback)(void *), void *param) {
+INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
+ void * param) {
ScopedTaggingDisabler disabler;
+ ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
+ GetPageSizeCached(), "pthread_create"));
+ *A = {callback, param};
int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
- callback, param);
+ &HwasanThreadStartFunc, A);
return res;
}
-#endif
-#if HWASAN_WITH_INTERCEPTORS
DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
-#endif
+#endif // HWASAN_WITH_INTERCEPTORS
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+// Get and/or change the set of blocked signals.
+extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
+ __hw_sigset_t *__restrict __oset);
+#define SIG_BLOCK 0
+#define SIG_SETMASK 2
+extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
+ env[0].__mask_was_saved =
+ (savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
+ &env[0].__saved_mask) == 0);
+ return 0;
+}
+
+static void __attribute__((always_inline))
+InternalLongjmp(__hw_register_buf env, int retval) {
+ // Clear all memory tags on the stack between here and where we're going.
+ unsigned long long stack_pointer = env[13];
+ // The stack pointer should never be tagged, so we don't need to clear the
+ // tag for this function call.
+ __hwasan_handle_longjmp((void *)stack_pointer);
+
+ // Run code for handling a longjmp.
+ // Need to use a register that isn't going to be loaded from the environment
+ // buffer -- hence why we need to specify the register to use.
+ // Must implement this ourselves, since we don't know the order of registers
+ // in different libc implementations and many implementations mangle the
+ // stack pointer so we can't use it without knowing the demangling scheme.
+ register long int retval_tmp asm("x1") = retval;
+ register void *env_address asm("x0") = &env[0];
+ asm volatile("ldp x19, x20, [%0, #0<<3];"
+ "ldp x21, x22, [%0, #2<<3];"
+ "ldp x23, x24, [%0, #4<<3];"
+ "ldp x25, x26, [%0, #6<<3];"
+ "ldp x27, x28, [%0, #8<<3];"
+ "ldp x29, x30, [%0, #10<<3];"
+ "ldp d8, d9, [%0, #14<<3];"
+ "ldp d10, d11, [%0, #16<<3];"
+ "ldp d12, d13, [%0, #18<<3];"
+ "ldp d14, d15, [%0, #20<<3];"
+ "ldr x5, [%0, #13<<3];"
+ "mov sp, x5;"
+ // Return the value requested to return through arguments.
+ // This should be in x1 given what we requested above.
+ "cmp %1, #0;"
+ "mov x0, #1;"
+ "csel x0, %1, x0, ne;"
+ "br x30;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
+}
+
+INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
+ if (env[0].__mask_was_saved)
+ // Restore the saved signal mask.
+ (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
+ (__hw_sigset_t *)0);
+ InternalLongjmp(env[0].__jmpbuf, val);
+}
+
+// Required since glibc libpthread calls __libc_longjmp on pthread_exit, and
+// _setjmp on start_thread. Hence we have to intercept the longjmp on
+// pthread_exit so the __hw_jmp_buf order matches.
+INTERCEPTOR(void, __libc_longjmp, __hw_jmp_buf env, int val) {
+ InternalLongjmp(env[0].__jmpbuf, val);
+}
+
+INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
+ InternalLongjmp(env[0].__jmpbuf, val);
+}
+#undef SIG_BLOCK
+#undef SIG_SETMASK
+
+#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
static void BeforeFork() {
StackDepotLockAll();
@@ -257,9 +341,7 @@ void InitializeInterceptors() {
#if defined(__linux__)
INTERCEPT_FUNCTION(vfork);
#endif // __linux__
-#if !defined(__aarch64__)
INTERCEPT_FUNCTION(pthread_create);
-#endif // __aarch64__
#endif
inited = 1;
diff --git a/compiler-rt/lib/hwasan/hwasan_interface_internal.h b/compiler-rt/lib/hwasan/hwasan_interface_internal.h
index ca57f0fe437b..aedda317497b 100644
--- a/compiler-rt/lib/hwasan/hwasan_interface_internal.h
+++ b/compiler-rt/lib/hwasan/hwasan_interface_internal.h
@@ -112,6 +112,10 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_mismatch(uptr addr, u8 ts);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize);
+
+SANITIZER_INTERFACE_ATTRIBUTE
u8 __hwasan_generate_tag();
// Returns the offset of the first tag mismatch or -1 if the whole range is
diff --git a/compiler-rt/lib/hwasan/hwasan_linux.cpp b/compiler-rt/lib/hwasan/hwasan_linux.cpp
index 948e40154fec..ed0f30161b02 100644
--- a/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -354,12 +354,7 @@ void AndroidTestTlsSlot() {}
#endif
Thread *GetCurrentThread() {
- uptr *ThreadLong = GetCurrentThreadLongPtr();
-#if HWASAN_WITH_INTERCEPTORS
- if (!*ThreadLong)
- __hwasan_thread_enter();
-#endif
- auto *R = (StackAllocationsRingBuffer *)ThreadLong;
+ auto *R = (StackAllocationsRingBuffer *)GetCurrentThreadLongPtr();
return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next()));
}
@@ -460,21 +455,6 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
return true;
}
-// Entry point stub for interoperability between __hwasan_tag_mismatch (ASM) and
-// the rest of the mismatch handling code (C++).
-extern "C" void __hwasan_tag_mismatch_stub(uptr addr, uptr access_info,
- uptr *registers_frame) {
- AccessInfo ai;
- ai.is_store = access_info & 0x10;
- ai.recover = false;
- ai.addr = addr;
- ai.size = 1 << (access_info & 0xf);
-
- HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
- (uptr)__builtin_frame_address(0), nullptr, registers_frame);
- __builtin_unreachable();
-}
-
static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) {
stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
@@ -493,4 +473,24 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context) {
} // namespace __hwasan
+// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
+// rest of the mismatch handling code (C++).
+void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize) {
+ __hwasan::AccessInfo ai;
+ ai.is_store = access_info & 0x10;
+ ai.is_load = !ai.is_store;
+ ai.recover = access_info & 0x20;
+ ai.addr = addr;
+ if ((access_info & 0xf) == 0xf)
+ ai.size = outsize;
+ else
+ ai.size = 1 << (access_info & 0xf);
+
+ __hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), nullptr,
+ registers_frame);
+ __builtin_unreachable();
+}
+
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
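
Note: __hwasan_tag_mismatch4 unpacks access_info exactly as shown above: bit 4 = store, bit 5 = recover, low nibble = log2 of the access size, with the value 0xf deferring to the explicit outsize argument. A standalone decoder for the same encoding (struct and function names are illustrative):

// access_info_demo.cpp -- illustrative.
#include <cstdint>
#include <cstdio>

struct AccessInfo { bool IsStore, Recover; uint64_t Size; };

static AccessInfo Decode(uint64_t AI, size_t Outsize) {
  AccessInfo R;
  R.IsStore = AI & 0x10;
  R.Recover = AI & 0x20;
  R.Size = (AI & 0xf) == 0xf ? Outsize : (uint64_t)1 << (AI & 0xf);
  return R;
}

int main() {
  AccessInfo A = Decode(0x13, 0); // store, no recover, 8-byte access
  printf("store=%d recover=%d size=%llu\n", A.IsStore, A.Recover,
         (unsigned long long)A.Size);
  return 0;
}
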
diff --git a/compiler-rt/lib/hwasan/hwasan_report.cpp b/compiler-rt/lib/hwasan/hwasan_report.cpp
index 19cb27554bc6..5df8c0ac9106 100644
--- a/compiler-rt/lib/hwasan/hwasan_report.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -371,12 +371,13 @@ static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
InternalScopedString s(GetPageSizeCached() * 8);
for (tag_t *row = beg_row; row < end_row; row += row_len) {
s.append("%s", row == center_row_beg ? "=>" : " ");
+ s.append("%p:", row);
for (uptr i = 0; i < row_len; i++) {
s.append("%s", row + i == tag_ptr ? "[" : " ");
print_tag(s, &row[i]);
s.append("%s", row + i == tag_ptr ? "]" : " ");
}
- s.append("%s\n", row == center_row_beg ? "<=" : " ");
+ s.append("\n");
}
Printf("%s", s.data());
}
@@ -442,7 +443,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
Decorator d;
uptr untagged_addr = UntagAddr(tagged_addr);
Printf("%s", d.Error());
- const char *bug_type = "alocation-tail-overwritten";
+ const char *bug_type = "allocation-tail-overwritten";
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
Printf("\n%s", d.Default());
diff --git a/compiler-rt/lib/hwasan/hwasan_setjmp.S b/compiler-rt/lib/hwasan/hwasan_setjmp.S
new file mode 100644
index 000000000000..0c1354331940
--- /dev/null
+++ b/compiler-rt/lib/hwasan/hwasan_setjmp.S
@@ -0,0 +1,100 @@
+//===-- hwasan_setjmp.S --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the link register by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) (no modification of any other saved register, but that's not really going
+// to occur, and hence isn't as much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+
+.section .text
+.file "hwasan_setjmp.S"
+
+.global __interceptor_setjmp
+ASM_TYPE_FUNCTION(__interceptor_setjmp)
+__interceptor_setjmp:
+ CFI_STARTPROC
+ mov x1, #0
+ b __interceptor_sigsetjmp
+ CFI_ENDPROC
+ASM_SIZE(__interceptor_setjmp)
+
+#if SANITIZER_ANDROID
+// Bionic also defines a function `setjmp` that calls `sigsetjmp` saving the
+// current signal.
+.global __interceptor_setjmp_bionic
+ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
+__interceptor_setjmp_bionic:
+ CFI_STARTPROC
+ mov x1, #1
+ b __interceptor_sigsetjmp
+ CFI_ENDPROC
+ASM_SIZE(__interceptor_setjmp_bionic)
+#endif
+
+.global __interceptor_sigsetjmp
+ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
+__interceptor_sigsetjmp:
+ CFI_STARTPROC
+ stp x19, x20, [x0, #0<<3]
+ stp x21, x22, [x0, #2<<3]
+ stp x23, x24, [x0, #4<<3]
+ stp x25, x26, [x0, #6<<3]
+ stp x27, x28, [x0, #8<<3]
+ stp x29, x30, [x0, #10<<3]
+ stp d8, d9, [x0, #14<<3]
+ stp d10, d11, [x0, #16<<3]
+ stp d12, d13, [x0, #18<<3]
+ stp d14, d15, [x0, #20<<3]
+ mov x2, sp
+ str x2, [x0, #13<<3]
+ // We always have the second argument to __sigjmp_save (savemask) set, since
+ // the _setjmp function above has set it for us as `false`.
+ // This function is defined in hwasan_interceptors.cc
+ b __sigjmp_save
+ CFI_ENDPROC
+ASM_SIZE(__interceptor_sigsetjmp)
+
+
+.macro ALIAS first second
+ .globl \second
+ .equ \second\(), \first
+.endm
+
+#if SANITIZER_ANDROID
+ALIAS __interceptor_sigsetjmp, sigsetjmp
+.weak sigsetjmp
+
+ALIAS __interceptor_setjmp_bionic, setjmp
+.weak setjmp
+#else
+ALIAS __interceptor_sigsetjmp, __sigsetjmp
+.weak __sigsetjmp
+#endif
+
+ALIAS __interceptor_setjmp, _setjmp
+.weak _setjmp
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
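
Note: the stp/str offsets above define the __hw_register_buf layout that InternalLongjmp in hwasan_interceptors.cpp reads back: each slot is 8 bytes (hence the #n<<3 addressing), slots 0-11 hold x19-x30, slot 13 holds sp, and slots 14-21 hold d8-d15. The same map as a C++ sketch (slot names are illustrative):

// Slot indices into __hw_register_buf (unsigned long long[22]); each slot is
// 8 bytes, matching the #n<<3 offsets in the assembly above.
enum JmpBufSlot {
  kX19 = 0,              // stp x19, x20, [x0, #0<<3] ... up through x28
  kX29 = 10, kX30 = 11,  // frame pointer and link register
  kSP = 13,              // str x2, [x0, #13<<3] after mov x2, sp
  kD8 = 14, kD15 = 21,   // callee-saved FP registers d8..d15
};
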
diff --git a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
index 4c060a61e98e..08df12736bb4 100644
--- a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -135,12 +135,12 @@ __hwasan_tag_mismatch_v2:
stp x4, x5, [sp, #32]
stp x2, x3, [sp, #16]
- // Pass the address of the frame to __hwasan_tag_mismatch_stub, so that it can
+ // Pass the address of the frame to __hwasan_tag_mismatch4, so that it can
// extract the saved registers from this frame without having to worry about
// finding this frame.
mov x2, sp
- bl __hwasan_tag_mismatch_stub
+ bl __hwasan_tag_mismatch4
CFI_ENDPROC
.Lfunc_end0:
diff --git a/compiler-rt/lib/hwasan/hwasan_type_test.cpp b/compiler-rt/lib/hwasan/hwasan_type_test.cpp
new file mode 100644
index 000000000000..8cff495bae15
--- /dev/null
+++ b/compiler-rt/lib/hwasan/hwasan_type_test.cpp
@@ -0,0 +1,25 @@
+//===-- hwasan_type_test.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Compile-time tests of the internal type definitions.
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "hwasan.h"
+#include <setjmp.h>
+
+#define CHECK_TYPE_SIZE_FITS(TYPE) \
+ COMPILER_CHECK(sizeof(__hw_##TYPE) <= sizeof(TYPE))
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+CHECK_TYPE_SIZE_FITS(jmp_buf);
+CHECK_TYPE_SIZE_FITS(sigjmp_buf);
+#endif
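COMPILER_CHECK is compiler-rt's compile-time assertion macro, so the two
checks above fail the build if the interceptor-side types outgrow libc's. A
freestanding sketch of the same idea, using an assumed 22-slot stand-in for
the real __hw_jmp_buf rather than the actual definition:

    #include <setjmp.h>
    #include <stdint.h>

    /* Assumed layout: 22 eight-byte slots (x19..x30, sp, d8..d15, plus
     * padding), mirroring the offsets written by hwasan_setjmp.S. */
    typedef struct { uint64_t slots[22]; } hw_jmp_buf;

    /* If this buffer were larger than libc's jmp_buf, the intercepted
     * setjmp would write past the caller's buffer. */
    _Static_assert(sizeof(hw_jmp_buf) <= sizeof(jmp_buf),
                   "interceptor jmp_buf must fit in libc jmp_buf");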
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index 66a81ab350e5..d86c3921395c 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -36,10 +36,17 @@ static const uptr kMaxAllowedMallocSize = 8UL << 30;
static Allocator allocator;
+static uptr max_malloc_size;
+
void InitializeAllocator() {
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
common_flags()->allocator_release_to_os_interval_ms);
+ if (common_flags()->max_allocation_size_mb)
+ max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+ kMaxAllowedMallocSize);
+ else
+ max_malloc_size = kMaxAllowedMallocSize;
}
void AllocatorThreadFinish() {
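A hypothetical repro for the new cap: run the program below with
LSAN_OPTIONS=max_allocation_size_mb=16, so the 32 MiB request exceeds
max_malloc_size; with allocator_may_return_null=1 it yields NULL instead of
a hard error.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      /* 32 MiB: over the (assumed) 16 MiB cap set via LSAN_OPTIONS. */
      void *p = malloc(32u << 20);
      printf("malloc(32 MiB) -> %p\n", p);
      free(p);
      return 0;
    }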
@@ -72,14 +79,14 @@ static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
return nullptr;
}
- ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
+ ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared) {
if (size == 0)
size = 1;
- if (size > kMaxAllowedMallocSize)
+ if (size > max_malloc_size)
return ReportAllocationSizeTooBig(size, stack);
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
if (UNLIKELY(!p)) {
@@ -117,7 +124,7 @@ void Deallocate(void *p) {
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
RegisterDeallocation(p);
- if (new_size > kMaxAllowedMallocSize) {
+ if (new_size > max_malloc_size) {
allocator.Deallocate(GetAllocatorCache(), p);
return ReportAllocationSizeTooBig(new_size, stack);
}
diff --git a/compiler-rt/lib/msan/msan.cpp b/compiler-rt/lib/msan/msan.cpp
index 6ea63cb2c48f..7095ee1bf20f 100644
--- a/compiler-rt/lib/msan/msan.cpp
+++ b/compiler-rt/lib/msan/msan.cpp
@@ -122,6 +122,10 @@ class FlagHandlerKeepGoing : public FlagHandlerBase {
*halt_on_error_ = !tmp;
return true;
}
+ bool Format(char *buffer, uptr size) final {
+ const char *keep_going_str = (*halt_on_error_) ? "false" : "true";
+ return FormatString(buffer, size, keep_going_str);
+ }
};
static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 6aa4e2738075..a08c1a00d2e5 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -115,9 +115,16 @@ static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;
+static uptr max_malloc_size;
+
void MsanAllocatorInit() {
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ if (common_flags()->max_allocation_size_mb)
+ max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+ kMaxAllowedMallocSize);
+ else
+ max_malloc_size = kMaxAllowedMallocSize;
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -132,12 +139,12 @@ void MsanThreadLocalMallocStorage::CommitBack() {
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
bool zeroise) {
- if (size > kMaxAllowedMallocSize) {
+ if (size > max_malloc_size) {
if (AllocatorMayReturnNull()) {
Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
return nullptr;
}
- ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
+ ReportAllocationSizeTooBig(size, max_malloc_size, stack);
}
MsanThread *t = GetCurrentThread();
void *allocated;
diff --git a/compiler-rt/lib/msan/msan_blacklist.txt b/compiler-rt/lib/msan/msan_blacklist.txt
index 44a5680d4d06..3efef5712185 100644
--- a/compiler-rt/lib/msan/msan_blacklist.txt
+++ b/compiler-rt/lib/msan/msan_blacklist.txt
@@ -5,3 +5,6 @@
# Example usage:
# fun:*bad_function_name*
# src:file_with_tricky_code.cc
+
+# https://bugs.llvm.org/show_bug.cgi?id=31877
+fun:__gxx_personality_*
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
index 1d9d9f7986d7..1c6956eca0f6 100644
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -1070,8 +1070,9 @@ INTERCEPTOR(int, pthread_key_create, __sanitizer_pthread_key_t *key,
}
#if SANITIZER_NETBSD
-INTERCEPTOR(void, __libc_thr_keycreate, void *m, void (*dtor)(void *value)) \
- ALIAS(WRAPPER_NAME(pthread_key_create));
+INTERCEPTOR(int, __libc_thr_keycreate, __sanitizer_pthread_key_t *m,
+ void (*dtor)(void *value))
+ALIAS(WRAPPER_NAME(pthread_key_create));
#endif
INTERCEPTOR(int, pthread_join, void *th, void **retval) {
diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
index 498c05900bf2..81f2cdd26450 100644
--- a/compiler-rt/lib/profile/GCDAProfiling.c
+++ b/compiler-rt/lib/profile/GCDAProfiling.c
@@ -62,8 +62,27 @@ typedef unsigned long long uint64_t;
#include "InstrProfiling.h"
#include "InstrProfilingUtil.h"
-/* #define DEBUG_GCDAPROFILING */
+#ifndef _WIN32
+#include <pthread.h>
+static pthread_mutex_t gcov_flush_mutex = PTHREAD_MUTEX_INITIALIZER;
+static __inline void gcov_flush_lock() {
+ pthread_mutex_lock(&gcov_flush_mutex);
+}
+static __inline void gcov_flush_unlock() {
+ pthread_mutex_unlock(&gcov_flush_mutex);
+}
+#else
+#include <windows.h>
+static SRWLOCK gcov_flush_mutex = SRWLOCK_INIT;
+static __inline void gcov_flush_lock() {
+ AcquireSRWLockExclusive(&gcov_flush_mutex);
+}
+static __inline void gcov_flush_unlock() {
+ ReleaseSRWLockExclusive(&gcov_flush_mutex);
+}
+#endif
+/* #define DEBUG_GCDAPROFILING */
/*
* --- GCOV file format I/O primitives ---
*/
@@ -620,12 +639,16 @@ void llvm_register_flush_function(fn_ptr fn) {
}
void __gcov_flush() {
+ gcov_flush_lock();
+
struct fn_node* curr = flush_fn_list.head;
while (curr) {
curr->fn();
curr = curr->next;
}
+
+ gcov_flush_unlock();
}
COMPILER_RT_VISIBILITY
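The new lock matters because __gcov_flush may be called from several threads
at once. A sketch of such a caller (assumes a build with -fprofile-arcs so
the symbol is available):

    #include <pthread.h>

    extern void __gcov_flush(void);

    /* Two threads flushing concurrently: each walk of flush_fn_list is
     * serialized by gcov_flush_lock()/gcov_flush_unlock(). */
    static void *flusher(void *arg) {
      for (int i = 0; i < 100; i++)
        __gcov_flush();
      return arg;
    }

    int main(void) {
      pthread_t t1, t2;
      pthread_create(&t1, 0, flusher, 0);
      pthread_create(&t2, 0, flusher, 0);
      pthread_join(t1, 0);
      pthread_join(t2, 0);
      return 0;
    }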
diff --git a/compiler-rt/lib/profile/InstrProfData.inc b/compiler-rt/lib/profile/InstrProfData.inc
deleted file mode 100644
index 7078af5f4cf8..000000000000
--- a/compiler-rt/lib/profile/InstrProfData.inc
+++ /dev/null
@@ -1,752 +0,0 @@
-/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-===*\
-|*
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-|* See https://llvm.org/LICENSE.txt for license information.
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
-\*===----------------------------------------------------------------------===*/
-/*
- * This is the master file that defines all the data structures, signatures,
- * and constant literals shared across the profiling runtime library,
- * compiler (instrumentation), and host tools (reader/writer). The entities
- * defined in this file affect the profile runtime ABI, the raw profile format,
- * or both.
- *
- * The file has two identical copies. The master copy lives in LLVM and
- * the other one sits in the compiler-rt/lib/profile directory. To make changes
- * in this file, first modify the master copy and copy it over to compiler-rt.
- * Testing of any change in this file can start only after the two copies are
- * synced up.
- *
- * The first part of the file includes macros that define the types, names, and
- * initializers for the member fields of the core data structures. The field
- * declarations for one structure are enabled by defining the field activation
- * macro associated with that structure. Only one field activation record
- * can be defined at a time; the remaining definitions are filtered out by
- * the preprocessor.
- *
- * Examples of how the template is used to instantiate structure definitions:
- * 1. To declare a structure:
- *
- * struct ProfData {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- * Type Name;
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 2. To construct LLVM type arrays for the struct type:
- *
- * Type *DataTypes[] = {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- * LLVMType,
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 3. To construct a constant array for the initializers:
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- * Initializer,
- * Constant *ConstantVals[] = {
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- *
- * The second part of the file includes definitions of all other entities that
- * are related to the runtime ABI and format. When no field activation macro is
- * defined, this file can be included to introduce the definitions.
- *
-\*===----------------------------------------------------------------------===*/
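For readers new to the pattern this header describes, a self-contained mock
of the field-activation technique (the fields below are placeholders, not
the real profile fields, and the template lines are inlined rather than
#include'd):

    #include <stdint.h>

    /* Defining the activation macro turns each template entry into a
     * struct field; other definitions of the macro can instead expand the
     * same entries into type lists or initializer lists. */
    #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) Type Name;
    typedef struct ProfDataMock {
      INSTR_PROF_DATA(uint64_t, Int64Ty, NameRef, 0)
      INSTR_PROF_DATA(uint64_t, Int64Ty, FuncHash, 0)
      INSTR_PROF_DATA(uint32_t, Int32Ty, NumCounters, 0)
    } ProfDataMock;
    #undef INSTR_PROF_DATA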
-
-/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
- * the compiler runtime. */
-#ifndef INSTR_PROF_VISIBILITY
-#define INSTR_PROF_VISIBILITY
-#endif
-
-/* INSTR_PROF_DATA start. */
-/* Definition of member fields of the per-function control structure. */
-#ifndef INSTR_PROF_DATA
-#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
- ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
- ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- Inc->getHash()->getZExtValue()))
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt64PtrTy(Ctx), CounterPtr, \
- ConstantExpr::getBitCast(CounterPtr, \
- llvm::Type::getInt64PtrTy(Ctx)))
-/* This is used to map function pointers for the indirect call targets to
- * function name hashes during the conversion from raw to merged profile
- * data.
- */
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
- FunctionAddr)
-INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
- ValuesPtrExpr)
-INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
- ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
-INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
- ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
-#undef INSTR_PROF_DATA
-/* INSTR_PROF_DATA end. */
-
-
-/* This is an internal data structure used by the value profiler. It
- * is defined here so that the serialization code can be shared with
- * LLVM and used in unit tests.
- *
- * typedef struct ValueProfNode {
- * // InstrProfValueData VData;
- * uint64_t Value;
- * uint64_t Count;
- * struct ValueProfNode *Next;
- * } ValueProfNode;
- */
-/* INSTR_PROF_VALUE_NODE start. */
-#ifndef INSTR_PROF_VALUE_NODE
-#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
-                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
-#undef INSTR_PROF_VALUE_NODE
-/* INSTR_PROF_VALUE_NODE end. */
-
-/* INSTR_PROF_RAW_HEADER start */
-/* Definition of member fields of the raw profile header data structure. */
-#ifndef INSTR_PROF_RAW_HEADER
-#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
-INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
-INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, (uintptr_t)CountersBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
-#undef INSTR_PROF_RAW_HEADER
-/* INSTR_PROF_RAW_HEADER end */
-
-/* VALUE_PROF_FUNC_PARAM start */
-/* Definition of parameter types of the runtime API used to do value profiling
- * for a given value site.
- */
-#ifndef VALUE_PROF_FUNC_PARAM
-#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
-#define INSTR_PROF_COMMA
-#else
-#define INSTR_PROF_DATA_DEFINED
-#define INSTR_PROF_COMMA ,
-#endif
-VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
- INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
-#ifndef VALUE_RANGE_PROF
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
-#else /* VALUE_RANGE_PROF */
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
- INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
- INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
- INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
-#endif /*VALUE_RANGE_PROF */
-#undef VALUE_PROF_FUNC_PARAM
-#undef INSTR_PROF_COMMA
-/* VALUE_PROF_FUNC_PARAM end */
-
-/* VALUE_PROF_KIND start */
-#ifndef VALUE_PROF_KIND
-#define VALUE_PROF_KIND(Enumerator, Value, Descr)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-/* For indirect function call value profiling, the addresses of the target
- * functions are profiled by the instrumented code. The target addresses are
- * written in the raw profile data and converted to target function name's MD5
- * hash by the profile reader during deserialization. Typically, this happens
- * when the raw profile data is read during profile merging.
- *
- * For this remapping the ProfData is used. ProfData contains both the function
- * name hash and the function address.
- */
-VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
-/* For memory intrinsic functions size profiling. */
-VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
-/* These two kinds must be the last to be
- * declared. This is to make sure the string
- * array created with the template can be
- * indexed with the kind value.
- */
-VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
-VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
-
-#undef VALUE_PROF_KIND
-/* VALUE_PROF_KIND end */
-
-/* COVMAP_FUNC_RECORD start */
-/* Definition of member fields of the function record structure in coverage
- * map.
- */
-#ifndef COVMAP_FUNC_RECORD
-#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-#ifdef COVMAP_V1
-COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
- NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
- llvm::Type::getInt8PtrTy(Ctx)))
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
- NameValue.size()))
-#else
-COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- llvm::IndexedInstrProf::ComputeHash(NameValue)))
-#endif
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
- CoverageMapping.size()))
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
-#undef COVMAP_FUNC_RECORD
-/* COVMAP_FUNC_RECORD end. */
-
-/* COVMAP_HEADER start */
-/* Definition of member fields of coverage map header.
- */
-#ifndef COVMAP_HEADER
-#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
- llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()))
-COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
- llvm::ConstantInt::get(Int32Ty, FilenamesSize))
-COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
- llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
-COVMAP_HEADER(uint32_t, Int32Ty, Version, \
- llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
-#undef COVMAP_HEADER
-/* COVMAP_HEADER end. */
-
-
-#ifdef INSTR_PROF_SECT_ENTRY
-#define INSTR_PROF_DATA_DEFINED
-INSTR_PROF_SECT_ENTRY(IPSK_data, \
- INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
- INSTR_PROF_DATA_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
- INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
- INSTR_PROF_CNTS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_name, \
- INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
- INSTR_PROF_NAME_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vals, \
- INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
- INSTR_PROF_VALS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
- INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
- INSTR_PROF_VNODES_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
- INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
- INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
- INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
- INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
-
-#undef INSTR_PROF_SECT_ENTRY
-#endif
-
-
-#ifdef INSTR_PROF_VALUE_PROF_DATA
-#define INSTR_PROF_DATA_DEFINED
-
-#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
-/*!
- * This is the header of the data structure that defines the on-disk
- * layout of the value profile data of a particular kind for one function.
- */
-typedef struct ValueProfRecord {
- /* The kind of the value profile record. */
- uint32_t Kind;
- /*
- * The number of value profile sites. It is guaranteed to be non-zero;
- * otherwise the record for this kind won't be emitted.
- */
- uint32_t NumValueSites;
- /*
- * The first element of the array that stores the number of profiled
- * values for each value site. The size of the array is NumValueSites.
- * Since NumValueSites is greater than zero, there is at least one
- * element in the array.
- */
- uint8_t SiteCountArray[1];
-
- /*
-   * The fake declaration below is for documentation purposes only.
-   * It aligns the start of the next field to an 8-byte boundary.
- uint8_t Padding[X];
- */
-
- /* The array of value profile data. The size of the array is the sum
- * of all elements in SiteCountArray[].
- InstrProfValueData ValueData[];
- */
-
-#ifdef __cplusplus
- /*!
- * Return the number of value sites.
- */
- uint32_t getNumValueSites() const { return NumValueSites; }
- /*!
- * Read data from this record and save it to Record.
- */
- void deserializeTo(InstrProfRecord &Record,
- InstrProfSymtab *SymTab);
- /*
- * In-place byte swap:
-   * Do byte swap for this instance. \c Old is the original order before
-   * the swap, and \c New is the new byte order.
- */
- void swapBytes(support::endianness Old, support::endianness New);
-#endif
-} ValueProfRecord;
-
-/*!
- * Per-function header/control data structure for value profiling
- * data in indexed format.
- */
-typedef struct ValueProfData {
- /*
- * Total size in bytes including this field. It must be a multiple
- * of sizeof(uint64_t).
- */
- uint32_t TotalSize;
- /*
-   * The number of value profile kinds that have value profile data.
- * In this implementation, a value profile kind is considered to
- * have profile data if the number of value profile sites for the
- * kind is not zero. More aggressively, the implementation can
- * choose to check the actual data value: if none of the value sites
- * has any profiled values, the kind can be skipped.
- */
- uint32_t NumValueKinds;
-
- /*
- * Following are a sequence of variable length records. The prefix/header
- * of each record is defined by ValueProfRecord type. The number of
- * records is NumValueKinds.
- * ValueProfRecord Record_1;
- * ValueProfRecord Record_N;
- */
-
-#if __cplusplus
- /*!
- * Return the total size in bytes of the on-disk value profile data
- * given the data stored in Record.
- */
- static uint32_t getSize(const InstrProfRecord &Record);
- /*!
- * Return a pointer to \c ValueProfData instance ready to be streamed.
- */
- static std::unique_ptr<ValueProfData>
- serializeFrom(const InstrProfRecord &Record);
- /*!
- * Check the integrity of the record.
- */
- Error checkIntegrity();
- /*!
-   * Return a pointer to a \c ValueProfData instance ready to be read.
-   * All data in the instance is properly byte-swapped. The input
-   * data is assumed to be in little-endian order.
- */
- static Expected<std::unique_ptr<ValueProfData>>
- getValueProfData(const unsigned char *SrcBuffer,
- const unsigned char *const SrcBufferEnd,
- support::endianness SrcDataEndianness);
- /*!
- * Swap byte order from \c Endianness order to host byte order.
- */
- void swapBytesToHost(support::endianness Endianness);
- /*!
- * Swap byte order from host byte order to \c Endianness order.
- */
- void swapBytesFromHost(support::endianness Endianness);
- /*!
-   * Return the total size of \c ValueProfData.
- */
- uint32_t getSize() const { return TotalSize; }
- /*!
- * Read data from this data and save it to \c Record.
- */
- void deserializeTo(InstrProfRecord &Record,
- InstrProfSymtab *SymTab);
- void operator delete(void *ptr) { ::operator delete(ptr); }
-#endif
-} ValueProfData;
-
-/*
- * The closure is designed to abstract away two types of value profile data:
- * - InstrProfRecord, the primary data structure used to
- *   represent profile data in host tools (reader, writer, and profile-use);
- * - the value profile runtime data structure suitable for use by the C
- *   runtime library.
- *
- * Both sources of data need to serialize to disk/memory-buffer in a common
- * format: ValueProfData. The abstraction allows compiler-rt's raw profile
- * writer to share the same format and code with the indexed profile writer.
- *
- * For documentation of the member methods below, refer to the corresponding
- * methods in class InstrProfRecord.
- */
-typedef struct ValueProfRecordClosure {
- const void *Record;
- uint32_t (*GetNumValueKinds)(const void *Record);
- uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
- uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
- uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
-
- /*
- * After extracting the value profile data from the value profile record,
-   * this method is used to map the in-memory value to the on-disk value. If
-   * the method is null, the value is written out untranslated.
- */
- uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
- void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
- uint32_t S);
- ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
-} ValueProfRecordClosure;
-
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getFirstValueProfRecord(ValueProfData *VPD);
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getValueProfRecordNext(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY InstrProfValueData *
-getValueProfRecordValueData(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfRecordHeaderSize(uint32_t NumValueSites);
-
-#undef INSTR_PROF_VALUE_PROF_DATA
-#endif /* INSTR_PROF_VALUE_PROF_DATA */
-
-
-#ifdef INSTR_PROF_COMMON_API_IMPL
-#define INSTR_PROF_DATA_DEFINED
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#define INSTR_PROF_NULLPTR nullptr
-#else
-#define INSTR_PROF_INLINE
-#define INSTR_PROF_NULLPTR NULL
-#endif
-
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-
-/*!
- * Return the \c ValueProfRecord header size including the
- * padding bytes.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
- uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
- sizeof(uint8_t) * NumValueSites;
- /* Round the size to multiple of 8 bytes. */
- Size = (Size + 7) & ~7;
- return Size;
-}
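A worked check of the 8-byte round-up above (offsetof(ValueProfRecord,
SiteCountArray) is 8, from the two uint32_t fields that precede it):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      /* Header size for N value sites: 8 fixed bytes plus N site counts,
       * rounded up to a multiple of 8: N=1 -> 16, N=5 -> 16, N=9 -> 24. */
      for (uint32_t n = 1; n <= 9; n += 4) {
        uint32_t size = (8 + n + 7) & ~7u;
        printf("NumValueSites=%u -> header %u bytes\n", n, size);
      }
      return 0;
    }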
-
-/*!
- * Return the total size of the value profile record including the
- * header and the value data.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordSize(uint32_t NumValueSites,
- uint32_t NumValueData) {
- return getValueProfRecordHeaderSize(NumValueSites) +
- sizeof(InstrProfValueData) * NumValueData;
-}
-
-/*!
- * Return the pointer to the start of value data array.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
- return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
- This->NumValueSites));
-}
-
-/*!
- * Return the total number of value data for \c This record.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
- uint32_t NumValueData = 0;
- uint32_t I;
- for (I = 0; I < This->NumValueSites; I++)
- NumValueData += This->SiteCountArray[I];
- return NumValueData;
-}
-
-/*!
- * Use this method to advance from \c This to the next \c ValueProfRecord.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
- uint32_t NumValueData = getValueProfRecordNumValueData(This);
- return (ValueProfRecord *)((char *)This +
- getValueProfRecordSize(This->NumValueSites,
- NumValueData));
-}
-
-/*!
- * Return the first \c ValueProfRecord instance.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
- return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
-}
-
-/* Closure based interfaces. */
-
-/*!
- * Return the total size in bytes of the on-disk value profile data
- * given the data stored in Record.
- */
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfDataSize(ValueProfRecordClosure *Closure) {
- uint32_t Kind;
- uint32_t TotalSize = sizeof(ValueProfData);
- const void *Record = Closure->Record;
-
- for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
- uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
- if (!NumValueSites)
- continue;
- TotalSize += getValueProfRecordSize(NumValueSites,
- Closure->GetNumValueData(Record, Kind));
- }
- return TotalSize;
-}
-
-/*!
- * Extract value profile data of a function for the profile kind \c ValueKind
- * from the \c Closure and serialize the data into \c This record instance.
- */
-INSTR_PROF_VISIBILITY void
-serializeValueProfRecordFrom(ValueProfRecord *This,
- ValueProfRecordClosure *Closure,
- uint32_t ValueKind, uint32_t NumValueSites) {
- uint32_t S;
- const void *Record = Closure->Record;
- This->Kind = ValueKind;
- This->NumValueSites = NumValueSites;
- InstrProfValueData *DstVD = getValueProfRecordValueData(This);
-
- for (S = 0; S < NumValueSites; S++) {
- uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
- This->SiteCountArray[S] = ND;
- Closure->GetValueForSite(Record, DstVD, ValueKind, S);
- DstVD += ND;
- }
-}
-
-/*!
- * Extract value profile data of a function from the \c Closure
- * and serialize the data into \c DstData if it is not NULL, or otherwise
- * into heap memory allocated by the \c Closure's allocator method. If
- * \c DstData is not NULL, the caller is expected to set the TotalSize
- * in \c DstData.
- */
-INSTR_PROF_VISIBILITY ValueProfData *
-serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
- ValueProfData *DstData) {
- uint32_t Kind;
- uint32_t TotalSize =
- DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
-
- ValueProfData *VPD =
- DstData ? DstData : Closure->AllocValueProfData(TotalSize);
-
- VPD->TotalSize = TotalSize;
- VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
- ValueProfRecord *VR = getFirstValueProfRecord(VPD);
- for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
- uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
- if (!NumValueSites)
- continue;
- serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
- VR = getValueProfRecordNext(VR);
- }
- return VPD;
-}
-
-#undef INSTR_PROF_COMMON_API_IMPL
-#endif /* INSTR_PROF_COMMON_API_IMPL */
-
-/*============================================================================*/
-
-#ifndef INSTR_PROF_DATA_DEFINED
-
-#ifndef INSTR_PROF_DATA_INC
-#define INSTR_PROF_DATA_INC
-
-/* Helper macros. */
-#define INSTR_PROF_SIMPLE_QUOTE(x) #x
-#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
-#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
-#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
-
-/* Magic number to detect file format and endianness.
- * Use 255 at one end, since no UTF-8 file can use that character. Avoid 0,
- * so that utilities, like strings, don't grab it as a string. 129 is also
- * invalid UTF-8, and high enough to be interesting.
- * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
- * for 32-bit platforms.
- */
-#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
- (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
- (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
-#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
- (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
- (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
-
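A quick standalone check that the 64-bit magic spells 0xff 'l' 'p' 'r' 'o'
'f' 'r' 0x81 from the high byte down:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      uint64_t magic = (uint64_t)255 << 56 | (uint64_t)'l' << 48 |
                       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 |
                       (uint64_t)'o' << 24 | (uint64_t)'f' << 16 |
                       (uint64_t)'r' << 8 | (uint64_t)129;
      for (int shift = 56; shift >= 0; shift -= 8)
        printf("%02x ", (unsigned)((magic >> shift) & 0xff));
      printf("\n"); /* ff 6c 70 72 6f 66 72 81 */
      return 0;
    }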
-/* Raw profile format version (starts from 1). */
-#define INSTR_PROF_RAW_VERSION 4
-/* Indexed profile format version (starts from 1). */
-#define INSTR_PROF_INDEX_VERSION 5
-/* Coverage mapping format version (starts from 0). */
-#define INSTR_PROF_COVMAP_VERSION 2
-
-/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
- * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate that this is an IR-level instrumentation
- * generated profile, and to 0 if this is a Clang FE generated profile.
- * 1 in bit 57 indicates there are context-sensitive records in the profile.
- */
-#define VARIANT_MASKS_ALL 0xff00000000000000ULL
-#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
-#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
-#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
-#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
-
-/* The variable that holds the name of the profile data
- * specified via command line. */
-#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
-
-/* Section name strings common to all targets other
- * than WIN32. */
-#define INSTR_PROF_DATA_COMMON __llvm_prf_data
-#define INSTR_PROF_NAME_COMMON __llvm_prf_names
-#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
-#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
-#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
-#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
-#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
-/* Windows section names. Because these section names contain dollar characters,
- * they must be quoted.
- */
-#define INSTR_PROF_DATA_COFF ".lprfd$M"
-#define INSTR_PROF_NAME_COFF ".lprfn$M"
-#define INSTR_PROF_CNTS_COFF ".lprfc$M"
-#define INSTR_PROF_VALS_COFF ".lprfv$M"
-#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
-#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
-#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
-
-#ifdef _WIN32
-/* Runtime section names and name strings. */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
-#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
-#else
-/* Runtime section names and name strings. */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
-/* Order file instrumentation. */
-#define INSTR_PROF_ORDERFILE_SECT_NAME \
- INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
-#endif
-
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR \
- INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR \
- INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)
-
-/* Macros to define start/stop section symbol for a given
- * section on Linux. For instance
- * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
- * expand to __start___llvm_prf_data.
- */
-#define INSTR_PROF_SECT_START(Sect) \
- INSTR_PROF_CONCAT(__start_,Sect)
-#define INSTR_PROF_SECT_STOP(Sect) \
- INSTR_PROF_CONCAT(__stop_,Sect)
-
-/* Value Profiling API linkage name. */
-#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
-#define INSTR_PROF_VALUE_PROF_FUNC_STR \
- INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
-#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
-#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
- INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
-
-/* InstrProfile per-function control data alignment. */
-#define INSTR_PROF_DATA_ALIGNMENT 8
-
-/* The data structure that represents a tracked value by the
- * value profiler.
- */
-typedef struct InstrProfValueData {
- /* Profiled value. */
- uint64_t Value;
- /* Number of times the value appears in the training run. */
- uint64_t Count;
-} InstrProfValueData;
-
-#endif /* INSTR_PROF_DATA_INC */
-
-#ifndef INSTR_ORDER_FILE_INC
-/* The maximal # of functions: 128*1024 (the buffer size will be 128*4 KB). */
-#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
-#define INSTR_ORDER_FILE_BUFFER_BITS 17
-#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
-#endif /* INSTR_ORDER_FILE_INC */
-#else
-#undef INSTR_PROF_DATA_DEFINED
-#endif
diff --git a/compiler-rt/lib/profile/InstrProfiling.c b/compiler-rt/lib/profile/InstrProfiling.c
index f3787715e709..087d1cdd2efe 100644
--- a/compiler-rt/lib/profile/InstrProfiling.c
+++ b/compiler-rt/lib/profile/InstrProfiling.c
@@ -15,7 +15,7 @@
#include "InstrProfilingInternal.h"
#define INSTR_PROF_VALUE_PROF_DATA
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
diff --git a/compiler-rt/lib/profile/InstrProfiling.h b/compiler-rt/lib/profile/InstrProfiling.h
index ffc4396169d0..3a3bab3d0b4c 100644
--- a/compiler-rt/lib/profile/InstrProfiling.h
+++ b/compiler-rt/lib/profile/InstrProfiling.h
@@ -13,32 +13,48 @@
#include <stdio.h>
#define INSTR_PROF_VISIBILITY COMPILER_RT_VISIBILITY
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
enum ValueKind {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) Enumerator = Value,
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
};
typedef void *IntPtrT;
typedef struct COMPILER_RT_ALIGNAS(INSTR_PROF_DATA_ALIGNMENT)
__llvm_profile_data {
#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) Type Name;
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
} __llvm_profile_data;
typedef struct __llvm_profile_header {
#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer) Type Name;
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
} __llvm_profile_header;
typedef struct ValueProfNode * PtrToNodeT;
typedef struct ValueProfNode {
#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer) Type Name;
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
} ValueProfNode;
/*!
+ * \brief Return 1 if profile counters are continuously synced to the raw
+ * profile via an mmap(). This is in contrast to the default mode, in which
+ * the raw profile is written out at program exit time.
+ */
+int __llvm_profile_is_continuous_mode_enabled(void);
+
+/*!
+ * \brief Enable continuous mode.
+ *
+ * See \ref __llvm_profile_is_continuous_mode_enabled. The behavior is undefined
+ * if continuous mode is already enabled, or if it cannot be enabled due to
+ * conflicting options.
+ */
+void __llvm_profile_enable_continuous_mode(void);
+
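A usage sketch for this API pair. Continuous mode is normally enabled via a
%c filename pattern (e.g. LLVM_PROFILE_FILE="default%c.profraw") rather
than by calling the function directly; the guard below avoids the
documented undefined behavior of enabling it twice:

    #include "InstrProfiling.h" /* assumed to be on the include path */
    #include <stdio.h>

    int main(void) {
      if (!__llvm_profile_is_continuous_mode_enabled())
        __llvm_profile_enable_continuous_mode();
      printf("continuous mode: %d\n",
             __llvm_profile_is_continuous_mode_enabled());
      return 0;
    }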
+/*!
* \brief Get number of bytes necessary to pad the argument to eight
* byte boundary.
*/
@@ -104,7 +120,7 @@ int __llvm_profile_check_compatibility(const char *Profile,
*/
void INSTR_PROF_VALUE_PROF_FUNC(
#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType) ArgType ArgName
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
);
void __llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data,
@@ -159,6 +175,12 @@ int __llvm_orderfile_dump(void);
* Note: There may be multiple copies of the profile runtime (one for each
* instrumented image/DSO). This API only modifies the filename within the
* copy of the runtime available to the calling image.
+ *
+ * Warning: This is a no-op if continuous mode (\ref
+ * __llvm_profile_is_continuous_mode_enabled) is on. The reason for this is
+ * that in continuous mode, profile counters are mmap()'d to the profile at
+ * program initialization time. Support for transferring the mmap'd profile
+ * counts to a new file has not been implemented.
*/
void __llvm_profile_set_filename(const char *Name);
@@ -181,6 +203,12 @@ void __llvm_profile_set_filename(const char *Name);
* Note: There may be multiple copies of the profile runtime (one for each
* instrumented image/DSO). This API only modifies the file object within the
* copy of the runtime available to the calling image.
+ *
+ * Warning: This is a no-op if continuous mode (\ref
+ * __llvm_profile_is_continuous_mode_enabled) is on. The reason for this is
+ * that in continuous mode, profile counters are mmap()'d to the profile at
+ * program initialization time. Support for transferring the mmap'd profile
+ * counts to a new file has not been implemented.
*/
void __llvm_profile_set_file_object(FILE *File, int EnableMerge);
@@ -223,6 +251,24 @@ uint64_t __llvm_profile_get_version(void);
uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
const __llvm_profile_data *End);
+/*! \brief Given the sizes of the data and counter information, return the
+ * number of padding bytes before and after the counters, and after the names,
+ * in the raw profile.
+ *
+ * Note: In this context, "size" means "number of entries", i.e. the first two
+ * arguments must be the results of __llvm_profile_get_data_size() and of
+ * (__llvm_profile_end_counters() - __llvm_profile_begin_counters()),
+ * respectively.
+ *
+ * Note: When mmap() mode is disabled, no padding bytes before/after counters
+ * are needed. However, in mmap() mode, the counter section in the raw profile
+ * must be page-aligned: this API computes the number of padding bytes
+ * needed to achieve that.
+ */
+void __llvm_profile_get_padding_sizes_for_counters(
+ uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
+ uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
+ uint64_t *PaddingBytesAfterNames);
+
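A sketch of querying the new padding API from an instrumented binary. All
symbols below are declared in this header, and the sizes passed in are
entry counts, per the note above:

    #include "InstrProfiling.h" /* assumed to be on the include path */
    #include <stdio.h>

    int main(void) {
      uint64_t DataSize = __llvm_profile_get_data_size(
          __llvm_profile_begin_data(), __llvm_profile_end_data());
      uint64_t CountersSize =
          __llvm_profile_end_counters() - __llvm_profile_begin_counters();
      uint64_t NamesSize =
          __llvm_profile_end_names() - __llvm_profile_begin_names();
      uint64_t Before, After, AfterNames;
      __llvm_profile_get_padding_sizes_for_counters(
          DataSize, CountersSize, NamesSize, &Before, &After, &AfterNames);
      printf("padding: %llu before, %llu after counters, %llu after names\n",
             (unsigned long long)Before, (unsigned long long)After,
             (unsigned long long)AfterNames);
      return 0;
    }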
/*!
* \brief Set the flag that profile data has been dumped to the file.
* This is useful for users to disable dumping profile data to the file for
diff --git a/compiler-rt/lib/profile/InstrProfilingBuffer.c b/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 5bdeb8e32807..174280fd4b52 100644
--- a/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -8,6 +8,23 @@
#include "InstrProfiling.h"
#include "InstrProfilingInternal.h"
+#include "InstrProfilingPort.h"
+
+/* When continuous mode is enabled (%c), this parameter is set to 1.
+ *
+ * This parameter is defined here in InstrProfilingBuffer.o, instead of in
+ * InstrProfilingFile.o, to sequester all libc-dependent code in
+ * InstrProfilingFile.o. The test `instrprof-without-libc` will break if this
+ * layering is violated. */
+static int ContinuouslySyncProfile = 0;
+
+COMPILER_RT_VISIBILITY int __llvm_profile_is_continuous_mode_enabled(void) {
+ return ContinuouslySyncProfile;
+}
+
+COMPILER_RT_VISIBILITY void __llvm_profile_enable_continuous_mode(void) {
+ ContinuouslySyncProfile = 1;
+}
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer(void) {
@@ -30,6 +47,41 @@ uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
sizeof(__llvm_profile_data);
}
+/// Calculate the number of padding bytes that must be added to \p Offset in
+/// order for (\p Offset + Padding) to be page-aligned.
+static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset,
+ unsigned PageSize) {
+ uint64_t OffsetModPage = Offset % PageSize;
+ if (OffsetModPage > 0)
+ return PageSize - OffsetModPage;
+ return 0;
+}
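A worked check of the helper, assuming 4 KiB pages: an offset of 5000 needs
3192 padding bytes to reach the next boundary at 8192, and an exact
multiple needs none:

    #include <stdio.h>

    static unsigned long pad(unsigned long off, unsigned page) {
      unsigned long m = off % page;
      return m ? page - m : 0;
    }

    int main(void) {
      printf("%lu %lu\n", pad(5000, 4096), pad(8192, 4096)); /* 3192 0 */
      return 0;
    }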
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_get_padding_sizes_for_counters(
+ uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
+ uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
+ uint64_t *PaddingBytesAfterNames) {
+ if (!__llvm_profile_is_continuous_mode_enabled()) {
+ *PaddingBytesBeforeCounters = 0;
+ *PaddingBytesAfterCounters = 0;
+ *PaddingBytesAfterNames = __llvm_profile_get_num_padding_bytes(NamesSize);
+ return;
+ }
+
+ // In continuous mode, the file offsets for headers and for the start of
+ // counter sections need to be page-aligned.
+ unsigned PageSize = getpagesize();
+ uint64_t DataSizeInBytes = DataSize * sizeof(__llvm_profile_data);
+ uint64_t CountersSizeInBytes = CountersSize * sizeof(uint64_t);
+ *PaddingBytesBeforeCounters = calculateBytesNeededToPageAlign(
+ sizeof(__llvm_profile_header) + DataSizeInBytes, PageSize);
+ *PaddingBytesAfterCounters =
+ calculateBytesNeededToPageAlign(CountersSizeInBytes, PageSize);
+ *PaddingBytesAfterNames =
+ calculateBytesNeededToPageAlign(NamesSize, PageSize);
+}
+
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
@@ -37,11 +89,21 @@ uint64_t __llvm_profile_get_size_for_buffer_internal(
const char *NamesBegin, const char *NamesEnd) {
/* Match logic in __llvm_profile_write_buffer(). */
const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
- const uint8_t Padding = __llvm_profile_get_num_padding_bytes(NamesSize);
+ uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ uint64_t CountersSize = CountersEnd - CountersBegin;
+
+ /* Determine how much padding is needed before/after the counters and after
+ * the names. */
+ uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
+ PaddingBytesAfterNames;
+ __llvm_profile_get_padding_sizes_for_counters(
+ DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
+ &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+
return sizeof(__llvm_profile_header) +
- (__llvm_profile_get_data_size(DataBegin, DataEnd) *
- sizeof(__llvm_profile_data)) +
- (CountersEnd - CountersBegin) * sizeof(uint64_t) + NamesSize + Padding;
+ (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters +
+ (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters +
+ NamesSize + PaddingBytesAfterNames;
}
COMPILER_RT_VISIBILITY
diff --git a/compiler-rt/lib/profile/InstrProfilingFile.c b/compiler-rt/lib/profile/InstrProfilingFile.c
index 1b253c3e865e..7f3727eed92d 100644
--- a/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -32,6 +32,7 @@
#include "InstrProfiling.h"
#include "InstrProfilingInternal.h"
+#include "InstrProfilingPort.h"
#include "InstrProfilingUtil.h"
/* From where is profile name specified.
@@ -100,6 +101,12 @@ static void setProfileFile(FILE *File) { ProfileFile = File; }
COMPILER_RT_VISIBILITY void __llvm_profile_set_file_object(FILE *File,
int EnableMerge) {
+ if (__llvm_profile_is_continuous_mode_enabled()) {
+ PROF_WARN("__llvm_profile_set_file_object(fd=%d) not supported, because "
+ "continuous sync mode (%%c) is enabled",
+ fileno(File));
+ return;
+ }
setProfileFile(File);
setProfileMergeRequested(EnableMerge);
}
@@ -115,11 +122,23 @@ static uint32_t fileWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs,
uint32_t NumIOVecs) {
uint32_t I;
FILE *File = (FILE *)This->WriterCtx;
+ char Zeroes[sizeof(uint64_t)] = {0};
for (I = 0; I < NumIOVecs; I++) {
if (IOVecs[I].Data) {
if (fwrite(IOVecs[I].Data, IOVecs[I].ElmSize, IOVecs[I].NumElm, File) !=
IOVecs[I].NumElm)
return 1;
+ } else if (IOVecs[I].UseZeroPadding) {
+ size_t BytesToWrite = IOVecs[I].ElmSize * IOVecs[I].NumElm;
+ while (BytesToWrite > 0) {
+ size_t PartialWriteLen =
+ (sizeof(uint64_t) > BytesToWrite) ? BytesToWrite : sizeof(uint64_t);
+ if (fwrite(Zeroes, sizeof(uint8_t), PartialWriteLen, File) !=
+ PartialWriteLen) {
+ return 1;
+ }
+ BytesToWrite -= PartialWriteLen;
+ }
} else {
if (fseek(File, IOVecs[I].ElmSize * IOVecs[I].NumElm, SEEK_CUR) == -1)
return 1;
@@ -164,21 +183,18 @@ static void setupIOBuffer() {
}
}
-/* Read profile data in \c ProfileFile and merge with in-memory
-   profile counters. Returns -1 if there is a fatal error, otherwise
- 0 is returned. Returning 0 does not mean merge is actually
- performed. If merge is actually done, *MergeDone is set to 1.
-*/
-static int doProfileMerging(FILE *ProfileFile, int *MergeDone) {
- uint64_t ProfileFileSize;
- char *ProfileBuffer;
-
+/* Get the size of the profile file. If there are any errors, print an error
+ * message under the assumption that the profile is being read for merging
+ * purposes, and return -1. Otherwise return 0 and store the file size in the
+ * output param \p ProfileFileSize. */
+static int getProfileFileSizeForMerging(FILE *ProfileFile,
+ uint64_t *ProfileFileSize) {
if (fseek(ProfileFile, 0L, SEEK_END) == -1) {
PROF_ERR("Unable to merge profile data, unable to get size: %s\n",
strerror(errno));
return -1;
}
- ProfileFileSize = ftell(ProfileFile);
+ *ProfileFileSize = ftell(ProfileFile);
/* Restore file offset. */
if (fseek(ProfileFile, 0L, SEEK_SET) == -1) {
@@ -187,28 +203,60 @@ static int doProfileMerging(FILE *ProfileFile, int *MergeDone) {
return -1;
}
- /* Nothing to merge. */
- if (ProfileFileSize < sizeof(__llvm_profile_header)) {
- if (ProfileFileSize)
- PROF_WARN("Unable to merge profile data: %s\n",
- "source profile file is too small.");
- return 0;
+ if (*ProfileFileSize > 0 &&
+ *ProfileFileSize < sizeof(__llvm_profile_header)) {
+ PROF_WARN("Unable to merge profile data: %s\n",
+ "source profile file is too small.");
+ return -1;
}
+ return 0;
+}
- ProfileBuffer = mmap(NULL, ProfileFileSize, PROT_READ, MAP_SHARED | MAP_FILE,
- fileno(ProfileFile), 0);
- if (ProfileBuffer == MAP_FAILED) {
+/* mmap() \p ProfileFile for profile merging purposes, assuming that an
+ * exclusive lock is held on the file and that \p ProfileFileSize is the
+ * length of the file. Return the mmap'd buffer in the output variable
+ * \p ProfileBuffer. Returns -1 on failure. On success, the caller is
+ * responsible for unmapping the mmap'd buffer in \p ProfileBuffer. */
+static int mmapProfileForMerging(FILE *ProfileFile, uint64_t ProfileFileSize,
+ char **ProfileBuffer) {
+ *ProfileBuffer = mmap(NULL, ProfileFileSize, PROT_READ, MAP_SHARED | MAP_FILE,
+ fileno(ProfileFile), 0);
+ if (*ProfileBuffer == MAP_FAILED) {
PROF_ERR("Unable to merge profile data, mmap failed: %s\n",
strerror(errno));
return -1;
}
- if (__llvm_profile_check_compatibility(ProfileBuffer, ProfileFileSize)) {
- (void)munmap(ProfileBuffer, ProfileFileSize);
+ if (__llvm_profile_check_compatibility(*ProfileBuffer, ProfileFileSize)) {
+ (void)munmap(*ProfileBuffer, ProfileFileSize);
PROF_WARN("Unable to merge profile data: %s\n",
"source profile file is not compatible.");
- return 0;
+ return -1;
}
+ return 0;
+}
+
+/* Read profile data in \c ProfileFile and merge with in-memory
+   profile counters. Returns -1 if there is a fatal error, otherwise
+ 0 is returned. Returning 0 does not mean merge is actually
+ performed. If merge is actually done, *MergeDone is set to 1.
+*/
+static int doProfileMerging(FILE *ProfileFile, int *MergeDone) {
+ uint64_t ProfileFileSize;
+ char *ProfileBuffer;
+
+ /* Get the size of the profile on disk. */
+ if (getProfileFileSizeForMerging(ProfileFile, &ProfileFileSize) == -1)
+ return -1;
+
+ /* Nothing to merge. */
+ if (!ProfileFileSize)
+ return 0;
+
+ /* mmap() the profile and check that it is compatible with the data in
+ * the current image. */
+ if (mmapProfileForMerging(ProfileFile, ProfileFileSize, &ProfileBuffer) == -1)
+ return -1;
/* Now start merging */
__llvm_profile_merge_from_buffer(ProfileBuffer, ProfileFileSize);
@@ -330,6 +378,8 @@ static int writeOrderFile(const char *OutputName) {
return RetVal;
}
+#define LPROF_INIT_ONCE_ENV "__LLVM_PROFILE_RT_INIT_ONCE"
+
static void truncateCurrentFile(void) {
const char *Filename;
char *FilenameBuf;
@@ -342,13 +392,26 @@ static void truncateCurrentFile(void) {
if (!Filename)
return;
- /* By pass file truncation to allow online raw profile
- * merging. */
- if (lprofCurFilename.MergePoolSize)
+ /* Only create the profile directory and truncate an existing profile once.
+   * In continuous mode, this is necessary, as the profile is written to by the
+ * runtime initializer. */
+ int initialized = getenv(LPROF_INIT_ONCE_ENV) != NULL;
+ if (initialized)
return;
+#if defined(_WIN32)
+ _putenv(LPROF_INIT_ONCE_ENV "=" LPROF_INIT_ONCE_ENV);
+#else
+ setenv(LPROF_INIT_ONCE_ENV, LPROF_INIT_ONCE_ENV, 1);
+#endif
+ /* Create the profile dir (even if online merging is enabled), so that
+ * the profile file can be set up if continuous mode is enabled. */
createProfileDir(Filename);
+  /* Bypass file truncation to allow online raw profile merging. */
+ if (lprofCurFilename.MergePoolSize)
+ return;
+
/* Truncate the file. Later we'll reopen and append. */
File = fopen(Filename, "w");
if (!File)
@@ -356,6 +419,165 @@ static void truncateCurrentFile(void) {
fclose(File);
}
+#ifndef _MSC_VER
+static void assertIsZero(int *i) {
+ if (*i)
+ PROF_WARN("Expected flag to be 0, but got: %d\n", *i);
+}
+#endif
+
+#if !defined(__Fuchsia__) && !defined(_WIN32)
+/* Write a partial profile to \p Filename, which is required to be backed by
+ * the open file object \p File. */
+static int writeProfileWithFileObject(const char *Filename, FILE *File) {
+ setProfileFile(File);
+ int rc = writeFile(Filename);
+ if (rc)
+ PROF_ERR("Failed to write file \"%s\": %s\n", Filename, strerror(errno));
+ setProfileFile(NULL);
+ return rc;
+}
+
+/* Unlock the profile \p File and clear the unlock flag. */
+static void unlockProfile(int *ProfileRequiresUnlock, FILE *File) {
+ if (!*ProfileRequiresUnlock) {
+ PROF_WARN("%s", "Expected to require profile unlock\n");
+ }
+ lprofUnlockFileHandle(File);
+ *ProfileRequiresUnlock = 0;
+}
+#endif // !defined(__Fuchsia__) && !defined(_WIN32)
+
+static void initializeProfileForContinuousMode(void) {
+ if (!__llvm_profile_is_continuous_mode_enabled())
+ return;
+
+#if defined(__Fuchsia__) || defined(_WIN32)
+ PROF_ERR("%s\n", "Continuous mode not yet supported on Fuchsia or Windows.");
+#else // defined(__Fuchsia__) || defined(_WIN32)
+ /* Get the sizes of various profile data sections. Taken from
+ * __llvm_profile_get_size_for_buffer(). */
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+ const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *NamesBegin = __llvm_profile_begin_names();
+ const char *NamesEnd = __llvm_profile_end_names();
+ const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
+ uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ uint64_t CountersSize = CountersEnd - CountersBegin;
+
+ /* Check that the counter and data sections in this image are page-aligned. */
+ unsigned PageSize = getpagesize();
+ if ((intptr_t)CountersBegin % PageSize != 0) {
+ PROF_ERR("Counters section not page-aligned (start = %p, pagesz = %u).\n",
+ CountersBegin, PageSize);
+ return;
+ }
+ if ((intptr_t)DataBegin % PageSize != 0) {
+ PROF_ERR("Data section not page-aligned (start = %p, pagesz = %u).\n",
+ DataBegin, PageSize);
+ return;
+ }
+
+ int Length = getCurFilenameLength();
+ char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ const char *Filename = getCurFilename(FilenameBuf, 0);
+ if (!Filename)
+ return;
+
+ FILE *File = NULL;
+ off_t CurrentFileOffset = 0;
+ off_t OffsetModPage = 0;
+
+ /* Whether an exclusive lock on the profile must be dropped after init.
+ * Use a cleanup to warn if the unlock does not occur. */
+ COMPILER_RT_CLEANUP(assertIsZero) int ProfileRequiresUnlock = 0;
+
+ if (!doMerging()) {
+ /* We are not merging profiles, so open the raw profile in append mode. */
+ File = fopen(Filename, "a+b");
+ if (!File)
+ return;
+
+ /* Check that the offset within the file is page-aligned. */
+ CurrentFileOffset = ftello(File);
+ OffsetModPage = CurrentFileOffset % PageSize;
+ if (OffsetModPage != 0) {
+      PROF_ERR("Continuous counter sync mode is enabled, but raw profile is not "
+ "page-aligned. CurrentFileOffset = %" PRIu64 ", pagesz = %u.\n",
+ (uint64_t)CurrentFileOffset, PageSize);
+ return;
+ }
+
+ /* Grow the profile so that mmap() can succeed. Leak the file handle, as
+ * the file should stay open. */
+ if (writeProfileWithFileObject(Filename, File) != 0)
+ return;
+ } else {
+ /* We are merging profiles. Map the counter section as shared memory into
+ * the profile, i.e. into each participating process. An increment in one
+ * process should be visible to every other process with the same counter
+ * section mapped. */
+ File = lprofOpenFileEx(Filename);
+ if (!File)
+ return;
+
+ ProfileRequiresUnlock = 1;
+
+ uint64_t ProfileFileSize;
+ if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1)
+ return unlockProfile(&ProfileRequiresUnlock, File);
+
+ if (ProfileFileSize == 0) {
+ /* Grow the profile so that mmap() can succeed. Leak the file handle, as
+ * the file should stay open. */
+ if (writeProfileWithFileObject(Filename, File) != 0)
+ return unlockProfile(&ProfileRequiresUnlock, File);
+ } else {
+ /* The merged profile has a non-zero length. Check that it is compatible
+ * with the data in this process. */
+ char *ProfileBuffer;
+ if (mmapProfileForMerging(File, ProfileFileSize, &ProfileBuffer) == -1 ||
+ munmap(ProfileBuffer, ProfileFileSize) == -1)
+ return unlockProfile(&ProfileRequiresUnlock, File);
+ }
+ }
+
+ int Fileno = fileno(File);
+
+ /* Determine how much padding is needed before/after the counters and after
+ * the names. */
+ uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
+ PaddingBytesAfterNames;
+ __llvm_profile_get_padding_sizes_for_counters(
+ DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
+ &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+
+ uint64_t PageAlignedCountersLength =
+ (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters;
+ uint64_t FileOffsetToCounters =
+ CurrentFileOffset + sizeof(__llvm_profile_header) +
+ (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters;
+
+ uint64_t *CounterMmap = (uint64_t *)mmap(
+ (void *)CountersBegin, PageAlignedCountersLength, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToCounters);
+ if (CounterMmap != CountersBegin) {
+ PROF_ERR(
+ "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
+ " - CountersBegin: %p\n"
+ " - PageAlignedCountersLength: %" PRIu64 "\n"
+ " - Fileno: %d\n"
+ " - FileOffsetToCounters: %" PRIu64 "\n",
+ strerror(errno), CountersBegin, PageAlignedCountersLength, Fileno,
+ FileOffsetToCounters);
+ }
+
+ unlockProfile(&ProfileRequiresUnlock, File);
+#endif // defined(__Fuchsia__) || defined(_WIN32)
+}
+
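
The function above is the heart of continuous mode: it replaces the pages backing the in-memory counter section with pages backed by the profile file, so every counter increment lands in the file without an explicit write. A minimal standalone sketch of that remapping step (the file name, offset, and sizes here are illustrative, not what the runtime uses):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
      long Page = sysconf(_SC_PAGESIZE);
      /* Hypothetical profile file, grown so the mapping can succeed. */
      int Fd = open("counters.bin", O_RDWR | O_CREAT, 0644);
      if (Fd < 0 || ftruncate(Fd, Page) != 0)
        return 1;
      /* Page-aligned region standing in for the counter section. */
      void *Counters = mmap(NULL, Page, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (Counters == MAP_FAILED)
        return 1;
      /* The continuous-mode trick: replace those pages with file-backed
       * ones. Both the address and the file offset must be page-aligned. */
      void *P = mmap(Counters, Page, PROT_READ | PROT_WRITE,
                     MAP_FIXED | MAP_SHARED, Fd, /*Offset=*/0);
      if (P != Counters) {
        fprintf(stderr, "mmap failed: %s\n", strerror(errno));
        return 1;
      }
      ((uint64_t *)Counters)[0] = 42; /* this store is now visible in the file */
      return 0;
    }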
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
@@ -366,12 +588,22 @@ static void resetFilenameToDefault(void) {
lprofCurFilename.PNS = PNS_default;
}
-static int containsMergeSpecifier(const char *FilenamePat, int I) {
- return (FilenamePat[I] == 'm' ||
- (FilenamePat[I] >= '1' && FilenamePat[I] <= '9' &&
- /* If FilenamePat[I] is not '\0', the next byte is guaranteed
- * to be in-bound as the string is null terminated. */
- FilenamePat[I + 1] == 'm'));
+static unsigned getMergePoolSize(const char *FilenamePat, int *I) {
+ unsigned J = 0, Num = 0;
+ for (;; ++J) {
+ char C = FilenamePat[*I + J];
+ if (C == 'm') {
+ *I += J;
+ return Num ? Num : 1;
+ }
+ if (C < '0' || C > '9')
+ break;
+ Num = Num * 10 + C - '0';
+
+ /* If FilenamePat[*I+J] is between '0' and '9', the next byte is guaranteed
+ * to be in bounds, as the string is null-terminated. */
+ }
+ return 0;
}
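
For context, the new helper accepts an optional decimal pool size immediately before the 'm', leaving *I on the 'm' so the caller's loop advances past it; "%m" yields 1 and "%42m" yields 42. A small self-contained driver over an equivalent copy of the loop (re-declared here only so the sketch compiles on its own):

    #include <assert.h>
    #include <stdio.h>

    /* Standalone copy of the parsing loop, for illustration. */
    static unsigned get_merge_pool_size(const char *Pat, int *I) {
      unsigned J = 0, Num = 0;
      for (;; ++J) {
        char C = Pat[*I + J];
        if (C == 'm') {
          *I += J;
          return Num ? Num : 1;
        }
        if (C < '0' || C > '9')
          break;
        Num = Num * 10 + C - '0';
      }
      return 0;
    }

    int main(void) {
      int I = 0;
      assert(get_merge_pool_size("m", &I) == 1);
      I = 0;
      assert(get_merge_pool_size("42m", &I) == 42 && I == 2); /* I left on 'm' */
      I = 0;
      assert(get_merge_pool_size("4x", &I) == 0); /* not a merge specifier */
      puts("ok");
      return 0;
    }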
/* Parses the pattern string \p FilenamePat and stores the result to
@@ -419,19 +651,26 @@ static int parseFilenamePattern(const char *FilenamePat,
FilenamePat);
return -1;
}
- } else if (containsMergeSpecifier(FilenamePat, I)) {
+ } else if (FilenamePat[I] == 'c') {
+ if (__llvm_profile_is_continuous_mode_enabled()) {
+ PROF_WARN("%%c specifier can only be specified once in %s.\n",
+ FilenamePat);
+ return -1;
+ }
+
+ __llvm_profile_enable_continuous_mode();
+ I++; /* advance to 'c' */
+ } else {
+ unsigned MergePoolSize = getMergePoolSize(FilenamePat, &I);
+ if (!MergePoolSize)
+ continue;
if (MergingEnabled) {
PROF_WARN("%%m specifier can only be specified once in %s.\n",
FilenamePat);
return -1;
}
MergingEnabled = 1;
- if (FilenamePat[I] == 'm')
- lprofCurFilename.MergePoolSize = 1;
- else {
- lprofCurFilename.MergePoolSize = FilenamePat[I] - '0';
- I++; /* advance to 'm' */
- }
+ lprofCurFilename.MergePoolSize = MergePoolSize;
}
}
@@ -447,6 +686,7 @@ static void parseAndSetFilename(const char *FilenamePat,
const char *OldFilenamePat = lprofCurFilename.FilenamePat;
ProfileNameSpecifier OldPNS = lprofCurFilename.PNS;
+ /* The old profile name specifier takes precedence over the new one. */
if (PNS < OldPNS)
return;
@@ -475,11 +715,12 @@ static void parseAndSetFilename(const char *FilenamePat,
}
truncateCurrentFile();
+ initializeProfileForContinuousMode();
}
/* Return buffer length that is required to store the current profile
* filename with PID and hostname substitutions. */
-/* The length to hold uint64_t followed by 2 digit pool id including '_' */
+/* The length to hold uint64_t followed by a 3-digit pool id, including '_' */
#define SIGLEN 24
static int getCurFilenameLength() {
int Len;
@@ -511,7 +752,8 @@ static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) {
return 0;
if (!(lprofCurFilename.NumPids || lprofCurFilename.NumHosts ||
- lprofCurFilename.MergePoolSize)) {
+ lprofCurFilename.MergePoolSize ||
+ __llvm_profile_is_continuous_mode_enabled())) {
if (!ForceUseBuf)
return lprofCurFilename.FilenamePat;
@@ -532,18 +774,18 @@ static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) {
} else if (FilenamePat[I] == 'h') {
memcpy(FilenameBuf + J, lprofCurFilename.Hostname, HostNameLength);
J += HostNameLength;
- } else if (containsMergeSpecifier(FilenamePat, I)) {
- char LoadModuleSignature[SIGLEN];
+ } else {
+ if (!getMergePoolSize(FilenamePat, &I))
+ continue;
+ char LoadModuleSignature[SIGLEN + 1];
int S;
int ProfilePoolId = getpid() % lprofCurFilename.MergePoolSize;
- S = snprintf(LoadModuleSignature, SIGLEN, "%" PRIu64 "_%d",
+ S = snprintf(LoadModuleSignature, SIGLEN + 1, "%" PRIu64 "_%d",
lprofGetLoadModuleSignature(), ProfilePoolId);
if (S == -1 || S > SIGLEN)
S = SIGLEN;
memcpy(FilenameBuf + J, LoadModuleSignature, S);
J += S;
- if (FilenamePat[I] != 'm')
- I++;
}
/* Drop any unknown substitutions. */
} else
@@ -646,6 +888,8 @@ void __llvm_profile_initialize_file(void) {
*/
COMPILER_RT_VISIBILITY
void __llvm_profile_set_filename(const char *FilenamePat) {
+ if (__llvm_profile_is_continuous_mode_enabled())
+ return;
parseAndSetFilename(FilenamePat, PNS_runtime_api, 1);
}
@@ -660,7 +904,7 @@ int __llvm_profile_write_file(void) {
char *FilenameBuf;
int PDeathSig = 0;
- if (lprofProfileDumped()) {
+ if (lprofProfileDumped() || __llvm_profile_is_continuous_mode_enabled()) {
PROF_NOTE("Profile data not written to file: %s.\n", "already written");
return 0;
}
diff --git a/compiler-rt/lib/profile/InstrProfilingInternal.h b/compiler-rt/lib/profile/InstrProfilingInternal.h
index 66f8a0677014..0cea4876f0ae 100644
--- a/compiler-rt/lib/profile/InstrProfilingInternal.h
+++ b/compiler-rt/lib/profile/InstrProfilingInternal.h
@@ -41,11 +41,18 @@ int __llvm_profile_write_buffer_internal(
/*!
* The data structure describing the data to be written by the
* low level writer callback function.
+ *
+ * If \ref ProfDataIOVec.Data is null, and \ref ProfDataIOVec.UseZeroPadding is
+ * 0, the write is skipped (the writer simply advances ElmSize*NumElm bytes).
+ *
+ * If \ref ProfDataIOVec.Data is null, and \ref ProfDataIOVec.UseZeroPadding is
+ * nonzero, ElmSize*NumElm zero bytes are written.
*/
typedef struct ProfDataIOVec {
const void *Data;
size_t ElmSize;
size_t NumElm;
+ int UseZeroPadding;
} ProfDataIOVec;
struct ProfDataWriter;
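
The new UseZeroPadding field lets the writer emit padding without a source buffer: with Data null and UseZeroPadding nonzero it writes zero bytes, and with Data null and UseZeroPadding zero it merely advances. A hedged sketch of a FILE*-based writer honoring those rules (not the runtime's actual writer):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct ProfDataIOVec {
      const void *Data;
      size_t ElmSize;
      size_t NumElm;
      int UseZeroPadding;
    } ProfDataIOVec;

    /* Write IOVecs to F per the documented NULL-Data rules; 0 on success. */
    static int write_iovecs(FILE *F, const ProfDataIOVec *IOVecs, size_t N) {
      static const char Zeroes[4096] = {0};
      for (size_t I = 0; I < N; ++I) {
        size_t Len = IOVecs[I].ElmSize * IOVecs[I].NumElm;
        if (IOVecs[I].Data) {
          if (fwrite(IOVecs[I].Data, 1, Len, F) != Len)
            return -1;
        } else if (IOVecs[I].UseZeroPadding) {
          for (size_t Left = Len; Left;) { /* zero-fill in chunks */
            size_t Chunk = Left < sizeof(Zeroes) ? Left : sizeof(Zeroes);
            if (fwrite(Zeroes, 1, Chunk, F) != Chunk)
              return -1;
            Left -= Chunk;
          }
        } else if (fseek(F, (long)Len, SEEK_CUR) != 0) { /* skip Len bytes */
          return -1;
        }
      }
      return 0;
    }

    int main(void) {
      FILE *F = fopen("out.bin", "wb"); /* hypothetical output */
      if (!F)
        return 1;
      uint64_t Header = 0xff;
      ProfDataIOVec V[] = {
          {&Header, sizeof(Header), 1, 0}, /* real data */
          {NULL, 1, 24, 1},                /* 24 zero bytes of padding */
      };
      int Rc = write_iovecs(F, V, 2);
      fclose(F);
      return Rc ? 1 : 0;
    }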
diff --git a/compiler-rt/lib/profile/InstrProfilingMerge.c b/compiler-rt/lib/profile/InstrProfilingMerge.c
index 44dce7cc9f62..0fd9b2bcd41f 100644
--- a/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -14,7 +14,7 @@
#include "InstrProfilingUtil.h"
#define INSTR_PROF_VALUE_PROF_DATA
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
COMPILER_RT_VISIBILITY
void (*VPMergeHook)(ValueProfData *, __llvm_profile_data *);
diff --git a/compiler-rt/lib/profile/InstrProfilingMergeFile.c b/compiler-rt/lib/profile/InstrProfilingMergeFile.c
index b853f15b4737..8923ba21cc58 100644
--- a/compiler-rt/lib/profile/InstrProfilingMergeFile.c
+++ b/compiler-rt/lib/profile/InstrProfilingMergeFile.c
@@ -16,7 +16,7 @@
#include "InstrProfilingUtil.h"
#define INSTR_PROF_VALUE_PROF_DATA
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
/* Merge value profile data pointed to by SrcValueProfData into
 * in-memory profile counters pointed to by DstData. */
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c b/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
index 2388871a2d54..23b7efbe672b 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
@@ -128,6 +128,8 @@ static uint32_t lprofVMOWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs,
__llvm_profile_offset, Length);
if (Status != ZX_OK)
return -1;
+ } else if (IOVecs[I].UseZeroPadding) {
+ /* Resizing the VMO should zero fill. */
}
__llvm_profile_offset += Length;
}
diff --git a/compiler-rt/lib/profile/InstrProfilingPort.h b/compiler-rt/lib/profile/InstrProfilingPort.h
index da5b5c0f8bb7..20cf5d660c6a 100644
--- a/compiler-rt/lib/profile/InstrProfilingPort.h
+++ b/compiler-rt/lib/profile/InstrProfilingPort.h
@@ -22,6 +22,7 @@
/* Need to include <stdio.h> and <io.h> */
#define COMPILER_RT_FTRUNCATE(f,l) _chsize(_fileno(f),l)
#define COMPILER_RT_ALWAYS_INLINE __forceinline
+#define COMPILER_RT_CLEANUP(x)
#elif __GNUC__
#define COMPILER_RT_ALIGNAS(x) __attribute__((aligned(x)))
#define COMPILER_RT_VISIBILITY __attribute__((visibility("hidden")))
@@ -29,6 +30,7 @@
#define COMPILER_RT_ALLOCA __builtin_alloca
#define COMPILER_RT_FTRUNCATE(f,l) ftruncate(fileno(f),l)
#define COMPILER_RT_ALWAYS_INLINE inline __attribute((always_inline))
+#define COMPILER_RT_CLEANUP(x) __attribute__((cleanup(x)))
#endif
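
COMPILER_RT_CLEANUP wraps the GCC/Clang cleanup attribute, which calls a function on a local variable when it leaves scope; on MSVC the macro expands to nothing, so it can only be relied on for diagnostics, as the ProfileRequiresUnlock warning earlier in this patch does. A small sketch assuming a GCC/Clang toolchain:

    #include <stdio.h>

    #define COMPILER_RT_CLEANUP(x) __attribute__((cleanup(x)))

    static void assert_is_zero(int *V) {
      if (*V)
        fprintf(stderr, "warning: flag still set at scope exit\n");
    }

    int main(void) {
      COMPILER_RT_CLEANUP(assert_is_zero) int NeedsUnlock = 1;
      /* ... do work; on the happy path the flag is cleared ... */
      NeedsUnlock = 0;
      return 0; /* assert_is_zero(&NeedsUnlock) runs automatically here */
    }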
#if defined(__APPLE__)
@@ -99,6 +101,17 @@
(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */
+#if defined(_WIN32)
+#include <windows.h>
+static inline size_t getpagesize() {
+ SYSTEM_INFO S;
+ GetNativeSystemInfo(&S);
+ return S.dwPageSize;
+}
+#else /* defined(_WIN32) */
+#include <unistd.h>
+#endif /* defined(_WIN32) */
+
#define PROF_ERR(Format, ...) \
fprintf(stderr, "LLVM Profile Error: " Format, __VA_ARGS__);
diff --git a/compiler-rt/lib/profile/InstrProfilingRuntime.cpp b/compiler-rt/lib/profile/InstrProfilingRuntime.cpp
index 679186ef8309..5dff09d70632 100644
--- a/compiler-rt/lib/profile/InstrProfilingRuntime.cpp
+++ b/compiler-rt/lib/profile/InstrProfilingRuntime.cpp
@@ -19,8 +19,9 @@ namespace {
class RegisterRuntime {
public:
RegisterRuntime() {
- __llvm_profile_register_write_file_atexit();
__llvm_profile_initialize_file();
+ if (!__llvm_profile_is_continuous_mode_enabled())
+ __llvm_profile_register_write_file_atexit();
}
};
diff --git a/compiler-rt/lib/profile/InstrProfilingUtil.c b/compiler-rt/lib/profile/InstrProfilingUtil.c
index 13301f341fc5..bf5a9670fe18 100644
--- a/compiler-rt/lib/profile/InstrProfilingUtil.c
+++ b/compiler-rt/lib/profile/InstrProfilingUtil.c
@@ -207,8 +207,9 @@ COMPILER_RT_VISIBILITY FILE *lprofOpenFileEx(const char *ProfileName) {
f = fdopen(fd, "r+b");
#elif defined(_WIN32)
// FIXME: Use the wide variants to handle Unicode filenames.
- HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE, 0, 0,
- OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
+ HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, 0, OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, 0);
if (h == INVALID_HANDLE_VALUE)
return NULL;
@@ -218,6 +219,10 @@ COMPILER_RT_VISIBILITY FILE *lprofOpenFileEx(const char *ProfileName) {
return NULL;
}
+ if (lprofLockFd(fd) != 0)
+ PROF_WARN("Data may be corrupted during profile merging : %s\n",
+ "Fail to obtain file lock due to system limit.");
+
f = _fdopen(fd, "r+b");
if (f == 0) {
CloseHandle(h);
diff --git a/compiler-rt/lib/profile/InstrProfilingUtil.h b/compiler-rt/lib/profile/InstrProfilingUtil.h
index efba94ca7639..f0e29a8803a0 100644
--- a/compiler-rt/lib/profile/InstrProfilingUtil.h
+++ b/compiler-rt/lib/profile/InstrProfilingUtil.h
@@ -30,9 +30,11 @@ int lprofUnlockFileHandle(FILE *F);
* lock for exclusive access. The caller will block
* if the lock is already held by another process. */
FILE *lprofOpenFileEx(const char *Filename);
-/* PS4 doesn't have getenv. Define a shim. */
+/* PS4 doesn't have setenv/getenv. Define a shim. */
#if __ORBIS__
static inline char *getenv(const char *name) { return NULL; }
+static inline int setenv(const char *name, const char *value, int overwrite)
+{ return 0; }
#endif /* #if __ORBIS__ */
/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
diff --git a/compiler-rt/lib/profile/InstrProfilingValue.c b/compiler-rt/lib/profile/InstrProfilingValue.c
index b7c71768c2c3..fd53cac3dff3 100644
--- a/compiler-rt/lib/profile/InstrProfilingValue.c
+++ b/compiler-rt/lib/profile/InstrProfilingValue.c
@@ -17,7 +17,7 @@
#define INSTR_PROF_VALUE_PROF_DATA
#define INSTR_PROF_COMMON_API_IMPL
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
static int hasStaticCounters = 1;
static int OutOfNodesWarnings = 0;
diff --git a/compiler-rt/lib/profile/InstrProfilingWriter.c b/compiler-rt/lib/profile/InstrProfilingWriter.c
index d910cbb8f2fc..c34e110a6959 100644
--- a/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -14,9 +14,10 @@
#include "InstrProfiling.h"
#include "InstrProfilingInternal.h"
+#include "InstrProfilingPort.h"
#define INSTR_PROF_VALUE_PROF_DATA
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
COMPILER_RT_VISIBILITY void (*FreeHook)(void *) = NULL;
static ProfBufferIO TheBufferIO;
@@ -40,6 +41,9 @@ COMPILER_RT_VISIBILITY uint32_t lprofBufferWriter(ProfDataWriter *This,
size_t Length = IOVecs[I].ElmSize * IOVecs[I].NumElm;
if (IOVecs[I].Data)
memcpy(*Buffer, IOVecs[I].Data, Length);
+ else if (IOVecs[I].UseZeroPadding) {
+ /* Allocating the buffer should zero fill. */
+ }
*Buffer += Length;
}
return 0;
@@ -84,7 +88,7 @@ lprofBufferIOWrite(ProfBufferIO *BufferIO, const uint8_t *Data, uint32_t Size) {
return -1;
}
/* Special case, bypass the buffer completely. */
- ProfDataIOVec IO[] = {{Data, sizeof(uint8_t), Size}};
+ ProfDataIOVec IO[] = {{Data, sizeof(uint8_t), Size, 0}};
if (Size > BufferIO->BufferSz) {
if (BufferIO->FileWriter->Write(BufferIO->FileWriter, IO, 1))
return -1;
@@ -103,7 +107,7 @@ lprofBufferIOWrite(ProfBufferIO *BufferIO, const uint8_t *Data, uint32_t Size) {
COMPILER_RT_VISIBILITY int lprofBufferIOFlush(ProfBufferIO *BufferIO) {
if (BufferIO->CurOffset) {
ProfDataIOVec IO[] = {
- {BufferIO->BufferStart, sizeof(uint8_t), BufferIO->CurOffset}};
+ {BufferIO->BufferStart, sizeof(uint8_t), BufferIO->CurOffset, 0}};
if (BufferIO->FileWriter->Write(BufferIO->FileWriter, IO, 1))
return -1;
BufferIO->CurOffset = 0;
@@ -257,10 +261,6 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
const uint64_t CountersSize = CountersEnd - CountersBegin;
const uint64_t NamesSize = NamesEnd - NamesBegin;
- const uint64_t Padding = __llvm_profile_get_num_padding_bytes(NamesSize);
-
- /* Enough zeroes for padding. */
- const char Zeroes[sizeof(uint64_t)] = {0};
/* Create the header. */
__llvm_profile_header Header;
@@ -268,19 +268,33 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
if (!DataSize)
return 0;
+ /* Determine how much padding is needed before/after the counters and after
+ * the names. */
+ uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
+ PaddingBytesAfterNames;
+ __llvm_profile_get_padding_sizes_for_counters(
+ DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
+ &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+
/* Initialize header structure. */
#define INSTR_PROF_RAW_HEADER(Type, Name, Init) Header.Name = Init;
-#include "InstrProfData.inc"
+#include "profile/InstrProfData.inc"
/* Write the data. */
ProfDataIOVec IOVec[] = {
- {&Header, sizeof(__llvm_profile_header), 1},
- {DataBegin, sizeof(__llvm_profile_data), DataSize},
- {CountersBegin, sizeof(uint64_t), CountersSize},
- {SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize},
- {Zeroes, sizeof(uint8_t), Padding}};
+ {&Header, sizeof(__llvm_profile_header), 1, 0},
+ {DataBegin, sizeof(__llvm_profile_data), DataSize, 0},
+ {NULL, sizeof(uint8_t), PaddingBytesBeforeCounters, 1},
+ {CountersBegin, sizeof(uint64_t), CountersSize, 0},
+ {NULL, sizeof(uint8_t), PaddingBytesAfterCounters, 1},
+ {SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize, 0},
+ {NULL, sizeof(uint8_t), PaddingBytesAfterNames, 1}};
if (Writer->Write(Writer, IOVec, sizeof(IOVec) / sizeof(*IOVec)))
return -1;
+ /* Value profiling is not yet supported in continuous mode. */
+ if (__llvm_profile_is_continuous_mode_enabled())
+ return 0;
+
return writeValueProfData(Writer, VPDataReader, DataBegin, DataEnd);
}
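
In continuous mode the counters must begin at a page boundary in the file, which is why padding is computed before and after them. The arithmetic reduces to rounding the running file offset up to the next page; a sketch of that round-up (the real __llvm_profile_get_padding_sizes_for_counters may differ in detail):

    #include <stdint.h>
    #include <stdio.h>

    /* Padding needed so the next section starts on a PageSize boundary.
     * PageSize is assumed to be a power of two. */
    static uint64_t padding_to_page(uint64_t Offset, uint64_t PageSize) {
      return (PageSize - (Offset % PageSize)) % PageSize;
    }

    int main(void) {
      uint64_t PageSize = 4096;
      uint64_t EndOfData = 24 + 1000; /* arbitrary example offset */
      uint64_t Pad = padding_to_page(EndOfData, PageSize);
      printf("pad %llu bytes so counters start at offset %llu\n",
             (unsigned long long)Pad, (unsigned long long)(EndOfData + Pad));
      return 0;
    }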
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index 8d07906cca03..906d4af7f5ee 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -213,7 +213,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
// Align allocation size.
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
- uptr size_to_allocate = Max(size, GetPageSizeCached());
+ uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
allocated_current_ =
(char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
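
The one-line allocator change matters whenever size exceeds a page: Max(size, page) can pass mmap a length that is not a page multiple, while RoundUpTo keeps every chunk a whole number of pages. A quick comparison under assumed 4 KiB pages:

    #include <stdio.h>

    /* b is a power of two in practice. */
    static unsigned long round_up_to(unsigned long size, unsigned long b) {
      return (size + b - 1) / b * b;
    }

    int main(void) {
      unsigned long page = 4096, size = 3 * page + 100;
      unsigned long max_based = size > page ? size : page; /* old: 12388 */
      printf("Max: %lu, RoundUpTo: %lu\n", max_based, round_up_to(size, page));
      return 0; /* RoundUpTo yields 16384, a whole number of pages */
    }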
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 87b8f02b5b73..3b52172c483c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -552,7 +552,7 @@ bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
public:
- InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
+ InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
explicit InternalMmapVector(uptr cnt) {
InternalMmapVectorNoCtor<T>::Initialize(cnt);
this->resize(cnt);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 50e3558b52e8..2a4ab7e67a5c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -79,13 +79,15 @@
#define devname __devname50
#define fgetpos __fgetpos50
#define fsetpos __fsetpos50
+#define fstatvfs __fstatvfs90
+#define fstatvfs1 __fstatvfs190
#define fts_children __fts_children60
#define fts_close __fts_close60
#define fts_open __fts_open60
#define fts_read __fts_read60
#define fts_set __fts_set60
#define getitimer __getitimer50
-#define getmntinfo __getmntinfo13
+#define getmntinfo __getmntinfo90
#define getpwent __getpwent50
#define getpwnam __getpwnam50
#define getpwnam_r __getpwnam_r50
@@ -95,6 +97,7 @@
#define getutxent __getutxent50
#define getutxid __getutxid50
#define getutxline __getutxline50
+#define getvfsstat __getvfsstat90
#define pututxline __pututxline50
#define glob __glob30
#define gmtime __gmtime50
@@ -116,6 +119,8 @@
#define sigprocmask __sigprocmask14
#define sigtimedwait __sigtimedwait50
#define stat __stat50
+#define statvfs __statvfs90
+#define statvfs1 __statvfs190
#define time __time50
#define times __times13
#define unvis __unvis50
@@ -4177,11 +4182,27 @@ INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
#if SANITIZER_INTERCEPT___PTHREAD_MUTEX
INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
- return WRAP(pthread_mutex_lock)(m);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_lock, m);
+ COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
+ int res = REAL(__pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
}
INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
- return WRAP(pthread_mutex_unlock)(m);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_unlock, m);
+ COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
+ int res = REAL(__pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
}
#define INIT___PTHREAD_MUTEX_LOCK \
@@ -9623,6 +9644,95 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
#define INIT_GETENTROPY
#endif
+#if SANITIZER_INTERCEPT_QSORT
+// Glibc qsort uses a temporary buffer allocated either on stack or on heap.
+// Poisoned memory from there may get copied into the comparator arguments,
+// where it needs to be dealt with. But even that is not enough - the results of
+// the sort may be copied into the input/output array based on the results of
+// the comparator calls, but directly from the temp memory, bypassing the
+// unpoisoning done in wrapped_qsort_compar. We deal with this by, again,
+// unpoisoning the entire array after the sort is done.
+//
+// We cannot check that the entire array is initialized at the beginning: it is
+// fine for parts of the sorted objects to contain uninitialized memory, e.g. as
+// padding in structs.
+typedef int (*qsort_compar_f)(const void *, const void *);
+static THREADLOCAL qsort_compar_f qsort_compar;
+static THREADLOCAL SIZE_T qsort_size;
+int wrapped_qsort_compar(const void *a, const void *b) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_size);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_size);
+ return qsort_compar(a, b);
+}
+
+INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);
+ // Run the comparator over all array elements to detect any memory issues.
+ if (nmemb > 1) {
+ for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+ void *p = (void *)((char *)base + i * size);
+ void *q = (void *)((char *)base + (i + 1) * size);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ compar(p, q);
+ }
+ }
+ qsort_compar_f old_compar = qsort_compar;
+ qsort_compar = compar;
+ SIZE_T old_size = qsort_size;
+ qsort_size = size;
+ REAL(qsort)(base, nmemb, size, wrapped_qsort_compar);
+ qsort_compar = old_compar;
+ qsort_size = old_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+#else
+#define INIT_QSORT
+#endif
+
+#if SANITIZER_INTERCEPT_QSORT_R
+typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
+static THREADLOCAL qsort_r_compar_f qsort_r_compar;
+static THREADLOCAL SIZE_T qsort_r_size;
+int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_r_size);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_r_size);
+ return qsort_r_compar(a, b, arg);
+}
+
+INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_r_compar_f compar, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
+ // Run the comparator over all array elements to detect any memory issues.
+ if (nmemb > 1) {
+ for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+ void *p = (void *)((char *)base + i * size);
+ void *q = (void *)((char *)base + (i + 1) * size);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ compar(p, q, arg);
+ }
+ }
+ qsort_r_compar_f old_compar = qsort_r_compar;
+ qsort_r_compar = compar;
+ SIZE_T old_size = qsort_r_size;
+ qsort_r_size = size;
+ REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
+ qsort_r_compar = old_compar;
+ qsort_r_size = old_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+#else
+#define INIT_QSORT_R
+#endif
+
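
Both interceptors rely on the same trampoline pattern: stash the user comparator and element size in thread-locals, hand libc a fixed-signature wrapper, and restore the previous values afterwards so a qsort called from inside a comparator still works. A stripped-down sketch of just that pattern, without the sanitizer machinery:

    #include <stdio.h>
    #include <stdlib.h>

    typedef int (*compar_fn)(const void *, const void *);
    static _Thread_local compar_fn tls_compar;
    static _Thread_local size_t tls_size;

    static int trampoline(const void *a, const void *b) {
      /* In the sanitizer, this is where a and b get unpoisoned. */
      return tls_compar(a, b);
    }

    static int cmp_int(const void *a, const void *b) {
      int x = *(const int *)a, y = *(const int *)b;
      return (x > y) - (x < y);
    }

    static void wrapped_qsort(void *base, size_t n, size_t size, compar_fn cmp) {
      compar_fn old_cmp = tls_compar; /* save for reentrant calls */
      size_t old_size = tls_size;
      tls_compar = cmp;
      tls_size = size;
      qsort(base, n, size, trampoline);
      tls_compar = old_cmp;
      tls_size = old_size;
    }

    int main(void) {
      int v[] = {3, 1, 2};
      wrapped_qsort(v, 3, sizeof(int), cmp_int);
      printf("%d %d %d\n", v[0], v[1], v[2]); /* 1 2 3 */
      return 0;
    }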
+#include "sanitizer_common_interceptors_netbsd_compat.inc"
+
static void InitializeCommonInterceptors() {
#if SI_POSIX
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
@@ -9924,6 +10034,8 @@ static void InitializeCommonInterceptors() {
INIT_CRYPT;
INIT_CRYPT_R;
INIT_GETENTROPY;
+ INIT_QSORT;
+ INIT_QSORT_R;
INIT___PRINTF_CHK;
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
new file mode 100644
index 000000000000..6aa73ec8c6a2
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
@@ -0,0 +1,128 @@
+//===-- sanitizer_common_interceptors_netbsd_compat.inc ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// Interceptors for NetBSD old function calls that have been versioned.
+//
+// Minimum supported NetBSD version: 9.0.
+// Current supported NetBSD version: 9.99.26.
+//
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_NETBSD
+
+// First undef all mangled symbols.
+// Next, define compat interceptors.
+// Finally, undef INIT_ and redefine it.
+// This allows us to avoid preprocessor issues.
+
+#undef fstatvfs
+#undef fstatvfs1
+#undef getmntinfo
+#undef getvfsstat
+#undef statvfs
+#undef statvfs1
+
+INTERCEPTOR(int, statvfs, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statvfs)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ return res;
+}
+
+INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatvfs)(fd, buf);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+
+#undef INIT_STATVFS
+#define INIT_STATVFS \
+ COMMON_INTERCEPT_FUNCTION(statvfs); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs); \
+ COMMON_INTERCEPT_FUNCTION(__statvfs90); \
+ COMMON_INTERCEPT_FUNCTION(__fstatvfs90)
+
+INTERCEPTOR(int, __getmntinfo13, void **mntbufp, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __getmntinfo13, mntbufp, flags);
+ int cnt = REAL(__getmntinfo13)(mntbufp, flags);
+ if (cnt > 0 && mntbufp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));
+ if (*mntbufp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs90_sz);
+ }
+ return cnt;
+}
+
+#undef INIT_GETMNTINFO
+#define INIT_GETMNTINFO \
+ COMMON_INTERCEPT_FUNCTION(__getmntinfo13); \
+ COMMON_INTERCEPT_FUNCTION(__getmntinfo90)
+
+INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);
+ int ret = REAL(getvfsstat)(buf, bufsize, flags);
+ if (buf && ret > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs90_sz);
+ return ret;
+}
+
+#undef INIT_GETVFSSTAT
+#define INIT_GETVFSSTAT \
+ COMMON_INTERCEPT_FUNCTION(getvfsstat); \
+ COMMON_INTERCEPT_FUNCTION(__getvfsstat90)
+
+INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ int res = REAL(statvfs1)(path, buf, flags);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ return res;
+}
+
+INTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs1, fd, buf, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(fstatvfs1)(fd, buf, flags);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+
+#undef INIT_STATVFS1
+#define INIT_STATVFS1 \
+ COMMON_INTERCEPT_FUNCTION(statvfs1); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs1); \
+ COMMON_INTERCEPT_FUNCTION(__statvfs190); \
+ COMMON_INTERCEPT_FUNCTION(__fstatvfs190)
+
+#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
index 1e2bc6652617..9e274268bf2a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
@@ -56,9 +56,16 @@ char *FlagParser::ll_strndup(const char *s, uptr n) {
}
void FlagParser::PrintFlagDescriptions() {
+ char buffer[128];
+ buffer[sizeof(buffer) - 1] = '\0';
Printf("Available flags for %s:\n", SanitizerToolName);
- for (int i = 0; i < n_flags_; ++i)
- Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
+ for (int i = 0; i < n_flags_; ++i) {
+ bool truncated = !(flags_[i].handler->Format(buffer, sizeof(buffer)));
+ CHECK_EQ(buffer[sizeof(buffer) - 1], '\0');
+ const char *truncation_str = truncated ? " Truncated" : "";
+ Printf("\t%s\n\t\t- %s (Current Value%s: %s)\n", flags_[i].name,
+ flags_[i].desc, truncation_str, buffer);
+ }
}
void FlagParser::fatal_error(const char *err) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
index c24ad25626ba..fac5dff34633 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -22,9 +22,23 @@ namespace __sanitizer {
class FlagHandlerBase {
public:
virtual bool Parse(const char *value) { return false; }
+ // Write the C string representation of the current value (truncated to fit)
+ // into the buffer of size `size`. Returns false if truncation occurred and
+ // returns true otherwise.
+ virtual bool Format(char *buffer, uptr size) {
+ if (size > 0)
+ buffer[0] = '\0';
+ return false;
+ }
protected:
~FlagHandlerBase() {}
+
+ inline bool FormatString(char *buffer, uptr size, const char *str_to_use) {
+ uptr num_symbols_should_write =
+ internal_snprintf(buffer, size, "%s", str_to_use);
+ return num_symbols_should_write < size;
+ }
};
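
The Format contract follows the usual snprintf truncation rule: snprintf returns the number of characters it would have written, so the value fits only when that count is strictly less than the buffer size. A tiny illustration of the same check FormatString performs:

    #include <stdio.h>

    /* Returns 1 if str fit into buffer (same rule as FormatString). */
    static int format_string(char *buffer, size_t size, const char *str) {
      int would_write = snprintf(buffer, size, "%s", str);
      return would_write >= 0 && (size_t)would_write < size;
    }

    int main(void) {
      char small[4];
      printf("%d\n", format_string(small, sizeof(small), "abc"));  /* 1 */
      printf("%d\n", format_string(small, sizeof(small), "abcd")); /* 0: truncated */
      return 0;
    }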
template <typename T>
@@ -34,6 +48,7 @@ class FlagHandler : public FlagHandlerBase {
public:
explicit FlagHandler(T *t) : t_(t) {}
bool Parse(const char *value) final;
+ bool Format(char *buffer, uptr size) final;
};
inline bool ParseBool(const char *value, bool *b) {
@@ -60,6 +75,11 @@ inline bool FlagHandler<bool>::Parse(const char *value) {
}
template <>
+inline bool FlagHandler<bool>::Format(char *buffer, uptr size) {
+ return FormatString(buffer, size, *t_ ? "true" : "false");
+}
+
+template <>
inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
bool b;
if (ParseBool(value, &b)) {
@@ -76,12 +96,23 @@ inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
}
template <>
+inline bool FlagHandler<HandleSignalMode>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%d", *t_);
+ return num_symbols_should_write < size;
+}
+
+template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
*t_ = value;
return true;
}
template <>
+inline bool FlagHandler<const char *>::Format(char *buffer, uptr size) {
+ return FormatString(buffer, size, *t_);
+}
+
+template <>
inline bool FlagHandler<int>::Parse(const char *value) {
const char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
@@ -91,6 +122,12 @@ inline bool FlagHandler<int>::Parse(const char *value) {
}
template <>
+inline bool FlagHandler<int>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%d", *t_);
+ return num_symbols_should_write < size;
+}
+
+template <>
inline bool FlagHandler<uptr>::Parse(const char *value) {
const char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
@@ -100,6 +137,12 @@ inline bool FlagHandler<uptr>::Parse(const char *value) {
}
template <>
+inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%p", *t_);
+ return num_symbols_should_write < size;
+}
+
+template <>
inline bool FlagHandler<s64>::Parse(const char *value) {
const char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
@@ -108,6 +151,12 @@ inline bool FlagHandler<s64>::Parse(const char *value) {
return ok;
}
+template <>
+inline bool FlagHandler<s64>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%lld", *t_);
+ return num_symbols_should_write < size;
+}
+
class FlagParser {
static const int kMaxFlags = 200;
struct Flag {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
index 66a0a5579ed3..684ee1e0b999 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
@@ -75,11 +75,13 @@ void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
class FlagHandlerInclude : public FlagHandlerBase {
FlagParser *parser_;
bool ignore_missing_;
+ const char *original_path_;
public:
explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
- : parser_(parser), ignore_missing_(ignore_missing) {}
+ : parser_(parser), ignore_missing_(ignore_missing), original_path_("") {}
bool Parse(const char *value) final {
+ original_path_ = value;
if (internal_strchr(value, '%')) {
char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
SubstituteForFlagValue(value, buf, kMaxPathLength);
@@ -89,6 +91,12 @@ class FlagHandlerInclude : public FlagHandlerBase {
}
return parser_->ParseFile(value, ignore_missing_);
}
+ bool Format(char *buffer, uptr size) {
+ // Note `original_path_` isn't actually what's parsed due to `%`
+ // substitutions. Printing the substituted path would require holding onto
+ // mmap'ed memory.
+ return FormatString(buffer, size, original_path_);
+ }
};
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
index 7d592bdcb61f..065258a5a6e1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
@@ -132,6 +132,9 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
+COMMON_FLAG(uptr, max_allocation_size_mb, 0,
+ "If non-zero, malloc/new calls larger than this size will return "
+ "nullptr (or crash if allocator_may_return_null=false).")
COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
COMMON_FLAG(s32, allocator_release_to_os_interval_ms,
((bool)SANITIZER_FUCHSIA || (bool)SANITIZER_WINDOWS) ? -1 : 5000,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc b/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
index 03ef7c1788cd..d0cc4da9755f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
@@ -24,7 +24,7 @@ struct ioctl_desc {
const char *name;
};
-const unsigned ioctl_table_max = 1236;
+const unsigned ioctl_table_max = 1238;
static ioctl_desc ioctl_table[ioctl_table_max];
static unsigned ioctl_table_size = 0;
@@ -166,9 +166,6 @@ static void ioctl_table_fill() {
_(FE_ENABLE_HIGH_LNB_VOLTAGE, READ, sizeof(int));
_(FE_SET_FRONTEND_TUNE_MODE, READ, sizeof(unsigned int));
_(FE_DISHNETWORK_SEND_LEGACY_CMD, READ, sizeof(unsigned long));
- /* Entries from file: dev/filemon/filemon.h */
- _(FILEMON_SET_FD, READWRITE, sizeof(int));
- _(FILEMON_SET_PID, READWRITE, sizeof(int));
/* Entries from file: dev/hdaudio/hdaudioio.h */
_(HDAUDIO_FGRP_INFO, READWRITE, struct_plistref_sz);
_(HDAUDIO_FGRP_GETCONFIG, READWRITE, struct_plistref_sz);
@@ -653,6 +650,7 @@ static void ioctl_table_fill() {
_(NVMM_IOC_MACHINE_CONFIGURE, READ, struct_nvmm_ioc_machine_configure_sz);
_(NVMM_IOC_VCPU_CREATE, READ, struct_nvmm_ioc_vcpu_create_sz);
_(NVMM_IOC_VCPU_DESTROY, READ, struct_nvmm_ioc_vcpu_destroy_sz);
+ _(NVMM_IOC_VCPU_CONFIGURE, READ, struct_nvmm_ioc_vcpu_configure_sz);
_(NVMM_IOC_VCPU_SETSTATE, READ, struct_nvmm_ioc_vcpu_setstate_sz);
_(NVMM_IOC_VCPU_GETSTATE, READ, struct_nvmm_ioc_vcpu_getstate_sz);
_(NVMM_IOC_VCPU_INJECT, READ, struct_nvmm_ioc_vcpu_inject_sz);
@@ -735,6 +733,7 @@ static void ioctl_table_fill() {
_(IOC_NPF_SAVE, WRITE, struct_nvlist_ref_sz);
_(IOC_NPF_RULE, READWRITE, struct_nvlist_ref_sz);
_(IOC_NPF_CONN_LOOKUP, READWRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_TABLE_REPLACE, READWRITE, struct_nvlist_ref_sz);
/* Entries from file: net/if_pppoe.h */
_(PPPOESETPARMS, READ, struct_pppoediscparms_sz);
_(PPPOEGETPARMS, READWRITE, struct_pppoediscparms_sz);
@@ -1403,8 +1402,11 @@ static void ioctl_table_fill() {
_(SNDCTL_DSP_SETRECVOL, READ, sizeof(unsigned int));
_(SNDCTL_DSP_SKIP, NONE, 0);
_(SNDCTL_DSP_SILENCE, NONE, 0);
+ /* Entries from file: dev/filemon/filemon.h (compat <= 9.99.26) */
+ _(FILEMON_SET_FD, READWRITE, sizeof(int));
+ _(FILEMON_SET_PID, READWRITE, sizeof(int));
#undef _
-} // NOLINT
+} // NOLINT
static bool ioctl_initialized = false;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index 00226305e07c..d0ffc79b0610 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -105,7 +105,7 @@
// FIXME: do we have anything like this on Mac?
#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY
#if ((SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_OPENBSD || \
- SANITIZER_FUCHSIA) && !defined(PIC)
+ SANITIZER_FUCHSIA || SANITIZER_NETBSD) && !defined(PIC)
#define SANITIZER_CAN_USE_PREINIT_ARRAY 1
// Before Solaris 11.4, .preinit_array is fully supported only with GNU ld.
// FIXME: Check for those conditions.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 0b53da6c349f..84453f1bd300 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -166,7 +166,7 @@ namespace __sanitizer {
#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
#if !SANITIZER_S390 && !SANITIZER_OPENBSD
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
- OFF_T offset) {
+ u64 offset) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
@@ -407,7 +407,10 @@ uptr internal_unlink(const char *path) {
}
uptr internal_rename(const char *oldpath, const char *newpath) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+#if defined(__riscv)
+ return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath, 0);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath);
#else
@@ -1972,6 +1975,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif
*bp = ucontext->uc_mcontext.gregs[11];
*sp = ucontext->uc_mcontext.gregs[15];
+#elif defined(__riscv)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.__gregs[REG_PC];
+ *bp = ucontext->uc_mcontext.__gregs[REG_S0];
+ *sp = ucontext->uc_mcontext.__gregs[REG_SP];
#else
# error "Unsupported arch"
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index cd503718205a..edbe8402808a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -50,6 +50,7 @@
#if SANITIZER_NETBSD
#include <sys/sysctl.h>
#include <sys/tls.h>
+#include <lwp.h>
#endif
#if SANITIZER_SOLARIS
@@ -399,13 +400,7 @@ uptr ThreadSelf() {
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
- struct tls_tcb * tcb;
-# ifdef __HAVE___LWP_GETTCB_FAST
- tcb = (struct tls_tcb *)__lwp_gettcb_fast();
-# elif defined(__HAVE___LWP_GETPRIVATE_FAST)
- tcb = (struct tls_tcb *)__lwp_getprivate_fast();
-# endif
- return tcb;
+ return (struct tls_tcb *)_lwp_getprivate();
}
uptr ThreadSelf() {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
index 41e187eaf8da..9e3b4f13a436 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
@@ -27,7 +27,7 @@ namespace __sanitizer {
// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
- OFF_T offset) {
+ u64 offset) {
struct s390_mmap_params {
unsigned long addr;
unsigned long length;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
index 4e74f6a3b516..49a951e04b37 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
@@ -95,7 +95,7 @@ static void *GetRealLibcAddress(const char *symbol) {
// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
- OFF_T offset) {
+ u64 offset) {
CHECK(&__mmap);
return (uptr)__mmap(addr, length, prot, flags, fd, 0, offset);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
index b45c975358db..c68bfa258755 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -255,11 +255,11 @@
#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
-// The AArch64 linux port uses the canonical syscall set as mandated by
-// the upstream linux community for all new ports. Other ports may still
-// use legacy syscalls.
+// The AArch64 and RISC-V linux ports use the canonical syscall set as
+// mandated by the upstream linux community for all new ports. Other ports
+// may still use legacy syscalls.
#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
-# if defined(__aarch64__) && SANITIZER_LINUX
+# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX
# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
# else
# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 61a6b82ef818..4cc69af1241d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -90,6 +90,24 @@
# define SI_IOS 0
#endif
+#if SANITIZER_IOSSIM
+# define SI_IOSSIM 1
+#else
+# define SI_IOSSIM 0
+#endif
+
+#if SANITIZER_WATCHOS
+# define SI_WATCHOS 1
+#else
+# define SI_WATCHOS 0
+#endif
+
+#if SANITIZER_TVOS
+# define SI_TVOS 1
+#else
+# define SI_TVOS 0
+#endif
+
#if SANITIZER_FUCHSIA
# define SI_NOT_FUCHSIA 0
#else
@@ -575,5 +593,8 @@
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
+#define SANITIZER_INTERCEPT_QSORT \
+ (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS)
+#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID)
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
index 842bc789f479..c51327e1269e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -65,7 +65,7 @@ namespace __sanitizer {
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
&& !defined(__mips__) && !defined(__s390__)\
- && !defined(__sparc__)
+ && !defined(__sparc__) && !defined(__riscv)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
index f01de6c995e6..48a78c8998a2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -17,6 +17,7 @@
#define _KMEMUSER
#define RAY_DO_SIGLEV
+#define __LEGACY_PT_LWPINFO
// clang-format off
#include <sys/param.h>
@@ -71,6 +72,15 @@
#include <sys/msg.h>
#include <sys/mtio.h>
#include <sys/ptrace.h>
+
+// Compat for NetBSD < 9.99.30.
+#ifndef PT_LWPSTATUS
+#define PT_LWPSTATUS 24
+#endif
+#ifndef PT_LWPNEXT
+#define PT_LWPNEXT 25
+#endif
+
#include <sys/resource.h>
#include <sys/sem.h>
#include <sys/sha1.h>
@@ -109,7 +119,12 @@
#include <dev/dmover/dmover_io.h>
#include <dev/dtv/dtvio_demux.h>
#include <dev/dtv/dtvio_frontend.h>
+#if !__NetBSD_Prereq__(9, 99, 26)
#include <dev/filemon/filemon.h>
+#else
+#define FILEMON_SET_FD _IOWR('S', 1, int)
+#define FILEMON_SET_PID _IOWR('S', 2, pid_t)
+#endif
#include <dev/hdaudio/hdaudioio.h>
#include <dev/hdmicec/hdmicecio.h>
#include <dev/hpc/hpcfbio.h>
@@ -287,6 +302,8 @@ int ptrace_pt_get_event_mask = PT_GET_EVENT_MASK;
int ptrace_pt_get_process_state = PT_GET_PROCESS_STATE;
int ptrace_pt_set_siginfo = PT_SET_SIGINFO;
int ptrace_pt_get_siginfo = PT_GET_SIGINFO;
+int ptrace_pt_lwpstatus = PT_LWPSTATUS;
+int ptrace_pt_lwpnext = PT_LWPNEXT;
int ptrace_piod_read_d = PIOD_READ_D;
int ptrace_piod_write_d = PIOD_WRITE_D;
int ptrace_piod_read_i = PIOD_READ_I;
@@ -319,6 +336,8 @@ int ptrace_pt_getdbregs = -1;
unsigned struct_ptrace_ptrace_io_desc_struct_sz = sizeof(struct ptrace_io_desc);
unsigned struct_ptrace_ptrace_lwpinfo_struct_sz = sizeof(struct ptrace_lwpinfo);
+unsigned struct_ptrace_ptrace_lwpstatus_struct_sz =
+ sizeof(struct __sanitizer_ptrace_lwpstatus);
unsigned struct_ptrace_ptrace_event_struct_sz = sizeof(ptrace_event_t);
unsigned struct_ptrace_ptrace_siginfo_struct_sz = sizeof(ptrace_siginfo_t);
@@ -698,6 +717,7 @@ unsigned struct_nvmm_ioc_machine_configure_sz =
sizeof(nvmm_ioc_machine_configure);
unsigned struct_nvmm_ioc_vcpu_create_sz = sizeof(nvmm_ioc_vcpu_create);
unsigned struct_nvmm_ioc_vcpu_destroy_sz = sizeof(nvmm_ioc_vcpu_destroy);
+unsigned struct_nvmm_ioc_vcpu_configure_sz = sizeof(nvmm_ioc_vcpu_configure);
unsigned struct_nvmm_ioc_vcpu_setstate_sz = sizeof(nvmm_ioc_vcpu_destroy);
unsigned struct_nvmm_ioc_vcpu_getstate_sz = sizeof(nvmm_ioc_vcpu_getstate);
unsigned struct_nvmm_ioc_vcpu_inject_sz = sizeof(nvmm_ioc_vcpu_inject);
@@ -1458,6 +1478,7 @@ unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY = NVMM_IOC_MACHINE_DESTROY;
unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE = NVMM_IOC_MACHINE_CONFIGURE;
unsigned IOCTL_NVMM_IOC_VCPU_CREATE = NVMM_IOC_VCPU_CREATE;
unsigned IOCTL_NVMM_IOC_VCPU_DESTROY = NVMM_IOC_VCPU_DESTROY;
+unsigned IOCTL_NVMM_IOC_VCPU_CONFIGURE = NVMM_IOC_VCPU_CONFIGURE;
unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE = NVMM_IOC_VCPU_SETSTATE;
unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE = NVMM_IOC_VCPU_GETSTATE;
unsigned IOCTL_NVMM_IOC_VCPU_INJECT = NVMM_IOC_VCPU_INJECT;
@@ -1534,6 +1555,7 @@ unsigned IOCTL_IOC_NPF_STATS = IOC_NPF_STATS;
unsigned IOCTL_IOC_NPF_SAVE = IOC_NPF_SAVE;
unsigned IOCTL_IOC_NPF_RULE = IOC_NPF_RULE;
unsigned IOCTL_IOC_NPF_CONN_LOOKUP = IOC_NPF_CONN_LOOKUP;
+unsigned IOCTL_IOC_NPF_TABLE_REPLACE = IOC_NPF_TABLE_REPLACE;
unsigned IOCTL_PPPOESETPARMS = PPPOESETPARMS;
unsigned IOCTL_PPPOEGETPARMS = PPPOEGETPARMS;
unsigned IOCTL_PPPOEGETSESSION = PPPOEGETSESSION;
@@ -2392,4 +2414,42 @@ CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_flags);
CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_props);
CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_propslen);
+// Compat with 9.0
+struct statvfs90 {
+ unsigned long f_flag;
+ unsigned long f_bsize;
+ unsigned long f_frsize;
+ unsigned long f_iosize;
+
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_bresvd;
+
+ u64 f_files;
+ u64 f_ffree;
+ u64 f_favail;
+ u64 f_fresvd;
+
+ u64 f_syncreads;
+ u64 f_syncwrites;
+
+ u64 f_asyncreads;
+ u64 f_asyncwrites;
+
+ struct {
+ s32 __fsid_val[2];
+ } f_fsidx;
+ unsigned long f_fsid;
+ unsigned long f_namemax;
+ u32 f_owner;
+
+ u32 f_spare[4];
+
+ char f_fstypename[32];
+ char f_mntonname[32];
+ char f_mntfromname[32];
+};
+unsigned struct_statvfs90_sz = sizeof(struct statvfs90);
+
#endif // SANITIZER_NETBSD
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
index 4fb3b8c0e06f..794efdb6eff6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -354,7 +354,13 @@ struct __sanitizer_addrinfo {
int ai_family;
int ai_socktype;
int ai_protocol;
+#if defined(__sparc__) && defined(_LP64)
+ int __ai_pad0;
+#endif
unsigned ai_addrlen;
+#if defined(__alpha__) || (defined(__i386__) && defined(_LP64))
+ int __ai_pad0;
+#endif
char *ai_canonname;
void *ai_addr;
struct __sanitizer_addrinfo *ai_next;
@@ -406,6 +412,8 @@ extern int ptrace_pt_get_event_mask;
extern int ptrace_pt_get_process_state;
extern int ptrace_pt_set_siginfo;
extern int ptrace_pt_get_siginfo;
+extern int ptrace_pt_lwpstatus;
+extern int ptrace_pt_lwpnext;
extern int ptrace_piod_read_d;
extern int ptrace_piod_write_d;
extern int ptrace_piod_read_i;
@@ -430,8 +438,17 @@ struct __sanitizer_ptrace_lwpinfo {
int pl_event;
};
+struct __sanitizer_ptrace_lwpstatus {
+ __sanitizer_lwpid_t pl_lwpid;
+ __sanitizer_sigset_t pl_sigpend;
+ __sanitizer_sigset_t pl_sigmask;
+ char pl_name[20];
+ void *pl_private;
+};
+
extern unsigned struct_ptrace_ptrace_io_desc_struct_sz;
extern unsigned struct_ptrace_ptrace_lwpinfo_struct_sz;
+extern unsigned struct_ptrace_ptrace_lwpstatus_struct_sz;
extern unsigned struct_ptrace_ptrace_event_struct_sz;
extern unsigned struct_ptrace_ptrace_siginfo_struct_sz;
@@ -856,6 +873,7 @@ extern unsigned struct_nvmm_ioc_machine_destroy_sz;
extern unsigned struct_nvmm_ioc_machine_configure_sz;
extern unsigned struct_nvmm_ioc_vcpu_create_sz;
extern unsigned struct_nvmm_ioc_vcpu_destroy_sz;
+extern unsigned struct_nvmm_ioc_vcpu_configure_sz;
extern unsigned struct_nvmm_ioc_vcpu_setstate_sz;
extern unsigned struct_nvmm_ioc_vcpu_getstate_sz;
extern unsigned struct_nvmm_ioc_vcpu_inject_sz;
@@ -1605,6 +1623,7 @@ extern unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY;
extern unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE;
extern unsigned IOCTL_NVMM_IOC_VCPU_CREATE;
extern unsigned IOCTL_NVMM_IOC_VCPU_DESTROY;
+extern unsigned IOCTL_NVMM_IOC_VCPU_CONFIGURE;
extern unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE;
extern unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE;
extern unsigned IOCTL_NVMM_IOC_VCPU_INJECT;
@@ -1679,6 +1698,7 @@ extern unsigned IOCTL_IOC_NPF_STATS;
extern unsigned IOCTL_IOC_NPF_SAVE;
extern unsigned IOCTL_IOC_NPF_RULE;
extern unsigned IOCTL_IOC_NPF_CONN_LOOKUP;
+extern unsigned IOCTL_IOC_NPF_TABLE_REPLACE;
extern unsigned IOCTL_PPPOESETPARMS;
extern unsigned IOCTL_PPPOEGETPARMS;
extern unsigned IOCTL_PPPOEGETSESSION;
@@ -2400,6 +2420,9 @@ struct __sanitizer_cdbw {
#define SIGACTION_SYMNAME __sigaction14
+// Compat with 9.0
+extern unsigned struct_statvfs90_sz;
+
#endif // SANITIZER_NETBSD
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 9852e6ba7879..aa845df4dde4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -230,7 +230,7 @@ namespace __sanitizer {
// has been removed from glibc 2.28.
#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \
|| defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \
- || defined(__x86_64__)
+ || defined(__x86_64__) || (defined(__riscv) && __riscv_xlen == 64)
#define SIZEOF_STRUCT_USTAT 32
#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
|| defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
@@ -1128,11 +1128,9 @@ CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
-#if (!defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)) && \
- !defined(__arm__)
-/* On aarch64 glibc 2.20 and earlier provided incorrect mode field. */
-/* On Arm newer glibc provide a different mode field, it's hard to detect
- so just disable the check. */
+#if !SANITIZER_LINUX || __GLIBC_PREREQ (2, 31)
+/* glibc 2.30 and earlier provided 16-bit mode field instead of 32-bit
+ on many architectures. */
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index db2c4f07b3ae..5337b26b29b8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -98,6 +98,9 @@ const unsigned struct_kernel_stat64_sz = 144;
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__riscv) && __riscv_xlen == 64
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 104;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
@@ -204,26 +207,13 @@ struct __sanitizer_ipc_perm {
u64 __unused1;
u64 __unused2;
#elif defined(__sparc__)
-#if defined(__arch64__)
unsigned mode;
- unsigned short __pad1;
-#else
- unsigned short __pad1;
- unsigned short mode;
unsigned short __pad2;
-#endif
unsigned short __seq;
unsigned long long __unused1;
unsigned long long __unused2;
-#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
- unsigned int mode;
- unsigned short __seq;
- unsigned short __pad1;
- unsigned long __unused1;
- unsigned long __unused2;
#else
- unsigned short mode;
- unsigned short __pad1;
+ unsigned int mode;
unsigned short __seq;
unsigned short __pad2;
#if defined(__x86_64__) && !defined(_LP64)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
index 05fb0f630207..70c71f04d2d3 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
@@ -39,7 +39,7 @@ uptr internal_write(fd_t fd, const void *buf, uptr count);
// Memory
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, OFF_T offset);
+ int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
int internal_mprotect(void *addr, uptr length, int prot);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
index 5690d75097f9..1ed21343254d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
@@ -120,10 +120,18 @@ bool ThreadSuspender::SuspendAllThreads() {
VReport(2, "Attached to process %d.\n", pid_);
+#ifdef PT_LWPNEXT
+ struct ptrace_lwpstatus pl;
+ int op = PT_LWPNEXT;
+#else
struct ptrace_lwpinfo pl;
- int val;
+ int op = PT_LWPINFO;
+#endif
+
pl.pl_lwpid = 0;
- while ((val = ptrace(PT_LWPINFO, pid_, (void *)&pl, sizeof(pl))) != -1 &&
+
+ int val;
+ while ((val = ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
pl.pl_lwpid != 0) {
suspended_threads_list_.Append(pl.pl_lwpid);
VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
index a619ed092f0b..f26efe5c50b5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -31,6 +31,9 @@ bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
Dl_info info;
int result = dladdr((const void *)addr, &info);
if (!result) return false;
+
+ CHECK(addr >= reinterpret_cast<uptr>(info.dli_saddr));
+ stack->info.function_offset = addr - reinterpret_cast<uptr>(info.dli_saddr);
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
if (!demangled) return false;
stack->info.function = internal_strdup(demangled);
@@ -145,12 +148,29 @@ bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
const char *buf = process_->SendCommand(command);
if (!buf) return false;
uptr line;
+ uptr start_address = AddressInfo::kUnknown;
if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
- &stack->info.file, &line, nullptr)) {
+ &stack->info.file, &line, &start_address)) {
process_ = nullptr;
return false;
}
stack->info.line = (int)line;
+
+ if (start_address == AddressInfo::kUnknown) {
+ // Fall back to dladdr() to get the function start address if atos doesn't
+ // report it.
+ Dl_info info;
+ int result = dladdr((const void *)addr, &info);
+ if (result)
+ start_address = reinterpret_cast<uptr>(info.dli_saddr);
+ }
+
+ // Only assign to `function_offset` if we were able to get the function's
+ // start address.
+ if (start_address != AddressInfo::kUnknown) {
+ CHECK(addr >= start_address);
+ stack->info.function_offset = addr - start_address;
+ }
return true;
}
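Both paths above reduce to the same arithmetic: dladdr() reports the start of the enclosing symbol in dli_saddr, and the offset is the difference from the queried address. A standalone sketch of that computation, assuming a POSIX host with <dlfcn.h> (GetFunctionOffset is a hypothetical helper):

#include <dlfcn.h>
#include <stdint.h>

// Returns true and sets *Offset to the distance from the start of the
// enclosing function, mirroring the CHECK(addr >= start) in the patch.
static bool GetFunctionOffset(uintptr_t Addr, uintptr_t *Offset) {
  Dl_info Info;
  if (!dladdr(reinterpret_cast<const void *>(Addr), &Info) || !Info.dli_saddr)
    return false;
  const uintptr_t Start = reinterpret_cast<uintptr_t>(Info.dli_saddr);
  if (Addr < Start)
    return false;
  *Offset = Addr - Start;
  return true;
}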
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc b/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
index 21b521669630..02b7e11b1677 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
@@ -42,8 +42,8 @@
// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
//
// Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2018-10-30
-// Generated from: syscalls.master,v 1.293 2018/07/31 13:00:13 rjs Exp
+// Generated date: 2019-12-24
+// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
//
//===----------------------------------------------------------------------===//
@@ -323,6 +323,16 @@ PRE_SYSCALL(ptrace)
PRE_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
} else if (req_ == ptrace_pt_get_siginfo) {
PRE_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_lwpstatus) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ PRE_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
+ } else if (req_ == ptrace_pt_lwpnext) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ PRE_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
} else if (req_ == ptrace_pt_setregs) {
PRE_READ(addr_, struct_ptrace_reg_struct_sz);
} else if (req_ == ptrace_pt_getregs) {
@@ -366,6 +376,16 @@ POST_SYSCALL(ptrace)
POST_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
} else if (req_ == ptrace_pt_get_siginfo) {
POST_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_lwpstatus) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ POST_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
+ } else if (req_ == ptrace_pt_lwpnext) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ POST_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
} else if (req_ == ptrace_pt_setregs) {
POST_READ(addr_, struct_ptrace_reg_struct_sz);
} else if (req_ == ptrace_pt_getregs) {
@@ -2433,30 +2453,31 @@ PRE_SYSCALL(uuidgen)(void *store_, long long count_) { /* Nothing to do */ }
POST_SYSCALL(uuidgen)(long long res, void *store_, long long count_) {
/* Nothing to do */
}
-PRE_SYSCALL(getvfsstat)(void *buf_, long long bufsize_, long long flags_) {
+PRE_SYSCALL(compat_90_getvfsstat)
+(void *buf_, long long bufsize_, long long flags_) {
/* Nothing to do */
}
-POST_SYSCALL(getvfsstat)
+POST_SYSCALL(compat_90_getvfsstat)
(long long res, void *buf_, long long bufsize_, long long flags_) {
/* Nothing to do */
}
-PRE_SYSCALL(statvfs1)(void *path_, void *buf_, long long flags_) {
+PRE_SYSCALL(compat_90_statvfs1)(void *path_, void *buf_, long long flags_) {
const char *path = (const char *)path_;
if (path) {
PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
}
}
-POST_SYSCALL(statvfs1)
+POST_SYSCALL(compat_90_statvfs1)
(long long res, void *path_, void *buf_, long long flags_) {
const char *path = (const char *)path_;
if (path) {
POST_READ(path, __sanitizer::internal_strlen(path) + 1);
}
}
-PRE_SYSCALL(fstatvfs1)(long long fd_, void *buf_, long long flags_) {
+PRE_SYSCALL(compat_90_fstatvfs1)(long long fd_, void *buf_, long long flags_) {
/* Nothing to do */
}
-POST_SYSCALL(fstatvfs1)
+POST_SYSCALL(compat_90_fstatvfs1)
(long long res, long long fd_, void *buf_, long long flags_) {
/* Nothing to do */
}
@@ -2853,13 +2874,13 @@ PRE_SYSCALL(__fhopen40)(void *fhp_, long long fh_size_, long long flags_) {
}
POST_SYSCALL(__fhopen40)
(long long res, void *fhp_, long long fh_size_, long long flags_) {}
-PRE_SYSCALL(__fhstatvfs140)
+PRE_SYSCALL(compat_90_fhstatvfs1)
(void *fhp_, long long fh_size_, void *buf_, long long flags_) {
if (fhp_) {
PRE_READ(fhp_, fh_size_);
}
}
-POST_SYSCALL(__fhstatvfs140)
+POST_SYSCALL(compat_90_fhstatvfs1)
(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
PRE_SYSCALL(compat_50___fhstat40)(void *fhp_, long long fh_size_, void *sb_) {
if (fhp_) {
@@ -3768,6 +3789,41 @@ POST_SYSCALL(clock_getcpuclockid2)
(long long res, long long idtype_, long long id_, void *clock_id_) {
/* Nothing to do */
}
+PRE_SYSCALL(__getvfsstat90)(void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__getvfsstat90)
+(long long res, void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__statvfs190)(void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__statvfs190)
+(long long res, void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(__fstatvfs190)(long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__fstatvfs190)
+(long long res, long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__fhstatvfs190)
+(void *fhp_, long long fh_size_, void *buf_, long long flags_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhstatvfs190)
+(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
#undef SYS_MAXSYSARGS
} // extern "C"
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp
index ac351d3a8362..d3c59e357d46 100644
--- a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp
@@ -19,13 +19,6 @@
#include <tuple>
-// Need to match ../sanitizer_common/sanitizer_internal_defs.h
-#if defined(ARCH_PPC)
-#define OFF_T unsigned long
-#else
-#define OFF_T unsigned long long
-#endif
-
namespace __sanitizer {
unsigned long internal_open(const char *filename, int flags);
unsigned long internal_open(const char *filename, int flags, unsigned mode);
@@ -35,7 +28,7 @@ unsigned long internal_lstat(const char *path, void *buf);
unsigned long internal_fstat(int fd, void *buf);
size_t internal_strlen(const char *s);
unsigned long internal_mmap(void *addr, unsigned long length, int prot,
- int flags, int fd, OFF_T offset);
+ int flags, int fd, unsigned long long offset);
void *internal_memcpy(void *dest, const void *src, unsigned long n);
// Used to propagate errno.
bool internal_iserror(unsigned long retval, int *rverrno = 0);
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index 62c6f2875106..3a5aaae73674 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -14,6 +14,7 @@
#include "flags.h"
#include "primary32.h"
#include "primary64.h"
+#include "secondary.h"
#include "size_class_map.h"
#include "tsd_exclusive.h"
#include "tsd_shared.h"
@@ -31,6 +32,7 @@ struct DefaultConfig {
// 512KB regions
typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
#endif
+ typedef MapAllocator<> Secondary;
template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
};
@@ -43,6 +45,7 @@ struct AndroidConfig {
// 512KB regions
typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
#endif
+ typedef MapAllocator<> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
};
@@ -56,16 +59,20 @@ struct AndroidSvelteConfig {
// 64KB regions
typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
#endif
+ typedef MapAllocator<0U> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
};
+#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
// 1GB Regions
typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+ typedef MapAllocator<0U> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
};
+#endif
#if SCUDO_ANDROID
typedef AndroidConfig Config;
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index 47037d764e25..6c84ba86ed32 100644
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -21,12 +21,12 @@ enum memory_order {
memory_order_acq_rel = 4,
memory_order_seq_cst = 5
};
-COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
-COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
-COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
-COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
-COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
-COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
struct atomic_u8 {
typedef u8 Type;
@@ -60,7 +60,7 @@ struct atomic_uptr {
};
template <typename T>
-INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
typename T::Type V;
__atomic_load(&A->ValDoNotUse, &V, MO);
@@ -68,29 +68,29 @@ INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
}
template <typename T>
-INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
__atomic_store(&A->ValDoNotUse, &V, MO);
}
-INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
template <typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
memory_order MO) {
DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}
template <typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
memory_order MO) {
DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
}
template <typename T>
-INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
memory_order MO) {
DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
typename T::Type R;
@@ -99,7 +99,7 @@ INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
}
template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
typename T::Type Xchg,
memory_order MO) {
return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
@@ -107,7 +107,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
}
template <typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
typename T::Type Xchg,
memory_order MO) {
return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
@@ -117,17 +117,17 @@ INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
// Clutter-reducing helpers.
template <typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+inline typename T::Type atomic_load_relaxed(const volatile T *A) {
return atomic_load(A, memory_order_relaxed);
}
template <typename T>
-INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
atomic_store(A, V, memory_order_relaxed);
}
template <typename T>
-INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+inline typename T::Type atomic_compare_exchange(volatile T *A,
typename T::Type Cmp,
typename T::Type Xchg) {
atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
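The renamed helpers are thin wrappers over the __atomic builtins, so they inline to single instructions on most targets. A short usage sketch, assuming atomic_helpers.h is included (RefCount and tryClaim are hypothetical):

namespace scudo {

atomic_u32 RefCount; // zero-initialized, like the rest of the allocator state

// Relaxed increment: ordering does not matter for a pure statistic.
inline void retain() { atomic_fetch_add(&RefCount, 1U, memory_order_relaxed); }

// Claims the 0 -> 1 transition exactly once across threads.
inline bool tryClaim() {
  u32 Expected = 0;
  return atomic_compare_exchange_strong(&RefCount, &Expected, 1U,
                                        memory_order_acquire);
}

} // namespace scudo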
diff --git a/compiler-rt/lib/scudo/standalone/bytemap.h b/compiler-rt/lib/scudo/standalone/bytemap.h
index caeeb2fac879..a03a0c471062 100644
--- a/compiler-rt/lib/scudo/standalone/bytemap.h
+++ b/compiler-rt/lib/scudo/standalone/bytemap.h
@@ -34,6 +34,9 @@ public:
return Map[Index];
}
+ void disable() {}
+ void enable() {}
+
private:
u8 *Map;
};
@@ -82,6 +85,9 @@ public:
return Level2Map[Index % Level2Size];
}
+ void disable() { Mutex.lock(); }
+ void enable() { Mutex.unlock(); }
+
private:
u8 *get(uptr Index) const {
DCHECK_LT(Index, Level1Size);
diff --git a/compiler-rt/lib/scudo/standalone/checksum.cpp b/compiler-rt/lib/scudo/standalone/checksum.cpp
index f713f5a81609..5de049a0931b 100644
--- a/compiler-rt/lib/scudo/standalone/checksum.cpp
+++ b/compiler-rt/lib/scudo/standalone/checksum.cpp
@@ -44,7 +44,6 @@ bool hasHardwareCRC32() {
__get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
return !!(Ecx & bit_SSE4_2);
}
-
#elif defined(__arm__) || defined(__aarch64__)
#ifndef AT_HWCAP
#define AT_HWCAP 16
@@ -65,6 +64,9 @@ bool hasHardwareCRC32() {
return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
#endif // SCUDO_FUCHSIA
}
+#else
+// No hardware CRC32 implemented in Scudo for other architectures.
+bool hasHardwareCRC32() { return false; }
#endif // defined(__x86_64__) || defined(__i386__)
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/checksum.h b/compiler-rt/lib/scudo/standalone/checksum.h
index 092342fd6efb..a63b1b4f064d 100644
--- a/compiler-rt/lib/scudo/standalone/checksum.h
+++ b/compiler-rt/lib/scudo/standalone/checksum.h
@@ -37,7 +37,7 @@ enum class Checksum : u8 {
// significantly on memory accesses, as well as 1K of CRC32 table, on platforms
// that do not support hardware CRC32. The checksum itself is 16-bit, which is at
// odds with CRC32, but enough for our needs.
-INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
for (u8 I = 0; I < sizeof(Data); I++) {
Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
Sum = static_cast<u16>(Sum + (Data & 0xff));
diff --git a/compiler-rt/lib/scudo/standalone/chunk.h b/compiler-rt/lib/scudo/standalone/chunk.h
index 9ae75823ba77..f4d68b3ac6c4 100644
--- a/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/compiler-rt/lib/scudo/standalone/chunk.h
@@ -20,7 +20,7 @@ namespace scudo {
extern Checksum HashAlgorithm;
-INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
// If the hardware CRC32 feature is defined here, it was enabled everywhere,
// as opposed to only for crc32_hw.cpp. This means that other hardware
// specific instructions were likely emitted at other places, and as a result
@@ -71,7 +71,7 @@ struct UnpackedHeader {
uptr Checksum : 16;
};
typedef atomic_u64 AtomicPackedHeader;
-COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");
// Those constants are required to silence some -Werror=conversion errors when
// assigning values to the related bitfield variables.
@@ -86,13 +86,12 @@ constexpr uptr getHeaderSize() {
return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}
-INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
getHeaderSize());
}
-INLINE
-const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+inline const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
return reinterpret_cast<const AtomicPackedHeader *>(
reinterpret_cast<uptr>(Ptr) - getHeaderSize());
}
@@ -100,7 +99,7 @@ const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
// We do not need a cryptographically strong hash for the checksum, but a CRC
// type function that can alert us in the event a header is invalid or
// corrupted. Ideally slightly better than a simple xor of all fields.
-static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
UnpackedHeader *Header) {
UnpackedHeader ZeroChecksumHeader = *Header;
ZeroChecksumHeader.Checksum = 0;
@@ -110,7 +109,7 @@ static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
ARRAY_SIZE(HeaderHolder));
}
-INLINE void storeHeader(u32 Cookie, void *Ptr,
+inline void storeHeader(u32 Cookie, void *Ptr,
UnpackedHeader *NewUnpackedHeader) {
NewUnpackedHeader->Checksum =
computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
@@ -118,9 +117,8 @@ INLINE void storeHeader(u32 Cookie, void *Ptr,
atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
}
-INLINE
-void loadHeader(u32 Cookie, const void *Ptr,
- UnpackedHeader *NewUnpackedHeader) {
+inline void loadHeader(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
*NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
if (UNLIKELY(NewUnpackedHeader->Checksum !=
@@ -128,7 +126,7 @@ void loadHeader(u32 Cookie, const void *Ptr,
reportHeaderCorruption(const_cast<void *>(Ptr));
}
-INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+inline void compareExchangeHeader(u32 Cookie, void *Ptr,
UnpackedHeader *NewUnpackedHeader,
UnpackedHeader *OldUnpackedHeader) {
NewUnpackedHeader->Checksum =
@@ -141,8 +139,8 @@ INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
reportHeaderRace(Ptr);
}
-INLINE
-bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+inline bool isValid(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
*NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
return NewUnpackedHeader->Checksum ==
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 60be1dd20d39..a0b4b2973e96 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -18,17 +18,36 @@
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
+#include "string_utils.h"
#include "tsd.h"
+#ifdef GWP_ASAN_HOOKS
+#include "gwp_asan/guarded_pool_allocator.h"
+// GWP-ASan is declared here in order to avoid indirect call overhead. It's also
+// instantiated outside of the Allocator class, as the allocator is only
+// zero-initialised. GWP-ASan requires constant initialisation, and the Scudo
+// allocator doesn't have a constexpr constructor (see discussion here:
+// https://reviews.llvm.org/D69265#inline-624315).
+static gwp_asan::GuardedPoolAllocator GuardedAlloc;
+#endif // GWP_ASAN_HOOKS
+
+extern "C" inline void EmptyCallback() {}
+
namespace scudo {
-template <class Params> class Allocator {
+template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
+class Allocator {
public:
using PrimaryT = typename Params::Primary;
using CacheT = typename PrimaryT::CacheT;
- typedef Allocator<Params> ThisT;
+ typedef Allocator<Params, PostInitCallback> ThisT;
typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+ void callPostInitCallback() {
+ static pthread_once_t OnceControl = PTHREAD_ONCE_INIT;
+ pthread_once(&OnceControl, PostInitCallback);
+ }
+
struct QuarantineCallback {
explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
: Allocator(Instance), Cache(LocalCache) {}
@@ -133,6 +152,22 @@ public:
Quarantine.init(
static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+
+#ifdef GWP_ASAN_HOOKS
+ gwp_asan::options::Options Opt;
+ Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
+ // Bear in mind that Scudo has its own alignment guarantees that are strictly
+ // enforced. Scudo exposes the same allocation function for everything from
+ // malloc() to posix_memalign, so in general this flag goes unused, as Scudo
+ // will always ask GWP-ASan for an aligned amount of bytes.
+ Opt.PerfectlyRightAlign = getFlags()->GWP_ASAN_PerfectlyRightAlign;
+ Opt.MaxSimultaneousAllocations =
+ getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
+ Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
+ Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
+ Opt.Printf = Printf;
+ GuardedAlloc.init(Opt);
+#endif // GWP_ASAN_HOOKS
}
void reset() { memset(this, 0, sizeof(*this)); }
@@ -144,7 +179,10 @@ public:
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
- void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
+ // The Cache must be provided zero-initialized.
+ void initCache(CacheT *Cache) {
+ Cache->initLinkerInitialized(&Stats, &Primary);
+ }
// Release the resources used by a TSD, which involves:
// - draining the local quarantine cache to the global quarantine;
@@ -162,6 +200,15 @@ public:
bool ZeroContents = false) {
initThreadMaybe();
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.shouldSample())) {
+ if (void *Ptr = GuardedAlloc.allocate(roundUpTo(Size, Alignment)))
+ return Ptr;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ ZeroContents |= static_cast<bool>(Options.ZeroContents);
+
if (UNLIKELY(Alignment > MaxAlignment)) {
if (Options.MayReturnNull)
return nullptr;
@@ -180,12 +227,13 @@ public:
((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
// Takes care of extravagantly large sizes as well as integer overflows.
- if (UNLIKELY(Size >= MaxAllowedMallocSize ||
- NeededSize >= MaxAllowedMallocSize)) {
+ static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
+ if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
if (Options.MayReturnNull)
return nullptr;
reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
}
+ DCHECK_LE(Size, NeededSize);
void *Block;
uptr ClassId;
@@ -200,7 +248,8 @@ public:
TSD->unlock();
} else {
ClassId = 0;
- Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+ Block =
+ Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents);
}
if (UNLIKELY(!Block)) {
@@ -212,22 +261,23 @@ public:
// We only need to zero the contents for Primary backed allocations. This
// condition is not necessarily unlikely, but since memset is costly, we
// might as well mark it as such.
- if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
+ if (UNLIKELY(ZeroContents && ClassId))
memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
+ const uptr UnalignedUserPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+ const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
+
Chunk::UnpackedHeader Header = {};
- uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
- if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
- const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
- const uptr Offset = AlignedUserPtr - UserPtr;
- DCHECK_GT(Offset, 2 * sizeof(u32));
+ if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
+ const uptr Offset = UserPtr - UnalignedUserPtr;
+ DCHECK_GE(Offset, 2 * sizeof(u32));
// The BlockMarker has no security purpose, but is specifically meant for
// the chunk iteration function that can be used in debugging situations.
// It is the only situation where we have to locate the start of a chunk
// based on its block address.
reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
- UserPtr = AlignedUserPtr;
Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
}
Header.ClassId = ClassId & Chunk::ClassIdMask;
@@ -254,6 +304,13 @@ public:
// being destroyed properly. Any other heap operation will do a full init.
initThreadMaybe(/*MinimalInit=*/true);
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+ GuardedAlloc.deallocate(Ptr);
+ return;
+ }
+#endif // GWP_ASAN_HOOKS
+
if (&__scudo_deallocate_hook)
__scudo_deallocate_hook(Ptr);
@@ -293,6 +350,17 @@ public:
DCHECK_NE(OldPtr, nullptr);
DCHECK_NE(NewSize, 0);
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
+ uptr OldSize = GuardedAlloc.getSize(OldPtr);
+ void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+ if (NewPtr)
+ memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
+ GuardedAlloc.deallocate(OldPtr);
+ return NewPtr;
+ }
+#endif // GWP_ASAN_HOOKS
+
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
@@ -354,11 +422,14 @@ public:
return NewPtr;
}
- // TODO(kostyak): while this locks the Primary & Secondary, it still allows
- // pointers to be fetched from the TSD. We ultimately want to
- // lock the registry as well. For now, it's good enough.
+ // TODO(kostyak): disable() is currently best-effort. There are some small
+ // windows of time when an allocation could still succeed after
+ // this function finishes. We will revisit that later.
void disable() {
initThreadMaybe();
+ TSDRegistry.disable();
+ Stats.disable();
+ Quarantine.disable();
Primary.disable();
Secondary.disable();
}
@@ -367,6 +438,9 @@ public:
initThreadMaybe();
Secondary.enable();
Primary.enable();
+ Quarantine.enable();
+ Stats.enable();
+ TSDRegistry.enable();
}
// The function returns the amount of bytes required to store the statistics,
@@ -396,7 +470,10 @@ public:
Str.output();
}
- void releaseToOS() { Primary.releaseToOS(); }
+ void releaseToOS() {
+ initThreadMaybe();
+ Primary.releaseToOS();
+ }
// Iterate over all chunks and call a callback for all busy chunks located
// within the provided memory range. Said callback must not use this allocator
@@ -409,10 +486,11 @@ public:
auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
if (Block < From || Block >= To)
return;
- uptr ChunkSize;
- const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
- if (ChunkBase != InvalidChunk)
- Callback(ChunkBase, ChunkSize, Arg);
+ uptr Chunk;
+ Chunk::UnpackedHeader Header;
+ if (getChunkFromBlock(Block, &Chunk, &Header) &&
+ Header.State == Chunk::State::Allocated)
+ Callback(Chunk, getSize(reinterpret_cast<void *>(Chunk), &Header), Arg);
};
Primary.iterateOverBlocks(Lambda);
Secondary.iterateOverBlocks(Lambda);
@@ -435,6 +513,12 @@ public:
initThreadMaybe();
if (UNLIKELY(!Ptr))
return 0;
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
// Getting the usable size of a chunk only makes sense if it's allocated.
@@ -448,8 +532,24 @@ public:
Stats.get(S);
}
+ // Returns true if the pointer provided was allocated by the current
+ // allocator instance, which is compliant with tcmalloc's ownership concept.
+ // A corrupted chunk will not be reported as owned, which is WAI.
+ bool isOwned(const void *Ptr) {
+ initThreadMaybe();
+#ifdef GWP_ASAN_HOOKS
+ if (GuardedAlloc.pointerIsMine(Ptr))
+ return true;
+#endif // GWP_ASAN_HOOKS
+ if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
+ return false;
+ Chunk::UnpackedHeader Header;
+ return Chunk::isValid(Cookie, Ptr, &Header) &&
+ Header.State == Chunk::State::Allocated;
+ }
+
private:
- typedef MapAllocator SecondaryT;
+ using SecondaryT = typename Params::Secondary;
typedef typename PrimaryT::SizeClassMap SizeClassMap;
static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
@@ -459,9 +559,10 @@ private:
static const uptr MaxAllowedMallocSize =
FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
- // Constants used by the chunk iteration mechanism.
+ static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
+ "Minimal alignment must at least cover a chunk header.");
+
static const u32 BlockMarker = 0x44554353U;
- static const uptr InvalidChunk = ~static_cast<uptr>(0);
GlobalStats Stats;
TSDRegistryT TSDRegistry;
@@ -514,7 +615,7 @@ private:
reportSanityCheckError("class ID");
}
- static INLINE void *getBlockBegin(const void *Ptr,
+ static inline void *getBlockBegin(const void *Ptr,
Chunk::UnpackedHeader *Header) {
return reinterpret_cast<void *>(
reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
@@ -522,7 +623,7 @@ private:
}
// Return the size of a chunk as requested during its allocation.
- INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+ inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (LIKELY(Header->ClassId))
return SizeOrUnusedBytes;
@@ -539,7 +640,9 @@ private:
Chunk::UnpackedHeader NewHeader = *Header;
// If the quarantine is disabled, or the size of the chunk is 0 or larger
// than the maximum allowed, we return the chunk directly to the backend.
- const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
+ // Logical Or can be short-circuited, which introduces unnecessary
+ // conditional jumps, so use bitwise Or and let the compiler be clever.
+ const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
(Size > Options.QuarantineMaxChunkSize);
if (BypassQuarantine) {
NewHeader.State = Chunk::State::Available;
@@ -567,20 +670,13 @@ private:
}
}
- // This only cares about valid busy chunks. This might change in the future.
- uptr getChunkFromBlock(uptr Block, uptr *Size) {
+ bool getChunkFromBlock(uptr Block, uptr *Chunk,
+ Chunk::UnpackedHeader *Header) {
u32 Offset = 0;
if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
Offset = reinterpret_cast<u32 *>(Block)[1];
- const uptr P = Block + Offset + Chunk::getHeaderSize();
- const void *Ptr = reinterpret_cast<const void *>(P);
- Chunk::UnpackedHeader Header;
- if (!Chunk::isValid(Cookie, Ptr, &Header) ||
- Header.State != Chunk::State::Allocated)
- return InvalidChunk;
- if (Size)
- *Size = getSize(Ptr, &Header);
- return P;
+ *Chunk = Block + Offset + Chunk::getHeaderSize();
+ return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
}
uptr getStats(ScopedString *Str) {
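On the bitwise-Or in BypassQuarantine above: a minimal side-by-side illustration of the trade-off (function names hypothetical). Both forms have the same truth table; only the evaluation strategy differs, and all three operands are cheap and side-effect free, so unconditional evaluation is safe:

#include <cstdint>

// || guarantees short-circuiting, which the compiler may lower to one
// conditional branch per operand.
bool bypassBranchy(uintptr_t CacheSize, uintptr_t Size, uintptr_t Max) {
  return !CacheSize || !Size || (Size > Max);
}

// | evaluates every operand unconditionally, so the compiler is free to
// emit straight-line setcc/or code with no jumps.
bool bypassBranchless(uintptr_t CacheSize, uintptr_t Size, uintptr_t Max) {
  return !CacheSize | !Size | (Size > Max);
}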
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
index c015d1ca5669..a76eb6bbc164 100644
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -19,22 +19,22 @@
namespace scudo {
-template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
- COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
+ static_assert(sizeof(Dest) == sizeof(Source), "");
Dest D;
memcpy(&D, &S, sizeof(D));
return D;
}
-INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
return (X + Boundary - 1) & ~(Boundary - 1);
}
-INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
return X & ~(Boundary - 1);
}
-INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+inline constexpr bool isAligned(uptr X, uptr Alignment) {
return (X & (Alignment - 1)) == 0;
}
@@ -48,14 +48,14 @@ template <class T> void Swap(T &A, T &B) {
B = Tmp;
}
-INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+inline uptr getMostSignificantSetBitIndex(uptr X) {
DCHECK_NE(X, 0U);
return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}
-INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpToPowerOfTwo(uptr Size) {
DCHECK(Size);
if (isPowerOfTwo(Size))
return Size;
@@ -65,17 +65,17 @@ INLINE uptr roundUpToPowerOfTwo(uptr Size) {
return 1UL << (Up + 1);
}
-INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+inline uptr getLeastSignificantSetBitIndex(uptr X) {
DCHECK_NE(X, 0U);
return static_cast<uptr>(__builtin_ctzl(X));
}
-INLINE uptr getLog2(uptr X) {
+inline uptr getLog2(uptr X) {
DCHECK(isPowerOfTwo(X));
return getLeastSignificantSetBitIndex(X);
}
-INLINE u32 getRandomU32(u32 *State) {
+inline u32 getRandomU32(u32 *State) {
// ANSI C linear congruential PRNG (16-bit output).
// return (*State = *State * 1103515245 + 12345) >> 16;
// XorShift (32-bit output).
@@ -85,11 +85,11 @@ INLINE u32 getRandomU32(u32 *State) {
return *State;
}
-INLINE u32 getRandomModN(u32 *State, u32 N) {
+inline u32 getRandomModN(u32 *State, u32 N) {
return getRandomU32(State) % N; // [0, N)
}
-template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
if (N <= 1)
return;
u32 State = *RandState;
@@ -100,7 +100,7 @@ template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
// Hardware specific inlinable functions.
-INLINE void yieldProcessor(u8 Count) {
+inline void yieldProcessor(u8 Count) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__("" ::: "memory");
for (u8 I = 0; I < Count; I++)
@@ -117,7 +117,7 @@ INLINE void yieldProcessor(u8 Count) {
extern uptr PageSizeCached;
uptr getPageSizeSlow();
-INLINE uptr getPageSizeCached() {
+inline uptr getPageSizeCached() {
// Bionic uses a hardcoded value.
if (SCUDO_ANDROID)
return 4096U;
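The power-of-two helpers above are what the frontend in combined.h uses to place the user pointer past the chunk header. A worked example under assumed values (Block, HeaderSize and Alignment are hypothetical), with the helpers re-derived so the snippet builds on its own:

#include <cassert>
#include <cstdint>

constexpr uintptr_t roundUpTo(uintptr_t X, uintptr_t Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}
constexpr bool isAligned(uintptr_t X, uintptr_t Alignment) {
  return (X & (Alignment - 1)) == 0;
}

int main() {
  const uintptr_t Block = 0x1000, HeaderSize = 16, Alignment = 64;
  const uintptr_t Unaligned = Block + HeaderSize;         // 0x1010
  const uintptr_t User = roundUpTo(Unaligned, Alignment); // 0x1040
  assert(isAligned(User, Alignment));
  assert(User - Unaligned == 0x30); // stored in Header.Offset
}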
diff --git a/compiler-rt/lib/scudo/standalone/flags.cpp b/compiler-rt/lib/scudo/standalone/flags.cpp
index 1e970ae49505..dd9f050a2d20 100644
--- a/compiler-rt/lib/scudo/standalone/flags.cpp
+++ b/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -22,6 +22,13 @@ void Flags::setDefaults() {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "flags.inc"
#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ GWP_ASAN_##Name = DefaultValue;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
}
void registerFlags(FlagParser *Parser, Flags *F) {
@@ -30,6 +37,14 @@ void registerFlags(FlagParser *Parser, Flags *F) {
reinterpret_cast<void *>(&F->Name));
#include "flags.inc"
#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag("GWP_ASAN_" #Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->GWP_ASAN_##Name));
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
}
static const char *getCompileDefinitionScudoDefaultOptions() {
diff --git a/compiler-rt/lib/scudo/standalone/flags.h b/compiler-rt/lib/scudo/standalone/flags.h
index edd39a1b8ba9..2cd0a5b1334b 100644
--- a/compiler-rt/lib/scudo/standalone/flags.h
+++ b/compiler-rt/lib/scudo/standalone/flags.h
@@ -17,6 +17,14 @@ struct Flags {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "flags.inc"
#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Type GWP_ASAN_##Name;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
+
void setDefaults();
};
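The GWP_ASAN_OPTION blocks follow the classic X-macro pattern: a single option list, re-expanded with a different macro definition at the declaration site and at the defaults/registration sites. An equivalent single-file illustration (the patch pulls the list from gwp_asan/options.inc; MY_FLAGS and its entries here are hypothetical):

// One list macro stands in for the .inc file; each expansion site supplies
// its own definition of the per-option macro.
#define MY_FLAGS(X)                                                            \
  X(bool, Enabled, true)                                                       \
  X(int, SampleRate, 5000)

struct MyFlags {
#define DECLARE(Type, Name, Default) Type Name;
  MY_FLAGS(DECLARE) // expands to: bool Enabled; int SampleRate;
#undef DECLARE

  void setDefaults() {
#define SET(Type, Name, Default) Name = Default;
    MY_FLAGS(SET) // expands to: Enabled = true; SampleRate = 5000;
#undef SET
  }
};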
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.cpp b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
index 070c08b01938..be39fcd4f887 100644
--- a/compiler-rt/lib/scudo/standalone/flags_parser.cpp
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
@@ -108,7 +108,7 @@ void FlagParser::parseString(const char *S) {
Pos = OldPos;
}
-INLINE bool parseBool(const char *Value, bool *b) {
+inline bool parseBool(const char *Value, bool *b) {
if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
strncmp(Value, "false", 5) == 0) {
*b = false;
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.h b/compiler-rt/lib/scudo/standalone/flags_parser.h
index 857b50e880ec..32511f768c66 100644
--- a/compiler-rt/lib/scudo/standalone/flags_parser.h
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -29,7 +29,7 @@ public:
void printFlagDescriptions();
private:
- static const u32 MaxFlags = 12;
+ static const u32 MaxFlags = 16;
struct Flag {
const char *Name;
const char *Desc;
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
index 0a9483ae1dd0..b3d72de158cf 100644
--- a/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -29,7 +29,7 @@ void NORETURN die() { __builtin_trap(); }
// We zero-initialize the Extra parameter of map(), make sure this is consistent
// with ZX_HANDLE_INVALID.
-COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+static_assert(ZX_HANDLE_INVALID == 0, "");
static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
// Only scenario so far.
@@ -171,7 +171,7 @@ u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
- COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+ static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
return false;
_zx_cprng_draw(Buffer, Length);
diff --git a/compiler-rt/lib/scudo/standalone/internal_defs.h b/compiler-rt/lib/scudo/standalone/internal_defs.h
index 64ed238ebfec..8f6a89ecba73 100644
--- a/compiler-rt/lib/scudo/standalone/internal_defs.h
+++ b/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -30,7 +30,6 @@
#define INTERFACE __attribute__((visibility("default")))
#define WEAK __attribute__((weak))
-#define INLINE inline
#define ALWAYS_INLINE inline __attribute__((always_inline))
#define ALIAS(X) __attribute__((alias(X)))
// Please only use the ALIGNED macro before the type. Using ALIGNED after the
@@ -84,12 +83,12 @@ void NORETURN reportCheckFailed(const char *File, int Line,
#define CHECK_IMPL(C1, Op, C2) \
do { \
- u64 V1 = (u64)(C1); \
- u64 V2 = (u64)(C2); \
+ scudo::u64 V1 = (scudo::u64)(C1); \
+ scudo::u64 V2 = (scudo::u64)(C2); \
if (UNLIKELY(!(V1 Op V2))) { \
- reportCheckFailed(__FILE__, __LINE__, "(" #C1 ") " #Op " (" #C2 ")", V1, \
- V2); \
- die(); \
+ scudo::reportCheckFailed(__FILE__, __LINE__, \
+ "(" #C1 ") " #Op " (" #C2 ")", V1, V2); \
+ scudo::die(); \
} \
} while (false)
@@ -126,8 +125,6 @@ void NORETURN reportCheckFailed(const char *File, int Line,
die(); \
} while (0)
-#define COMPILER_CHECK(Pred) static_assert(Pred, "")
-
} // namespace scudo
#endif // SCUDO_INTERNAL_DEFS_H_
diff --git a/compiler-rt/lib/scudo/standalone/list.h b/compiler-rt/lib/scudo/standalone/list.h
index 6a7b9bd747a7..c3b898a328ca 100644
--- a/compiler-rt/lib/scudo/standalone/list.h
+++ b/compiler-rt/lib/scudo/standalone/list.h
@@ -13,43 +13,93 @@
namespace scudo {
-// Intrusive POD singly-linked list.
+// Intrusive POD singly and doubly linked list.
// An object with all zero fields should represent a valid empty list. clear()
// should be called on all non-zero-initialized objects before use.
-template <class Item> struct IntrusiveList {
- friend class Iterator;
+
+template <class T> class IteratorBase {
+public:
+ explicit IteratorBase(T *CurrentT) : Current(CurrentT) {}
+ IteratorBase &operator++() {
+ Current = Current->Next;
+ return *this;
+ }
+ bool operator!=(IteratorBase Other) const { return Current != Other.Current; }
+ T &operator*() { return *Current; }
+
+private:
+ T *Current;
+};
+
+template <class T> struct IntrusiveList {
+ bool empty() const { return Size == 0; }
+ uptr size() const { return Size; }
+
+ T *front() { return First; }
+ const T *front() const { return First; }
+ T *back() { return Last; }
+ const T *back() const { return Last; }
void clear() {
First = Last = nullptr;
Size = 0;
}
- bool empty() const { return Size == 0; }
- uptr size() const { return Size; }
+ typedef IteratorBase<T> Iterator;
+ typedef IteratorBase<const T> ConstIterator;
- void push_back(Item *X) {
- if (empty()) {
- X->Next = nullptr;
- First = Last = X;
- Size = 1;
- } else {
- X->Next = nullptr;
- Last->Next = X;
- Last = X;
- Size++;
+ Iterator begin() { return Iterator(First); }
+ Iterator end() { return Iterator(nullptr); }
+
+ ConstIterator begin() const { return ConstIterator(First); }
+ ConstIterator end() const { return ConstIterator(nullptr); }
+
+ void checkConsistency() const;
+
+protected:
+ uptr Size;
+ T *First;
+ T *Last;
+};
+
+template <class T> void IntrusiveList<T>::checkConsistency() const {
+ if (Size == 0) {
+ CHECK_EQ(First, nullptr);
+ CHECK_EQ(Last, nullptr);
+ } else {
+ uptr Count = 0;
+ for (T *I = First;; I = I->Next) {
+ Count++;
+ if (I == Last)
+ break;
}
+ CHECK_EQ(this->size(), Count);
+ CHECK_EQ(Last->Next, nullptr);
}
+}
- void push_front(Item *X) {
- if (empty()) {
- X->Next = nullptr;
- First = Last = X;
- Size = 1;
- } else {
- X->Next = First;
+template <class T> struct SinglyLinkedList : public IntrusiveList<T> {
+ using IntrusiveList<T>::First;
+ using IntrusiveList<T>::Last;
+ using IntrusiveList<T>::Size;
+ using IntrusiveList<T>::empty;
+
+ void push_back(T *X) {
+ X->Next = nullptr;
+ if (empty())
First = X;
- Size++;
- }
+ else
+ Last->Next = X;
+ Last = X;
+ Size++;
+ }
+
+ void push_front(T *X) {
+ if (empty())
+ Last = X;
+ X->Next = First;
+ First = X;
+ Size++;
}
void pop_front() {
@@ -60,7 +110,7 @@ template <class Item> struct IntrusiveList {
Size--;
}
- void extract(Item *Prev, Item *X) {
+ void extract(T *Prev, T *X) {
DCHECK(!empty());
DCHECK_NE(Prev, nullptr);
DCHECK_NE(X, nullptr);
@@ -71,84 +121,106 @@ template <class Item> struct IntrusiveList {
Size--;
}
- Item *front() { return First; }
- const Item *front() const { return First; }
- Item *back() { return Last; }
- const Item *back() const { return Last; }
-
- void append_front(IntrusiveList<Item> *L) {
+ void append_back(SinglyLinkedList<T> *L) {
DCHECK_NE(this, L);
if (L->empty())
return;
if (empty()) {
*this = *L;
- } else if (!L->empty()) {
- L->Last->Next = First;
- First = L->First;
+ } else {
+ Last->Next = L->First;
+ Last = L->Last;
Size += L->size();
}
L->clear();
}
+};
- void append_back(IntrusiveList<Item> *L) {
- DCHECK_NE(this, L);
- if (L->empty())
- return;
+template <class T> struct DoublyLinkedList : IntrusiveList<T> {
+ using IntrusiveList<T>::First;
+ using IntrusiveList<T>::Last;
+ using IntrusiveList<T>::Size;
+ using IntrusiveList<T>::empty;
+
+ void push_front(T *X) {
+ X->Prev = nullptr;
if (empty()) {
- *this = *L;
+ Last = X;
} else {
- Last->Next = L->First;
- Last = L->Last;
- Size += L->size();
+ DCHECK_EQ(First->Prev, nullptr);
+ First->Prev = X;
}
- L->clear();
+ X->Next = First;
+ First = X;
+ Size++;
+ }
+
+ // Inserts X before Y.
+ void insert(T *X, T *Y) {
+ if (Y == First)
+ return push_front(X);
+ T *Prev = Y->Prev;
+ // This is a hard CHECK to ensure consistency in the event of an intentional
+ // corruption of Y->Prev, to prevent a potential write-{4,8}.
+ CHECK_EQ(Prev->Next, Y);
+ Prev->Next = X;
+ X->Prev = Prev;
+ X->Next = Y;
+ Y->Prev = X;
+ Size++;
}
- void checkConsistency() {
- if (Size == 0) {
- CHECK_EQ(First, nullptr);
- CHECK_EQ(Last, nullptr);
+ void push_back(T *X) {
+ X->Next = nullptr;
+ if (empty()) {
+ First = X;
} else {
- uptr Count = 0;
- for (Item *I = First;; I = I->Next) {
- Count++;
- if (I == Last)
- break;
- }
- CHECK_EQ(size(), Count);
- CHECK_EQ(Last->Next, nullptr);
+ DCHECK_EQ(Last->Next, nullptr);
+ Last->Next = X;
}
+ X->Prev = Last;
+ Last = X;
+ Size++;
}
- template <class ItemT> class IteratorBase {
- public:
- explicit IteratorBase(ItemT *CurrentItem) : Current(CurrentItem) {}
- IteratorBase &operator++() {
- Current = Current->Next;
- return *this;
+ void pop_front() {
+ DCHECK(!empty());
+ First = First->Next;
+ if (!First)
+ Last = nullptr;
+ else
+ First->Prev = nullptr;
+ Size--;
+ }
+
+ // The consistency of the adjacent links is aggressively checked in order to
+ // catch potential corruption attempts that could yield a mirrored
+ // write-{4,8} primitive. nullptr checks are deemed less vital.
+ void remove(T *X) {
+ T *Prev = X->Prev;
+ T *Next = X->Next;
+ if (Prev) {
+ CHECK_EQ(Prev->Next, X);
+ Prev->Next = Next;
}
- bool operator!=(IteratorBase Other) const {
- return Current != Other.Current;
+ if (Next) {
+ CHECK_EQ(Next->Prev, X);
+ Next->Prev = Prev;
}
- ItemT &operator*() { return *Current; }
-
- private:
- ItemT *Current;
- };
-
- typedef IteratorBase<Item> Iterator;
- typedef IteratorBase<const Item> ConstIterator;
-
- Iterator begin() { return Iterator(First); }
- Iterator end() { return Iterator(nullptr); }
-
- ConstIterator begin() const { return ConstIterator(First); }
- ConstIterator end() const { return ConstIterator(nullptr); }
-
-private:
- uptr Size;
- Item *First;
- Item *Last;
+ if (First == X) {
+ DCHECK_EQ(Prev, nullptr);
+ First = Next;
+ } else {
+ DCHECK_NE(Prev, nullptr);
+ }
+ if (Last == X) {
+ DCHECK_EQ(Next, nullptr);
+ Last = Prev;
+ } else {
+ DCHECK_NE(Next, nullptr);
+ }
+ Size--;
+ }
};
} // namespace scudo
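Being intrusive, these lists never allocate: the element type carries the link fields itself, Next for SinglyLinkedList and both Next and Prev for DoublyLinkedList. A usage sketch, assuming list.h is included (Node and listExample are hypothetical):

namespace scudo {

struct Node {
  Node *Next; // required by SinglyLinkedList
  Node *Prev; // additionally required by DoublyLinkedList
  uptr Payload;
};

inline void listExample() {
  static Node A = {}, B = {}, C = {}, D = {};

  SinglyLinkedList<Node> SList;
  SList.clear(); // non-zero-initialized objects must be cleared before use
  SList.push_back(&A);
  SList.push_front(&B);
  SList.checkConsistency(); // CHECKs the Size/First/Last invariants

  DoublyLinkedList<Node> DList;
  DList.clear();
  DList.push_back(&C);
  DList.push_front(&D);
  DList.remove(&C); // hard-CHECKs the adjacent links before unlinking
}

} // namespace scudo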
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index a897a566f9bf..a4c2a0b26603 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -9,13 +9,17 @@
#ifndef SCUDO_PLATFORM_H_
#define SCUDO_PLATFORM_H_
+// Transitive includes of stdint.h specify some of the defines checked below.
+#include <stdint.h>
+
#if defined(__linux__)
#define SCUDO_LINUX 1
#else
#define SCUDO_LINUX 0
#endif
-#if defined(__ANDROID__)
+// See https://android.googlesource.com/platform/bionic/+/master/docs/defines.md
+#if defined(__BIONIC__)
#define SCUDO_ANDROID 1
#else
#define SCUDO_ANDROID 0
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 9123d07b49b9..e296a78778e0 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -42,7 +42,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
public:
typedef SizeClassMapT SizeClassMap;
// Regions should be large enough to hold the largest Block.
- COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+ static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
@@ -123,13 +123,26 @@ public:
}
void disable() {
- for (uptr I = 0; I < NumClasses; I++)
- getSizeClassInfo(I)->Mutex.lock();
+ // The BatchClassId must be locked last since other classes can use it.
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
+ if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
+ continue;
+ getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
+ }
+ getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
+ RegionsStashMutex.lock();
+ PossibleRegions.disable();
}
void enable() {
- for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
- getSizeClassInfo(static_cast<uptr>(I))->Mutex.unlock();
+ PossibleRegions.enable();
+ RegionsStashMutex.unlock();
+ getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ getSizeClassInfo(I)->Mutex.unlock();
+ }
}
template <typename F> void iterateOverBlocks(F Callback) {
@@ -197,14 +210,14 @@ private:
struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
HybridMutex Mutex;
- IntrusiveList<TransferBatch> FreeList;
+ SinglyLinkedList<TransferBatch> FreeList;
SizeClassStats Stats;
bool CanRelease;
u32 RandState;
uptr AllocatedUser;
ReleaseToOsInfo ReleaseInfo;
};
- COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+ static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
uptr computeRegionId(uptr Mem) {
const uptr Id = Mem >> RegionSizeLog;
@@ -300,10 +313,10 @@ private:
const uptr NumberOfBlocks = RegionSize / Size;
DCHECK_GT(NumberOfBlocks, 0);
TransferBatch *B = nullptr;
- constexpr uptr ShuffleArraySize = 48;
+ constexpr u32 ShuffleArraySize = 8U * TransferBatch::MaxNumCached;
void *ShuffleArray[ShuffleArraySize];
u32 Count = 0;
- const uptr AllocatedUser = NumberOfBlocks * Size;
+ const uptr AllocatedUser = Size * NumberOfBlocks;
for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
ShuffleArray[Count++] = reinterpret_cast<void *>(I);
if (Count == ShuffleArraySize) {
@@ -319,6 +332,11 @@ private:
return nullptr;
}
DCHECK(B);
+ if (!Sci->FreeList.empty()) {
+ Sci->FreeList.push_back(B);
+ B = Sci->FreeList.front();
+ Sci->FreeList.pop_front();
+ }
DCHECK_GT(B->getCount(), 0);
C->getStats().add(StatFree, AllocatedUser);
@@ -376,7 +394,7 @@ private:
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
if (PossibleRegions[I] == ClassId) {
ReleaseRecorder Recorder(I * RegionSize);
- releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
+ releaseFreeMemoryToOS(Sci->FreeList, I * RegionSize,
RegionSize / PageSize, BlockSize, &Recorder);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 8f443ea7fa3f..ef02f0b772d6 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -125,13 +125,22 @@ public:
}
void disable() {
- for (uptr I = 0; I < NumClasses; I++)
- getRegionInfo(I)->Mutex.lock();
+ // The BatchClassId must be locked last since other classes can use it.
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
+ if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
+ continue;
+ getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
+ }
+ getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
}
void enable() {
- for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
- getRegionInfo(static_cast<uptr>(I))->Mutex.unlock();
+ getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ getRegionInfo(I)->Mutex.unlock();
+ }
}
template <typename F> void iterateOverBlocks(F Callback) const {
@@ -187,6 +196,8 @@ private:
// Call map for user memory with at least this size.
static const uptr MapSizeIncrement = 1UL << 17;
+ // Fill at most this number of batches from the newly mapped memory.
+ static const u32 MaxNumBatches = 8U;
struct RegionStats {
uptr PoppedBlocks;
@@ -202,7 +213,7 @@ private:
struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
HybridMutex Mutex;
- IntrusiveList<TransferBatch> FreeList;
+ SinglyLinkedList<TransferBatch> FreeList;
RegionStats Stats;
bool CanRelease;
bool Exhausted;
@@ -213,7 +224,7 @@ private:
MapPlatformData Data;
ReleaseToOsInfo ReleaseInfo;
};
- COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+ static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
uptr PrimaryBase;
RegionInfo *RegionInfoArray;
@@ -289,16 +300,18 @@ private:
C->getStats().add(StatMapped, UserMapSize);
}
- const uptr NumberOfBlocks = Min(
- 8UL * MaxCount, (Region->MappedUser - Region->AllocatedUser) / Size);
+ const u32 NumberOfBlocks = Min(
+ MaxNumBatches * MaxCount,
+ static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
DCHECK_GT(NumberOfBlocks, 0);
TransferBatch *B = nullptr;
- constexpr uptr ShuffleArraySize = 48;
+ constexpr u32 ShuffleArraySize =
+ MaxNumBatches * TransferBatch::MaxNumCached;
void *ShuffleArray[ShuffleArraySize];
u32 Count = 0;
const uptr P = RegionBeg + Region->AllocatedUser;
- const uptr AllocatedUser = NumberOfBlocks * Size;
+ const uptr AllocatedUser = Size * NumberOfBlocks;
for (uptr I = P; I < P + AllocatedUser; I += Size) {
ShuffleArray[Count++] = reinterpret_cast<void *>(I);
if (Count == ShuffleArraySize) {
@@ -314,6 +327,11 @@ private:
return nullptr;
}
DCHECK(B);
+ if (!Region->FreeList.empty()) {
+ Region->FreeList.push_back(B);
+ B = Region->FreeList.front();
+ Region->FreeList.pop_front();
+ }
DCHECK_GT(B->getCount(), 0);
C->getStats().add(StatFree, AllocatedUser);
@@ -372,7 +390,7 @@ private:
}
ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
- releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
+ releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
BlockSize, &Recorder);
diff --git a/compiler-rt/lib/scudo/standalone/quarantine.h b/compiler-rt/lib/scudo/standalone/quarantine.h
index 35fd0bc197ea..406a0e23804d 100644
--- a/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -59,7 +59,7 @@ struct QuarantineBatch {
void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};
-COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8Kb.
+static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
@@ -160,7 +160,7 @@ public:
}
private:
- IntrusiveList<QuarantineBatch> List;
+ SinglyLinkedList<QuarantineBatch> List;
atomic_uptr Size;
void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
@@ -205,7 +205,7 @@ public:
ScopedLock L(CacheMutex);
Cache.transfer(C);
}
- if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
+ if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
recycle(atomic_load_relaxed(&MinSize), Cb);
}
@@ -214,7 +214,7 @@ public:
ScopedLock L(CacheMutex);
Cache.transfer(C);
}
- RecyleMutex.lock();
+ RecycleMutex.lock();
recycle(0, Cb);
}
@@ -225,11 +225,22 @@ public:
getMaxSize() >> 10, getCacheSize() >> 10);
}
+ void disable() {
+ // RecycleMutex must be locked first since we grab CacheMutex within recycle.
+ RecycleMutex.lock();
+ CacheMutex.lock();
+ }
+
+ void enable() {
+ CacheMutex.unlock();
+ RecycleMutex.unlock();
+ }
+
private:
// Read-only data.
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
CacheT Cache;
- alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
atomic_uptr MinSize;
atomic_uptr MaxSize;
alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
@@ -261,7 +272,7 @@ private:
while (Cache.getSize() > MinSize)
Tmp.enqueueBatch(Cache.dequeueBatch());
}
- RecyleMutex.unlock();
+ RecycleMutex.unlock();
doRecycle(&Tmp, Cb);
}
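The disable()/enable() pair above encodes a fixed lock hierarchy: RecycleMutex is acquired first because recycle() grabs CacheMutex while holding it, and enable() releases in exact reverse order. A minimal sketch of the same discipline with standard mutexes (PausableQuarantine is a hypothetical stand-in, not the patch's HybridMutex):

#include <mutex>

struct PausableQuarantine {
  std::mutex RecycleMutex, CacheMutex;

  void disable() {
    RecycleMutex.lock(); // first: recycle() takes CacheMutex while held
    CacheMutex.lock();
  }
  void enable() {
    CacheMutex.unlock(); // reverse acquisition order
    RecycleMutex.unlock();
  }
};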
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index 4fe29fde4bde..4b5c56ce7c19 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -149,7 +149,7 @@ private:
template <class TransferBatchT, class ReleaseRecorderT>
NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
uptr AllocatedPagesCount, uptr BlockSize,
ReleaseRecorderT *Recorder) {
const uptr PageSize = getPageSizeCached();
@@ -199,18 +199,18 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
// allocated page.
if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
// Each chunk affects one page only.
- for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
- for (u32 I = 0; I < (*It).getCount(); I++) {
- const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ for (const auto &It : FreeList) {
+ for (u32 I = 0; I < It.getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>(It.get(I));
if (P >= Base && P < End)
Counters.inc((P - Base) >> PageSizeLog);
}
}
} else {
// In all other cases chunks might affect more than one page.
- for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
- for (u32 I = 0; I < (*It).getCount(); I++) {
- const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ for (const auto &It : FreeList) {
+ for (u32 I = 0; I < It.getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>(It.get(I));
if (P >= Base && P < End)
Counters.incRange((P - Base) >> PageSizeLog,
(P - Base + BlockSize - 1) >> PageSizeLog);
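The counter updates above map each free chunk to the page indices it covers. A self-contained sketch of that arithmetic, with an assumed 4 KiB page size (PageSizeLog = 12) rather than the value from getPageSizeCached():

    #include <cstdint>

    constexpr uint64_t PageSizeLog = 12; // assumed 4 KiB pages

    // First and last page indices (relative to Base) that a block touches.
    constexpr uint64_t firstPage(uint64_t P, uint64_t Base) {
      return (P - Base) >> PageSizeLog;
    }
    constexpr uint64_t lastPage(uint64_t P, uint64_t Base, uint64_t BlockSize) {
      return (P - Base + BlockSize - 1) >> PageSizeLog;
    }

    // A 6 KiB block at Base + 0x800 straddles pages 0 and 1, so neither page
    // can be released even though only part of each is occupied.
    static_assert(firstPage(0x10800, 0x10000) == 0, "");
    static_assert(lastPage(0x10800, 0x10000, 0x1800) == 1, "");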
diff --git a/compiler-rt/lib/scudo/standalone/report.cpp b/compiler-rt/lib/scudo/standalone/report.cpp
index 12d851ff019a..80cc6eda2af9 100644
--- a/compiler-rt/lib/scudo/standalone/report.cpp
+++ b/compiler-rt/lib/scudo/standalone/report.cpp
@@ -34,7 +34,7 @@ private:
ScopedString Message;
};
-INLINE void NORETURN trap() { __builtin_trap(); }
+inline void NORETURN trap() { __builtin_trap(); }
// This could potentially be called recursively if a CHECK fails in the reports.
void NORETURN reportCheckFailed(const char *File, int Line,
diff --git a/compiler-rt/lib/scudo/standalone/secondary.cpp b/compiler-rt/lib/scudo/standalone/secondary.cpp
deleted file mode 100644
index db7361d7134a..000000000000
--- a/compiler-rt/lib/scudo/standalone/secondary.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-//===-- secondary.cpp -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "secondary.h"
-
-#include "string_utils.h"
-
-namespace scudo {
-
-// As with the Primary, the size passed to this function includes any desired
-// alignment, so that the frontend can align the user allocation. The hint
-// parameter allows us to unmap spurious memory when dealing with larger
-// (greater than a page) alignments on 32-bit platforms.
-// Due to the sparsity of address space available on those platforms, requesting
-// an allocation from the Secondary with a large alignment would end up wasting
-// VA space (even though we are not committing the whole thing), hence the need
-// to trim off some of the reserved space.
-// For allocations requested with an alignment greater than or equal to a page,
-// the committed memory will amount to something close to Size - AlignmentHint
-// (pending rounding and headers).
-void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
- DCHECK_GT(Size, AlignmentHint);
- const uptr PageSize = getPageSizeCached();
- const uptr MapSize =
- roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
- MapPlatformData Data = {};
- uptr MapBase =
- reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
- MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
- if (UNLIKELY(!MapBase))
- return nullptr;
- uptr CommitBase = MapBase + PageSize;
- uptr MapEnd = MapBase + MapSize;
-
- // In the unlikely event of alignments larger than a page, adjust the amount
- // of memory we want to commit, and trim the extra memory.
- if (UNLIKELY(AlignmentHint >= PageSize)) {
- // For alignments greater than or equal to a page, the user pointer (eg: the
- // pointer that is returned by the C or C++ allocation APIs) ends up on a
- // page boundary , and our headers will live in the preceding page.
- CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
- const uptr NewMapBase = CommitBase - PageSize;
- DCHECK_GE(NewMapBase, MapBase);
- // We only trim the extra memory on 32-bit platforms: 64-bit platforms
- // are less constrained memory wise, and that saves us two syscalls.
- if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
- unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
- MapBase = NewMapBase;
- }
- const uptr NewMapEnd = CommitBase + PageSize +
- roundUpTo((Size - AlignmentHint), PageSize) +
- PageSize;
- DCHECK_LE(NewMapEnd, MapEnd);
- if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
- unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
- MapEnd = NewMapEnd;
- }
- }
-
- const uptr CommitSize = MapEnd - PageSize - CommitBase;
- const uptr Ptr =
- reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
- CommitSize, "scudo:secondary", 0, &Data));
- LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
- H->MapBase = MapBase;
- H->MapSize = MapEnd - MapBase;
- H->BlockEnd = CommitBase + CommitSize;
- H->Data = Data;
- {
- ScopedLock L(Mutex);
- if (LIKELY(Tail)) {
- Tail->Next = H;
- H->Prev = Tail;
- }
- Tail = H;
- AllocatedBytes += CommitSize;
- if (LargestSize < CommitSize)
- LargestSize = CommitSize;
- NumberOfAllocs++;
- Stats.add(StatAllocated, CommitSize);
- Stats.add(StatMapped, H->MapSize);
- }
- if (BlockEnd)
- *BlockEnd = CommitBase + CommitSize;
- return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
-}
-
-void MapAllocator::deallocate(void *Ptr) {
- LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
- {
- ScopedLock L(Mutex);
- LargeBlock::Header *Prev = H->Prev;
- LargeBlock::Header *Next = H->Next;
- if (Prev) {
- CHECK_EQ(Prev->Next, H);
- Prev->Next = Next;
- }
- if (Next) {
- CHECK_EQ(Next->Prev, H);
- Next->Prev = Prev;
- }
- if (UNLIKELY(Tail == H)) {
- CHECK(!Next);
- Tail = Prev;
- } else {
- CHECK(Next);
- }
- const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
- FreedBytes += CommitSize;
- NumberOfFrees++;
- Stats.sub(StatAllocated, CommitSize);
- Stats.sub(StatMapped, H->MapSize);
- }
- void *Addr = reinterpret_cast<void *>(H->MapBase);
- const uptr Size = H->MapSize;
- MapPlatformData Data;
- Data = H->Data;
- unmap(Addr, Size, UNMAP_ALL, &Data);
-}
-
-void MapAllocator::getStats(ScopedString *Str) const {
- Str->append(
- "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
- "(%zuK), remains %zu (%zuK) max %zuM\n",
- NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
- NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
- LargestSize >> 20);
-}
-
-} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 9d074a57c772..ab68e5a1d38d 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -10,6 +10,7 @@
#define SCUDO_SECONDARY_H_
#include "common.h"
+#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"
@@ -47,8 +48,12 @@ static Header *getHeader(const void *Ptr) {
} // namespace LargeBlock
-class MapAllocator {
+template <uptr MaxFreeListSize = 32U> class MapAllocator {
public:
+ // Ensure the freelist is disabled on Fuchsia, since it doesn't support
+ // releasing Secondary blocks yet.
+ static_assert(!SCUDO_FUCHSIA || MaxFreeListSize == 0U, "");
+
void initLinkerInitialized(GlobalStats *S) {
Stats.initLinkerInitialized();
if (LIKELY(S))
@@ -59,7 +64,8 @@ public:
initLinkerInitialized(S);
}
- void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+ void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
+ bool ZeroContents = false);
void deallocate(void *Ptr);
@@ -78,13 +84,17 @@ public:
void enable() { Mutex.unlock(); }
template <typename F> void iterateOverBlocks(F Callback) const {
- for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
- Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+ for (const auto &H : InUseBlocks)
+ Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
}
+ static uptr getMaxFreeListSize() { return MaxFreeListSize; }
+
private:
HybridMutex Mutex;
- LargeBlock::Header *Tail;
+ DoublyLinkedList<LargeBlock::Header> InUseBlocks;
+ // The free list is sorted based on the committed size of blocks.
+ DoublyLinkedList<LargeBlock::Header> FreeBlocks;
uptr AllocatedBytes;
uptr FreedBytes;
uptr LargestSize;
@@ -93,6 +103,158 @@ private:
LocalStats Stats;
};
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+template <uptr MaxFreeListSize>
+void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
+ uptr *BlockEnd,
+ bool ZeroContents) {
+ DCHECK_GE(Size, AlignmentHint);
+ const uptr PageSize = getPageSizeCached();
+ const uptr RoundedSize =
+ roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
+
+ if (MaxFreeListSize && AlignmentHint < PageSize) {
+ ScopedLock L(Mutex);
+ for (auto &H : FreeBlocks) {
+ const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
+ if (FreeBlockSize < RoundedSize)
+ continue;
+ // A candidate free block must be at most 4 pages larger than the request.
+ if (FreeBlockSize > RoundedSize + 4 * PageSize)
+ break;
+ FreeBlocks.remove(&H);
+ InUseBlocks.push_back(&H);
+ AllocatedBytes += FreeBlockSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, FreeBlockSize);
+ if (BlockEnd)
+ *BlockEnd = H.BlockEnd;
+ void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
+ LargeBlock::getHeaderSize());
+ if (ZeroContents)
+ memset(Ptr, 0, H.BlockEnd - reinterpret_cast<uptr>(Ptr));
+ return Ptr;
+ }
+ }
+
+ MapPlatformData Data = {};
+ const uptr MapSize = RoundedSize + 2 * PageSize;
+ uptr MapBase =
+ reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+ MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ if (UNLIKELY(!MapBase))
+ return nullptr;
+ uptr CommitBase = MapBase + PageSize;
+ uptr MapEnd = MapBase + MapSize;
+
+ // In the unlikely event of alignments larger than a page, adjust the amount
+ // of memory we want to commit, and trim the extra memory.
+ if (UNLIKELY(AlignmentHint >= PageSize)) {
+ // For alignments greater than or equal to a page, the user pointer (e.g.,
+ // the pointer returned by the C or C++ allocation APIs) ends up on a page
+ // boundary, and our headers will live in the preceding page.
+ CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+ const uptr NewMapBase = CommitBase - PageSize;
+ DCHECK_GE(NewMapBase, MapBase);
+ // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+ // are less constrained memory-wise, and that saves us two syscalls.
+ if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+ unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MapBase = NewMapBase;
+ }
+ const uptr NewMapEnd = CommitBase + PageSize +
+ roundUpTo((Size - AlignmentHint), PageSize) +
+ PageSize;
+ DCHECK_LE(NewMapEnd, MapEnd);
+ if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+ unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MapEnd = NewMapEnd;
+ }
+ }
+
+ const uptr CommitSize = MapEnd - PageSize - CommitBase;
+ const uptr Ptr =
+ reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+ CommitSize, "scudo:secondary", 0, &Data));
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+ H->MapBase = MapBase;
+ H->MapSize = MapEnd - MapBase;
+ H->BlockEnd = CommitBase + CommitSize;
+ H->Data = Data;
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.push_back(H);
+ AllocatedBytes += CommitSize;
+ if (LargestSize < CommitSize)
+ LargestSize = CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, CommitSize);
+ Stats.add(StatMapped, H->MapSize);
+ }
+ if (BlockEnd)
+ *BlockEnd = CommitBase + CommitSize;
+ return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) {
+ LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+ const uptr Block = reinterpret_cast<uptr>(H);
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.remove(H);
+ const uptr CommitSize = H->BlockEnd - Block;
+ FreedBytes += CommitSize;
+ NumberOfFrees++;
+ Stats.sub(StatAllocated, CommitSize);
+ if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) {
+ bool Inserted = false;
+ for (auto &F : FreeBlocks) {
+ const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F);
+ if (FreeBlockSize >= CommitSize) {
+ FreeBlocks.insert(H, &F);
+ Inserted = true;
+ break;
+ }
+ }
+ if (!Inserted)
+ FreeBlocks.push_back(H);
+ const uptr RoundedAllocationStart =
+ roundUpTo(Block + LargeBlock::getHeaderSize(), getPageSizeCached());
+ MapPlatformData Data = H->Data;
+ // TODO(kostyak): use release_to_os_interval_ms
+ releasePagesToOS(Block, RoundedAllocationStart - Block,
+ H->BlockEnd - RoundedAllocationStart, &Data);
+ return;
+ }
+ Stats.sub(StatMapped, H->MapSize);
+ }
+ void *Addr = reinterpret_cast<void *>(H->MapBase);
+ const uptr Size = H->MapSize;
+ MapPlatformData Data = H->Data;
+ unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const {
+ Str->append(
+ "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+ "(%zuK), remains %zu (%zuK) max %zuM\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+ NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+ LargestSize >> 20);
+}
+
} // namespace scudo
#endif // SCUDO_SECONDARY_H_
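To make the alignment-trimming arithmetic in allocate() concrete, here is a small worked example; the page size, alignment hint, and MapBase below are assumed values for illustration, not taken from the source:

    #include <cstdint>

    constexpr uint64_t roundUpTo(uint64_t X, uint64_t Boundary) {
      return (X + Boundary - 1) & ~(Boundary - 1);
    }

    // Assumed values only: 4 KiB pages, a 64 KiB alignment hint, and a
    // MapBase that map() could plausibly return.
    constexpr uint64_t PageSize = 0x1000;
    constexpr uint64_t AlignmentHint = 0x10000;
    constexpr uint64_t MapBase = 0x7f0000003000;

    // The header page sits just below the aligned user address.
    constexpr uint64_t CommitBase =
        roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
    static_assert(CommitBase == 0x7f000000f000, "");
    static_assert((CommitBase + PageSize) % AlignmentHint == 0,
                  "user pointer lands exactly on the alignment boundary");
    static_assert(CommitBase - PageSize >= MapBase,
                  "the trimmed prefix [MapBase, NewMapBase) is well-formed");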
diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
index dfef0865b9d9..947526e8aea1 100644
--- a/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -49,7 +49,7 @@ public:
static const uptr MaxSize = 1UL << MaxSizeLog;
static const uptr NumClasses =
MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
- COMPILER_CHECK(NumClasses <= 256);
+ static_assert(NumClasses <= 256, "");
static const uptr LargestClassId = NumClasses - 1;
static const uptr BatchClassId = 0;
@@ -120,7 +120,8 @@ public:
if (C < LargestClassId)
CHECK_EQ(getClassIdBySize(S + 1), C + 1);
CHECK_EQ(getClassIdBySize(S - 1), C);
- CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
+ if (C - 1 != BatchClassId)
+ CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
}
// Do not perform the loop if the maximum size is too large.
if (MaxSizeLog > 19)
@@ -129,7 +130,7 @@ public:
const uptr C = getClassIdBySize(S);
CHECK_LT(C, NumClasses);
CHECK_GE(getSizeByClassId(C), S);
- if (C > 0)
+ if (C - 1 != BatchClassId)
CHECK_LT(getSizeByClassId(C - 1), S);
}
}
diff --git a/compiler-rt/lib/scudo/standalone/stats.h b/compiler-rt/lib/scudo/standalone/stats.h
index 16ef5b89b854..38481e98e48d 100644
--- a/compiler-rt/lib/scudo/standalone/stats.h
+++ b/compiler-rt/lib/scudo/standalone/stats.h
@@ -10,6 +10,7 @@
#define SCUDO_STATS_H_
#include "atomic_helpers.h"
+#include "list.h"
#include "mutex.h"
#include <string.h>
@@ -45,20 +46,17 @@ public:
uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
-private:
- friend class GlobalStats;
- atomic_uptr StatsArray[StatCount];
LocalStats *Next;
LocalStats *Prev;
+
+private:
+ atomic_uptr StatsArray[StatCount];
};
// Global stats, used for aggregation and querying.
class GlobalStats : public LocalStats {
public:
- void initLinkerInitialized() {
- Next = this;
- Prev = this;
- }
+ void initLinkerInitialized() {}
void init() {
memset(this, 0, sizeof(*this));
initLinkerInitialized();
@@ -66,38 +64,35 @@ public:
void link(LocalStats *S) {
ScopedLock L(Mutex);
- S->Next = Next;
- S->Prev = this;
- Next->Prev = S;
- Next = S;
+ StatsList.push_back(S);
}
void unlink(LocalStats *S) {
ScopedLock L(Mutex);
- S->Prev->Next = S->Next;
- S->Next->Prev = S->Prev;
+ StatsList.remove(S);
for (uptr I = 0; I < StatCount; I++)
add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
}
void get(uptr *S) const {
- memset(S, 0, StatCount * sizeof(uptr));
ScopedLock L(Mutex);
- const LocalStats *Stats = this;
- for (;;) {
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] = LocalStats::get(static_cast<StatType>(I));
+ for (const auto &Stats : StatsList) {
for (uptr I = 0; I < StatCount; I++)
- S[I] += Stats->get(static_cast<StatType>(I));
- Stats = Stats->Next;
- if (Stats == this)
- break;
+ S[I] += Stats.get(static_cast<StatType>(I));
}
// All stats must be non-negative.
for (uptr I = 0; I < StatCount; I++)
S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
}
+ void disable() { Mutex.lock(); }
+ void enable() { Mutex.unlock(); }
+
private:
mutable HybridMutex Mutex;
+ DoublyLinkedList<LocalStats> StatsList;
};
} // namespace scudo
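The reworked get() sums the GlobalStats instance's own counters with those of every LocalStats on the list, then clamps negatives. A simplified model of that aggregation, using std::list and plain counters in place of the intrusive DoublyLinkedList and relaxed atomics:

    #include <array>
    #include <list>

    constexpr int StatCount = 2; // stand-ins for StatAllocated, StatMapped

    struct LocalStatsModel {
      std::array<long, StatCount> StatsArray{};
    };

    void get(const LocalStatsModel &Global,
             const std::list<LocalStatsModel *> &StatsList, long *S) {
      for (int I = 0; I < StatCount; I++)
        S[I] = Global.StatsArray[I]; // the global instance's own counters
      for (const LocalStatsModel *Stats : StatsList)
        for (int I = 0; I < StatCount; I++)
          S[I] += Stats->StatsArray[I]; // plus every linked thread's counters
      for (int I = 0; I < StatCount; I++)
        if (S[I] < 0)
          S[I] = 0; // clamp, mirroring the "all stats non-negative" pass
    }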
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
index f24ff01960fb..20f0d69cabfd 100644
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -14,6 +14,7 @@
#include "mutex.h"
#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
+#include <pthread.h>
// With some build setups, this might still not be defined.
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
@@ -38,7 +39,7 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
void commitBack(Allocator *Instance) { Instance->commitBack(this); }
- INLINE bool tryLock() {
+ inline bool tryLock() {
if (Mutex.tryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
@@ -49,12 +50,12 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
return false;
}
- INLINE void lock() {
+ inline void lock() {
atomic_store_relaxed(&Precedence, 0);
Mutex.lock();
}
- INLINE void unlock() { Mutex.unlock(); }
- INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+ inline void unlock() { Mutex.unlock(); }
+ inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
private:
HybridMutex Mutex;
diff --git a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index 971ae4857fca..69479ea7bdf4 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -11,8 +11,6 @@
#include "tsd.h"
-#include <pthread.h>
-
namespace scudo {
enum class ThreadState : u8 {
@@ -48,7 +46,8 @@ template <class Allocator> struct TSDRegistryExT {
}
ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
- if (LIKELY(State == ThreadState::Initialized)) {
+ if (LIKELY(State == ThreadState::Initialized &&
+ !atomic_load(&Disabled, memory_order_acquire))) {
*UnlockRequired = false;
return &ThreadTSD;
}
@@ -58,6 +57,20 @@ template <class Allocator> struct TSDRegistryExT {
return FallbackTSD;
}
+ // To disable the exclusive TSD registry, we effectively lock the fallback TSD
+ // and force all threads to attempt to use it instead of their local one.
+ void disable() {
+ Mutex.lock();
+ FallbackTSD->lock();
+ atomic_store(&Disabled, 1U, memory_order_release);
+ }
+
+ void enable() {
+ atomic_store(&Disabled, 0U, memory_order_release);
+ FallbackTSD->unlock();
+ Mutex.unlock();
+ }
+
private:
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
@@ -77,10 +90,12 @@ private:
pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
ThreadTSD.initLinkerInitialized(Instance);
State = ThreadState::Initialized;
+ Instance->callPostInitCallback();
}
pthread_key_t PThreadKey;
bool Initialized;
+ atomic_u8 Disabled;
TSD<Allocator> *FallbackTSD;
HybridMutex Mutex;
static THREADLOCAL ThreadState State;
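The new Disabled flag pairs release stores in disable()/enable() with an acquire load on the fast path: a thread that still observes 0 keeps using its local TSD, while one that observes 1 falls through to the fallback TSD, whose lock disable() already holds. A minimal sketch of that publication pattern (names are hypothetical, and the real fast path also checks ThreadState::Initialized):

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    std::atomic<uint8_t> Disabled{0};
    std::mutex FallbackLock;
    int LocalTSD, FallbackTSD; // stand-ins for TSD<Allocator> instances

    int *getTSD(bool *UnlockRequired) {
      if (!Disabled.load(std::memory_order_acquire)) {
        *UnlockRequired = false;
        return &LocalTSD;       // uncontended per-thread fast path
      }
      FallbackLock.lock();      // blocks while the registry is disabled
      *UnlockRequired = true;   // the caller unlocks when done
      return &FallbackTSD;
    }

    void disable() {
      FallbackLock.lock();                          // park threads on the fallback
      Disabled.store(1, std::memory_order_release); // divert fast-path users
    }

    void enable() {
      Disabled.store(0, std::memory_order_release);
      FallbackLock.unlock();
    }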
diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h
index da88a897b8f5..5ab8269519a9 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -12,8 +12,6 @@
#include "linux.h" // for getAndroidTlsPtr()
#include "tsd.h"
-#include <pthread.h>
-
namespace scudo {
template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
@@ -50,6 +48,8 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
void unmapTestOnly() {
unmap(reinterpret_cast<void *>(TSDs),
sizeof(TSD<Allocator>) * NumberOfTSDs);
+ setCurrentTSD(nullptr);
+ pthread_key_delete(PThreadKey);
}
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
@@ -70,9 +70,21 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
return getTSDAndLockSlow(TSD);
}
+ void disable() {
+ Mutex.lock();
+ for (u32 I = 0; I < NumberOfTSDs; I++)
+ TSDs[I].lock();
+ }
+
+ void enable() {
+ for (s32 I = NumberOfTSDs - 1; I >= 0; I--)
+ TSDs[I].unlock();
+ Mutex.unlock();
+ }
+
private:
ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
-#if SCUDO_ANDROID
+#if _BIONIC
*getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
#elif SCUDO_LINUX
ThreadTSD = CurrentTSD;
@@ -84,7 +96,7 @@ private:
}
ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
-#if SCUDO_ANDROID
+#if _BIONIC
return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
#elif SCUDO_LINUX
return ThreadTSD;
@@ -105,6 +117,7 @@ private:
// Initial context assignment is done in a plain round-robin fashion.
const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+ Instance->callPostInitCallback();
}
NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
@@ -152,12 +165,12 @@ private:
u32 CoPrimes[MaxTSDCount];
bool Initialized;
HybridMutex Mutex;
-#if SCUDO_LINUX && !SCUDO_ANDROID
+#if SCUDO_LINUX && !_BIONIC
static THREADLOCAL TSD<Allocator> *ThreadTSD;
#endif
};
-#if SCUDO_LINUX && !SCUDO_ANDROID
+#if SCUDO_LINUX && !_BIONIC
template <class Allocator, u32 MaxTSDCount>
THREADLOCAL TSD<Allocator>
*TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
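In the shared registry, disable() is effectively stop-the-world: it takes the registry mutex and then every TSD lock in ascending index order, and enable() releases them in the reverse order. A compact sketch of the same pattern with stand-in types:

    #include <mutex>
    #include <vector>

    std::mutex RegistryMutex;
    std::vector<std::mutex> TSDLocks(4); // stand-in for the shared TSD array

    void disableAll() {
      RegistryMutex.lock();
      for (auto &M : TSDLocks) // ascending acquisition order
        M.lock();
    }

    void enableAll() {
      for (auto It = TSDLocks.rbegin(); It != TSDLocks.rend(); ++It)
        It->unlock();          // release in reverse order
      RegistryMutex.unlock();
    }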
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
index 3cb4005ed29c..6ca350a25771 100644
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -84,7 +84,8 @@ private:
DCHECK_LE(Size, NewCapacity);
const uptr NewCapacityBytes =
roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
- T *NewData = (T *)map(nullptr, NewCapacityBytes, "scudo:vector");
+ T *NewData =
+ reinterpret_cast<T *>(map(nullptr, NewCapacityBytes, "scudo:vector"));
if (Data) {
memcpy(NewData, Data, Size * sizeof(T));
unmap(Data, CapacityBytes);
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
index dffd7cc26fe8..93a666c4d61e 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -18,22 +18,23 @@
#include <stdint.h>
#include <stdio.h>
-static scudo::Allocator<scudo::Config> Allocator;
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+
+extern "C" void SCUDO_PREFIX(malloc_postinit)();
+static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
+ SCUDO_ALLOCATOR;
// Pointer to the static allocator so that the C++ wrappers can access it.
// Technically we could have a completely separated heap for C & C++ but in
// reality the amount of cross pollination between the two is staggering.
-scudo::Allocator<scudo::Config> *AllocatorPtr = &Allocator;
-
-extern "C" {
+scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> *
+ CONCATENATE(SCUDO_ALLOCATOR, Ptr) = &SCUDO_ALLOCATOR;
-#define SCUDO_PREFIX(name) name
-#define SCUDO_ALLOCATOR Allocator
#include "wrappers_c.inc"
+
#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX
-INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
-
-} // extern "C"
+extern "C" INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
#endif // !SCUDO_ANDROID || !_BIONIC
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index a9adbc83588b..2fd709eaa1f6 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -17,6 +17,8 @@
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif
+extern "C" {
+
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
scudo::uptr Product;
if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
@@ -134,26 +136,23 @@ INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
}
-// Bionic wants a function named PREFIX_iterate and not PREFIX_malloc_iterate
-// which is somewhat inconsistent with the rest, workaround that.
-#if SCUDO_ANDROID && _BIONIC
-#define SCUDO_ITERATE iterate
-#else
-#define SCUDO_ITERATE malloc_iterate
-#endif
-
-INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
+INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
uintptr_t base, size_t size,
void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
return 0;
}
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
SCUDO_ALLOCATOR.disable();
}
-INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+void SCUDO_PREFIX(malloc_postinit)() {
+ pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
+ SCUDO_PREFIX(malloc_enable));
+}
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
if (param == M_DECAY_TIME) {
@@ -184,3 +183,5 @@ INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
fputs("</malloc>", stream);
return 0;
}
+
+} // extern "C"
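malloc_postinit() is invoked once the allocator is fully initialized (via the callPostInitCallback hooks added to the TSD registries above) and registers fork handlers: pthread_atfork(prepare, parent, child) runs the first callback in the parent before fork() and the other two after it, so the allocator is locked across the fork and unlocked on both sides. A small stand-alone illustration of the mechanism:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    // Stand-ins for SCUDO_PREFIX(malloc_disable)/(malloc_enable).
    static void prepare(void) { puts("parent, before fork: locks taken"); }
    static void parent(void) { puts("parent, after fork: locks released"); }
    static void child(void) { puts("child, after fork: locks released"); }

    int main(void) {
      pthread_atfork(prepare, parent, child);
      if (fork() == 0)
        _exit(0); // child: handlers already ran, the heap is usable again
      return 0;
    }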
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index fa4145c066b6..f004369d96cb 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -18,22 +18,40 @@
#include <stdint.h>
#include <stdio.h>
-static scudo::Allocator<scudo::AndroidConfig> Allocator;
-static scudo::Allocator<scudo::AndroidSvelteConfig> SvelteAllocator;
-
-extern "C" {
-
// Regular MallocDispatch definitions.
#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
#define SCUDO_ALLOCATOR Allocator
+
+extern "C" void SCUDO_PREFIX(malloc_postinit)();
+static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
+ SCUDO_ALLOCATOR;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separated heap for C & C++ but in
+// reality the amount of cross pollination between the two is staggering.
+scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)> *
+ CONCATENATE(SCUDO_ALLOCATOR, Ptr) = &SCUDO_ALLOCATOR;
+
#include "wrappers_c.inc"
+
#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX
// Svelte MallocDispatch definitions.
#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
#define SCUDO_ALLOCATOR SvelteAllocator
+
+extern "C" void SCUDO_PREFIX(malloc_postinit)();
+static scudo::Allocator<scudo::AndroidSvelteConfig,
+ SCUDO_PREFIX(malloc_postinit)>
+ SCUDO_ALLOCATOR;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separated heap for C & C++ but in
+// reality the amount of cross pollination between the two is staggering.
+scudo::Allocator<scudo::AndroidSvelteConfig, SCUDO_PREFIX(malloc_postinit)> *
+ CONCATENATE(SCUDO_ALLOCATOR, Ptr) = &SCUDO_ALLOCATOR;
+
#include "wrappers_c.inc"
+
#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX
@@ -44,6 +62,4 @@ INTERFACE void __scudo_print_stats(void) {
SvelteAllocator.printStats();
}
-} // extern "C"
-
#endif // SCUDO_ANDROID && _BIONIC
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
index d4370d506e5e..7fc1a9600e53 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -20,7 +20,7 @@
namespace scudo {
// A common errno setting logic shared by almost all Scudo C wrappers.
-INLINE void *setErrnoOnNull(void *Ptr) {
+inline void *setErrnoOnNull(void *Ptr) {
if (UNLIKELY(!Ptr))
errno = ENOMEM;
return Ptr;
@@ -30,14 +30,14 @@ INLINE void *setErrnoOnNull(void *Ptr) {
// Checks aligned_alloc() parameters, verifies that the alignment is a power of
// two and that the size is a multiple of alignment.
-INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+inline bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
return Alignment == 0 || !isPowerOfTwo(Alignment) ||
!isAligned(Size, Alignment);
}
// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
-INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+inline bool checkPosixMemalignAlignment(uptr Alignment) {
return Alignment == 0 || !isPowerOfTwo(Alignment) ||
!isAligned(Alignment, sizeof(void *));
}
@@ -45,7 +45,7 @@ INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
// Returns true if calloc(Size, N) overflows on Size*N calculation. Use a
// builtin supported by recent clang & GCC if it exists, otherwise fallback to a
// costly division.
-INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
#if __has_builtin(__builtin_umull_overflow)
return __builtin_umull_overflow(Size, N, Product);
#else
@@ -58,7 +58,7 @@ INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of PageSize.
-INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
return roundUpTo(Size, PageSize) < Size;
}
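When __builtin_umull_overflow is unavailable, the fallback path (elided from the hunk above) has to detect overflow by hand; the classic portable idiom multiplies and then checks with a division, as sketched below (this is the general technique, not a copy of the file's exact fallback):

    #include <cstdint>

    using uptr = uintptr_t;

    // Portable overflow check via division; returns true on overflow.
    inline bool callocWouldOverflow(uptr Size, uptr N, uptr *Product) {
      *Product = Size * N; // unsigned multiplication wraps, never UB
      if (Size == 0)
        return false;
      return (*Product / Size) != N; // division undoes the multiply iff no wrap
    }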
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
index 72235e9c9820..1da5385c7789 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -15,7 +15,8 @@
#include <stdint.h>
-extern scudo::Allocator<scudo::Config> *AllocatorPtr;
+extern "C" void malloc_postinit();
+extern scudo::Allocator<scudo::Config, malloc_postinit> *AllocatorPtr;
namespace std {
struct nothrow_t {};
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 1b2c0549d399..743e67bf2f7d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -113,9 +113,16 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
+static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
+static uptr max_user_defined_malloc_size;
+
void InitializeAllocator() {
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
+ max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
+ ? common_flags()->max_allocation_size_mb
+ << 20
+ : kMaxAllowedMallocSize;
}
void InitializeAllocatorLate() {
@@ -150,15 +157,17 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
OutputReport(thr, rep);
}
-static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
bool signal) {
- if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
+ if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
+ sz > max_user_defined_malloc_size) {
if (AllocatorMayReturnNull())
return nullptr;
+ uptr malloc_limit =
+ Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
GET_STACK_TRACE_FATAL(thr, pc);
- ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
+ ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
}
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (UNLIKELY(!p)) {
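The flag is given in megabytes, so `<< 20` scales it to bytes; when it is unset (0) the limit falls back to the 1 TiB hard cap, and the limit reported for oversized requests is the minimum of the two. An illustrative reduction of that logic:

    #include <algorithm>
    #include <cstdint>

    constexpr uint64_t kHardCap = 1ull << 40; // mirrors kMaxAllowedMallocSize

    // Illustrative only: flag value in MiB -> effective byte limit.
    constexpr uint64_t effectiveLimit(uint64_t max_allocation_size_mb) {
      return max_allocation_size_mb
                 ? std::min(kHardCap, max_allocation_size_mb << 20)
                 : kHardCap;
    }
    static_assert(effectiveLimit(64) == 64ull << 20, "64 -> 64 MiB");
    static_assert(effectiveLimit(0) == kHardCap, "unset -> 1 TiB hard cap");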
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index 326ca8532e52..ae65dd3fd995 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -75,9 +75,14 @@ static uptr main_thread_identity = 0;
ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;
+// We cannot use pthread_self() before libpthread has been initialized. Our
+// current heuristic for guarding this is checking `main_thread_identity`,
+// which is only assigned in `__tsan::InitializePlatform`.
static ThreadState **cur_thread_location() {
+ if (main_thread_identity == 0)
+ return &main_thread_state_loc;
uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity || main_thread_identity == 0)
+ if (thread_identity == main_thread_identity)
return &main_thread_state_loc;
return (ThreadState **)MemToShadow(thread_identity);
}