Diffstat (limited to 'lib/sanitizer_common'):
 -rw-r--r--  lib/sanitizer_common/CMakeLists.txt | 72
 -rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc | 35
 -rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h | 989
 -rw-r--r--  lib/sanitizer_common/sanitizer_allocator64.h | 488
 -rw-r--r--  lib/sanitizer_common/sanitizer_atomic_clang.h | 2
 -rw-r--r--  lib/sanitizer_common/sanitizer_atomic_msvc.h | 48
 -rw-r--r--  lib/sanitizer_common/sanitizer_common.cc | 135
 -rw-r--r--  lib/sanitizer_common/sanitizer_common.h | 108
 -rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors.inc | 224
 -rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc | 142
 -rw-r--r--  lib/sanitizer_common/sanitizer_flags.cc | 63
 -rw-r--r--  lib/sanitizer_common/sanitizer_interface_defs.h | 56
 -rw-r--r--  lib/sanitizer_common/sanitizer_internal_defs.h | 83
 -rw-r--r--  lib/sanitizer_common/sanitizer_lfstack.h | 73
 -rw-r--r--  lib/sanitizer_common/sanitizer_libc.cc | 45
 -rw-r--r--  lib/sanitizer_common/sanitizer_libc.h | 14
 -rw-r--r--  lib/sanitizer_common/sanitizer_linux.cc | 327
 -rw-r--r--  lib/sanitizer_common/sanitizer_list.h | 4
 -rw-r--r--  lib/sanitizer_common/sanitizer_mac.cc | 91
 -rw-r--r--  lib/sanitizer_common/sanitizer_mutex.h | 31
 -rw-r--r--  lib/sanitizer_common/sanitizer_placement_new.h | 2
 -rw-r--r--  lib/sanitizer_common/sanitizer_platform_interceptors.h | 38
 -rw-r--r--  lib/sanitizer_common/sanitizer_posix.cc | 84
 -rw-r--r--  lib/sanitizer_common/sanitizer_printf.cc | 88
 -rw-r--r--  lib/sanitizer_common/sanitizer_procmaps.h | 48
 -rw-r--r--  lib/sanitizer_common/sanitizer_quarantine.h | 172
 -rw-r--r--  lib/sanitizer_common/sanitizer_report_decorator.h | 37
 -rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.cc | 204
 -rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.h | 36
 -rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace.cc | 262
 -rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace.h | 79
 -rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer.cc | 325
 -rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer.h | 60
 -rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_itanium.cc | 42
 -rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_linux.cc | 182
 -rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_mac.cc | 31
 -rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_win.cc | 37
 -rw-r--r--  lib/sanitizer_common/sanitizer_win.cc | 119
 -rwxr-xr-x  lib/sanitizer_common/scripts/check_lint.sh | 82
 -rw-r--r--  lib/sanitizer_common/tests/CMakeLists.txt | 138
 -rw-r--r--  lib/sanitizer_common/tests/lit.cfg | 29
 -rw-r--r--  lib/sanitizer_common/tests/lit.site.cfg.in | 9
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator64_test.cc | 257
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc | 99
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 484
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc | 162
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_common_test.cc | 33
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_flags_test.cc | 10
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_libc_test.cc | 42
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_list_test.cc | 20
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_mutex_test.cc | 128
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_printf_test.cc | 125
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc | 85
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc | 69
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_test_main.cc | 19
 -rw-r--r--  lib/sanitizer_common/tests/sanitizer_test_utils.h | 80
 -rw-r--r--  lib/sanitizer_common/tests/standalone_malloc_test.cc | 87
 57 files changed, 5535 insertions(+), 1299 deletions(-)
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index d797a56dabd9..ee0e1237c1a9 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -10,26 +10,68 @@ set(SANITIZER_SOURCES
sanitizer_mac.cc
sanitizer_posix.cc
sanitizer_printf.cc
+ sanitizer_stackdepot.cc
+ sanitizer_stacktrace.cc
sanitizer_symbolizer.cc
+ sanitizer_symbolizer_itanium.cc
+ sanitizer_symbolizer_linux.cc
+ sanitizer_symbolizer_mac.cc
+ sanitizer_symbolizer_win.cc
sanitizer_win.cc
)
-set(SANITIZER_CFLAGS "-fPIC -fno-exceptions -funwind-tables -fvisibility=hidden")
+# Explicitly list all sanitizer_common headers. Not all of these are
+# included in sanitizer_common source files, but we need to depend on
+# these headers when building our custom unit tests.
+set(SANITIZER_HEADERS
+ sanitizer_allocator.h
+ sanitizer_atomic_clang.h
+ sanitizer_atomic_msvc.h
+ sanitizer_atomic.h
+ sanitizer_common.h
+ sanitizer_common_interceptors.inc
+ sanitizer_common_interceptors_scanf.inc
+ sanitizer_flags.h
+ sanitizer_internal_defs.h
+ sanitizer_lfstack.h
+ sanitizer_libc.h
+ sanitizer_list.h
+ sanitizer_mutex.h
+ sanitizer_placement_new.h
+ sanitizer_platform_interceptors.h
+ sanitizer_procmaps.h
+ sanitizer_quarantine.h
+ sanitizer_report_decorator.h
+ sanitizer_stackdepot.h
+ sanitizer_stacktrace.h
+ sanitizer_symbolizer.h
+ )
-set(SANITIZER_COMMON_DEFINITIONS
- SANITIZER_HAS_EXCEPTIONS=1)
+set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})
-if(CAN_TARGET_X86_64)
- add_library(RTSanitizerCommon.x86_64 OBJECT ${SANITIZER_SOURCES})
- set_property(TARGET RTSanitizerCommon.x86_64 PROPERTY COMPILE_FLAGS
- "${SANITIZER_CFLAGS} ${TARGET_X86_64_CFLAGS}")
- set_property(TARGET RTSanitizerCommon.x86_64 APPEND PROPERTY COMPILE_DEFINITIONS
- ${SANITIZER_COMMON_DEFINITIONS})
+set(SANITIZER_RUNTIME_LIBRARIES)
+if(APPLE)
+ # Build universal binary on APPLE.
+ add_library(RTSanitizerCommon.osx OBJECT ${SANITIZER_SOURCES})
+ set_target_compile_flags(RTSanitizerCommon.osx ${SANITIZER_CFLAGS})
+ set_target_properties(RTSanitizerCommon.osx PROPERTIES
+ OSX_ARCHITECTURES "${SANITIZER_COMMON_SUPPORTED_ARCH}")
+ list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.osx)
+elseif(ANDROID)
+ add_library(RTSanitizerCommon.arm.android OBJECT ${SANITIZER_SOURCES})
+ set_target_compile_flags(RTSanitizerCommon.arm.android
+ ${SANITIZER_CFLAGS})
+ list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.arm.android)
+else()
+ # Otherwise, build separate libraries for each target.
+ foreach(arch ${SANITIZER_COMMON_SUPPORTED_ARCH})
+ add_compiler_rt_object_library(RTSanitizerCommon ${arch}
+ SOURCES ${SANITIZER_SOURCES} CFLAGS ${SANITIZER_CFLAGS})
+ list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.${arch})
+ endforeach()
endif()
-if(CAN_TARGET_I386)
- add_library(RTSanitizerCommon.i386 OBJECT ${SANITIZER_SOURCES})
- set_property(TARGET RTSanitizerCommon.i386 PROPERTY COMPILE_FLAGS
- "${SANITIZER_CFLAGS} ${TARGET_I386_CFLAGS}")
- set_property(TARGET RTSanitizerCommon.i386 APPEND PROPERTY COMPILE_DEFINITIONS
- ${SANITIZER_COMMON_DEFINITIONS})
+
+# Unit tests for common sanitizer runtime.
+if(LLVM_INCLUDE_TESTS)
+ add_subdirectory(tests)
endif()
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 816fddf1c5ad..b13a7c6c14c0 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -15,16 +15,16 @@
// FIXME: We should probably use a more low-level allocator that would
// mmap some pages and split them into chunks to fulfill requests.
-#ifdef __linux__
+#if defined(__linux__) && !defined(__ANDROID__)
extern "C" void *__libc_malloc(__sanitizer::uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
-#else // __linux__
+#else // __linux__ && !ANDROID
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
-#endif // __linux__
+#endif // __linux__ && !ANDROID
namespace __sanitizer {
@@ -49,11 +49,30 @@ void InternalFree(void *addr) {
LIBC_FREE(addr);
}
-void *InternalAllocBlock(void *p) {
- CHECK_NE(p, (void*)0);
- u64 *pp = (u64*)((uptr)p & ~0x7);
- for (; pp[0] != kBlockMagic; pp--) {}
- return pp + 1;
+// LowLevelAllocator
+static LowLevelAllocateCallback low_level_alloc_callback;
+
+void *LowLevelAllocator::Allocate(uptr size) {
+ // Align allocation size.
+ size = RoundUpTo(size, 8);
+ if (allocated_end_ - allocated_current_ < (sptr)size) {
+ uptr size_to_allocate = Max(size, GetPageSizeCached());
+ allocated_current_ =
+ (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
+ allocated_end_ = allocated_current_ + size_to_allocate;
+ if (low_level_alloc_callback) {
+ low_level_alloc_callback((uptr)allocated_current_,
+ size_to_allocate);
+ }
+ }
+ CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
+ void *res = allocated_current_;
+ allocated_current_ += size;
+ return res;
+}
+
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
+ low_level_alloc_callback = callback;
}
} // namespace __sanitizer
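
The new LowLevelAllocateCallback hook lets a tool observe every page-level mapping made by LowLevelAllocator. A minimal sketch of a consumer, assuming the void (*)(uptr ptr, uptr size) typedef from sanitizer_common.h (the logging body here is invented for illustration):

    #include <stdio.h>
    // Assumes sanitizer_common.h is included for uptr and the setter.
    using __sanitizer::uptr;

    static void OnLowLevelAllocate(uptr ptr, uptr size) {
      // A real tool would typically poison/tag the fresh mapping here.
      fprintf(stderr, "LowLevelAllocator mapped %zu bytes at 0x%zx\n",
              (size_t)size, (size_t)ptr);
    }
    // Once, during tool initialization:
    //   __sanitizer::SetLowLevelAllocateCallback(OnLowLevelAllocate);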
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
new file mode 100644
index 000000000000..ad89c3c870dc
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -0,0 +1,989 @@
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_H
+#define SANITIZER_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_lfstack.h"
+
+namespace __sanitizer {
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 corresponds to size 0.
+// Classes 1 - 16 correspond to sizes 8 - 128 (size = class_id * 8).
+// Next 8 classes: 128 + i * 16 (i = 1 to 8).
+// Next 8 classes: 256 + i * 32 (i = 1 to 8).
+// ...
+// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - The difference between two consecutive size classes is between 6% and 12%.
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that need to be cached per-thread:
+// - kMaxNumCached is the maximal number of chunks per size class.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 8 diff: +8 00% l 3 cached: 256 2048; id 1
+// c02 => s: 16 diff: +8 100% l 4 cached: 256 4096; id 2
+// ...
+// c07 => s: 56 diff: +8 16% l 5 cached: 256 14336; id 7
+//
+// c08 => s: 64 diff: +8 14% l 6 cached: 256 16384; id 8
+// ...
+// c15 => s: 120 diff: +8 07% l 6 cached: 256 30720; id 15
+//
+// c16 => s: 128 diff: +8 06% l 7 cached: 256 32768; id 16
+// c17 => s: 144 diff: +16 12% l 7 cached: 227 32688; id 17
+// ...
+// c23 => s: 240 diff: +16 07% l 7 cached: 136 32640; id 23
+//
+// c24 => s: 256 diff: +16 06% l 8 cached: 128 32768; id 24
+// c25 => s: 288 diff: +32 12% l 8 cached: 113 32544; id 25
+// ...
+// c31 => s: 480 diff: +32 07% l 8 cached: 68 32640; id 31
+//
+// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32
+
+
+template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog,
+ uptr kMinBatchClassT>
+class SizeClassMap {
+ static const uptr kMinSizeLog = 3;
+ static const uptr kMidSizeLog = kMinSizeLog + 4;
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = 3;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ static const uptr kMaxNumCached = kMaxNumCachedT;
+ struct TransferBatch {
+ TransferBatch *next;
+ uptr count;
+ void *batch[kMaxNumCached];
+ };
+
+ static const uptr kMinBatchClass = kMinBatchClassT;
+ static const uptr kMaxSize = 1 << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses == 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCached(uptr class_id) {
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max(1UL, Min(kMaxNumCached, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(s);
+ uptr cached = MaxCached(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+
+ // TransferBatch for kMinBatchClass must fit into the block itself.
+ const uptr batch_size = sizeof(TransferBatch)
+ - sizeof(void*) // NOLINT
+ * (kMaxNumCached - MaxCached(kMinBatchClass));
+ CHECK_LE(batch_size, Size(kMinBatchClass));
+ // TransferBatch for kMinBatchClass-1 must not fit into the block itself.
+ const uptr batch_size1 = sizeof(TransferBatch)
+ - sizeof(void*) // NOLINT
+ * (kMaxNumCached - MaxCached(kMinBatchClass - 1));
+ CHECK_GT(batch_size1, Size(kMinBatchClass - 1));
+ }
+};
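
To make the table-free class-to-size math concrete, here is a stand-alone re-implementation of Size() with the default parameters (kMinSizeLog = 3, so kMinSize = 8, kMidSize = 128, kMidClass = 16, and S = 3); it reproduces the c17 => 144 and c25 => 288 rows of the Print() output quoted above. This is an illustrative sketch, not part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    // Mirrors SizeClassMap::Size() for kMinSize = 8, kMidSize = 128,
    // kMidClass = 16, S = 3, M = 7.
    static uint64_t ClassToSize(uint64_t class_id) {
      const uint64_t kMinSize = 8, kMidSize = 128, kMidClass = 16, S = 3, M = 7;
      if (class_id <= kMidClass)
        return kMinSize * class_id;             // linear part: 8, 16, ..., 128
      class_id -= kMidClass;
      uint64_t t = kMidSize << (class_id >> S); // power-of-two base 2^k
      return t + (t >> S) * (class_id & M);     // plus i * 2^(k-3), i in [0,7]
    }

    int main() {
      for (uint64_t c = 16; c <= 25; c++)        // prints 128, 144, ..., 288
        printf("c%02llu => s: %llu\n", (unsigned long long)c,
               (unsigned long long)ClassToSize(c));
      return 0;
    }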
+
+typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(33, 36)>
+ DefaultSizeClassMap;
+typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(25, 28)>
+ CompactSizeClassMap;
+template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
+
+// Allocators call these callbacks on mmap/munmap.
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const { }
+};
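
Any struct with these two methods can be plugged in as the MapUnmapCallback template parameter. For instance, a hypothetical callback that keeps a running total of mapped bytes (a sketch only, using GCC/Clang __sync builtins; the real tools use this hook to update shadow memory):

    // Hypothetical counting callback, usable wherever NoOpMapUnmapCallback is.
    struct CountingMapUnmapCallback {
      static long mapped_bytes;
      void OnMap(uptr p, uptr size) const {
        __sync_fetch_and_add(&mapped_bytes, (long)size);
      }
      void OnUnmap(uptr p, uptr size) const {
        __sync_fetch_and_sub(&mapped_bytes, (long)size);
      }
    };
    long CountingMapUnmapCallback::mapped_bytes;
    // e.g.: SizeClassAllocator64<kSpaceBeg, kSpaceSize, 16, DefaultSizeClassMap,
    //                            CountingMapUnmapCallback>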
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at
+// a fixed address (kSpaceBeg). Both constants are powers of two and
+// kSpaceBeg is kSpaceSize-aligned.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
+template <const uptr kSpaceBeg, const uptr kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator64 {
+ public:
+ typedef typename SizeClassMap::TransferBatch Batch;
+ typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
+
+ void Init() {
+ CHECK_EQ(kSpaceBeg,
+ reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+ MapWithCallback(kSpaceEnd, AdditionalSize());
+ }
+
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ Batch *b = region->free_list.Pop();
+ if (b == 0)
+ b = PopulateFreeList(c, class_id, region);
+ region->n_allocated += b->count;
+ return b;
+ }
+
+ void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ region->free_list.Push(b);
+ region->n_freed += b->count;
+ }
+
+ static bool PointerIsMine(void *p) {
+ return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
+ }
+
+ static uptr GetSizeClass(void *p) {
+ return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = SizeClassMap::Size(class_id);
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return 0;
+ }
+
+ static uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = SizeClassMap::Size(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) continue;
+ Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
+ class_id,
+ SizeClassMap::Size(class_id),
+ region->mapped_user >> 10,
+ region->n_allocated,
+ region->n_allocated - region->n_freed);
+ }
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
+ COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // Populate the free list with at most this number of bytes at once
+ // or with one element if its size is greater.
+ static const uptr kPopulateSize = 1 << 14;
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 15;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+
+ struct RegionInfo {
+ BlockingMutex mutex;
+ LFStack<Batch> free_list;
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ uptr n_allocated, n_freed; // Just stats.
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ static uptr GetChunkIdx(uptr chunk, uptr size) {
+ u32 offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
+ // We save 2x by using 32-bit div, but may need to use a 256-way switch.
+ return offset / (u32)size;
+ }
+
+ Batch *NOINLINE PopulateFreeList(AllocatorCache *c, uptr class_id,
+ RegionInfo *region) {
+ BlockingMutexLock l(&region->mutex);
+ Batch *b = region->free_list.Pop();
+ if (b)
+ return b;
+ uptr size = SizeClassMap::Size(class_id);
+ uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + count * size;
+ uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+ if (end_idx + size > region->mapped_user) {
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx + size > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ region->mapped_user += map_size;
+ }
+ uptr total_count = (region->mapped_user - beg_idx - size)
+ / size / count * count;
+ region->allocated_meta += total_count * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(region_beg + kRegionSize -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ if (region->allocated_user + region->allocated_meta > kRegionSize) {
+ Printf("Out of memory. Dying.\n");
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ for (;;) {
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)(region_beg + beg_idx);
+ b->count = count;
+ for (uptr i = 0; i < count; i++)
+ b->batch[i] = (void*)(region_beg + beg_idx + i * size);
+ region->allocated_user += count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+ beg_idx += count * size;
+ if (beg_idx + count * size + size > region->mapped_user)
+ break;
+ region->free_list.Push(b);
+ }
+ return b;
+ }
+};
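
For orientation, a plausible instantiation (the constants below are illustrative, not taken from this patch): with kSpaceSize = 2^40 and DefaultSizeClassMap (97 classes, rounded up to 128), kRegionSize = 2^40 / 128 = 2^33, which satisfies the kRegionSize >= 2^32 requirement checked in the class above.

    // Illustrative 64-bit instantiation (hypothetical address-space constants;
    // a real tool must pick a range compatible with its shadow layout).
    typedef SizeClassAllocator64<
        0x600000000000ULL,  // kSpaceBeg: fixed and kSpaceSize-aligned
        0x010000000000ULL,  // kSpaceSize = 2^40 => kRegionSize = 2^33
        16,                 // kMetadataSize bytes per chunk
        DefaultSizeClassMap> ExamplePrimary;
    typedef SizeClassAllocatorLocalCache<ExamplePrimary> ExampleCache;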
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on a 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a u8 array possible_regions[kNumPossibleRegions] to store the size classes.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// To avoid false sharing, objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ typedef typename SizeClassMap::TransferBatch Batch;
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
+
+ void Init() {
+ state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty())
+ PopulateFreeList(c, sci, class_id);
+ CHECK(!sci->free_list.empty());
+ Batch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.push_front(b);
+ }
+
+ bool PointerIsMine(void *p) {
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(void *p) {
+ return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (state_->possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (state_->possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
+ }
+
+ void PrintStats() {
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ IntrusiveList<Batch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
+ state_->possible_regions[ComputeRegionId(res)] = class_id;
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &state_->size_class_info_array[class_id];
+ }
+
+ void PopulateFreeList(AllocatorCache *c, SizeClassInfo *sci, uptr class_id) {
+ uptr size = SizeClassMap::Size(class_id);
+ uptr reg = AllocateRegion(class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ uptr max_count = SizeClassMap::MaxCached(class_id);
+ Batch *b = 0;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (b == 0) {
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)i;
+ b->count = 0;
+ }
+ b->batch[b->count++] = (void*)i;
+ if (b->count == max_count) {
+ sci->free_list.push_back(b);
+ b = 0;
+ }
+ }
+ if (b)
+ sci->free_list.push_back(b);
+ }
+
+ struct State {
+ u8 possible_regions[kNumPossibleRegions];
+ SizeClassInfo size_class_info_array[kNumClasses];
+ };
+ State *state_;
+};
+
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
+template<class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
+ // Don't need to call Init if the object is a global (i.e. zero-initialized).
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
+ }
+ }
+
+ // private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ typedef typename SizeClassMap::TransferBatch Batch;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * SizeClassMap::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+
+ void InitCache() {
+ if (per_class_[0].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCached(i);
+ }
+ }
+
+ void NOINLINE Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ Batch *b = allocator->AllocateBatch(this, class_id);
+ for (uptr i = 0; i < b->count; i++)
+ c->batch[i] = b->batch[i];
+ c->count = b->count;
+ if (class_id < SizeClassMap::kMinBatchClass)
+ Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
+ }
+
+ void NOINLINE Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ Batch *b;
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)c->batch[0];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ for (uptr i = 0; i < cnt; i++) {
+ b->batch[i] = c->batch[i];
+ c->batch[i] = c->batch[i + c->max_count / 2];
+ }
+ b->count = cnt;
+ c->count -= cnt;
+ allocator->DeallocateBatch(class_id, b);
+ }
+};
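
The intended per-thread lifecycle, roughly (a sketch reusing the hypothetical ExamplePrimary/ExampleCache typedefs from the earlier aside; THREADLOCAL stands in for whatever TLS mechanism the tool uses):

    // One POD cache per thread; zero-initialized globals need no Init() call.
    static THREADLOCAL ExampleCache cache;

    void *ToolMalloc(ExamplePrimary *a, uptr size) {
      return cache.Allocate(a, a->ClassID(size));
    }
    void ToolFree(ExamplePrimary *a, void *p) {
      cache.Deallocate(a, a->GetSizeClass(p), p);
    }
    void ToolThreadFinish(ExamplePrimary *a) {
      cache.Drain(a);  // hand cached chunks back to the central free lists
    }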
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
+class LargeMmapAllocator {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ page_size_ = GetPageSizeCached();
+ }
+
+ void *Allocate(uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ if (map_size < size) return 0; // Overflow.
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDie(map_size, "LargeMmapAllocator"));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK_EQ(0, res & (alignment - 1));
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = SANITIZER_WORDSIZE - __builtin_clzl(map_size) - 1;
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = n_chunks_++;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ void Deallocate(void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(void *p) {
+ return GetBlockBegin(p) != 0;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+ // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+ if (p < ch) continue; // p is to the left of this chunk; skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return 0;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size < p)
+ return 0;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK_EQ(p % page_size_, 0);
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
+
+ void *GetUser(Header *h) {
+ CHECK_EQ((uptr)h % page_size_, 0);
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ SpinMutex mutex_;
+};
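
A worked example of the header layout, assuming page_size_ = 4096: Allocate(1, 8) maps RoundUpMapSize(1) = 4096 + 4096 = 8192 bytes; the user pointer is map_beg + 4096, so GetHeader() finds the Header (and the page_size_/2 or more metadata bytes behind it) in the page just before the chunk. A tiny stand-alone check of that arithmetic:

    #include <assert.h>
    int main() {
      unsigned long page = 4096, size = 1;
      unsigned long map_size = ((size + page - 1) & ~(page - 1)) + page;
      unsigned long map_beg  = 0x7f0000000000UL;  // pretend MmapOrDie() result
      unsigned long res      = map_beg + page;    // user pointer, page-aligned
      assert(map_size == 8192);
      assert(res - page == map_beg);  // GetHeader(res) lands in the first page
      return 0;
    }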
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return a 2^x-aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void Init() {
+ primary_.Init();
+ secondary_.Init();
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size)
+ return 0;
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ if (cleared && res)
+ internal_memset(res, 0, size);
+ return res;
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return 0;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+};
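
Putting the three layers together, a tool-level allocator built from these parts might look like this (a sketch with invented names, continuing the earlier ExamplePrimary/ExampleCache aside; real tools wire in their own constants and per-thread caches):

    typedef LargeMmapAllocator<> ExampleSecondary;
    typedef CombinedAllocator<ExamplePrimary, ExampleCache,
                              ExampleSecondary> ExampleAllocator;

    static ExampleAllocator allocator;  // call allocator.Init() once at startup

    void *xmalloc(ExampleCache *cache, uptr size) {
      return allocator.Allocate(cache, size, /*alignment*/ 8);
    }
    void xfree(ExampleCache *cache, void *p) {
      allocator.Deallocate(cache, p);
    }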
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_H
+
diff --git a/lib/sanitizer_common/sanitizer_allocator64.h b/lib/sanitizer_common/sanitizer_allocator64.h
deleted file mode 100644
index eb79a128c1cc..000000000000
--- a/lib/sanitizer_common/sanitizer_allocator64.h
+++ /dev/null
@@ -1,488 +0,0 @@
-//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Specialized allocator which works only in 64-bit address space.
-// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
-// The main feature of this allocator is that the header is located far away
-// from the user memory region, so that the tool does not use extra shadow
-// for the header.
-//
-// Status: not yet ready.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ALLOCATOR_H
-#define SANITIZER_ALLOCATOR_H
-
-#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_list.h"
-#include "sanitizer_mutex.h"
-
-namespace __sanitizer {
-
-// Maps size class id to size and back.
-class DefaultSizeClassMap {
- private:
- // Here we use a spline composed of 5 polynomials of oder 1.
- // The first size class is l0, then the classes go with step s0
- // untill they reach l1, after which they go with step s1 and so on.
- // Steps should be powers of two for cheap division.
- // The size of the last size class should be a power of two.
- // There should be at most 256 size classes.
- static const uptr l0 = 1 << 4;
- static const uptr l1 = 1 << 9;
- static const uptr l2 = 1 << 12;
- static const uptr l3 = 1 << 15;
- static const uptr l4 = 1 << 18;
- static const uptr l5 = 1 << 21;
-
- static const uptr s0 = 1 << 4;
- static const uptr s1 = 1 << 6;
- static const uptr s2 = 1 << 9;
- static const uptr s3 = 1 << 12;
- static const uptr s4 = 1 << 15;
-
- static const uptr u0 = 0 + (l1 - l0) / s0;
- static const uptr u1 = u0 + (l2 - l1) / s1;
- static const uptr u2 = u1 + (l3 - l2) / s2;
- static const uptr u3 = u2 + (l4 - l3) / s3;
- static const uptr u4 = u3 + (l5 - l4) / s4;
-
- public:
- static const uptr kNumClasses = u4 + 1;
- static const uptr kMaxSize = l5;
- static const uptr kMinSize = l0;
-
- COMPILER_CHECK(kNumClasses <= 256);
- COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
-
- static uptr Size(uptr class_id) {
- if (class_id <= u0) return l0 + s0 * (class_id - 0);
- if (class_id <= u1) return l1 + s1 * (class_id - u0);
- if (class_id <= u2) return l2 + s2 * (class_id - u1);
- if (class_id <= u3) return l3 + s3 * (class_id - u2);
- if (class_id <= u4) return l4 + s4 * (class_id - u3);
- return 0;
- }
- static uptr ClassID(uptr size) {
- if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
- if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
- if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
- if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
- if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
- return 0;
- }
-};
-
-struct AllocatorListNode {
- AllocatorListNode *next;
-};
-
-typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
-
-
-// Space: a portion of address space of kSpaceSize bytes starting at
-// a fixed address (kSpaceBeg). Both constants are powers of two and
-// kSpaceBeg is kSpaceSize-aligned.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap>
-class SizeClassAllocator64 {
- public:
- void Init() {
- CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
- AllocBeg(), AllocSize())));
- }
-
- bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- void *Allocate(uptr size, uptr alignment) {
- CHECK(CanAllocate(size, alignment));
- return AllocateBySizeClass(SizeClassMap::ClassID(size));
- }
-
- void Deallocate(void *p) {
- CHECK(PointerIsMine(p));
- DeallocateBySizeClass(p, GetSizeClass(p));
- }
-
- // Allocate several chunks of the given class_id.
- void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- if (region->free_list.empty()) {
- PopulateFreeList(class_id, region);
- }
- CHECK(!region->free_list.empty());
- // Just take as many chunks as we have in the free list now.
- // FIXME: this might be too much.
- free_list->append_front(&region->free_list);
- CHECK(region->free_list.empty());
- }
-
- // Swallow the entire free_list for the given class_id.
- void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- region->free_list.append_front(free_list);
- }
-
- bool PointerIsMine(void *p) {
- return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
- }
- uptr GetSizeClass(void *p) {
- return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- void *GetMetaData(void *p) {
- uptr class_id = GetSizeClass(p);
- uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
- return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize);
- }
-
- uptr TotalMemoryUsed() {
- uptr res = 0;
- for (uptr i = 0; i < kNumClasses; i++)
- res += GetRegionInfo(i)->allocated_user;
- return res;
- }
-
- // Test-only.
- void TestOnlyUnmap() {
- UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
- }
-
- static const uptr kNumClasses = 256; // Power of two <= 256
-
- private:
- COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
- static const uptr kRegionSize = kSpaceSize / kNumClasses;
- COMPILER_CHECK((kRegionSize >> 32) > 0); // kRegionSize must be >= 2^32.
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 18;
-
- struct RegionInfo {
- SpinMutex mutex;
- AllocatorFreeList free_list;
- uptr allocated_user; // Bytes allocated for user memory.
- uptr allocated_meta; // Bytes allocated for metadata.
- char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
- };
- COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
-
- uptr AdditionalSize() {
- uptr res = sizeof(RegionInfo) * kNumClasses;
- CHECK_EQ(res % kPageSize, 0);
- return res;
- }
- uptr AllocBeg() { return kSpaceBeg - AdditionalSize(); }
- uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
-
- RegionInfo *GetRegionInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
- return &regions[-1 - class_id];
- }
-
- uptr GetChunkIdx(uptr chunk, uptr class_id) {
- u32 offset = chunk % kRegionSize;
- // Here we divide by a non-constant. This is costly.
- // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
- // We save 2x by using 32-bit div, but may need to use a 256-way switch.
- return offset / (u32)SizeClassMap::Size(class_id);
- }
-
- void PopulateFreeList(uptr class_id, RegionInfo *region) {
- uptr size = SizeClassMap::Size(class_id);
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + kPopulateSize;
- region->free_list.clear();
- uptr region_beg = kSpaceBeg + kRegionSize * class_id;
- uptr idx = beg_idx;
- uptr i = 0;
- do { // do-while loop because we need to put at least one item.
- uptr p = region_beg + idx;
- region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
- idx += size;
- i++;
- } while (idx < end_idx);
- region->allocated_user += idx - beg_idx;
- region->allocated_meta += i * kMetadataSize;
- CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
- }
-
- void *AllocateBySizeClass(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- if (region->free_list.empty()) {
- PopulateFreeList(class_id, region);
- }
- CHECK(!region->free_list.empty());
- AllocatorListNode *node = region->free_list.front();
- region->free_list.pop_front();
- return reinterpret_cast<void*>(node);
- }
-
- void DeallocateBySizeClass(void *p, uptr class_id) {
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
- }
-};
-
-// Objects of this type should be used as local caches for SizeClassAllocator64.
-// Since the typical use of this class is to have one object per thread in TLS,
-// is has to be POD.
-template<const uptr kNumClasses, class SizeClassAllocator>
-struct SizeClassAllocatorLocalCache {
- // Don't need to call Init if the object is a global (i.e. zero-initialized).
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
-
- void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- AllocatorFreeList *free_list = &free_lists_[class_id];
- if (free_list->empty())
- allocator->BulkAllocate(class_id, free_list);
- CHECK(!free_list->empty());
- void *res = free_list->front();
- free_list->pop_front();
- return res;
- }
-
- void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
- CHECK_LT(class_id, kNumClasses);
- free_lists_[class_id].push_front(reinterpret_cast<AllocatorListNode*>(p));
- }
-
- void Drain(SizeClassAllocator *allocator) {
- for (uptr i = 0; i < kNumClasses; i++) {
- allocator->BulkDeallocate(i, &free_lists_[i]);
- CHECK(free_lists_[i].empty());
- }
- }
-
- // private:
- AllocatorFreeList free_lists_[kNumClasses];
-};
-
-// This class can (de)allocate only large chunks of memory using mmap/unmap.
-// The main purpose of this allocator is to cover large and rare allocation
-// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-// The result is always page-aligned.
-class LargeMmapAllocator {
- public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void *Allocate(uptr size, uptr alignment) {
- CHECK_LE(alignment, kPageSize); // Not implemented. Do we need it?
- uptr map_size = RoundUpMapSize(size);
- void *map = MmapOrDie(map_size, "LargeMmapAllocator");
- void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
- + kPageSize);
- Header *h = GetHeader(res);
- h->size = size;
- {
- SpinMutexLock l(&mutex_);
- h->next = list_;
- h->prev = 0;
- if (list_)
- list_->prev = h;
- list_ = h;
- }
- return res;
- }
-
- void Deallocate(void *p) {
- Header *h = GetHeader(p);
- uptr map_size = RoundUpMapSize(h->size);
- {
- SpinMutexLock l(&mutex_);
- Header *prev = h->prev;
- Header *next = h->next;
- if (prev)
- prev->next = next;
- if (next)
- next->prev = prev;
- if (h == list_)
- list_ = next;
- }
- UnmapOrDie(h, map_size);
- }
-
- uptr TotalMemoryUsed() {
- SpinMutexLock l(&mutex_);
- uptr res = 0;
- for (Header *l = list_; l; l = l->next) {
- res += RoundUpMapSize(l->size);
- }
- return res;
- }
-
- bool PointerIsMine(void *p) {
- // Fast check.
- if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
- SpinMutexLock l(&mutex_);
- for (Header *l = list_; l; l = l->next) {
- if (GetUser(l) == p) return true;
- }
- return false;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
- }
-
- // At least kPageSize/2 metadata bytes is available.
- void *GetMetaData(void *p) {
- return GetHeader(p) + 1;
- }
-
- private:
- struct Header {
- uptr size;
- Header *next;
- Header *prev;
- };
-
- Header *GetHeader(void *p) {
- return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
- }
-
- void *GetUser(Header *h) {
- return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
- }
-
- uptr RoundUpMapSize(uptr size) {
- return RoundUpTo(size, kPageSize) + kPageSize;
- }
-
- Header *list_;
- SpinMutex mutex_;
-};
-
-// This class implements a complete memory allocator by using two
-// internal allocators:
-// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
-// When allocating 2^x bytes it should return 2^x aligned chunk.
-// PrimaryAllocator is used via a local AllocatorCache.
-// SecondaryAllocator can allocate anything, but is not efficient.
-template <class PrimaryAllocator, class AllocatorCache,
- class SecondaryAllocator> // NOLINT
-class CombinedAllocator {
- public:
- void Init() {
- primary_.Init();
- secondary_.Init();
- }
-
- void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false) {
- // Returning 0 on malloc(0) may break a lot of code.
- if (size == 0) size = 1;
- if (alignment > 8)
- size = RoundUpTo(size, alignment);
- void *res;
- if (primary_.CanAllocate(size, alignment))
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else
- res = secondary_.Allocate(size, alignment);
- if (alignment > 8)
- CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
- if (cleared)
- internal_memset(res, 0, size);
- return res;
- }
-
- void Deallocate(AllocatorCache *cache, void *p) {
- if (!p) return;
- if (primary_.PointerIsMine(p))
- cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
- else
- secondary_.Deallocate(p);
- }
-
- void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
- uptr alignment) {
- if (!p)
- return Allocate(cache, new_size, alignment);
- if (!new_size) {
- Deallocate(cache, p);
- return 0;
- }
- CHECK(PointerIsMine(p));
- uptr old_size = GetActuallyAllocatedSize(p);
- uptr memcpy_size = Min(new_size, old_size);
- void *new_p = Allocate(cache, new_size, alignment);
- if (new_p)
- internal_memcpy(new_p, p, memcpy_size);
- Deallocate(cache, p);
- return new_p;
- }
-
- bool PointerIsMine(void *p) {
- if (primary_.PointerIsMine(p))
- return true;
- return secondary_.PointerIsMine(p);
- }
-
- void *GetMetaData(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetMetaData(p);
- return secondary_.GetMetaData(p);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetActuallyAllocatedSize(p);
- return secondary_.GetActuallyAllocatedSize(p);
- }
-
- uptr TotalMemoryUsed() {
- return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
- }
-
- void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
-
- void SwallowCache(AllocatorCache *cache) {
- cache->Drain(&primary_);
- }
-
- private:
- PrimaryAllocator primary_;
- SecondaryAllocator secondary_;
-};
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_ALLOCATOR_H
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
index af7044165a61..7f73df3bd455 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -41,6 +41,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
@@ -56,6 +57,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
diff --git a/lib/sanitizer_common/sanitizer_atomic_msvc.h b/lib/sanitizer_common/sanitizer_atomic_msvc.h
index 2a15b59a3442..58a6a20ec9c5 100644
--- a/lib/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/lib/sanitizer_common/sanitizer_atomic_msvc.h
@@ -25,6 +25,31 @@ extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
+#ifdef _WIN64
+extern "C" void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand);
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#else
+// There's no _InterlockedCompareExchangePointer intrinsic on x86,
+// so call _InterlockedCompareExchange instead.
+extern "C"
+long __cdecl _InterlockedCompareExchange( // NOLINT
+ long volatile *Destination, // NOLINT
+ long Exchange, long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange)
+
+inline static void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand) {
+ return reinterpret_cast<void*>(
+ _InterlockedCompareExchange(
+ reinterpret_cast<long volatile*>(Destination), // NOLINT
+ reinterpret_cast<long>(Exchange), // NOLINT
+ reinterpret_cast<long>(Comparand))); // NOLINT
+}
+#endif
+
namespace __sanitizer {
INLINE void atomic_signal_fence(memory_order) {
@@ -47,6 +72,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
@@ -62,6 +88,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
@@ -107,6 +134,27 @@ INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
return v;
}
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+ uptr *cmp,
+ uptr xchg,
+ memory_order mo) {
+ uptr cmpv = *cmp;
+ uptr prev = (uptr)_InterlockedCompareExchangePointer(
+ (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
+
} // namespace __sanitizer
#endif // SANITIZER_ATOMIC_MSVC_H
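Editor's note on the CAS added above: on failure the observed value is stored back through *cmp, so retry loops never need a separate reload. A sketch of the canonical caller:

// Sketch: lock-free increment built on the interface above.
static void AtomicIncrement(volatile atomic_uintptr_t *counter) {
  uptr cur = atomic_load(counter, memory_order_relaxed);
  // On failure, atomic_compare_exchange_weak refreshes 'cur' for us.
  while (!atomic_compare_exchange_weak(counter, &cur, cur + 1,
                                       memory_order_relaxed)) {
  }
}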
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 6dd1ff91726e..4a8d9a749bf8 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -16,18 +16,90 @@
namespace __sanitizer {
+uptr GetPageSizeCached() {
+ static uptr PageSize;
+ if (!PageSize)
+ PageSize = GetPageSize();
+ return PageSize;
+}
+
+static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
+
+// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
+// isn't equal to the current PID, try to obtain a file descriptor by opening
+// the file "report_path_prefix.<PID>".
+static fd_t report_fd = kStderrFd;
+static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
+// PID of the process that opened |report_fd|. If a fork() occurs, the PID of
+// the child process will differ from |report_fd_pid|.
+static int report_fd_pid = 0;
+
+static void (*DieCallback)(void);
+void SetDieCallback(void (*callback)(void)) {
+ DieCallback = callback;
+}
+
+void NORETURN Die() {
+ if (DieCallback) {
+ DieCallback();
+ }
+ Exit(1);
+}
+
+static CheckFailedCallbackType CheckFailedCallback;
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {
+ CheckFailedCallback = callback;
+}
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ if (CheckFailedCallback) {
+ CheckFailedCallback(file, line, cond, v1, v2);
+ }
+ Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+ v1, v2);
+ Die();
+}
+
+static void MaybeOpenReportFile() {
+ if (!log_to_file || (report_fd_pid == GetPid())) return;
+ char report_path_full[4096];
+ internal_snprintf(report_path_full, sizeof(report_path_full),
+ "%s.%d", report_path_prefix, GetPid());
+ fd_t fd = internal_open(report_path_full, true);
+ if (fd == kInvalidFd) {
+ report_fd = kStderrFd;
+ log_to_file = false;
+ Report("ERROR: Can't open file: %s\n", report_path_full);
+ Die();
+ }
+ if (report_fd != kInvalidFd) {
+ // We're in the child. Close the parent's log.
+ internal_close(report_fd);
+ }
+ report_fd = fd;
+ report_fd_pid = GetPid();
+}
+
+bool PrintsToTty() {
+ MaybeOpenReportFile();
+ return internal_isatty(report_fd);
+}
+
void RawWrite(const char *buffer) {
static const char *kRawWriteError = "RawWrite can't output requested buffer!";
uptr length = (uptr)internal_strlen(buffer);
- if (length != internal_write(2, buffer, length)) {
- internal_write(2, kRawWriteError, internal_strlen(kRawWriteError));
+ MaybeOpenReportFile();
+ if (length != internal_write(report_fd, buffer, length)) {
+ internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
Die();
}
}
uptr ReadFileToBuffer(const char *file_name, char **buff,
uptr *buff_size, uptr max_len) {
- const uptr kMinFileLen = kPageSize;
+ uptr PageSize = GetPageSizeCached();
+ uptr kMinFileLen = PageSize;
uptr read_len = 0;
*buff = 0;
*buff_size = 0;
@@ -41,8 +113,8 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
// Read up to one page at a time.
read_len = 0;
bool reached_eof = false;
- while (read_len + kPageSize <= size) {
- uptr just_read = internal_read(fd, *buff + read_len, kPageSize);
+ while (read_len + PageSize <= size) {
+ uptr just_read = internal_read(fd, *buff + read_len, PageSize);
if (just_read == 0) {
reached_eof = true;
break;
@@ -97,4 +169,57 @@ void SortArray(uptr *array, uptr size) {
}
}
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping the redundant pieces.
+// We could probably do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (res & (alignment - 1)) // Not aligned.
+ res = (map_res + alignment) & ~(alignment - 1);
+ uptr end = res + size;
+ if (res != map_res)
+ UnmapOrDie((void*)map_res, res - map_res);
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
+
} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_set_report_path(const char *path) {
+ if (!path) return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(report_path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+ internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
+ report_path_prefix[len] = '\0';
+ report_fd = kInvalidFd;
+ log_to_file = true;
+}
+
+void __sanitizer_set_report_fd(int fd) {
+ if (report_fd != kStdoutFd &&
+ report_fd != kStderrFd &&
+ report_fd != kInvalidFd)
+ internal_close(report_fd);
+ report_fd = fd;
+}
+
+void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
+ (void)reserved;
+ PrepareForSandboxing();
+}
+} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 4c7c1e9d86ed..1d002398c785 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -21,19 +21,21 @@
namespace __sanitizer {
// Constants.
-const uptr kWordSize = __WORDSIZE / 8;
+const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
-const uptr kPageSizeBits = 12;
-const uptr kPageSize = 1UL << kPageSizeBits;
-const uptr kCacheLineSize = 64;
-#ifndef _WIN32
-const uptr kMmapGranularity = kPageSize;
+
+#if defined(__powerpc__) || defined(__powerpc64__)
+const uptr kCacheLineSize = 128;
#else
-const uptr kMmapGranularity = 1UL << 16;
+const uptr kCacheLineSize = 64;
#endif
+uptr GetPageSize();
+uptr GetPageSizeCached();
+uptr GetMmapGranularity();
// Threads
int GetPid();
+uptr GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom);
@@ -42,21 +44,66 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size);
+// Map aligned chunk of address space; size and alignment are powers of two.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+void FlushUnneededShadowMemory(uptr addr, uptr size);
// Internal allocator
void *InternalAlloc(uptr size);
void InternalFree(void *p);
-// Given the pointer p into a valid allocated block,
-// returns a pointer to the beginning of the block.
-void *InternalAllocBlock(void *p);
+
+// InternalScopedBuffer can be used instead of large stack arrays to
+// keep frame size low.
+// FIXME: use InternalAlloc instead of MmapOrDie once
+// InternalAlloc is made libc-free.
+template<typename T>
+class InternalScopedBuffer {
+ public:
+ explicit InternalScopedBuffer(uptr cnt) {
+ cnt_ = cnt;
+ ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
+ }
+ ~InternalScopedBuffer() {
+ UnmapOrDie(ptr_, cnt_ * sizeof(T));
+ }
+ T &operator[](uptr i) { return ptr_[i]; }
+ T *data() { return ptr_; }
+ uptr size() { return cnt_ * sizeof(T); }
+
+ private:
+ T *ptr_;
+ uptr cnt_;
+ // Disallow evil constructors.
+ InternalScopedBuffer(const InternalScopedBuffer&);
+ void operator=(const InternalScopedBuffer&);
+};
+
+// Simple low-level (mmap-based) allocator for internal use. Doesn't have
+// constructor, so all instances of LowLevelAllocator should be
+// linker initialized.
+class LowLevelAllocator {
+ public:
+ // Requires an external lock.
+ void *Allocate(uptr size);
+ private:
+ char *allocated_end_;
+ char *allocated_current_;
+};
+typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
+// Allows registering tool-specific callbacks for LowLevelAllocator.
+// Passing NULL removes the callback.
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
void RawWrite(const char *buffer);
+bool PrintsToTty();
void Printf(const char *format, ...);
void Report(const char *format, ...);
+void SetPrintfAndReportCallback(void (*callback)(const char *));
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
@@ -69,19 +116,44 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
+// OS
+void DisableCoreDumper();
+void DumpProcessMap();
+bool FileExists(const char *filename);
const char *GetEnv(const char *name);
const char *GetPwd();
+void ReExec();
+bool StackSizeIsUnlimited();
+void SetStackSizeLimitInBytes(uptr limit);
+void PrepareForSandboxing();
// Other
-void DisableCoreDumper();
-void DumpProcessMap();
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
-void NORETURN Exit(int exitcode);
-void NORETURN Abort();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
+// Exit
+void NORETURN Abort();
+void NORETURN Exit(int exitcode);
+void NORETURN Die();
+void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
+CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+
+// Set the name of the current thread to 'name'; return true on success.
+// The name may be truncated to a system-dependent limit.
+bool SanitizerSetThreadName(const char *name);
+// Get the name of the current thread (no more than max_len bytes),
+// return true on success. 'name' must have space for at least max_len+1 bytes.
+bool SanitizerGetThreadName(char *name, int max_len);
+
+// Specific tools may override behavior of "Die" and "CheckFailed" functions
+// to do a tool-specific job.
+void SetDieCallback(void (*callback)(void));
+typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
+ u64, u64);
+void SetCheckFailedCallback(CheckFailedCallbackType callback);
+
// Math
INLINE bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
@@ -90,6 +162,12 @@ INLINE uptr RoundUpTo(uptr size, uptr boundary) {
CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
+INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+ return x & ~(boundary - 1);
+}
+INLINE bool IsAligned(uptr a, uptr alignment) {
+ return (a & (alignment - 1)) == 0;
+}
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
@@ -112,7 +190,7 @@ INLINE int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
-#if __WORDSIZE == 64
+#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
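Editor's note: InternalScopedBuffer, added above, exists to keep stack frames small; a sketch of the intended pattern (ReadMaps is a hypothetical caller):

// Hypothetical caller: a large scratch buffer without a large stack frame.
static void ReadMaps() {
  InternalScopedBuffer<char> buf(64 * 1024);  // Backed by MmapOrDie.
  buf[0] = '\0';
  // Note: size() returns bytes (cnt * sizeof(T)), not the element count.
  // ... fill up to buf.size() bytes via buf.data() ...
}  // Unmapped here by the destructor.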
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
new file mode 100644
index 000000000000..8bc2e8b5c292
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -0,0 +1,224 @@
+//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_FD_ACQUIRE
+// COMMON_INTERCEPTOR_FD_RELEASE
+// COMMON_INTERCEPTOR_SET_THREAD_NAME
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+#include <stdarg.h>
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
+ SSIZE_T res = REAL(read)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+# define INIT_READ INTERCEPT_FUNCTION(read)
+#else
+# define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+# define INIT_PREAD INTERCEPT_FUNCTION(pread)
+#else
+# define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
+#else
+# define INIT_PREAD64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITE
+INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(write)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+# define INIT_WRITE INTERCEPT_FUNCTION(write)
+#else
+# define INIT_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE
+INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+  void* ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
+  if (fd >= 0)
+    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+  SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+# define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
+#else
+# define INIT_PWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE64
+INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, SIZE_T count,
+            OFF64_T offset) {
+  void* ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
+  if (fd >= 0)
+    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+  SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+# define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
+#else
+# define INIT_PWRITE64
+#endif
+
+#if SANITIZER_INTERCEPT_PRCTL
+INTERCEPTOR(int, prctl, int option,
+ unsigned long arg2, unsigned long arg3, // NOLINT
+ unsigned long arg4, unsigned long arg5) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
+ static const int PR_SET_NAME = 15;
+  int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
+ if (option == PR_SET_NAME) {
+ char buff[16];
+ internal_strncpy(buff, (char*)arg2, 15);
+ buff[15] = 0;
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ }
+ return res;
+}
+# define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
+#else
+# define INIT_PRCTL
+#endif // SANITIZER_INTERCEPT_PRCTL
+
+
+#if SANITIZER_INTERCEPT_SCANF
+
+#include "sanitizer_common_interceptors_scanf.inc"
+
+INTERCEPTOR(int, vscanf, const char *format, va_list ap) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vscanf, format, ap);
+ scanf_common(ctx, format, ap);
+ int res = REAL(vscanf)(format, ap); // NOLINT
+ return res;
+}
+
+INTERCEPTOR(int, vsscanf, const char *str, const char *format, // NOLINT
+ va_list ap) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap);
+ scanf_common(ctx, format, ap);
+ int res = REAL(vsscanf)(str, format, ap); // NOLINT
+ // FIXME: read of str
+ return res;
+}
+
+INTERCEPTOR(int, vfscanf, void *stream, const char *format, // NOLINT
+ va_list ap) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vfscanf, stream, format, ap);
+ scanf_common(ctx, format, ap);
+ int res = REAL(vfscanf)(stream, format, ap); // NOLINT
+ return res;
+}
+
+INTERCEPTOR(int, scanf, const char *format, ...) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, scanf, format);
+ va_list ap;
+ va_start(ap, format);
+ int res = vscanf(format, ap); // NOLINT
+ va_end(ap);
+ return res;
+}
+
+INTERCEPTOR(int, fscanf, void* stream, const char *format, ...) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fscanf, stream, format);
+ va_list ap;
+ va_start(ap, format);
+ int res = vfscanf(stream, format, ap); // NOLINT
+ va_end(ap);
+ return res;
+}
+
+INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sscanf, str, format); // NOLINT
+ va_list ap;
+ va_start(ap, format);
+ int res = vsscanf(str, format, ap); // NOLINT
+ va_end(ap);
+ return res;
+}
+
+#define INIT_SCANF \
+ INTERCEPT_FUNCTION(scanf); \
+ INTERCEPT_FUNCTION(sscanf); /* NOLINT */ \
+ INTERCEPT_FUNCTION(fscanf); \
+ INTERCEPT_FUNCTION(vscanf); \
+ INTERCEPT_FUNCTION(vsscanf); \
+ INTERCEPT_FUNCTION(vfscanf)
+
+#else
+#define INIT_SCANF
+#endif
+
+#define SANITIZER_COMMON_INTERCEPTORS_INIT \
+  INIT_READ; \
+  INIT_PREAD; \
+  INIT_PREAD64; \
+  INIT_PRCTL; \
+  INIT_WRITE; \
+  INIT_PWRITE; \
+  INIT_PWRITE64; \
+  INIT_SCANF;
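Editor's note: a tool consumes this file by defining the hook macros first and then requesting interception at startup. A bare-bones sketch, with CheckRange standing in for a real shadow-memory check:

// Hypothetical tool glue; CheckRange is a placeholder for the tool's checker.
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) (void)(ctx = 0)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  CheckRange(ptr, size, /*is_write*/ false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  CheckRange(ptr, size, /*is_write*/ true)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (0)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (0)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) do { } while (0)
#include "sanitizer_common_interceptors.inc"

void InitializeCommonInterceptors() {
  SANITIZER_COMMON_INTERCEPTORS_INIT;  // Expands to the INIT_* macros above.
}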
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc
new file mode 100644
index 000000000000..63d67a7115ec
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc
@@ -0,0 +1,142 @@
+//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf implementation for use in *Sanitizer interceptors.
+//
+//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
+struct ScanfSpec {
+ char c;
+ unsigned size;
+};
+
+// One-letter specs.
+static const ScanfSpec scanf_specs[] = {
+ {'p', sizeof(void *)},
+ {'e', sizeof(float)},
+ {'E', sizeof(float)},
+ {'a', sizeof(float)},
+ {'f', sizeof(float)},
+ {'g', sizeof(float)},
+ {'d', sizeof(int)},
+ {'i', sizeof(int)},
+ {'o', sizeof(int)},
+ {'u', sizeof(int)},
+ {'x', sizeof(int)},
+ {'X', sizeof(int)},
+ {'n', sizeof(int)},
+ {'t', sizeof(PTRDIFF_T)},
+ {'z', sizeof(SIZE_T)},
+ {'j', sizeof(INTMAX_T)},
+ {'h', sizeof(short)}
+};
+
+static const unsigned scanf_specs_cnt =
+ sizeof(scanf_specs) / sizeof(scanf_specs[0]);
+
+// %ll?, %L?, %q? specs
+static const ScanfSpec scanf_llspecs[] = {
+ {'e', sizeof(long double)},
+ {'f', sizeof(long double)},
+ {'g', sizeof(long double)},
+ {'d', sizeof(long long)},
+ {'i', sizeof(long long)},
+ {'o', sizeof(long long)},
+ {'u', sizeof(long long)},
+ {'x', sizeof(long long)}
+};
+
+static const unsigned scanf_llspecs_cnt =
+ sizeof(scanf_llspecs) / sizeof(scanf_llspecs[0]);
+
+// %l? specs
+static const ScanfSpec scanf_lspecs[] = {
+ {'e', sizeof(double)},
+ {'f', sizeof(double)},
+ {'g', sizeof(double)},
+ {'d', sizeof(long)},
+ {'i', sizeof(long)},
+ {'o', sizeof(long)},
+ {'u', sizeof(long)},
+ {'x', sizeof(long)},
+ {'X', sizeof(long)},
+};
+
+static const unsigned scanf_lspecs_cnt =
+ sizeof(scanf_lspecs) / sizeof(scanf_lspecs[0]);
+
+static unsigned match_spec(const struct ScanfSpec *spec, unsigned n, char c) {
+ for (unsigned i = 0; i < n; ++i)
+ if (spec[i].c == c)
+ return spec[i].size;
+ return 0;
+}
+
+static void scanf_common(void *ctx, const char *format, va_list ap_const) {
+ va_list aq;
+ va_copy(aq, ap_const);
+
+ const char *p = format;
+ unsigned size;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ ++p;
+    if (*p == 0)  // A trailing '%': don't step past the terminating NUL.
+      break;
+    if (*p == '*' || *p == '%') {
+      ++p;
+      continue;
+    }
+ if (*p == '0' || (*p >= '1' && *p <= '9')) {
+ size = internal_atoll(p);
+ // +1 for the \0 at the end
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size + 1);
+ ++p;
+ continue;
+ }
+
+ if (*p == 'L' || *p == 'q') {
+ ++p;
+ size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ continue;
+ }
+
+ if (*p == 'l') {
+ ++p;
+ if (*p == 'l') {
+ ++p;
+ size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ continue;
+ } else {
+ size = match_spec(scanf_lspecs, scanf_lspecs_cnt, *p);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ continue;
+ }
+ }
+
+ if (*p == 'h' && *(p + 1) == 'h') {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), sizeof(char));
+ p += 2;
+ continue;
+ }
+
+ size = match_spec(scanf_specs, scanf_specs_cnt, *p);
+ if (size) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ ++p;
+ continue;
+ }
+ }
+ va_end(aq);
+}
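Editor's note: concretely, for one representative format string the walker above reports the following writes before the real scanf runs (a sketch of the effect, not a test from the tree; sizes assume a typical LP64 target):

#include <stdio.h>

int main() {
  int i; char s[11]; long long ll;
  // scanf_common walks "%d %10s %lld" and issues, in order:
  //   WRITE_RANGE(&i,  sizeof(int));        // "%d"   via scanf_specs
  //   WRITE_RANGE(s,   10 + 1);             // "%10s" width plus trailing NUL
  //   WRITE_RANGE(&ll, sizeof(long long));  // "%lld" via scanf_llspecs
  sscanf("42 hello 7", "%d %10s %lld", &i, s, &ll);
  return 0;
}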
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index cdeeb78d7a5d..eca910c08090 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -18,13 +18,14 @@
namespace __sanitizer {
-static char *GetFlagValue(const char *env, const char *name) {
+static bool GetFlagValue(const char *env, const char *name,
+ const char **value, int *value_length) {
if (env == 0)
- return 0;
+ return false;
const char *pos = internal_strstr(env, name);
const char *end;
if (pos == 0)
- return 0;
+ return false;
pos += internal_strlen(name);
if (pos[0] != '=') {
end = pos;
@@ -42,41 +43,55 @@ static char *GetFlagValue(const char *env, const char *name) {
if (end == 0)
end = pos + internal_strlen(pos);
}
- int len = end - pos;
- char *f = (char*)InternalAlloc(len + 1);
- internal_memcpy(f, pos, len);
- f[len] = '\0';
- return f;
+ *value = pos;
+ *value_length = end - pos;
+ return true;
+}
+
+static bool StartsWith(const char *flag, int flag_length, const char *value) {
+ if (!flag || !value)
+ return false;
+ int value_length = internal_strlen(value);
+ return (flag_length >= value_length) &&
+ (0 == internal_strncmp(flag, value, value_length));
}
void ParseFlag(const char *env, bool *flag, const char *name) {
- char *val = GetFlagValue(env, name);
- if (val == 0)
+ const char *value;
+ int value_length;
+ if (!GetFlagValue(env, name, &value, &value_length))
return;
- if (0 == internal_strcmp(val, "0") ||
- 0 == internal_strcmp(val, "no") ||
- 0 == internal_strcmp(val, "false"))
+ if (StartsWith(value, value_length, "0") ||
+ StartsWith(value, value_length, "no") ||
+ StartsWith(value, value_length, "false"))
*flag = false;
- if (0 == internal_strcmp(val, "1") ||
- 0 == internal_strcmp(val, "yes") ||
- 0 == internal_strcmp(val, "true"))
+ if (StartsWith(value, value_length, "1") ||
+ StartsWith(value, value_length, "yes") ||
+ StartsWith(value, value_length, "true"))
*flag = true;
- InternalFree(val);
}
void ParseFlag(const char *env, int *flag, const char *name) {
- char *val = GetFlagValue(env, name);
- if (val == 0)
+ const char *value;
+ int value_length;
+ if (!GetFlagValue(env, name, &value, &value_length))
return;
- *flag = internal_atoll(val);
- InternalFree(val);
+ *flag = internal_atoll(value);
}
+static LowLevelAllocator allocator_for_flags;
+
void ParseFlag(const char *env, const char **flag, const char *name) {
- const char *val = GetFlagValue(env, name);
- if (val == 0)
+ const char *value;
+ int value_length;
+ if (!GetFlagValue(env, name, &value, &value_length))
return;
- *flag = val;
+ // Copy the flag value. Don't use locks here, as flags are parsed at
+ // tool startup.
+ char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
+ internal_memcpy(value_copy, value, value_length);
+ value_copy[value_length] = '\0';
+ *flag = value_copy;
}
} // namespace __sanitizer
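Editor's note: typical use from a tool's flag initialization looks like this (a sketch; the Flags struct and flag names are illustrative):

struct Flags {
  bool symbolize;
  int verbosity;
  const char *log_path;
};

static void InitializeFlags(Flags *f, const char *env) {
  // env might be: "verbosity=2 symbolize=no log_path='/tmp/x'"
  ParseFlag(env, &f->symbolize, "symbolize");  // "0"/"no"/"false" => false
  ParseFlag(env, &f->verbosity, "verbosity");  // parsed via internal_atoll
  ParseFlag(env, &f->log_path, "log_path");    // copied via LowLevelAllocator
}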
diff --git a/lib/sanitizer_common/sanitizer_interface_defs.h b/lib/sanitizer_common/sanitizer_interface_defs.h
deleted file mode 100644
index 2395ea505657..000000000000
--- a/lib/sanitizer_common/sanitizer_interface_defs.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- sanitizer_interface_defs.h -----------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between AddressSanitizer and ThreadSanitizer.
-// It contains basic macro and types.
-// NOTE: This file may be included into user code.
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_INTERFACE_DEFS_H
-#define SANITIZER_INTERFACE_DEFS_H
-
-// ----------- ATTENTION -------------
-// This header should NOT include any other headers to avoid portability issues.
-
-#if defined(_WIN32)
-// FIXME find out what we need on Windows. __declspec(dllexport) ?
-# define SANITIZER_INTERFACE_ATTRIBUTE
-# define SANITIZER_WEAK_ATTRIBUTE
-#elif defined(SANITIZER_GO)
-# define SANITIZER_INTERFACE_ATTRIBUTE
-# define SANITIZER_WEAK_ATTRIBUTE
-#else
-# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
-# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
-#endif
-
-// __has_feature
-#if !defined(__has_feature)
-# define __has_feature(x) 0
-#endif
-
-// For portability reasons we do not include stddef.h, stdint.h or any other
-// system header, but we do need some basic types that are not defined
-// in a portable way by the language itself.
-namespace __sanitizer {
-
-typedef unsigned long uptr; // NOLINT
-typedef signed long sptr; // NOLINT
-typedef unsigned char u8;
-typedef unsigned short u16; // NOLINT
-typedef unsigned int u32;
-typedef unsigned long long u64; // NOLINT
-typedef signed char s8;
-typedef signed short s16; // NOLINT
-typedef signed int s32;
-typedef signed long long s64; // NOLINT
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_INTERFACE_DEFS_H
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index b8cf61fad84a..7ff27338192a 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -13,7 +13,7 @@
#ifndef SANITIZER_DEFS_H
#define SANITIZER_DEFS_H
-#include "sanitizer_interface_defs.h"
+#include "sanitizer/common_interface_defs.h"
using namespace __sanitizer; // NOLINT
// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.
@@ -24,8 +24,7 @@ using namespace __sanitizer; // NOLINT
#define WEAK SANITIZER_WEAK_ATTRIBUTE
// Platform-specific defs.
-#if defined(_WIN32)
-typedef unsigned long DWORD; // NOLINT
+#if defined(_MSC_VER)
# define ALWAYS_INLINE __declspec(forceinline)
// FIXME(timurrrr): do we need this on Windows?
# define ALIAS(x)
@@ -35,7 +34,12 @@ typedef unsigned long DWORD; // NOLINT
# define NORETURN __declspec(noreturn)
# define THREADLOCAL __declspec(thread)
# define NOTHROW
-#else // _WIN32
+# define LIKELY(x) (x)
+# define UNLIKELY(x) (x)
+# define UNUSED
+# define USED
+# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
+#else // _MSC_VER
# define ALWAYS_INLINE __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
# define ALIGNED(x) __attribute__((aligned(x)))
@@ -43,22 +47,21 @@ typedef unsigned long DWORD; // NOLINT
# define NOINLINE __attribute__((noinline))
# define NORETURN __attribute__((noreturn))
# define THREADLOCAL __thread
-# ifdef __cplusplus
-# define NOTHROW throw()
-# else
-# define NOTHROW __attribute__((__nothrow__))
-#endif
-#endif // _WIN32
-
-// We have no equivalent of these on Windows.
-#ifndef _WIN32
+# define NOTHROW throw()
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
-#endif
+# if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(x) generates prefetcht0 on x86; we want the NTA hint.
+# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
+# else
+# define PREFETCH(x) __builtin_prefetch(x)
+# endif
+#endif // _MSC_VER
#if defined(_WIN32)
+typedef unsigned long DWORD; // NOLINT
typedef DWORD thread_return_t;
# define THREAD_CALLING_CONV __stdcall
#else // _WIN32
@@ -67,15 +70,11 @@ typedef void* thread_return_t;
#endif // _WIN32
typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
-// If __WORDSIZE was undefined by the platform, define it in terms of the
-// compiler built-ins __LP64__ and _WIN64.
-#ifndef __WORDSIZE
-# if __LP64__ || defined(_WIN64)
-# define __WORDSIZE 64
-# else
-# define __WORDSIZE 32
-# endif
-#endif // __WORDSIZE
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
// NOTE: Functions below must be defined in each run-time.
namespace __sanitizer {
@@ -130,23 +129,32 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#define DCHECK_GE(a, b)
#endif
-#define UNIMPLEMENTED() CHECK("unimplemented" && 0)
+#define UNREACHABLE(msg) do { \
+ CHECK(0 && msg); \
+ Die(); \
+} while (0)
+
+#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+
#define IMPL_PASTE(a, b) a##b
#define IMPL_COMPILER_ASSERT(pred, line) \
- typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1];
+ typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
// Limits for integral types. We have to redefine it in case we don't
// have stdint.h (like in Visual Studio 9).
-#if __WORDSIZE == 64
+#undef __INT64_C
+#undef __UINT64_C
+#if SANITIZER_WORDSIZE == 64
# define __INT64_C(c) c ## L
# define __UINT64_C(c) c ## UL
#else
# define __INT64_C(c) c ## LL
# define __UINT64_C(c) c ## ULL
-#endif // __WORDSIZE == 64
+#endif // SANITIZER_WORDSIZE == 64
#undef INT32_MIN
#define INT32_MIN (-2147483647-1)
#undef INT32_MAX
@@ -160,4 +168,25 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#undef UINT64_MAX
#define UINT64_MAX (__UINT64_C(18446744073709551615))
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+#if !defined(_MSC_VER) || defined(__clang__)
+# define GET_CALLER_PC() (uptr)__builtin_return_address(0)
+# define GET_CURRENT_FRAME() (uptr)__builtin_frame_address(0)
+#else
+extern "C" void* _ReturnAddress(void);
+# pragma intrinsic(_ReturnAddress)
+# define GET_CALLER_PC() (uptr)_ReturnAddress()
+// CaptureStackBackTrace doesn't need to know BP on Windows.
+// FIXME: This macro is still used when printing error reports though it's not
+// clear if the BP value is needed in the ASan reports on Windows.
+# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
+#endif
+
+#define HANDLE_EINTR(res, f) \
+  do { \
+    res = (f); \
+  } while (res == -1 && errno == EINTR)
+
#endif // SANITIZER_DEFS_H
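Editor's note: HANDLE_EINTR, added above, is put to work in the Linux syscall wrappers later in this patch; the usage pattern is simply (a sketch):

// Sketch: restart a syscall that may fail with EINTR.
#include <errno.h>
#include <unistd.h>

static ssize_t ReadRetrying(int fd, void *buf, size_t count) {
  ssize_t res;
  HANDLE_EINTR(res, read(fd, buf, count));  // Loops while res==-1 && EINTR.
  return res;
}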
diff --git a/lib/sanitizer_common/sanitizer_lfstack.h b/lib/sanitizer_common/sanitizer_lfstack.h
new file mode 100644
index 000000000000..c26e45db8f89
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_lfstack.h
@@ -0,0 +1,73 @@
+//===-- sanitizer_lfstack.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Lock-free stack.
+// Uses 32/17 bits as ABA-counter on 32/64-bit platforms.
+// The memory passed to Push() must never be munmap'ed.
+// The type T must contain T *next field.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LFSTACK_H
+#define SANITIZER_LFSTACK_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+template<typename T>
+struct LFStack {
+ void Clear() {
+ atomic_store(&head_, 0, memory_order_relaxed);
+ }
+
+ bool Empty() const {
+ return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
+ }
+
+ void Push(T *p) {
+ u64 cmp = atomic_load(&head_, memory_order_relaxed);
+ for (;;) {
+ u64 cnt = (cmp & kCounterMask) + kCounterInc;
+ u64 xch = (u64)(uptr)p | cnt;
+ p->next = (T*)(uptr)(cmp & kPtrMask);
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_release))
+ break;
+ }
+ }
+
+ T *Pop() {
+ u64 cmp = atomic_load(&head_, memory_order_acquire);
+ for (;;) {
+ T *cur = (T*)(uptr)(cmp & kPtrMask);
+ if (cur == 0)
+ return 0;
+ T *nxt = cur->next;
+ u64 cnt = (cmp & kCounterMask);
+ u64 xch = (u64)(uptr)nxt | cnt;
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_acquire))
+ return cur;
+ }
+ }
+
+ // private:
+ static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
+ static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
+ static const u64 kCounterMask = ~kPtrMask;
+ static const u64 kCounterInc = kPtrMask + 1;
+
+ atomic_uint64_t head_;
+};
+}  // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_LFSTACK_H
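Editor's note: the usage contract from the header comment, restated as code (a sketch; Node is a hypothetical element type):

// Elements embed their own 'next' link, and the memory must stay mapped for
// the stack's lifetime, hence the static node below.
struct Node {
  Node *next;  // Required by LFStack<T>.
  int payload;
};

static LFStack<Node> stack;  // Zero-initialized == empty (Clear() semantics).

static void Example() {
  static Node n;
  n.payload = 42;
  stack.Push(&n);
  Node *got = stack.Pop();  // == &n here; Pop() returns 0 on an empty stack.
  (void)got;
}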
diff --git a/lib/sanitizer_common/sanitizer_libc.cc b/lib/sanitizer_common/sanitizer_libc.cc
index c4332423bf3a..349be35012dd 100644
--- a/lib/sanitizer_common/sanitizer_libc.cc
+++ b/lib/sanitizer_common/sanitizer_libc.cc
@@ -44,6 +44,23 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
return dest;
}
+void *internal_memmove(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ char *s = (char*)src;
+ sptr i, signed_n = (sptr)n;
+ CHECK_GE(signed_n, 0);
+ if (d < s) {
+ for (i = 0; i < signed_n; ++i)
+ d[i] = s[i];
+ } else {
+ if (d > s && signed_n > 0)
+ for (i = signed_n - 1; i >= 0 ; --i) {
+ d[i] = s[i];
+ }
+ }
+ return dest;
+}
+
void *internal_memset(void* s, int c, uptr n) {
// The next line prevents Clang from making a call to memset() instead of the
// loop below.
@@ -56,6 +73,15 @@ void *internal_memset(void* s, int c, uptr n) {
return s;
}
+uptr internal_strcspn(const char *s, const char *reject) {
+ uptr i;
+ for (i = 0; s[i]; i++) {
+ if (internal_strchr(reject, s[i]) != 0)
+ return i;
+ }
+ return i;
+}
+
char* internal_strdup(const char *s) {
uptr len = internal_strlen(s);
char *s2 = (char*)InternalAlloc(len + 1);
@@ -179,4 +205,23 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
}
}
+bool mem_is_zero(const char *beg, uptr size) {
+ CHECK_LE(size, 1UL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
+ const char *end = beg + size;
+ uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
+ uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
+ uptr all = 0;
+ // Prologue.
+ for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
+ all |= *mem;
+ // Aligned loop.
+ for (; aligned_beg < aligned_end; aligned_beg++)
+ all |= *aligned_beg;
+ // Epilogue.
+ if ((char*)aligned_end >= beg)
+ for (const char *mem = (char*)aligned_end; mem < end; mem++)
+ all |= *mem;
+ return all == 0;
+}
+
} // namespace __sanitizer
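Editor's note: mem_is_zero ORs every byte (and every aligned word) into a single accumulator so the common all-zero case runs branch-free over the bulk of the range. A quick sketch of the contract it satisfies:

// Sketch: the property mem_is_zero is expected to satisfy.
static void CheckMemIsZeroContract() {
  char buf[100] = {};  // Zero-initialized.
  CHECK(mem_is_zero(buf, sizeof(buf)));
  buf[57] = 1;  // One nonzero byte anywhere (prologue, aligned body, or
                // epilogue) must flip the answer.
  CHECK(!mem_is_zero(buf, sizeof(buf)));
}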
diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h
index 8da4286cef73..aa052c654d39 100644
--- a/lib/sanitizer_common/sanitizer_libc.h
+++ b/lib/sanitizer_common/sanitizer_libc.h
@@ -18,7 +18,7 @@
// ----------- ATTENTION -------------
// This header should NOT include any other headers from sanitizer runtime.
-#include "sanitizer_interface_defs.h"
+#include "sanitizer/common_interface_defs.h"
namespace __sanitizer {
@@ -29,10 +29,12 @@ s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
+void *internal_memmove(void *dest, const void *src, uptr n);
// Should not be used in performance-critical places.
void *internal_memset(void *s, int c, uptr n);
char* internal_strchr(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
+uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
uptr internal_strlen(const char *s);
char *internal_strncat(char *dst, const char *src, uptr n);
@@ -45,6 +47,11 @@ char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
+// Return true if all bytes in [mem, mem+size) are zero.
+// Optimized for the case when the result is true.
+bool mem_is_zero(const char *mem, uptr size);
+
// Memory
void *internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
@@ -53,12 +60,17 @@ int internal_munmap(void *addr, uptr length);
// I/O
typedef int fd_t;
const fd_t kInvalidFd = -1;
+const fd_t kStdinFd = 0;
+const fd_t kStdoutFd = 1;
+const fd_t kStderrFd = 2;
int internal_close(fd_t fd);
+int internal_isatty(fd_t fd);
fd_t internal_open(const char *filename, bool write);
uptr internal_read(fd_t fd, void *buf, uptr count);
uptr internal_write(fd_t fd, const void *buf, uptr count);
uptr internal_filesize(fd_t fd); // -1 on error.
int internal_dup2(int oldfd, int newfd);
+uptr internal_readlink(const char *path, char *buf, uptr bufsize);
int internal_snprintf(char *buffer, uptr length, const char *format, ...);
// Threading
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index 70e2eb346183..8b9ba38ca777 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -16,13 +16,12 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
-#include "sanitizer_symbolizer.h"
+#include "sanitizer_stacktrace.h"
-#include <elf.h>
#include <fcntl.h>
-#include <link.h>
#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>
@@ -32,13 +31,26 @@
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
+#include <unwind.h>
+#include <errno.h>
+#include <sys/prctl.h>
+#include <linux/futex.h>
+
+// Are we using 32-bit or 64-bit syscalls?
+// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
+// but it still needs to use 64-bit syscalls.
+#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+#else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+#endif
namespace __sanitizer {
// --------------- sanitizer_libc.h
void *internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
-#if __WORDSIZE == 64
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
#else
return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
@@ -59,15 +71,19 @@ fd_t internal_open(const char *filename, bool write) {
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
- return (uptr)syscall(__NR_read, fd, buf, count);
+ sptr res;
+ HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
+ return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
- return (uptr)syscall(__NR_write, fd, buf, count);
+ sptr res;
+ HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count));
+ return res;
}
uptr internal_filesize(fd_t fd) {
-#if __WORDSIZE == 64
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
struct stat st;
if (syscall(__NR_fstat, fd, &st))
return -1;
@@ -83,11 +99,33 @@ int internal_dup2(int oldfd, int newfd) {
return syscall(__NR_dup2, oldfd, newfd);
}
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ return (uptr)syscall(__NR_readlink, path, buf, bufsize);
+}
+
int internal_sched_yield() {
return syscall(__NR_sched_yield);
}
// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ struct stat st;
+ if (syscall(__NR_stat, filename, &st))
+ return false;
+#else
+ struct stat64 st;
+ if (syscall(__NR_stat64, filename, &st))
+ return false;
+#endif
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+uptr GetTid() {
+ return syscall(__NR_gettid);
+}
+
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M
@@ -99,7 +137,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
- ProcessMaps proc_maps;
+ MemoryMappingLayout proc_maps;
uptr start, end, offset;
uptr prev_end = 0;
while (proc_maps.Next(&start, &end, &offset, 0, 0)) {
@@ -163,102 +201,96 @@ const char *GetEnv(const char *name) {
return 0; // Not found.
}
-// ------------------ sanitizer_symbolizer.h
-typedef ElfW(Ehdr) Elf_Ehdr;
-typedef ElfW(Shdr) Elf_Shdr;
-typedef ElfW(Phdr) Elf_Phdr;
-
-bool FindDWARFSection(uptr object_file_addr, const char *section_name,
- DWARFSection *section) {
- Elf_Ehdr *exe = (Elf_Ehdr*)object_file_addr;
- Elf_Shdr *sections = (Elf_Shdr*)(object_file_addr + exe->e_shoff);
- uptr section_names = object_file_addr +
- sections[exe->e_shstrndx].sh_offset;
- for (int i = 0; i < exe->e_shnum; i++) {
- Elf_Shdr *current_section = &sections[i];
- const char *current_name = (const char*)section_names +
- current_section->sh_name;
- if (IsFullNameOfDWARFSection(current_name, section_name)) {
- section->data = (const char*)object_file_addr +
- current_section->sh_offset;
- section->size = current_section->sh_size;
- return true;
+static void ReadNullSepFileToArray(const char *path, char ***arr,
+ int arr_size) {
+ char *buff;
+ uptr buff_size = 0;
+ *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
+ ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
+ (*arr)[0] = buff;
+ int count, i;
+ for (count = 1, i = 1; ; i++) {
+ if (buff[i] == 0) {
+ if (buff[i+1] == 0) break;
+ (*arr)[count] = &buff[i+1];
+ CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
+ count++;
}
}
- return false;
+ (*arr)[count] = 0;
}
-#ifdef ANDROID
-uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
- UNIMPLEMENTED();
+void ReExec() {
+ static const int kMaxArgv = 100, kMaxEnvp = 1000;
+ char **argv, **envp;
+ ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv);
+ ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp);
+ execve(argv[0], argv, envp);
}
-#else // ANDROID
-struct DlIteratePhdrData {
- ModuleDIContext *modules;
- uptr current_n;
- uptr max_n;
-};
-static const uptr kMaxPathLength = 512;
-
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
- if (data->current_n == data->max_n)
- return 0;
- char *module_name = 0;
- if (data->current_n == 0) {
- // First module is the binary itself.
- module_name = (char*)InternalAlloc(kMaxPathLength);
- uptr module_name_len = readlink("/proc/self/exe",
- module_name, kMaxPathLength);
- CHECK_NE(module_name_len, (uptr)-1);
- CHECK_LT(module_name_len, kMaxPathLength);
- module_name[module_name_len] = '\0';
- } else if (info->dlpi_name) {
- module_name = internal_strdup(info->dlpi_name);
- }
- if (module_name == 0 || module_name[0] == '\0')
- return 0;
- void *mem = &data->modules[data->current_n];
- ModuleDIContext *cur_module = new(mem) ModuleDIContext(module_name,
- info->dlpi_addr);
- data->current_n++;
- for (int i = 0; i < info->dlpi_phnum; i++) {
- const Elf_Phdr *phdr = &info->dlpi_phdr[i];
- if (phdr->p_type == PT_LOAD) {
- uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
- uptr cur_end = cur_beg + phdr->p_memsz;
- cur_module->addAddressRange(cur_beg, cur_end);
- }
+void PrepareForSandboxing() {
+ // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+  // to read the file mappings from /proc/self/maps. Luckily, the process
+  // won't be able to load additional libraries either, so it's fine to use
+  // the cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+}
+
+// ----------------- sanitizer_procmaps.h
+// Both cache fields below are linker-initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_;
+
+MemoryMappingLayout::MemoryMappingLayout() {
+ proc_self_maps_.len =
+ ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
+ &proc_self_maps_.mmaped_size, 1 << 26);
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
}
- InternalFree(module_name);
- return 0;
+ // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
+ Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ CacheMemoryMappings();
}
-uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
- CHECK(modules);
- DlIteratePhdrData data = {modules, 0, max_modules};
- dl_iterate_phdr(dl_iterate_phdr_cb, &data);
- return data.current_n;
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
}
-#endif // ANDROID
-// ----------------- sanitizer_procmaps.h
-ProcessMaps::ProcessMaps() {
- proc_self_maps_buff_len_ =
- ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
- &proc_self_maps_buff_mmaped_size_, 1 << 26);
- CHECK_GT(proc_self_maps_buff_len_, 0);
- // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
- Reset();
+void MemoryMappingLayout::Reset() {
+ current_ = proc_self_maps_.data;
}
-ProcessMaps::~ProcessMaps() {
- UnmapOrDie(proc_self_maps_buff_, proc_self_maps_buff_mmaped_size_);
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+  ProcSelfMapsBuff old_proc_self_maps = cached_proc_self_maps_;
+ cached_proc_self_maps_.len =
+ ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
+ &cached_proc_self_maps_.mmaped_size, 1 << 26);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
}
-void ProcessMaps::Reset() {
- current_ = proc_self_maps_buff_;
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
}
// Parse a hex value in str and update str.
@@ -290,9 +322,9 @@ static bool IsDecimal(char c) {
return c >= '0' && c <= '9';
}
-bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size) {
- char *last = proc_self_maps_buff_ + proc_self_maps_buff_len_;
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
if (!start) start = &dummy;
@@ -336,13 +368,116 @@ bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
return true;
}
-// Gets the object name and the offset by walking ProcessMaps.
-bool ProcessMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size) {
+// Gets the object name and the offset by walking MemoryMappingLayout.
+bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[],
+ uptr filename_size) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
+bool SanitizerSetThreadName(const char *name) {
+#ifdef PR_SET_NAME
+ return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
+#else
+ return false;
+#endif
+}
+
+bool SanitizerGetThreadName(char *name, int max_len) {
+#ifdef PR_GET_NAME
+ char buff[17];
+ if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
+ return false;
+ internal_strncpy(name, buff, max_len);
+ name[max_len] = 0;
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifndef SANITIZER_GO
+//------------------------- SlowUnwindStack -----------------------------------
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ StackTrace *b = (StackTrace*)param;
+ CHECK(b->size < b->max_size);
+ uptr pc = Unwind_GetIP(ctx);
+ b->trace[b->size++] = pc;
+ if (b->size == b->max_size) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+static bool MatchPc(uptr cur_pc, uptr trace_pc) {
+ return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
+}
+
+void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ this->size = 0;
+ this->max_size = max_depth;
+ if (max_depth > 1) {
+ _Unwind_Backtrace(Unwind_Trace, this);
+ // We need to pop a few frames so that pc is on top.
+ // trace[0] belongs to the current function so we always pop it.
+ int to_pop = 1;
+ /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
+ else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
+ else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
+ else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
+ else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
+ this->PopStackFrames(to_pop);
+ }
+ this->trace[0] = pc;
+}
+
+#endif // #ifndef SANITIZER_GO
+
+enum MutexState {
+ MtxUnlocked = 0,
+ MtxLocked = 1,
+ MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+ CHECK_EQ(owner_, 0);
+}
+
+void BlockingMutex::Lock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
+ syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping)
+ syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
+}
+
} // namespace __sanitizer
#endif // __linux__
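Editor's note: the futex mutex above is the standard three-state scheme. A contended locker downgrades the state to MtxSleeping before sleeping, so Unlock() knows whether a FUTEX_WAKE is needed at all. Intended use (a sketch):

// BlockingMutex is designed for static storage: no constructor has to run
// before first use.
static BlockingMutex mu(LINKER_INITIALIZED);

static void WithLock() {
  mu.Lock();    // Fast path: a single atomic_exchange when uncontended.
  // ... touch shared state ...
  mu.Unlock();  // Issues FUTEX_WAKE only if some locker stored MtxSleeping.
}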
diff --git a/lib/sanitizer_common/sanitizer_list.h b/lib/sanitizer_common/sanitizer_list.h
index ef98eee12317..f61d28f3d900 100644
--- a/lib/sanitizer_common/sanitizer_list.h
+++ b/lib/sanitizer_common/sanitizer_list.h
@@ -72,6 +72,8 @@ struct IntrusiveList {
void append_front(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
+ if (l->empty())
+ return;
if (empty()) {
*this = *l;
} else if (!l->empty()) {
@@ -84,6 +86,8 @@ struct IntrusiveList {
void append_back(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
+ if (l->empty())
+ return;
if (empty()) {
*this = *l;
} else {
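Editor's note: the early returns added here matter because the else branches splice l->first_/l->last_ into this list, and for an empty source those are stale or null pointers. With the fix, appending an empty list is a no-op (a sketch):

struct Item { Item *next; };

static void AppendEmpty() {
  IntrusiveList<Item> a, b;
  a.clear();
  b.clear();
  static Item it;
  a.push_back(&it);
  a.append_back(&b);  // b is empty: now a no-op instead of rewiring a's tail
                      // through b's null first_/last_ pointers.
}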
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index e64c2debb882..c4b8e4c2bcf2 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -18,7 +18,6 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"
-#include "sanitizer_symbolizer.h"
#include <crt_externs.h> // for _NSGetEnviron
#include <fcntl.h>
@@ -31,6 +30,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include <libkern/OSAtomic.h>
namespace __sanitizer {
@@ -62,7 +62,7 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
}
uptr internal_filesize(fd_t fd) {
- struct stat st = {};
+ struct stat st;
if (fstat(fd, &st))
return -1;
return (uptr)st.st_size;
@@ -72,11 +72,27 @@ int internal_dup2(int oldfd, int newfd) {
return dup2(oldfd, newfd);
}
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ return readlink(path, buf, bufsize);
+}
+
int internal_sched_yield() {
return sched_yield();
}
// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+ struct stat st;
+ if (stat(filename, &st))
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+uptr GetTid() {
+ return reinterpret_cast<uptr>(pthread_self());
+}
+
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
CHECK(stack_top);
@@ -107,25 +123,21 @@ const char *GetEnv(const char *name) {
return 0;
}
-// ------------------ sanitizer_symbolizer.h
-bool FindDWARFSection(uptr object_file_addr, const char *section_name,
- DWARFSection *section) {
+void ReExec() {
UNIMPLEMENTED();
- return false;
}
-uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
- UNIMPLEMENTED();
- return 0;
-};
+void PrepareForSandboxing() {
+ // Nothing here for now.
+}
// ----------------- sanitizer_procmaps.h
-ProcessMaps::ProcessMaps() {
+MemoryMappingLayout::MemoryMappingLayout() {
Reset();
}
-ProcessMaps::~ProcessMaps() {
+MemoryMappingLayout::~MemoryMappingLayout() {
}
// More information about Mach-O headers can be found in mach-o/loader.h
@@ -142,15 +154,26 @@ ProcessMaps::~ProcessMaps() {
// Because these fields are taken from the images as is, one needs to add
// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
-void ProcessMaps::Reset() {
+void MemoryMappingLayout::Reset() {
// Count down from the top.
// TODO(glider): as per man 3 dyld, iterating over the headers with
// _dyld_image_count is thread-unsafe. We need to register callbacks for
- // adding and removing images which will invalidate the ProcessMaps state.
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
current_image_ = _dyld_image_count();
current_load_cmd_count_ = -1;
current_load_cmd_addr_ = 0;
current_magic_ = 0;
+ current_filetype_ = 0;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
}
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
@@ -161,7 +184,7 @@ void ProcessMaps::Reset() {
// segment.
// Note that the segment addresses are not necessarily sorted.
template<u32 kLCSegment, typename SegmentCommand>
-bool ProcessMaps::NextSegmentLoad(
+bool MemoryMappingLayout::NextSegmentLoad(
uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size) {
const char* lc = current_load_cmd_addr_;
@@ -171,7 +194,13 @@ bool ProcessMaps::NextSegmentLoad(
const SegmentCommand* sc = (const SegmentCommand *)lc;
if (start) *start = sc->vmaddr + dlloff;
if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
- if (offset) *offset = sc->fileoff;
+ if (offset) {
+ if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
+ *offset = sc->vmaddr;
+ } else {
+ *offset = sc->fileoff;
+ }
+ }
if (filename) {
internal_strncpy(filename, _dyld_get_image_name(current_image_),
filename_size);
@@ -181,8 +210,8 @@ bool ProcessMaps::NextSegmentLoad(
return false;
}
-bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size) {
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
@@ -190,6 +219,7 @@ bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
// Set up for this image.
current_load_cmd_count_ = hdr->ncmds;
current_magic_ = hdr->magic;
+ current_filetype_ = hdr->filetype;
switch (current_magic_) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
@@ -232,12 +262,31 @@ bool ProcessMaps::Next(uptr *start, uptr *end, uptr *offset,
return false;
}
-bool ProcessMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size) {
+bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[],
+ uptr filename_size) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+ // We assume that OS_SPINLOCK_INIT is zero
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+ CHECK(OS_SPINLOCK_INIT == 0);
+ CHECK(owner_ != (uptr)pthread_self());
+ OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+ CHECK(!owner_);
+ owner_ = (uptr)pthread_self();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == (uptr)pthread_self());
+ owner_ = 0;
+ OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
+}
+
} // namespace __sanitizer
#endif // __APPLE__
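
The Mac BlockingMutex above relies on OSSpinLock fitting inside opaque_storage_ and on OS_SPINLOCK_INIT being zero, so a zero-initialized (linker-initialized) object is already unlocked. A freestanding sketch of the same pattern, not the runtime's actual code:

#include <libkern/OSAtomic.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>

struct Mutex {
  uintptr_t storage[10];  // zero-initialized == OS_SPINLOCK_INIT == unlocked
  uintptr_t owner;        // for debugging only
  void Lock() {
    assert(owner != (uintptr_t)pthread_self());  // catch self-deadlock
    OSSpinLockLock((OSSpinLock *)&storage);
    owner = (uintptr_t)pthread_self();
  }
  void Unlock() {
    assert(owner == (uintptr_t)pthread_self());
    owner = 0;
    OSSpinLockUnlock((OSSpinLock *)&storage);
  }
};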
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
index ca3e2f9a4839..56438fce471c 100644
--- a/lib/sanitizer_common/sanitizer_mutex.h
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -20,18 +20,22 @@
namespace __sanitizer {
-class SpinMutex {
+class StaticSpinMutex {
public:
- SpinMutex() {
+ void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
void Lock() {
- if (atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ if (TryLock())
return;
LockSlow();
}
+ bool TryLock() {
+ return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
+ }
+
void Unlock() {
atomic_store(&state_, 0, memory_order_release);
}
@@ -50,11 +54,29 @@ class SpinMutex {
return;
}
}
+};
+class SpinMutex : public StaticSpinMutex {
+ public:
+ SpinMutex() {
+ Init();
+ }
+
+ private:
SpinMutex(const SpinMutex&);
void operator=(const SpinMutex&);
};
+class BlockingMutex {
+ public:
+ explicit BlockingMutex(LinkerInitialized);
+ void Lock();
+ void Unlock();
+ private:
+ uptr opaque_storage_[10];
+ uptr owner_; // for debugging
+};
+
template<typename MutexType>
class GenericScopedLock {
public:
@@ -93,7 +115,8 @@ class GenericScopedReadLock {
void operator=(const GenericScopedReadLock&);
};
-typedef GenericScopedLock<SpinMutex> SpinMutexLock;
+typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
} // namespace __sanitizer
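
With the new typedefs, both mutex kinds are used through the same RAII wrapper. A usage sketch under the declarations above:

static BlockingMutex blocking_mu(LINKER_INITIALIZED);  // zero-initialized
static SpinMutex spin_mu;

void SlowPathUpdate() {
  BlockingMutexLock l(&blocking_mu);  // released automatically on scope exit
  // ... potentially long critical section ...
}

void FastPathUpdate() {
  SpinMutexLock l(&spin_mu);  // also accepts a plain StaticSpinMutex
  // ... short critical section ...
}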
diff --git a/lib/sanitizer_common/sanitizer_placement_new.h b/lib/sanitizer_common/sanitizer_placement_new.h
index f133a6ffe513..c0b85e1c1717 100644
--- a/lib/sanitizer_common/sanitizer_placement_new.h
+++ b/lib/sanitizer_common/sanitizer_placement_new.h
@@ -19,7 +19,7 @@
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
-#if (__WORDSIZE == 64) || defined(__APPLE__)
+#if (SANITIZER_WORDSIZE == 64) || defined(__APPLE__)
typedef uptr operator_new_ptr_type;
#else
typedef u32 operator_new_ptr_type;
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
new file mode 100644
index 000000000000..abd41fe8c997
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -0,0 +1,38 @@
+//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines macros telling whether sanitizer tools can/should
+// intercept given library functions on a given platform.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_internal_defs.h"
+
+#if !defined(_WIN32)
+# define SI_NOT_WINDOWS 1
+#else
+# define SI_NOT_WINDOWS 0
+#endif
+
+#if defined(__linux__) && !defined(ANDROID)
+# define SI_LINUX_NOT_ANDROID 1
+#else
+# define SI_LINUX_NOT_ANDROID 0
+#endif
+
+# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+
+# define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID
+
+# define SANITIZER_INTERCEPT_SCANF 1
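
These macros are intended to gate interceptor definitions per platform. A hypothetical sketch, assuming the INTERCEPTOR/REAL macros and integer typedefs from the interception library:

#include "sanitizer_platform_interceptors.h"

#if SANITIZER_INTERCEPT_PREAD64
// Compiled only on Linux/non-Android; the symbol doesn't exist elsewhere.
INTERCEPTOR(SSIZE_T, pread64, int fd, void *buf, SIZE_T count, OFF64_T offset) {
  // ... tool-specific checks on [buf, buf + count) would go here ...
  return REAL(pread64)(fd, buf, count, offset);
}
#endif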
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index 4caee3ba68c0..32657838600d 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -17,10 +17,12 @@
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"
+#include <errno.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>
@@ -30,6 +32,13 @@
namespace __sanitizer {
// ------------- sanitizer_common.h
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+uptr GetMmapGranularity() {
+ return GetPageSize();
+}
int GetPid() {
return getpid();
@@ -40,13 +49,22 @@ uptr GetThreadSelf() {
}
void *MmapOrDie(uptr size, const char *mem_type) {
- size = RoundUpTo(size, kPageSize);
+ size = RoundUpTo(size, GetPageSizeCached());
void *res = internal_mmap(0, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (res == (void*)-1) {
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
- size, size, mem_type);
+ static int recursion_count;
+ if (recursion_count) {
+ // The Report() and CHECK calls below may call mmap recursively and fail.
+ // If we went into recursion, just die.
+ RawWrite("AddressSanitizer is unable to mmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s: %s\n",
+ size, size, mem_type, strerror(errno));
+ DumpProcessMap();
CHECK("unable to mmap" && 0);
}
return res;
@@ -63,10 +81,31 @@ void UnmapOrDie(void *addr, uptr size) {
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
- return internal_mmap((void*)fixed_addr, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- -1, 0);
+ uptr PageSize = GetPageSizeCached();
+ void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
+ -1, 0);
+ if (p == (void*)-1)
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ size, size, fixed_addr, errno);
+ return p;
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ uptr PageSize = GetPageSizeCached();
+ void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ -1, 0);
+ if (p == (void*)-1) {
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ size, size, fixed_addr, errno);
+ CHECK("unable to mmap" && 0);
+ }
+ return p;
}
void *Mprotect(uptr fixed_addr, uptr size) {
@@ -76,13 +115,17 @@ void *Mprotect(uptr fixed_addr, uptr size) {
-1, 0);
}
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+ madvise((void*)addr, size, MADV_DONTNEED);
+}
+
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
fd_t fd = internal_open(file_name, false);
CHECK_NE(fd, kInvalidFd);
uptr fsize = internal_filesize(fd);
CHECK_NE(fsize, (uptr)-1);
CHECK_GT(fsize, 0);
- *buff_size = RoundUpTo(fsize, kPageSize);
+ *buff_size = RoundUpTo(fsize, GetPageSizeCached());
void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
return (map == MAP_FAILED) ? 0 : map;
}
@@ -100,7 +143,7 @@ static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
- ProcessMaps procmaps;
+ MemoryMappingLayout procmaps;
uptr start, end;
while (procmaps.Next(&start, &end,
/*offset*/0, /*filename*/0, /*filename_size*/0)) {
@@ -111,7 +154,7 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
}
void DumpProcessMap() {
- ProcessMaps proc_maps;
+ MemoryMappingLayout proc_maps;
uptr start, end;
const sptr kBufSize = 4095;
char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
@@ -135,6 +178,23 @@ void DisableCoreDumper() {
setrlimit(RLIMIT_CORE, &nocore);
}
+bool StackSizeIsUnlimited() {
+ struct rlimit rlim;
+ CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
+ return (rlim.rlim_cur == (uptr)-1);
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ struct rlimit rlim;
+ rlim.rlim_cur = limit;
+ rlim.rlim_max = limit;
+ if (setrlimit(RLIMIT_STACK, &rlim)) {
+ Report("setrlimit() failed %d\n", errno);
+ Die();
+ }
+ CHECK(!StackSizeIsUnlimited());
+}
+
void SleepForSeconds(int seconds) {
sleep(seconds);
}
@@ -159,6 +219,10 @@ int Atexit(void (*function)(void)) {
#endif
}
+int internal_isatty(fd_t fd) {
+ return isatty(fd);
+}
+
} // namespace __sanitizer
#endif // __linux__ || __APPLE__
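
Both fixed-address mappers above round the requested address down and the size up to page boundaries before calling mmap. A worked example of that arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

int main() {
  uintptr_t page = 4096;                            // assumed page size
  uintptr_t fixed_addr = 0x70001234, size = 5000;
  uintptr_t base = fixed_addr & ~(page - 1);        // rounds down to 0x70001000
  uintptr_t len = (size + page - 1) & ~(page - 1);  // rounds up to 8192
  printf("map [0x%lx, 0x%lx)\n", (unsigned long)base,
         (unsigned long)(base + len));
  return 0;
}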
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
index 7b70c3aae23d..2393e8f2b87b 100644
--- a/lib/sanitizer_common/sanitizer_printf.cc
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -45,7 +45,12 @@ static int AppendUnsigned(char **buff, const char *buff_end, u64 num,
num_buffer[pos++] = num % base;
num /= base;
} while (num > 0);
- while (pos < minimal_num_length) num_buffer[pos++] = 0;
+ if (pos < minimal_num_length) {
+ // Make sure compiler doesn't insert call to memset here.
+ internal_memset(&num_buffer[pos], 0,
+ sizeof(num_buffer[0]) * (minimal_num_length - pos));
+ pos = minimal_num_length;
+ }
int result = 0;
while (pos-- > 0) {
uptr digit = num_buffer[pos];
@@ -55,13 +60,16 @@ static int AppendUnsigned(char **buff, const char *buff_end, u64 num,
return result;
}
-static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num) {
+static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
+ u8 minimal_num_length) {
int result = 0;
if (num < 0) {
result += AppendChar(buff, buff_end, '-');
num = -num;
+ if (minimal_num_length)
+ --minimal_num_length;
}
- result += AppendUnsigned(buff, buff_end, (u64)num, 10, 0);
+ result += AppendUnsigned(buff, buff_end, (u64)num, 10, minimal_num_length);
return result;
}
@@ -79,14 +87,14 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int result = 0;
result += AppendString(buff, buff_end, "0x");
result += AppendUnsigned(buff, buff_end, ptr_value, 16,
- (__WORDSIZE == 64) ? 12 : 8);
+ (SANITIZER_WORDSIZE == 64) ? 12 : 8);
return result;
}
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
- static const char *kPrintfFormatsHelp = "Supported Printf formats: "
- "%%[z]{d,u,x}; %%p; %%s\n";
+ static const char *kPrintfFormatsHelp =
+ "Supported Printf formats: %(0[0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@@ -98,37 +106,55 @@ int VSNPrintf(char *buff, int buff_length,
continue;
}
cur++;
+ bool have_width = (*cur == '0');
+ int width = 0;
+ if (have_width) {
+ while (*cur >= '0' && *cur <= '9') {
+ have_width = true;
+ width = width * 10 + *cur++ - '0';
+ }
+ }
bool have_z = (*cur == 'z');
cur += have_z;
+ bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+ cur += have_ll * 2;
s64 dval;
u64 uval;
+ bool have_flags = have_width | have_z | have_ll;
switch (*cur) {
case 'd': {
- dval = have_z ? va_arg(args, sptr)
- : va_arg(args, int);
- result += AppendSignedDecimal(&buff, buff_end, dval);
+ dval = have_ll ? va_arg(args, s64)
+ : have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
+ result += AppendSignedDecimal(&buff, buff_end, dval, width);
break;
}
case 'u':
case 'x': {
- uval = have_z ? va_arg(args, uptr)
- : va_arg(args, unsigned);
+ uval = have_ll ? va_arg(args, u64)
+ : have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
result += AppendUnsigned(&buff, buff_end, uval,
- (*cur == 'u') ? 10 : 16, 0);
+ (*cur == 'u') ? 10 : 16, width);
break;
}
case 'p': {
- RAW_CHECK_MSG(!have_z, kPrintfFormatsHelp);
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
break;
}
case 's': {
- RAW_CHECK_MSG(!have_z, kPrintfFormatsHelp);
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
result += AppendString(&buff, buff_end, va_arg(args, char*));
break;
}
+ case 'c': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, va_arg(args, int));
+ break;
+ }
case '%' : {
- RAW_CHECK_MSG(!have_z, kPrintfFormatsHelp);
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
result += AppendChar(&buff, buff_end, '%');
break;
}
@@ -142,16 +168,22 @@ int VSNPrintf(char *buff, int buff_length,
return result;
}
+static void (*PrintfAndReportCallback)(const char *);
+void SetPrintfAndReportCallback(void (*callback)(const char *)) {
+ PrintfAndReportCallback = callback;
+}
+
void Printf(const char *format, ...) {
- const int kLen = 1024 * 4;
- char *buffer = (char*)MmapOrDie(kLen, __FUNCTION__);
+ const int kLen = 16 * 1024;
+ InternalScopedBuffer<char> buffer(kLen);
va_list args;
va_start(args, format);
- int needed_length = VSNPrintf(buffer, kLen, format, args);
+ int needed_length = VSNPrintf(buffer.data(), kLen, format, args);
va_end(args);
RAW_CHECK_MSG(needed_length < kLen, "Buffer in Printf is too short!\n");
- RawWrite(buffer);
- UnmapOrDie(buffer, kLen);
+ RawWrite(buffer.data());
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(buffer.data());
}
// Writes at most "length" symbols to "buffer" (including trailing '\0').
@@ -168,18 +200,20 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
// Like Printf, but prints the current PID before the output string.
void Report(const char *format, ...) {
- const int kLen = 1024 * 4;
- char *buffer = (char*)MmapOrDie(kLen, __FUNCTION__);
- int needed_length = internal_snprintf(buffer, kLen, "==%d== ", GetPid());
+ const int kLen = 16 * 1024;
+ InternalScopedBuffer<char> buffer(kLen);
+ int needed_length = internal_snprintf(buffer.data(),
+ kLen, "==%d== ", GetPid());
RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
va_list args;
va_start(args, format);
- needed_length += VSNPrintf(buffer + needed_length, kLen - needed_length,
- format, args);
+ needed_length += VSNPrintf(buffer.data() + needed_length,
+ kLen - needed_length, format, args);
va_end(args);
RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
- RawWrite(buffer);
- UnmapOrDie(buffer, kLen);
+ RawWrite(buffer.data());
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(buffer.data());
}
} // namespace __sanitizer
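
Illustrative calls covering the newly accepted conversions, per the updated kPrintfFormatsHelp string (values are arbitrary):

void PrintfExamples() {
  Printf("%08x\n", 0xbeef);           // zero-padded width: prints 0000beef
  Printf("%zd bytes\n", (sptr)-12);   // 'z': pointer-sized argument
  Printf("%llu\n", (u64)1 << 40);     // 'll': explicit 64-bit argument
  Printf("%c\n", '!');                // new %c conversion
  // Width and length modifiers are rejected on %p, %s, %c and %%:
  // RAW_CHECK_MSG fires with the help string above.
}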
diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h
index e7f9cac6cf6c..1b8ea7aff165 100644
--- a/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/lib/sanitizer_common/sanitizer_procmaps.h
@@ -15,12 +15,32 @@
#define SANITIZER_PROCMAPS_H
#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
-class ProcessMaps {
+#ifdef _WIN32
+class MemoryMappingLayout {
public:
- ProcessMaps();
+ MemoryMappingLayout() {}
+ bool GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[], uptr filename_size) {
+ UNIMPLEMENTED();
+ }
+};
+
+#else // _WIN32
+#if defined(__linux__)
+struct ProcSelfMapsBuff {
+ char *data;
+ uptr mmaped_size;
+ uptr len;
+};
+#endif // defined(__linux__)
+
+class MemoryMappingLayout {
+ public:
+ MemoryMappingLayout();
bool Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size);
void Reset();
@@ -28,9 +48,14 @@ class ProcessMaps {
// address 'addr'. Returns true on success.
bool GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[], uptr filename_size);
- ~ProcessMaps();
+ // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+ // to obtain the memory mappings. It should fall back to pre-cached data
+ // instead of aborting.
+ static void CacheMemoryMappings();
+ ~MemoryMappingLayout();
private:
+ void LoadFromCache();
// Default implementation of GetObjectNameAndOffset.
// Quite slow, because it iterates through the whole process map for each
// lookup.
@@ -61,22 +86,27 @@ class ProcessMaps {
return false;
}
-#if defined __linux__
- char *proc_self_maps_buff_;
- uptr proc_self_maps_buff_mmaped_size_;
- uptr proc_self_maps_buff_len_;
+# if defined __linux__
+ ProcSelfMapsBuff proc_self_maps_;
char *current_;
-#elif defined __APPLE__
+
+ // Static mappings cache.
+ static ProcSelfMapsBuff cached_proc_self_maps_;
+ static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
+# elif defined __APPLE__
template<u32 kLCSegment, typename SegmentCommand>
bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size);
int current_image_;
u32 current_magic_;
+ u32 current_filetype_;
int current_load_cmd_count_;
char *current_load_cmd_addr_;
-#endif
+# endif
};
+#endif // _WIN32
+
} // namespace __sanitizer
#endif // SANITIZER_PROCMAPS_H
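
Typical iteration over the renamed class, mirroring MemoryRangeIsAvailable in sanitizer_posix.cc:

bool RangeIsUnmapped(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps;
  uptr start, end;
  while (proc_maps.Next(&start, &end, /*offset*/0,
                        /*filename*/0, /*filename_size*/0)) {
    if (start < range_end && range_start < end)
      return false;  // an existing mapping overlaps the range
  }
  return true;
}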
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
new file mode 100644
index 000000000000..ec90d2d6871b
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -0,0 +1,172 @@
+//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Memory quarantine for AddressSanitizer and potentially other tools.
+// The quarantine caches a specified amount of memory in per-thread caches,
+// then evicts it to a global FIFO queue. When the queue reaches a specified
+// threshold, the oldest memory is recycled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_QUARANTINE_H
+#define SANITIZER_QUARANTINE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_list.h"
+
+namespace __sanitizer {
+
+template<typename Node> class QuarantineCache;
+
+struct QuarantineBatch {
+ static const uptr kSize = 1024;
+ QuarantineBatch *next;
+ uptr size;
+ uptr count;
+ void *batch[kSize];
+};
+
+// The callback interface is:
+// void Callback::Recycle(Node *ptr);
+// void *Callback::Allocate(uptr size);
+// void Callback::Deallocate(void *ptr);
+template<typename Callback, typename Node>
+class Quarantine {
+ public:
+ typedef QuarantineCache<Callback> Cache;
+
+ explicit Quarantine(LinkerInitialized)
+ : cache_(LINKER_INITIALIZED) {
+ }
+
+ void Init(uptr size, uptr cache_size) {
+ max_size_ = size;
+ min_size_ = size / 10 * 9; // 90% of max size.
+ max_cache_size_ = cache_size;
+ }
+
+ void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
+ c->Enqueue(cb, ptr, size);
+ if (c->Size() > max_cache_size_)
+ Drain(c, cb);
+ }
+
+ void NOINLINE Drain(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
+ Recycle(cb);
+ }
+
+ private:
+ // Read-only data.
+ char pad0_[kCacheLineSize];
+ uptr max_size_;
+ uptr min_size_;
+ uptr max_cache_size_;
+ char pad1_[kCacheLineSize];
+ SpinMutex cache_mutex_;
+ SpinMutex recycle_mutex_;
+ Cache cache_;
+ char pad2_[kCacheLineSize];
+
+ void NOINLINE Recycle(Callback cb) {
+ Cache tmp;
+ {
+ SpinMutexLock l(&cache_mutex_);
+ while (cache_.Size() > min_size_) {
+ QuarantineBatch *b = cache_.DequeueBatch();
+ tmp.EnqueueBatch(b);
+ }
+ }
+ recycle_mutex_.Unlock();
+ DoRecycle(&tmp, cb);
+ }
+
+ void NOINLINE DoRecycle(Cache *c, Callback cb) {
+ while (QuarantineBatch *b = c->DequeueBatch()) {
+ const uptr kPrefetch = 16;
+ for (uptr i = 0; i < kPrefetch; i++)
+ PREFETCH(b->batch[i]);
+ for (uptr i = 0; i < b->count; i++) {
+ PREFETCH(b->batch[i + kPrefetch]);
+ cb.Recycle((Node*)b->batch[i]);
+ }
+ cb.Deallocate(b);
+ }
+ }
+};
+
+// Per-thread cache of memory blocks.
+template<typename Callback>
+class QuarantineCache {
+ public:
+ explicit QuarantineCache(LinkerInitialized) {
+ }
+
+ QuarantineCache()
+ : size_() {
+ list_.clear();
+ }
+
+ uptr Size() const {
+ return atomic_load(&size_, memory_order_relaxed);
+ }
+
+ void Enqueue(Callback cb, void *ptr, uptr size) {
+ if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
+ AllocBatch(cb);
+ QuarantineBatch *b = list_.back();
+ b->batch[b->count++] = ptr;
+ b->size += size;
+ SizeAdd(size);
+ }
+
+ void Transfer(QuarantineCache *c) {
+ list_.append_back(&c->list_);
+ SizeAdd(c->Size());
+ atomic_store(&c->size_, 0, memory_order_relaxed);
+ }
+
+ void EnqueueBatch(QuarantineBatch *b) {
+ list_.push_back(b);
+ SizeAdd(b->size);
+ }
+
+ QuarantineBatch *DequeueBatch() {
+ if (list_.empty())
+ return 0;
+ QuarantineBatch *b = list_.front();
+ list_.pop_front();
+ SizeAdd(-b->size);
+ return b;
+ }
+
+ private:
+ IntrusiveList<QuarantineBatch> list_;
+ atomic_uintptr_t size_;
+
+ void SizeAdd(uptr add) {
+ atomic_store(&size_, Size() + add, memory_order_relaxed);
+ }
+
+ QuarantineBatch *NOINLINE AllocBatch(Callback cb) {
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ b->count = 0;
+ b->size = 0;
+ list_.push_back(b);
+ return b;
+ }
+};
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_QUARANTINE_H
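
A minimal sketch of a callback type satisfying the interface documented above; the Chunk type and the use of InternalAlloc/InternalFree for batch storage are illustrative assumptions:

struct Chunk;  // whatever the tool quarantines

struct QuarantineCallback {
  void Recycle(Chunk *c) { /* actually release the chunk here */ }
  void *Allocate(uptr size) { return InternalAlloc(size); }  // batch storage
  void Deallocate(void *p) { InternalFree(p); }
};

typedef Quarantine<QuarantineCallback, Chunk> ChunkQuarantine;
static ChunkQuarantine quarantine(LINKER_INITIALIZED);
static ChunkQuarantine::Cache cache(LINKER_INITIALIZED);

void QuarantineChunk(Chunk *c, uptr size) {
  // Init(max_size, cache_size) must have been called once at startup.
  quarantine.Put(&cache, QuarantineCallback(), c, size);
}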
diff --git a/lib/sanitizer_common/sanitizer_report_decorator.h b/lib/sanitizer_common/sanitizer_report_decorator.h
new file mode 100644
index 000000000000..50a3ee572fdb
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_report_decorator.h
@@ -0,0 +1,37 @@
+//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tags to decorate the sanitizer reports.
+// Currently supported tags:
+// * None.
+// * ANSI color sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
+
+namespace __sanitizer {
+class AnsiColorDecorator {
+ public:
+ explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+ const char *Black() { return ansi_ ? "\033[1m\033[30m" : ""; }
+ const char *Red() { return ansi_ ? "\033[1m\033[31m" : ""; }
+ const char *Green() { return ansi_ ? "\033[1m\033[32m" : ""; }
+ const char *Yellow() { return ansi_ ? "\033[1m\033[33m" : ""; }
+ const char *Blue() { return ansi_ ? "\033[1m\033[34m" : ""; }
+ const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; }
+ const char *Cyan() { return ansi_ ? "\033[1m\033[36m" : ""; }
+ const char *White() { return ansi_ ? "\033[1m\033[37m" : ""; }
+ const char *Default() { return ansi_ ? "\033[1m\033[0m" : ""; }
+ private:
+ bool ansi_;
+};
+} // namespace __sanitizer
+#endif // SANITIZER_REPORT_DECORATOR_H
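
Intended use pairs the decorator with Printf; whether to enable colors (for example, only when stderr is a terminal) is the caller's choice:

void WarnInColor() {
  AnsiColorDecorator d(internal_isatty(2));  // color only when stderr is a tty
  Printf("%sERROR%s: something went wrong\n", d.Red(), d.Default());
}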
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc
new file mode 100644
index 000000000000..08e5238325e5
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -0,0 +1,204 @@
+//===-- sanitizer_stackdepot.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stackdepot.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+const int kTabSize = 1024 * 1024; // Hash table size.
+const int kPartBits = 8;
+const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
+const int kPartCount = 1 << kPartBits; // Number of subparts in the table.
+const int kPartSize = kTabSize / kPartCount;
+const int kMaxId = 1 << kPartShift;
+
+struct StackDesc {
+ StackDesc *link;
+ u32 id;
+ u32 hash;
+ uptr size;
+ uptr stack[1]; // [size]
+};
+
+static struct {
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for StackDesc's.
+ atomic_uintptr_t region_end;
+ atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+} depot;
+
+static StackDepotStats stats;
+
+StackDepotStats *StackDepotGetStats() {
+ return &stats;
+}
+
+static u32 hash(const uptr *stack, uptr size) {
+ // murmur2
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed ^ (size * sizeof(uptr));
+ for (uptr i = 0; i < size; i++) {
+ u32 k = stack[i];
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+}
+
+static StackDesc *tryallocDesc(uptr memsz) {
+ // Optimistic lock-free allocation: essentially, try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
+ uptr end = atomic_load(&depot.region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + memsz > end)
+ return 0;
+ if (atomic_compare_exchange_weak(
+ &depot.region_pos, &cmp, cmp + memsz,
+ memory_order_acquire))
+ return (StackDesc*)cmp;
+ }
+}
+
+static StackDesc *allocDesc(uptr size) {
+ // First, try to allocate optimistically.
+ uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
+ StackDesc *s = tryallocDesc(memsz);
+ if (s)
+ return s;
+ // If failed, lock, retry and alloc new superblock.
+ SpinMutexLock l(&depot.mtx);
+ for (;;) {
+ s = tryallocDesc(memsz);
+ if (s)
+ return s;
+ atomic_store(&depot.region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < memsz)
+ allocsz = memsz;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ stats.mapped += allocsz;
+ atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
+ atomic_store(&depot.region_pos, mem, memory_order_release);
+ }
+}
+
+static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
+ // Searches linked list s for the stack, returns its id.
+ for (; s; s = s->link) {
+ if (s->hash == hash && s->size == size) {
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != s->stack[i])
+ break;
+ }
+ if (i == size)
+ return s->id;
+ }
+ }
+ return 0;
+}
+
+static StackDesc *lock(atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0
+ && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
+ memory_order_acquire))
+ return (StackDesc*)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+static void unlock(atomic_uintptr_t *p, StackDesc *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+u32 StackDepotPut(const uptr *stack, uptr size) {
+ if (stack == 0 || size == 0)
+ return 0;
+ uptr h = hash(stack, size);
+ atomic_uintptr_t *p = &depot.tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ StackDesc *s = (StackDesc*)(v & ~1);
+ // First, try to find the existing stack.
+ u32 id = find(s, stack, size, h);
+ if (id)
+ return id;
+ // If failed, lock, retry and insert new.
+ StackDesc *s2 = lock(p);
+ if (s2 != s) {
+ id = find(s2, stack, size, h);
+ if (id) {
+ unlock(p, s2);
+ return id;
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (1u << 31), 0);
+ s = allocDesc(size);
+ s->id = id;
+ s->hash = h;
+ s->size = size;
+ internal_memcpy(s->stack, stack, size * sizeof(uptr));
+ s->link = s2;
+ unlock(p, s);
+ return id;
+}
+
+const uptr *StackDepotGet(u32 id, uptr *size) {
+ if (id == 0)
+ return 0;
+ CHECK_EQ(id & (1u << 31), 0);
+ // The high kPartBits contain the part id, so we scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &depot.tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ StackDesc *s = (StackDesc*)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ *size = s->size;
+ return s->stack;
+ }
+ }
+ }
+ *size = 0;
+ return 0;
+}
+
+} // namespace __sanitizer
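
Round-trip usage of the depot; ids are stable, so equal traces stored from different threads map to the same 32-bit handle:

void DepotRoundTrip() {
  uptr pcs[3] = {0x400123, 0x400456, 0x400789};  // arbitrary example pcs
  u32 id = StackDepotPut(pcs, 3);  // the same trace always yields the same id
  uptr size = 0;
  const uptr *stored = StackDepotGet(id, &size);
  // Now size == 3 and stored points into the depot's own copy of the trace;
  // the depot never frees, so the pointer stays valid for the process's life.
  (void)stored;
}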
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.h b/lib/sanitizer_common/sanitizer_stackdepot.h
new file mode 100644
index 000000000000..49e6669dd203
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -0,0 +1,36 @@
+//===-- sanitizer_stackdepot.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOT_H
+#define SANITIZER_STACKDEPOT_H
+
+#include "sanitizer/common_interface_defs.h"
+
+namespace __sanitizer {
+
+// StackDepot efficiently stores huge amounts of stack traces.
+
+// Maps stack trace to an unique id.
+u32 StackDepotPut(const uptr *stack, uptr size);
+// Retrieves a stored stack trace by the id.
+const uptr *StackDepotGet(u32 id, uptr *size);
+
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr mapped;
+};
+
+StackDepotStats *StackDepotGetStats();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOT_H
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.cc b/lib/sanitizer_common/sanitizer_stacktrace.cc
new file mode 100644
index 000000000000..109a674e45b3
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -0,0 +1,262 @@
+//===-- sanitizer_stacktrace.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+static const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix) {
+ if (filepath == internal_strstr(filepath, strip_file_prefix))
+ return filepath + internal_strlen(strip_file_prefix);
+ return filepath;
+}
+
+// ----------------------- StackTrace ----------------------------- {{{1
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
+#ifdef __arm__
+ // Cancel Thumb bit.
+ pc = pc & (~1);
+#endif
+#if defined(__powerpc__) || defined(__powerpc64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__)
+ return pc - 8;
+#else
+ return pc - 1;
+#endif
+}
+
+static void PrintStackFramePrefix(uptr frame_num, uptr pc) {
+ Printf(" #%zu 0x%zx", frame_num, pc);
+}
+
+static void PrintSourceLocation(const char *file, int line, int column,
+ const char *strip_file_prefix) {
+ CHECK(file);
+ Printf(" %s", StripPathPrefix(file, strip_file_prefix));
+ if (line > 0) {
+ Printf(":%d", line);
+ if (column > 0)
+ Printf(":%d", column);
+ }
+}
+
+static void PrintModuleAndOffset(const char *module, uptr offset,
+ const char *strip_file_prefix) {
+ Printf(" (%s+0x%zx)", StripPathPrefix(module, strip_file_prefix), offset);
+}
+
+void StackTrace::PrintStack(const uptr *addr, uptr size,
+ bool symbolize, const char *strip_file_prefix,
+ SymbolizeCallback symbolize_callback) {
+ MemoryMappingLayout proc_maps;
+ InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
+ InternalScopedBuffer<AddressInfo> addr_frames(64);
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && addr[i]; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(addr[i]);
+ uptr addr_frames_num = 0; // The number of stack frames for current
+ // instruction address.
+ if (symbolize_callback) {
+ if (symbolize_callback((void*)pc, buff.data(), buff.size())) {
+ addr_frames_num = 1;
+ PrintStackFramePrefix(frame_num, pc);
+ // We can't know anything about the string returned by external
+ // symbolizer, but if it starts with filename, try to strip path prefix
+ // from it.
+ Printf(" %s\n", StripPathPrefix(buff.data(), strip_file_prefix));
+ frame_num++;
+ }
+ }
+ if (symbolize && addr_frames_num == 0) {
+ // Use our own (online) symbolizer, if necessary.
+ addr_frames_num = SymbolizeCode(pc, addr_frames.data(),
+ addr_frames.size());
+ for (uptr j = 0; j < addr_frames_num; j++) {
+ AddressInfo &info = addr_frames[j];
+ PrintStackFramePrefix(frame_num, pc);
+ if (info.function) {
+ Printf(" in %s", info.function);
+ }
+ if (info.file) {
+ PrintSourceLocation(info.file, info.line, info.column,
+ strip_file_prefix);
+ } else if (info.module) {
+ PrintModuleAndOffset(info.module, info.module_offset,
+ strip_file_prefix);
+ }
+ Printf("\n");
+ info.Clear();
+ frame_num++;
+ }
+ }
+ if (addr_frames_num == 0) {
+ // If online symbolization failed, try to output at least module and
+ // offset for instruction.
+ PrintStackFramePrefix(frame_num, pc);
+ uptr offset;
+ if (proc_maps.GetObjectNameAndOffset(pc, &offset,
+ buff.data(), buff.size())) {
+ PrintModuleAndOffset(buff.data(), offset, strip_file_prefix);
+ }
+ Printf("\n");
+ frame_num++;
+ }
+ }
+}
+
+uptr StackTrace::GetCurrentPc() {
+ return GET_CALLER_PC();
+}
+
+void StackTrace::FastUnwindStack(uptr pc, uptr bp,
+ uptr stack_top, uptr stack_bottom) {
+ CHECK(size == 0 && trace[0] == pc);
+ size = 1;
+ uhwptr *frame = (uhwptr *)bp;
+ uhwptr *prev_frame = frame;
+ while (frame >= prev_frame &&
+ frame < (uhwptr *)stack_top - 2 &&
+ frame > (uhwptr *)stack_bottom &&
+ size < max_size) {
+ uhwptr pc1 = frame[1];
+ if (pc1 != pc) {
+ trace[size++] = (uptr) pc1;
+ }
+ prev_frame = frame;
+ frame = (uhwptr *)frame[0];
+ }
+}
+
+void StackTrace::PopStackFrames(uptr count) {
+ CHECK(size >= count);
+ size -= count;
+ for (uptr i = 0; i < size; i++) {
+ trace[i] = trace[i + count];
+ }
+}
+
+// On 32-bits we don't compress stack traces.
+// On 64-bits we compress stack traces: if a given pc differs slightly from
+// the previous one, we record a 31-bit offset instead of the full pc.
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr StackTrace::CompressStack(StackTrace *stack, u32 *compressed, uptr size) {
+#if SANITIZER_WORDSIZE == 32
+ // Don't compress, just copy.
+ uptr res = 0;
+ for (uptr i = 0; i < stack->size && i < size; i++) {
+ compressed[i] = stack->trace[i];
+ res++;
+ }
+ if (stack->size < size)
+ compressed[stack->size] = 0;
+#else // 64 bits, compress.
+ uptr prev_pc = 0;
+ const uptr kMaxOffset = (1ULL << 30) - 1;
+ uptr c_index = 0;
+ uptr res = 0;
+ for (uptr i = 0, n = stack->size; i < n; i++) {
+ uptr pc = stack->trace[i];
+ if (!pc) break;
+ if ((s64)pc < 0) break;
+ // Printf("C pc[%zu] %zx\n", i, pc);
+ if (prev_pc - pc < kMaxOffset || pc - prev_pc < kMaxOffset) {
+ uptr offset = (s64)(pc - prev_pc);
+ offset |= (1U << 31);
+ if (c_index >= size) break;
+ // Printf("C co[%zu] offset %zx\n", i, offset);
+ compressed[c_index++] = offset;
+ } else {
+ uptr hi = pc >> 32;
+ uptr lo = (pc << 32) >> 32;
+ CHECK_EQ((hi & (1 << 31)), 0);
+ if (c_index + 1 >= size) break;
+ // Printf("C co[%zu] hi/lo: %zx %zx\n", c_index, hi, lo);
+ compressed[c_index++] = hi;
+ compressed[c_index++] = lo;
+ }
+ res++;
+ prev_pc = pc;
+ }
+ if (c_index < size)
+ compressed[c_index] = 0;
+ if (c_index + 1 < size)
+ compressed[c_index + 1] = 0;
+#endif // SANITIZER_WORDSIZE
+
+ // debug-only code
+#if 0
+ StackTrace check_stack;
+ UncompressStack(&check_stack, compressed, size);
+ if (res < check_stack.size) {
+ Printf("res %zu check_stack.size %zu; c_size %zu\n", res,
+ check_stack.size, size);
+ }
+ // |res| may be greater than check_stack.size, because
+ // UncompressStack(CompressStack(stack)) eliminates the 0x0 frames.
+ CHECK(res >= check_stack.size);
+ CHECK_EQ(0, REAL(memcmp)(check_stack.trace, stack->trace,
+ check_stack.size * sizeof(uptr)));
+#endif
+
+ return res;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void StackTrace::UncompressStack(StackTrace *stack,
+ u32 *compressed, uptr size) {
+#if SANITIZER_WORDSIZE == 32
+ // Don't uncompress, just copy.
+ stack->size = 0;
+ for (uptr i = 0; i < size && i < kStackTraceMax; i++) {
+ if (!compressed[i]) break;
+ stack->size++;
+ stack->trace[i] = compressed[i];
+ }
+#else // 64 bits, uncompress
+ uptr prev_pc = 0;
+ stack->size = 0;
+ for (uptr i = 0; i < size && stack->size < kStackTraceMax; i++) {
+ u32 x = compressed[i];
+ uptr pc = 0;
+ if (x & (1U << 31)) {
+ // Printf("U co[%zu] offset: %x\n", i, x);
+ // this is an offset
+ s32 offset = x;
+ offset = (offset << 1) >> 1; // remove the 31st bit and sign-extend.
+ pc = prev_pc + offset;
+ CHECK(pc);
+ } else {
+ // CHECK(i + 1 < size);
+ if (i + 1 >= size) break;
+ uptr hi = x;
+ uptr lo = compressed[i+1];
+ // Printf("U co[%zu] hi/lo: %zx %zx\n", i, hi, lo);
+ i++;
+ pc = (hi << 32) | lo;
+ if (!pc) break;
+ }
+ // Printf("U pc[%zu] %zx\n", stack->size, pc);
+ stack->trace[stack->size++] = pc;
+ prev_pc = pc;
+ }
+#endif // SANITIZER_WORDSIZE
+}
+
+} // namespace __sanitizer
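
The one-word branch of the 64-bit scheme stores a signed 31-bit delta with bit 31 set. A small sketch of the pack/unpack pair, mirroring the shifts used above (the magnitude bound is an assumption the encoder enforces via kMaxOffset):

#include <stdint.h>

// Pack a small signed delta (|delta| < 2^30 assumed) the way CompressStack
// does, and recover it the way UncompressStack does.
uint32_t PackDelta(int64_t delta) {
  return (uint32_t)delta | (1u << 31);
}

int32_t UnpackDelta(uint32_t word) {
  int32_t offset = (int32_t)word;
  return (offset << 1) >> 1;  // drop bit 31, sign-extend the rest
}

// Example: PackDelta(-0x200) == 0xFFFFFE00, UnpackDelta(0xFFFFFE00) == -0x200.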
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.h b/lib/sanitizer_common/sanitizer_stacktrace.h
new file mode 100644
index 000000000000..597d24fd067f
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -0,0 +1,79 @@
+//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_H
+#define SANITIZER_STACKTRACE_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+static const uptr kStackTraceMax = 256;
+
+struct StackTrace {
+ typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
+ int out_size);
+ uptr size;
+ uptr max_size;
+ uptr trace[kStackTraceMax];
+ static void PrintStack(const uptr *addr, uptr size,
+ bool symbolize, const char *strip_file_prefix,
+ SymbolizeCallback symbolize_callback);
+ void CopyTo(uptr *dst, uptr dst_size) {
+ for (uptr i = 0; i < size && i < dst_size; i++)
+ dst[i] = trace[i];
+ for (uptr i = size; i < dst_size; i++)
+ dst[i] = 0;
+ }
+
+ void CopyFrom(uptr *src, uptr src_size) {
+ size = src_size;
+ if (size > kStackTraceMax) size = kStackTraceMax;
+ for (uptr i = 0; i < size; i++) {
+ trace[i] = src[i];
+ }
+ }
+
+ void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
+ void SlowUnwindStack(uptr pc, uptr max_depth);
+
+ void PopStackFrames(uptr count);
+
+ static uptr GetCurrentPc();
+ static uptr GetPreviousInstructionPc(uptr pc);
+
+ static uptr CompressStack(StackTrace *stack,
+ u32 *compressed, uptr size);
+ static void UncompressStack(StackTrace *stack,
+ u32 *compressed, uptr size);
+};
+
+} // namespace __sanitizer
+
+// Use this macro if you want to print a stack trace with the caller
+// of the current function in the top frame.
+#define GET_CALLER_PC_BP_SP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC(); \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+// Use this macro if you want to print a stack trace with the current
+// function in the top frame.
+#define GET_CURRENT_PC_BP_SP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+
+#endif // SANITIZER_STACKTRACE_H
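
A sketch of how a tool's error entry point might combine the macro with FastUnwindStack; note the CHECK in FastUnwindStack requires size == 0 and trace[0] == pc before the call:

void ReportErrorHere(uptr stack_top, uptr stack_bottom) {
  GET_CALLER_PC_BP_SP;  // defines pc, bp and sp locals
  (void)sp;             // sp is unused in this sketch
  StackTrace stack;
  stack.size = 0;
  stack.max_size = kStackTraceMax;
  stack.trace[0] = pc;
  stack.FastUnwindStack(pc, bp, stack_top, stack_bottom);
  StackTrace::PrintStack(stack.trace, stack.size, /*symbolize*/true,
                         /*strip_file_prefix*/"", /*symbolize_callback*/0);
}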
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.cc b/lib/sanitizer_common/sanitizer_symbolizer.cc
index 85eb0764f19c..a1d95ae0e0b2 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer.cc
@@ -7,9 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This is a stub for LLVM-based symbolizer.
// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries. See sanitizer.h for details.
+// run-time libraries. See sanitizer_symbolizer.h for details.
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
@@ -19,18 +18,6 @@
namespace __sanitizer {
-bool IsFullNameOfDWARFSection(const char *full_name, const char *short_name) {
- // Skip "__DWARF," prefix.
- if (0 == internal_strncmp(full_name, "__DWARF,", 8)) {
- full_name += 8;
- }
- // Skip . and _ prefices.
- while (*full_name == '.' || *full_name == '_') {
- full_name++;
- }
- return 0 == internal_strcmp(full_name, short_name);
-}
-
void AddressInfo::Clear() {
InternalFree(module);
InternalFree(function);
@@ -38,28 +25,20 @@ void AddressInfo::Clear() {
internal_memset(this, 0, sizeof(AddressInfo));
}
-ModuleDIContext::ModuleDIContext(const char *module_name, uptr base_address) {
+LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
full_name_ = internal_strdup(module_name);
- short_name_ = internal_strrchr(module_name, '/');
- if (short_name_ == 0) {
- short_name_ = full_name_;
- } else {
- short_name_++;
- }
base_address_ = base_address;
n_ranges_ = 0;
- mapped_addr_ = 0;
- mapped_size_ = 0;
}
-void ModuleDIContext::addAddressRange(uptr beg, uptr end) {
+void LoadedModule::addAddressRange(uptr beg, uptr end) {
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
ranges_[n_ranges_].beg = beg;
ranges_[n_ranges_].end = end;
n_ranges_++;
}
-bool ModuleDIContext::containsAddress(uptr address) const {
+bool LoadedModule::containsAddress(uptr address) const {
for (uptr i = 0; i < n_ranges_; i++) {
if (ranges_[i].beg <= address && address < ranges_[i].end)
return true;
@@ -67,56 +46,256 @@ bool ModuleDIContext::containsAddress(uptr address) const {
return false;
}
-void ModuleDIContext::getAddressInfo(AddressInfo *info) {
- info->module = internal_strdup(full_name_);
- info->module_offset = info->address - base_address_;
- if (mapped_addr_ == 0)
- CreateDIContext();
- // FIXME: Use the actual debug info context here.
- info->function = 0;
- info->file = 0;
- info->line = 0;
- info->column = 0;
+// Extracts the prefix of "str" that consists of any characters not
+// present in the "delims" string, and copies this prefix to "result",
+// allocating space for it.
+// Returns a pointer into "str" just past the extracted prefix and the
+// first delimiter char.
+static const char *ExtractToken(const char *str, const char *delims,
+ char **result) {
+ uptr prefix_len = internal_strcspn(str, delims);
+ *result = (char*)InternalAlloc(prefix_len + 1);
+ internal_memcpy(*result, str, prefix_len);
+ (*result)[prefix_len] = '\0';
+ const char *prefix_end = str + prefix_len;
+ if (*prefix_end != '\0') prefix_end++;
+ return prefix_end;
+}
+
+// Same as ExtractToken, but converts extracted token to integer.
+static const char *ExtractInt(const char *str, const char *delims,
+ int *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (int)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
}
-void ModuleDIContext::CreateDIContext() {
- mapped_addr_ = (uptr)MapFileToMemory(full_name_, &mapped_size_);
- CHECK(mapped_addr_);
- DWARFSection debug_info;
- DWARFSection debug_abbrev;
- DWARFSection debug_line;
- DWARFSection debug_aranges;
- DWARFSection debug_str;
- FindDWARFSection(mapped_addr_, "debug_info", &debug_info);
- FindDWARFSection(mapped_addr_, "debug_abbrev", &debug_abbrev);
- FindDWARFSection(mapped_addr_, "debug_line", &debug_line);
- FindDWARFSection(mapped_addr_, "debug_aranges", &debug_aranges);
- FindDWARFSection(mapped_addr_, "debug_str", &debug_str);
- // FIXME: Construct actual debug info context using mapped_addr,
- // mapped_size and pointers to DWARF sections in memory.
+static const char *ExtractUptr(const char *str, const char *delims,
+ uptr *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (uptr)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
}
+// ExternalSymbolizer encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess,
+// For now we assume the following protocol:
+// For each request of the form
+// <module_name> <module_offset>
+// passed to STDIN, external symbolizer prints to STDOUT response:
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// ...
+// <empty line>
+class ExternalSymbolizer {
+ public:
+ ExternalSymbolizer(const char *path, int input_fd, int output_fd)
+ : path_(path),
+ input_fd_(input_fd),
+ output_fd_(output_fd),
+ times_restarted_(0) {
+ CHECK(path_);
+ CHECK_NE(input_fd_, kInvalidFd);
+ CHECK_NE(output_fd_, kInvalidFd);
+ }
+
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ CHECK(module_name);
+ internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n",
+ is_data ? "DATA " : "", module_name, module_offset);
+ if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
+ return 0;
+ if (!readFromSymbolizer(buffer_, kBufferSize))
+ return 0;
+ return buffer_;
+ }
+
+ bool Restart() {
+ if (times_restarted_ >= kMaxTimesRestarted) return false;
+ times_restarted_++;
+ internal_close(input_fd_);
+ internal_close(output_fd_);
+ return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
+ }
+
+ private:
+ bool readFromSymbolizer(char *buffer, uptr max_length) {
+ if (max_length == 0)
+ return true;
+ uptr read_len = 0;
+ while (true) {
+ uptr just_read = internal_read(input_fd_, buffer + read_len,
+ max_length - read_len);
+ // We can't read 0 bytes, as we don't expect the external symbolizer to
+ // close its stdout.
+ if (just_read == 0 || just_read == (uptr)-1) {
+ Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
+ return false;
+ }
+ read_len += just_read;
+ // Empty line marks the end of symbolizer output.
+ if (read_len >= 2 && buffer[read_len - 1] == '\n' &&
+ buffer[read_len - 2] == '\n') {
+ break;
+ }
+ }
+ return true;
+ }
+
+ bool writeToSymbolizer(const char *buffer, uptr length) {
+ if (length == 0)
+ return true;
+ uptr write_len = internal_write(output_fd_, buffer, length);
+ if (write_len == 0 || write_len == (uptr)-1) {
+ Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
+ return false;
+ }
+ return true;
+ }
+
+ const char *path_;
+ int input_fd_;
+ int output_fd_;
+
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
+ static const uptr kMaxTimesRestarted = 5;
+ uptr times_restarted_;
+};
+
+static LowLevelAllocator symbolizer_allocator; // Linker initialized.
+
class Symbolizer {
public:
uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
if (max_frames == 0)
return 0;
- AddressInfo *info = &frames[0];
- info->Clear();
- info->address = addr;
- ModuleDIContext *module = FindModuleForAddress(addr);
- if (module) {
- module->getAddressInfo(info);
+ LoadedModule *module = FindModuleForAddress(addr);
+ if (module == 0)
+ return 0;
+ const char *module_name = module->full_name();
+ uptr module_offset = addr - module->base_address();
+ const char *str = SendCommand(false, module_name, module_offset);
+ if (str == 0) {
+ // External symbolizer was not initialized or failed. Fill only data
+ // about module name and offset.
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
return 1;
}
- return 0;
+ uptr frame_id = 0;
+ for (frame_id = 0; frame_id < max_frames; frame_id++) {
+ AddressInfo *info = &frames[frame_id];
+ char *function_name = 0;
+ str = ExtractToken(str, "\n", &function_name);
+ CHECK(function_name);
+ if (function_name[0] == '\0') {
+ // There are no more frames.
+ break;
+ }
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ info->function = function_name;
+ // Parse <file>:<line>:<column> buffer.
+ char *file_line_info = 0;
+ str = ExtractToken(str, "\n", &file_line_info);
+ CHECK(file_line_info);
+ const char *line_info = ExtractToken(file_line_info, ":", &info->file);
+ line_info = ExtractInt(line_info, ":", &info->line);
+ line_info = ExtractInt(line_info, "", &info->column);
+ InternalFree(file_line_info);
+
+ // Functions and filenames can be "??", in which case we write 0
+ // to address info to mark that names are unknown.
+ if (0 == internal_strcmp(info->function, "??")) {
+ InternalFree(info->function);
+ info->function = 0;
+ }
+ if (0 == internal_strcmp(info->file, "??")) {
+ InternalFree(info->file);
+ info->file = 0;
+ }
+ }
+ if (frame_id == 0) {
+ // Make sure we return at least one frame.
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ frame_id = 1;
+ }
+ return frame_id;
+ }
+
+ bool SymbolizeData(uptr addr, DataInfo *info) {
+ LoadedModule *module = FindModuleForAddress(addr);
+ if (module == 0)
+ return false;
+ const char *module_name = module->full_name();
+ uptr module_offset = addr - module->base_address();
+ internal_memset(info, 0, sizeof(*info));
+ info->address = addr;
+ info->module = internal_strdup(module_name);
+ info->module_offset = module_offset;
+ const char *str = SendCommand(true, module_name, module_offset);
+ if (str == 0)
+ return true;
+ str = ExtractToken(str, "\n", &info->name);
+ str = ExtractUptr(str, " ", &info->start);
+ str = ExtractUptr(str, "\n", &info->size);
+ info->start += module->base_address();
+ return true;
+ }
+
+ bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
+ int input_fd, output_fd;
+ if (!StartSymbolizerSubprocess(path_to_symbolizer, &input_fd, &output_fd))
+ return false;
+ void *mem = symbolizer_allocator.Allocate(sizeof(ExternalSymbolizer));
+ external_symbolizer_ = new(mem) ExternalSymbolizer(path_to_symbolizer,
+ input_fd, output_fd);
+ return true;
}
private:
- ModuleDIContext *FindModuleForAddress(uptr address) {
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ if (external_symbolizer_ == 0) {
+ ReportExternalSymbolizerError(
+ "WARNING: Trying to symbolize code, but external "
+ "symbolizer is not initialized!\n");
+ return 0;
+ }
+ for (;;) {
+ char *reply = external_symbolizer_->SendCommand(is_data, module_name,
+ module_offset);
+ if (reply)
+ return reply;
+ // Try to restart symbolizer subprocess. If we don't succeed, forget
+ // about it and don't try to use it later.
+ if (!external_symbolizer_->Restart()) {
+ ReportExternalSymbolizerError(
+ "WARNING: Failed to use and restart external symbolizer!\n");
+ external_symbolizer_ = 0;
+ return 0;
+ }
+ }
+ }
+
+ LoadedModule *FindModuleForAddress(uptr address) {
if (modules_ == 0) {
- modules_ = (ModuleDIContext*)InternalAlloc(
- kMaxNumberOfModuleContexts * sizeof(ModuleDIContext));
+ modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
+ kMaxNumberOfModuleContexts * sizeof(LoadedModule)));
CHECK(modules_);
n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts);
CHECK_GT(n_modules_, 0);
@@ -129,10 +308,22 @@ class Symbolizer {
}
return 0;
}
- static const uptr kMaxNumberOfModuleContexts = 4096;
- // Array of module debug info contexts is leaked.
- ModuleDIContext *modules_;
+ void ReportExternalSymbolizerError(const char *msg) {
+ // Don't use atomics here for now, as SymbolizeCode can't be called
+ // from multiple threads anyway.
+ static bool reported;
+ if (!reported) {
+ Report(msg);
+ reported = true;
+ }
+ }
+
+ // 16K loaded modules should be enough for everyone.
+ static const uptr kMaxNumberOfModuleContexts = 1 << 14;
+ LoadedModule *modules_; // Array of module descriptions is leaked.
uptr n_modules_;
+
+ ExternalSymbolizer *external_symbolizer_; // Leaked.
};
static Symbolizer symbolizer; // Linker initialized.
@@ -141,4 +332,12 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
return symbolizer.SymbolizeCode(address, frames, max_frames);
}
+bool SymbolizeData(uptr address, DataInfo *info) {
+ return symbolizer.SymbolizeData(address, info);
+}
+
+bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
+ return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer);
+}
+
} // namespace __sanitizer
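
A concrete request/response exchange under the protocol above (module path, offsets, names and sizes are hypothetical). A code query is one line written to the symbolizer's stdin:

    /usr/lib/libfoo.so 0x1234

The reply on its stdout lists frames and is terminated by an empty line; a "??" function or file name is converted to a null field by SymbolizeCode:

    FooFunction
    /src/foo.cc:42:7

A data query uses the new "DATA " prefix, and SymbolizeData parses the reply as a symbol name plus start and size:

    DATA /usr/lib/libfoo.so 0x5678
    g_foo_counter
    0x5670 16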
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.h b/lib/sanitizer_common/sanitizer_symbolizer.h
index c813e8088d7e..c26d621ea065 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer.h
+++ b/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -44,6 +44,22 @@ struct AddressInfo {
}
// Deletes all strings and sets all fields to zero.
void Clear();
+
+ void FillAddressAndModuleInfo(uptr addr, const char *mod_name,
+ uptr mod_offset) {
+ address = addr;
+ module = internal_strdup(mod_name);
+ module_offset = mod_offset;
+ }
+};
+
+struct DataInfo {
+ uptr address;
+ char *module;
+ uptr module_offset;
+ char *name;
+ uptr start;
+ uptr size;
};
// Fills at most "max_frames" elements of "frames" with descriptions
@@ -51,49 +67,45 @@ struct AddressInfo {
// of descriptions actually filled.
// This function should NOT be called from two threads simultaneously.
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+bool SymbolizeData(uptr address, DataInfo *info);
-// Debug info routines
-struct DWARFSection {
- const char *data;
- uptr size;
- DWARFSection() {
- data = 0;
- size = 0;
- }
-};
-// Returns true on success.
-bool FindDWARFSection(uptr object_file_addr, const char *section_name,
- DWARFSection *section);
-bool IsFullNameOfDWARFSection(const char *full_name, const char *short_name);
+// Attempts to demangle the provided C++ mangled name.
+const char *Demangle(const char *Name);
-class ModuleDIContext {
+// Starts an external symbolizer program in a subprocess. The sanitizer
+// communicates with the external symbolizer via pipes.
+bool InitializeExternalSymbolizer(const char *path_to_symbolizer);
+
+class LoadedModule {
public:
- ModuleDIContext(const char *module_name, uptr base_address);
+ LoadedModule(const char *module_name, uptr base_address);
void addAddressRange(uptr beg, uptr end);
bool containsAddress(uptr address) const;
- void getAddressInfo(AddressInfo *info);
const char *full_name() const { return full_name_; }
+ uptr base_address() const { return base_address_; }
private:
- void CreateDIContext();
-
struct AddressRange {
uptr beg;
uptr end;
};
char *full_name_;
- char *short_name_;
uptr base_address_;
- static const uptr kMaxNumberOfAddressRanges = 8;
+ static const uptr kMaxNumberOfAddressRanges = 6;
AddressRange ranges_[kMaxNumberOfAddressRanges];
uptr n_ranges_;
- uptr mapped_addr_;
- uptr mapped_size_;
};
-// OS-dependent function that gets the linked list of all loaded modules.
-uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules);
+// Creates an external symbolizer connected via pipes; the caller should
+// write requests to output_fd and read responses from input_fd.
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd);
+
+// OS-dependent function that fills array with descriptions of at most
+// "max_modules" currently loaded modules. Returns the number of
+// initialized modules.
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules);
} // namespace __sanitizer
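
A hedged sketch of driving the StartSymbolizerSubprocess() helper declared
above. The path and the line protocol are invented for the example -- the
actual request/response format depends on the external symbolizer tool and is
not specified by this patch.

    bool QuerySymbolizerOnce() {
      int input_fd, output_fd;
      if (!StartSymbolizerSubprocess("/usr/bin/my-symbolizer",
                                     &input_fd, &output_fd))
        return false;
      // Write a request to the child's stdin; read its reply from its stdout.
      const char *query = "/path/to/module 0x4f2\n";  // Made-up format.
      internal_write(output_fd, query, internal_strlen(query));
      char buf[4096];
      uptr len = internal_read(input_fd, buf, sizeof(buf) - 1);
      if (len < sizeof(buf))
        buf[len] = '\0';  // The reply, if any, is now NUL-terminated.
      return true;
    }
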
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc b/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc
new file mode 100644
index 000000000000..438629492923
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc
@@ -0,0 +1,42 @@
+//===-- sanitizer_symbolizer_itanium.cc -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizer run-time libraries.
+// Itanium C++ ABI-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#if defined(__APPLE__) || defined(__linux__)
+
+#include "sanitizer_symbolizer.h"
+
+#include <stdlib.h>
+
+// C++ demangling function, as required by Itanium C++ ABI. This is weak,
+// because we do not require a C++ ABI library to be linked to a program
+// using sanitizers; if it's not present, we'll just use the mangled name.
+namespace __cxxabiv1 {
+ extern "C" char *__cxa_demangle(const char *mangled, char *buffer,
+ size_t *length, int *status)
+ SANITIZER_WEAK_ATTRIBUTE;
+}
+
+const char *__sanitizer::Demangle(const char *MangledName) {
+ // FIXME: __cxa_demangle aggressively insists on allocating memory.
+ // There's not much we can do about that, short of providing our
+ // own demangler (libc++abi's implementation could be adapted so that
+ // it does not allocate). For now, we just call it anyway, and we leak
+ // the returned value.
+ if (__cxxabiv1::__cxa_demangle)
+ if (const char *Demangled =
+ __cxxabiv1::__cxa_demangle(MangledName, 0, 0, 0))
+ return Demangled;
+
+ return MangledName;
+}
+
+#endif // __APPLE__ || __linux__
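
The weak reference above makes demangling strictly best-effort; a tiny sketch
of the observable behavior ("_Z3foov" is the standard Itanium mangling of
"foo()"):

    // With a C++ ABI library present this yields "foo()"; without one,
    // the weak __cxa_demangle is null and the input is returned as-is.
    const char *pretty = __sanitizer::Demangle("_Z3foov");
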
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_linux.cc b/lib/sanitizer_common/sanitizer_symbolizer_linux.cc
new file mode 100644
index 000000000000..4bd3dc8826ef
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_symbolizer_linux.cc
@@ -0,0 +1,182 @@
+//===-- sanitizer_symbolizer_linux.cc -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Linux-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#ifdef __linux__
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_symbolizer.h"
+
+#include <elf.h>
+#include <errno.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#if !defined(__ANDROID__) && !defined(ANDROID)
+#include <link.h>
+#endif
+
+namespace __sanitizer {
+
+static const int kSymbolizerStartupTimeMillis = 10;
+
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd) {
+ if (!FileExists(path_to_symbolizer)) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ return false;
+ }
+
+ int *infd = NULL;
+ int *outfd = NULL;
+  // The client program may close its stdin and/or stdout and/or stderr,
+  // thus allowing pipe() to reuse file descriptors 0, 1 or 2.
+  // In this case the communication between the forked processes may be
+  // broken if either the parent or the child tries to close or duplicate
+  // these descriptors. The loop below produces two pairs of file
+  // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+
+ int pid = fork();
+ if (pid == -1) {
+    // fork() failed.
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ Report("WARNING: failed to fork external symbolizer "
+ " (errno: %d)\n", errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = getdtablesize(); fd > 2; fd--)
+ internal_close(fd);
+ execl(path_to_symbolizer, path_to_symbolizer, (char*)0);
+ Exit(1);
+ }
+
+ // Continue execution in parent process.
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ *input_fd = infd[0];
+ *output_fd = outfd[1];
+
+ // Check that symbolizer subprocess started successfully.
+ int pid_status;
+ SleepForMillis(kSymbolizerStartupTimeMillis);
+ int exited_pid = waitpid(pid, &pid_status, WNOHANG);
+ if (exited_pid != 0) {
+ // Either waitpid failed, or child has already exited.
+ Report("WARNING: external symbolizer didn't start up correctly!\n");
+ return false;
+ }
+
+ return true;
+}
+
+#if defined(__ANDROID__) || defined(ANDROID)
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+}
+#else // ANDROID
+typedef ElfW(Phdr) Elf_Phdr;
+
+struct DlIteratePhdrData {
+ LoadedModule *modules;
+ uptr current_n;
+ uptr max_n;
+};
+
+static const uptr kMaxPathLength = 512;
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
+ if (data->current_n == data->max_n)
+ return 0;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ module_name.data()[0] = '\0';
+ if (data->current_n == 0) {
+ // First module is the binary itself.
+ uptr module_name_len = internal_readlink(
+ "/proc/self/exe", module_name.data(), module_name.size());
+ CHECK_NE(module_name_len, (uptr)-1);
+ CHECK_LT(module_name_len, module_name.size());
+ module_name[module_name_len] = '\0';
+ } else if (info->dlpi_name) {
+ internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
+ }
+ if (module_name.data()[0] == '\0')
+ return 0;
+ void *mem = &data->modules[data->current_n];
+ LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
+ info->dlpi_addr);
+ data->current_n++;
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ const Elf_Phdr *phdr = &info->dlpi_phdr[i];
+ if (phdr->p_type == PT_LOAD) {
+ uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
+ uptr cur_end = cur_beg + phdr->p_memsz;
+ cur_module->addAddressRange(cur_beg, cur_end);
+ }
+ }
+ return 0;
+}
+
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ CHECK(modules);
+ DlIteratePhdrData data = {modules, 0, max_modules};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &data);
+ return data.current_n;
+}
+#endif // ANDROID
+
+} // namespace __sanitizer
+
+#endif // __linux__
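
A sketch of consuming GetListOfModules() the way the Symbolizer above does:
the caller provides raw storage into which LoadedModule objects are
placement-newed, and containsAddress() picks the module covering an address.
The 128-module cap and the helper name are illustrative assumptions.

    void PrintModuleFor(uptr addr) {
      static const uptr kMaxModules = 128;  // Arbitrary cap for the example.
      InternalScopedBuffer<char> mem(kMaxModules * sizeof(LoadedModule));
      LoadedModule *modules = (LoadedModule*)mem.data();
      uptr n = GetListOfModules(modules, kMaxModules);
      for (uptr i = 0; i < n; i++) {
        if (modules[i].containsAddress(addr)) {
          Printf("%p belongs to %s (loaded at 0x%zx)\n", (void*)addr,
                 modules[i].full_name(), modules[i].base_address());
          return;
        }
      }
    }
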
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
new file mode 100644
index 000000000000..23993607e77b
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -0,0 +1,31 @@
+//===-- sanitizer_symbolizer_mac.cc ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Mac-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#ifdef __APPLE__
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd) {
+ UNIMPLEMENTED();
+}
+
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+}
+
+} // namespace __sanitizer
+
+#endif // __APPLE__
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
new file mode 100644
index 000000000000..f1b6a02a6f9a
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -0,0 +1,37 @@
+//===-- sanitizer_symbolizer_win.cc ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Windows-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#ifdef _WIN32
+#include <windows.h>
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd) {
+ UNIMPLEMENTED();
+}
+
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+}
+
+const char *Demangle(const char *MangledName) {
+ return MangledName;
+}
+
+} // namespace __sanitizer
+
+#endif // _WIN32
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index c68a1fee4068..2ae37af8847c 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -12,15 +12,32 @@
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#define NOGDI
+#include <stdlib.h>
+#include <io.h>
#include <windows.h>
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
-#include "sanitizer_symbolizer.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
// --------------------- sanitizer_common.h
+uptr GetPageSize() {
+ return 1U << 14; // FIXME: is this configurable?
+}
+
+uptr GetMmapGranularity() {
+ return 1U << 16; // FIXME: is this configurable?
+}
+
+bool FileExists(const char *filename) {
+ UNIMPLEMENTED();
+}
+
int GetPid() {
return GetProcessId(GetCurrentProcess());
}
@@ -42,7 +59,6 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_bottom = (uptr)mbi.AllocationBase;
}
-
void *MmapOrDie(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0) {
@@ -62,8 +78,18 @@ void UnmapOrDie(void *addr, uptr size) {
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
- return VirtualAlloc((LPVOID)fixed_addr, size,
- MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
+ // but on Win64 it does.
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0)
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at %p (%d)\n",
+ size, size, fixed_addr, GetLastError());
+ return p;
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ return MmapFixedNoReserve(fixed_addr, size);
}
void *Mprotect(uptr fixed_addr, uptr size) {
@@ -98,7 +124,6 @@ const char *GetEnv(const char *name) {
const char *GetPwd() {
UNIMPLEMENTED();
- return 0;
}
void DumpProcessMap() {
@@ -109,6 +134,22 @@ void DisableCoreDumper() {
UNIMPLEMENTED();
}
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void PrepareForSandboxing() {
+ // Nothing here for now.
+}
+
+bool StackSizeIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ UNIMPLEMENTED();
+}
+
void SleepForSeconds(int seconds) {
Sleep(seconds * 1000);
}
@@ -126,50 +167,40 @@ void Abort() {
_exit(-1); // abort is not NORETURN on Windows.
}
+#ifndef SANITIZER_GO
int Atexit(void (*function)(void)) {
return atexit(function);
}
-
-// ------------------ sanitizer_symbolizer.h
-bool FindDWARFSection(uptr object_file_addr, const char *section_name,
- DWARFSection *section) {
- UNIMPLEMENTED();
- return false;
-}
-
-uptr GetListOfModules(ModuleDIContext *modules, uptr max_modules) {
- UNIMPLEMENTED();
-};
+#endif
// ------------------ sanitizer_libc.h
void *internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
UNIMPLEMENTED();
- return 0;
}
int internal_munmap(void *addr, uptr length) {
UNIMPLEMENTED();
- return 0;
}
int internal_close(fd_t fd) {
UNIMPLEMENTED();
- return 0;
+}
+
+int internal_isatty(fd_t fd) {
+ return _isatty(fd);
}
fd_t internal_open(const char *filename, bool write) {
UNIMPLEMENTED();
- return 0;
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
UNIMPLEMENTED();
- return 0;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
- if (fd != 2)
+ if (fd != kStderrFd)
UNIMPLEMENTED();
HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
if (err == 0)
@@ -182,19 +213,57 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
uptr internal_filesize(fd_t fd) {
UNIMPLEMENTED();
- return 0;
}
int internal_dup2(int oldfd, int newfd) {
UNIMPLEMENTED();
- return 0;
}
-int internal_sched_yield() {
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
UNIMPLEMENTED();
+}
+
+int internal_sched_yield() {
+ Sleep(0);
return 0;
}
+// ---------------------- BlockingMutex ---------------- {{{1
+enum LockState {
+ LOCK_UNINITIALIZED = 0,
+ LOCK_READY = -1,
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized li) {
+ // FIXME: see comments in BlockingMutex::Lock() for the details.
+ CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
+
+ CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
+ InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ owner_ = LOCK_READY;
+}
+
+void BlockingMutex::Lock() {
+ if (owner_ == LOCK_UNINITIALIZED) {
+ // FIXME: hm, global BlockingMutex objects are not initialized?!?
+ // This might be a side effect of the clang+cl+link Frankenbuild...
+ new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
+
+ // FIXME: If it turns out the linker doesn't invoke our
+ // constructors, we should probably manually Lock/Unlock all the global
+ // locks while we're starting in one thread to avoid double-init races.
+ }
+ EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ CHECK(owner_ == LOCK_READY);
+ owner_ = GetThreadSelf();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == GetThreadSelf());
+ owner_ = LOCK_READY;
+ LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+}
+
} // namespace __sanitizer
#endif // _WIN32
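
The CRITICAL_SECTION-backed BlockingMutex above self-initializes on first
Lock(), so it is safe to use from a linker-initialized global. A minimal
usage sketch; the guarded counter is invented for the example:

    static BlockingMutex mu(LINKER_INITIALIZED);
    static int guarded_counter;  // Example state, not from the patch.

    void IncrementUnderLock() {
      mu.Lock();  // Lazily runs the placement-new path if needed.
      guarded_counter++;
      mu.Unlock();
    }
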
diff --git a/lib/sanitizer_common/scripts/check_lint.sh b/lib/sanitizer_common/scripts/check_lint.sh
new file mode 100755
index 000000000000..e65794df0ce7
--- /dev/null
+++ b/lib/sanitizer_common/scripts/check_lint.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
+# Guess path to LLVM_CHECKOUT if not provided
+if [ "${LLVM_CHECKOUT}" == "" ]; then
+ LLVM_CHECKOUT="${SCRIPT_DIR}/../../../../../"
+ echo "LLVM Checkout: ${LLVM_CHECKOUT}"
+fi
+
+# Cpplint setup
+cd ${SCRIPT_DIR}
+if [ ! -d cpplint ]; then
+ svn co -r83 http://google-styleguide.googlecode.com/svn/trunk/cpplint cpplint
+fi
+CPPLINT=${SCRIPT_DIR}/cpplint/cpplint.py
+
+# Filters
+# TODO: remove some of these filters
+ASAN_RTL_LINT_FILTER=-readability/casting,-readability/check,-build/include,-build/header_guard,-build/class,-legal/copyright,-build/namespaces
+ASAN_TEST_LINT_FILTER=-readability/casting,-build/include,-legal/copyright,-whitespace/newline,-runtime/sizeof,-runtime/int,-runtime/printf,-build/header_guard
+ASAN_LIT_TEST_LINT_FILTER=${ASAN_TEST_LINT_FILTER},-whitespace/line_length
+TSAN_RTL_LINT_FILTER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces
+TSAN_TEST_LINT_FILTER=${TSAN_RTL_LINT_FILTER},-runtime/threadsafe_fn,-runtime/int
+TSAN_LIT_TEST_LINT_FILTER=${TSAN_TEST_LINT_FILTER},-whitespace/line_length
+MSAN_RTL_LINT_FILTER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces
+TSAN_RTL_INC_LINT_FILTER=${TSAN_TEST_LINT_FILTER},-runtime/sizeof
+
+cd ${LLVM_CHECKOUT}
+
+# LLVM Instrumentation
+LLVM_INSTRUMENTATION=lib/Transforms/Instrumentation
+LLVM_LINT_FILTER=-,+whitespace
+${CPPLINT} --filter=${LLVM_LINT_FILTER} ${LLVM_INSTRUMENTATION}/*Sanitizer.cpp \
+ ${LLVM_INSTRUMENTATION}/BlackList.*
+
+COMPILER_RT=projects/compiler-rt
+
+# Headers
+SANITIZER_INCLUDES=${COMPILER_RT}/include/sanitizer
+${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${SANITIZER_INCLUDES}/*.h
+
+# Sanitizer_common
+COMMON_RTL=${COMPILER_RT}/lib/sanitizer_common
+${CPPLINT} --filter=${ASAN_RTL_LINT_FILTER} ${COMMON_RTL}/*.{cc,h}
+${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${COMMON_RTL}/tests/*.cc
+
+# Interception
+INTERCEPTION=${COMPILER_RT}/lib/interception
+${CPPLINT} --filter=${ASAN_RTL_LINT_FILTER} ${INTERCEPTION}/*.{cc,h}
+
+# ASan
+ASAN_RTL=${COMPILER_RT}/lib/asan
+${CPPLINT} --filter=${ASAN_RTL_LINT_FILTER} ${ASAN_RTL}/*.{cc,h}
+${CPPLINT} --filter=${ASAN_TEST_LINT_FILTER} ${ASAN_RTL}/tests/*.{cc,h}
+${CPPLINT} --filter=${ASAN_LIT_TEST_LINT_FILTER} ${ASAN_RTL}/lit_tests/*.cc \
+ ${ASAN_RTL}/lit_tests/*/*.cc \
+
+# TSan
+TSAN_RTL=${COMPILER_RT}/lib/tsan
+${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${TSAN_RTL}/rtl/*.{cc,h}
+${CPPLINT} --filter=${TSAN_TEST_LINT_FILTER} ${TSAN_RTL}/tests/rtl/*.{cc,h} \
+ ${TSAN_RTL}/tests/unit/*.cc
+${CPPLINT} --filter=${TSAN_LIT_TEST_LINT_FILTER} ${TSAN_RTL}/lit_tests/*.cc
+
+# MSan
+MSAN_RTL=${COMPILER_RT}/lib/msan
+${CPPLINT} --filter=${MSAN_RTL_LINT_FILTER} ${MSAN_RTL}/*.{cc,h}
+
+set +e
+
+# Misc files
+FILES=${COMMON_RTL}/*.inc
+for FILE in $FILES; do
+ TMPFILE=$(mktemp -u ${FILE}.XXXXX).cc
+ echo "Checking $FILE"
+ cp -f $FILE $TMPFILE && \
+ ${CPPLINT} --filter=${TSAN_RTL_INC_LINT_FILTER} $TMPFILE
+ rm $TMPFILE
+done
diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt
new file mode 100644
index 000000000000..f83a89cbe37c
--- /dev/null
+++ b/lib/sanitizer_common/tests/CMakeLists.txt
@@ -0,0 +1,138 @@
+include(CompilerRTCompile)
+
+set(SANITIZER_UNITTESTS
+ sanitizer_allocator_test.cc
+ sanitizer_common_test.cc
+ sanitizer_flags_test.cc
+ sanitizer_libc_test.cc
+ sanitizer_list_test.cc
+ sanitizer_mutex_test.cc
+ sanitizer_printf_test.cc
+ sanitizer_scanf_interceptor_test.cc
+ sanitizer_stackdepot_test.cc
+ sanitizer_test_main.cc
+ )
+
+set(SANITIZER_TEST_HEADERS)
+foreach(header ${SANITIZER_HEADERS})
+ list(APPEND SANITIZER_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
+endforeach()
+
+include_directories(..)
+include_directories(../..)
+
+# Adds a static library that contains the sanitizer_common object files
+# (a universal binary on Mac, arch-specific object files on Linux).
+macro(add_sanitizer_common_lib library)
+ add_library(${library} STATIC ${ARGN})
+ set_target_properties(${library} PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+endmacro()
+
+function(get_sanitizer_common_lib_for_arch arch lib lib_name)
+ if(APPLE)
+ set(tgt_name "RTSanitizerCommon.test.osx")
+ else()
+ set(tgt_name "RTSanitizerCommon.test.${arch}")
+ endif()
+ set(${lib} "${tgt_name}" PARENT_SCOPE)
+ set(${lib_name} "lib${tgt_name}.a" PARENT_SCOPE)
+endfunction()
+
+# The sanitizer_common unit test suite.
+add_custom_target(SanitizerUnitTests)
+set_target_properties(SanitizerUnitTests PROPERTIES
+ FOLDER "Sanitizer unittests")
+
+# Adds sanitizer tests for the given architecture.
+macro(add_sanitizer_tests_for_arch arch)
+ get_target_flags_for_arch(${arch} TARGET_FLAGS)
+ set(SANITIZER_TEST_SOURCES ${SANITIZER_UNITTESTS}
+ ${COMPILER_RT_GTEST_SOURCE})
+ set(SANITIZER_TEST_CFLAGS ${COMPILER_RT_GTEST_INCLUDE_CFLAGS}
+ -I${COMPILER_RT_SOURCE_DIR}/include
+ -I${COMPILER_RT_SOURCE_DIR}/lib
+ -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common
+ -O2 -g ${TARGET_FLAGS})
+ set(SANITIZER_TEST_LINK_FLAGS -lstdc++ -lpthread ${TARGET_FLAGS})
+ set(SANITIZER_TEST_OBJECTS)
+ foreach(source ${SANITIZER_TEST_SOURCES})
+ get_filename_component(basename ${source} NAME)
+ set(output_obj "${basename}.${arch}.o")
+ clang_compile(${output_obj} ${source}
+ CFLAGS ${SANITIZER_TEST_CFLAGS}
+ DEPS gtest ${SANITIZER_RUNTIME_LIBRARIES}
+ ${SANITIZER_TEST_HEADERS})
+ list(APPEND SANITIZER_TEST_OBJECTS ${output_obj})
+ endforeach()
+ get_sanitizer_common_lib_for_arch(${arch} SANITIZER_COMMON_LIB
+ SANITIZER_COMMON_LIB_NAME)
+ # Add unittest target.
+ set(SANITIZER_TEST_NAME "Sanitizer-${arch}-Test")
+ add_compiler_rt_test(SanitizerUnitTests ${SANITIZER_TEST_NAME}
+ OBJECTS ${SANITIZER_TEST_OBJECTS}
+ ${SANITIZER_COMMON_LIB_NAME}
+ DEPS ${SANITIZER_TEST_OBJECTS} ${SANITIZER_COMMON_LIB}
+ LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS})
+endmacro()
+
+if(COMPILER_RT_CAN_EXECUTE_TESTS)
+  # We use the just-built clang to build sanitizer_common unittests, so we
+  # must be sure that the produced binaries will work.
+ if(APPLE)
+ add_sanitizer_common_lib("RTSanitizerCommon.test.osx"
+ $<TARGET_OBJECTS:RTSanitizerCommon.osx>)
+ else()
+ if(CAN_TARGET_x86_64)
+ add_sanitizer_common_lib("RTSanitizerCommon.test.x86_64"
+ $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>)
+ endif()
+ if(CAN_TARGET_i386)
+ add_sanitizer_common_lib("RTSanitizerCommon.test.i386"
+ $<TARGET_OBJECTS:RTSanitizerCommon.i386>)
+ endif()
+ endif()
+ if(CAN_TARGET_x86_64)
+ add_sanitizer_tests_for_arch(x86_64)
+ endif()
+ if(CAN_TARGET_i386)
+ add_sanitizer_tests_for_arch(i386)
+ endif()
+
+  # Run the unittests as part of the lit testsuite.
+ configure_lit_site_cfg(
+ ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
+ ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
+ )
+
+ add_lit_testsuite(check-sanitizer "Running sanitizer library unittests"
+ ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS SanitizerUnitTests
+ )
+ set_target_properties(check-sanitizer PROPERTIES FOLDER "Sanitizer unittests")
+endif()
+
+if(ANDROID)
+ # We assume that unit tests on Android are built in a build
+  # tree with a freshly built Clang as the host compiler.
+ add_executable(SanitizerTest
+ ${SANITIZER_UNITTESTS}
+ ${COMPILER_RT_GTEST_SOURCE}
+ $<TARGET_OBJECTS:RTSanitizerCommon.arm.android>
+ )
+ set_target_compile_flags(SanitizerTest
+ ${SANITIZER_COMMON_CFLAGS}
+ ${COMPILER_RT_GTEST_INCLUDE_CFLAGS}
+ -I${COMPILER_RT_SOURCE_DIR}/include
+ -I${COMPILER_RT_SOURCE_DIR}/lib
+ -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common
+ -O2 -g
+ )
+ # Setup correct output directory and link flags.
+ get_unittest_directory(OUTPUT_DIR)
+ set_target_properties(SanitizerTest PROPERTIES
+ RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_DIR})
+ set_target_link_flags(SanitizerTest ${SANITIZER_TEST_LINK_FLAGS})
+ # Add unit test to test suite.
+ add_dependencies(SanitizerUnitTests SanitizerTest)
+endif()
diff --git a/lib/sanitizer_common/tests/lit.cfg b/lib/sanitizer_common/tests/lit.cfg
new file mode 100644
index 000000000000..d774753985ac
--- /dev/null
+++ b/lib/sanitizer_common/tests/lit.cfg
@@ -0,0 +1,29 @@
+# -*- Python -*-
+
+import os
+
+def get_required_attr(config, attr_name):
+ attr_value = getattr(config, attr_name, None)
+ if not attr_value:
+ lit.fatal("No attribute %r in test configuration! You may need to run "
+ "tests from your build directory or add this attribute "
+ "to lit.site.cfg " % attr_name)
+ return attr_value
+
+# Set up attributes common to all compiler-rt projects.
+llvm_src_root = get_required_attr(config, 'llvm_src_root')
+compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects",
+ "compiler-rt", "lib",
+ "lit.common.unit.cfg")
+lit.load_config(config, compiler_rt_lit_unit_cfg)
+
+# Set up the config name.
+config.name = 'SanitizerCommon-Unit'
+
+# Set up the test source and exec root. For unit tests, we define
+# both as the build directory containing the sanitizer_common unit tests.
+llvm_obj_root = get_required_attr(config, "llvm_obj_root")
+config.test_exec_root = os.path.join(llvm_obj_root, "projects",
+ "compiler-rt", "lib",
+ "sanitizer_common", "tests")
+config.test_source_root = config.test_exec_root
diff --git a/lib/sanitizer_common/tests/lit.site.cfg.in b/lib/sanitizer_common/tests/lit.site.cfg.in
new file mode 100644
index 000000000000..bb9a28d6a6cb
--- /dev/null
+++ b/lib/sanitizer_common/tests/lit.site.cfg.in
@@ -0,0 +1,9 @@
+## Autogenerated by LLVM/Clang configuration.
+# Do not edit!
+
+config.build_type = "@CMAKE_BUILD_TYPE@"
+config.llvm_obj_root = "@LLVM_BINARY_DIR@"
+config.llvm_src_root = "@LLVM_SOURCE_DIR@"
+
+# Let the main config do the real work.
+lit.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg")
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
deleted file mode 100644
index 1410f26ce84f..000000000000
--- a/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
+++ /dev/null
@@ -1,257 +0,0 @@
-//===-- sanitizer_allocator64_test.cc -------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Tests for sanitizer_allocator64.h.
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator64.h"
-#include "gtest/gtest.h"
-
-#include <algorithm>
-#include <vector>
-
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x10000000000; // 1T.
-
-typedef DefaultSizeClassMap SCMap;
-typedef
- SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16, SCMap> Allocator;
-typedef SizeClassAllocatorLocalCache<Allocator::kNumClasses, Allocator>
- AllocatorCache;
-
-TEST(SanitizerCommon, DefaultSizeClassMap) {
-#if 0
- for (uptr i = 0; i < SCMap::kNumClasses; i++) {
- // printf("% 3ld: % 5ld (%4lx); ", i, SCMap::Size(i), SCMap::Size(i));
- printf("c%ld => %ld ", i, SCMap::Size(i));
- if ((i % 8) == 7)
- printf("\n");
- }
- printf("\n");
-#endif
-
- for (uptr c = 0; c < SCMap::kNumClasses; c++) {
- uptr s = SCMap::Size(c);
- CHECK_EQ(SCMap::ClassID(s), c);
- if (c != SCMap::kNumClasses - 1)
- CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
- CHECK_EQ(SCMap::ClassID(s - 1), c);
- if (c)
- CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
- }
- CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
-
- for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
- uptr c = SCMap::ClassID(s);
- CHECK_LT(c, SCMap::kNumClasses);
- CHECK_GE(SCMap::Size(c), s);
- if (c > 0)
- CHECK_LT(SCMap::Size(c-1), s);
- }
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64) {
- Allocator a;
- a.Init();
-
- static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
- 50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
-
- std::vector<void *> allocated;
-
- uptr last_total_allocated = 0;
- for (int i = 0; i < 5; i++) {
- // Allocate a bunch of chunks.
- for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
- uptr size = sizes[s];
- // printf("s = %ld\n", size);
- uptr n_iter = std::max((uptr)2, 1000000 / size);
- for (uptr i = 0; i < n_iter; i++) {
- void *x = a.Allocate(size, 1);
- allocated.push_back(x);
- CHECK(a.PointerIsMine(x));
- CHECK_GE(a.GetActuallyAllocatedSize(x), size);
- uptr class_id = a.GetSizeClass(x);
- CHECK_EQ(class_id, SCMap::ClassID(size));
- uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
- metadata[0] = reinterpret_cast<uptr>(x) + 1;
- metadata[1] = 0xABCD;
- }
- }
- // Deallocate all.
- for (uptr i = 0; i < allocated.size(); i++) {
- void *x = allocated[i];
- uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
- CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
- CHECK_EQ(metadata[1], 0xABCD);
- a.Deallocate(x);
- }
- allocated.clear();
- uptr total_allocated = a.TotalMemoryUsed();
- if (last_total_allocated == 0)
- last_total_allocated = total_allocated;
- CHECK_EQ(last_total_allocated, total_allocated);
- }
-
- a.TestOnlyUnmap();
-}
-
-
-TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
- Allocator a;
- a.Init();
- static volatile void *sink;
-
- const uptr kNumAllocs = 10000;
- void *allocated[kNumAllocs];
- for (uptr i = 0; i < kNumAllocs; i++) {
- uptr size = (i % 4096) + 1;
- void *x = a.Allocate(size, 1);
- allocated[i] = x;
- }
- // Get Metadata kNumAllocs^2 times.
- for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
- sink = a.GetMetaData(allocated[i % kNumAllocs]);
- }
- for (uptr i = 0; i < kNumAllocs; i++) {
- a.Deallocate(allocated[i]);
- }
-
- a.TestOnlyUnmap();
- (void)sink;
-}
-
-void FailInAssertionOnOOM() {
- Allocator a;
- a.Init();
- const uptr size = 1 << 20;
- for (int i = 0; i < 1000000; i++) {
- a.Allocate(size, 1);
- }
-
- a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
- EXPECT_DEATH(FailInAssertionOnOOM(),
- "allocated_user.*allocated_meta.*kRegionSize");
-}
-
-TEST(SanitizerCommon, LargeMmapAllocator) {
- LargeMmapAllocator a;
- a.Init();
-
- static const int kNumAllocs = 100;
- void *allocated[kNumAllocs];
- static const uptr size = 1000;
- // Allocate some.
- for (int i = 0; i < kNumAllocs; i++) {
- allocated[i] = a.Allocate(size, 1);
- }
- // Deallocate all.
- CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
- for (int i = 0; i < kNumAllocs; i++) {
- void *p = allocated[i];
- CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
- }
- // Check that non left.
- CHECK_EQ(a.TotalMemoryUsed(), 0);
-
- // Allocate some more, also add metadata.
- for (int i = 0; i < kNumAllocs; i++) {
- void *x = a.Allocate(size, 1);
- CHECK_GE(a.GetActuallyAllocatedSize(x), size);
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
- *meta = i;
- allocated[i] = x;
- }
- CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
- // Deallocate all in reverse order.
- for (int i = 0; i < kNumAllocs; i++) {
- int idx = kNumAllocs - i - 1;
- void *p = allocated[idx];
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
- CHECK_EQ(*meta, idx);
- CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
- }
- CHECK_EQ(a.TotalMemoryUsed(), 0);
-}
-
-TEST(SanitizerCommon, CombinedAllocator) {
- typedef Allocator PrimaryAllocator;
- typedef LargeMmapAllocator SecondaryAllocator;
- typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
-
- AllocatorCache cache;
- Allocator a;
- a.Init();
- cache.Init();
- const uptr kNumAllocs = 100000;
- const uptr kNumIter = 10;
- for (uptr iter = 0; iter < kNumIter; iter++) {
- std::vector<void*> allocated;
- for (uptr i = 0; i < kNumAllocs; i++) {
- uptr size = (i % (1 << 14)) + 1;
- if ((i % 1024) == 0)
- size = 1 << (10 + (i % 14));
- void *x = a.Allocate(&cache, size, 1);
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
- CHECK_EQ(*meta, 0);
- *meta = size;
- allocated.push_back(x);
- }
-
- random_shuffle(allocated.begin(), allocated.end());
-
- for (uptr i = 0; i < kNumAllocs; i++) {
- void *x = allocated[i];
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
- CHECK_NE(*meta, 0);
- CHECK(a.PointerIsMine(x));
- *meta = 0;
- a.Deallocate(&cache, x);
- }
- allocated.clear();
- a.SwallowCache(&cache);
- }
- a.TestOnlyUnmap();
-}
-
-static THREADLOCAL AllocatorCache static_allocator_cache;
-
-TEST(SanitizerCommon, SizeClassAllocatorLocalCache) {
- static_allocator_cache.Init();
-
- Allocator a;
- AllocatorCache cache;
-
- a.Init();
- cache.Init();
-
- const uptr kNumAllocs = 10000;
- const int kNumIter = 100;
- uptr saved_total = 0;
- for (int i = 0; i < kNumIter; i++) {
- void *allocated[kNumAllocs];
- for (uptr i = 0; i < kNumAllocs; i++) {
- allocated[i] = cache.Allocate(&a, 0);
- }
- for (uptr i = 0; i < kNumAllocs; i++) {
- cache.Deallocate(&a, 0, allocated[i]);
- }
- cache.Drain(&a);
- uptr total_allocated = a.TotalMemoryUsed();
- if (saved_total)
- CHECK_EQ(saved_total, total_allocated);
- saved_total = total_allocated;
- }
-
- a.TestOnlyUnmap();
-}
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc b/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
deleted file mode 100644
index cff782342a6a..000000000000
--- a/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-//===-- sanitizer_allocator64_testlib.cc ----------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Malloc replacement library based on CombinedAllocator.
-// The primary purpose of this file is an end-to-end integration test
-// for CombinedAllocator.
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator64.h"
-#include <stddef.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <assert.h>
-
-namespace {
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x10000000000; // 1T.
-
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
- DefaultSizeClassMap> PrimaryAllocator;
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
- PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
-
-static THREADLOCAL AllocatorCache cache;
-static Allocator allocator;
-
-static int inited = 0;
-
-__attribute__((constructor))
-void Init() {
- if (inited) return;
- inited = true; // this must happen before any threads are created.
- allocator.Init();
-}
-
-} // namespace
-
-namespace __sanitizer {
-void NORETURN Die() {
- _exit(77);
-}
-void NORETURN CheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- fprintf(stderr, "CheckFailed: %s:%d %s (%lld %lld)\n",
- file, line, cond, v1, v2);
- Die();
-}
-}
-
-#if 1
-extern "C" {
-void *malloc(size_t size) {
- Init();
- assert(inited);
- return allocator.Allocate(&cache, size, 8);
-}
-
-void free(void *p) {
- assert(inited);
- allocator.Deallocate(&cache, p);
-}
-
-void *calloc(size_t nmemb, size_t size) {
- assert(inited);
- return allocator.Allocate(&cache, nmemb * size, 8, /*cleared=*/true);
-}
-
-void *realloc(void *p, size_t new_size) {
- assert(inited);
- return allocator.Reallocate(&cache, p, new_size, 8);
-}
-
-void *memalign() { assert(0); }
-
-int posix_memalign(void **memptr, size_t alignment, size_t size) {
- *memptr = allocator.Allocate(&cache, size, alignment);
- CHECK_EQ(((uptr)*memptr & (alignment - 1)), 0);
- return 0;
-}
-
-void *valloc(size_t size) {
- assert(inited);
- return allocator.Allocate(&cache, size, kPageSize);
-}
-
-void *pvalloc(size_t size) {
- assert(inited);
- if (size == 0) size = kPageSize;
- return allocator.Allocate(&cache, size, kPageSize);
-}
-}
-#endif
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index d6c7f56dc143..d67f4636ef4f 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -8,13 +8,465 @@
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
+
+#include "sanitizer_test_utils.h"
+
#include "gtest/gtest.h"
+
#include <stdlib.h>
+#include <pthread.h>
+#include <algorithm>
+#include <vector>
+
+// Too slow for debug build
+#if TSAN_DEBUG == 0
+
+#if SANITIZER_WORDSIZE == 64
+static const uptr kAllocatorSpace = 0x700000000000ULL;
+static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
+static const u64 kAddressSpaceSize = 1ULL << 47;
+
+typedef SizeClassAllocator64<
+ kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
+
+typedef SizeClassAllocator64<
+ kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+#else
+static const u64 kAddressSpaceSize = 1ULL << 32;
+#endif
+
+typedef SizeClassAllocator32<
+ 0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;
+
+template <class SizeClassMap>
+void TestSizeClassMap() {
+ typedef SizeClassMap SCMap;
+ // SCMap::Print();
+ SCMap::Validate();
+}
+
+TEST(SanitizerCommon, DefaultSizeClassMap) {
+ TestSizeClassMap<DefaultSizeClassMap>();
+}
+
+TEST(SanitizerCommon, CompactSizeClassMap) {
+ TestSizeClassMap<CompactSizeClassMap>();
+}
-namespace __sanitizer {
+template <class Allocator>
+void TestSizeClassAllocator() {
+ Allocator *a = new Allocator;
+ a->Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ cache.Init();
+
+ static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
+
+ std::vector<void *> allocated;
+
+ uptr last_total_allocated = 0;
+ for (int i = 0; i < 3; i++) {
+ // Allocate a bunch of chunks.
+ for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
+ uptr size = sizes[s];
+ if (!a->CanAllocate(size, 1)) continue;
+ // printf("s = %ld\n", size);
+ uptr n_iter = std::max((uptr)6, 10000000 / size);
+ // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
+ for (uptr i = 0; i < n_iter; i++) {
+ uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
+ char *x = (char*)cache.Allocate(a, class_id0);
+ x[0] = 0;
+ x[size - 1] = 0;
+ x[size / 2] = 0;
+ allocated.push_back(x);
+ CHECK_EQ(x, a->GetBlockBegin(x));
+ CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
+ CHECK(a->PointerIsMine(x));
+ CHECK(a->PointerIsMine(x + size - 1));
+ CHECK(a->PointerIsMine(x + size / 2));
+ CHECK_GE(a->GetActuallyAllocatedSize(x), size);
+ uptr class_id = a->GetSizeClass(x);
+ CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
+ uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
+ metadata[0] = reinterpret_cast<uptr>(x) + 1;
+ metadata[1] = 0xABCD;
+ }
+ }
+ // Deallocate all.
+ for (uptr i = 0; i < allocated.size(); i++) {
+ void *x = allocated[i];
+ uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
+ CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
+ CHECK_EQ(metadata[1], 0xABCD);
+ cache.Deallocate(a, a->GetSizeClass(x), x);
+ }
+ allocated.clear();
+ uptr total_allocated = a->TotalMemoryUsed();
+ if (last_total_allocated == 0)
+ last_total_allocated = total_allocated;
+ CHECK_EQ(last_total_allocated, total_allocated);
+ }
+
+ a->TestOnlyUnmap();
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64) {
+ TestSizeClassAllocator<Allocator64>();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64Compact) {
+ TestSizeClassAllocator<Allocator64Compact>();
+}
+#endif
+
+TEST(SanitizerCommon, SizeClassAllocator32Compact) {
+ TestSizeClassAllocator<Allocator32Compact>();
+}
+
+template <class Allocator>
+void SizeClassAllocatorMetadataStress() {
+ Allocator *a = new Allocator;
+ a->Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ cache.Init();
+ static volatile void *sink;
+
+ const uptr kNumAllocs = 10000;
+ void *allocated[kNumAllocs];
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ void *x = cache.Allocate(a, 1 + i % 50);
+ allocated[i] = x;
+ }
+ // Get Metadata kNumAllocs^2 times.
+ for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
+ sink = a->GetMetaData(allocated[i % kNumAllocs]);
+ }
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ cache.Deallocate(a, 1 + i % 50, allocated[i]);
+ }
+
+ a->TestOnlyUnmap();
+ (void)sink;
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
+ SizeClassAllocatorMetadataStress<Allocator64>();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
+ SizeClassAllocatorMetadataStress<Allocator64Compact>();
+}
+#endif
+TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
+ SizeClassAllocatorMetadataStress<Allocator32Compact>();
+}
+
+struct TestMapUnmapCallback {
+ static int map_count, unmap_count;
+ void OnMap(uptr p, uptr size) const { map_count++; }
+ void OnUnmap(uptr p, uptr size) const { unmap_count++; }
+};
+int TestMapUnmapCallback::map_count;
+int TestMapUnmapCallback::unmap_count;
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
+ TestMapUnmapCallback::map_count = 0;
+ TestMapUnmapCallback::unmap_count = 0;
+ typedef SizeClassAllocator64<
+ kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
+ TestMapUnmapCallback> Allocator64WithCallBack;
+ Allocator64WithCallBack *a = new Allocator64WithCallBack;
+ a->Init();
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
+ SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
+ cache.Init();
+ a->AllocateBatch(&cache, 64);
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc + metadata.
+ a->TestOnlyUnmap();
+ EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing.
+ delete a;
+}
+#endif
+
+TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
+ TestMapUnmapCallback::map_count = 0;
+ TestMapUnmapCallback::unmap_count = 0;
+ typedef SizeClassAllocator32<
+ 0, kAddressSpaceSize, 16, CompactSizeClassMap,
+ TestMapUnmapCallback> Allocator32WithCallBack;
+ Allocator32WithCallBack *a = new Allocator32WithCallBack;
+ a->Init();
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
+ SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
+ cache.Init();
+ a->AllocateBatch(&cache, 64);
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 2); // alloc.
+ a->TestOnlyUnmap();
+ EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2); // The whole thing + alloc.
+ delete a;
+ // fprintf(stderr, "Map: %d Unmap: %d\n",
+ // TestMapUnmapCallback::map_count,
+ // TestMapUnmapCallback::unmap_count);
+}
+
+TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
+ TestMapUnmapCallback::map_count = 0;
+ TestMapUnmapCallback::unmap_count = 0;
+ LargeMmapAllocator<TestMapUnmapCallback> a;
+ a.Init();
+ void *x = a.Allocate(1 << 20, 1);
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
+ a.Deallocate(x);
+ EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
+}
+
+template<class Allocator>
+void FailInAssertionOnOOM() {
+ Allocator a;
+ a.Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ cache.Init();
+ for (int i = 0; i < 1000000; i++) {
+ a.AllocateBatch(&cache, 64);
+ }
+
+ a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+ EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
+}
+#endif
+
+TEST(SanitizerCommon, LargeMmapAllocator) {
+ LargeMmapAllocator<> a;
+ a.Init();
+
+ static const int kNumAllocs = 1000;
+ char *allocated[kNumAllocs];
+ static const uptr size = 4000;
+ // Allocate some.
+ for (int i = 0; i < kNumAllocs; i++) {
+ allocated[i] = (char *)a.Allocate(size, 1);
+ CHECK(a.PointerIsMine(allocated[i]));
+ }
+ // Deallocate all.
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ for (int i = 0; i < kNumAllocs; i++) {
+ char *p = allocated[i];
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+  // Check that none are left.
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+ // Allocate some more, also add metadata.
+ for (int i = 0; i < kNumAllocs; i++) {
+ char *x = (char *)a.Allocate(size, 1);
+ CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ *meta = i;
+ allocated[i] = x;
+ }
+ for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
+ char *p = allocated[i % kNumAllocs];
+ CHECK(a.PointerIsMine(p));
+ CHECK(a.PointerIsMine(p + 2000));
+ }
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ // Deallocate all in reverse order.
+ for (int i = 0; i < kNumAllocs; i++) {
+ int idx = kNumAllocs - i - 1;
+ char *p = allocated[idx];
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+ CHECK_EQ(*meta, idx);
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+ // Test alignments.
+ uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
+ for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
+ const uptr kNumAlignedAllocs = 100;
+ for (uptr i = 0; i < kNumAlignedAllocs; i++) {
+ uptr size = ((i % 10) + 1) * 4096;
+ char *p = allocated[i] = (char *)a.Allocate(size, alignment);
+ CHECK_EQ(p, a.GetBlockBegin(p));
+ CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
+ CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
+ CHECK_EQ(0, (uptr)allocated[i] % alignment);
+ p[0] = p[size - 1] = 0;
+ }
+ for (uptr i = 0; i < kNumAlignedAllocs; i++) {
+ a.Deallocate(allocated[i]);
+ }
+ }
+}
+
+template
+<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
+void TestCombinedAllocator() {
+ typedef
+ CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
+ Allocator;
+ Allocator *a = new Allocator;
+ a->Init();
+
+ AllocatorCache cache;
+ cache.Init();
+
+ EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
+ EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
+ EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
+ EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
+ EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
+
+ const uptr kNumAllocs = 100000;
+ const uptr kNumIter = 10;
+ for (uptr iter = 0; iter < kNumIter; iter++) {
+ std::vector<void*> allocated;
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ uptr size = (i % (1 << 14)) + 1;
+ if ((i % 1024) == 0)
+ size = 1 << (10 + (i % 14));
+ void *x = a->Allocate(&cache, size, 1);
+ uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
+ CHECK_EQ(*meta, 0);
+ *meta = size;
+ allocated.push_back(x);
+ }
+
+ random_shuffle(allocated.begin(), allocated.end());
+
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ void *x = allocated[i];
+ uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
+ CHECK_NE(*meta, 0);
+ CHECK(a->PointerIsMine(x));
+ *meta = 0;
+ a->Deallocate(&cache, x);
+ }
+ allocated.clear();
+ a->SwallowCache(&cache);
+ }
+ a->TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, CombinedAllocator64) {
+ TestCombinedAllocator<Allocator64,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator64> > ();
+}
+
+TEST(SanitizerCommon, CombinedAllocator64Compact) {
+ TestCombinedAllocator<Allocator64Compact,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator64Compact> > ();
+}
+#endif
+
+TEST(SanitizerCommon, CombinedAllocator32Compact) {
+ TestCombinedAllocator<Allocator32Compact,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator32Compact> > ();
+}
+
+template <class AllocatorCache>
+void TestSizeClassAllocatorLocalCache() {
+ static AllocatorCache static_allocator_cache;
+ static_allocator_cache.Init();
+ AllocatorCache cache;
+ typedef typename AllocatorCache::Allocator Allocator;
+ Allocator *a = new Allocator();
+
+ a->Init();
+ cache.Init();
+
+ const uptr kNumAllocs = 10000;
+ const int kNumIter = 100;
+ uptr saved_total = 0;
+ for (int class_id = 1; class_id <= 5; class_id++) {
+ for (int it = 0; it < kNumIter; it++) {
+ void *allocated[kNumAllocs];
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ allocated[i] = cache.Allocate(a, class_id);
+ }
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ cache.Deallocate(a, class_id, allocated[i]);
+ }
+ cache.Drain(a);
+ uptr total_allocated = a->TotalMemoryUsed();
+ if (it)
+ CHECK_EQ(saved_total, total_allocated);
+ saved_total = total_allocated;
+ }
+ }
+
+ a->TestOnlyUnmap();
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64> >();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64Compact> >();
+}
+#endif
+
+TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator32Compact> >();
+}
+
+#if SANITIZER_WORDSIZE == 64
+typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
+static AllocatorCache static_allocator_cache;
+
+void *AllocatorLeakTestWorker(void *arg) {
+ typedef AllocatorCache::Allocator Allocator;
+ Allocator *a = (Allocator*)(arg);
+ static_allocator_cache.Allocate(a, 10);
+ static_allocator_cache.Drain(a);
+ return 0;
+}
+
+TEST(SanitizerCommon, AllocatorLeakTest) {
+ typedef AllocatorCache::Allocator Allocator;
+ Allocator a;
+ a.Init();
+ uptr total_used_memory = 0;
+ for (int i = 0; i < 100; i++) {
+ pthread_t t;
+ EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
+ EXPECT_EQ(0, pthread_join(t, 0));
+ if (i == 0)
+ total_used_memory = a.TotalMemoryUsed();
+ EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
+ }
+
+ a.TestOnlyUnmap();
+}
+#endif
TEST(Allocator, Basic) {
char *p = (char*)InternalAlloc(10);
@@ -22,14 +474,6 @@ TEST(Allocator, Basic) {
char *p2 = (char*)InternalAlloc(20);
EXPECT_NE(p2, (char*)0);
EXPECT_NE(p2, p);
- for (int i = 0; i < 10; i++) {
- p[i] = 42;
- EXPECT_EQ(p, InternalAllocBlock(p + i));
- }
- for (int i = 0; i < 20; i++) {
- ((char*)p2)[i] = 42;
- EXPECT_EQ(p2, InternalAllocBlock(p2 + i));
- }
InternalFree(p);
InternalFree(p2);
}
@@ -39,13 +483,9 @@ TEST(Allocator, Stress) {
char *ptrs[kCount];
unsigned rnd = 42;
for (int i = 0; i < kCount; i++) {
- uptr sz = rand_r(&rnd) % 1000;
+ uptr sz = my_rand_r(&rnd) % 1000;
char *p = (char*)InternalAlloc(sz);
EXPECT_NE(p, (char*)0);
- for (uptr j = 0; j < sz; j++) {
- p[j] = 42;
- EXPECT_EQ(p, InternalAllocBlock(p + j));
- }
ptrs[i] = p;
}
for (int i = 0; i < kCount; i++) {
@@ -53,4 +493,18 @@ TEST(Allocator, Stress) {
}
}
-} // namespace __sanitizer
+TEST(Allocator, ScopedBuffer) {
+ const int kSize = 512;
+ {
+ InternalScopedBuffer<int> int_buf(kSize);
+ EXPECT_EQ(sizeof(int) * kSize, int_buf.size()); // NOLINT
+ }
+ InternalScopedBuffer<char> char_buf(kSize);
+ EXPECT_EQ(sizeof(char) * kSize, char_buf.size()); // NOLINT
+ internal_memset(char_buf.data(), 'c', kSize);
+ for (int i = 0; i < kSize; i++) {
+ EXPECT_EQ('c', char_buf[i]);
+ }
+}
+
+#endif // #if TSAN_DEBUG==0
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc b/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
new file mode 100644
index 000000000000..f6a944f68f5e
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
@@ -0,0 +1,162 @@
+//===-- sanitizer_allocator_testlib.cc ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Malloc replacement library based on CombinedAllocator.
+// The primary purpose of this file is an end-to-end integration test
+// for CombinedAllocator.
+//===----------------------------------------------------------------------===//
+/* Usage:
+clang++ -fno-exceptions -g -fPIC -I. -I../include -Isanitizer \
+ sanitizer_common/tests/sanitizer_allocator_testlib.cc \
+ sanitizer_common/sanitizer_*.cc -shared -lpthread -o testmalloc.so
+LD_PRELOAD=`pwd`/testmalloc.so /your/app
+*/
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+
+#ifndef SANITIZER_MALLOC_HOOK
+# define SANITIZER_MALLOC_HOOK(p, s)
+#endif
+
+#ifndef SANITIZER_FREE_HOOK
+# define SANITIZER_FREE_HOOK(p)
+#endif
+
+namespace {
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
+
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+ CompactSizeClassMap> PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+static Allocator allocator;
+static bool global_inited;
+static THREADLOCAL AllocatorCache cache;
+static THREADLOCAL bool thread_inited;
+static pthread_key_t pkey;
+
+static void thread_dtor(void *v) {
+ if ((uptr)v != 3) {
+ pthread_setspecific(pkey, (void*)((uptr)v + 1));
+ return;
+ }
+ allocator.SwallowCache(&cache);
+}
+
+static void NOINLINE thread_init() {
+ if (!global_inited) {
+ global_inited = true;
+ allocator.Init();
+ pthread_key_create(&pkey, thread_dtor);
+ }
+ thread_inited = true;
+ pthread_setspecific(pkey, (void*)1);
+ cache.Init();
+}
+} // namespace
+
+extern "C" {
+void *malloc(size_t size) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ void *p = allocator.Allocate(&cache, size, 8);
+ SANITIZER_MALLOC_HOOK(p, size);
+ return p;
+}
+
+void free(void *p) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ SANITIZER_FREE_HOOK(p);
+ allocator.Deallocate(&cache, p);
+}
+
+void *calloc(size_t nmemb, size_t size) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ size *= nmemb;
+ void *p = allocator.Allocate(&cache, size, 8, false);
+ memset(p, 0, size);
+ SANITIZER_MALLOC_HOOK(p, size);
+ return p;
+}
+
+void *realloc(void *p, size_t size) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ if (p) {
+ SANITIZER_FREE_HOOK(p);
+ }
+ p = allocator.Reallocate(&cache, p, size, 8);
+ if (p) {
+ SANITIZER_MALLOC_HOOK(p, size);
+ }
+ return p;
+}
+
+void *memalign(size_t alignment, size_t size) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ void *p = allocator.Allocate(&cache, size, alignment);
+ SANITIZER_MALLOC_HOOK(p, size);
+ return p;
+}
+
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ *memptr = allocator.Allocate(&cache, size, alignment);
+ SANITIZER_MALLOC_HOOK(*memptr, size);
+ return 0;
+}
+
+void *valloc(size_t size) {
+ if (UNLIKELY(!thread_inited))
+ thread_init();
+ if (size == 0)
+ size = GetPageSizeCached();
+ void *p = allocator.Allocate(&cache, size, GetPageSizeCached());
+ SANITIZER_MALLOC_HOOK(p, size);
+ return p;
+}
+
+void cfree(void *p) ALIAS("free");
+void *pvalloc(size_t size) ALIAS("valloc");
+void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign");
+
+void malloc_usable_size() {
+}
+
+void mallinfo() {
+}
+
+void mallopt() {
+}
+} // extern "C"
+
+namespace std {
+ struct nothrow_t;
+}
+
+void *operator new(size_t size) ALIAS("malloc");
+void *operator new[](size_t size) ALIAS("malloc");
+void *operator new(size_t size, std::nothrow_t const&) ALIAS("malloc");
+void *operator new[](size_t size, std::nothrow_t const&) ALIAS("malloc");
+void operator delete(void *ptr) ALIAS("free");
+void operator delete[](void *ptr) ALIAS("free");
+void operator delete(void *ptr, std::nothrow_t const&) ALIAS("free");
+void operator delete[](void *ptr, std::nothrow_t const&) ALIAS("free");
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
index 91570dcc99e0..01d8b5a87c01 100644
--- a/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
#include "gtest/gtest.h"
namespace __sanitizer {
@@ -63,4 +64,36 @@ TEST(SanitizerCommon, SortTest) {
EXPECT_TRUE(IsSorted(array, 2));
}
+TEST(SanitizerCommon, MmapAlignedOrDie) {
+ uptr PageSize = GetPageSizeCached();
+ for (uptr size = 1; size <= 32; size *= 2) {
+ for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
+ for (int iter = 0; iter < 100; iter++) {
+ uptr res = (uptr)MmapAlignedOrDie(
+ size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
+ EXPECT_EQ(0U, res % (alignment * PageSize));
+ internal_memset((void*)res, 1, size * PageSize);
+ UnmapOrDie((void*)res, size * PageSize);
+ }
+ }
+ }
+}
+
+#ifdef __linux__
+TEST(SanitizerCommon, SanitizerSetThreadName) {
+ const char *names[] = {
+ "0123456789012",
+ "01234567890123",
+ "012345678901234", // Larger names will be truncated on linux.
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(names); i++) {
+ EXPECT_TRUE(SanitizerSetThreadName(names[i]));
+ char buff[100];
+ EXPECT_TRUE(SanitizerGetThreadName(buff, sizeof(buff) - 1));
+ EXPECT_EQ(0, internal_strcmp(buff, names[i]));
+ }
+}
+#endif
+
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_flags_test.cc b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
index 4b273e5b9cef..c0589f4d2e90 100644
--- a/lib/sanitizer_common/tests/sanitizer_flags_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
@@ -12,11 +12,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
#include "gtest/gtest.h"
-#include "tsan_rtl.h" // FIXME: break dependency from TSan runtime.
-using __tsan::ScopedInRtl;
-
#include <string.h>
namespace __sanitizer {
@@ -34,11 +32,10 @@ static void TestStrFlag(const char *start_value, const char *env,
const char *final_value) {
const char *flag = start_value;
ParseFlag(env, &flag, kFlagName);
- EXPECT_STREQ(final_value, flag);
+ EXPECT_EQ(internal_strcmp(final_value, flag), 0);
}
TEST(SanitizerCommon, BooleanFlags) {
- ScopedInRtl in_rtl;
TestFlag(true, "--flag_name", true);
TestFlag(false, "flag_name", false);
TestFlag(false, "--flag_name=1", true);
@@ -51,7 +48,6 @@ TEST(SanitizerCommon, BooleanFlags) {
}
TEST(SanitizerCommon, IntFlags) {
- ScopedInRtl in_rtl;
TestFlag(-11, 0, -11);
TestFlag(-11, "flag_name", 0);
TestFlag(-11, "--flag_name=", 0);
@@ -60,12 +56,12 @@ TEST(SanitizerCommon, IntFlags) {
}
TEST(SanitizerCommon, StrFlags) {
- ScopedInRtl in_rtl;
TestStrFlag("zzz", 0, "zzz");
TestStrFlag("zzz", "flag_name", "");
TestStrFlag("zzz", "--flag_name=", "");
TestStrFlag("", "--flag_name=abc", "abc");
TestStrFlag("", "--flag_name='abc zxc'", "abc zxc");
+ TestStrFlag("", "--flag_name='abc zxcc'", "abc zxcc");
TestStrFlag("", "--flag_name=\"abc qwe\" asd", "abc qwe");
}
diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
new file mode 100644
index 000000000000..b9d8414e0cbf
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
@@ -0,0 +1,42 @@
+//===-- sanitizer_libc_test.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Tests for sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "gtest/gtest.h"
+
+// A regression test for the internal_memmove() implementation.
+TEST(SanitizerCommon, InternalMemmoveRegression) {
+ char src[] = "Hello World";
+ char *dest = src + 6;
+ __sanitizer::internal_memmove(dest, src, 5);
+ EXPECT_EQ(dest[0], src[0]);
+ EXPECT_EQ(dest[4], src[4]);
+}
+
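+// Exhaustively verifies mem_is_zero() on every sub-range [beg, end) of a
+// buffer that has a single non-zero byte planted at each position in turn.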
+TEST(SanitizerCommon, mem_is_zero) {
+ size_t size = 128;
+ char *x = new char[size];
+ memset(x, 0, size);
+ for (size_t pos = 0; pos < size; pos++) {
+ x[pos] = 1;
+ for (size_t beg = 0; beg < size; beg++) {
+ for (size_t end = beg; end < size; end++) {
+ // fprintf(stderr, "pos %zd beg %zd end %zd \n", pos, beg, end);
+ if (beg <= pos && pos < end)
+ EXPECT_FALSE(__sanitizer::mem_is_zero(x + beg, end - beg));
+ else
+ EXPECT_TRUE(__sanitizer::mem_is_zero(x + beg, end - beg));
+ }
+ }
+ x[pos] = 0;
+ }
+ delete [] x;
+}
diff --git a/lib/sanitizer_common/tests/sanitizer_list_test.cc b/lib/sanitizer_common/tests/sanitizer_list_test.cc
index d328fbfdf92c..fbe53c0375c0 100644
--- a/lib/sanitizer_common/tests/sanitizer_list_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_list_test.cc
@@ -21,8 +21,7 @@ struct ListItem {
typedef IntrusiveList<ListItem> List;
-// Check that IntrusiveList can be made thread-local.
-static THREADLOCAL List static_list;
+static List static_list;
static void SetList(List *l, ListItem *x = 0,
ListItem *y = 0, ListItem *z = 0) {
@@ -154,4 +153,21 @@ TEST(SanitizerCommon, IntrusiveList) {
CHECK(l2.empty());
}
+TEST(SanitizerCommon, IntrusiveListAppendEmpty) {
+ ListItem i;
+ List l;
+ l.clear();
+ l.push_back(&i);
+ List l2;
+ l2.clear();
+ l.append_back(&l2);
+ CHECK_EQ(l.back(), &i);
+ CHECK_EQ(l.front(), &i);
+ CHECK_EQ(l.size(), 1);
+ l.append_front(&l2);
+ CHECK_EQ(l.back(), &i);
+ CHECK_EQ(l.front(), &i);
+ CHECK_EQ(l.size(), 1);
+}
+
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_mutex_test.cc b/lib/sanitizer_common/tests/sanitizer_mutex_test.cc
new file mode 100644
index 000000000000..6bb2ae29a188
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_mutex_test.cc
@@ -0,0 +1,128 @@
+//===-- sanitizer_mutex_test.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+namespace __sanitizer {
+
+template<typename MutexType>
+class TestData {
+ public:
+ explicit TestData(MutexType *mtx)
+ : mtx_(mtx) {
+ for (int i = 0; i < kSize; i++)
+ data_[i] = 0;
+ }
+
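+  // All elements are incremented together under the lock; if the mutex is
+  // broken, a concurrent writer makes data_[i] diverge from data_[0] and
+  // trips the CHECK.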
+ void Write() {
+ Lock l(mtx_);
+ T v0 = data_[0];
+ for (int i = 0; i < kSize; i++) {
+ CHECK_EQ(data_[i], v0);
+ data_[i]++;
+ }
+ }
+
+ void TryWrite() {
+ if (!mtx_->TryLock())
+ return;
+ T v0 = data_[0];
+ for (int i = 0; i < kSize; i++) {
+ CHECK_EQ(data_[i], v0);
+ data_[i]++;
+ }
+ mtx_->Unlock();
+ }
+
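+  // Do some non-shared busy work between lock operations so that the threads
+  // do not reacquire the mutex back-to-back.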
+ void Backoff() {
+ volatile T data[kSize] = {};
+ for (int i = 0; i < kSize; i++) {
+ data[i]++;
+ CHECK_EQ(data[i], 1);
+ }
+ }
+
+ private:
+ typedef GenericScopedLock<MutexType> Lock;
+ static const int kSize = 64;
+ typedef u64 T;
+ MutexType *mtx_;
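+  // Pad so that data_ does not share a cache line with mtx_.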
+ char pad_[kCacheLineSize];
+ T data_[kSize];
+};
+
+const int kThreads = 8;
+const int kWriteRate = 1024;
+#if SANITIZER_DEBUG
+const int kIters = 16*1024;
+#else
+const int kIters = 64*1024;
+#endif
+
+template<typename MutexType>
+static void *lock_thread(void *param) {
+ TestData<MutexType> *data = (TestData<MutexType>*)param;
+ for (int i = 0; i < kIters; i++) {
+ data->Write();
+ data->Backoff();
+ }
+ return 0;
+}
+
+template<typename MutexType>
+static void *try_thread(void *param) {
+ TestData<MutexType> *data = (TestData<MutexType>*)param;
+ for (int i = 0; i < kIters; i++) {
+ data->TryWrite();
+ data->Backoff();
+ }
+ return 0;
+}
+
+TEST(SanitizerCommon, SpinMutex) {
+ SpinMutex mtx;
+ mtx.Init();
+ TestData<SpinMutex> data(&mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ pthread_create(&threads[i], 0, lock_thread<SpinMutex>, &data);
+ for (int i = 0; i < kThreads; i++)
+ pthread_join(threads[i], 0);
+}
+
+TEST(SanitizerCommon, SpinMutexTry) {
+ SpinMutex mtx;
+ mtx.Init();
+ TestData<SpinMutex> data(&mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ pthread_create(&threads[i], 0, try_thread<SpinMutex>, &data);
+ for (int i = 0; i < kThreads; i++)
+ pthread_join(threads[i], 0);
+}
+
+TEST(SanitizerCommon, BlockingMutex) {
+ u64 mtxmem[1024] = {};
+ BlockingMutex *mtx = new(mtxmem) BlockingMutex(LINKER_INITIALIZED);
+ TestData<BlockingMutex> data(mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ pthread_create(&threads[i], 0, lock_thread<BlockingMutex>, &data);
+ for (int i = 0; i < kThreads; i++)
+ pthread_join(threads[i], 0);
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_printf_test.cc b/lib/sanitizer_common/tests/sanitizer_printf_test.cc
new file mode 100644
index 000000000000..b1889cd8794e
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_printf_test.cc
@@ -0,0 +1,125 @@
+//===-- sanitizer_printf_test.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tests for sanitizer_printf.cc
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "gtest/gtest.h"
+
+#include <string.h>
+#include <limits.h>
+
+namespace __sanitizer {
+
+TEST(Printf, Basic) {
+ char buf[1024];
+ uptr len = internal_snprintf(buf, sizeof(buf),
+ "a%db%zdc%ue%zuf%xh%zxq%pe%sr",
+ (int)-1, (long)-2, // NOLINT
+ (unsigned)-4, (unsigned long)5, // NOLINT
+ (unsigned)10, (unsigned long)11, // NOLINT
+ (void*)0x123, "_string_");
+ EXPECT_EQ(len, strlen(buf));
+ void *ptr;
+ if (sizeof(ptr) == 4) {
+ EXPECT_STREQ("a-1b-2c4294967292e5fahbq"
+ "0x00000123e_string_r", buf);
+ } else {
+ EXPECT_STREQ("a-1b-2c4294967292e5fahbq"
+ "0x000000000123e_string_r", buf);
+ }
+}
+
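+// The Overflow* tests check both truncation (output is cut at the buffer size
+// and zero-terminated) and that bytes beyond the given size are left
+// untouched.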
+TEST(Printf, OverflowStr) {
+ char buf[] = "123456789";
+ uptr len = internal_snprintf(buf, 4, "%s", "abcdef"); // NOLINT
+ EXPECT_EQ(len, (uptr)6);
+ EXPECT_STREQ("abc", buf);
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+TEST(Printf, OverflowInt) {
+ char buf[] = "123456789";
+ internal_snprintf(buf, 4, "%d", -123456789); // NOLINT
+ EXPECT_STREQ("-12", buf);
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+TEST(Printf, OverflowUint) {
+ char buf[] = "123456789";
+ uptr val;
+ if (sizeof(val) == 4) {
+ val = (uptr)0x12345678;
+ } else {
+ val = (uptr)0x123456789ULL;
+ }
+ internal_snprintf(buf, 4, "a%zx", val); // NOLINT
+ EXPECT_STREQ("a12", buf);
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+TEST(Printf, OverflowPtr) {
+ char buf[] = "123456789";
+ void *p;
+ if (sizeof(p) == 4) {
+ p = (void*)0x1234567;
+ } else {
+ p = (void*)0x123456789ULL;
+ }
+ internal_snprintf(buf, 4, "%p", p); // NOLINT
+ EXPECT_STREQ("0x0", buf);
+ EXPECT_EQ(buf[3], 0);
+ EXPECT_EQ(buf[4], '5');
+ EXPECT_EQ(buf[5], '6');
+ EXPECT_EQ(buf[6], '7');
+ EXPECT_EQ(buf[7], '8');
+ EXPECT_EQ(buf[8], '9');
+ EXPECT_EQ(buf[9], 0);
+}
+
+template<typename T>
+static void TestMinMax(const char *fmt, T min, T max) {
+ char buf[1024];
+ uptr len = internal_snprintf(buf, sizeof(buf), fmt, min, max);
+ char buf2[1024];
+ snprintf(buf2, sizeof(buf2), fmt, min, max);
+ EXPECT_EQ(len, strlen(buf));
+ EXPECT_STREQ(buf2, buf);
+}
+
+TEST(Printf, MinMax) {
+ TestMinMax<int>("%d-%d", INT_MIN, INT_MAX); // NOLINT
+ TestMinMax<long>("%zd-%zd", LONG_MIN, LONG_MAX); // NOLINT
+ TestMinMax<unsigned>("%u-%u", 0, UINT_MAX); // NOLINT
+ TestMinMax<unsigned long>("%zu-%zu", 0, ULONG_MAX); // NOLINT
+ TestMinMax<unsigned>("%x-%x", 0, UINT_MAX); // NOLINT
+ TestMinMax<unsigned long>("%zx-%zx", 0, ULONG_MAX); // NOLINT
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc b/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc
new file mode 100644
index 000000000000..00b260479da9
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc
@@ -0,0 +1,85 @@
+//===-- sanitizer_scanf_interceptor_test.cc -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tests for *scanf interceptors implementation in sanitizer_common.
+//
+//===----------------------------------------------------------------------===//
+#include <vector>
+
+#include "interception/interception.h"
+#include "sanitizer_test_utils.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "gtest/gtest.h"
+
+using namespace __sanitizer;
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ ((std::vector<unsigned> *)ctx)->push_back(size)
+
+#include "sanitizer_common/sanitizer_common_interceptors_scanf.inc"
+
+static void testScanf2(void *ctx, const char *format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ scanf_common(ctx, format, ap);
+ va_end(ap);
+}
+
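+// Invokes scanf_common() on the format string and checks that the interceptor
+// recorded exactly n argument writes with the expected sizes (passed as
+// trailing varargs).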
+static void testScanf(const char *format, unsigned n, ...) {
+ std::vector<unsigned> scanf_sizes;
+ // 16 args should be enough.
+ testScanf2((void *)&scanf_sizes, format,
+ (void*)0, (void*)0, (void*)0, (void*)0,
+ (void*)0, (void*)0, (void*)0, (void*)0,
+ (void*)0, (void*)0, (void*)0, (void*)0,
+ (void*)0, (void*)0, (void*)0, (void*)0);
+ ASSERT_EQ(n, scanf_sizes.size()) <<
+ "Unexpected number of format arguments: '" << format << "'";
+ va_list ap;
+ va_start(ap, n);
+ for (unsigned i = 0; i < n; ++i)
+ EXPECT_EQ(va_arg(ap, unsigned), scanf_sizes[i]) <<
+ "Unexpect write size for argument " << i << ", format string '" <<
+ format << "'";
+ va_end(ap);
+}
+
+TEST(SanitizerCommonInterceptors, Scanf) {
+ const unsigned I = sizeof(int); // NOLINT
+ const unsigned L = sizeof(long); // NOLINT
+ const unsigned LL = sizeof(long long); // NOLINT
+ const unsigned S = sizeof(short); // NOLINT
+ const unsigned C = sizeof(char); // NOLINT
+ const unsigned D = sizeof(double); // NOLINT
+ const unsigned F = sizeof(float); // NOLINT
+
+ testScanf("%d", 1, I);
+ testScanf("%d%d%d", 3, I, I, I);
+ testScanf("ab%u%dc", 2, I, I);
+ testScanf("%ld", 1, L);
+ testScanf("%llu", 1, LL);
+ testScanf("a %hd%hhx", 2, S, C);
+
+ testScanf("%%", 0);
+ testScanf("a%%", 0);
+ testScanf("a%%b", 0);
+ testScanf("a%%%%b", 0);
+ testScanf("a%%b%%", 0);
+ testScanf("a%%%%%%b", 0);
+ testScanf("a%%%%%b", 0);
+ testScanf("a%%%%%f", 1, F);
+ testScanf("a%%%lxb", 1, L);
+ testScanf("a%lf%%%lxb", 2, D, L);
+ testScanf("%nf", 1, I);
+
+ testScanf("%10s", 1, 11);
+ testScanf("%%10s", 0);
+ testScanf("%*10s", 0);
+ testScanf("%*d", 0);
+}
diff --git a/lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc b/lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc
new file mode 100644
index 000000000000..5350c2ab8dbc
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc
@@ -0,0 +1,69 @@
+//===-- sanitizer_stackdepot_test.cc --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "gtest/gtest.h"
+
+namespace __sanitizer {
+
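+// StackDepotPut() interns a stack trace and returns a handle; StackDepotGet()
+// resolves the handle back to the stored frames.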
+TEST(SanitizerCommon, StackDepotBasic) {
+ uptr s1[] = {1, 2, 3, 4, 5};
+ u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
+ uptr sz1 = 0;
+ const uptr *sp1 = StackDepotGet(i1, &sz1);
+ EXPECT_NE(sp1, (uptr*)0);
+ EXPECT_EQ(sz1, ARRAY_SIZE(s1));
+ EXPECT_EQ(internal_memcmp(sp1, s1, sizeof(s1)), 0);
+}
+
+TEST(SanitizerCommon, StackDepotAbsent) {
+ uptr sz1 = 0;
+ const uptr *sp1 = StackDepotGet((1 << 30) - 1, &sz1);
+ EXPECT_EQ(sp1, (uptr*)0);
+}
+
+TEST(SanitizerCommon, StackDepotEmptyStack) {
+ u32 i1 = StackDepotPut(0, 0);
+ uptr sz1 = 0;
+ const uptr *sp1 = StackDepotGet(i1, &sz1);
+ EXPECT_EQ(sp1, (uptr*)0);
+}
+
+TEST(SanitizerCommon, StackDepotZeroId) {
+ uptr sz1 = 0;
+ const uptr *sp1 = StackDepotGet(0, &sz1);
+ EXPECT_EQ(sp1, (uptr*)0);
+}
+
+TEST(SanitizerCommon, StackDepotSame) {
+ uptr s1[] = {1, 2, 3, 4, 6};
+ u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
+ u32 i2 = StackDepotPut(s1, ARRAY_SIZE(s1));
+ EXPECT_EQ(i1, i2);
+ uptr sz1 = 0;
+ const uptr *sp1 = StackDepotGet(i1, &sz1);
+ EXPECT_NE(sp1, (uptr*)0);
+ EXPECT_EQ(sz1, ARRAY_SIZE(s1));
+ EXPECT_EQ(internal_memcmp(sp1, s1, sizeof(s1)), 0);
+}
+
+TEST(SanitizerCommon, StackDepotSeveral) {
+ uptr s1[] = {1, 2, 3, 4, 7};
+ u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1));
+ uptr s2[] = {1, 2, 3, 4, 8, 9};
+ u32 i2 = StackDepotPut(s2, ARRAY_SIZE(s2));
+ EXPECT_NE(i1, i2);
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_test_main.cc b/lib/sanitizer_common/tests/sanitizer_test_main.cc
new file mode 100644
index 000000000000..12d1d15af917
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_test_main.cc
@@ -0,0 +1,19 @@
+//===-- sanitizer_test_main.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "gtest/gtest.h"
+
+int main(int argc, char **argv) {
+ testing::GTEST_FLAG(death_test_style) = "threadsafe";
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/lib/sanitizer_common/tests/sanitizer_test_utils.h b/lib/sanitizer_common/tests/sanitizer_test_utils.h
new file mode 100644
index 000000000000..6129ea8a5370
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_test_utils.h
@@ -0,0 +1,80 @@
+//===-- sanitizer_test_utils.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of *Sanitizer runtime.
+// Common unit test utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_TEST_UTILS_H
+#define SANITIZER_TEST_UTILS_H
+
+#if defined(_WIN32)
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+# define NOINLINE __declspec(noinline)
+# define USED
+#else // defined(_WIN32)
+# define NOINLINE __attribute__((noinline))
+# define USED __attribute__((used))
+#include <stdint.h>
+#endif // defined(_WIN32)
+
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
+ __attribute__((no_address_safety_analysis))
+#else
+# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
+#endif
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+// Make the compiler think that something is going on there.
+inline void break_optimization(void *arg) {
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+}
+
+// This function returns its parameter, but in such a way that the compiler
+// cannot prove it.
+template<class T>
+NOINLINE
+static T Ident(T t) {
+ T ret = t;
+ break_optimization(&ret);
+ return ret;
+}
+
+// Simple stand-alone pseudorandom number generator.
+// The current algorithm is the ANSI C linear congruential PRNG.
+static inline uint32_t my_rand_r(uint32_t* state) {
+ return (*state = *state * 1103515245 + 12345) >> 16;
+}
+
+static uint32_t global_seed = 0;
+
+static inline uint32_t my_rand() {
+ return my_rand_r(&global_seed);
+}
+
+#endif // SANITIZER_TEST_UTILS_H
diff --git a/lib/sanitizer_common/tests/standalone_malloc_test.cc b/lib/sanitizer_common/tests/standalone_malloc_test.cc
new file mode 100644
index 000000000000..9e6f7c93b04b
--- /dev/null
+++ b/lib/sanitizer_common/tests/standalone_malloc_test.cc
@@ -0,0 +1,87 @@
+#include <stdio.h>
+#include <vector>
+#include <pthread.h>
+#include <malloc.h>
+#include <algorithm>
+
+using namespace std;
+
+const size_t kNumThreads = 16;
+const size_t kNumIters = 1 << 23;
+
+inline void break_optimization(void *arg) {
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+}
+
+__attribute__((noinline))
+static void *MallocThread(void *t) {
+ size_t total_malloced = 0, total_freed = 0;
+ size_t max_in_use = 0;
+ size_t tid = reinterpret_cast<size_t>(t);
+ vector<pair<char *, size_t> > allocated;
+ allocated.reserve(kNumIters);
+ for (size_t i = 1; i < kNumIters; i++) {
+ if ((i % (kNumIters / 4)) == 0 && tid == 0)
+ fprintf(stderr, " T[%ld] iter %ld\n", tid, i);
+ bool allocate = (i % 5) <= 2; // 60% malloc, 40% free
+ if (i > kNumIters / 4)
+ allocate = i % 2; // then switch to 50% malloc, 50% free
+ if (allocate) {
+ size_t size = 1 + (i % 200);
+ if ((i % 10001) == 0)
+ size *= 4096;
+ total_malloced += size;
+ char *x = new char[size];
+ x[0] = x[size - 1] = x[size / 2] = 0;
+ allocated.push_back(make_pair(x, size));
+ max_in_use = max(max_in_use, total_malloced - total_freed);
+ } else {
+ if (allocated.empty()) continue;
+ size_t slot = i % allocated.size();
+ char *p = allocated[slot].first;
+ p[0] = 0; // emulate last user touch of the block
+ size_t size = allocated[slot].second;
+ total_freed += size;
+ swap(allocated[slot], allocated.back());
+ allocated.pop_back();
+ delete [] p;
+ }
+ }
+ if (tid == 0)
+ fprintf(stderr, " T[%ld] total_malloced: %ldM in use %ldM max %ldM\n",
+ tid, total_malloced >> 20, (total_malloced - total_freed) >> 20,
+ max_in_use >> 20);
+ for (size_t i = 0; i < allocated.size(); i++)
+ delete [] allocated[i].first;
+ return 0;
+}
+
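+// Recurse a few hundred frames before calling MallocThread so that every
+// allocation is made from a deep call stack.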
+template <int depth>
+struct DeepStack {
+ __attribute__((noinline))
+ static void *run(void *t) {
+ break_optimization(0);
+ DeepStack<depth - 1>::run(t);
+ break_optimization(0);
+ return 0;
+ }
+};
+
+template<>
+struct DeepStack<0> {
+ static void *run(void *t) {
+ MallocThread(t);
+ return 0;
+ }
+};
+
+// Build with -Dstandalone_malloc_test=main to make it a separate program.
+int standalone_malloc_test() {
+  pthread_t t[kNumThreads];
+  for (size_t i = 0; i < kNumThreads; i++)
+    pthread_create(&t[i], 0, DeepStack<200>::run, reinterpret_cast<void *>(i));
+  for (size_t i = 0; i < kNumThreads; i++)
+    pthread_join(t[i], 0);
+ malloc_stats();
+ return 0;
+}