author     Ed Schouten <ed@FreeBSD.org>     2013-05-27 18:27:12 +0000
committer  Ed Schouten <ed@FreeBSD.org>     2013-05-27 18:27:12 +0000
commit     11023dc647fd8f41418da90d59db138400d0f334 (patch)
tree       50f0ab80515576749ef638dd0766b70a65904bfa /lib/sanitizer_common
parent     58aabf08b77d221489f10e274812ec60917c21a8 (diff)
download   src-11023dc647fd8f41418da90d59db138400d0f334.tar.gz
           src-11023dc647fd8f41418da90d59db138400d0f334.zip

Import compiler-rt r182741.  (vendor/compiler-rt/compiler-rt-r182741)

Notes:
    svn path=/vendor/compiler-rt/dist/; revision=251034
    svn path=/vendor/compiler-rt/compiler-rt-r182741/; revision=251036; tag=vendor/compiler-rt/compiler-rt-r182741
Diffstat (limited to 'lib/sanitizer_common')
-rw-r--r--  lib/sanitizer_common/CMakeLists.txt | 37
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc | 12
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h | 471
-rw-r--r--  lib/sanitizer_common/sanitizer_atomic_clang.h | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_atomic_msvc.h | 27
-rw-r--r--  lib/sanitizer_common/sanitizer_common.cc | 99
-rw-r--r--  lib/sanitizer_common/sanitizer_common.h | 187
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors.inc | 925
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc | 361
-rw-r--r--  lib/sanitizer_common/sanitizer_common_libcdep.cc | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_common_syscalls.inc | 148
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.cc | 31
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.h | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_internal_defs.h | 110
-rw-r--r--  lib/sanitizer_common/sanitizer_lfstack.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_libc.cc | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_libc.h | 40
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.cc | 516
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.h | 65
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_libcdep.cc | 273
-rw-r--r--  lib/sanitizer_common/sanitizer_mac.cc | 117
-rw-r--r--  lib/sanitizer_common/sanitizer_mutex.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_placement_new.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_platform.h | 46
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_interceptors.h | 49
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_posix.cc | 155
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_posix.h | 115
-rw-r--r--  lib/sanitizer_common/sanitizer_posix.cc | 158
-rw-r--r--  lib/sanitizer_common/sanitizer_posix_libcdep.cc | 116
-rw-r--r--  lib/sanitizer_common/sanitizer_printf.cc | 107
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps.h | 39
-rw-r--r--  lib/sanitizer_common/sanitizer_quarantine.h | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_report_decorator.h | 26
-rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace.cc | 18
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace.h | 15
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld.h | 68
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 403
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer.h | 15
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_itanium.cc | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc (renamed from lib/sanitizer_common/sanitizer_symbolizer.cc) | 120
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_linux_libcdep.cc (renamed from lib/sanitizer_common/sanitizer_symbolizer_linux.cc) | 81
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_mac.cc | 17
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_win.cc | 11
-rw-r--r--  lib/sanitizer_common/sanitizer_syscall_generic.inc | 24
-rw-r--r--  lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc | 87
-rw-r--r--  lib/sanitizer_common/sanitizer_thread_registry.cc | 279
-rw-r--r--  lib/sanitizer_common/sanitizer_thread_registry.h | 145
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc | 176
-rwxr-xr-x  lib/sanitizer_common/scripts/check_lint.sh | 33
-rw-r--r--  lib/sanitizer_common/tests/CMakeLists.txt | 49
-rw-r--r--  lib/sanitizer_common/tests/lit.cfg | 5
-rw-r--r--  lib/sanitizer_common/tests/lit.site.cfg.in | 9
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 300
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_atomic_test.cc | 55
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_common_test.cc | 66
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_flags_test.cc | 20
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_libc_test.cc | 75
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_linux_test.cc | 253
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_mutex_test.cc | 7
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc | 137
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc | 96
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc | 194
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc | 53
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_test_utils.h | 14
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc | 230
66 files changed, 6434 insertions, 923 deletions
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index ee0e1237c1a9..2683a37a32ca 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -8,18 +8,27 @@ set(SANITIZER_SOURCES
sanitizer_libc.cc
sanitizer_linux.cc
sanitizer_mac.cc
+ sanitizer_platform_limits_posix.cc
sanitizer_posix.cc
sanitizer_printf.cc
sanitizer_stackdepot.cc
sanitizer_stacktrace.cc
- sanitizer_symbolizer.cc
sanitizer_symbolizer_itanium.cc
- sanitizer_symbolizer_linux.cc
sanitizer_symbolizer_mac.cc
sanitizer_symbolizer_win.cc
+ sanitizer_thread_registry.cc
sanitizer_win.cc
)
+set(SANITIZER_LIBCDEP_SOURCES
+ sanitizer_common_libcdep.cc
+ sanitizer_linux_libcdep.cc
+ sanitizer_posix_libcdep.cc
+ sanitizer_stoptheworld_linux_libcdep.cc
+ sanitizer_symbolizer_libcdep.cc
+ sanitizer_symbolizer_linux_libcdep.cc
+ )
+
# Explicitly list all sanitizer_common headers. Not all of these are
# included in sanitizer_common source files, but we need to depend on
# headers when building our custom unit tests.
@@ -31,10 +40,12 @@ set(SANITIZER_HEADERS
sanitizer_common.h
sanitizer_common_interceptors.inc
sanitizer_common_interceptors_scanf.inc
+ sanitizer_common_syscalls.inc
sanitizer_flags.h
sanitizer_internal_defs.h
sanitizer_lfstack.h
sanitizer_libc.h
+ sanitizer_linux.h
sanitizer_list.h
sanitizer_mutex.h
sanitizer_placement_new.h
@@ -45,20 +56,24 @@ set(SANITIZER_HEADERS
sanitizer_stackdepot.h
sanitizer_stacktrace.h
sanitizer_symbolizer.h
+ sanitizer_thread_registry.h
)
-set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+set(SANITIZER_CFLAGS
+ ${SANITIZER_COMMON_CFLAGS}
+ -fno-rtti)
set(SANITIZER_RUNTIME_LIBRARIES)
if(APPLE)
# Build universal binary on APPLE.
- add_library(RTSanitizerCommon.osx OBJECT ${SANITIZER_SOURCES})
- set_target_compile_flags(RTSanitizerCommon.osx ${SANITIZER_CFLAGS})
- set_target_properties(RTSanitizerCommon.osx PROPERTIES
- OSX_ARCHITECTURES "${SANITIZER_COMMON_SUPPORTED_ARCH}")
+ add_compiler_rt_osx_object_library(RTSanitizerCommon
+ ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH}
+ SOURCES ${SANITIZER_SOURCES} ${SANITIZER_LIBCDEP_SOURCES}
+ CFLAGS ${SANITIZER_CFLAGS})
list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.osx)
elseif(ANDROID)
- add_library(RTSanitizerCommon.arm.android OBJECT ${SANITIZER_SOURCES})
+ add_library(RTSanitizerCommon.arm.android OBJECT
+ ${SANITIZER_SOURCES} ${SANITIZER_LIBCDEP_SOURCES})
set_target_compile_flags(RTSanitizerCommon.arm.android
${SANITIZER_CFLAGS})
list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.arm.android)
@@ -67,6 +82,12 @@ else()
foreach(arch ${SANITIZER_COMMON_SUPPORTED_ARCH})
add_compiler_rt_object_library(RTSanitizerCommon ${arch}
SOURCES ${SANITIZER_SOURCES} CFLAGS ${SANITIZER_CFLAGS})
+ add_compiler_rt_object_library(RTSanitizerCommonLibc ${arch}
+ SOURCES ${SANITIZER_LIBCDEP_SOURCES} CFLAGS ${SANITIZER_CFLAGS})
+ add_compiler_rt_static_runtime(clang_rt.san-${arch} ${arch}
+ SOURCES $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+ CFLAGS ${SANITIZER_CFLAGS})
list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.${arch})
endforeach()
endif()
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index b13a7c6c14c0..a97a70937a43 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -15,16 +15,16 @@
// FIXME: We should probably use more low-level allocator that would
// mmap some pages and split them into chunks to fulfill requests.
-#if defined(__linux__) && !defined(__ANDROID__)
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(__sanitizer::uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
-#else // __linux__ && !ANDROID
+#else // SANITIZER_LINUX && !SANITIZER_ANDROID
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
-#endif // __linux__ && !ANDROID
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
namespace __sanitizer {
@@ -75,4 +75,10 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
low_level_alloc_callback = callback;
}
+bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
+ if (!size) return false;
+ uptr max = (uptr)-1L;
+ return (max / size) < n;
+}
+
} // namespace __sanitizer
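
The CallocShouldReturnNullDueToOverflow() helper added above detects whether size * n would wrap around without ever performing the multiplication: for a non-zero size, the product overflows exactly when n exceeds max / size. A minimal standalone sketch of the same arithmetic, using uint64_t in place of __sanitizer::uptr (an assumption made only for this illustration):

    #include <cstdint>
    #include <cstdio>

    // Standalone copy of the overflow test, with uint64_t standing in for uptr.
    static bool CallocWouldOverflow(uint64_t size, uint64_t n) {
      if (!size) return false;                 // 0 * n can never overflow
      uint64_t max = ~static_cast<uint64_t>(0);
      return (max / size) < n;                 // true iff size * n > max
    }

    int main() {
      printf("%d\n", CallocWouldOverflow(1ull << 30, 16));          // 0: 2^34 fits
      printf("%d\n", CallocWouldOverflow(1ull << 33, 1ull << 31));  // 1: 2^64 does not
      return 0;
    }

With this check, a tool's calloc interceptor can return 0 early instead of asking the allocator for a wrapped-around size.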
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index ad89c3c870dc..0542addb7f37 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -25,16 +25,16 @@ namespace __sanitizer {
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
-// Classes 1 - 16 correspond to sizes 8 - 128 (size = class_id * 8).
-// Next 8 classes: 128 + i * 16 (i = 1 to 8).
-// Next 8 classes: 256 + i * 32 (i = 1 to 8).
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
-// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8).
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
// - Efficient table-free class-to-size and size-to-class functions.
-// - Difference between two consequent size classes is betweed 12% and 6%
+// - Difference between two consequent size classes is betweed 14% and 25%
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that need to be cached per-thread:
@@ -42,50 +42,70 @@ namespace __sanitizer {
// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
-// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
-// c01 => s: 8 diff: +8 00% l 3 cached: 256 2048; id 1
-// c02 => s: 16 diff: +8 100% l 4 cached: 256 4096; id 2
-// ...
-// c07 => s: 56 diff: +8 16% l 5 cached: 256 14336; id 7
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
-// c08 => s: 64 diff: +8 14% l 6 cached: 256 16384; id 8
-// ...
-// c15 => s: 120 diff: +8 07% l 6 cached: 256 30720; id 15
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
-// c16 => s: 128 diff: +8 06% l 7 cached: 256 32768; id 16
-// c17 => s: 144 diff: +16 12% l 7 cached: 227 32688; id 17
-// ...
-// c23 => s: 240 diff: +16 07% l 7 cached: 136 32640; id 23
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
-// c24 => s: 256 diff: +16 06% l 8 cached: 128 32768; id 24
-// c25 => s: 288 diff: +32 12% l 8 cached: 113 32544; id 25
-// ...
-// c31 => s: 480 diff: +32 07% l 8 cached: 68 32640; id 31
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
-// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32
-
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
-template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog,
- uptr kMinBatchClassT>
+template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
- static const uptr kMinSizeLog = 3;
+ static const uptr kMinSizeLog = 4;
static const uptr kMidSizeLog = kMinSizeLog + 4;
static const uptr kMinSize = 1 << kMinSizeLog;
static const uptr kMidSize = 1 << kMidSizeLog;
static const uptr kMidClass = kMidSize / kMinSize;
- static const uptr S = 3;
+ static const uptr S = 2;
static const uptr M = (1 << S) - 1;
public:
static const uptr kMaxNumCached = kMaxNumCachedT;
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we use one of the chunks to store the batch.
struct TransferBatch {
TransferBatch *next;
uptr count;
void *batch[kMaxNumCached];
};
- static const uptr kMinBatchClass = kMinBatchClassT;
- static const uptr kMaxSize = 1 << kMaxSizeLog;
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
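
To make the new mapping concrete, here is a standalone model of ClassID() as it appears in the following hunks, using the constants from this patch (kMinSizeLog = 4, S = 2, kMaxSizeLog = 17 as in DefaultSizeClassMap) with uint64_t and a GCC/Clang builtin standing in for uptr and MostSignificantSetBitIndex(); the expected class ids come from the table in the comment above:

    #include <cassert>
    #include <cstdint>

    namespace {
    const uint64_t kMinSizeLog = 4;                  // minimum chunk size is 16
    const uint64_t kMidSizeLog = kMinSizeLog + 4;    // 16 linear classes up to 256
    const uint64_t kMinSize = 1ull << kMinSizeLog;
    const uint64_t kMidSize = 1ull << kMidSizeLog;
    const uint64_t kMidClass = kMidSize / kMinSize;  // 16
    const uint64_t S = 2;                            // 4 classes per power of two
    const uint64_t M = (1ull << S) - 1;
    const uint64_t kMaxSize = 1ull << 17;

    uint64_t MostSignificantSetBitIndex(uint64_t x) { return 63 - __builtin_clzll(x); }

    uint64_t ClassID(uint64_t size) {
      if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
      if (size > kMaxSize) return 0;
      uint64_t l = MostSignificantSetBitIndex(size);
      uint64_t hbits = (size >> (l - S)) & M;
      uint64_t lbits = size & ((1ull << (l - S)) - 1);
      uint64_t l1 = l - kMidSizeLog;
      return kMidClass + (l1 << S) + hbits + (lbits != 0);
    }
    }  // namespace

    int main() {
      assert(ClassID(16) == 1);     // c01 => s: 16
      assert(ClassID(256) == 16);   // c16 => s: 256
      assert(ClassID(300) == 17);   // rounds up into c17 => s: 320
      assert(ClassID(2000) == 28);  // rounds up to 2048
      return 0;
    }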
@@ -106,7 +126,7 @@ class SizeClassMap {
if (size <= kMidSize)
return (size + kMinSize - 1) >> kMinSizeLog;
if (size > kMaxSize) return 0;
- uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+ uptr l = MostSignificantSetBitIndex(size);
uptr hbits = (size >> (l - S)) & M;
uptr lbits = size & ((1 << (l - S)) - 1);
uptr l1 = l - kMidSizeLog;
@@ -116,7 +136,7 @@ class SizeClassMap {
static uptr MaxCached(uptr class_id) {
if (class_id == 0) return 0;
uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
- return Max(1UL, Min(kMaxNumCached, n));
+ return Max<uptr>(1, Min(kMaxNumCached, n));
}
static void Print() {
@@ -128,7 +148,7 @@ class SizeClassMap {
Printf("\n");
uptr d = s - prev_s;
uptr p = prev_s ? (d * 100 / prev_s) : 0;
- uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(s);
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
uptr cached = MaxCached(i) * s;
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
"cached: %zd %zd; id %zd\n",
@@ -139,10 +159,16 @@ class SizeClassMap {
Printf("Total cached: %zd\n", total_cached);
}
+ static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
+ return Size(class_id) < sizeof(TransferBatch) -
+ sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
+ }
+
static void Validate() {
for (uptr c = 1; c < kNumClasses; c++) {
// Printf("Validate: c%zd\n", c);
uptr s = Size(c);
+ CHECK_NE(s, 0U);
CHECK_EQ(ClassID(s), c);
if (c != kNumClasses - 1)
CHECK_EQ(ClassID(s + 1), c + 1);
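
SizeClassRequiresSeparateTransferBatch() (added above) compares the chunk size against the bytes a TransferBatch actually uses once the unused tail of batch[] is discounted: small chunks cannot host their own batch, large ones can. A rough model of that comparison for a 64-bit target, with kMaxNumCached = 128 as in the new DefaultSizeClassMap typedef; the 8-byte pointer size and the absence of struct padding are assumptions of this sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kMaxNumCached = 128;           // DefaultSizeClassMap
      const uint64_t kMaxBytesCached = 1ull << 16;  // kMaxBytesCachedLog = 16
      const uint64_t kPtr = 8;                      // sizeof(void*) on LP64 (assumed)
      const uint64_t kBatchHeader = 2 * kPtr;       // TransferBatch::next + count

      const uint64_t sizes[] = {16, 256, 1024, 65536};
      for (uint64_t s : sizes) {
        uint64_t max_cached = kMaxBytesCached / s;
        if (max_cached > kMaxNumCached) max_cached = kMaxNumCached;
        if (max_cached < 1) max_cached = 1;
        // Bytes a TransferBatch really uses when it carries max_cached chunks.
        uint64_t used_batch_bytes = kBatchHeader + max_cached * kPtr;
        bool separate = s < used_batch_bytes;
        printf("size %6llu: batch uses %4llu bytes -> %s\n",
               (unsigned long long)s, (unsigned long long)used_batch_bytes,
               separate ? "allocate the batch separately"
                        : "store the batch inside a chunk");
      }
      return 0;
    }

Under these assumptions a 16- or 256-byte class needs a separately allocated batch, while 1024-byte and larger classes can keep the batch in one of their own chunks, which is the rule the old kMinBatchClass constant used to hard-code.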
@@ -160,26 +186,93 @@ class SizeClassMap {
if (c > 0)
CHECK_LT(Size(c-1), s);
}
-
- // TransferBatch for kMinBatchClass must fit into the block itself.
- const uptr batch_size = sizeof(TransferBatch)
- - sizeof(void*) // NOLINT
- * (kMaxNumCached - MaxCached(kMinBatchClass));
- CHECK_LE(batch_size, Size(kMinBatchClass));
- // TransferBatch for kMinBatchClass-1 must not fit into the block itself.
- const uptr batch_size1 = sizeof(TransferBatch)
- - sizeof(void*) // NOLINT
- * (kMaxNumCached - MaxCached(kMinBatchClass - 1));
- CHECK_GT(batch_size1, Size(kMinBatchClass - 1));
}
};
-typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(33, 36)>
- DefaultSizeClassMap;
-typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(25, 28)>
- CompactSizeClassMap;
+typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatMalloced,
+ AllocatorStatFreed,
+ AllocatorStatMmapped,
+ AllocatorStatUnmapped,
+ AllocatorStatCount
+};
+
+typedef u64 AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ void Add(AllocatorStat i, u64 v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, u64 v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ u64 Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uint64_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ next_ = this;
+ prev_ = this;
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ }
+
+ private:
+ mutable SpinMutex mu_;
+};
+
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
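
The statistics classes above are meant to be used as one global AllocatorGlobalStats plus one AllocatorStats per thread cache: each per-thread object registers itself into an intrusive ring, Unregister() folds its counters into the global object, and Get() sums over everything still registered. A usage sketch, assuming lib/ is on the include path; the function names below are illustrative, not part of the patch:

    #include "sanitizer_common/sanitizer_allocator.h"

    using namespace __sanitizer;

    static AllocatorGlobalStats g_stats;  // g_stats.Init() must run once at startup

    void ExampleThreadBody() {
      AllocatorStats thread_stats;
      thread_stats.Init();
      g_stats.Register(&thread_stats);

      // The allocator fast path only touches the thread-local object.
      thread_stats.Add(AllocatorStatMalloced, 4096);
      thread_stats.Add(AllocatorStatFreed, 4096);

      // Unregister() folds the per-thread counters into the global object.
      g_stats.Unregister(&thread_stats);
    }

    void PrintTotals() {
      AllocatorStatCounters totals;  // u64[AllocatorStatCount]
      g_stats.Get(totals);           // sums over every registered thread
      Printf("malloced: %zd bytes, mmapped: %zd bytes\n",
             (uptr)totals[AllocatorStatMalloced],
             (uptr)totals[AllocatorStatMmapped]);
    }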
@@ -233,18 +326,20 @@ class SizeClassAllocator64 {
alignment <= SizeClassMap::kMaxSize;
}
- Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+ NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
Batch *b = region->free_list.Pop();
if (b == 0)
- b = PopulateFreeList(c, class_id, region);
+ b = PopulateFreeList(stat, c, class_id, region);
region->n_allocated += b->count;
return b;
}
- void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
RegionInfo *region = GetRegionInfo(class_id);
+ CHECK_GT(b->count, 0);
region->free_list.Push(b);
region->n_freed += b->count;
}
@@ -260,10 +355,12 @@ class SizeClassAllocator64 {
void *GetBlockBegin(void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
+ if (!size) return 0;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return 0;
RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
@@ -322,6 +419,38 @@ class SizeClassAllocator64 {
}
}
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over existing chunks. May include chunks that are not currently
+ // allocated to the user (e.g. freed).
+ // The caller is expected to call ForceLock() before calling this function.
+ template<typename Callable>
+ void ForEachChunk(const Callable &callback) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = SizeClassMap::Size(class_id);
+ uptr region_beg = kSpaceBeg + class_id * kRegionSize;
+ for (uptr p = region_beg;
+ p < region_beg + region->allocated_user;
+ p += chunk_size) {
+ // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
+ callback((void *)p);
+ }
+ }
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@@ -336,7 +465,7 @@ class SizeClassAllocator64 {
// or with one element if its size is greater.
static const uptr kPopulateSize = 1 << 14;
// Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 15;
+ static const uptr kUserMapSize = 1 << 16;
// Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16;
@@ -363,15 +492,16 @@ class SizeClassAllocator64 {
}
static uptr GetChunkIdx(uptr chunk, uptr size) {
- u32 offset = chunk % kRegionSize;
+ uptr offset = chunk % kRegionSize;
// Here we divide by a non-constant. This is costly.
- // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
- // We save 2x by using 32-bit div, but may need to use a 256-way switch.
- return offset / (u32)size;
+ // size always fits into 32-bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
}
- Batch *NOINLINE PopulateFreeList(AllocatorCache *c, uptr class_id,
- RegionInfo *region) {
+ NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id, RegionInfo *region) {
BlockingMutexLock l(&region->mutex);
Batch *b = region->free_list.Pop();
if (b)
@@ -388,6 +518,7 @@ class SizeClassAllocator64 {
map_size += kUserMapSize;
CHECK_GE(region->mapped_user + map_size, end_idx);
MapWithCallback(region_beg + region->mapped_user, map_size);
+ stat->Add(AllocatorStatMmapped, map_size);
region->mapped_user += map_size;
}
uptr total_count = (region->mapped_user - beg_idx - size)
@@ -404,14 +535,14 @@ class SizeClassAllocator64 {
region->mapped_meta += map_size;
}
CHECK_LE(region->allocated_meta, region->mapped_meta);
- if (region->allocated_user + region->allocated_meta > kRegionSize) {
- Printf("Out of memory. Dying.\n");
+ if (region->mapped_user + region->mapped_meta > kRegionSize) {
+ Printf("%s: Out of memory. Dying. ", SanitizerToolName);
Printf("The process has exhausted %zuMB for size class %zu.\n",
kRegionSize / 1024 / 1024, size);
Die();
}
for (;;) {
- if (class_id < SizeClassMap::kMinBatchClass)
+ if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)(region_beg + beg_idx);
@@ -423,12 +554,37 @@ class SizeClassAllocator64 {
beg_idx += count * size;
if (beg_idx + count * size + size > region->mapped_user)
break;
+ CHECK_GT(b->count, 0);
region->free_list.Push(b);
}
return b;
}
};
+// Maps integers in rage [0, kSize) to u8 values.
+template<u64 kSize>
+class FlatByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// FIXME: Also implement TwoLevelByteMap.
+
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
@@ -440,7 +596,7 @@ class SizeClassAllocator64 {
// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
-// an u8 array possible_regions[kNumPossibleRegions] to store the size classes.
+// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
@@ -451,16 +607,19 @@ class SizeClassAllocator64 {
// chache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
const uptr kMetadataSize, class SizeClassMap,
+ const uptr kRegionSizeLog,
+ class ByteMap,
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
public:
typedef typename SizeClassMap::TransferBatch Batch;
typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, MapUnmapCallback> ThisT;
+ SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
void Init() {
- state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
+ possible_regions.TestOnlyInit();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
void *MapWithCallback(uptr size) {
@@ -469,6 +628,7 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap((uptr)res, size);
return res;
}
+
void UnmapWithCallback(uptr beg, uptr size) {
MapUnmapCallback().OnUnmap(beg, size);
UnmapOrDie(reinterpret_cast<void *>(beg), size);
@@ -490,22 +650,24 @@ class SizeClassAllocator32 {
return reinterpret_cast<void*>(meta);
}
- Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+ NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
if (sci->free_list.empty())
- PopulateFreeList(c, sci, class_id);
+ PopulateFreeList(stat, c, sci, class_id);
CHECK(!sci->free_list.empty());
Batch *b = sci->free_list.front();
sci->free_list.pop_front();
return b;
}
- void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
+ CHECK_GT(b->count, 0);
sci->free_list.push_front(b);
}
@@ -514,7 +676,7 @@ class SizeClassAllocator32 {
}
uptr GetSizeClass(void *p) {
- return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
}
void *GetBlockBegin(void *p) {
@@ -539,16 +701,48 @@ class SizeClassAllocator32 {
// No need to lock here.
uptr res = 0;
for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (state_->possible_regions[i])
+ if (possible_regions[i])
res += kRegionSize;
return res;
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (state_->possible_regions[i])
+ if (possible_regions[i])
UnmapWithCallback((i * kRegionSize), kRegionSize);
- UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over existing chunks. May include chunks that are not currently
+ // allocated to the user (e.g. freed).
+ // The caller is expected to call ForceLock() before calling this function.
+ template<typename Callable>
+ void ForEachChunk(const Callable &callback) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr p = region_beg;
+ p < region_beg + max_chunks_in_region * chunk_size;
+ p += chunk_size) {
+ // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
+ callback((void *)p);
+ }
+ }
}
void PrintStats() {
@@ -558,7 +752,6 @@ class SizeClassAllocator32 {
static const uptr kNumClasses = SizeClassMap::kNumClasses;
private:
- static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
@@ -579,31 +772,32 @@ class SizeClassAllocator32 {
return mem & ~(kRegionSize - 1);
}
- uptr AllocateRegion(uptr class_id) {
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMmapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
- CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
- state_->possible_regions[ComputeRegionId(res)] = class_id;
+ possible_regions.set(ComputeRegionId(res), class_id);
return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- return &state_->size_class_info_array[class_id];
+ return &size_class_info_array[class_id];
}
- void PopulateFreeList(AllocatorCache *c, SizeClassInfo *sci, uptr class_id) {
+ void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
uptr size = SizeClassMap::Size(class_id);
- uptr reg = AllocateRegion(class_id);
+ uptr reg = AllocateRegion(stat, class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = SizeClassMap::MaxCached(class_id);
Batch *b = 0;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (b == 0) {
- if (class_id < SizeClassMap::kMinBatchClass)
+ if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)i;
@@ -611,19 +805,19 @@ class SizeClassAllocator32 {
}
b->batch[b->count++] = (void*)i;
if (b->count == max_count) {
+ CHECK_GT(b->count, 0);
sci->free_list.push_back(b);
b = 0;
}
}
- if (b)
+ if (b) {
+ CHECK_GT(b->count, 0);
sci->free_list.push_back(b);
+ }
}
- struct State {
- u8 possible_regions[kNumPossibleRegions];
- SizeClassInfo size_class_info_array[kNumClasses];
- };
- State *state_;
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
};
// Objects of this type should be used as local caches for SizeClassAllocator64
@@ -634,14 +828,22 @@ struct SizeClassAllocatorLocalCache {
typedef SizeClassAllocator Allocator;
static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
- // Don't need to call Init if the object is a global (i.e. zero-initialized).
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
}
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -653,7 +855,12 @@ struct SizeClassAllocatorLocalCache {
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(allocator, class_id);
c->batch[c->count++] = p;
@@ -676,9 +883,10 @@ struct SizeClassAllocatorLocalCache {
void *batch[2 * SizeClassMap::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
void InitCache() {
- if (per_class_[0].max_count)
+ if (per_class_[1].max_count)
return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
@@ -686,22 +894,23 @@ struct SizeClassAllocatorLocalCache {
}
}
- void NOINLINE Refill(SizeClassAllocator *allocator, uptr class_id) {
+ NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(this, class_id);
+ Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ CHECK_GT(b->count, 0);
for (uptr i = 0; i < b->count; i++)
c->batch[i] = b->batch[i];
c->count = b->count;
- if (class_id < SizeClassMap::kMinBatchClass)
+ if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
}
- void NOINLINE Drain(SizeClassAllocator *allocator, uptr class_id) {
+ NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
Batch *b;
- if (class_id < SizeClassMap::kMinBatchClass)
+ if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)c->batch[0];
@@ -712,7 +921,8 @@ struct SizeClassAllocatorLocalCache {
}
b->count = cnt;
c->count -= cnt;
- allocator->DeallocateBatch(class_id, b);
+ CHECK_GT(b->count, 0);
+ allocator->DeallocateBatch(&stats_, class_id, b);
}
};
@@ -727,7 +937,7 @@ class LargeMmapAllocator {
page_size_ = GetPageSizeCached();
}
- void *Allocate(uptr size, uptr alignment) {
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
@@ -746,7 +956,7 @@ class LargeMmapAllocator {
h->size = size;
h->map_beg = map_beg;
h->map_size = map_size;
- uptr size_log = SANITIZER_WORDSIZE - __builtin_clzl(map_size) - 1;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
{
SpinMutexLock l(&mutex_);
@@ -758,11 +968,13 @@ class LargeMmapAllocator {
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatMalloced, map_size);
+ stat->Add(AllocatorStatMmapped, map_size);
}
return reinterpret_cast<void*>(res);
}
- void Deallocate(void *p) {
+ void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
SpinMutexLock l(&mutex_);
@@ -774,6 +986,8 @@ class LargeMmapAllocator {
n_chunks_--;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
+ stat->Add(AllocatorStatFreed, h->map_size);
+ stat->Add(AllocatorStatUnmapped, h->map_size);
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
@@ -822,7 +1036,7 @@ class LargeMmapAllocator {
CHECK_GE(nearest_chunk, h->map_beg);
CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
CHECK_LE(nearest_chunk, p);
- if (h->map_beg + h->map_size < p)
+ if (h->map_beg + h->map_size <= p)
return 0;
return GetUser(h);
}
@@ -840,6 +1054,25 @@ class LargeMmapAllocator {
Printf("\n");
}
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over existing chunks. May include chunks that are not currently
+ // allocated to the user (e.g. freed).
+ // The caller is expected to call ForceLock() before calling this function.
+ template<typename Callable>
+ void ForEachChunk(const Callable &callback) {
+ for (uptr i = 0; i < n_chunks_; i++)
+ callback(GetUser(chunks_[i]));
+ }
+
private:
static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
struct Header {
@@ -886,6 +1119,7 @@ class CombinedAllocator {
void Init() {
primary_.Init();
secondary_.Init();
+ stats_.Init();
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
@@ -901,7 +1135,7 @@ class CombinedAllocator {
if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
- res = secondary_.Allocate(size, alignment);
+ res = secondary_.Allocate(&stats_, size, alignment);
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res)
@@ -914,7 +1148,7 @@ class CombinedAllocator {
if (primary_.PointerIsMine(p))
cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
else
- secondary_.Deallocate(p);
+ secondary_.Deallocate(&stats_, p);
}
void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
@@ -969,20 +1203,57 @@ class CombinedAllocator {
void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
void SwallowCache(AllocatorCache *cache) {
cache->Drain(&primary_);
}
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
void PrintStats() {
primary_.PrintStats();
secondary_.PrintStats();
}
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ // Iterate over existing chunks. May include chunks that are not currently
+ // allocated to the user (e.g. freed).
+ // The caller is expected to call ForceLock() before calling this function.
+ template<typename Callable>
+ void ForEachChunk(const Callable &callback) {
+ primary_.ForEachChunk(callback);
+ secondary_.ForEachChunk(callback);
+ }
+
private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
};
+// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
+bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
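
The ForceLock()/ForEachChunk()/ForceUnlock() trio added throughout this header is designed to be used as a bracket: stop allocator mutation, walk every chunk (which may include freed ones), then release the locks in reverse order. A sketch of that pattern; "Allocator" is a placeholder for a tool's CombinedAllocator instantiation, not a type defined by this patch:

    #include "sanitizer_common/sanitizer_allocator.h"

    // Counts chunks; operator() is const because ForEachChunk takes a const ref.
    struct CountingCallback {
      CountingCallback() : chunks(0) {}
      void operator()(void *chunk) const { (void)chunk; ++chunks; }
      mutable __sanitizer::uptr chunks;
    };

    template <class Allocator>
    void CountAllChunks(Allocator *a) {
      CountingCallback cb;
      a->ForceLock();       // take the primary and secondary allocator locks
      a->ForEachChunk(cb);  // may visit freed chunks as well as live ones
      a->ForceUnlock();     // release the locks in reverse order
      __sanitizer::Printf("visited %zd chunks\n", cb.chunks);
    }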
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
index 7f73df3bd455..30158b49683c 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -113,9 +113,9 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a,
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
- typename T::Type *cmp,
- typename T::Type xchg,
- memory_order mo) {
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
diff --git a/lib/sanitizer_common/sanitizer_atomic_msvc.h b/lib/sanitizer_common/sanitizer_atomic_msvc.h
index 58a6a20ec9c5..dc22ef05e589 100644
--- a/lib/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/lib/sanitizer_common/sanitizer_atomic_msvc.h
@@ -134,6 +134,27 @@ INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
return v;
}
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
+ u8 *cmp,
+ u8 xchgv,
+ memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ u8 cmpv = *cmp;
+ u8 prev;
+ __asm {
+ mov al, cmpv
+ mov ecx, a
+ mov dl, xchgv
+ lock cmpxchg [ecx], dl
+ mov prev, al
+ }
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
@@ -149,9 +170,9 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
- typename T::Type *cmp,
- typename T::Type xchg,
- memory_order mo) {
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
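
The new 8-bit compare-exchange above follows the same contract as the existing overloads: it returns true when the stored value equalled *cmp and was replaced by xchg, and otherwise writes the observed value back into *cmp. A typical call site, sketched against the sanitizer atomic API (the include path and the TryClaimInit name are assumptions of this example):

    #include "sanitizer_common/sanitizer_atomic.h"

    static __sanitizer::atomic_uint8_t init_flag;

    // Returns true for exactly one caller; everyone else finds the flag set.
    bool TryClaimInit() {
      using namespace __sanitizer;
      u8 expected = 0;
      // On failure, |expected| is updated to the value actually observed.
      return atomic_compare_exchange_strong(&init_flag, &expected, 1,
                                            memory_order_acq_rel);
    }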
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 4a8d9a749bf8..abbe5f92d1a9 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -16,6 +16,9 @@
namespace __sanitizer {
+const char *SanitizerToolName = "SanitizerTool";
+uptr SanitizerVerbosity = 0;
+
uptr GetPageSizeCached() {
static uptr PageSize;
if (!PageSize)
@@ -28,11 +31,11 @@ static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
// isn't equal to the current PID, try to obtain file descriptor by opening
// file "report_path_prefix.<PID>".
-static fd_t report_fd = kStderrFd;
+fd_t report_fd = kStderrFd;
static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
// child thread will be different from |report_fd_pid|.
-static int report_fd_pid = 0;
+static uptr report_fd_pid = 0;
static void (*DieCallback)(void);
void SetDieCallback(void (*callback)(void)) {
@@ -43,7 +46,7 @@ void NORETURN Die() {
if (DieCallback) {
DieCallback();
}
- Exit(1);
+ internal__exit(1);
}
static CheckFailedCallbackType CheckFailedCallback;
@@ -61,29 +64,24 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
Die();
}
-static void MaybeOpenReportFile() {
- if (!log_to_file || (report_fd_pid == GetPid())) return;
- char report_path_full[4096];
- internal_snprintf(report_path_full, sizeof(report_path_full),
- "%s.%d", report_path_prefix, GetPid());
- fd_t fd = internal_open(report_path_full, true);
- if (fd == kInvalidFd) {
+void MaybeOpenReportFile() {
+ if (!log_to_file || (report_fd_pid == internal_getpid())) return;
+ InternalScopedBuffer<char> report_path_full(4096);
+ internal_snprintf(report_path_full.data(), report_path_full.size(),
+ "%s.%d", report_path_prefix, internal_getpid());
+ uptr openrv = OpenFile(report_path_full.data(), true);
+ if (internal_iserror(openrv)) {
report_fd = kStderrFd;
log_to_file = false;
- Report("ERROR: Can't open file: %s\n", report_path_full);
+ Report("ERROR: Can't open file: %s\n", report_path_full.data());
Die();
}
if (report_fd != kInvalidFd) {
// We're in the child. Close the parent's log.
internal_close(report_fd);
}
- report_fd = fd;
- report_fd_pid = GetPid();
-}
-
-bool PrintsToTty() {
- MaybeOpenReportFile();
- return internal_isatty(report_fd);
+ report_fd = openrv;
+ report_fd_pid = internal_getpid();
}
void RawWrite(const char *buffer) {
@@ -105,8 +103,9 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
*buff_size = 0;
// The files we usually open are not seekable, so try different buffer sizes.
for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
- fd_t fd = internal_open(file_name, /*write*/ false);
- if (fd == kInvalidFd) return 0;
+ uptr openrv = OpenFile(file_name, /*write*/ false);
+ if (internal_iserror(openrv)) return 0;
+ fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
*buff = (char*)MmapOrDie(size, __FUNCTION__);
*buff_size = size;
@@ -128,45 +127,15 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
return read_len;
}
-// We don't want to use std::sort to avoid including <algorithm>, as
-// we may end up with two implementation of std::sort - one in instrumented
-// code, and the other in runtime.
-// qsort() from stdlib won't work as it calls malloc(), which results
-// in deadlock in ASan allocator.
-// We re-implement in-place sorting w/o recursion as straightforward heapsort.
+typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+
+template<class T>
+static inline bool CompareLess(const T &a, const T &b) {
+ return a < b;
+}
+
void SortArray(uptr *array, uptr size) {
- if (size < 2)
- return;
- // Stage 1: insert elements to the heap.
- for (uptr i = 1; i < size; i++) {
- uptr j, p;
- for (j = i; j > 0; j = p) {
- p = (j - 1) / 2;
- if (array[j] > array[p])
- Swap(array[j], array[p]);
- else
- break;
- }
- }
- // Stage 2: swap largest element with the last one,
- // and sink the new top.
- for (uptr i = size - 1; i > 0; i--) {
- Swap(array[0], array[i]);
- uptr j, max_ind;
- for (j = 0; j < i; j = max_ind) {
- uptr left = 2 * j + 1;
- uptr right = 2 * j + 2;
- max_ind = j;
- if (left < i && array[left] > array[max_ind])
- max_ind = left;
- if (right < i && array[right] > array[max_ind])
- max_ind = right;
- if (max_ind != j)
- Swap(array[j], array[max_ind]);
- else
- break;
- }
- }
+ InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
// We want to map a chunk of address space aligned to 'alignment'.
@@ -190,6 +159,16 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
return (void*)res;
}
+void ReportErrorSummary(const char *error_type, const char *file,
+ int line, const char *function) {
+ const int kMaxSize = 1024; // We don't want a summary too long.
+ InternalScopedBuffer<char> buff(kMaxSize);
+ internal_snprintf(buff.data(), kMaxSize, "%s: %s %s:%d %s",
+ SanitizerToolName, error_type,
+ file ? file : "??", line, function ? function : "??");
+ __sanitizer_report_error_summary(buff.data());
+}
+
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
@@ -222,4 +201,8 @@ void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
(void)reserved;
PrepareForSandboxing();
}
+
+void __sanitizer_report_error_summary(const char *error_summary) {
+ Printf("SUMMARY: %s\n", error_summary);
+}
} // extern "C"
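
ReportErrorSummary() (declared in sanitizer_common.h and defined in this file) formats a single "ToolName: error_type file:line function" line and hands it to __sanitizer_report_error_summary(), whose default implementation above prefixes it with "SUMMARY: ". For example:

    #include "sanitizer_common/sanitizer_common.h"

    void EmitExampleSummary() {
      // The arguments here are illustrative, not taken from a real report.
      __sanitizer::ReportErrorSummary("example-error", "foo.cc", 42, "main");
    }

With the default SanitizerToolName this prints: SUMMARY: SanitizerTool: example-error foo.cc:42 main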
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 1d002398c785..d800360169fb 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -17,8 +17,11 @@
#define SANITIZER_COMMON_H
#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
+struct StackTrace;
// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
@@ -30,15 +33,19 @@ const uptr kCacheLineSize = 128;
const uptr kCacheLineSize = 64;
#endif
+extern const char *SanitizerToolName; // Can be changed by the tool.
+extern uptr SanitizerVerbosity;
+
uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
// Threads
-int GetPid();
uptr GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom);
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size);
// Memory management
void *MmapOrDie(uptr size, const char *mem_type);
@@ -104,7 +111,12 @@ bool PrintsToTty();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
+// Can be used to prevent mixing error reports from different sanitizers.
+extern StaticSpinMutex CommonSanitizerReportMutex;
+void MaybeOpenReportFile();
+extern fd_t report_fd;
+uptr OpenFile(const char *filename, bool write);
// Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size',
@@ -121,21 +133,26 @@ void DisableCoreDumper();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
+bool SetEnv(const char *name, const char *value);
const char *GetPwd();
+u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
void PrepareForSandboxing();
+void InitTlsSize();
+uptr GetTlsSize();
+
// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
+u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
// Exit
void NORETURN Abort();
-void NORETURN Exit(int exitcode);
void NORETURN Die();
void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
@@ -154,20 +171,79 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
+// Construct a one-line string like
+// SanitizerToolName: error_type file:line function
+// and call __sanitizer_report_error_summary on it.
+void ReportErrorSummary(const char *error_type, const char *file,
+ int line, const char *function);
+
// Math
+#if SANITIZER_WINDOWS && !defined(__clang__)
+extern "C" {
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
+#if defined(_WIN64)
+unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT
+unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT
+#endif
+}
+#endif
+
+INLINE uptr MostSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up; // NOLINT
+#if !SANITIZER_WINDOWS || defined(__clang__)
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, x);
+#else
+ _BitScanReverse(&up, x);
+#endif
+ return up;
+}
+
INLINE bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
+
+INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+ CHECK(size);
+ if (IsPowerOfTwo(size)) return size;
+
+ uptr up = MostSignificantSetBitIndex(size);
+ CHECK(size < (1ULL << (up + 1)));
+ CHECK(size > (1ULL << up));
+ return 1UL << (up + 1);
+}
+
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
+
INLINE uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
+
INLINE bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
+
+INLINE uptr Log2(uptr x) {
+ CHECK(IsPowerOfTwo(x));
+#if !SANITIZER_WINDOWS || defined(__clang__)
+ return __builtin_ctzl(x);
+#elif defined(_WIN64)
+ unsigned long ret; // NOLINT
+ _BitScanForward64(&ret, x);
+ return ret;
+#else
+ unsigned long ret; // NOLINT
+ _BitScanForward(&ret, x);
+ return ret;
+#endif
+}
+
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
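
The contracts of the new bit helpers are easiest to see on a concrete value. A standalone illustration reimplementing them over uint64_t with the GCC/Clang builtins (the header additionally dispatches to _BitScanForward/_BitScanReverse on MSVC, which this sketch omits):

    #include <cassert>
    #include <cstdint>

    static uint64_t MostSignificantSetBitIndex(uint64_t x) {
      return 63 - __builtin_clzll(x);          // index of the highest set bit
    }
    static uint64_t RoundUpToPowerOfTwo(uint64_t x) {
      if ((x & (x - 1)) == 0) return x;        // already a power of two
      return 1ull << (MostSignificantSetBitIndex(x) + 1);
    }
    static uint64_t Log2(uint64_t x) {         // x must be a power of two
      return __builtin_ctzll(x);
    }

    int main() {
      assert(MostSignificantSetBitIndex(40) == 5);   // 40 = 0b101000
      assert(RoundUpToPowerOfTwo(40) == 64);
      assert(RoundUpToPowerOfTwo(64) == 64);
      assert(Log2(64) == 6);
      return 0;
    }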
@@ -196,6 +272,113 @@ INLINE int ToLower(int c) {
# define FIRST_32_SECOND_64(a, b) (a)
#endif
+// A low-level vector based on mmap. May incur a significant memory overhead for
+// small vectors.
+// WARNING: The current implementation supports only POD types.
+template<typename T>
+class InternalVector {
+ public:
+ explicit InternalVector(uptr initial_capacity) {
+ CHECK_GT(initial_capacity, 0);
+ capacity_ = initial_capacity;
+ size_ = 0;
+ data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalVector");
+ }
+ ~InternalVector() {
+ UnmapOrDie(data_, capacity_ * sizeof(T));
+ }
+ T &operator[](uptr i) {
+ CHECK_LT(i, size_);
+ return data_[i];
+ }
+ const T &operator[](uptr i) const {
+ CHECK_LT(i, size_);
+ return data_[i];
+ }
+ void push_back(const T &element) {
+ CHECK_LE(size_, capacity_);
+ if (size_ == capacity_) {
+ uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
+ Resize(new_capacity);
+ }
+ data_[size_++] = element;
+ }
+ T &back() {
+ CHECK_GT(size_, 0);
+ return data_[size_ - 1];
+ }
+ void pop_back() {
+ CHECK_GT(size_, 0);
+ size_--;
+ }
+ uptr size() const {
+ return size_;
+ }
+ const T *data() const {
+ return data_;
+ }
+ uptr capacity() const {
+ return capacity_;
+ }
+
+ private:
+ void Resize(uptr new_capacity) {
+ CHECK_GT(new_capacity, 0);
+ CHECK_LE(size_, new_capacity);
+ T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
+ "InternalVector");
+ internal_memcpy(new_data, data_, size_ * sizeof(T));
+ T *old_data = data_;
+ data_ = new_data;
+ UnmapOrDie(old_data, capacity_ * sizeof(T));
+ capacity_ = new_capacity;
+ }
+ // Disallow evil constructors.
+ InternalVector(const InternalVector&);
+ void operator=(const InternalVector&);
+
+ T *data_;
+ uptr capacity_;
+ uptr size_;
+};
+
+// HeapSort for arrays and InternalVector.
+template<class Container, class Compare>
+void InternalSort(Container *v, uptr size, Compare comp) {
+ if (size < 2)
+ return;
+ // Stage 1: insert elements to the heap.
+ for (uptr i = 1; i < size; i++) {
+ uptr j, p;
+ for (j = i; j > 0; j = p) {
+ p = (j - 1) / 2;
+ if (comp((*v)[p], (*v)[j]))
+ Swap((*v)[j], (*v)[p]);
+ else
+ break;
+ }
+ }
+ // Stage 2: swap largest element with the last one,
+ // and sink the new top.
+ for (uptr i = size - 1; i > 0; i--) {
+ Swap((*v)[0], (*v)[i]);
+ uptr j, max_ind;
+ for (j = 0; j < i; j = max_ind) {
+ uptr left = 2 * j + 1;
+ uptr right = 2 * j + 2;
+ max_ind = j;
+ if (left < i && comp((*v)[max_ind], (*v)[left]))
+ max_ind = left;
+ if (right < i && comp((*v)[max_ind], (*v)[right]))
+ max_ind = right;
+ if (max_ind != j)
+ Swap((*v)[j], (*v)[max_ind]);
+ else
+ break;
+ }
+ }
+}
+
} // namespace __sanitizer
#endif // SANITIZER_COMMON_H
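
InternalVector and InternalSort are meant to be used together (SortArray() above is now just InternalSort over a raw uptr array). A usage sketch, assuming sanitizer_common.h is on the include path:

    #include "sanitizer_common/sanitizer_common.h"

    namespace __sanitizer {

    static bool UptrLess(const uptr &a, const uptr &b) { return a < b; }

    void InternalVectorExample() {
      InternalVector<uptr> v(/*initial_capacity*/ 4);  // mmap-backed, POD-only
      v.push_back(30);
      v.push_back(10);
      v.push_back(20);
      // Heapsort the vector in place; the comparator is "less than", so the
      // result is ascending.
      InternalSort(&v, v.size(), UptrLess);
      CHECK_EQ(v[0], 10U);
      CHECK_EQ(v.back(), 30U);
    }

    }  // namespace __sanitizer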
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 8bc2e8b5c292..8c0fb55f3ce9 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -24,9 +24,97 @@
#include <stdarg.h>
+#if SANITIZER_WINDOWS
+#define va_copy(dst, src) ((dst) = (src))
+#endif // _WIN32
+
+#if SANITIZER_INTERCEPT_STRCASECMP
+static inline int CharCaseCmp(unsigned char c1, unsigned char c2) {
+ int c1_low = ToLower(c1);
+ int c2_low = ToLower(c2);
+ return c1_low - c2_low;
+}
+
+INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0; ; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (CharCaseCmp(c1, c2) != 0 || c1 == '\0')
+ break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, i + 1);
+ return CharCaseCmp(c1, c2);
+}
+
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0; i < n; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (CharCaseCmp(c1, c2) != 0 || c1 == '\0')
+ break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n));
+ return CharCaseCmp(c1, c2);
+}
+
+#define INIT_STRCASECMP INTERCEPT_FUNCTION(strcasecmp)
+#define INIT_STRNCASECMP INTERCEPT_FUNCTION(strncasecmp)
+#else
+#define INIT_STRCASECMP
+#define INIT_STRNCASECMP
+#endif
+
+#if SANITIZER_INTERCEPT_FREXP
+INTERCEPTOR(double, frexp, double x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);
+ double res = REAL(frexp)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+#define INIT_FREXP INTERCEPT_FUNCTION(frexp);
+#else
+#define INIT_FREXP
+#endif // SANITIZER_INTERCEPT_FREXP
+
+#if SANITIZER_INTERCEPT_FREXPF_FREXPL
+INTERCEPTOR(float, frexpf, float x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
+ float res = REAL(frexpf)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+INTERCEPTOR(long double, frexpl, long double x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
+ long double res = REAL(frexpl)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+#define INIT_FREXPF_FREXPL \
+ INTERCEPT_FUNCTION(frexpf); \
+ INTERCEPT_FUNCTION(frexpl)
+#else
+#define INIT_FREXPF_FREXPL
+#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL
+
#if SANITIZER_INTERCEPT_READ
INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
- void* ctx;
+ void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0)
@@ -35,14 +123,14 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
-# define INIT_READ INTERCEPT_FUNCTION(read)
+#define INIT_READ INTERCEPT_FUNCTION(read)
#else
-# define INIT_READ
+#define INIT_READ
#endif
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
- void* ctx;
+ void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0)
@@ -51,14 +139,14 @@ INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
-# define INIT_PREAD INTERCEPT_FUNCTION(pread)
+#define INIT_PREAD INTERCEPT_FUNCTION(pread)
#else
-# define INIT_PREAD
+#define INIT_PREAD
#endif
#if SANITIZER_INTERCEPT_PREAD64
INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
- void* ctx;
+ void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0)
@@ -67,14 +155,14 @@ INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
-# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
+#define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
#else
-# define INIT_PREAD64
+#define INIT_PREAD64
#endif
#if SANITIZER_INTERCEPT_WRITE
INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
- void* ctx;
+ void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
@@ -83,142 +171,821 @@ INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
-# define INIT_WRITE INTERCEPT_FUNCTION(write)
+#define INIT_WRITE INTERCEPT_FUNCTION(write)
#else
-# define INIT_WRITE
+#define INIT_WRITE
#endif
#if SANITIZER_INTERCEPT_PWRITE
-INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count) {
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count);
+INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
- SSIZE_T res = REAL(pwrite)(fd, ptr, count);
+ SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
-# define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
+#define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
#else
-# define INIT_PWRITE
+#define INIT_PWRITE
#endif
#if SANITIZER_INTERCEPT_PWRITE64
-INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count) {
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count);
+INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
- SSIZE_T res = REAL(pwrite64)(fd, ptr, count);
+ SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
-# define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
+#define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
#else
-# define INIT_PWRITE64
+#define INIT_PWRITE64
#endif
#if SANITIZER_INTERCEPT_PRCTL
INTERCEPTOR(int, prctl, int option,
- unsigned long arg2, unsigned long arg3, // NOLINT
- unsigned long arg4, unsigned long arg5) { // NOLINT
- void* ctx;
+ unsigned long arg2, unsigned long arg3, // NOLINT
+ unsigned long arg4, unsigned long arg5) { // NOLINT
+ void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
if (option == PR_SET_NAME) {
char buff[16];
- internal_strncpy(buff, (char*)arg2, 15);
+ internal_strncpy(buff, (char *)arg2, 15);
buff[15] = 0;
COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
}
return res;
}
-# define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
+#define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
#else
-# define INIT_PRCTL
-#endif // SANITIZER_INTERCEPT_PRCTL
+#define INIT_PRCTL
+#endif // SANITIZER_INTERCEPT_PRCTL
+#if SANITIZER_INTERCEPT_TIME
+INTERCEPTOR(unsigned long, time, unsigned long *t) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, time, t);
+ unsigned long res = REAL(time)(t);
+ if (t && res != (unsigned long)-1) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));
+ }
+ return res;
+}
+#define INIT_TIME \
+ INTERCEPT_FUNCTION(time);
+#else
+#define INIT_TIME
+#endif // SANITIZER_INTERCEPT_TIME
+
+
+#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+INTERCEPTOR(void *, localtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep);
+ void *res = REAL(localtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(void *, localtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result);
+ void *res = REAL(localtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(void *, gmtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep);
+ void *res = REAL(gmtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(void *, gmtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result);
+ void *res = REAL(gmtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ char *res = REAL(ctime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ char *res = REAL(ctime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime, void *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ char *res = REAL(asctime)(tm);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, struct_tm_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime_r, void *tm, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ char *res = REAL(asctime_r)(tm, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, struct_tm_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_LOCALTIME_AND_FRIENDS \
+ INTERCEPT_FUNCTION(localtime); \
+ INTERCEPT_FUNCTION(localtime_r); \
+ INTERCEPT_FUNCTION(gmtime); \
+ INTERCEPT_FUNCTION(gmtime_r); \
+ INTERCEPT_FUNCTION(ctime); \
+ INTERCEPT_FUNCTION(ctime_r); \
+ INTERCEPT_FUNCTION(asctime); \
+ INTERCEPT_FUNCTION(asctime_r);
+#else
+#define INIT_LOCALTIME_AND_FRIENDS
+#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+
#if SANITIZER_INTERCEPT_SCANF
#include "sanitizer_common_interceptors_scanf.inc"
-INTERCEPTOR(int, vscanf, const char *format, va_list ap) { // NOLINT
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, vscanf, format, ap);
- scanf_common(ctx, format, ap);
- int res = REAL(vscanf)(format, ap); // NOLINT
+#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ if (res > 0) \
+ scanf_common(ctx, res, allowGnuMalloc, format, aq); \
+ va_end(aq); \
+ return res; \
+ }
+
+INTERCEPTOR(int, vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap)
+
+INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap)
+
+INTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
+
+#define SCANF_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, name, __VA_ARGS__); \
+ va_list ap; \
+ va_start(ap, format); \
+ int res = vname(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
+
+INTERCEPTOR(int, scanf, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(scanf, vscanf, format)
+
+INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+
+INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+
+INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+#endif
+
+#define INIT_SCANF \
+ INTERCEPT_FUNCTION(scanf); \
+ INTERCEPT_FUNCTION(sscanf); \
+ INTERCEPT_FUNCTION(fscanf); \
+ INTERCEPT_FUNCTION(vscanf); \
+ INTERCEPT_FUNCTION(vsscanf); \
+ INTERCEPT_FUNCTION(vfscanf); \
+ INTERCEPT_FUNCTION(__isoc99_scanf); \
+ INTERCEPT_FUNCTION(__isoc99_sscanf); \
+ INTERCEPT_FUNCTION(__isoc99_fscanf); \
+ INTERCEPT_FUNCTION(__isoc99_vscanf); \
+ INTERCEPT_FUNCTION(__isoc99_vsscanf); \
+ INTERCEPT_FUNCTION(__isoc99_vfscanf);
+
+#else
+#define INIT_SCANF
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+INTERCEPTOR(void *, getpwnam, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ void *res = REAL(getpwnam)(name);
+ if (res != 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ return res;
+}
+INTERCEPTOR(void *, getpwuid, u32 uid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);
+ void *res = REAL(getpwuid)(uid);
+ if (res != 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ return res;
+}
+INTERCEPTOR(void *, getgrnam, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ void *res = REAL(getgrnam)(name);
+ if (res != 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
return res;
}
+INTERCEPTOR(void *, getgrgid, u32 gid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);
+ void *res = REAL(getgrgid)(gid);
+ if (res != 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ return res;
+}
+#define INIT_GETPWNAM_AND_FRIENDS \
+ INTERCEPT_FUNCTION(getpwnam); \
+ INTERCEPT_FUNCTION(getpwuid); \
+ INTERCEPT_FUNCTION(getgrnam); \
+ INTERCEPT_FUNCTION(getgrgid);
+#else
+#define INIT_GETPWNAM_AND_FRIENDS
+#endif
+
-INTERCEPTOR(int, vsscanf, const char *str, const char *format, // NOLINT
- va_list ap) {
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap);
- scanf_common(ctx, format, ap);
- int res = REAL(vsscanf)(str, format, ap); // NOLINT
- // FIXME: read of str
+#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+INTERCEPTOR(int, getpwnam_r, const char *name, void *pwd,
+ char *buf, SIZE_T buflen, void **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
return res;
}
+INTERCEPTOR(int, getpwuid_r, u32 uid, void *pwd,
+ char *buf, SIZE_T buflen, void **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
+ int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ return res;
+}
+INTERCEPTOR(int, getgrnam_r, const char *name, void *grp,
+ char *buf, SIZE_T buflen, void **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ return res;
+}
+INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp,
+ char *buf, SIZE_T buflen, void **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
+ int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ return res;
+}
+#define INIT_GETPWNAM_R_AND_FRIENDS \
+ INTERCEPT_FUNCTION(getpwnam_r); \
+ INTERCEPT_FUNCTION(getpwuid_r); \
+ INTERCEPT_FUNCTION(getgrnam_r); \
+ INTERCEPT_FUNCTION(getgrgid_r);
+#else
+#define INIT_GETPWNAM_R_AND_FRIENDS
+#endif
+
-INTERCEPTOR(int, vfscanf, void *stream, const char *format, // NOLINT
- va_list ap) {
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, vfscanf, stream, format, ap);
- scanf_common(ctx, format, ap);
- int res = REAL(vfscanf)(stream, format, ap); // NOLINT
+#if SANITIZER_INTERCEPT_CLOCK_GETTIME
+INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
+ int res = REAL(clock_getres)(clk_id, tp);
+ if (!res && tp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
+ int res = REAL(clock_gettime)(clk_id, tp);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
+ }
return res;
}
+INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tp, struct_timespec_sz);
+ return REAL(clock_settime)(clk_id, tp);
+}
+#define INIT_CLOCK_GETTIME \
+ INTERCEPT_FUNCTION(clock_getres); \
+ INTERCEPT_FUNCTION(clock_gettime); \
+ INTERCEPT_FUNCTION(clock_settime);
+#else
+#define INIT_CLOCK_GETTIME
+#endif
-INTERCEPTOR(int, scanf, const char *format, ...) { // NOLINT
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, scanf, format);
- va_list ap;
- va_start(ap, format);
- int res = vscanf(format, ap); // NOLINT
- va_end(ap);
+
+#if SANITIZER_INTERCEPT_GETITIMER
+INTERCEPTOR(int, getitimer, int which, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
+ int res = REAL(getitimer)(which, curr_value);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz);
+ int res = REAL(setitimer)(which, new_value, old_value);
+ if (!res && old_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
+ }
return res;
}
+#define INIT_GETITIMER \
+ INTERCEPT_FUNCTION(getitimer); \
+ INTERCEPT_FUNCTION(setitimer);
+#else
+#define INIT_GETITIMER
+#endif
+
+
+#if SANITIZER_INTERCEPT_GLOB
+struct sanitizer_glob_t {
+ SIZE_T gl_pathc;
+ char **gl_pathv;
+};
+
+static void unpoison_glob_t(void *ctx, sanitizer_glob_t *pglob) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob));
+ // +1 for NULL pointer at the end.
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));
+ for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {
+ char *p = pglob->gl_pathv[i];
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1);
+ }
+}
-INTERCEPTOR(int, fscanf, void* stream, const char *format, ...) { // NOLINT
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, fscanf, stream, format);
- va_list ap;
- va_start(ap, format);
- int res = vfscanf(stream, format, ap); // NOLINT
- va_end(ap);
+INTERCEPTOR(int, glob, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
+ int res = REAL(glob)(pattern, flags, errfunc, pglob);
+ if (res == 0)
+ unpoison_glob_t(ctx, pglob);
return res;
}
-INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) { // NOLINT
- void* ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, sscanf, str, format); // NOLINT
- va_list ap;
- va_start(ap, format);
- int res = vsscanf(str, format, ap); // NOLINT
- va_end(ap);
+INTERCEPTOR(int, glob64, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob);
+ int res = REAL(glob64)(pattern, flags, errfunc, pglob);
+ if (res == 0)
+ unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#define INIT_GLOB \
+ INTERCEPT_FUNCTION(glob); \
+ INTERCEPT_FUNCTION(glob64);
+#else // SANITIZER_INTERCEPT_GLOB
+#define INIT_GLOB
+#endif // SANITIZER_INTERCEPT_GLOB
+
+
+#if SANITIZER_INTERCEPT_WAIT
+// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version
+// suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for
+// details.
+INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
+ int res = REAL(wait)(status);
+ if (res != -1 && status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ return res;
+}
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
+ int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
+ int res = REAL(waitid)(idtype, id, infop, options);
+ if (res != -1 && infop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
return res;
}
+INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
+ int res = REAL(waitpid)(pid, status, options);
+ if (res != -1 && status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ return res;
+}
+INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
+ int res = REAL(wait3)(status, options, rusage);
+ if (res != -1) {
+ if (status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
+ int res = REAL(wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT \
+ INTERCEPT_FUNCTION(wait); \
+ INTERCEPT_FUNCTION(waitid); \
+ INTERCEPT_FUNCTION(waitpid); \
+ INTERCEPT_FUNCTION(wait3); \
+ INTERCEPT_FUNCTION(wait4);
+#else
+#define INIT_WAIT
+#endif
-#define INIT_SCANF \
- INTERCEPT_FUNCTION(scanf); \
- INTERCEPT_FUNCTION(sscanf); /* NOLINT */ \
- INTERCEPT_FUNCTION(fscanf); \
- INTERCEPT_FUNCTION(vscanf); \
- INTERCEPT_FUNCTION(vsscanf); \
- INTERCEPT_FUNCTION(vfscanf)
+#if SANITIZER_INTERCEPT_INET
+INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_ntop, af, src, dst, size);
+ uptr sz = __sanitizer_in_addr_sz(af);
+ if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);
+ // FIXME: figure out read size based on the address family.
+ char *res = REAL(inet_ntop)(af, src, dst, size);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);
+ // FIXME: figure out read size based on the address family.
+ int res = REAL(inet_pton)(af, src, dst);
+ if (res == 1) {
+ uptr sz = __sanitizer_in_addr_sz(af);
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);
+ }
+ return res;
+}
+#define INIT_INET \
+ INTERCEPT_FUNCTION(inet_ntop); \
+ INTERCEPT_FUNCTION(inet_pton);
+#else
+#define INIT_INET
+#endif
+#if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
+ int res = REAL(pthread_getschedparam)(thread, policy, param);
+ if (res == 0) {
+ if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
+ if (param) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, sizeof(*param));
+ }
+ return res;
+}
+#define INIT_PTHREAD_GETSCHEDPARAM INTERCEPT_FUNCTION(pthread_getschedparam);
#else
-#define INIT_SCANF
+#define INIT_PTHREAD_GETSCHEDPARAM
+#endif
+
+#if SANITIZER_INTERCEPT_GETADDRINFO
+INTERCEPTOR(int, getaddrinfo, char *node, char *service,
+ struct __sanitizer_addrinfo *hints,
+ struct __sanitizer_addrinfo **out) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
+ if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
+ if (service)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
+ if (hints)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
+ int res = REAL(getaddrinfo)(node, service, hints, out);
+ if (res == 0) {
+ struct __sanitizer_addrinfo *p = *out;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_addrinfo));
+ if (p->ai_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, struct_sockaddr_sz);
+ if (p->ai_canonname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
+ REAL(strlen)(p->ai_canonname) + 1);
+ p = p->ai_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETADDRINFO INTERCEPT_FUNCTION(getaddrinfo);
+#else
+#define INIT_GETADDRINFO
+#endif
+
+#if SANITIZER_INTERCEPT_GETSOCKNAME
+INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ int addrlen_in = *addrlen;
+ int res = REAL(getsockname)(sock_fd, addr, addrlen);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
+ }
+ return res;
+}
+#define INIT_GETSOCKNAME INTERCEPT_FUNCTION(getsockname);
+#else
+#define INIT_GETSOCKNAME
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME || SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
+ if (h->h_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
+ char **p = h->h_aliases;
+ while (*p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ ++p;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, h->h_aliases, (p - h->h_aliases + 1) * sizeof(*h->h_aliases));
+ p = h->h_addr_list;
+ while (*p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, h->h_length);
+ ++p;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, h->h_addr_list, (p - h->h_addr_list + 1) * sizeof(*h->h_addr_list));
+}
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname, char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname, name);
+ struct __sanitizer_hostent *res = REAL(gethostbyname)(name);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyaddr, void *addr, int len,
+ int type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr, addr, len, type);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ struct __sanitizer_hostent *res = REAL(gethostbyaddr)(addr, len, type);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostent);
+ struct __sanitizer_hostent *res = REAL(gethostent)();
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2, name, af);
+ struct __sanitizer_hostent *res = REAL(gethostbyname2)(name, af);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+#define INIT_GETHOSTBYNAME \
+ INTERCEPT_FUNCTION(gethostent); \
+ INTERCEPT_FUNCTION(gethostbyaddr); \
+ INTERCEPT_FUNCTION(gethostbyname); \
+ INTERCEPT_FUNCTION(gethostbyname2);
+#else
+#define INIT_GETHOSTBYNAME
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
+ SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
+ h_errnop);
+ int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
+ if (res == 0) {
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ }
+ return res;
+}
+
+INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
+ struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
+ __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
+ buflen, result, h_errnop);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
+ h_errnop);
+ if (res == 0) {
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ }
+ return res;
+}
+
+INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
+ char *buf, SIZE_T buflen, __sanitizer_hostent **result,
+ int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
+ h_errnop);
+ int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
+ if (res == 0) {
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ }
+ return res;
+}
+
+INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
+ struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
+ __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
+ result, h_errnop);
+ int res =
+ REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
+ if (res == 0) {
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ }
+ return res;
+}
+#define INIT_GETHOSTBYNAME_R \
+ INTERCEPT_FUNCTION(gethostent_r); \
+ INTERCEPT_FUNCTION(gethostbyaddr_r); \
+ INTERCEPT_FUNCTION(gethostbyname_r); \
+ INTERCEPT_FUNCTION(gethostbyname2_r);
+#else
+#define INIT_GETHOSTBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETSOCKOPT
+INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
+ int *optlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
+ optlen);
+ if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
+ int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
+ if (res == 0)
+ if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
+ return res;
+}
+#define INIT_GETSOCKOPT INTERCEPT_FUNCTION(getsockopt);
+#else
+#define INIT_GETSOCKOPT
#endif
#define SANITIZER_COMMON_INTERCEPTORS_INIT \
+ INIT_STRCASECMP; \
+ INIT_STRNCASECMP; \
INIT_READ; \
INIT_PREAD; \
INIT_PREAD64; \
INIT_PRCTL; \
INIT_WRITE; \
- INIT_SCANF;
+ INIT_PWRITE; \
+ INIT_PWRITE64; \
+ INIT_LOCALTIME_AND_FRIENDS; \
+ INIT_SCANF; \
+ INIT_FREXP; \
+ INIT_FREXPF_FREXPL; \
+ INIT_GETPWNAM_AND_FRIENDS; \
+ INIT_GETPWNAM_R_AND_FRIENDS; \
+ INIT_CLOCK_GETTIME; \
+ INIT_GETITIMER; \
+ INIT_TIME; \
+ INIT_GLOB; \
+ INIT_WAIT; \
+ INIT_INET; \
+ INIT_PTHREAD_GETSCHEDPARAM; \
+ INIT_GETADDRINFO; \
+ INIT_GETSOCKNAME; \
+ INIT_GETHOSTBYNAME; \
+ INIT_GETHOSTBYNAME_R; \
+ INIT_GETSOCKOPT;
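The COMMON_INTERCEPTOR_* hooks used throughout this file are not defined here; the tool that includes the file supplies them. A minimal sketch of such an includer follows; the checking helpers (MyToolCheckRead, MyToolMarkWritten) are hypothetical placeholders, not functions from this patch:

  // my_tool_interceptors.cc (hypothetical)
  #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) (ctx = 0)  // per-call context setup
  #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)  MyToolCheckRead(ptr, size)
  #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) MyToolMarkWritten(ptr, size)
  #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) (void)(fd)
  #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) (void)(fd)
  #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) (void)(name)
  #include "sanitizer_common/sanitizer_common_interceptors.inc"

The tool then expands SANITIZER_COMMON_INTERCEPTORS_INIT in its interceptor-initialization routine, which registers every function enabled by the SANITIZER_INTERCEPT_* platform macros.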
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc
index 63d67a7115ec..8bb5cd818ac2 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc
@@ -8,83 +8,41 @@
//===----------------------------------------------------------------------===//
//
// Scanf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// with a few common GNU extensions.
//
//===----------------------------------------------------------------------===//
#include <stdarg.h>
-struct ScanfSpec {
- char c;
- unsigned size;
+struct ScanfDirective {
+  int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
};
-// One-letter specs.
-static const ScanfSpec scanf_specs[] = {
- {'p', sizeof(void *)},
- {'e', sizeof(float)},
- {'E', sizeof(float)},
- {'a', sizeof(float)},
- {'f', sizeof(float)},
- {'g', sizeof(float)},
- {'d', sizeof(int)},
- {'i', sizeof(int)},
- {'o', sizeof(int)},
- {'u', sizeof(int)},
- {'x', sizeof(int)},
- {'X', sizeof(int)},
- {'n', sizeof(int)},
- {'t', sizeof(PTRDIFF_T)},
- {'z', sizeof(SIZE_T)},
- {'j', sizeof(INTMAX_T)},
- {'h', sizeof(short)}
-};
-
-static const unsigned scanf_specs_cnt =
- sizeof(scanf_specs) / sizeof(scanf_specs[0]);
-
-// %ll?, %L?, %q? specs
-static const ScanfSpec scanf_llspecs[] = {
- {'e', sizeof(long double)},
- {'f', sizeof(long double)},
- {'g', sizeof(long double)},
- {'d', sizeof(long long)},
- {'i', sizeof(long long)},
- {'o', sizeof(long long)},
- {'u', sizeof(long long)},
- {'x', sizeof(long long)}
-};
-
-static const unsigned scanf_llspecs_cnt =
- sizeof(scanf_llspecs) / sizeof(scanf_llspecs[0]);
-
-// %l? specs
-static const ScanfSpec scanf_lspecs[] = {
- {'e', sizeof(double)},
- {'f', sizeof(double)},
- {'g', sizeof(double)},
- {'d', sizeof(long)},
- {'i', sizeof(long)},
- {'o', sizeof(long)},
- {'u', sizeof(long)},
- {'x', sizeof(long)},
- {'X', sizeof(long)},
-};
-
-static const unsigned scanf_lspecs_cnt =
- sizeof(scanf_lspecs) / sizeof(scanf_lspecs[0]);
-
-static unsigned match_spec(const struct ScanfSpec *spec, unsigned n, char c) {
- for (unsigned i = 0; i < n; ++i)
- if (spec[i].c == c)
- return spec[i].size;
- return 0;
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
}
-static void scanf_common(void *ctx, const char *format, va_list ap_const) {
- va_list aq;
- va_copy(aq, ap_const);
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
- const char *p = format;
- unsigned size;
+// Parse scanf format string. If a valid directive is encountered, it is
+// returned in dir. This function returns a pointer to the first
+// unprocessed character, or 0 in case of error.
+// At the end of the string, a pointer to the closing \0 is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
while (*p) {
if (*p != '%') {
@@ -92,51 +50,260 @@ static void scanf_common(void *ctx, const char *format, va_list ap_const) {
continue;
}
++p;
- if (*p == '*' || *p == '%' || *p == 0) {
+ // %%
+ if (*p == '%') {
++p;
continue;
}
- if (*p == '0' || (*p >= '1' && *p <= '9')) {
- size = internal_atoll(p);
- // +1 for the \0 at the end
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size + 1);
+ if (*p == '\0') {
+ return 0;
+ }
+ // %n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ if (*q == '$') {
+ dir->argIdx = number;
+ p = q + 1;
+ }
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ }
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
++p;
- continue;
}
-
- if (*p == 'L' || *p == 'q') {
+ // Field width.
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ if (dir->fieldWidth <= 0)
+ return 0;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
++p;
- size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
- continue;
}
-
- if (*p == 'l') {
+ // Length modifier.
+ if (char_is_one_of(*p, "jztLq")) {
+ dir->lengthModifier[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ dir->lengthModifier[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ dir->lengthModifier[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ dir->lengthModifier[0] = 'l';
++p;
if (*p == 'l') {
+ dir->lengthModifier[1] = 'l';
++p;
- size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
- continue;
- } else {
- size = match_spec(scanf_lspecs, scanf_lspecs_cnt, *p);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
- continue;
}
}
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return 0; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+ // This is unfortunately ambiguous between old GNU extension
+ // of %as, %aS and %a[...] and newer POSIX %a followed by
+ // letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+        // Watch out for %a[h-j%d]: if % appears inside the
+        // [...] range, we have to give up, because we cannot tell
+        // whether scanf will parse it as POSIX %a [h-j %d ] or as a
+        // GNU allocation of a string matching the set d, h-j plus %.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return 0;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ break;
+ }
+ return p;
+}
- if (*p == 'h' && *(p + 1) == 'h') {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), sizeof(char));
- p += 2;
- continue;
+// Returns true if the character is an integer conversion specifier.
+static bool scanf_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is a floating point conversion specifier.
+static bool scanf_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns string output character size for string-like conversions,
+// or 0 if the conversion is invalid.
+static int scanf_get_char_size(ScanfDirective *dir) {
+ if (char_is_one_of(dir->convSpecifier, "CS")) {
+ // wchar_t
+ return 0;
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cs[")) {
+ if (dir->lengthModifier[0] == 'l')
+ // wchar_t
+ return 0;
+ else if (dir->lengthModifier[0] == 0)
+ return sizeof(char);
+ else
+ return 0;
+ }
+
+ return 0;
+}
+
+enum ScanfStoreSize {
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ SSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ SSS_INVALID = 0
+};
+
+// Returns the store size of a scanf directive (if >0), or a value of
+// ScanfStoreSize.
+static int scanf_get_store_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return SSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return SSS_INVALID;
+ // This is ambiguous, so check the smaller size of char * (if it is
+ // a GNU extension of %as, %aS or %a[...]) and float (if it is
+ // POSIX %a followed by s, S or [ letters).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (scanf_is_integer_conv(dir->convSpecifier)) {
+ switch (dir->lengthModifier[0]) {
+ case 'h':
+ return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return SSS_INVALID;
}
+ }
- size = match_spec(scanf_specs, scanf_specs_cnt, *p);
- if (size) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
- ++p;
+ if (scanf_is_float_conv(dir->convSpecifier)) {
+ switch (dir->lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return dir->lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ return sizeof(float);
+ default:
+ return SSS_INVALID;
+ }
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "sS[")) {
+ unsigned charSize = scanf_get_char_size(dir);
+ if (charSize == 0)
+ return SSS_INVALID;
+ if (dir->fieldWidth == 0)
+ return SSS_STRLEN;
+ return (dir->fieldWidth + 1) * charSize;
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cC")) {
+ unsigned charSize = scanf_get_char_size(dir);
+ if (charSize == 0)
+ return SSS_INVALID;
+ if (dir->fieldWidth == 0)
+ return charSize;
+ return dir->fieldWidth * charSize;
+ }
+
+ if (dir->convSpecifier == 'p') {
+ if (dir->lengthModifier[1] != 0)
+ return SSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return SSS_INVALID;
+}
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ while (*p && n_inputs) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
continue;
+ int size = scanf_get_store_size(&dir);
+ if (size == SSS_INVALID)
+ break;
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (size == SSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
}
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
}
- va_end(aq);
}
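To make the rewritten parser concrete, here is an illustrative trace (not part of the patch) of what an intercepted call now reports:

  int n;
  char buf[11];
  sscanf("42 hello", "%d %10s", &n, buf);  // the real vsscanf returns 2

The interceptor runs the real function first and, because two items were converted, calls scanf_common() with n_inputs == 2. Parsing the format yields two directives: "%d" is an integer conversion with no length modifier, so sizeof(int) bytes at &n are reported; "%10s" is a narrow string conversion with field width 10, so (10 + 1) * sizeof(char) == 11 bytes at buf are reported. Both ranges are passed to COMMON_INTERCEPTOR_WRITE_RANGE only after the real call has succeeded, instead of being marked up front as the old table-based code did.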
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
new file mode 100644
index 000000000000..36f6cf0bc0db
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -0,0 +1,23 @@
+//===-- sanitizer_common_libcdep.cc ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+bool PrintsToTty() {
+ MaybeOpenReportFile();
+ return internal_isatty(report_fd);
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_common_syscalls.inc b/lib/sanitizer_common/sanitizer_common_syscalls.inc
new file mode 100644
index 000000000000..da25e6b6ad2c
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -0,0 +1,148 @@
+//===-- sanitizer_common_syscalls.inc ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common syscalls handlers for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_SYSCALL_PRE_READ_RANGE
+// Called in prehook for regions that will be read by the kernel and
+// must be initialized.
+// COMMON_SYSCALL_PRE_WRITE_RANGE
+// Called in prehook for regions that will be written to by the kernel
+// and must be addressable. The actual write range may be smaller than
+// reported in the prehook. See POST_WRITE_RANGE.
+// COMMON_SYSCALL_POST_READ_RANGE
+// Called in posthook for regions that were read by the kernel. Does
+// not make much sense.
+// COMMON_SYSCALL_POST_WRITE_RANGE
+// Called in posthook for regions that were written to by the kernel
+// and are now initialized.
+//===----------------------------------------------------------------------===//
+
+#define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_##name
+#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+
+#define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_##name
+#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+
+// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
+
+extern "C" {
+struct sanitizer_kernel_iovec {
+ void *iov_base;
+ unsigned long iov_len;
+};
+
+struct sanitizer_kernel_msghdr {
+ void *msg_name;
+ int msg_namelen;
+ struct sanitizer_kernel_iovec *msg_iov;
+ unsigned long msg_iovlen;
+ void *msg_control;
+ unsigned long msg_controllen;
+ unsigned msg_flags;
+};
+
+struct sanitizer_kernel_timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+struct sanitizer_kernel_rusage {
+ struct sanitizer_kernel_timeval ru_timeval[2];
+ long ru_long[14];
+};
+
+PRE_SYSCALL(recvmsg)(int sockfd, struct sanitizer_kernel_msghdr *msg,
+ int flags) {
+ PRE_READ(msg, sizeof(*msg));
+}
+
+POST_SYSCALL(recvmsg)(long res, int sockfd, struct sanitizer_kernel_msghdr *msg,
+ int flags) {
+ if (res > 0)
+ for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {
+ POST_WRITE(msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
+ }
+ POST_WRITE(msg->msg_control, msg->msg_controllen);
+}
+
+PRE_SYSCALL(rt_sigpending)(void *p, unsigned long s) { PRE_WRITE(p, s); }
+
+POST_SYSCALL(rt_sigpending)(long res, void *p, unsigned long s) {
+ if (res == 0) {
+ POST_WRITE(p, s);
+ }
+}
+
+PRE_SYSCALL(getdents)(int fd, void *dirp, int count) { PRE_WRITE(dirp, count); }
+
+POST_SYSCALL(getdents)(long res, int fd, void *dirp, int count) {
+ if (res > 0) {
+ POST_WRITE(dirp, res);
+ }
+}
+
+PRE_SYSCALL(getdents64)(int fd, void *dirp, int count) {
+ PRE_WRITE(dirp, count);
+}
+
+POST_SYSCALL(getdents64)(long res, int fd, void *dirp, int count) {
+ if (res > 0) {
+ POST_WRITE(dirp, res);
+ }
+}
+
+PRE_SYSCALL(wait4)(int pid, int *status, int options,
+ struct sanitizer_kernel_rusage *r) {
+ if (status) {
+ PRE_WRITE(status, sizeof(*status));
+ }
+ if (r) {
+ PRE_WRITE(r, sizeof(*r));
+ }
+}
+
+POST_SYSCALL(wait4)(long res, int pid, int *status, int options,
+ struct sanitizer_kernel_rusage *r) {
+ if (res > 0) {
+ if (status) {
+ POST_WRITE(status, sizeof(*status));
+ }
+ if (r) {
+ POST_WRITE(r, sizeof(*r));
+ }
+ }
+}
+
+PRE_SYSCALL(waitpid)(int pid, int *status, int options) {
+ if (status) {
+ PRE_WRITE(status, sizeof(*status));
+ }
+}
+
+POST_SYSCALL(waitpid)(long res, int pid, int *status, int options) {
+ if (res > 0 && status) {
+ POST_WRITE(status, sizeof(*status));
+ }
+}
+} // extern "C"
+
+#undef PRE_SYSCALL
+#undef PRE_READ
+#undef PRE_WRITE
+#undef POST_SYSCALL
+#undef POST_READ
+#undef POST_WRITE
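As the header comment above says, the COMMON_SYSCALL_* hooks must be defined by the tool before this file is included. A rough sketch follows; the callback names are hypothetical stand-ins for whatever checking and unpoisoning primitives a tool actually provides:

  // tool_syscalls.cc (hypothetical)
  #define COMMON_SYSCALL_PRE_READ_RANGE(p, s)   CheckKernelWillRead(p, s)
  #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)  CheckKernelMayWrite(p, s)
  #define COMMON_SYSCALL_POST_READ_RANGE(p, s)  ((void)(p), (void)(s))
  #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) MarkInitializedByKernel(p, s)
  #include "sanitizer_common/sanitizer_common_syscalls.inc"

Including the file emits the __sanitizer_syscall_pre_<name> and __sanitizer_syscall_post_<name> interface functions, which are intended to be invoked immediately before and after the corresponding raw syscalls.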
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index eca910c08090..b7218e5ad212 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -18,15 +18,35 @@
namespace __sanitizer {
+CommonFlags common_flags_dont_use_directly;
+
+void ParseCommonFlagsFromString(const char *str) {
+ CommonFlags *f = common_flags();
+ ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
+ ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
+ ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
+ ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
+ ParseFlag(str, &f->symbolize, "symbolize");
+}
+
static bool GetFlagValue(const char *env, const char *name,
const char **value, int *value_length) {
if (env == 0)
return false;
- const char *pos = internal_strstr(env, name);
- const char *end;
- if (pos == 0)
- return false;
+ const char *pos = 0;
+ for (;;) {
+ pos = internal_strstr(env, name);
+ if (pos == 0)
+ return false;
+ if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) {
+ // Seems to be middle of another flag name or value.
+ env = pos + 1;
+ continue;
+ }
+ break;
+ }
pos += internal_strlen(name);
+ const char *end;
if (pos[0] != '=') {
end = pos;
} else {
@@ -38,7 +58,8 @@ static bool GetFlagValue(const char *env, const char *name,
pos += 1;
end = internal_strchr(pos, '\'');
} else {
- end = internal_strchr(pos, ' ');
+ // Read until the next space or colon.
+ end = pos + internal_strcspn(pos, " :");
}
if (end == 0)
end = pos + internal_strlen(pos);
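An illustrative walk-through of the new lookup logic (the option string below is made up):

  // env = "external_symbolizer_path=/usr/bin/addr2line:symbolize=1"
  // GetFlagValue(env, "symbolize", ...):
  //   1. internal_strstr() first hits the "symbolize" embedded in
  //      "external_symbolizer_path"; the preceding character is '_', so the
  //      match is treated as the middle of another flag name and skipped.
  //   2. The next hit is the standalone flag; its unquoted value is read up
  //      to the next space or colon, so "1" is returned even though the
  //      options are colon-separated.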
diff --git a/lib/sanitizer_common/sanitizer_flags.h b/lib/sanitizer_common/sanitizer_flags.h
index b7ce4524b055..e97ce6a87188 100644
--- a/lib/sanitizer_common/sanitizer_flags.h
+++ b/lib/sanitizer_common/sanitizer_flags.h
@@ -22,6 +22,29 @@ void ParseFlag(const char *env, bool *flag, const char *name);
void ParseFlag(const char *env, int *flag, const char *name);
void ParseFlag(const char *env, const char **flag, const char *name);
+struct CommonFlags {
+ // If set, use the online symbolizer from common sanitizer runtime.
+ bool symbolize;
+ // Path to external symbolizer.
+ const char *external_symbolizer_path;
+ // Strips this prefix from file paths in error reports.
+ const char *strip_path_prefix;
+ // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
+ bool fast_unwind_on_fatal;
+ // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
+ bool fast_unwind_on_malloc;
+ // Max number of stack frames kept for each allocation/deallocation.
+ int malloc_context_size;
+};
+
+extern CommonFlags common_flags_dont_use_directly;
+
+inline CommonFlags *common_flags() {
+ return &common_flags_dont_use_directly;
+}
+
+void ParseCommonFlagsFromString(const char *str);
+
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H
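A minimal usage sketch, assuming the tool forwards its own options string (the environment variable name is made up; GetEnv() is the runtime's getenv replacement):

  // in the tool's initialization code (hypothetical)
  ParseCommonFlagsFromString(GetEnv("MYTOOL_OPTIONS"));
  if (common_flags()->symbolize) {
    // start the in-process symbolizer, honour strip_path_prefix, etc.
  }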
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index 7ff27338192a..9a7d374bf5ad 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -13,19 +13,109 @@
#ifndef SANITIZER_DEFS_H
#define SANITIZER_DEFS_H
-#include "sanitizer/common_interface_defs.h"
+#include "sanitizer_platform.h"
+
+#if SANITIZER_WINDOWS
+// FIXME find out what we need on Windows. __declspec(dllexport) ?
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#elif defined(SANITIZER_GO)
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
+#endif
+
+#if SANITIZER_LINUX && !defined(SANITIZER_GO)
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+#else
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
+#endif
+
+// GCC does not understand __has_feature
+#if !defined(__has_feature)
+# define __has_feature(x) 0
+#endif
+
+// For portability reasons we do not include stddef.h, stdint.h or any other
+// system header, but we do need some basic types that are not defined
+// in a portable way by the language itself.
+namespace __sanitizer {
+
+#if defined(_WIN64)
+// 64-bit Windows uses LLP64 data model.
+typedef unsigned long long uptr; // NOLINT
+typedef signed long long sptr; // NOLINT
+#else
+typedef unsigned long uptr; // NOLINT
+typedef signed long sptr; // NOLINT
+#endif // defined(_WIN64)
+#if defined(__x86_64__)
+// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
+// 64-bit pointer to unwind stack frame.
+typedef unsigned long long uhwptr; // NOLINT
+#else
+typedef uptr uhwptr; // NOLINT
+#endif
+typedef unsigned char u8;
+typedef unsigned short u16; // NOLINT
+typedef unsigned int u32;
+typedef unsigned long long u64; // NOLINT
+typedef signed char s8;
+typedef signed short s16; // NOLINT
+typedef signed int s32;
+typedef signed long long s64; // NOLINT
+typedef int fd_t;
+
+// WARNING: OFF_T may be different from OS type off_t, depending on the value of
+// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
+// like pread and mmap, as opposed to pread64 and mmap64.
+// Mac and Linux/x86-64 are special.
+#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
+typedef u64 OFF_T;
+#else
+typedef uptr OFF_T;
+#endif
+typedef u64 OFF64_T;
+} // namespace __sanitizer
+
+extern "C" {
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ void __sanitizer_set_report_path(const char *path)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // Tell the tools to write their reports to given file descriptor instead of
+ // stderr.
+ void __sanitizer_set_report_fd(int fd)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // Notify the tools that the sandbox is going to be turned on. The reserved
+ // parameter will be used in the future to hold a structure with functions
+ // that the tools may call to bypass the sandbox.
+ void __sanitizer_sandbox_on_notify(void *reserved)
+ SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ void __sanitizer_report_error_summary(const char *error_summary)
+ SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
+} // extern "C"
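A short client-side sketch of the report-path entry point declared above (the path is arbitrary); the prototype can also be obtained from the public sanitizer/common_interface_defs.h header:

  extern "C" void __sanitizer_set_report_path(const char *path);
  int main() {
    // Reports will be written to /tmp/myapp_report.<pid> instead of stderr.
    __sanitizer_set_report_path("/tmp/myapp_report");
    // ...
  }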
+
+
using namespace __sanitizer; // NOLINT
// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.
// Common defs.
-#define INLINE static inline
+#define INLINE inline
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#define WEAK SANITIZER_WEAK_ATTRIBUTE
// Platform-specific defs.
#if defined(_MSC_VER)
-# define ALWAYS_INLINE __declspec(forceinline)
+# define ALWAYS_INLINE __forceinline
// FIXME(timurrrr): do we need this on Windows?
# define ALIAS(x)
# define ALIGNED(x) __declspec(align(x))
@@ -40,7 +130,7 @@ using namespace __sanitizer; // NOLINT
# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
-# define ALWAYS_INLINE __attribute__((always_inline))
+# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
# define ALIGNED(x) __attribute__((aligned(x)))
# define FORMAT(f, a) __attribute__((format(printf, f, a)))
@@ -60,7 +150,7 @@ using namespace __sanitizer; // NOLINT
# endif
#endif // _MSC_VER
-#if defined(_WIN32)
+#if SANITIZER_WINDOWS
typedef unsigned long DWORD; // NOLINT
typedef DWORD thread_return_t;
# define THREAD_CALLING_CONV __stdcall
@@ -183,10 +273,12 @@ extern "C" void* _ReturnAddress(void);
# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
#endif
-#define HANDLE_EINTR(res, f) { \
- do { \
- res = (f); \
- } while (res == -1 && errno == EINTR); \
+#define HANDLE_EINTR(res, f) \
+ { \
+ int rverrno; \
+ do { \
+ res = (f); \
+ } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
}
#endif // SANITIZER_DEFS_H
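A brief usage sketch of the reworked macro, assuming the internal_* wrappers declared in sanitizer_libc.h:

  uptr res;
  HANDLE_EINTR(res, internal_read(fd, buf, count));  // retried while EINTR
  if (internal_iserror(res))
    Report("read failed\n");

The key change is that failure is now detected with internal_iserror() on the raw syscall return value instead of consulting libc's errno, which the runtime no longer relies on here.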
diff --git a/lib/sanitizer_common/sanitizer_lfstack.h b/lib/sanitizer_common/sanitizer_lfstack.h
index c26e45db8f89..088413908087 100644
--- a/lib/sanitizer_common/sanitizer_lfstack.h
+++ b/lib/sanitizer_common/sanitizer_lfstack.h
@@ -68,6 +68,6 @@ struct LFStack {
atomic_uint64_t head_;
};
-}
+} // namespace __sanitizer
#endif // #ifndef SANITIZER_LFSTACK_H
diff --git a/lib/sanitizer_common/sanitizer_libc.cc b/lib/sanitizer_common/sanitizer_libc.cc
index 349be35012dd..20c03c4474a1 100644
--- a/lib/sanitizer_common/sanitizer_libc.cc
+++ b/lib/sanitizer_common/sanitizer_libc.cc
@@ -206,7 +206,7 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
}
bool mem_is_zero(const char *beg, uptr size) {
- CHECK_LE(size, 1UL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
+ CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
const char *end = beg + size;
uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h
index aa052c654d39..82d809a0305a 100644
--- a/lib/sanitizer_common/sanitizer_libc.h
+++ b/lib/sanitizer_common/sanitizer_libc.h
@@ -11,14 +11,13 @@
// run-time libraries.
// These tools can not use some of the libc functions directly because those
// functions are intercepted. Instead, we implement a tiny subset of libc here.
-// NOTE: This file may be included into user code.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LIBC_H
#define SANITIZER_LIBC_H
// ----------- ATTENTION -------------
// This header should NOT include any other headers from sanitizer runtime.
-#include "sanitizer/common_interface_defs.h"
+#include "sanitizer_internal_defs.h"
namespace __sanitizer {
@@ -46,6 +45,7 @@ char *internal_strrchr(const char *s, int c);
char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
+int internal_snprintf(char *buffer, uptr length, const char *format, ...);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
@@ -53,28 +53,46 @@ bool mem_is_zero(const char *mem, uptr size);
// Memory
-void *internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, u64 offset);
-int internal_munmap(void *addr, uptr length);
+uptr internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset);
+uptr internal_munmap(void *addr, uptr length);
// I/O
-typedef int fd_t;
const fd_t kInvalidFd = -1;
const fd_t kStdinFd = 0;
const fd_t kStdoutFd = 1;
const fd_t kStderrFd = 2;
-int internal_close(fd_t fd);
+uptr internal_close(fd_t fd);
int internal_isatty(fd_t fd);
-fd_t internal_open(const char *filename, bool write);
+
+// Use __sanitizer::OpenFile() instead.
+uptr internal_open(const char *filename, int flags);
+uptr internal_open(const char *filename, int flags, u32 mode);
+
uptr internal_read(fd_t fd, void *buf, uptr count);
uptr internal_write(fd_t fd, const void *buf, uptr count);
+
+// OS
uptr internal_filesize(fd_t fd); // -1 on error.
-int internal_dup2(int oldfd, int newfd);
+uptr internal_stat(const char *path, void *buf);
+uptr internal_lstat(const char *path, void *buf);
+uptr internal_fstat(fd_t fd, void *buf);
+uptr internal_dup2(int oldfd, int newfd);
uptr internal_readlink(const char *path, char *buf, uptr bufsize);
-int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+uptr internal_unlink(const char *path);
+void NORETURN internal__exit(int exitcode);
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
+
+uptr internal_ptrace(int request, int pid, void *addr, void *data);
+uptr internal_waitpid(int pid, int *status, int options);
+uptr internal_getpid();
+uptr internal_getppid();
// Threading
-int internal_sched_yield();
+uptr internal_sched_yield();
+
+// Error handling
+bool internal_iserror(uptr retval, int *rverrno = 0);
} // namespace __sanitizer
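The net effect of this header change is a uniform error convention: every internal_* wrapper returns a uptr that encodes -errno on failure and is checked with internal_iserror(), rather than returning -1 and setting libc errno. A minimal sketch of the intended calling pattern (the path and flags are illustrative; the flag constants come from <fcntl.h> in the POSIX implementations):

  uptr openrv = internal_open("/proc/self/maps", O_RDONLY);
  int err;
  if (internal_iserror(openrv, &err)) {
    Report("open failed, errno %d\n", err);
  } else {
    fd_t fd = openrv;
    // ... read from fd with internal_read() ...
    internal_close(fd);
  }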
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index 8b9ba38ca777..6e234e5f1e1d 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -11,20 +11,28 @@
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+#include <asm/param.h>
+#include <dlfcn.h>
+#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>
+#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/syscall.h>
@@ -32,9 +40,20 @@
#include <sys/types.h>
#include <unistd.h>
#include <unwind.h>
-#include <errno.h>
-#include <sys/prctl.h>
-#include <linux/futex.h>
+
+#if !SANITIZER_ANDROID
+#include <sys/signal.h>
+#endif
+
+// <linux/time.h>
+struct kernel_timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+// <linux/futex.h> is broken on some Linux distributions.
+const int FUTEX_WAIT = 0;
+const int FUTEX_WAKE = 1;
// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
@@ -47,130 +66,158 @@
namespace __sanitizer {
+#ifdef __x86_64__
+#include "sanitizer_syscall_linux_x86_64.inc"
+#else
+#include "sanitizer_syscall_generic.inc"
+#endif
+
// --------------- sanitizer_libc.h
-void *internal_mmap(void *addr, uptr length, int prot, int flags,
+uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
+ return internal_syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
#else
- return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
+ return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
#endif
}
-int internal_munmap(void *addr, uptr length) {
- return syscall(__NR_munmap, addr, length);
+uptr internal_munmap(void *addr, uptr length) {
+ return internal_syscall(__NR_munmap, addr, length);
+}
+
+uptr internal_close(fd_t fd) {
+ return internal_syscall(__NR_close, fd);
}
-int internal_close(fd_t fd) {
- return syscall(__NR_close, fd);
+uptr internal_open(const char *filename, int flags) {
+ return internal_syscall(__NR_open, filename, flags);
}
-fd_t internal_open(const char *filename, bool write) {
- return syscall(__NR_open, filename,
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ return internal_syscall(__NR_open, filename, flags, mode);
+}
+
+uptr OpenFile(const char *filename, bool write) {
+ return internal_open(filename,
write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, buf, count));
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, buf, count));
return res;
}
-uptr internal_filesize(fd_t fd) {
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS
+static void stat64_to_stat(struct stat64 *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+ out->st_atime = in->st_atime;
+ out->st_mtime = in->st_mtime;
+ out->st_ctime = in->st_ctime;
+ out->st_ino = in->st_ino;
+}
+#endif
+
+uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- struct stat st;
- if (syscall(__NR_fstat, fd, &st))
- return -1;
+ return internal_syscall(__NR_stat, path, buf);
#else
- struct stat64 st;
- if (syscall(__NR_fstat64, fd, &st))
- return -1;
+ struct stat64 buf64;
+ int res = internal_syscall(__NR_stat64, path, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
#endif
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(__NR_lstat, path, buf);
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(__NR_lstat64, path, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(__NR_fstat, fd, buf);
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(__NR_fstat64, fd, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
return (uptr)st.st_size;
}
-int internal_dup2(int oldfd, int newfd) {
- return syscall(__NR_dup2, oldfd, newfd);
+uptr internal_dup2(int oldfd, int newfd) {
+ return internal_syscall(__NR_dup2, oldfd, newfd);
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
- return (uptr)syscall(__NR_readlink, path, buf, bufsize);
+ return internal_syscall(__NR_readlink, path, buf, bufsize);
+}
+
+uptr internal_unlink(const char *path) {
+ return internal_syscall(__NR_unlink, path);
}
-int internal_sched_yield() {
- return syscall(__NR_sched_yield);
+uptr internal_sched_yield() {
+ return internal_syscall(__NR_sched_yield);
+}
+
+void internal__exit(int exitcode) {
+ internal_syscall(__NR_exit_group, exitcode);
+ Die(); // Unreachable.
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ return internal_syscall(__NR_execve, filename, argv, envp);
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
struct stat st;
- if (syscall(__NR_stat, filename, &st))
+ if (internal_stat(filename, &st))
return false;
-#else
- struct stat64 st;
- if (syscall(__NR_stat64, filename, &st))
- return false;
-#endif
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
uptr GetTid() {
- return syscall(__NR_gettid);
-}
-
-void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
- uptr *stack_bottom) {
- static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M
- CHECK(stack_top);
- CHECK(stack_bottom);
- if (at_initialization) {
- // This is the main thread. Libpthread may not be initialized yet.
- struct rlimit rl;
- CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
-
- // Find the mapping that contains a stack variable.
- MemoryMappingLayout proc_maps;
- uptr start, end, offset;
- uptr prev_end = 0;
- while (proc_maps.Next(&start, &end, &offset, 0, 0)) {
- if ((uptr)&rl < end)
- break;
- prev_end = end;
- }
- CHECK((uptr)&rl >= start && (uptr)&rl < end);
-
- // Get stacksize from rlimit, but clip it so that it does not overlap
- // with other mappings.
- uptr stacksize = rl.rlim_cur;
- if (stacksize > end - prev_end)
- stacksize = end - prev_end;
- // When running with unlimited stack size, we still want to set some limit.
- // The unlimited stack size is caused by 'ulimit -s unlimited'.
- // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
- if (stacksize > kMaxThreadStackSize)
- stacksize = kMaxThreadStackSize;
- *stack_top = end;
- *stack_bottom = end - stacksize;
- return;
- }
- pthread_attr_t attr;
- CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
- uptr stacksize = 0;
- void *stackaddr = 0;
- pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
- pthread_attr_destroy(&attr);
+ return internal_syscall(__NR_gettid);
+}
- *stack_top = (uptr)stackaddr + stacksize;
- *stack_bottom = (uptr)stackaddr;
- CHECK(stacksize < kMaxThreadStackSize); // Sanity check.
+u64 NanoTime() {
+ kernel_timeval tv;
+ internal_syscall(__NR_gettimeofday, &tv, 0);
+ return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
// Like getenv, but reads env directly from /proc and does not use libc.
@@ -201,6 +248,11 @@ const char *GetEnv(const char *name) {
return 0; // Not found.
}
+extern "C" {
+ extern void *__libc_stack_end SANITIZER_WEAK_ATTRIBUTE;
+}
+
+#if !SANITIZER_GO
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
@@ -219,13 +271,33 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
(*arr)[count] = 0;
}
+#endif
+
+static void GetArgsAndEnv(char*** argv, char*** envp) {
+#if !SANITIZER_GO
+ if (&__libc_stack_end) {
+#endif
+ uptr* stack_end = (uptr*)__libc_stack_end;
+ int argc = *stack_end;
+ *argv = (char**)(stack_end + 1);
+ *envp = (char**)(stack_end + argc + 2);
+#if !SANITIZER_GO
+ } else {
+ static const int kMaxArgv = 2000, kMaxEnvp = 2000;
+ ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
+ ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
+ }
+#endif
+}
void ReExec() {
- static const int kMaxArgv = 100, kMaxEnvp = 1000;
char **argv, **envp;
- ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv);
- ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp);
- execve(argv[0], argv, envp);
+ GetArgsAndEnv(&argv, &envp);
+ uptr rv = internal_execve("/proc/self/exe", argv, envp);
+ int rverrno;
+ CHECK_EQ(internal_iserror(rv, &rverrno), true);
+ Printf("execve failed, errno %d\n", rverrno);
+ Die();
}
void PrepareForSandboxing() {
@@ -234,6 +306,8 @@ void PrepareForSandboxing() {
// process will be able to load additional libraries, so it's fine to use the
// cached mappings.
MemoryMappingLayout::CacheMemoryMappings();
+ // Same for /proc/self/exe in the symbolizer.
+ SymbolizerPrepareForSandboxing();
}
// ----------------- sanitizer_procmaps.h
@@ -241,18 +315,22 @@ void PrepareForSandboxing() {
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
-MemoryMappingLayout::MemoryMappingLayout() {
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
proc_self_maps_.len =
ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
&proc_self_maps_.mmaped_size, 1 << 26);
- if (proc_self_maps_.mmaped_size == 0) {
- LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
+ if (cache_enabled) {
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ } else {
+ CHECK_GT(proc_self_maps_.mmaped_size, 0);
}
- // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
Reset();
// FIXME: in the future we may want to cache the mappings on demand only.
- CacheMemoryMappings();
+ if (cache_enabled)
+ CacheMemoryMappings();
}
MemoryMappingLayout::~MemoryMappingLayout() {
@@ -314,7 +392,7 @@ static uptr ParseHex(char **str) {
return x;
}
-static bool IsOnOf(char c, char c1, char c2) {
+static bool IsOneOf(char c, char c1, char c2) {
return c == c1 || c == c2;
}
@@ -323,7 +401,8 @@ static bool IsDecimal(char c) {
}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size) {
+ char filename[], uptr filename_size,
+ uptr *protection) {
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
@@ -338,10 +417,22 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
CHECK_EQ(*current_++, '-');
*end = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
- CHECK(IsOnOf(*current_++, '-', 'r'));
- CHECK(IsOnOf(*current_++, '-', 'w'));
- CHECK(IsOnOf(*current_++, '-', 'x'));
- CHECK(IsOnOf(*current_++, 's', 'p'));
+ uptr local_protection = 0;
+ CHECK(IsOneOf(*current_, '-', 'r'));
+ if (*current_++ == 'r')
+ local_protection |= kProtectionRead;
+ CHECK(IsOneOf(*current_, '-', 'w'));
+ if (*current_++ == 'w')
+ local_protection |= kProtectionWrite;
+ CHECK(IsOneOf(*current_, '-', 'x'));
+ if (*current_++ == 'x')
+ local_protection |= kProtectionExecute;
+ CHECK(IsOneOf(*current_, 's', 'p'));
+ if (*current_++ == 's')
+ local_protection |= kProtectionShared;
+ if (protection) {
+ *protection = local_protection;
+ }
CHECK_EQ(*current_++, ' ');
*offset = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
@@ -371,87 +462,12 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
// Gets the object name and the offset by walking MemoryMappingLayout.
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[],
- uptr filename_size) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
+ uptr filename_size,
+ uptr *protection) {
+ return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
+ protection);
}
-bool SanitizerSetThreadName(const char *name) {
-#ifdef PR_SET_NAME
- return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
-#else
- return false;
-#endif
-}
-
-bool SanitizerGetThreadName(char *name, int max_len) {
-#ifdef PR_GET_NAME
- char buff[17];
- if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
- return false;
- internal_strncpy(name, buff, max_len);
- name[max_len] = 0;
- return true;
-#else
- return false;
-#endif
-}
-
-#ifndef SANITIZER_GO
-//------------------------- SlowUnwindStack -----------------------------------
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
- uptr val;
- _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
- 15 /* r15 = PC */, _UVRSD_UINT32, &val);
- CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
- // Clear the Thumb bit.
- return val & ~(uptr)1;
-#else
- return _Unwind_GetIP(ctx);
-#endif
-}
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
- StackTrace *b = (StackTrace*)param;
- CHECK(b->size < b->max_size);
- uptr pc = Unwind_GetIP(ctx);
- b->trace[b->size++] = pc;
- if (b->size == b->max_size) return UNWIND_STOP;
- return UNWIND_CONTINUE;
-}
-
-static bool MatchPc(uptr cur_pc, uptr trace_pc) {
- return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
-}
-
-void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
- this->size = 0;
- this->max_size = max_depth;
- if (max_depth > 1) {
- _Unwind_Backtrace(Unwind_Trace, this);
- // We need to pop a few frames so that pc is on top.
- // trace[0] belongs to the current function so we always pop it.
- int to_pop = 1;
- /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
- else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
- else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
- else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
- else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
- this->PopStackFrames(to_pop);
- }
- this->trace[0] = pc;
-}
-
-#endif // #ifndef SANITIZER_GO
-
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
@@ -462,12 +478,16 @@ BlockingMutex::BlockingMutex(LinkerInitialized) {
CHECK_EQ(owner_, 0);
}
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
- syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+ internal_syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
}
void BlockingMutex::Unlock() {
@@ -475,9 +495,147 @@ void BlockingMutex::Unlock() {
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping)
- syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
+ internal_syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
+// ----------------- sanitizer_linux.h
+// The actual size of this structure is specified by d_reclen.
+// Note that getdents64 uses a different structure format. We only provide the
+// 32-bit syscall here.
+struct linux_dirent {
+ unsigned long d_ino;
+ unsigned long d_off;
+ unsigned short d_reclen;
+ char d_name[256];
+};
+
+// Syscall wrappers.
+uptr internal_ptrace(int request, int pid, void *addr, void *data) {
+ return internal_syscall(__NR_ptrace, request, pid, addr, data);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ return internal_syscall(__NR_wait4, pid, status, options, 0 /* rusage */);
+}
+
+uptr internal_getpid() {
+ return internal_syscall(__NR_getpid);
+}
+
+uptr internal_getppid() {
+ return internal_syscall(__NR_getppid);
+}
+
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
+ return internal_syscall(__NR_getdents, fd, dirp, count);
+}
+
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+ return internal_syscall(__NR_lseek, fd, offset, whence);
+}
+
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
+ return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
+}
+
+uptr internal_sigaltstack(const struct sigaltstack *ss,
+ struct sigaltstack *oss) {
+ return internal_syscall(__NR_sigaltstack, ss, oss);
+}
+
+// ThreadLister implementation.
+ThreadLister::ThreadLister(int pid)
+ : pid_(pid),
+ descriptor_(-1),
+ buffer_(4096),
+ error_(true),
+ entry_((struct linux_dirent *)buffer_.data()),
+ bytes_read_(0) {
+ char task_directory_path[80];
+ internal_snprintf(task_directory_path, sizeof(task_directory_path),
+ "/proc/%d/task/", pid);
+ uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);
+ if (internal_iserror(openrv)) {
+ error_ = true;
+ Report("Can't open /proc/%d/task for reading.\n", pid);
+ } else {
+ error_ = false;
+ descriptor_ = openrv;
+ }
+}
+
+int ThreadLister::GetNextTID() {
+ int tid = -1;
+ do {
+ if (error_)
+ return -1;
+ if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries())
+ return -1;
+ if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' &&
+ entry_->d_name[0] <= '9') {
+ // Found a valid tid.
+ tid = (int)internal_atoll(entry_->d_name);
+ }
+ entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen);
+ } while (tid < 0);
+ return tid;
+}
+
+void ThreadLister::Reset() {
+ if (error_ || descriptor_ < 0)
+ return;
+ internal_lseek(descriptor_, 0, SEEK_SET);
+}
+
+ThreadLister::~ThreadLister() {
+ if (descriptor_ >= 0)
+ internal_close(descriptor_);
+}
+
+bool ThreadLister::error() { return error_; }
+
+bool ThreadLister::GetDirectoryEntries() {
+ CHECK_GE(descriptor_, 0);
+ CHECK_NE(error_, true);
+ bytes_read_ = internal_getdents(descriptor_,
+ (struct linux_dirent *)buffer_.data(),
+ buffer_.size());
+ if (internal_iserror(bytes_read_)) {
+ Report("Can't read directory entries from /proc/%d/task.\n", pid_);
+ error_ = true;
+ return false;
+ } else if (bytes_read_ == 0) {
+ return false;
+ }
+ entry_ = (struct linux_dirent *)buffer_.data();
+ return true;
+}
+
+uptr GetPageSize() {
+#if defined(__x86_64__) || defined(__i386__)
+ return EXEC_PAGESIZE;
+#else
+ return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
+#endif
+}
+
+// Match full names of the form /path/to/base_name{-,.}*
+bool LibraryNameIs(const char *full_name, const char *base_name) {
+ const char *name = full_name;
+ // Strip path.
+ while (*name != '\0') name++;
+ while (name > full_name && *name != '/') name--;
+ if (*name == '/') name++;
+ uptr base_name_length = internal_strlen(base_name);
+ if (internal_strncmp(name, base_name, base_name_length)) return false;
+ return (name[base_name_length] == '-' || name[base_name_length] == '.');
}
} // namespace __sanitizer
-#endif // __linux__
+#endif // SANITIZER_LINUX
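Among other things, this file now parses the permission column of /proc/self/maps into the kProtection* bits that MemoryMappingLayout::Next() can report. A small sketch of a caller that walks the mappings and keeps only executable ones (the buffer size is illustrative):

  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end, offset, prot;
  char filename[512];
  while (proc_maps.Next(&start, &end, &offset, filename, sizeof(filename),
                        &prot)) {
    if (!(prot & kProtectionExecute))
      continue;  // Skip non-code mappings.
    Printf("code: %p-%p %s\n", (void*)start, (void*)end, filename);
  }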
diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h
new file mode 100644
index 000000000000..ba68e6c2dd5a
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_linux.h
@@ -0,0 +1,65 @@
+//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Linux-specific syscall wrappers and classes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LINUX_H
+#define SANITIZER_LINUX_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+
+struct sigaltstack;
+
+namespace __sanitizer {
+// Dirent structure for getdents(). Note that this structure is different from
+// the one in <dirent.h>, which is used by readdir().
+struct linux_dirent;
+
+// Syscall wrappers.
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+uptr internal_sigaltstack(const struct sigaltstack* ss,
+ struct sigaltstack* oss);
+
+// This class reads thread IDs from /proc/<pid>/task using only syscalls.
+class ThreadLister {
+ public:
+ explicit ThreadLister(int pid);
+ ~ThreadLister();
+ // GetNextTID returns -1 if the list of threads is exhausted, or if there has
+ // been an error.
+ int GetNextTID();
+ void Reset();
+ bool error();
+
+ private:
+ bool GetDirectoryEntries();
+
+ int pid_;
+ int descriptor_;
+ InternalScopedBuffer<char> buffer_;
+ bool error_;
+ struct linux_dirent* entry_;
+ int bytes_read_;
+};
+
+void AdjustStackSizeLinux(void *attr, int verbosity);
+
+// Exposed for testing.
+uptr ThreadDescriptorSize();
+
+// Matches a library's file name against a base name (stripping path and version
+// information).
+bool LibraryNameIs(const char *full_name, const char *base_name);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX_H
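A sketch of how ThreadLister is meant to be consumed, for example by a stop-the-world implementation that needs every thread ID of a process (the loop body is illustrative):

  ThreadLister thread_lister((int)internal_getpid());
  for (int tid = thread_lister.GetNextTID(); tid >= 0;
       tid = thread_lister.GetNextTID()) {
    // Each tid is one entry of /proc/<pid>/task; -1 ends the iteration,
    // either because the list is exhausted or because error() is set.
    Printf("found thread %d\n", tid);
  }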
diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
new file mode 100644
index 000000000000..d9e2f5389606
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -0,0 +1,273 @@
+//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+
+#ifdef __x86_64__
+#include <asm/prctl.h>
+#endif
+#include <dlfcn.h>
+#include <pthread.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <unwind.h>
+
+#ifdef __x86_64__
+extern "C" int arch_prctl(int code, __sanitizer::uptr *addr);
+#endif
+
+namespace __sanitizer {
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ if (at_initialization) {
+ // This is the main thread. Libpthread may not be initialized yet.
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+
+ // Find the mapping that contains a stack variable.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr start, end, offset;
+ uptr prev_end = 0;
+ while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
+ if ((uptr)&rl < end)
+ break;
+ prev_end = end;
+ }
+ CHECK((uptr)&rl >= start && (uptr)&rl < end);
+
+ // Get stacksize from rlimit, but clip it so that it does not overlap
+ // with other mappings.
+ uptr stacksize = rl.rlim_cur;
+ if (stacksize > end - prev_end)
+ stacksize = end - prev_end;
+ // When running with unlimited stack size, we still want to set some limit.
+ // The unlimited stack size is caused by 'ulimit -s unlimited'.
+ // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
+ if (stacksize > kMaxThreadStackSize)
+ stacksize = kMaxThreadStackSize;
+ *stack_top = end;
+ *stack_bottom = end - stacksize;
+ return;
+ }
+ pthread_attr_t attr;
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ uptr stacksize = 0;
+ void *stackaddr = 0;
+ pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
+ pthread_attr_destroy(&attr);
+
+ *stack_top = (uptr)stackaddr + stacksize;
+ *stack_bottom = (uptr)stackaddr;
+ CHECK(stacksize < kMaxThreadStackSize); // Sanity check.
+}
+
+// Does not compile for Go because dlsym() requires -ldl
+#ifndef SANITIZER_GO
+bool SetEnv(const char *name, const char *value) {
+ void *f = dlsym(RTLD_NEXT, "setenv");
+ if (f == 0)
+ return false;
+ typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
+ setenv_ft setenv_f;
+ CHECK_EQ(sizeof(setenv_f), sizeof(f));
+ internal_memcpy(&setenv_f, &f, sizeof(f));
+ return setenv_f(name, value, 1) == 0;
+}
+#endif
+
+bool SanitizerSetThreadName(const char *name) {
+#ifdef PR_SET_NAME
+ return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
+#else
+ return false;
+#endif
+}
+
+bool SanitizerGetThreadName(char *name, int max_len) {
+#ifdef PR_GET_NAME
+ char buff[17];
+ if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
+ return false;
+ internal_strncpy(name, buff, max_len);
+ name[max_len] = 0;
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifndef SANITIZER_GO
+//------------------------- SlowUnwindStack -----------------------------------
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ StackTrace *b = (StackTrace*)param;
+ CHECK(b->size < b->max_size);
+ uptr pc = Unwind_GetIP(ctx);
+ b->trace[b->size++] = pc;
+ if (b->size == b->max_size) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+static bool MatchPc(uptr cur_pc, uptr trace_pc) {
+ return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
+}
+
+void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ this->size = 0;
+ this->max_size = max_depth;
+ if (max_depth > 1) {
+ _Unwind_Backtrace(Unwind_Trace, this);
+ // We need to pop a few frames so that pc is on top.
+ // trace[0] belongs to the current function so we always pop it.
+ int to_pop = 1;
+ /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
+ else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
+ else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
+ else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
+ else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
+ this->PopStackFrames(to_pop);
+ }
+ this->trace[0] = pc;
+}
+
+#endif // !SANITIZER_GO
+
+static uptr g_tls_size;
+
+#ifdef __i386__
+# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
+#else
+# define DL_INTERNAL_FUNCTION
+#endif
+
+void InitTlsSize() {
+#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
+ typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
+ get_tls_func get_tls;
+ void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+ CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
+ internal_memcpy(&get_tls, &get_tls_static_info_ptr,
+ sizeof(get_tls_static_info_ptr));
+ CHECK_NE(get_tls, 0);
+ size_t tls_size = 0;
+ size_t tls_align = 0;
+ get_tls(&tls_size, &tls_align);
+ g_tls_size = tls_size;
+#endif
+}
+
+uptr GetTlsSize() {
+ return g_tls_size;
+}
+
+// sizeof(struct thread) from glibc.
+#ifdef __x86_64__
+const uptr kThreadDescriptorSize = 2304;
+
+uptr ThreadDescriptorSize() {
+ return kThreadDescriptorSize;
+}
+#endif
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#ifndef SANITIZER_GO
+#ifdef __x86_64__
+ arch_prctl(ARCH_GET_FS, tls_addr);
+ *tls_size = GetTlsSize();
+ *tls_addr -= *tls_size;
+ *tls_addr += kThreadDescriptorSize;
+#else
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif
+
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+
+ if (!main) {
+ // If stack and tls intersect, make them non-intersecting.
+ if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
+ CHECK_GT(*tls_addr + *tls_size, *stk_addr);
+ CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
+ *stk_size -= *tls_size;
+ *tls_addr = *stk_addr + *stk_size;
+ }
+ }
+#else // SANITIZER_GO
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif // SANITIZER_GO
+}
+
+void AdjustStackSizeLinux(void *attr_, int verbosity) {
+ pthread_attr_t *attr = (pthread_attr_t *)attr_;
+ uptr stackaddr = 0;
+ size_t stacksize = 0;
+ pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+  // glibc returns (0 - stacksize) as the stack address when stacksize is
+  // set but stackaddr is not.
+ bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
+ // We place a lot of tool data into TLS, account for that.
+ const uptr minstacksize = GetTlsSize() + 128*1024;
+ if (stacksize < minstacksize) {
+ if (!stack_set) {
+ if (verbosity && stacksize != 0)
+ Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
+ minstacksize);
+ pthread_attr_setstacksize(attr, minstacksize);
+ } else {
+ Printf("Sanitizer: pre-allocated stack size is insufficient: "
+ "%zu < %zu\n", stacksize, minstacksize);
+ Printf("Sanitizer: pthread_create is likely to fail.\n");
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
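Two of the helpers added here are typically paired: GetThreadStackAndTls() records the stack and static TLS block when a thread starts, and AdjustStackSizeLinux() is called from a pthread_create interceptor so the requested stack can hold the tool's TLS data. A minimal sketch under those assumptions (the surrounding interceptor is not part of this file):

  // At thread start-up: remember where the stack and static TLS live.
  uptr stk_addr, stk_size, tls_addr, tls_size;
  GetThreadStackAndTls(/*main*/false, &stk_addr, &stk_size,
                       &tls_addr, &tls_size);

  // Before forwarding pthread_create(): grow an undersized stack request.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  AdjustStackSizeLinux(&attr, /*verbosity*/1);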
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index c4b8e4c2bcf2..f97d1e39c5bf 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -12,7 +12,15 @@
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
-#ifdef __APPLE__
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
+// the clients will most certainly use 64-bit ones as well.
+#ifndef _DARWIN_USE_64_BIT_INODE
+#define _DARWIN_USE_64_BIT_INODE 1
+#endif
+#include <stdio.h>
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
@@ -31,26 +39,37 @@
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
+#include <errno.h>
namespace __sanitizer {
+#include "sanitizer_syscall_generic.inc"
+
// ---------------------- sanitizer_libc.h
-void *internal_mmap(void *addr, size_t length, int prot, int flags,
- int fd, u64 offset) {
- return mmap(addr, length, prot, flags, fd, offset);
+uptr internal_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, u64 offset) {
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
}
-int internal_munmap(void *addr, uptr length) {
+uptr internal_munmap(void *addr, uptr length) {
return munmap(addr, length);
}
-int internal_close(fd_t fd) {
+uptr internal_close(fd_t fd) {
return close(fd);
}
-fd_t internal_open(const char *filename, bool write) {
- return open(filename,
- write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
+uptr internal_open(const char *filename, int flags) {
+ return open(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ return open(filename, flags, mode);
+}
+
+uptr OpenFile(const char *filename, bool write) {
+ return internal_open(filename,
+ write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
@@ -61,14 +80,26 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
return write(fd, buf, count);
}
+uptr internal_stat(const char *path, void *buf) {
+ return stat(path, (struct stat *)buf);
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+ return lstat(path, (struct stat *)buf);
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+ return fstat(fd, (struct stat *)buf);
+}
+
uptr internal_filesize(fd_t fd) {
struct stat st;
- if (fstat(fd, &st))
+ if (internal_fstat(fd, &st))
return -1;
return (uptr)st.st_size;
}
-int internal_dup2(int oldfd, int newfd) {
+uptr internal_dup2(int oldfd, int newfd) {
return dup2(oldfd, newfd);
}
@@ -76,10 +107,18 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
return readlink(path, buf, bufsize);
}
-int internal_sched_yield() {
+uptr internal_sched_yield() {
return sched_yield();
}
+void internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+uptr internal_getpid() {
+ return getpid();
+}
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@@ -131,9 +170,13 @@ void PrepareForSandboxing() {
// Nothing here for now.
}
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
// ----------------- sanitizer_procmaps.h
-MemoryMappingLayout::MemoryMappingLayout() {
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
}
@@ -186,7 +229,9 @@ void MemoryMappingLayout::LoadFromCache() {
template<u32 kLCSegment, typename SegmentCommand>
bool MemoryMappingLayout::NextSegmentLoad(
uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size) {
+ char filename[], uptr filename_size, uptr *protection) {
+ if (protection)
+ UNIMPLEMENTED();
const char* lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
@@ -211,7 +256,8 @@ bool MemoryMappingLayout::NextSegmentLoad(
}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size) {
+ char filename[], uptr filename_size,
+ uptr *protection) {
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
@@ -243,14 +289,14 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size))
+ start, end, offset, filename, filename_size, protection))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size))
+ start, end, offset, filename, filename_size, protection))
return true;
break;
}
@@ -264,18 +310,24 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[],
- uptr filename_size) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
+ uptr filename_size,
+ uptr *protection) {
+ return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
+ protection);
}
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
- CHECK(OS_SPINLOCK_INIT == 0);
- CHECK(owner_ != (uptr)pthread_self());
+ CHECK_EQ(OS_SPINLOCK_INIT, 0);
+ CHECK_NE(owner_, (uptr)pthread_self());
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
CHECK(!owner_);
owner_ = (uptr)pthread_self();
@@ -287,6 +339,27 @@ void BlockingMutex::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
+void BlockingMutex::CheckLocked() {
+ CHECK_EQ((uptr)pthread_self(), owner_);
+}
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void InitTlsSize() {
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = 0;
+ *tls_size = 0;
+}
+
} // namespace __sanitizer
-#endif // __APPLE__
+#endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
index 56438fce471c..469981c35176 100644
--- a/lib/sanitizer_common/sanitizer_mutex.h
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -70,8 +70,10 @@ class SpinMutex : public StaticSpinMutex {
class BlockingMutex {
public:
explicit BlockingMutex(LinkerInitialized);
+ BlockingMutex();
void Lock();
void Unlock();
+ void CheckLocked();
private:
uptr opaque_storage_[10];
uptr owner_; // for debugging
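With the new default constructor, BlockingMutex no longer has to be a linker-initialized global, and CheckLocked() lets functions that require the lock to be held assert that precondition. A minimal sketch (names are illustrative):

  static BlockingMutex mu(LINKER_INITIALIZED);

  void Update() {
    mu.Lock();
    // ... mutate shared state ...
    mu.Unlock();
  }

  void UpdateLocked() {
    // Callers must already hold mu; a violation becomes a CHECK failure
    // instead of silent data corruption.
    mu.CheckLocked();
    // ... mutate shared state ...
  }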
diff --git a/lib/sanitizer_common/sanitizer_placement_new.h b/lib/sanitizer_common/sanitizer_placement_new.h
index c0b85e1c1717..a42301aedeac 100644
--- a/lib/sanitizer_common/sanitizer_placement_new.h
+++ b/lib/sanitizer_common/sanitizer_placement_new.h
@@ -19,7 +19,7 @@
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
-#if (SANITIZER_WORDSIZE == 64) || defined(__APPLE__)
+#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_ptr_type;
#else
typedef u32 operator_new_ptr_type;
diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h
new file mode 100644
index 000000000000..acb997180957
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_platform.h
@@ -0,0 +1,46 @@
+//===-- sanitizer_platform.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common platform macros.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_H
+#define SANITIZER_PLATFORM_H
+
+#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+# error "This operating system is not supported"
+#endif
+
+#if defined(__linux__)
+# define SANITIZER_LINUX 1
+#else
+# define SANITIZER_LINUX 0
+#endif
+
+#if defined(__APPLE__)
+# define SANITIZER_MAC 1
+#else
+# define SANITIZER_MAC 0
+#endif
+
+#if defined(_WIN32)
+# define SANITIZER_WINDOWS 1
+#else
+# define SANITIZER_WINDOWS 0
+#endif
+
+#if defined(__ANDROID__) || defined(ANDROID)
+# define SANITIZER_ANDROID 1
+#else
+# define SANITIZER_ANDROID 0
+#endif
+
+#define SANITIZER_POSIX (SANITIZER_LINUX || SANITIZER_MAC)
+
+#endif // SANITIZER_PLATFORM_H
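Because every SANITIZER_* macro here is always defined to 0 or 1, platform checks can be written as ordinary #if expressions and freely combined with && and ||, instead of #ifdef chains. The intended style:

  #include "sanitizer_platform.h"

  #if SANITIZER_LINUX && !SANITIZER_ANDROID
  // glibc-only code.
  #elif SANITIZER_MAC
  // Darwin-specific code.
  #elif SANITIZER_WINDOWS
  // Windows code.
  #endif

  #if SANITIZER_POSIX
  // Shared by Linux and Mac.
  #endif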
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index abd41fe8c997..60c7145b611a 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -11,21 +11,38 @@
// given library functions on a given platform.
//
//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
+#define SANITIZER_PLATFORM_INTERCEPTORS_H
#include "sanitizer_internal_defs.h"
-#if !defined(_WIN32)
+#if !SANITIZER_WINDOWS
# define SI_NOT_WINDOWS 1
+# include "sanitizer_platform_limits_posix.h"
#else
# define SI_NOT_WINDOWS 0
#endif
-#if defined(__linux__) && !defined(ANDROID)
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define SI_LINUX_NOT_ANDROID 1
#else
# define SI_LINUX_NOT_ANDROID 0
#endif
+#if SANITIZER_LINUX
+# define SI_LINUX 1
+#else
+# define SI_LINUX 0
+#endif
+
+#if SANITIZER_MAC
+# define SI_MAC 1
+#else
+# define SI_MAC 0
+#endif
+
+# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+
# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
@@ -33,6 +50,30 @@
# define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PRCTL SI_LINUX
+
+# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
+
+# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
+
+# define SANITIZER_INTERCEPT_FREXP 1
+# define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
+
+# define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
+# define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
+# define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SCANF 1
+#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
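These switches are consumed by sanitizer_common_interceptors.inc, where each interceptor and its registration are wrapped in the matching SANITIZER_INTERCEPT_* guard. A simplified sketch of that pattern (the real interceptors also call the tool-provided COMMON_INTERCEPTOR_* hooks):

  #if SANITIZER_INTERCEPT_READ
  INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
    // Tool bookkeeping happens here, then the call is forwarded.
    return REAL(read)(fd, ptr, count);
  }
  # define INIT_READ INTERCEPT_FUNCTION(read)
  #else
  # define INIT_READ
  #endif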
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
new file mode 100644
index 000000000000..c269de65ca2c
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -0,0 +1,155 @@
+//===-- sanitizer_platform_limits_posix.cc --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX || SANITIZER_MAC
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <grp.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <time.h>
+
+#if !SANITIZER_ANDROID
+#include <sys/ucontext.h>
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+#include <link.h>
+#include <sys/vfs.h>
+#include <sys/epoll.h>
+#endif // SANITIZER_LINUX
+
+namespace __sanitizer {
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+ unsigned struct_stat64_sz = sizeof(struct stat64);
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+ unsigned struct_passwd_sz = sizeof(struct passwd);
+ unsigned struct_group_sz = sizeof(struct group);
+ unsigned siginfo_t_sz = sizeof(siginfo_t);
+ unsigned struct_sigaction_sz = sizeof(struct sigaction);
+ unsigned struct_itimerval_sz = sizeof(struct itimerval);
+ unsigned pthread_t_sz = sizeof(pthread_t);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+
+#if !SANITIZER_ANDROID
+ unsigned ucontext_t_sz = sizeof(ucontext_t);
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_dirent_sz = sizeof(struct dirent);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_dirent64_sz = sizeof(struct dirent64);
+ unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ uptr sig_ign = (uptr)SIG_IGN;
+ uptr sig_dfl = (uptr)SIG_DFL;
+
+ void* __sanitizer_get_msghdr_iov_iov_base(void* msg, int idx) {
+ return ((struct msghdr *)msg)->msg_iov[idx].iov_base;
+ }
+
+ uptr __sanitizer_get_msghdr_iov_iov_len(void* msg, int idx) {
+ return ((struct msghdr *)msg)->msg_iov[idx].iov_len;
+ }
+
+ uptr __sanitizer_get_msghdr_iovlen(void* msg) {
+ return ((struct msghdr *)msg)->msg_iovlen;
+ }
+
+ uptr __sanitizer_get_socklen_t(void* socklen_ptr) {
+ return *(socklen_t*)socklen_ptr;
+ }
+
+ uptr __sanitizer_get_sigaction_sa_sigaction(void *act) {
+ struct sigaction *a = (struct sigaction *)act;
+ // Check that sa_sigaction and sa_handler are the same.
+ CHECK((void *)&(a->sa_sigaction) == (void *)&(a->sa_handler));
+ return (uptr) a->sa_sigaction;
+ }
+ void __sanitizer_set_sigaction_sa_sigaction(void *act, uptr cb) {
+ struct sigaction *a = (struct sigaction *)act;
+ a->sa_sigaction = (void (*)(int, siginfo_t *, void *))cb;
+ }
+ bool __sanitizer_get_sigaction_sa_siginfo(void *act) {
+ struct sigaction *a = (struct sigaction *)act;
+ return a->sa_flags & SA_SIGINFO;
+ }
+
+ uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+ }
+} // namespace __sanitizer
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+COMPILER_CHECK(sizeof(__sanitizer::struct_sigaction_max_sz) >=
+ sizeof(__sanitizer::struct_sigaction_sz));
+#if SANITIZER_LINUX
+COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_addr) ==
+ offsetof(struct dl_phdr_info, dlpi_addr));
+COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_name) ==
+ offsetof(struct dl_phdr_info, dlpi_name));
+COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_phdr) ==
+ offsetof(struct dl_phdr_info, dlpi_phdr));
+COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_phnum) ==
+ offsetof(struct dl_phdr_info, dlpi_phnum));
+#endif
+
+COMPILER_CHECK(sizeof(struct __sanitizer_addrinfo) == sizeof(struct addrinfo));
+COMPILER_CHECK(offsetof(struct __sanitizer_addrinfo, ai_addr) ==
+ offsetof(struct addrinfo, ai_addr));
+COMPILER_CHECK(offsetof(struct __sanitizer_addrinfo, ai_canonname) ==
+ offsetof(struct addrinfo, ai_canonname));
+COMPILER_CHECK(offsetof(struct __sanitizer_addrinfo, ai_next) ==
+ offsetof(struct addrinfo, ai_next));
+
+COMPILER_CHECK(sizeof(struct __sanitizer_hostent) == sizeof(struct hostent));
+COMPILER_CHECK(offsetof(struct __sanitizer_hostent, h_name) ==
+ offsetof(struct hostent, h_name));
+COMPILER_CHECK(offsetof(struct __sanitizer_hostent, h_aliases) ==
+ offsetof(struct hostent, h_aliases));
+COMPILER_CHECK(offsetof(struct __sanitizer_hostent, h_addr_list) ==
+ offsetof(struct hostent, h_addr_list));
+
+#endif // SANITIZER_LINUX || SANITIZER_MAC
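The exported *_sz values let the shared interceptors state how many bytes a libc call touched without dragging platform headers into every tool, and the COMPILER_CHECKs above keep the mirrored layouts honest. An illustrative (hypothetical) interceptor using one of these constants:

  INTERCEPTOR(int, uname, void *buf) {
    void *ctx;
    COMMON_INTERCEPTOR_ENTER(ctx, uname, buf);
    int res = REAL(uname)(buf);
    if (!res)
      // Report sizeof(struct utsname) bytes written, without including
      // <sys/utsname.h> here.
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_utsname_sz);
    return res;
  }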
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
new file mode 100644
index 000000000000..37581953db24
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -0,0 +1,115 @@
+//===-- sanitizer_platform_limits_posix.h ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
+#define SANITIZER_PLATFORM_LIMITS_POSIX_H
+
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+ extern unsigned struct_utsname_sz;
+ extern unsigned struct_stat_sz;
+ extern unsigned struct_stat64_sz;
+ extern unsigned struct_rusage_sz;
+ extern unsigned struct_tm_sz;
+ extern unsigned struct_passwd_sz;
+ extern unsigned struct_group_sz;
+ extern unsigned struct_sigaction_sz;
+ extern unsigned siginfo_t_sz;
+ extern unsigned struct_itimerval_sz;
+ extern unsigned pthread_t_sz;
+ extern unsigned struct_sockaddr_sz;
+
+#if !SANITIZER_ANDROID
+ extern unsigned ucontext_t_sz;
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+ extern unsigned struct_rlimit_sz;
+ extern unsigned struct_dirent_sz;
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_epoll_event_sz;
+ extern unsigned struct_timespec_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned struct_dirent64_sz;
+ extern unsigned struct_rlimit64_sz;
+ extern unsigned struct_statfs64_sz;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ void* __sanitizer_get_msghdr_iov_iov_base(void* msg, int idx);
+ uptr __sanitizer_get_msghdr_iov_iov_len(void* msg, int idx);
+ uptr __sanitizer_get_msghdr_iovlen(void* msg);
+ uptr __sanitizer_get_socklen_t(void* socklen_ptr);
+
+  // The size of pthread_attr_t is platform-dependent; we only need an upper
+  // bound on it, verified with a compile-time assert in the .cc file.
+ const int pthread_attr_t_max_sz = 128;
+ union __sanitizer_pthread_attr_t {
+ char size[pthread_attr_t_max_sz]; // NOLINT
+ void *align;
+ };
+
+ uptr __sanitizer_get_sigaction_sa_sigaction(void *act);
+ void __sanitizer_set_sigaction_sa_sigaction(void *act, uptr cb);
+ bool __sanitizer_get_sigaction_sa_siginfo(void *act);
+
+ const unsigned struct_sigaction_max_sz = 256;
+ union __sanitizer_sigaction {
+ char size[struct_sigaction_max_sz]; // NOLINT
+ };
+
+ extern uptr sig_ign;
+ extern uptr sig_dfl;
+
+ uptr __sanitizer_in_addr_sz(int af);
+
+#if SANITIZER_LINUX
+ struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+ };
+#endif
+
+ struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+#if SANITIZER_ANDROID || SANITIZER_MAC
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+#else // LINUX
+ uptr ai_addrlen;
+ void *ai_addr;
+ char *ai_canonname;
+#endif
+ struct __sanitizer_addrinfo *ai_next;
+ };
+
+ struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+ };
+
+} // namespace __sanitizer
+
+#endif
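The opaque mirror types are sized placeholders: a tool can reserve storage for a pthread_attr_t without including <pthread.h> in its headers, and the compile-time asserts in the .cc file guarantee the reservation is large enough. A sketch of the usual pattern in a pthread_create interceptor (illustrative; not part of this header):

  __sanitizer_pthread_attr_t attr_storage;
  if (attr == 0) {
    pthread_attr_init((pthread_attr_t *)&attr_storage);
    attr = (pthread_attr_t *)&attr_storage;
  }
  AdjustStackSizeLinux((void *)attr, /*verbosity*/1);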
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index 32657838600d..af25b245fdd3 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -11,123 +11,106 @@
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
-#if defined(__linux__) || defined(__APPLE__)
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX || SANITIZER_MAC
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
-#include <errno.h>
-#include <pthread.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
namespace __sanitizer {
// ------------- sanitizer_common.h
-uptr GetPageSize() {
- return sysconf(_SC_PAGESIZE);
-}
-
uptr GetMmapGranularity() {
return GetPageSize();
}
-int GetPid() {
- return getpid();
-}
-
-uptr GetThreadSelf() {
- return (uptr)pthread_self();
-}
-
void *MmapOrDie(uptr size, const char *mem_type) {
size = RoundUpTo(size, GetPageSizeCached());
- void *res = internal_mmap(0, size,
+ uptr res = internal_mmap(0, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
- if (res == (void*)-1) {
+ int reserrno;
+ if (internal_iserror(res, &reserrno)) {
static int recursion_count;
if (recursion_count) {
// The Report() and CHECK calls below may call mmap recursively and fail.
// If we went into recursion, just die.
- RawWrite("AddressSanitizer is unable to mmap\n");
+ RawWrite("ERROR: Failed to mmap\n");
Die();
}
recursion_count++;
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s: %s\n",
- size, size, mem_type, strerror(errno));
+ Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %d\n",
+ SanitizerToolName, size, size, mem_type, reserrno);
DumpProcessMap();
CHECK("unable to mmap" && 0);
}
- return res;
+ return (void *)res;
}
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
- int res = internal_munmap(addr, size);
- if (res != 0) {
- Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- size, size, addr);
+ uptr res = internal_munmap(addr, size);
+ if (internal_iserror(res)) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
}
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
uptr PageSize = GetPageSizeCached();
- void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
RoundUpTo(size, PageSize),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
-1, 0);
- if (p == (void*)-1)
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
- size, size, fixed_addr, errno);
- return p;
+ int reserrno;
+ if (internal_iserror(p, &reserrno))
+ Report("ERROR: "
+ "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ SanitizerToolName, size, size, fixed_addr, reserrno);
+ return (void *)p;
}
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
uptr PageSize = GetPageSizeCached();
- void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
RoundUpTo(size, PageSize),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1, 0);
- if (p == (void*)-1) {
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
- size, size, fixed_addr, errno);
+ int reserrno;
+ if (internal_iserror(p, &reserrno)) {
+ Report("ERROR:"
+ " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ SanitizerToolName, size, size, fixed_addr, reserrno);
CHECK("unable to mmap" && 0);
}
- return p;
+ return (void *)p;
}
void *Mprotect(uptr fixed_addr, uptr size) {
- return internal_mmap((void*)fixed_addr, size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- -1, 0);
-}
-
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
- madvise((void*)addr, size, MADV_DONTNEED);
+ return (void *)internal_mmap((void*)fixed_addr, size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED |
+ MAP_NORESERVE, -1, 0);
}
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
- fd_t fd = internal_open(file_name, false);
- CHECK_NE(fd, kInvalidFd);
+ uptr openrv = OpenFile(file_name, false);
+ CHECK(!internal_iserror(openrv));
+ fd_t fd = openrv;
uptr fsize = internal_filesize(fd);
CHECK_NE(fsize, (uptr)-1);
CHECK_GT(fsize, 0);
*buff_size = RoundUpTo(fsize, GetPageSizeCached());
- void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
- return (map == MAP_FAILED) ? 0 : map;
+ uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return internal_iserror(map) ? 0 : (void *)map;
}
@@ -143,10 +126,11 @@ static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
- MemoryMappingLayout procmaps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
- while (procmaps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0)) {
+ while (proc_maps.Next(&start, &end,
+ /*offset*/0, /*filename*/0, /*filename_size*/0,
+ /*protection*/0)) {
if (!IntervalsAreSeparate(start, end, range_start, range_end))
return false;
}
@@ -154,13 +138,13 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
}
void DumpProcessMap() {
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
const sptr kBufSize = 4095;
char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
Report("Process memory map follows:\n");
while (proc_maps.Next(&start, &end, /* file_offset */0,
- filename, kBufSize)) {
+ filename, kBufSize, /* protection */0)) {
Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
}
Report("End of process memory map.\n");
@@ -171,58 +155,6 @@ const char *GetPwd() {
return GetEnv("PWD");
}
-void DisableCoreDumper() {
- struct rlimit nocore;
- nocore.rlim_cur = 0;
- nocore.rlim_max = 0;
- setrlimit(RLIMIT_CORE, &nocore);
-}
-
-bool StackSizeIsUnlimited() {
- struct rlimit rlim;
- CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
- return (rlim.rlim_cur == (uptr)-1);
-}
-
-void SetStackSizeLimitInBytes(uptr limit) {
- struct rlimit rlim;
- rlim.rlim_cur = limit;
- rlim.rlim_max = limit;
- if (setrlimit(RLIMIT_STACK, &rlim)) {
- Report("setrlimit() failed %d\n", errno);
- Die();
- }
- CHECK(!StackSizeIsUnlimited());
-}
-
-void SleepForSeconds(int seconds) {
- sleep(seconds);
-}
-
-void SleepForMillis(int millis) {
- usleep(millis * 1000);
-}
-
-void Exit(int exitcode) {
- _exit(exitcode);
-}
-
-void Abort() {
- abort();
-}
-
-int Atexit(void (*function)(void)) {
-#ifndef SANITIZER_GO
- return atexit(function);
-#else
- return 0;
-#endif
-}
-
-int internal_isatty(fd_t fd) {
- return isatty(fd);
-}
-
} // namespace __sanitizer
-#endif // __linux__ || __APPLE_
+#endif // SANITIZER_LINUX || SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
new file mode 100644
index 000000000000..43da171ba271
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -0,0 +1,116 @@
+//===-- sanitizer_posix_libcdep.cc ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements libc-dependent POSIX-specific functions
+// from sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+u32 GetUid() {
+ return getuid();
+}
+
+uptr GetThreadSelf() {
+ return (uptr)pthread_self();
+}
+
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+ madvise((void*)addr, size, MADV_DONTNEED);
+}
+
+void DisableCoreDumper() {
+ struct rlimit nocore;
+ nocore.rlim_cur = 0;
+ nocore.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, &nocore);
+}
+
+bool StackSizeIsUnlimited() {
+ struct rlimit rlim;
+ CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
+ return (rlim.rlim_cur == (uptr)-1);
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ struct rlimit rlim;
+ rlim.rlim_cur = limit;
+ rlim.rlim_max = limit;
+ if (setrlimit(RLIMIT_STACK, &rlim)) {
+ Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
+ Die();
+ }
+ CHECK(!StackSizeIsUnlimited());
+}
+
+void SleepForSeconds(int seconds) {
+ sleep(seconds);
+}
+
+void SleepForMillis(int millis) {
+ usleep(millis * 1000);
+}
+
+void Abort() {
+ abort();
+}
+
+int Atexit(void (*function)(void)) {
+#ifndef SANITIZER_GO
+ return atexit(function);
+#else
+ return 0;
+#endif
+}
+
+int internal_isatty(fd_t fd) {
+ return isatty(fd);
+}
+
+#ifndef SANITIZER_GO
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
+ uptr stack_top, uptr stack_bottom, bool fast) {
+#if !SANITIZER_CAN_FAST_UNWIND
+ fast = false;
+#endif
+#if SANITIZER_MAC
+ // Always unwind fast on Mac.
+ (void)fast;
+#else
+ if (!fast || (stack_top == stack_bottom))
+ return stack->SlowUnwindStack(pc, max_s);
+#endif // SANITIZER_MAC
+ stack->size = 0;
+ stack->trace[0] = pc;
+ if (max_s > 1) {
+ stack->max_size = max_s;
+ stack->FastUnwindStack(pc, bp, stack_top, stack_bottom);
+ }
+}
+#endif // SANITIZER_GO
+
+} // namespace __sanitizer
+
+#endif
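
FlushUnneededShadowMemory() above returns dirty shadow pages to the kernel with madvise(MADV_DONTNEED) while keeping the mapping itself intact. A minimal standalone sketch of that pattern, using plain POSIX calls rather than the sanitizer wrappers (the 1 MiB size and the anonymous mapping are illustrative assumptions):

#include <sys/mman.h>
#include <cassert>
#include <cstring>

int main() {
  const size_t kSize = 1 << 20;  // 1 MiB of anonymous memory, illustrative only
  void *p = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  std::memset(p, 0xab, kSize);   // dirty every page
  // Equivalent of FlushUnneededShadowMemory(p, kSize): the range stays mapped,
  // but the kernel may reclaim the physical pages behind it.
  madvise(p, kSize, MADV_DONTNEED);
  assert(((unsigned char *)p)[0] == 0);  // anonymous pages read back as zero
  munmap(p, kSize);
  return 0;
}

After the call the range is still addressable; on Linux, untouched anonymous pages simply read back as zero until they are written again.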
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
index 2393e8f2b87b..5935d7f17a5e 100644
--- a/lib/sanitizer_common/sanitizer_printf.cc
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -21,8 +21,14 @@
#include <stdio.h>
#include <stdarg.h>
+#if SANITIZER_WINDOWS
+# define va_copy(dst, src) ((dst) = (src))
+#endif
+
namespace __sanitizer {
+StaticSpinMutex CommonSanitizerReportMutex;
+
static int AppendChar(char **buff, const char *buff_end, char c) {
if (*buff < buff_end) {
**buff = c;
@@ -173,17 +179,86 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)) {
PrintfAndReportCallback = callback;
}
-void Printf(const char *format, ...) {
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+// Can be overridden in the frontend.
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+void OnPrint(const char *str);
+#endif
+
+static void CallPrintfAndReportCallback(const char *str) {
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+ if (&OnPrint != NULL)
+ OnPrint(str);
+#endif
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(str);
+}
+
+static void SharedPrintfCode(bool append_pid, const char *format,
+ va_list args) {
+ va_list args2;
+ va_copy(args2, args);
const int kLen = 16 * 1024;
- InternalScopedBuffer<char> buffer(kLen);
+ // |local_buffer| is small enough not to overflow the stack and/or violate
+ // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other
+ // hand, the bigger the buffer is, the more the chance the error report will
+ // fit into it.
+ char local_buffer[400];
+ int needed_length;
+ char *buffer = local_buffer;
+ int buffer_size = ARRAY_SIZE(local_buffer);
+ // First try to print a message using a local buffer, and then fall back to
+ // mmaped buffer.
+ for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
+ if (use_mmap) {
+ va_end(args);
+ va_copy(args, args2);
+ buffer = (char*)MmapOrDie(kLen, "Report");
+ buffer_size = kLen;
+ }
+ needed_length = 0;
+ if (append_pid) {
+ int pid = internal_getpid();
+ needed_length += internal_snprintf(buffer, buffer_size, "==%d==", pid);
+ if (needed_length >= buffer_size) {
+ // The pid doesn't fit into the current buffer.
+ if (!use_mmap)
+ continue;
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ }
+ }
+ needed_length += VSNPrintf(buffer + needed_length,
+ buffer_size - needed_length, format, args);
+ if (needed_length >= buffer_size) {
+ // The message doesn't fit into the current buffer.
+ if (!use_mmap)
+ continue;
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ }
+ // If the message fit into the buffer, print it and exit.
+ break;
+ }
+ RawWrite(buffer);
+ CallPrintfAndReportCallback(buffer);
+ // If we had mapped any memory, clean up.
+ if (buffer != local_buffer)
+ UnmapOrDie((void *)buffer, buffer_size);
+ va_end(args2);
+}
+
+void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
- int needed_length = VSNPrintf(buffer.data(), kLen, format, args);
+ SharedPrintfCode(false, format, args);
+ va_end(args);
+}
+
+// Like Printf, but prints the current PID before the output string.
+void Report(const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ SharedPrintfCode(true, format, args);
va_end(args);
- RAW_CHECK_MSG(needed_length < kLen, "Buffer in Printf is too short!\n");
- RawWrite(buffer.data());
- if (PrintfAndReportCallback)
- PrintfAndReportCallback(buffer.data());
}
// Writes at most "length" symbols to "buffer" (including trailing '\0').
@@ -198,22 +273,4 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
-// Like Printf, but prints the current PID before the output string.
-void Report(const char *format, ...) {
- const int kLen = 16 * 1024;
- InternalScopedBuffer<char> buffer(kLen);
- int needed_length = internal_snprintf(buffer.data(),
- kLen, "==%d== ", GetPid());
- RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
- va_list args;
- va_start(args, format);
- needed_length += VSNPrintf(buffer.data() + needed_length,
- kLen - needed_length, format, args);
- va_end(args);
- RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
- RawWrite(buffer.data());
- if (PrintfAndReportCallback)
- PrintfAndReportCallback(buffer.data());
-}
-
} // namespace __sanitizer
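
SharedPrintfCode() above formats into a 400-byte stack buffer first and only falls back to a 16 KB mmapped buffer when the message does not fit, replaying the argument list via va_copy. A standalone sketch of the same two-pass idea, with vsnprintf() and malloc() standing in for the internal VSNPrintf()/MmapOrDie() (an illustrative substitution, not the patch code):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

// Try a small on-stack buffer first; only fall back to a heap buffer when the
// message does not fit. The 400/16384 sizes mirror the patch above.
static void print_two_pass(const char *format, ...) {
  char local_buffer[400];
  va_list args, args2;
  va_start(args, format);
  va_copy(args2, args);                       // keep a replayable copy
  int needed = vsnprintf(local_buffer, sizeof(local_buffer), format, args);
  va_end(args);
  if (needed < (int)sizeof(local_buffer)) {
    std::fputs(local_buffer, stdout);
  } else {
    char *big = (char *)std::malloc(16 * 1024);  // second pass, larger buffer
    vsnprintf(big, 16 * 1024, format, args2);
    std::fputs(big, stdout);
    std::free(big);
  }
  va_end(args2);
}

int main() {
  print_two_pass("pid=%d msg=%s\n", 123, "fits in the small buffer");
  return 0;
}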
diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h
index 1b8ea7aff165..b96f09ec4561 100644
--- a/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/lib/sanitizer_common/sanitizer_procmaps.h
@@ -19,51 +19,63 @@
namespace __sanitizer {
-#ifdef _WIN32
+#if SANITIZER_WINDOWS
class MemoryMappingLayout {
public:
- MemoryMappingLayout() {}
+ explicit MemoryMappingLayout(bool cache_enabled) {
+ (void)cache_enabled;
+ }
bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size) {
+ char filename[], uptr filename_size,
+ uptr *protection) {
UNIMPLEMENTED();
}
};
#else // _WIN32
-#if defined(__linux__)
+#if SANITIZER_LINUX
struct ProcSelfMapsBuff {
char *data;
uptr mmaped_size;
uptr len;
};
-#endif // defined(__linux__)
+#endif // SANITIZER_LINUX
class MemoryMappingLayout {
public:
- MemoryMappingLayout();
+ explicit MemoryMappingLayout(bool cache_enabled);
bool Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size);
+ char filename[], uptr filename_size, uptr *protection);
void Reset();
// Gets the object file name and the offset in that object for a given
// address 'addr'. Returns true on success.
bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size);
+ char filename[], uptr filename_size,
+ uptr *protection);
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
static void CacheMemoryMappings();
~MemoryMappingLayout();
+ // Memory protection masks.
+ static const uptr kProtectionRead = 1;
+ static const uptr kProtectionWrite = 2;
+ static const uptr kProtectionExecute = 4;
+ static const uptr kProtectionShared = 8;
+
private:
void LoadFromCache();
// Default implementation of GetObjectNameAndOffset.
// Quite slow, because it iterates through the whole process map for each
// lookup.
bool IterateForObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size) {
+ char filename[], uptr filename_size,
+ uptr *protection) {
Reset();
uptr start, end, file_offset;
- for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size);
+ for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size,
+ protection);
i++) {
if (addr >= start && addr < end) {
// Don't subtract 'start' for the first entry:
@@ -86,17 +98,18 @@ class MemoryMappingLayout {
return false;
}
-# if defined __linux__
+# if SANITIZER_LINUX
ProcSelfMapsBuff proc_self_maps_;
char *current_;
// Static mappings cache.
static ProcSelfMapsBuff cached_proc_self_maps_;
static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
-# elif defined __APPLE__
+# elif SANITIZER_MAC
template<u32 kLCSegment, typename SegmentCommand>
bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size);
+ char filename[], uptr filename_size,
+ uptr *protection);
int current_image_;
u32 current_magic_;
u32 current_filetype_;
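
The new kProtection* masks let callers of MemoryMappingLayout::Next() ask for the access bits of each mapping. A hedged standalone sketch of how the rwxp field of a /proc/self/maps line maps onto that bit layout; the parsing below is illustrative and is not the sanitizer_procmaps implementation:

#include <cstdio>

// Same bit layout as MemoryMappingLayout::kProtection* above.
static const unsigned kProtectionRead = 1;
static const unsigned kProtectionWrite = 2;
static const unsigned kProtectionExecute = 4;
static const unsigned kProtectionShared = 8;

static unsigned DecodeProtection(const char *perms) {
  unsigned p = 0;
  if (perms[0] == 'r') p |= kProtectionRead;
  if (perms[1] == 'w') p |= kProtectionWrite;
  if (perms[2] == 'x') p |= kProtectionExecute;
  if (perms[3] == 's') p |= kProtectionShared;
  return p;
}

int main() {
  FILE *f = std::fopen("/proc/self/maps", "r");
  if (!f) return 1;
  char line[512];
  while (std::fgets(line, sizeof(line), f)) {
    unsigned long start, end;
    char perms[5] = {0};
    if (std::sscanf(line, "%lx-%lx %4s", &start, &end, perms) == 3)
      std::printf("%lx-%lx protection=0x%x\n", start, end,
                  DecodeProtection(perms));
  }
  std::fclose(f);
  return 0;
}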
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
index ec90d2d6871b..599d13645dd7 100644
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -159,7 +159,7 @@ class QuarantineCache {
atomic_store(&size_, Size() + add, memory_order_relaxed);
}
- QuarantineBatch *NOINLINE AllocBatch(Callback cb) {
+ NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
b->count = 0;
b->size = 0;
@@ -167,6 +167,6 @@ class QuarantineCache {
return b;
}
};
-}
+} // namespace __sanitizer
#endif // #ifndef SANITIZER_QUARANTINE_H
diff --git a/lib/sanitizer_common/sanitizer_report_decorator.h b/lib/sanitizer_common/sanitizer_report_decorator.h
index 50a3ee572fdb..49334d5e0c71 100644
--- a/lib/sanitizer_common/sanitizer_report_decorator.h
+++ b/lib/sanitizer_common/sanitizer_report_decorator.h
@@ -14,24 +14,26 @@
//
//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ALLOCATOR_H
-#define SANITIZER_ALLOCATOR_H
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
namespace __sanitizer {
class AnsiColorDecorator {
public:
explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
- const char *Black() { return ansi_ ? "\033[1m\033[30m" : ""; }
- const char *Red() { return ansi_ ? "\033[1m\033[31m" : ""; }
- const char *Green() { return ansi_ ? "\033[1m\033[32m" : ""; }
- const char *Yellow() { return ansi_ ? "\033[1m\033[33m" : ""; }
- const char *Blue() { return ansi_ ? "\033[1m\033[34m" : ""; }
- const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; }
- const char *Cyan() { return ansi_ ? "\033[1m\033[36m" : ""; }
- const char *White() { return ansi_ ? "\033[1m\033[37m" : ""; }
- const char *Default() { return ansi_ ? "\033[1m\033[0m" : ""; }
+ const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
+ const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
+ const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
+ const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; }
+ const char *Yellow() const { return ansi_ ? "\033[1m\033[33m" : ""; }
+ const char *Blue() const { return ansi_ ? "\033[1m\033[34m" : ""; }
+ const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; }
+ const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; }
+ const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; }
+ const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
private:
bool ansi_;
};
} // namespace __sanitizer
-#endif // SANITIZER_ALLOCATOR_H
+
+#endif // SANITIZER_REPORT_DECORATOR_H
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.h b/lib/sanitizer_common/sanitizer_stackdepot.h
index 49e6669dd203..5915fdbb4310 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -13,7 +13,7 @@
#ifndef SANITIZER_STACKDEPOT_H
#define SANITIZER_STACKDEPOT_H
-#include "sanitizer/common_interface_defs.h"
+#include "sanitizer_internal_defs.h"
namespace __sanitizer {
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.cc b/lib/sanitizer_common/sanitizer_stacktrace.cc
index 109a674e45b3..724c29c86b66 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -17,8 +17,9 @@
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-static const char *StripPathPrefix(const char *filepath,
- const char *strip_file_prefix) {
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix) {
+ if (filepath == 0) return 0;
if (filepath == internal_strstr(filepath, strip_file_prefix))
return filepath + internal_strlen(strip_file_prefix);
return filepath;
@@ -63,7 +64,7 @@ static void PrintModuleAndOffset(const char *module, uptr offset,
void StackTrace::PrintStack(const uptr *addr, uptr size,
bool symbolize, const char *strip_file_prefix,
SymbolizeCallback symbolize_callback ) {
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
InternalScopedBuffer<AddressInfo> addr_frames(64);
uptr frame_num = 0;
@@ -84,7 +85,7 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
frame_num++;
}
}
- if (symbolize && addr_frames_num == 0) {
+ if (symbolize && addr_frames_num == 0 && &SymbolizeCode) {
// Use our own (online) symbolizer, if necessary.
addr_frames_num = SymbolizeCode(pc, addr_frames.data(),
addr_frames.size());
@@ -112,7 +113,8 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
PrintStackFramePrefix(frame_num, pc);
uptr offset;
if (proc_maps.GetObjectNameAndOffset(pc, &offset,
- buff.data(), buff.size())) {
+ buff.data(), buff.size(),
+ /* protection */0)) {
PrintModuleAndOffset(buff.data(), offset, strip_file_prefix);
}
Printf("\n");
@@ -130,10 +132,12 @@ void StackTrace::FastUnwindStack(uptr pc, uptr bp,
CHECK(size == 0 && trace[0] == pc);
size = 1;
uhwptr *frame = (uhwptr *)bp;
- uhwptr *prev_frame = frame;
- while (frame >= prev_frame &&
+ uhwptr *prev_frame = frame - 1;
+ // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
+ while (frame > prev_frame &&
frame < (uhwptr *)stack_top - 2 &&
frame > (uhwptr *)stack_bottom &&
+ IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_size) {
uhwptr pc1 = frame[1];
if (pc1 != pc) {
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.h b/lib/sanitizer_common/sanitizer_stacktrace.h
index 597d24fd067f..fcfdd7e0b59b 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -19,6 +19,14 @@ namespace __sanitizer {
static const uptr kStackTraceMax = 256;
+#if SANITIZER_LINUX && (defined(__arm__) || \
+ defined(__powerpc__) || defined(__powerpc64__) || \
+ defined(__sparc__))
+#define SANITIZER_CAN_FAST_UNWIND 0
+#else
+#define SANITIZER_CAN_FAST_UNWIND 1
+#endif
+
struct StackTrace {
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size);
@@ -57,6 +65,13 @@ struct StackTrace {
u32 *compressed, uptr size);
};
+
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix);
+
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
+ uptr stack_top, uptr stack_bottom, bool fast);
+
} // namespace __sanitizer
// Use this macro if you want to print stack trace with the caller
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld.h b/lib/sanitizer_common/sanitizer_stoptheworld.h
new file mode 100644
index 000000000000..cc9408bb845f
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stoptheworld.h
@@ -0,0 +1,68 @@
+//===-- sanitizer_stoptheworld.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the StopTheWorld function which suspends the execution of the current
+// process and runs the user-supplied callback in the same address space.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STOPTHEWORLD_H
+#define SANITIZER_STOPTHEWORLD_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+typedef int SuspendedThreadID;
+
+// Holds the list of suspended threads and provides an interface to dump their
+// register contexts.
+class SuspendedThreadsList {
+ public:
+ SuspendedThreadsList()
+ : thread_ids_(1024) {}
+ SuspendedThreadID GetThreadID(uptr index) const {
+ CHECK_LT(index, thread_ids_.size());
+ return thread_ids_[index];
+ }
+ int GetRegistersAndSP(uptr index, uptr *buffer, uptr *sp) const;
+ // The buffer in GetRegistersAndSP should be at least this big.
+ static uptr RegisterCount();
+ uptr thread_count() const { return thread_ids_.size(); }
+ bool Contains(SuspendedThreadID thread_id) const {
+ for (uptr i = 0; i < thread_ids_.size(); i++) {
+ if (thread_ids_[i] == thread_id)
+ return true;
+ }
+ return false;
+ }
+ void Append(SuspendedThreadID thread_id) {
+ thread_ids_.push_back(thread_id);
+ }
+
+ private:
+ InternalVector<SuspendedThreadID> thread_ids_;
+
+ // Prohibit copy and assign.
+ SuspendedThreadsList(const SuspendedThreadsList&);
+ void operator=(const SuspendedThreadsList&);
+};
+
+typedef void (*StopTheWorldCallback)(
+ const SuspendedThreadsList &suspended_threads_list,
+ void *argument);
+
+// Suspend all threads in the current process and run the callback on the list
+// of suspended threads. This function will resume the threads before returning.
+// The callback should not call any libc functions.
+// This function should NOT be called from multiple threads simultaneously.
+void StopTheWorld(StopTheWorldCallback callback, void *argument);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STOPTHEWORLD_H
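
A hypothetical caller of the API declared above, compilable only inside a tool that links sanitizer_common; the ScanArgument struct, ScanCallback() and ScanAllThreadsOnce() names are made up for the example, while StopTheWorld(), SuspendedThreadsList and Report() come from the headers in this patch:

#include "sanitizer_stoptheworld.h"

namespace __sanitizer {

struct ScanArgument {
  uptr threads_seen;
};

static void ScanCallback(const SuspendedThreadsList &suspended_threads_list,
                         void *argument) {
  // Runs in the tracer task while every other thread is stopped; it must not
  // call libc functions.
  ScanArgument *arg = (ScanArgument *)argument;
  arg->threads_seen = suspended_threads_list.thread_count();
  for (uptr i = 0; i < suspended_threads_list.thread_count(); i++) {
    SuspendedThreadID tid = suspended_threads_list.GetThreadID(i);
    (void)tid;  // e.g. fetch this thread's registers with GetRegistersAndSP(i, ...)
  }
}

void ScanAllThreadsOnce() {
  ScanArgument arg = { 0 };
  StopTheWorld(ScanCallback, &arg);  // threads are resumed before this returns
  Report("Scanned %zd suspended threads\n", arg.threads_seen);
}

}  // namespace __sanitizer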
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
new file mode 100644
index 000000000000..e5284ee2211a
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -0,0 +1,403 @@
+//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_stoptheworld.h"
+
+#include <errno.h>
+#include <sched.h> // for clone
+#include <stddef.h>
+#include <sys/prctl.h> // for PR_* definitions
+#include <sys/ptrace.h> // for PTRACE_* definitions
+#include <sys/types.h> // for pid_t
+#if SANITIZER_ANDROID && defined(__arm__)
+# include <linux/user.h> // for pt_regs
+#else
+# include <sys/user.h> // for user_regs_struct
+#endif
+#include <sys/wait.h> // for signal-related stuff
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+
+// This module works by spawning a Linux task which then attaches to every
+// thread in the caller process with ptrace. This suspends the threads, and
+// PTRACE_GETREGS can then be used to obtain their register state. The callback
+// supplied to StopTheWorld() is run in the tracer task while the threads are
+// suspended.
+// The tracer task must be placed in a different thread group for ptrace to
+// work, so it cannot be spawned as a pthread. Instead, we use the low-level
+// clone() interface (we want to share the address space with the caller
+// process, so we prefer clone() over fork()).
+//
+// We avoid the use of libc for two reasons:
+// 1. calling a library function while threads are suspended could cause a
+// deadlock, if one of the threads happens to be holding a libc lock;
+// 2. it's generally not safe to call libc functions from the tracer task,
+// because clone() does not set up thread-local storage for it. Any
+// thread-local variables used by libc will be shared between the tracer task
+// and the thread which spawned it.
+//
+// We deal with this by replacing libc calls with calls to our own
+// implementations defined in sanitizer_libc.h and sanitizer_linux.h. However,
+// there are still some libc functions which are used here:
+//
+// * All of the system calls ultimately go through the libc syscall() function.
+// We're operating under the assumption that syscall()'s implementation does
+// not acquire any locks or use any thread-local data (except for the errno
+// variable, which we handle separately).
+//
+// * We lack custom implementations of sigfillset() and sigaction(), so we use
+// the libc versions instead. The same assumptions as above apply.
+//
+// * It is safe to call libc functions before the cloned thread is spawned or
+// after it has exited. The following functions are used in this manner:
+// sigdelset()
+// sigprocmask()
+// clone()
+
+COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
+
+namespace __sanitizer {
+// This class handles thread suspending/unsuspending in the tracer thread.
+class ThreadSuspender {
+ public:
+ explicit ThreadSuspender(pid_t pid)
+ : pid_(pid) {
+ CHECK_GE(pid, 0);
+ }
+ bool SuspendAllThreads();
+ void ResumeAllThreads();
+ void KillAllThreads();
+ SuspendedThreadsList &suspended_threads_list() {
+ return suspended_threads_list_;
+ }
+ private:
+ SuspendedThreadsList suspended_threads_list_;
+ pid_t pid_;
+ bool SuspendThread(SuspendedThreadID thread_id);
+};
+
+bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
+ // Are we already attached to this thread?
+ // Currently this check takes linear time, however the number of threads is
+ // usually small.
+ if (suspended_threads_list_.Contains(thread_id))
+ return false;
+ int pterrno;
+ if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL),
+ &pterrno)) {
+ // Either the thread is dead, or something prevented us from attaching.
+ // Log this event and move on.
+ Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
+ return false;
+ } else {
+ if (SanitizerVerbosity > 0)
+ Report("Attached to thread %d.\n", thread_id);
+ // The thread is not guaranteed to stop before ptrace returns, so we must
+ // wait on it.
+ uptr waitpid_status;
+ HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL));
+ int wperrno;
+ if (internal_iserror(waitpid_status, &wperrno)) {
+      // Got an ECHILD error. I don't think this situation is possible, but it
+ // doesn't hurt to report it.
+ Report("Waiting on thread %d failed, detaching (errno %d).\n", thread_id,
+ wperrno);
+ internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
+ return false;
+ }
+ suspended_threads_list_.Append(thread_id);
+ return true;
+ }
+}
+
+void ThreadSuspender::ResumeAllThreads() {
+ for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
+ pid_t tid = suspended_threads_list_.GetThreadID(i);
+ int pterrno;
+ if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
+ &pterrno)) {
+ if (SanitizerVerbosity > 0)
+ Report("Detached from thread %d.\n", tid);
+ } else {
+ // Either the thread is dead, or we are already detached.
+ // The latter case is possible, for instance, if this function was called
+ // from a signal handler.
+ Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
+ }
+ }
+}
+
+void ThreadSuspender::KillAllThreads() {
+ for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
+ internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
+ NULL, NULL);
+}
+
+bool ThreadSuspender::SuspendAllThreads() {
+ ThreadLister thread_lister(pid_);
+ bool added_threads;
+ do {
+ // Run through the directory entries once.
+ added_threads = false;
+ pid_t tid = thread_lister.GetNextTID();
+ while (tid >= 0) {
+ if (SuspendThread(tid))
+ added_threads = true;
+ tid = thread_lister.GetNextTID();
+ }
+ if (thread_lister.error()) {
+ // Detach threads and fail.
+ ResumeAllThreads();
+ return false;
+ }
+ thread_lister.Reset();
+ } while (added_threads);
+ return true;
+}
+
+// Pointer to the ThreadSuspender instance for use in signal handler.
+static ThreadSuspender *thread_suspender_instance = NULL;
+
+// Signals that should not be blocked (this is used in the parent thread as well
+// as the tracer thread).
+static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
+ SIGBUS, SIGXCPU, SIGXFSZ };
+
+// Structure for passing arguments into the tracer thread.
+struct TracerThreadArgument {
+ StopTheWorldCallback callback;
+ void *callback_argument;
+  // The tracer thread waits on this mutex while the parent finishes its
+  // preparations.
+ BlockingMutex mutex;
+};
+
+// Signal handler to wake up suspended threads when the tracer thread dies.
+void TracerThreadSignalHandler(int signum, siginfo_t *siginfo, void *) {
+ if (thread_suspender_instance != NULL) {
+ if (signum == SIGABRT)
+ thread_suspender_instance->KillAllThreads();
+ else
+ thread_suspender_instance->ResumeAllThreads();
+ }
+ internal__exit((signum == SIGABRT) ? 1 : 2);
+}
+
+// Size of alternative stack for signal handlers in the tracer thread.
+static const int kHandlerStackSize = 4096;
+
+// This function will be run as a cloned task.
+static int TracerThread(void* argument) {
+ TracerThreadArgument *tracer_thread_argument =
+ (TracerThreadArgument *)argument;
+
+ // Wait for the parent thread to finish preparations.
+ tracer_thread_argument->mutex.Lock();
+ tracer_thread_argument->mutex.Unlock();
+
+ ThreadSuspender thread_suspender(internal_getppid());
+ // Global pointer for the signal handler.
+ thread_suspender_instance = &thread_suspender;
+
+ // Alternate stack for signal handling.
+ InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
+ struct sigaltstack handler_stack;
+ internal_memset(&handler_stack, 0, sizeof(handler_stack));
+ handler_stack.ss_sp = handler_stack_memory.data();
+ handler_stack.ss_size = kHandlerStackSize;
+ internal_sigaltstack(&handler_stack, NULL);
+
+ // Install our handler for fatal signals. Other signals should be blocked by
+ // the mask we inherited from the caller thread.
+ for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
+ signal_index++) {
+ struct sigaction new_sigaction;
+ internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
+ new_sigaction.sa_sigaction = TracerThreadSignalHandler;
+ new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ sigfillset(&new_sigaction.sa_mask);
+ sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
+ }
+
+ int exit_code = 0;
+ if (!thread_suspender.SuspendAllThreads()) {
+ Report("Failed suspending threads.\n");
+ exit_code = 3;
+ } else {
+ tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
+ tracer_thread_argument->callback_argument);
+ thread_suspender.ResumeAllThreads();
+ exit_code = 0;
+ }
+ thread_suspender_instance = NULL;
+ handler_stack.ss_flags = SS_DISABLE;
+ internal_sigaltstack(&handler_stack, NULL);
+ return exit_code;
+}
+
+class ScopedStackSpaceWithGuard {
+ public:
+ explicit ScopedStackSpaceWithGuard(uptr stack_size) {
+ stack_size_ = stack_size;
+ guard_size_ = GetPageSizeCached();
+ // FIXME: Omitting MAP_STACK here works in current kernels but might break
+ // in the future.
+ guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
+ "ScopedStackWithGuard");
+ CHECK_EQ(guard_start_, (uptr)Mprotect((uptr)guard_start_, guard_size_));
+ }
+ ~ScopedStackSpaceWithGuard() {
+ UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
+ }
+ void *Bottom() const {
+ return (void *)(guard_start_ + stack_size_ + guard_size_);
+ }
+
+ private:
+ uptr stack_size_;
+ uptr guard_size_;
+ uptr guard_start_;
+};
+
+static sigset_t blocked_sigset;
+static sigset_t old_sigset;
+static struct sigaction old_sigactions[ARRAY_SIZE(kUnblockedSignals)];
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ // Block all signals that can be blocked safely, and install default handlers
+ // for the remaining signals.
+ // We cannot allow user-defined handlers to run while the ThreadSuspender
+ // thread is active, because they could conceivably call some libc functions
+ // which modify errno (which is shared between the two threads).
+ sigfillset(&blocked_sigset);
+ for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
+ signal_index++) {
+ // Remove the signal from the set of blocked signals.
+ sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
+ // Install the default handler.
+ struct sigaction new_sigaction;
+ internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
+ new_sigaction.sa_handler = SIG_DFL;
+ sigfillset(&new_sigaction.sa_mask);
+ sigaction(kUnblockedSignals[signal_index], &new_sigaction,
+ &old_sigactions[signal_index]);
+ }
+ int sigprocmask_status = sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
+ CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail
+ // Make this process dumpable. Processes that are not dumpable cannot be
+ // attached to.
+ int process_was_dumpable = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
+ if (!process_was_dumpable)
+ internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+ // Prepare the arguments for TracerThread.
+ struct TracerThreadArgument tracer_thread_argument;
+ tracer_thread_argument.callback = callback;
+ tracer_thread_argument.callback_argument = argument;
+ const uptr kTracerStackSize = 2 * 1024 * 1024;
+ ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
+ // Block the execution of TracerThread until after we have set ptrace
+ // permissions.
+ tracer_thread_argument.mutex.Lock();
+ pid_t tracer_pid = clone(TracerThread, tracer_stack.Bottom(),
+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
+ &tracer_thread_argument, 0, 0, 0);
+ if (tracer_pid < 0) {
+ Report("Failed spawning a tracer thread (errno %d).\n", errno);
+ tracer_thread_argument.mutex.Unlock();
+ } else {
+ // On some systems we have to explicitly declare that we want to be traced
+ // by the tracer thread.
+#ifdef PR_SET_PTRACER
+ internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
+#endif
+ // Allow the tracer thread to start.
+ tracer_thread_argument.mutex.Unlock();
+ // Since errno is shared between this thread and the tracer thread, we
+ // must avoid using errno while the tracer thread is running.
+ // At this point, any signal will either be blocked or kill us, so waitpid
+ // should never return (and set errno) while the tracer thread is alive.
+ uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
+ int wperrno;
+ if (internal_iserror(waitpid_status, &wperrno))
+ Report("Waiting on the tracer thread failed (errno %d).\n", wperrno);
+ }
+ // Restore the dumpable flag.
+ if (!process_was_dumpable)
+ internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
+ // Restore the signal handlers.
+ for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
+ signal_index++) {
+ sigaction(kUnblockedSignals[signal_index],
+ &old_sigactions[signal_index], NULL);
+ }
+ sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
+}
+
+// Platform-specific methods from SuspendedThreadsList.
+#if SANITIZER_ANDROID && defined(__arm__)
+typedef pt_regs regs_struct;
+#define REG_SP ARM_sp
+
+#elif SANITIZER_LINUX && defined(__arm__)
+typedef user_regs regs_struct;
+#define REG_SP uregs[13]
+
+#elif defined(__i386__) || defined(__x86_64__)
+typedef user_regs_struct regs_struct;
+#if defined(__i386__)
+#define REG_SP esp
+#else
+#define REG_SP rsp
+#endif
+
+#elif defined(__powerpc__) || defined(__powerpc64__)
+typedef pt_regs regs_struct;
+#define REG_SP gpr[PT_R1]
+
+#else
+#error "Unsupported architecture"
+#endif // SANITIZER_ANDROID && defined(__arm__)
+
+int SuspendedThreadsList::GetRegistersAndSP(uptr index,
+ uptr *buffer,
+ uptr *sp) const {
+ pid_t tid = GetThreadID(index);
+ regs_struct regs;
+ int pterrno;
+ if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
+ &pterrno)) {
+ Report("Could not get registers from thread %d (errno %d).\n",
+ tid, pterrno);
+ return -1;
+ }
+
+ *sp = regs.REG_SP;
+ internal_memcpy(buffer, &regs, sizeof(regs));
+ return 0;
+}
+
+uptr SuspendedThreadsList::RegisterCount() {
+ return sizeof(regs_struct) / sizeof(uptr);
+}
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.h b/lib/sanitizer_common/sanitizer_symbolizer.h
index c26d621ea065..ef37fd387f98 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer.h
+++ b/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -66,9 +66,13 @@ struct DataInfo {
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
// This function should NOT be called from two threads simultaneously.
-uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames)
+ SANITIZER_WEAK_ATTRIBUTE;
bool SymbolizeData(uptr address, DataInfo *info);
+bool IsSymbolizerAvailable();
+void FlushSymbolizer(); // releases internal caches (if any)
+
// Attempts to demangle the provided C++ mangled name.
const char *Demangle(const char *Name);
@@ -104,8 +108,13 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
// OS-dependent function that fills array with descriptions of at most
// "max_modules" currently loaded modules. Returns the number of
-// initialized modules.
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules);
+// initialized modules. If filter is nonzero, ignores modules for which
+// filter(full_name) is false.
+typedef bool (*string_predicate_t)(const char *);
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter);
+
+void SymbolizerPrepareForSandboxing();
} // namespace __sanitizer
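
A hypothetical use of the new filter parameter; IsNotVdso() and CollectRealModules() are invented for the example, and only the GetListOfModules()/string_predicate_t signature is taken from the header above:

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_symbolizer.h"

namespace __sanitizer {

static bool IsNotVdso(const char *full_name) {
  // Skip the vDSO pseudo-module; keep everything else.
  return internal_strstr(full_name, "vdso") == 0;
}

uptr CollectRealModules(LoadedModule *modules, uptr max_modules) {
  return GetListOfModules(modules, max_modules, /*filter*/ IsNotVdso);
}

}  // namespace __sanitizer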
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc b/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc
index 438629492923..e20fb91f0eb5 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc
@@ -10,7 +10,9 @@
// This file is shared between the sanitizer run-time libraries.
// Itanium C++ ABI-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#if defined(__APPLE__) || defined(__linux__)
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC || SANITIZER_LINUX
#include "sanitizer_symbolizer.h"
@@ -39,4 +41,4 @@ const char *__sanitizer::Demangle(const char *MangledName) {
return MangledName;
}
-#endif // __APPLE__ || __linux__
+#endif // SANITIZER_MAC || SANITIZER_LINUX
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.cc b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index a1d95ae0e0b2..ad339e21a927 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -1,4 +1,4 @@
-//===-- sanitizer_symbolizer.cc -------------------------------------------===//
+//===-- sanitizer_symbolizer_libcdep.cc -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -111,7 +111,7 @@ class ExternalSymbolizer {
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
CHECK(module_name);
- internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n",
+ internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
is_data ? "DATA " : "", module_name, module_offset);
if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
@@ -128,6 +128,9 @@ class ExternalSymbolizer {
return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
}
+ void Flush() {
+ }
+
private:
bool readFromSymbolizer(char *buffer, uptr max_length) {
if (max_length == 0)
@@ -176,7 +179,67 @@ class ExternalSymbolizer {
static LowLevelAllocator symbolizer_allocator; // Linker initialized.
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_flush();
+} // extern "C"
+
+class InternalSymbolizer {
+ public:
+ typedef bool (*SanitizerSymbolizeFn)(const char*, u64, char*, int);
+
+ static InternalSymbolizer *get() {
+ if (__sanitizer_symbolize_code != 0 &&
+ __sanitizer_symbolize_data != 0) {
+ void *mem = symbolizer_allocator.Allocate(sizeof(InternalSymbolizer));
+ return new(mem) InternalSymbolizer();
+ }
+ return 0;
+ }
+
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ SanitizerSymbolizeFn symbolize_fn = is_data ? __sanitizer_symbolize_data
+ : __sanitizer_symbolize_code;
+ if (symbolize_fn(module_name, module_offset, buffer_, kBufferSize))
+ return buffer_;
+ return 0;
+ }
+
+ void Flush() {
+ if (__sanitizer_symbolize_flush)
+ __sanitizer_symbolize_flush();
+ }
+
+ private:
+ InternalSymbolizer() { }
+
+ static const int kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+};
+#else // SANITIZER_SUPPORTS_WEAK_HOOKS
+
+class InternalSymbolizer {
+ public:
+ static InternalSymbolizer *get() { return 0; }
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ return 0;
+ }
+ void Flush() {
+ }
+};
+
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+
class Symbolizer {
+ // This class has no constructor, as global constructors are forbidden in
+ // sanitizer_common. It should be linker initialized instead.
public:
uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
if (max_frames == 0)
@@ -268,8 +331,30 @@ class Symbolizer {
return true;
}
+ bool IsSymbolizerAvailable() {
+ if (internal_symbolizer_ == 0)
+ internal_symbolizer_ = InternalSymbolizer::get();
+ return internal_symbolizer_ || external_symbolizer_;
+ }
+
+ void Flush() {
+ if (internal_symbolizer_)
+ internal_symbolizer_->Flush();
+ if (external_symbolizer_)
+ external_symbolizer_->Flush();
+ }
+
private:
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ // First, try to use internal symbolizer.
+ if (!IsSymbolizerAvailable()) {
+ return 0;
+ }
+ if (internal_symbolizer_) {
+ return internal_symbolizer_->SendCommand(is_data, module_name,
+ module_offset);
+ }
+ // Otherwise, fall back to external symbolizer.
if (external_symbolizer_ == 0) {
ReportExternalSymbolizerError(
"WARNING: Trying to symbolize code, but external "
@@ -293,21 +378,35 @@ class Symbolizer {
}
LoadedModule *FindModuleForAddress(uptr address) {
- if (modules_ == 0) {
+ bool modules_were_reloaded = false;
+ if (modules_ == 0 || !modules_fresh_) {
modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
kMaxNumberOfModuleContexts * sizeof(LoadedModule)));
CHECK(modules_);
- n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts);
- CHECK_GT(n_modules_, 0);
+ n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts,
+ /* filter */ 0);
+ // FIXME: Return this check when GetListOfModules is implemented on Mac.
+ // CHECK_GT(n_modules_, 0);
CHECK_LT(n_modules_, kMaxNumberOfModuleContexts);
+ modules_fresh_ = true;
+ modules_were_reloaded = true;
}
for (uptr i = 0; i < n_modules_; i++) {
if (modules_[i].containsAddress(address)) {
return &modules_[i];
}
}
+ // Reload the modules and look up again, if we haven't tried it yet.
+ if (!modules_were_reloaded) {
+ // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors.
+ // It's too aggressive to reload the list of modules each time we fail
+ // to find a module for a given address.
+ modules_fresh_ = false;
+ return FindModuleForAddress(address);
+ }
return 0;
}
+
void ReportExternalSymbolizerError(const char *msg) {
// Don't use atomics here for now, as SymbolizeCode can't be called
// from multiple threads anyway.
@@ -322,8 +421,11 @@ class Symbolizer {
static const uptr kMaxNumberOfModuleContexts = 1 << 14;
LoadedModule *modules_; // Array of module descriptions is leaked.
uptr n_modules_;
+ // If stale, need to reload the modules before looking up addresses.
+ bool modules_fresh_;
ExternalSymbolizer *external_symbolizer_; // Leaked.
+ InternalSymbolizer *internal_symbolizer_; // Leaked.
};
static Symbolizer symbolizer; // Linker initialized.
@@ -340,4 +442,12 @@ bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer);
}
+bool IsSymbolizerAvailable() {
+ return symbolizer.IsSymbolizerAvailable();
+}
+
+void FlushSymbolizer() {
+ symbolizer.Flush();
+}
+
} // namespace __sanitizer
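
Both OnPrint() in sanitizer_printf.cc and the __sanitizer_symbolize_* hooks above rely on the same weak-symbol trick: declare the hook weak and call it only if its address is non-null. A standalone sketch of that pattern, assuming a GCC/Clang ELF toolchain where an undefined weak symbol resolves to a null address; the hook name my_tool_on_print is made up for the example:

#include <cstdio>

// Optional hook: present only if some other translation unit defines it.
extern "C" __attribute__((weak)) void my_tool_on_print(const char *str);

static void EmitReport(const char *str) {
  if (&my_tool_on_print != nullptr)  // weak and undefined -> null address -> skip
    my_tool_on_print(str);
  std::fputs(str, stderr);
}

int main() {
  EmitReport("hello from the default report path\n");
  return 0;
}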
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_linux.cc b/lib/sanitizer_common/sanitizer_symbolizer_linux_libcdep.cc
index 4bd3dc8826ef..82ce50e0aacb 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_linux_libcdep.cc
@@ -1,4 +1,4 @@
-//===-- sanitizer_symbolizer_linux.cc -------------------------------------===//
+//===-- sanitizer_symbolizer_linux_libcdep.cc -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,13 +11,18 @@
// run-time libraries.
// Linux-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_symbolizer.h"
+// Android NDK r8e elf.h depends on stdint.h without including the latter.
+#include <stdint.h>
+
#include <elf.h>
#include <errno.h>
#include <poll.h>
@@ -26,7 +31,7 @@
#include <sys/wait.h>
#include <unistd.h>
-#if !defined(__ANDROID__) && !defined(ANDROID)
+#if !SANITIZER_ANDROID
#include <link.h>
#endif
@@ -99,7 +104,7 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
for (int fd = getdtablesize(); fd > 2; fd--)
internal_close(fd);
execl(path_to_symbolizer, path_to_symbolizer, (char*)0);
- Exit(1);
+ internal__exit(1);
}
// Continue execution in parent process.
@@ -121,39 +126,71 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
return true;
}
-#if defined(__ANDROID__) || defined(ANDROID)
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
- UNIMPLEMENTED();
+#if SANITIZER_ANDROID
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter) {
+ return 0;
}
-#else // ANDROID
+
+void SymbolizerPrepareForSandboxing() {
+ // Do nothing on Android.
+}
+#else // SANITIZER_ANDROID
typedef ElfW(Phdr) Elf_Phdr;
struct DlIteratePhdrData {
LoadedModule *modules;
uptr current_n;
+ bool first;
uptr max_n;
+ string_predicate_t filter;
};
static const uptr kMaxPathLength = 512;
+static char proc_self_exe_cache_str[kMaxPathLength];
+static uptr proc_self_exe_cache_len = 0;
+
+static uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ uptr module_name_len = internal_readlink(
+ "/proc/self/exe", buf, buf_len);
+ int readlink_error;
+  if (internal_iserror(module_name_len, &readlink_error)) {
+ if (proc_self_exe_cache_len) {
+ // If available, use the cached module name.
+ CHECK_LE(proc_self_exe_cache_len, buf_len);
+ internal_strncpy(buf, proc_self_exe_cache_str, buf_len);
+ module_name_len = internal_strlen(proc_self_exe_cache_str);
+ } else {
+ // We can't read /proc/self/exe for some reason, assume the name of the
+ // binary is unknown.
+ Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
+ "some stack frames may not be symbolized\n", readlink_error);
+ module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
+ }
+ CHECK_LT(module_name_len, buf_len);
+ buf[module_name_len] = '\0';
+ }
+ return module_name_len;
+}
+
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
if (data->current_n == data->max_n)
return 0;
InternalScopedBuffer<char> module_name(kMaxPathLength);
module_name.data()[0] = '\0';
- if (data->current_n == 0) {
+ if (data->first) {
+ data->first = false;
// First module is the binary itself.
- uptr module_name_len = internal_readlink(
- "/proc/self/exe", module_name.data(), module_name.size());
- CHECK_NE(module_name_len, (uptr)-1);
- CHECK_LT(module_name_len, module_name.size());
- module_name[module_name_len] = '\0';
+ ReadBinaryName(module_name.data(), module_name.size());
} else if (info->dlpi_name) {
internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
}
if (module_name.data()[0] == '\0')
return 0;
+ if (data->filter && !data->filter(module_name.data()))
+ return 0;
void *mem = &data->modules[data->current_n];
LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
info->dlpi_addr);
@@ -169,14 +206,22 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
return 0;
}
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter) {
CHECK(modules);
- DlIteratePhdrData data = {modules, 0, max_modules};
+ DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
dl_iterate_phdr(dl_iterate_phdr_cb, &data);
return data.current_n;
}
-#endif // ANDROID
+
+void SymbolizerPrepareForSandboxing() {
+ if (!proc_self_exe_cache_len) {
+ proc_self_exe_cache_len =
+ ReadBinaryName(proc_self_exe_cache_str, kMaxPathLength);
+ }
+}
+#endif // SANITIZER_ANDROID
} // namespace __sanitizer
-#endif // __linux__
+#endif // SANITIZER_LINUX
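
ReadBinaryName() and SymbolizerPrepareForSandboxing() above cache the result of readlink("/proc/self/exe") so the binary name stays available once a sandbox blocks /proc access. A standalone sketch of the same cache-once idea; the 512-byte cap mirrors kMaxPathLength, everything else is illustrative:

#include <unistd.h>
#include <cstdio>

static char g_exe_cache[512];
static size_t g_exe_cache_len = 0;

// Read /proc/self/exe the first time and reuse the cached copy afterwards.
static const char *CachedBinaryName() {
  if (g_exe_cache_len == 0) {
    ssize_t len = readlink("/proc/self/exe", g_exe_cache,
                           sizeof(g_exe_cache) - 1);
    if (len <= 0) return "/proc/self/exe";  // fall back to the literal path
    g_exe_cache[len] = '\0';
    g_exe_cache_len = (size_t)len;
  }
  return g_exe_cache;
}

int main() {
  std::printf("binary: %s\n", CachedBinaryName());
  return 0;
}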
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
index 23993607e77b..9d96690bfda2 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -11,7 +11,9 @@
// run-time libraries.
// Mac-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#ifdef __APPLE__
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
@@ -22,10 +24,17 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
UNIMPLEMENTED();
}
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
- UNIMPLEMENTED();
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter) {
+ // FIXME: Actually implement this on Mac. Just using MemoryMappingLayout
+ // may be enough for this on Mac.
+ return 0;
+}
+
+void SymbolizerPrepareForSandboxing() {
+ // Do nothing on Mac.
}
} // namespace __sanitizer
-#endif // __APPLE__
+#endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
index f1b6a02a6f9a..993261aab7b0 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -11,7 +11,9 @@
// run-time libraries.
// Windows-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#ifdef _WIN32
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
#include <windows.h>
#include "sanitizer_internal_defs.h"
@@ -24,10 +26,15 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
UNIMPLEMENTED();
}
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter) {
UNIMPLEMENTED();
};
+void SymbolizerPrepareForSandboxing() {
+ // Do nothing on Windows.
+}
+
const char *Demangle(const char *MangledName) {
return MangledName;
}
diff --git a/lib/sanitizer_common/sanitizer_syscall_generic.inc b/lib/sanitizer_common/sanitizer_syscall_generic.inc
new file mode 100644
index 000000000000..aac20a5f2d69
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_syscall_generic.inc
@@ -0,0 +1,24 @@
+//===-- sanitizer_syscall_generic.inc ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic implementations of internal_syscall and internal_iserror.
+//
+//===----------------------------------------------------------------------===//
+
+#define internal_syscall syscall
+
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval == (uptr)-1) {
+ if (rverrno)
+ *rverrno = errno;
+ return true;
+ } else {
+ return false;
+ }
+}
diff --git a/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc b/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
new file mode 100644
index 000000000000..e084b84ab118
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
@@ -0,0 +1,87 @@
+//===-- sanitizer_syscall_linux_x86_64.inc ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/x86_64.
+//
+//===----------------------------------------------------------------------===//
+
+static uptr internal_syscall(u64 nr) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11");
+ return retval;
+}
+
+template <typename T1>
+static uptr internal_syscall(u64 nr, T1 arg1) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1) :
+ "rcx", "r11");
+ return retval;
+}
+
+template <typename T1, typename T2>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2) : "rcx", "r11");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3) : "rcx", "r11");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4) :
+ "rcx", "r11", "r10");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
+ T5 arg5) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "mov %6, %%r8;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5) :
+ "rcx", "r11", "r10", "r8");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
+ T5 arg5, T6 arg6) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "mov %6, %%r8;"
+ "mov %7, %%r9;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5),
+ "r"((u64)arg6) : "rcx", "r11", "r10", "r8", "r9");
+ return retval;
+}
+
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
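
internal_iserror() above decodes the raw Linux x86-64 syscall convention: a failing syscall returns the negated errno, so any value in [-4095, -1] (equivalently, anything >= (uptr)-4095 when viewed as unsigned) is an error. A standalone illustration of that check on a synthetic value, not on an actual syscall:

#include <cerrno>
#include <cstdio>

static bool is_error(unsigned long retval, int *rverrno) {
  if (retval >= (unsigned long)-4095) {
    if (rverrno)
      *rverrno = (int)(-(long)retval);  // recover the positive errno value
    return true;
  }
  return false;
}

int main() {
  unsigned long ok = 42;                          // e.g. a valid fd or byte count
  unsigned long failed = (unsigned long)-EBADF;   // synthetic raw failure value
  int err = 0;
  std::printf("ok: %d\n", is_error(ok, &err));                        // prints 0
  std::printf("failed: %d errno=%d\n", is_error(failed, &err), err);  // 1 and EBADF
  return 0;
}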
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.cc b/lib/sanitizer_common/sanitizer_thread_registry.cc
new file mode 100644
index 000000000000..466dc3b8a27f
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_thread_registry.cc
@@ -0,0 +1,279 @@
+//===-- sanitizer_thread_registry.cc --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// General thread bookkeeping functionality.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+
+ThreadContextBase::ThreadContextBase(u32 tid)
+ : tid(tid), unique_id(0), os_id(0), user_id(0), status(ThreadStatusInvalid),
+ detached(false), reuse_count(0), parent_tid(0), next(0) {
+ name[0] = '\0';
+}
+
+ThreadContextBase::~ThreadContextBase() {
+ // ThreadContextBase should never be deleted.
+ CHECK(0);
+}
+
+void ThreadContextBase::SetName(const char *new_name) {
+ name[0] = '\0';
+ if (new_name) {
+ internal_strncpy(name, new_name, sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+ }
+}
+
+void ThreadContextBase::SetDead() {
+ CHECK(status == ThreadStatusRunning ||
+ status == ThreadStatusFinished);
+ status = ThreadStatusDead;
+ user_id = 0;
+ OnDead();
+}
+
+void ThreadContextBase::SetJoined(void *arg) {
+ // FIXME(dvyukov): print message and continue (it's user error).
+ CHECK_EQ(false, detached);
+ CHECK_EQ(ThreadStatusFinished, status);
+ status = ThreadStatusDead;
+ user_id = 0;
+ OnJoined(arg);
+}
+
+void ThreadContextBase::SetFinished() {
+ if (!detached)
+ status = ThreadStatusFinished;
+ OnFinished();
+}
+
+void ThreadContextBase::SetStarted(uptr _os_id, void *arg) {
+ status = ThreadStatusRunning;
+ os_id = _os_id;
+ OnStarted(arg);
+}
+
+void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
+ bool _detached, u32 _parent_tid, void *arg) {
+ status = ThreadStatusCreated;
+ user_id = _user_id;
+ unique_id = _unique_id;
+ detached = _detached;
+ // Parent tid makes no sense for the main thread.
+ if (tid != 0)
+ parent_tid = _parent_tid;
+ OnCreated(arg);
+}
+
+void ThreadContextBase::Reset() {
+ status = ThreadStatusInvalid;
+ reuse_count++;
+ SetName(0);
+ OnReset();
+}
+
+// ThreadRegistry implementation.
+
+const u32 ThreadRegistry::kUnknownTid = -1U;
+
+ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
+ u32 thread_quarantine_size)
+ : context_factory_(factory),
+ max_threads_(max_threads),
+ thread_quarantine_size_(thread_quarantine_size),
+ mtx_(),
+ n_contexts_(0),
+ total_threads_(0),
+ alive_threads_(0),
+ max_alive_threads_(0),
+ running_threads_(0) {
+ threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]),
+ "ThreadRegistry");
+ dead_threads_.clear();
+ invalid_threads_.clear();
+}
+
+void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
+ uptr *alive) {
+ BlockingMutexLock l(&mtx_);
+ if (total) *total = n_contexts_;
+ if (running) *running = running_threads_;
+ if (alive) *alive = alive_threads_;
+}
+
+uptr ThreadRegistry::GetMaxAliveThreads() {
+ BlockingMutexLock l(&mtx_);
+ return max_alive_threads_;
+}
+
+u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
+ void *arg) {
+ BlockingMutexLock l(&mtx_);
+ u32 tid = kUnknownTid;
+ ThreadContextBase *tctx = QuarantinePop();
+ if (tctx) {
+ tid = tctx->tid;
+ } else if (n_contexts_ < max_threads_) {
+ // Allocate new thread context and tid.
+ tid = n_contexts_++;
+ tctx = context_factory_(tid);
+ threads_[tid] = tctx;
+ } else {
+ Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
+ SanitizerToolName, max_threads_);
+ Die();
+ }
+ CHECK_NE(tctx, 0);
+ CHECK_NE(tid, kUnknownTid);
+ CHECK_LT(tid, max_threads_);
+ CHECK_EQ(tctx->status, ThreadStatusInvalid);
+ alive_threads_++;
+ if (max_alive_threads_ < alive_threads_) {
+ max_alive_threads_++;
+ CHECK_EQ(alive_threads_, max_alive_threads_);
+ }
+ tctx->SetCreated(user_id, total_threads_++, detached,
+ parent_tid, arg);
+ return tid;
+}
+
+void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
+ void *arg) {
+ CheckLocked();
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx == 0)
+ continue;
+ cb(tctx, arg);
+ }
+}
+
+u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && cb(tctx, arg))
+ return tctx->tid;
+ }
+ return kUnknownTid;
+}
+
+ThreadContextBase *
+ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {
+ CheckLocked();
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && cb(tctx, arg))
+ return tctx;
+ }
+ return 0;
+}
+
+static bool FindThreadContextByOsIdCallback(ThreadContextBase *tctx,
+ void *arg) {
+ return (tctx->os_id == (uptr)arg && tctx->status != ThreadStatusInvalid &&
+ tctx->status != ThreadStatusDead);
+}
+
+ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(uptr os_id) {
+ return FindThreadContextLocked(FindThreadContextByOsIdCallback,
+ (void *)os_id);
+}
+
+void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(ThreadStatusRunning, tctx->status);
+ tctx->SetName(name);
+}
+
+void ThreadRegistry::DetachThread(u32 tid) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ if (tctx->status == ThreadStatusInvalid) {
+ Report("%s: Detach of non-existent thread\n", SanitizerToolName);
+ return;
+ }
+ if (tctx->status == ThreadStatusFinished) {
+ tctx->SetDead();
+ QuarantinePush(tctx);
+ } else {
+ tctx->detached = true;
+ }
+}
+
+void ThreadRegistry::JoinThread(u32 tid, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ if (tctx->status == ThreadStatusInvalid) {
+ Report("%s: Join of non-existent thread\n", SanitizerToolName);
+ return;
+ }
+ tctx->SetJoined(arg);
+ QuarantinePush(tctx);
+}
+
+void ThreadRegistry::FinishThread(u32 tid) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_GT(alive_threads_, 0);
+ alive_threads_--;
+ CHECK_GT(running_threads_, 0);
+ running_threads_--;
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(ThreadStatusRunning, tctx->status);
+ tctx->SetFinished();
+ if (tctx->detached) {
+ tctx->SetDead();
+ QuarantinePush(tctx);
+ }
+}
+
+void ThreadRegistry::StartThread(u32 tid, uptr os_id, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ running_threads_++;
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(ThreadStatusCreated, tctx->status);
+ tctx->SetStarted(os_id, arg);
+}
+
+void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
+ dead_threads_.push_back(tctx);
+ if (dead_threads_.size() <= thread_quarantine_size_)
+ return;
+ tctx = dead_threads_.front();
+ dead_threads_.pop_front();
+ CHECK_EQ(tctx->status, ThreadStatusDead);
+ tctx->Reset();
+ invalid_threads_.push_back(tctx);
+}
+
+ThreadContextBase *ThreadRegistry::QuarantinePop() {
+ if (invalid_threads_.size() == 0)
+ return 0;
+ ThreadContextBase *tctx = invalid_threads_.front();
+ invalid_threads_.pop_front();
+ return tctx;
+}
+
+} // namespace __sanitizer
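
Taken together, the registry drives each context through Created, Running, Finished, and Dead, while the quarantine keeps at most thread_quarantine_size dead contexts before recycling them through QuarantinePop(). A hedged usage sketch of that lifecycle (the factory and function names below are invented for illustration; a real tool would allocate a tool-specific subclass, and placement new requires sanitizer_placement_new.h):

    // Illustrative factory: real tools construct their own ThreadContextBase subclass.
    static ThreadContextBase *ExampleContextFactory(u32 tid) {
      void *mem = MmapOrDie(sizeof(ThreadContextBase), "ExampleContextFactory");
      return new(mem) ThreadContextBase(tid);
    }

    static void ExampleLifecycle() {
      ThreadRegistry registry(ExampleContextFactory, /*max_threads=*/ 1 << 14,
                              /*thread_quarantine_size=*/ 64);
      u32 tid = registry.CreateThread(/*user_id=*/ 0, /*detached=*/ false,
                                      /*parent_tid=*/ 0, /*arg=*/ 0);
      registry.StartThread(tid, /*os_id=*/ GetTid(), /*arg=*/ 0);
      // ... thread body runs ...
      registry.FinishThread(tid);
      registry.JoinThread(tid, /*arg=*/ 0);  // moves the context into the quarantine
    }
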
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.h b/lib/sanitizer_common/sanitizer_thread_registry.h
new file mode 100644
index 000000000000..6072e7c0a002
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -0,0 +1,145 @@
+//===-- sanitizer_thread_registry.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// General thread bookkeeping functionality.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_REGISTRY_H
+#define SANITIZER_THREAD_REGISTRY_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+enum ThreadStatus {
+ ThreadStatusInvalid, // Non-existent thread, data is invalid.
+ ThreadStatusCreated, // Created but not yet running.
+ ThreadStatusRunning, // The thread is currently running.
+ ThreadStatusFinished, // Joinable thread is finished but not yet joined.
+ ThreadStatusDead // Joined, but some info is still available.
+};
+
+// Generic thread context. Specific sanitizer tools may inherit from it.
+// If a thread is dead, its context may optionally be reused for a new thread.
+class ThreadContextBase {
+ public:
+ explicit ThreadContextBase(u32 tid);
+ ~ThreadContextBase(); // Should never be called.
+
+ const u32 tid; // Thread ID. Main thread should have tid = 0.
+ u64 unique_id; // Unique thread ID.
+ uptr os_id; // PID (used for reporting).
+ uptr user_id; // Some opaque user thread id (e.g. pthread_t).
+ char name[64]; // As annotated by user.
+
+ ThreadStatus status;
+ bool detached;
+ int reuse_count;
+
+ u32 parent_tid;
+ ThreadContextBase *next; // For storing thread contexts in a list.
+
+ void SetName(const char *new_name);
+
+ void SetDead();
+ void SetJoined(void *arg);
+ void SetFinished();
+ void SetStarted(uptr _os_id, void *arg);
+ void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,
+ u32 _parent_tid, void *arg);
+ void Reset();
+
+ // The following methods may be overridden by subclasses.
+ // Some of them take an opaque arg that may optionally be used
+ // by subclasses.
+ virtual void OnDead() {}
+ virtual void OnJoined(void *arg) {}
+ virtual void OnFinished() {}
+ virtual void OnStarted(void *arg) {}
+ virtual void OnCreated(void *arg) {}
+ virtual void OnReset() {}
+};
+
+typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
+
+class ThreadRegistry {
+ public:
+ static const u32 kUnknownTid;
+
+ ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
+ u32 thread_quarantine_size);
+ void GetNumberOfThreads(uptr *total = 0, uptr *running = 0, uptr *alive = 0);
+ uptr GetMaxAliveThreads();
+
+ void Lock() { mtx_.Lock(); }
+ void CheckLocked() { mtx_.CheckLocked(); }
+ void Unlock() { mtx_.Unlock(); }
+
+ // Should be guarded by ThreadRegistryLock.
+ ThreadContextBase *GetThreadLocked(u32 tid) {
+ DCHECK_LT(tid, n_contexts_);
+ return threads_[tid];
+ }
+
+ u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
+
+ typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);
+ // Invokes callback with a specified arg for each thread context.
+ // Should be guarded by ThreadRegistryLock.
+ void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg);
+
+ typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg);
+ // Finds a thread using the provided callback. Returns kUnknownTid if no
+ // thread is found.
+ u32 FindThread(FindThreadCallback cb, void *arg);
+ // Should be guarded by ThreadRegistryLock. Returns 0 if no thread
+ // is found.
+ ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb,
+ void *arg);
+ ThreadContextBase *FindThreadContextByOsIDLocked(uptr os_id);
+
+ void SetThreadName(u32 tid, const char *name);
+ void DetachThread(u32 tid);
+ void JoinThread(u32 tid, void *arg);
+ void FinishThread(u32 tid);
+ void StartThread(u32 tid, uptr os_id, void *arg);
+
+ private:
+ const ThreadContextFactory context_factory_;
+ const u32 max_threads_;
+ const u32 thread_quarantine_size_;
+
+ BlockingMutex mtx_;
+
+ u32 n_contexts_; // Number of created thread contexts,
+ // at most max_threads_.
+ u64 total_threads_; // Total number of created threads. May be greater than
+ // max_threads_ if contexts were reused.
+ uptr alive_threads_; // Created or running.
+ uptr max_alive_threads_;
+ uptr running_threads_;
+
+ ThreadContextBase **threads_; // Array of thread contexts is leaked.
+ IntrusiveList<ThreadContextBase> dead_threads_;
+ IntrusiveList<ThreadContextBase> invalid_threads_;
+
+ void QuarantinePush(ThreadContextBase *tctx);
+ ThreadContextBase *QuarantinePop();
+};
+
+typedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_THREAD_REGISTRY_H
+
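
Because the On*() hooks are virtual and contexts are produced by the factory, a tool customizes the registry by subclassing ThreadContextBase and by holding ThreadRegistryLock around any *Locked() accessor. A minimal sketch under those assumptions (class, field, and factory names are hypothetical):

    // Hypothetical tool-specific context that records a stack bound on start.
    class ExampleThreadContext : public ThreadContextBase {
     public:
      explicit ExampleThreadContext(u32 tid)
          : ThreadContextBase(tid), stack_begin(0) {}
      uptr stack_begin;
      // 'arg' is whatever the tool passed to ThreadRegistry::StartThread().
      virtual void OnStarted(void *arg) { stack_begin = (uptr)arg; }
    };

    static ThreadContextBase *ExampleFactory(u32 tid) {
      void *mem = MmapOrDie(sizeof(ExampleThreadContext), "ExampleThreadContext");
      return new(mem) ExampleThreadContext(tid);
    }

    // Lookups whose names end in "Locked" must run under the registry lock:
    //   ThreadRegistryLock l(&registry);
    //   ThreadContextBase *tctx = registry.FindThreadContextByOsIDLocked(os_id);
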
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index 2ae37af8847c..e76f1d1f7fa6 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -11,7 +11,10 @@
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
-#ifdef _WIN32
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <stdlib.h>
@@ -20,11 +23,14 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
-#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_stacktrace.h"
namespace __sanitizer {
+#include "sanitizer_syscall_generic.inc"
+
// --------------------- sanitizer_common.h
uptr GetPageSize() {
return 1U << 14; // FIXME: is this configurable?
@@ -38,14 +44,20 @@ bool FileExists(const char *filename) {
UNIMPLEMENTED();
}
-int GetPid() {
+uptr internal_getpid() {
return GetProcessId(GetCurrentProcess());
}
-uptr GetThreadSelf() {
+// In contrast to POSIX, on Windows GetCurrentThreadId()
+// returns a system-unique identifier.
+uptr GetTid() {
return GetCurrentThreadId();
}
+uptr GetThreadSelf() {
+ return GetTid();
+}
+
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
CHECK(stack_top);
@@ -97,6 +109,11 @@ void *Mprotect(uptr fixed_addr, uptr size) {
MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
}
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+ // This is almost useless on 32-bits.
+ // FIXME: add a madvise analog when we move to 64-bits.
+}
+
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
// FIXME: shall we do anything here on Windows?
return true;
@@ -106,19 +123,38 @@ void *MapFileToMemory(const char *file_name, uptr *buff_size) {
UNIMPLEMENTED();
}
-const char *GetEnv(const char *name) {
- static char env_buffer[32767] = {};
+static const int kMaxEnvNameLength = 128;
+static const int kMaxEnvValueLength = 32767;
- // Note: this implementation stores the result in a static buffer so we only
- // allow it to be called just once.
- static bool called_once = false;
- if (called_once)
- UNIMPLEMENTED();
- called_once = true;
+namespace {
+
+struct EnvVariable {
+ char name[kMaxEnvNameLength];
+ char value[kMaxEnvValueLength];
+};
- DWORD rv = GetEnvironmentVariableA(name, env_buffer, sizeof(env_buffer));
- if (rv > 0 && rv < sizeof(env_buffer))
- return env_buffer;
+} // namespace
+
+static const int kEnvVariables = 5;
+static EnvVariable env_vars[kEnvVariables];
+static int num_env_vars;
+
+const char *GetEnv(const char *name) {
+ // Note: this implementation caches the values of the environment variables
+ // and limits their quantity.
+ for (int i = 0; i < num_env_vars; i++) {
+ if (0 == internal_strcmp(name, env_vars[i].name))
+ return env_vars[i].value;
+ }
+ CHECK_LT(num_env_vars, kEnvVariables);
+ DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
+ kMaxEnvValueLength);
+ if (rv > 0 && rv < kMaxEnvValueLength) {
+ CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
+ internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
+ num_env_vars++;
+ return env_vars[num_env_vars - 1].value;
+ }
return 0;
}
@@ -126,6 +162,10 @@ const char *GetPwd() {
UNIMPLEMENTED();
}
+u32 GetUid() {
+ UNIMPLEMENTED();
+}
+
void DumpProcessMap() {
UNIMPLEMENTED();
}
@@ -158,10 +198,6 @@ void SleepForMillis(int millis) {
Sleep(millis);
}
-void Exit(int exitcode) {
- _exit(exitcode);
-}
-
void Abort() {
abort();
_exit(-1); // abort is not NORETURN on Windows.
@@ -174,16 +210,16 @@ int Atexit(void (*function)(void)) {
#endif
// ------------------ sanitizer_libc.h
-void *internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, u64 offset) {
+uptr internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset) {
UNIMPLEMENTED();
}
-int internal_munmap(void *addr, uptr length) {
+uptr internal_munmap(void *addr, uptr length) {
UNIMPLEMENTED();
}
-int internal_close(fd_t fd) {
+uptr internal_close(fd_t fd) {
UNIMPLEMENTED();
}
@@ -191,7 +227,15 @@ int internal_isatty(fd_t fd) {
return _isatty(fd);
}
-fd_t internal_open(const char *filename, bool write) {
+uptr internal_open(const char *filename, int flags) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ UNIMPLEMENTED();
+}
+
+uptr OpenFile(const char *filename, bool write) {
UNIMPLEMENTED();
}
@@ -211,11 +255,23 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
return ret;
}
+uptr internal_stat(const char *path, void *buf) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+ UNIMPLEMENTED();
+}
+
uptr internal_filesize(fd_t fd) {
UNIMPLEMENTED();
}
-int internal_dup2(int oldfd, int newfd) {
+uptr internal_dup2(int oldfd, int newfd) {
UNIMPLEMENTED();
}
@@ -223,16 +279,18 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
UNIMPLEMENTED();
}
-int internal_sched_yield() {
+uptr internal_sched_yield() {
Sleep(0);
return 0;
}
+void internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
// ---------------------- BlockingMutex ---------------- {{{1
-enum LockState {
- LOCK_UNINITIALIZED = 0,
- LOCK_READY = -1,
-};
+const uptr LOCK_UNINITIALIZED = 0;
+const uptr LOCK_READY = (uptr)-1;
BlockingMutex::BlockingMutex(LinkerInitialized li) {
// FIXME: see comments in BlockingMutex::Lock() for the details.
@@ -243,6 +301,12 @@ BlockingMutex::BlockingMutex(LinkerInitialized li) {
owner_ = LOCK_READY;
}
+BlockingMutex::BlockingMutex() {
+ CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
+ InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ owner_ = LOCK_READY;
+}
+
void BlockingMutex::Lock() {
if (owner_ == LOCK_UNINITIALIZED) {
// FIXME: hm, global BlockingMutex objects are not initialized?!?
@@ -254,16 +318,64 @@ void BlockingMutex::Lock() {
// locks while we're starting in one thread to avoid double-init races.
}
EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
- CHECK(owner_ == LOCK_READY);
+ CHECK_EQ(owner_, LOCK_READY);
owner_ = GetThreadSelf();
}
void BlockingMutex::Unlock() {
- CHECK(owner_ == GetThreadSelf());
+ CHECK_EQ(owner_, GetThreadSelf());
owner_ = LOCK_READY;
LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}
+void BlockingMutex::CheckLocked() {
+ CHECK_EQ(owner_, GetThreadSelf());
+}
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void InitTlsSize() {
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = 0;
+ *tls_size = 0;
+}
+
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
+ uptr stack_top, uptr stack_bottom, bool fast) {
+ (void)fast;
+ (void)stack_top;
+ (void)stack_bottom;
+ stack->max_size = max_s;
+ void *tmp[kStackTraceMax];
+
+ // FIXME: CaptureStackBackTrace might be too slow for us.
+ // FIXME: Compare with StackWalk64.
+ // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
+ uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0);
+ uptr offset = 0;
+ // Skip the RTL frames by searching for the PC in the stacktrace.
+ // FIXME: this doesn't work well for the malloc/free stacks yet.
+ for (uptr i = 0; i < cs_ret; i++) {
+ if (pc != (uptr)tmp[i])
+ continue;
+ offset = i;
+ break;
+ }
+
+ stack->size = cs_ret - offset;
+ for (uptr i = 0; i < stack->size; i++)
+ stack->trace[i] = (uptr)tmp[i + offset];
+}
+
} // namespace __sanitizer
#endif // _WIN32
diff --git a/lib/sanitizer_common/scripts/check_lint.sh b/lib/sanitizer_common/scripts/check_lint.sh
index e65794df0ce7..3240f6f18cee 100755
--- a/lib/sanitizer_common/scripts/check_lint.sh
+++ b/lib/sanitizer_common/scripts/check_lint.sh
@@ -13,20 +13,27 @@ fi
# Cpplint setup
cd ${SCRIPT_DIR}
if [ ! -d cpplint ]; then
- svn co -r83 http://google-styleguide.googlecode.com/svn/trunk/cpplint cpplint
+ svn co http://google-styleguide.googlecode.com/svn/trunk/cpplint cpplint
+else
+ (cd cpplint && svn up)
fi
CPPLINT=${SCRIPT_DIR}/cpplint/cpplint.py
# Filters
# TODO: remove some of these filters
-ASAN_RTL_LINT_FILTER=-readability/casting,-readability/check,-build/include,-build/header_guard,-build/class,-legal/copyright,-build/namespaces
-ASAN_TEST_LINT_FILTER=-readability/casting,-build/include,-legal/copyright,-whitespace/newline,-runtime/sizeof,-runtime/int,-runtime/printf,-build/header_guard
+COMMON_LINT_FILTER=-build/include,-build/header_guard,-legal/copyright,-whitespace/comments,-readability/casting,\
+-build/namespaces
+ASAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int
+ASAN_TEST_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/sizeof,-runtime/int,-runtime/printf
ASAN_LIT_TEST_LINT_FILTER=${ASAN_TEST_LINT_FILTER},-whitespace/line_length
-TSAN_RTL_LINT_FILTER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces
+TSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}
TSAN_TEST_LINT_FILTER=${TSAN_RTL_LINT_FILTER},-runtime/threadsafe_fn,-runtime/int
TSAN_LIT_TEST_LINT_FILTER=${TSAN_TEST_LINT_FILTER},-whitespace/line_length
-MSAN_RTL_LINT_FILTER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces
-TSAN_RTL_INC_LINT_FILTER=${TSAN_TEST_LINT_FILTER},-runtime/sizeof
+MSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}
+LSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}
+LSAN_LIT_TEST_LINT_FILTER=${LSAN_RTL_LINT_FILTER},-whitespace/line_length
+COMMON_RTL_INC_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int,-runtime/sizeof,-runtime/printf
+SANITIZER_INCLUDES_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int
cd ${LLVM_CHECKOUT}
@@ -40,12 +47,12 @@ COMPILER_RT=projects/compiler-rt
# Headers
SANITIZER_INCLUDES=${COMPILER_RT}/include/sanitizer
-${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${SANITIZER_INCLUDES}/*.h
+${CPPLINT} --filter=${SANITIZER_INCLUDES_LINT_FILTER} ${SANITIZER_INCLUDES}/*.h
# Sanitizer_common
COMMON_RTL=${COMPILER_RT}/lib/sanitizer_common
-${CPPLINT} --filter=${ASAN_RTL_LINT_FILTER} ${COMMON_RTL}/*.{cc,h}
-${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${COMMON_RTL}/tests/*.cc
+${CPPLINT} --filter=${COMMON_RTL_INC_LINT_FILTER} ${COMMON_RTL}/*.{cc,h}
+${CPPLINT} --filter=${COMMON_RTL_INC_LINT_FILTER} ${COMMON_RTL}/tests/*.cc
# Interception
INTERCEPTION=${COMPILER_RT}/lib/interception
@@ -69,6 +76,12 @@ ${CPPLINT} --filter=${TSAN_LIT_TEST_LINT_FILTER} ${TSAN_RTL}/lit_tests/*.cc
MSAN_RTL=${COMPILER_RT}/lib/msan
${CPPLINT} --filter=${MSAN_RTL_LINT_FILTER} ${MSAN_RTL}/*.{cc,h}
+# LSan
+LSAN_RTL=${COMPILER_RT}/lib/lsan
+${CPPLINT} --filter=${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/*.{cc,h}
+${CPPLINT} --filter=${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/tests/*.{cc,h}
+${CPPLINT} --filter=${LSAN_LIT_TEST_LINT_FILTER} ${LSAN_RTL}/lit_tests/*.{cc,h}
+
set +e
# Misc files
@@ -77,6 +90,6 @@ for FILE in $FILES; do
TMPFILE=$(mktemp -u ${FILE}.XXXXX).cc
echo "Checking $FILE"
cp -f $FILE $TMPFILE && \
- ${CPPLINT} --filter=${TSAN_RTL_INC_LINT_FILTER} $TMPFILE
+ ${CPPLINT} --filter=${COMMON_RTL_INC_LINT_FILTER} $TMPFILE
rm $TMPFILE
done
diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt
index f83a89cbe37c..25e57507ad14 100644
--- a/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/lib/sanitizer_common/tests/CMakeLists.txt
@@ -2,15 +2,20 @@ include(CompilerRTCompile)
set(SANITIZER_UNITTESTS
sanitizer_allocator_test.cc
+ sanitizer_atomic_test.cc
sanitizer_common_test.cc
sanitizer_flags_test.cc
sanitizer_libc_test.cc
+ sanitizer_linux_test.cc
sanitizer_list_test.cc
sanitizer_mutex_test.cc
sanitizer_printf_test.cc
sanitizer_scanf_interceptor_test.cc
sanitizer_stackdepot_test.cc
+ sanitizer_stacktrace_test.cc
+ sanitizer_stoptheworld_test.cc
sanitizer_test_main.cc
+ sanitizer_thread_registry_test.cc
)
set(SANITIZER_TEST_HEADERS)
@@ -18,6 +23,18 @@ foreach(header ${SANITIZER_HEADERS})
list(APPEND SANITIZER_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
endforeach()
+set(SANITIZER_TEST_CFLAGS_COMMON
+ ${COMPILER_RT_GTEST_INCLUDE_CFLAGS}
+ -I${COMPILER_RT_SOURCE_DIR}/include
+ -I${COMPILER_RT_SOURCE_DIR}/lib
+ -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common
+ -DGTEST_HAS_RTTI=0
+ -O2 -g -fno-rtti
+ -Wall -Werror -Werror=sign-compare)
+
+set(SANITIZER_TEST_LINK_FLAGS_COMMON
+ -lstdc++ -ldl)
+
include_directories(..)
include_directories(../..)
@@ -49,18 +66,12 @@ macro(add_sanitizer_tests_for_arch arch)
get_target_flags_for_arch(${arch} TARGET_FLAGS)
set(SANITIZER_TEST_SOURCES ${SANITIZER_UNITTESTS}
${COMPILER_RT_GTEST_SOURCE})
- set(SANITIZER_TEST_CFLAGS ${COMPILER_RT_GTEST_INCLUDE_CFLAGS}
- -I${COMPILER_RT_SOURCE_DIR}/include
- -I${COMPILER_RT_SOURCE_DIR}/lib
- -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common
- -O2 -g ${TARGET_FLAGS})
- set(SANITIZER_TEST_LINK_FLAGS -lstdc++ -lpthread ${TARGET_FLAGS})
set(SANITIZER_TEST_OBJECTS)
foreach(source ${SANITIZER_TEST_SOURCES})
get_filename_component(basename ${source} NAME)
set(output_obj "${basename}.${arch}.o")
clang_compile(${output_obj} ${source}
- CFLAGS ${SANITIZER_TEST_CFLAGS}
+ CFLAGS ${SANITIZER_TEST_CFLAGS_COMMON} ${TARGET_FLAGS}
DEPS gtest ${SANITIZER_RUNTIME_LIBRARIES}
${SANITIZER_TEST_HEADERS})
list(APPEND SANITIZER_TEST_OBJECTS ${output_obj})
@@ -73,7 +84,8 @@ macro(add_sanitizer_tests_for_arch arch)
OBJECTS ${SANITIZER_TEST_OBJECTS}
${SANITIZER_COMMON_LIB_NAME}
DEPS ${SANITIZER_TEST_OBJECTS} ${SANITIZER_COMMON_LIB}
- LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS})
+ LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS_COMMON}
+ -lpthread ${TARGET_FLAGS})
endmacro()
if(COMPILER_RT_CAN_EXECUTE_TESTS)
@@ -85,11 +97,13 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS)
else()
if(CAN_TARGET_x86_64)
add_sanitizer_common_lib("RTSanitizerCommon.test.x86_64"
- $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>)
+ $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.x86_64>)
endif()
if(CAN_TARGET_i386)
add_sanitizer_common_lib("RTSanitizerCommon.test.i386"
- $<TARGET_OBJECTS:RTSanitizerCommon.i386>)
+ $<TARGET_OBJECTS:RTSanitizerCommon.i386>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.i386>)
endif()
endif()
if(CAN_TARGET_x86_64)
@@ -118,21 +132,14 @@ if(ANDROID)
add_executable(SanitizerTest
${SANITIZER_UNITTESTS}
${COMPILER_RT_GTEST_SOURCE}
- $<TARGET_OBJECTS:RTSanitizerCommon.arm.android>
- )
+ $<TARGET_OBJECTS:RTSanitizerCommon.arm.android>)
set_target_compile_flags(SanitizerTest
${SANITIZER_COMMON_CFLAGS}
- ${COMPILER_RT_GTEST_INCLUDE_CFLAGS}
- -I${COMPILER_RT_SOURCE_DIR}/include
- -I${COMPILER_RT_SOURCE_DIR}/lib
- -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common
- -O2 -g
- )
+ ${SANITIZER_TEST_CFLAGS_COMMON})
# Setup correct output directory and link flags.
- get_unittest_directory(OUTPUT_DIR)
set_target_properties(SanitizerTest PROPERTIES
- RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_DIR})
- set_target_link_flags(SanitizerTest ${SANITIZER_TEST_LINK_FLAGS})
+ RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+ set_target_link_flags(SanitizerTest ${SANITIZER_TEST_LINK_FLAGS_COMMON})
# Add unit test to test suite.
add_dependencies(SanitizerUnitTests SanitizerTest)
endif()
diff --git a/lib/sanitizer_common/tests/lit.cfg b/lib/sanitizer_common/tests/lit.cfg
index d774753985ac..303d56c91079 100644
--- a/lib/sanitizer_common/tests/lit.cfg
+++ b/lib/sanitizer_common/tests/lit.cfg
@@ -11,9 +11,8 @@ def get_required_attr(config, attr_name):
return attr_value
# Setup attributes common for all compiler-rt projects.
-llvm_src_root = get_required_attr(config, 'llvm_src_root')
-compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects",
- "compiler-rt", "lib",
+compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root')
+compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib",
"lit.common.unit.cfg")
lit.load_config(config, compiler_rt_lit_unit_cfg)
diff --git a/lib/sanitizer_common/tests/lit.site.cfg.in b/lib/sanitizer_common/tests/lit.site.cfg.in
index bb9a28d6a6cb..50485aa16ec2 100644
--- a/lib/sanitizer_common/tests/lit.site.cfg.in
+++ b/lib/sanitizer_common/tests/lit.site.cfg.in
@@ -1,9 +1,16 @@
## Autogenerated by LLVM/Clang configuration.
# Do not edit!
-config.build_type = "@CMAKE_BUILD_TYPE@"
config.llvm_obj_root = "@LLVM_BINARY_DIR@"
config.llvm_src_root = "@LLVM_SOURCE_DIR@"
+config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@"
+config.llvm_build_mode = "@LLVM_BUILD_MODE@"
+
+try:
+ config.llvm_build_mode = config.llvm_build_mode % lit.params
+except KeyError,e:
+ key, = e.args
+ lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key))
# Let the main config do the real work.
lit.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg")
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index d67f4636ef4f..de949ca7defe 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -22,6 +22,7 @@
#include <pthread.h>
#include <algorithm>
#include <vector>
+#include <set>
// Too slow for debug build
#if TSAN_DEBUG == 0
@@ -40,8 +41,16 @@ typedef SizeClassAllocator64<
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif
+static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
+static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
+
typedef SizeClassAllocator32<
- 0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;
+ 0, kAddressSpaceSize,
+ /*kMetadataSize*/16,
+ CompactSizeClassMap,
+ kRegionSizeLog,
+ FlatByteMap<kFlatByteMapSize> >
+ Allocator32Compact;
template <class SizeClassMap>
void TestSizeClassMap() {
@@ -63,7 +72,8 @@ void TestSizeClassAllocator() {
Allocator *a = new Allocator;
a->Init();
SizeClassAllocatorLocalCache<Allocator> cache;
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
@@ -77,7 +87,7 @@ void TestSizeClassAllocator() {
uptr size = sizes[s];
if (!a->CanAllocate(size, 1)) continue;
// printf("s = %ld\n", size);
- uptr n_iter = std::max((uptr)6, 10000000 / size);
+ uptr n_iter = std::max((uptr)6, 8000000 / size);
// fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
for (uptr i = 0; i < n_iter; i++) {
uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
@@ -114,6 +124,12 @@ void TestSizeClassAllocator() {
CHECK_EQ(last_total_allocated, total_allocated);
}
+ // Check that GetBlockBegin never crashes.
+ for (uptr x = 0, step = kAddressSpaceSize / 100000;
+ x < kAddressSpaceSize - step; x += step)
+ if (a->PointerIsMine(reinterpret_cast<void *>(x)))
+ Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
+
a->TestOnlyUnmap();
delete a;
}
@@ -137,25 +153,28 @@ void SizeClassAllocatorMetadataStress() {
Allocator *a = new Allocator;
a->Init();
SizeClassAllocatorLocalCache<Allocator> cache;
- cache.Init();
- static volatile void *sink;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
- const uptr kNumAllocs = 10000;
+ const uptr kNumAllocs = 1 << 13;
void *allocated[kNumAllocs];
+ void *meta[kNumAllocs];
for (uptr i = 0; i < kNumAllocs; i++) {
void *x = cache.Allocate(a, 1 + i % 50);
allocated[i] = x;
+ meta[i] = a->GetMetaData(x);
}
// Get Metadata kNumAllocs^2 times.
for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
- sink = a->GetMetaData(allocated[i % kNumAllocs]);
+ uptr idx = i % kNumAllocs;
+ void *m = a->GetMetaData(allocated[idx]);
+ EXPECT_EQ(m, meta[idx]);
}
for (uptr i = 0; i < kNumAllocs; i++) {
cache.Deallocate(a, 1 + i % 50, allocated[i]);
}
a->TestOnlyUnmap();
- (void)sink;
delete a;
}
@@ -167,11 +186,47 @@ TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
-#endif
+#endif // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
+template <class Allocator>
+void SizeClassAllocatorGetBlockBeginStress() {
+ Allocator *a = new Allocator;
+ a->Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+
+ uptr max_size_class = Allocator::kNumClasses - 1;
+ uptr size = Allocator::SizeClassMapT::Size(max_size_class);
+ u64 G8 = 1ULL << 33;
+ // Make sure we correctly compute GetBlockBegin() w/o overflow.
+ for (size_t i = 0; i <= G8 / size; i++) {
+ void *x = cache.Allocate(a, max_size_class);
+ void *beg = a->GetBlockBegin(x);
+ // if ((i & (i - 1)) == 0)
+ // fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
+ EXPECT_EQ(x, beg);
+ }
+
+ a->TestOnlyUnmap();
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator64>();
+}
+TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
+}
+TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
+}
+#endif // SANITIZER_WORDSIZE == 64
+
struct TestMapUnmapCallback {
static int map_count, unmap_count;
void OnMap(uptr p, uptr size) const { map_count++; }
@@ -191,8 +246,11 @@ TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
a->Init();
EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
- cache.Init();
- a->AllocateBatch(&cache, 64);
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+ AllocatorStats stats;
+ stats.Init();
+ a->AllocateBatch(&stats, &cache, 32);
EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc + metadata.
a->TestOnlyUnmap();
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing.
@@ -204,17 +262,25 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
typedef SizeClassAllocator32<
- 0, kAddressSpaceSize, 16, CompactSizeClassMap,
- TestMapUnmapCallback> Allocator32WithCallBack;
+ 0, kAddressSpaceSize,
+ /*kMetadataSize*/16,
+ CompactSizeClassMap,
+ kRegionSizeLog,
+ FlatByteMap<kFlatByteMapSize>,
+ TestMapUnmapCallback>
+ Allocator32WithCallBack;
Allocator32WithCallBack *a = new Allocator32WithCallBack;
a->Init();
- EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
- cache.Init();
- a->AllocateBatch(&cache, 64);
- EXPECT_EQ(TestMapUnmapCallback::map_count, 2); // alloc.
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+ AllocatorStats stats;
+ stats.Init();
+ a->AllocateBatch(&stats, &cache, 32);
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
a->TestOnlyUnmap();
- EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2); // The whole thing + alloc.
+ EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
delete a;
// fprintf(stderr, "Map: %d Unmap: %d\n",
// TestMapUnmapCallback::map_count,
@@ -226,9 +292,11 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
TestMapUnmapCallback::unmap_count = 0;
LargeMmapAllocator<TestMapUnmapCallback> a;
a.Init();
- void *x = a.Allocate(1 << 20, 1);
+ AllocatorStats stats;
+ stats.Init();
+ void *x = a.Allocate(&stats, 1 << 20, 1);
EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
- a.Deallocate(x);
+ a.Deallocate(&stats, x);
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}
@@ -237,9 +305,12 @@ void FailInAssertionOnOOM() {
Allocator a;
a.Init();
SizeClassAllocatorLocalCache<Allocator> cache;
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+ AllocatorStats stats;
+ stats.Init();
for (int i = 0; i < 1000000; i++) {
- a.AllocateBatch(&cache, 64);
+ a.AllocateBatch(&stats, &cache, 52);
}
a.TestOnlyUnmap();
@@ -254,13 +325,15 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
TEST(SanitizerCommon, LargeMmapAllocator) {
LargeMmapAllocator<> a;
a.Init();
+ AllocatorStats stats;
+ stats.Init();
static const int kNumAllocs = 1000;
char *allocated[kNumAllocs];
static const uptr size = 4000;
// Allocate some.
for (int i = 0; i < kNumAllocs; i++) {
- allocated[i] = (char *)a.Allocate(size, 1);
+ allocated[i] = (char *)a.Allocate(&stats, size, 1);
CHECK(a.PointerIsMine(allocated[i]));
}
// Deallocate all.
@@ -268,14 +341,14 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
for (int i = 0; i < kNumAllocs; i++) {
char *p = allocated[i];
CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
+ a.Deallocate(&stats, p);
}
  // Check that none are left.
CHECK_EQ(a.TotalMemoryUsed(), 0);
// Allocate some more, also add metadata.
for (int i = 0; i < kNumAllocs; i++) {
- char *x = (char *)a.Allocate(size, 1);
+ char *x = (char *)a.Allocate(&stats, size, 1);
CHECK_GE(a.GetActuallyAllocatedSize(x), size);
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
*meta = i;
@@ -294,7 +367,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
CHECK_EQ(*meta, idx);
CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
+ a.Deallocate(&stats, p);
}
CHECK_EQ(a.TotalMemoryUsed(), 0);
@@ -304,7 +377,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
const uptr kNumAlignedAllocs = 100;
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
uptr size = ((i % 10) + 1) * 4096;
- char *p = allocated[i] = (char *)a.Allocate(size, alignment);
+ char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
CHECK_EQ(p, a.GetBlockBegin(p));
CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
@@ -312,9 +385,17 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
p[0] = p[size - 1] = 0;
}
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
- a.Deallocate(allocated[i]);
+ a.Deallocate(&stats, allocated[i]);
}
}
+
+ // Regression test for boundary condition in GetBlockBegin().
+ uptr page_size = GetPageSizeCached();
+ char *p = (char *)a.Allocate(&stats, page_size, 1);
+ CHECK_EQ(p, a.GetBlockBegin(p));
+ CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
+ CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
+ a.Deallocate(&stats, p);
}
template
@@ -327,7 +408,8 @@ void TestCombinedAllocator() {
a->Init();
AllocatorCache cache;
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ a->InitCache(&cache);
EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
@@ -363,6 +445,7 @@ void TestCombinedAllocator() {
allocated.clear();
a->SwallowCache(&cache);
}
+ a->DestroyCache(&cache);
a->TestOnlyUnmap();
}
@@ -388,14 +471,13 @@ TEST(SanitizerCommon, CombinedAllocator32Compact) {
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
- static AllocatorCache static_allocator_cache;
- static_allocator_cache.Init();
AllocatorCache cache;
typedef typename AllocatorCache::Allocator Allocator;
Allocator *a = new Allocator();
a->Init();
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
const uptr kNumAllocs = 10000;
const int kNumIter = 100;
@@ -466,6 +548,42 @@ TEST(SanitizerCommon, AllocatorLeakTest) {
a.TestOnlyUnmap();
}
+
+// Struct which is allocated to pass info to new threads. The new thread frees
+// it.
+struct NewThreadParams {
+ AllocatorCache *thread_cache;
+ AllocatorCache::Allocator *allocator;
+ uptr class_id;
+};
+
+// Called in a new thread. Just frees its argument.
+static void *DeallocNewThreadWorker(void *arg) {
+ NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
+ params->thread_cache->Deallocate(params->allocator, params->class_id, params);
+ return NULL;
+}
+
+// The allocator cache is supposed to be POD and zero initialized. We should be
+// able to call Deallocate on a zeroed cache, and it will self-initialize.
+TEST(Allocator, AllocatorCacheDeallocNewThread) {
+ AllocatorCache::Allocator allocator;
+ allocator.Init();
+ AllocatorCache main_cache;
+ AllocatorCache child_cache;
+ memset(&main_cache, 0, sizeof(main_cache));
+ memset(&child_cache, 0, sizeof(child_cache));
+
+ uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
+ NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
+ main_cache.Allocate(&allocator, class_id));
+ params->thread_cache = &child_cache;
+ params->allocator = &allocator;
+ params->class_id = class_id;
+ pthread_t t;
+ EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
+ EXPECT_EQ(0, pthread_join(t, 0));
+}
#endif
TEST(Allocator, Basic) {
@@ -507,4 +625,122 @@ TEST(Allocator, ScopedBuffer) {
}
}
+class IterationTestCallback {
+ public:
+ explicit IterationTestCallback(std::set<void *> *chunks)
+ : chunks_(chunks) {}
+ void operator()(void *chunk) const {
+ chunks_->insert(chunk);
+ }
+ private:
+ std::set<void *> *chunks_;
+};
+
+template <class Allocator>
+void TestSizeClassAllocatorIteration() {
+ Allocator *a = new Allocator;
+ a->Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+
+ static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
+
+ std::vector<void *> allocated;
+
+ // Allocate a bunch of chunks.
+ for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
+ uptr size = sizes[s];
+ if (!a->CanAllocate(size, 1)) continue;
+ // printf("s = %ld\n", size);
+ uptr n_iter = std::max((uptr)6, 80000 / size);
+ // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
+ for (uptr j = 0; j < n_iter; j++) {
+ uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
+ void *x = cache.Allocate(a, class_id0);
+ allocated.push_back(x);
+ }
+ }
+
+ std::set<void *> reported_chunks;
+ IterationTestCallback callback(&reported_chunks);
+ a->ForceLock();
+ a->ForEachChunk(callback);
+ a->ForceUnlock();
+
+ for (uptr i = 0; i < allocated.size(); i++) {
+ // Don't use EXPECT_NE. Reporting the first mismatch is enough.
+ ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+ }
+
+ a->TestOnlyUnmap();
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
+ TestSizeClassAllocatorIteration<Allocator64>();
+}
+#endif
+
+TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
+ TestSizeClassAllocatorIteration<Allocator32Compact>();
+}
+
+TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
+ LargeMmapAllocator<> a;
+ a.Init();
+ AllocatorStats stats;
+ stats.Init();
+
+ static const uptr kNumAllocs = 1000;
+ char *allocated[kNumAllocs];
+ static const uptr size = 40;
+ // Allocate some.
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ allocated[i] = (char *)a.Allocate(&stats, size, 1);
+ }
+
+ std::set<void *> reported_chunks;
+ IterationTestCallback callback(&reported_chunks);
+ a.ForceLock();
+ a.ForEachChunk(callback);
+ a.ForceUnlock();
+
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ // Don't use EXPECT_NE. Reporting the first mismatch is enough.
+ ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+ }
+}
+
+#if SANITIZER_WORDSIZE == 64
+// Regression test for out-of-memory condition in PopulateFreeList().
+TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
+ // In a world where regions are small and chunks are huge...
+ typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
+ typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+ SpecialSizeClassMap> SpecialAllocator64;
+ const uptr kRegionSize =
+ kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
+ SpecialAllocator64 *a = new SpecialAllocator64;
+ a->Init();
+ SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+
+ // ...one man is on a mission to overflow a region with a series of
+ // successive allocations.
+ const uptr kClassID = 107;
+ const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
+ ASSERT_LT(2 * kAllocationSize, kRegionSize);
+ ASSERT_GT(3 * kAllocationSize, kRegionSize);
+ cache.Allocate(a, kClassID);
+ EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
+ "The process has exhausted");
+ a->TestOnlyUnmap();
+ delete a;
+}
+#endif
+
#endif // #if TSAN_DEBUG==0
diff --git a/lib/sanitizer_common/tests/sanitizer_atomic_test.cc b/lib/sanitizer_common/tests/sanitizer_atomic_test.cc
new file mode 100644
index 000000000000..a4a97c43e00f
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_atomic_test.cc
@@ -0,0 +1,55 @@
+//===-- sanitizer_atomic_test.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "gtest/gtest.h"
+
+namespace __sanitizer {
+
+// Clang crashes while compiling this test for Android:
+// http://llvm.org/bugs/show_bug.cgi?id=15587
+#if !SANITIZER_ANDROID
+template<typename T>
+void CheckAtomicCompareExchange() {
+ typedef typename T::Type Type;
+ {
+ Type old_val = 42;
+ Type new_val = 24;
+ Type var = old_val;
+ EXPECT_TRUE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val,
+ memory_order_relaxed));
+ EXPECT_FALSE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val,
+ memory_order_relaxed));
+ EXPECT_EQ(new_val, old_val);
+ }
+ {
+ Type old_val = 42;
+ Type new_val = 24;
+ Type var = old_val;
+ EXPECT_TRUE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val,
+ memory_order_relaxed));
+ EXPECT_FALSE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val,
+ memory_order_relaxed));
+ EXPECT_EQ(new_val, old_val);
+ }
+}
+
+TEST(SanitizerCommon, AtomicCompareExchangeTest) {
+ CheckAtomicCompareExchange<atomic_uint8_t>();
+ CheckAtomicCompareExchange<atomic_uint16_t>();
+ CheckAtomicCompareExchange<atomic_uint32_t>();
+ CheckAtomicCompareExchange<atomic_uint64_t>();
+ CheckAtomicCompareExchange<atomic_uintptr_t>();
+}
+#endif //!SANITIZER_ANDROID
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
index 01d8b5a87c01..424c279d4ada 100644
--- a/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_platform.h"
#include "gtest/gtest.h"
namespace __sanitizer {
@@ -79,7 +80,7 @@ TEST(SanitizerCommon, MmapAlignedOrDie) {
}
}
-#ifdef __linux__
+#if SANITIZER_LINUX
TEST(SanitizerCommon, SanitizerSetThreadName) {
const char *names[] = {
"0123456789012",
@@ -96,4 +97,65 @@ TEST(SanitizerCommon, SanitizerSetThreadName) {
}
#endif
-} // namespace sanitizer
+TEST(SanitizerCommon, InternalVector) {
+ InternalVector<uptr> vector(1);
+ for (uptr i = 0; i < 100; i++) {
+ EXPECT_EQ(i, vector.size());
+ vector.push_back(i);
+ }
+ for (uptr i = 0; i < 100; i++) {
+ EXPECT_EQ(i, vector[i]);
+ }
+ for (int i = 99; i >= 0; i--) {
+ EXPECT_EQ((uptr)i, vector.back());
+ vector.pop_back();
+ EXPECT_EQ((uptr)i, vector.size());
+ }
+}
+
+void TestThreadInfo(bool main) {
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);
+
+ int stack_var;
+ EXPECT_NE(stk_addr, (uptr)0);
+ EXPECT_NE(stk_size, (uptr)0);
+ EXPECT_GT((uptr)&stack_var, stk_addr);
+ EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);
+
+#if SANITIZER_LINUX && defined(__x86_64__)
+ static __thread int thread_var;
+ EXPECT_NE(tls_addr, (uptr)0);
+ EXPECT_NE(tls_size, (uptr)0);
+ EXPECT_GT((uptr)&thread_var, tls_addr);
+ EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);
+
+ // Ensure that tls and stack do not intersect.
+ uptr tls_end = tls_addr + tls_size;
+ EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
+ EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size);
+ EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr));
+#endif
+}
+
+static void *WorkerThread(void *arg) {
+ TestThreadInfo(false);
+ return 0;
+}
+
+TEST(SanitizerCommon, ThreadStackTlsMain) {
+ InitTlsSize();
+ TestThreadInfo(true);
+}
+
+TEST(SanitizerCommon, ThreadStackTlsWorker) {
+ InitTlsSize();
+ pthread_t t;
+ pthread_create(&t, 0, WorkerThread, 0);
+ pthread_join(t, 0);
+}
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_flags_test.cc b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
index c0589f4d2e90..cd3cac11bc80 100644
--- a/lib/sanitizer_common/tests/sanitizer_flags_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
@@ -32,7 +32,7 @@ static void TestStrFlag(const char *start_value, const char *env,
const char *final_value) {
const char *flag = start_value;
ParseFlag(env, &flag, kFlagName);
- EXPECT_EQ(internal_strcmp(final_value, flag), 0);
+ EXPECT_EQ(0, internal_strcmp(final_value, flag));
}
TEST(SanitizerCommon, BooleanFlags) {
@@ -63,6 +63,24 @@ TEST(SanitizerCommon, StrFlags) {
TestStrFlag("", "--flag_name='abc zxc'", "abc zxc");
TestStrFlag("", "--flag_name='abc zxcc'", "abc zxcc");
TestStrFlag("", "--flag_name=\"abc qwe\" asd", "abc qwe");
+ TestStrFlag("", "other_flag_name=zzz", "");
+}
+
+static void TestTwoFlags(const char *env, bool expected_flag1,
+ const char *expected_flag2) {
+ bool flag1 = !expected_flag1;
+ const char *flag2 = "";
+ ParseFlag(env, &flag1, "flag1");
+ ParseFlag(env, &flag2, "flag2");
+ EXPECT_EQ(expected_flag1, flag1);
+ EXPECT_EQ(0, internal_strcmp(flag2, expected_flag2));
+}
+
+TEST(SanitizerCommon, MultipleFlags) {
+ TestTwoFlags("flag1=1 flag2='zzz'", true, "zzz");
+ TestTwoFlags("flag2='qxx' flag1=0", false, "qxx");
+ TestTwoFlags("flag1=false:flag2='zzz'", false, "zzz");
+ TestTwoFlags("flag2=qxx:flag1=yes", true, "qxx");
}
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
index b9d8414e0cbf..39c29d357327 100644
--- a/lib/sanitizer_common/tests/sanitizer_libc_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
@@ -9,9 +9,18 @@
// Tests for sanitizer_libc.h.
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_platform.h"
#include "gtest/gtest.h"
+#if SANITIZER_LINUX || SANITIZER_MAC
+# define SANITIZER_TEST_HAS_STAT_H 1
+# include <sys/stat.h>
+#else
+# define SANITIZER_TEST_HAS_STAT_H 0
+#endif
+
// A regression test for internal_memmove() implementation.
TEST(SanitizerCommon, InternalMemmoveRegression) {
char src[] = "Hello World";
@@ -40,3 +49,69 @@ TEST(SanitizerCommon, mem_is_zero) {
}
delete [] x;
}
+
+struct stat_and_more {
+ struct stat st;
+ unsigned char z;
+};
+
+TEST(SanitizerCommon, FileOps) {
+ const char *str1 = "qwerty";
+ uptr len1 = internal_strlen(str1);
+ const char *str2 = "zxcv";
+ uptr len2 = internal_strlen(str2);
+
+ u32 uid = GetUid();
+ char temp_filename[128];
+#if SANITIZER_ANDROID
+ // I don't know a way to query temp directory location on Android without
+ // going through Java interfaces. The code below is not ideal, but should
+ // work. May require "adb root", but it is needed for almost any use of ASan
+ // on Android already.
+ internal_snprintf(temp_filename, sizeof(temp_filename),
+ "%s/sanitizer_common.tmp.%d",
+ GetEnv("EXTERNAL_STORAGE"), uid);
+#else
+ internal_snprintf(temp_filename, sizeof(temp_filename),
+ "/tmp/sanitizer_common.tmp.%d", uid);
+#endif
+ uptr openrv = OpenFile(temp_filename, true);
+ EXPECT_FALSE(internal_iserror(openrv));
+ fd_t fd = openrv;
+ EXPECT_EQ(len1, internal_write(fd, str1, len1));
+ EXPECT_EQ(len2, internal_write(fd, str2, len2));
+ internal_close(fd);
+
+ openrv = OpenFile(temp_filename, false);
+ EXPECT_FALSE(internal_iserror(openrv));
+ fd = openrv;
+ uptr fsize = internal_filesize(fd);
+ EXPECT_EQ(len1 + len2, fsize);
+
+#if SANITIZER_TEST_HAS_STAT_H
+ struct stat st1, st2, st3;
+ EXPECT_EQ(0u, internal_stat(temp_filename, &st1));
+ EXPECT_EQ(0u, internal_lstat(temp_filename, &st2));
+ EXPECT_EQ(0u, internal_fstat(fd, &st3));
+ EXPECT_EQ(fsize, (uptr)st3.st_size);
+
+ // Verify that internal_fstat does not write beyond the end of the supplied
+ // buffer.
+ struct stat_and_more sam;
+ memset(&sam, 0xAB, sizeof(sam));
+ EXPECT_EQ(0u, internal_fstat(fd, &sam.st));
+ EXPECT_EQ(0xAB, sam.z);
+ EXPECT_NE(0xAB, sam.st.st_size);
+ EXPECT_NE(0, sam.st.st_size);
+#endif
+
+ char buf[64] = {};
+ EXPECT_EQ(len1, internal_read(fd, buf, len1));
+ EXPECT_EQ(0, internal_memcmp(buf, str1, len1));
+ EXPECT_EQ((char)0, buf[len1 + 1]);
+ internal_memset(buf, 0, len1);
+ EXPECT_EQ(len2, internal_read(fd, buf, len2));
+ EXPECT_EQ(0, internal_memcmp(buf, str2, len2));
+ internal_close(fd);
+}
+
diff --git a/lib/sanitizer_common/tests/sanitizer_linux_test.cc b/lib/sanitizer_common/tests/sanitizer_linux_test.cc
new file mode 100644
index 000000000000..b18aeb030acf
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_linux_test.cc
@@ -0,0 +1,253 @@
+//===-- sanitizer_linux_test.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tests for sanitizer_linux.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_common/sanitizer_linux.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "gtest/gtest.h"
+
+#ifdef __x86_64__
+#include <asm/prctl.h>
+#endif
+#include <pthread.h>
+#include <sched.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <vector>
+
+#ifdef __x86_64__
+extern "C" int arch_prctl(int code, __sanitizer::uptr *addr);
+#endif
+
+namespace __sanitizer {
+
+struct TidReporterArgument {
+ TidReporterArgument() {
+ pthread_mutex_init(&terminate_thread_mutex, NULL);
+ pthread_mutex_init(&tid_reported_mutex, NULL);
+ pthread_cond_init(&terminate_thread_cond, NULL);
+ pthread_cond_init(&tid_reported_cond, NULL);
+ terminate_thread = false;
+ }
+
+ ~TidReporterArgument() {
+ pthread_mutex_destroy(&terminate_thread_mutex);
+ pthread_mutex_destroy(&tid_reported_mutex);
+ pthread_cond_destroy(&terminate_thread_cond);
+ pthread_cond_destroy(&tid_reported_cond);
+ }
+
+ pid_t reported_tid;
+ // For signaling to spawned threads that they should terminate.
+ pthread_cond_t terminate_thread_cond;
+ pthread_mutex_t terminate_thread_mutex;
+ bool terminate_thread;
+ // For signaling to main thread that a child thread has reported its tid.
+ pthread_cond_t tid_reported_cond;
+ pthread_mutex_t tid_reported_mutex;
+
+ private:
+ // Disallow evil constructors
+ TidReporterArgument(const TidReporterArgument &);
+ void operator=(const TidReporterArgument &);
+};
+
+class ThreadListerTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ pthread_t pthread_id;
+ pid_t tid;
+ for (uptr i = 0; i < kThreadCount; i++) {
+ SpawnTidReporter(&pthread_id, &tid);
+ pthread_ids_.push_back(pthread_id);
+ tids_.push_back(tid);
+ }
+ }
+
+ virtual void TearDown() {
+ pthread_mutex_lock(&thread_arg.terminate_thread_mutex);
+ thread_arg.terminate_thread = true;
+ pthread_cond_broadcast(&thread_arg.terminate_thread_cond);
+ pthread_mutex_unlock(&thread_arg.terminate_thread_mutex);
+ for (uptr i = 0; i < pthread_ids_.size(); i++)
+ pthread_join(pthread_ids_[i], NULL);
+ }
+
+ void SpawnTidReporter(pthread_t *pthread_id, pid_t *tid);
+
+ static const uptr kThreadCount = 20;
+
+ std::vector<pthread_t> pthread_ids_;
+ std::vector<pid_t> tids_;
+
+ TidReporterArgument thread_arg;
+};
+
+// Writes its TID once to reported_tid and waits until signaled to terminate.
+void *TidReporterThread(void *argument) {
+ TidReporterArgument *arg = reinterpret_cast<TidReporterArgument *>(argument);
+ pthread_mutex_lock(&arg->tid_reported_mutex);
+ arg->reported_tid = GetTid();
+ pthread_cond_broadcast(&arg->tid_reported_cond);
+ pthread_mutex_unlock(&arg->tid_reported_mutex);
+
+ pthread_mutex_lock(&arg->terminate_thread_mutex);
+ while (!arg->terminate_thread)
+ pthread_cond_wait(&arg->terminate_thread_cond,
+ &arg->terminate_thread_mutex);
+ pthread_mutex_unlock(&arg->terminate_thread_mutex);
+ return NULL;
+}
+
+void ThreadListerTest::SpawnTidReporter(pthread_t *pthread_id,
+ pid_t *tid) {
+ pthread_mutex_lock(&thread_arg.tid_reported_mutex);
+ thread_arg.reported_tid = -1;
+ ASSERT_EQ(0, pthread_create(pthread_id, NULL,
+ TidReporterThread,
+ &thread_arg));
+ while (thread_arg.reported_tid == -1)
+ pthread_cond_wait(&thread_arg.tid_reported_cond,
+ &thread_arg.tid_reported_mutex);
+ pthread_mutex_unlock(&thread_arg.tid_reported_mutex);
+ *tid = thread_arg.reported_tid;
+}
+
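+// Reads TIDs from |thread_lister| until it is exhausted, expecting no error.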
+static std::vector<pid_t> ReadTidsToVector(ThreadLister *thread_lister) {
+ std::vector<pid_t> listed_tids;
+ pid_t tid;
+ while ((tid = thread_lister->GetNextTID()) >= 0)
+ listed_tids.push_back(tid);
+ EXPECT_FALSE(thread_lister->error());
+ return listed_tids;
+}
+
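+// Returns true if every element of |second| is also present in |first|.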
+static bool Includes(std::vector<pid_t> first, std::vector<pid_t> second) {
+ std::sort(first.begin(), first.end());
+ std::sort(second.begin(), second.end());
+ return std::includes(first.begin(), first.end(),
+ second.begin(), second.end());
+}
+
+static bool HasElement(std::vector<pid_t> vector, pid_t element) {
+ return std::find(vector.begin(), vector.end(), element) != vector.end();
+}
+
+// ThreadLister's output should include the current thread's TID and the TID of
+// every thread we spawned.
+TEST_F(ThreadListerTest, ThreadListerSeesAllSpawnedThreads) {
+ pid_t self_tid = GetTid();
+ ThreadLister thread_lister(getpid());
+ std::vector<pid_t> listed_tids = ReadTidsToVector(&thread_lister);
+ ASSERT_TRUE(HasElement(listed_tids, self_tid));
+ ASSERT_TRUE(Includes(listed_tids, tids_));
+}
+
+// Calling Reset() should not cause ThreadLister to forget any threads it's
+// supposed to know about.
+TEST_F(ThreadListerTest, ResetDoesNotForgetThreads) {
+ ThreadLister thread_lister(getpid());
+
+ // Run the loop body twice, because Reset() might behave differently if called
+ // on a freshly created object.
+ for (uptr i = 0; i < 2; i++) {
+ thread_lister.Reset();
+ std::vector<pid_t> listed_tids = ReadTidsToVector(&thread_lister);
+ ASSERT_TRUE(Includes(listed_tids, tids_));
+ }
+}
+
+// If new threads have spawned during the ThreadLister object's lifetime,
+// calling Reset() should cause ThreadLister to recognize their existence.
+TEST_F(ThreadListerTest, ResetMakesNewThreadsKnown) {
+ ThreadLister thread_lister(getpid());
+ std::vector<pid_t> threads_before_extra = ReadTidsToVector(&thread_lister);
+
+ pthread_t extra_pthread_id;
+ pid_t extra_tid;
+ SpawnTidReporter(&extra_pthread_id, &extra_tid);
+ // Register the new thread so it gets terminated in TearDown().
+ pthread_ids_.push_back(extra_pthread_id);
+
+  // It would be very bizarre if the new TID had been listed before we even
+  // spawned that thread, but it would also cause a false success in this
+  // test, so check for that just in case.
+ ASSERT_FALSE(HasElement(threads_before_extra, extra_tid));
+
+ thread_lister.Reset();
+
+ std::vector<pid_t> threads_after_extra = ReadTidsToVector(&thread_lister);
+ ASSERT_TRUE(HasElement(threads_after_extra, extra_tid));
+}
+
+TEST(SanitizerCommon, SetEnvTest) {
+ const char kEnvName[] = "ENV_FOO";
+ SetEnv(kEnvName, "value");
+ EXPECT_STREQ("value", getenv(kEnvName));
+ unsetenv(kEnvName);
+ EXPECT_EQ(0, getenv(kEnvName));
+}
+
+#ifdef __x86_64__
+// libpthread puts the thread descriptor (%fs:0x0) at the end of stack space.
+void *thread_descriptor_test_func(void *arg) {
+ uptr fs;
+ arch_prctl(ARCH_GET_FS, &fs);
+ pthread_attr_t attr;
+ pthread_getattr_np(pthread_self(), &attr);
+ void *stackaddr;
+ uptr stacksize;
+ pthread_attr_getstack(&attr, &stackaddr, &stacksize);
+ return (void *)((uptr)stackaddr + stacksize - fs);
+}
+
+TEST(SanitizerLinux, ThreadDescriptorSize) {
+ pthread_t tid;
+ void *result;
+ pthread_create(&tid, 0, thread_descriptor_test_func, 0);
+ ASSERT_EQ(0, pthread_join(tid, &result));
+ EXPECT_EQ((uptr)result, ThreadDescriptorSize());
+}
+#endif
+
+TEST(SanitizerCommon, LibraryNameIs) {
+ EXPECT_FALSE(LibraryNameIs("", ""));
+
+ char full_name[256];
+ const char *paths[] = { "", "/", "/path/to/" };
+ const char *suffixes[] = { "", "-linux", ".1.2", "-linux.1.2" };
+ const char *base_names[] = { "lib", "lib.0", "lib-i386" };
+ const char *wrong_names[] = { "", "lib.9", "lib-x86_64" };
+ for (uptr i = 0; i < ARRAY_SIZE(paths); i++)
+ for (uptr j = 0; j < ARRAY_SIZE(suffixes); j++) {
+ for (uptr k = 0; k < ARRAY_SIZE(base_names); k++) {
+ internal_snprintf(full_name, ARRAY_SIZE(full_name), "%s%s%s.so",
+ paths[i], base_names[k], suffixes[j]);
+ EXPECT_TRUE(LibraryNameIs(full_name, base_names[k]))
+ << "Full name " << full_name
+ << " doesn't match base name " << base_names[k];
+ for (uptr m = 0; m < ARRAY_SIZE(wrong_names); m++)
+ EXPECT_FALSE(LibraryNameIs(full_name, wrong_names[m]))
+ << "Full name " << full_name
+ << " matches base name " << wrong_names[m];
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
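
For reference, the ThreadLister idiom the tests above exercise can be shown in
isolation. The sketch below is illustrative only: it uses just the members the
tests rely on (the pid constructor, GetNextTID(), error() and Reset()), and
ListOwnThreads is a hypothetical helper name, not part of the runtime.

#include "sanitizer_common/sanitizer_linux.h"

#include <sys/types.h>
#include <unistd.h>
#include <vector>

// Enumerate every TID of the current process, the same way the tests do.
// (Linux-only, like ThreadLister itself.)
static std::vector<pid_t> ListOwnThreads() {
  __sanitizer::ThreadLister lister(getpid());
  std::vector<pid_t> tids;
  pid_t tid;
  while ((tid = lister.GetNextTID()) >= 0)
    tids.push_back(tid);
  if (lister.error())
    tids.clear();  // The listing is not trustworthy if an error occurred.
  return tids;
}

Reset() is not shown: it rewinds the lister so that a fresh pass also picks up
threads spawned after construction, which is exactly what the
ResetMakesNewThreadsKnown test verifies.
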
diff --git a/lib/sanitizer_common/tests/sanitizer_mutex_test.cc b/lib/sanitizer_common/tests/sanitizer_mutex_test.cc
index 6bb2ae29a188..1dc9bef20710 100644
--- a/lib/sanitizer_common/tests/sanitizer_mutex_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_mutex_test.cc
@@ -92,6 +92,12 @@ static void *try_thread(void *param) {
return 0;
}
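+// Takes the mutex with a scoped lock and verifies that CheckLocked() passes
+// while it is held.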
+template<typename MutexType>
+static void check_locked(MutexType *mtx) {
+ GenericScopedLock<MutexType> l(mtx);
+ mtx->CheckLocked();
+}
+
TEST(SanitizerCommon, SpinMutex) {
SpinMutex mtx;
mtx.Init();
@@ -123,6 +129,7 @@ TEST(SanitizerCommon, BlockingMutex) {
pthread_create(&threads[i], 0, lock_thread<BlockingMutex>, &data);
for (int i = 0; i < kThreads; i++)
pthread_join(threads[i], 0);
+ check_locked(mtx);
}
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc b/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc
index 00b260479da9..1df2bcfd4bec 100644
--- a/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc
@@ -19,45 +19,72 @@
using namespace __sanitizer;
-#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
((std::vector<unsigned> *)ctx)->push_back(size)
#include "sanitizer_common/sanitizer_common_interceptors_scanf.inc"
-static void testScanf2(void *ctx, const char *format, ...) {
+static const char scanf_buf[] = "Test string.";
+static size_t scanf_buf_size = sizeof(scanf_buf);
+static const unsigned SCANF_ARGS_MAX = 16;
+
+static void testScanf3(void *ctx, int result, bool allowGnuMalloc,
+ const char *format, ...) {
va_list ap;
va_start(ap, format);
- scanf_common(ctx, format, ap);
+ scanf_common(ctx, result, allowGnuMalloc, format, ap);
va_end(ap);
}
-static void testScanf(const char *format, unsigned n, ...) {
+static void testScanf2(const char *format, int scanf_result,
+ bool allowGnuMalloc, unsigned n,
+ va_list expected_sizes) {
std::vector<unsigned> scanf_sizes;
// 16 args should be enough.
- testScanf2((void *)&scanf_sizes, format,
- (void*)0, (void*)0, (void*)0, (void*)0,
- (void*)0, (void*)0, (void*)0, (void*)0,
- (void*)0, (void*)0, (void*)0, (void*)0,
- (void*)0, (void*)0, (void*)0, (void*)0);
- ASSERT_EQ(n, scanf_sizes.size()) <<
- "Unexpected number of format arguments: '" << format << "'";
+ testScanf3((void *)&scanf_sizes, scanf_result, allowGnuMalloc, format,
+ scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf,
+ scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf,
+ scanf_buf, scanf_buf, scanf_buf, scanf_buf);
+ ASSERT_EQ(n, scanf_sizes.size()) << "Unexpected number of format arguments: '"
+ << format << "'";
+ for (unsigned i = 0; i < n; ++i)
+ EXPECT_EQ(va_arg(expected_sizes, unsigned), scanf_sizes[i])
+        << "Unexpected write size for argument " << i << ", format string '"
+ << format << "'";
+}
+
+static void testScanf(const char *format, unsigned n, ...) {
va_list ap;
va_start(ap, n);
- for (unsigned i = 0; i < n; ++i)
- EXPECT_EQ(va_arg(ap, unsigned), scanf_sizes[i]) <<
- "Unexpect write size for argument " << i << ", format string '" <<
- format << "'";
+ testScanf2(format, SCANF_ARGS_MAX, /* allowGnuMalloc */ true, n, ap);
+ va_end(ap);
+}
+
+static void testScanfPartial(const char *format, int scanf_result, unsigned n,
+ ...) {
+ va_list ap;
+ va_start(ap, n);
+ testScanf2(format, scanf_result, /* allowGnuMalloc */ true, n, ap);
+ va_end(ap);
+}
+
+static void testScanfNoGnuMalloc(const char *format, unsigned n, ...) {
+ va_list ap;
+ va_start(ap, n);
+ testScanf2(format, SCANF_ARGS_MAX, /* allowGnuMalloc */ false, n, ap);
va_end(ap);
}
TEST(SanitizerCommonInterceptors, Scanf) {
- const unsigned I = sizeof(int); // NOLINT
- const unsigned L = sizeof(long); // NOLINT
- const unsigned LL = sizeof(long long); // NOLINT
- const unsigned S = sizeof(short); // NOLINT
- const unsigned C = sizeof(char); // NOLINT
- const unsigned D = sizeof(double); // NOLINT
- const unsigned F = sizeof(float); // NOLINT
+ const unsigned I = sizeof(int); // NOLINT
+ const unsigned L = sizeof(long); // NOLINT
+ const unsigned LL = sizeof(long long); // NOLINT
+ const unsigned S = sizeof(short); // NOLINT
+ const unsigned C = sizeof(char); // NOLINT
+ const unsigned D = sizeof(double); // NOLINT
+ const unsigned LD = sizeof(long double); // NOLINT
+ const unsigned F = sizeof(float); // NOLINT
+ const unsigned P = sizeof(char *); // NOLINT
testScanf("%d", 1, I);
testScanf("%d%d%d", 3, I, I, I);
@@ -65,6 +92,7 @@ TEST(SanitizerCommonInterceptors, Scanf) {
testScanf("%ld", 1, L);
testScanf("%llu", 1, LL);
testScanf("a %hd%hhx", 2, S, C);
+ testScanf("%c", 1, C);
testScanf("%%", 0);
testScanf("a%%", 0);
@@ -79,7 +107,72 @@ TEST(SanitizerCommonInterceptors, Scanf) {
testScanf("%nf", 1, I);
testScanf("%10s", 1, 11);
+ testScanf("%10c", 1, 10);
testScanf("%%10s", 0);
testScanf("%*10s", 0);
testScanf("%*d", 0);
+
+ testScanf("%4d%8f%c", 3, I, F, C);
+ testScanf("%s%d", 2, scanf_buf_size, I);
+ testScanf("%[abc]", 1, scanf_buf_size);
+ testScanf("%4[bcdef]", 1, 5);
+ testScanf("%[]]", 1, scanf_buf_size);
+ testScanf("%8[^]%d0-9-]%c", 2, 9, C);
+
+ testScanf("%*[^:]%n:%d:%1[ ]%n", 4, I, I, 2, I);
+
+ testScanf("%*d%u", 1, I);
+
+ testScanf("%c%d", 2, C, I);
+ testScanf("%A%lf", 2, F, D);
+
+ testScanf("%ms %Lf", 2, P, LD);
+ testScanf("s%Las", 1, LD);
+ testScanf("%ar", 1, F);
+
+  // In the cases with std::min below, the format spec can be interpreted
+  // either as a floating-point conversion or, as a GNU extension, as a
+  // callee-allocated string. Our conservative implementation reports the
+  // possibility with the smaller store size.
+ testScanf("%a[", 0);
+ testScanf("%a[]", 0);
+ testScanf("%a[]]", 1, std::min(F, P));
+ testScanf("%a[abc]", 1, std::min(F, P));
+ testScanf("%a[^abc]", 1, std::min(F, P));
+ testScanf("%a[ab%c] %d", 0);
+ testScanf("%a[^ab%c] %d", 0);
+ testScanf("%as", 1, std::min(F, P));
+ testScanf("%aS", 1, std::min(F, P));
+ testScanf("%a13S", 1, std::min(F, P));
+ testScanf("%alS", 1, std::min(F, P));
+
+ testScanfNoGnuMalloc("s%Las", 1, LD);
+ testScanfNoGnuMalloc("%ar", 1, F);
+ testScanfNoGnuMalloc("%a[", 1, F);
+ testScanfNoGnuMalloc("%a[]", 1, F);
+ testScanfNoGnuMalloc("%a[]]", 1, F);
+ testScanfNoGnuMalloc("%a[abc]", 1, F);
+ testScanfNoGnuMalloc("%a[^abc]", 1, F);
+ testScanfNoGnuMalloc("%a[ab%c] %d", 3, F, C, I);
+ testScanfNoGnuMalloc("%a[^ab%c] %d", 3, F, C, I);
+ testScanfNoGnuMalloc("%as", 1, F);
+ testScanfNoGnuMalloc("%aS", 1, F);
+ testScanfNoGnuMalloc("%a13S", 1, F);
+ testScanfNoGnuMalloc("%alS", 1, F);
+
+ testScanf("%5$d", 0);
+ testScanf("%md", 0);
+ testScanf("%m10s", 0);
+
+ testScanfPartial("%d%d%d%d //1\n", 1, 1, I);
+ testScanfPartial("%d%d%d%d //2\n", 2, 2, I, I);
+ testScanfPartial("%d%d%d%d //3\n", 3, 3, I, I, I);
+ testScanfPartial("%d%d%d%d //4\n", 4, 4, I, I, I, I);
+
+ testScanfPartial("%d%n%n%d //1\n", 1, 1, I);
+ testScanfPartial("%d%n%n%d //2\n", 2, 4, I, I, I, I);
+
+ testScanfPartial("%d%n%n%d %s %s", 3, 5, I, I, I, I, scanf_buf_size);
+ testScanfPartial("%d%n%n%d %s %s", 4, 6, I, I, I, I, scanf_buf_size,
+ scanf_buf_size);
}
diff --git a/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc b/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc
new file mode 100644
index 000000000000..3d352cb97a5e
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc
@@ -0,0 +1,96 @@
+//===-- sanitizer_stacktrace_test.cc --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "gtest/gtest.h"
+
+namespace __sanitizer {
+
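+// Builds a fake stack of fp/retaddr pairs (see SetUp) and unwinds it with
+// StackTrace::FastUnwindStack.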
+class FastUnwindTest : public ::testing::Test {
+ protected:
+ virtual void SetUp();
+
+ uptr fake_stack[10];
+ uptr start_pc;
+ uptr fake_top;
+ uptr fake_bottom;
+ StackTrace trace;
+};
+
+static uptr PC(uptr idx) {
+ return (1<<20) + idx;
+}
+
+void FastUnwindTest::SetUp() {
+ // Fill an array of pointers with fake fp+retaddr pairs. Frame pointers have
+ // even indices.
+ for (uptr i = 0; i+1 < ARRAY_SIZE(fake_stack); i += 2) {
+ fake_stack[i] = (uptr)&fake_stack[i+2]; // fp
+ fake_stack[i+1] = PC(i + 1); // retaddr
+ }
+ // Mark the last fp as zero to terminate the stack trace.
+ fake_stack[RoundDownTo(ARRAY_SIZE(fake_stack) - 1, 2)] = 0;
+
+ // Top is two slots past the end because FastUnwindStack subtracts two.
+ fake_top = (uptr)&fake_stack[ARRAY_SIZE(fake_stack) + 2];
+ // Bottom is one slot before the start because FastUnwindStack uses >.
+ fake_bottom = (uptr)&fake_stack[-1];
+ start_pc = PC(0);
+
+ // This is common setup done by __asan::GetStackTrace().
+ trace.size = 0;
+ trace.max_size = ARRAY_SIZE(fake_stack);
+ trace.trace[0] = start_pc;
+}
+
+TEST_F(FastUnwindTest, Basic) {
+ trace.FastUnwindStack(start_pc, (uptr)&fake_stack[0],
+ fake_top, fake_bottom);
+ // Should get all on-stack retaddrs and start_pc.
+ EXPECT_EQ(6U, trace.size);
+ EXPECT_EQ(start_pc, trace.trace[0]);
+ for (uptr i = 1; i <= 5; i++) {
+ EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
+ }
+}
+
+// From: http://code.google.com/p/address-sanitizer/issues/detail?id=162
+TEST_F(FastUnwindTest, FramePointerLoop) {
+ // Make one fp point to itself.
+ fake_stack[4] = (uptr)&fake_stack[4];
+ trace.FastUnwindStack(start_pc, (uptr)&fake_stack[0],
+ fake_top, fake_bottom);
+ // Should get all on-stack retaddrs up to the 4th slot and start_pc.
+ EXPECT_EQ(4U, trace.size);
+ EXPECT_EQ(start_pc, trace.trace[0]);
+ for (uptr i = 1; i <= 3; i++) {
+ EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
+ }
+}
+
+TEST_F(FastUnwindTest, MisalignedFramePointer) {
+ // Make one fp misaligned.
+ fake_stack[4] += 3;
+ trace.FastUnwindStack(start_pc, (uptr)&fake_stack[0],
+ fake_top, fake_bottom);
+ // Should get all on-stack retaddrs up to the 4th slot and start_pc.
+ EXPECT_EQ(4U, trace.size);
+ EXPECT_EQ(start_pc, trace.trace[0]);
+ for (uptr i = 1; i < 4U; i++) {
+ EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
+ }
+}
+
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc b/lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc
new file mode 100644
index 000000000000..a5f8516df575
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc
@@ -0,0 +1,194 @@
+//===-- sanitizer_stoptheworld_test.cc ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tests for sanitizer_stoptheworld.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "gtest/gtest.h"
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <pthread.h>
+#include <sched.h>
+
+namespace __sanitizer {
+
+static pthread_mutex_t incrementer_thread_exit_mutex;
+
+struct CallbackArgument {
+ volatile int counter;
+ volatile bool threads_stopped;
+ volatile bool callback_executed;
+ CallbackArgument()
+ : counter(0),
+ threads_stopped(false),
+ callback_executed(false) {}
+};
+
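+// Increments the shared counter in a loop until the exit mutex can be
+// acquired, i.e. until the main thread releases it.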
+void *IncrementerThread(void *argument) {
+ CallbackArgument *callback_argument = (CallbackArgument *)argument;
+ while (true) {
+ __sync_fetch_and_add(&callback_argument->counter, 1);
+ if (pthread_mutex_trylock(&incrementer_thread_exit_mutex) == 0) {
+ pthread_mutex_unlock(&incrementer_thread_exit_mutex);
+ return NULL;
+ } else {
+ sched_yield();
+ }
+ }
+}
+
+// This callback checks that IncrementerThread is suspended at the time of its
+// execution.
+void Callback(const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ CallbackArgument *callback_argument = (CallbackArgument *)argument;
+ callback_argument->callback_executed = true;
+ int counter_at_init = __sync_fetch_and_add(&callback_argument->counter, 0);
+ for (uptr i = 0; i < 1000; i++) {
+ sched_yield();
+ if (__sync_fetch_and_add(&callback_argument->counter, 0) !=
+ counter_at_init) {
+ callback_argument->threads_stopped = false;
+ return;
+ }
+ }
+ callback_argument->threads_stopped = true;
+}
+
+TEST(StopTheWorld, SuspendThreadsSimple) {
+ pthread_mutex_init(&incrementer_thread_exit_mutex, NULL);
+ CallbackArgument argument;
+ pthread_t thread_id;
+ int pthread_create_result;
+ pthread_mutex_lock(&incrementer_thread_exit_mutex);
+ pthread_create_result = pthread_create(&thread_id, NULL, IncrementerThread,
+ &argument);
+ ASSERT_EQ(0, pthread_create_result);
+ StopTheWorld(&Callback, &argument);
+ pthread_mutex_unlock(&incrementer_thread_exit_mutex);
+ EXPECT_TRUE(argument.callback_executed);
+ EXPECT_TRUE(argument.threads_stopped);
+ // argument is on stack, so we have to wait for the incrementer thread to
+ // terminate before we can return from this function.
+ ASSERT_EQ(0, pthread_join(thread_id, NULL));
+ pthread_mutex_destroy(&incrementer_thread_exit_mutex);
+}
+
+// A more comprehensive test where we spawn a bunch of threads while executing
+// StopTheWorld in parallel.
+static const uptr kThreadCount = 50;
+static const uptr kStopWorldAfter = 10; // let this many threads spawn first
+
+static pthread_mutex_t advanced_incrementer_thread_exit_mutex;
+
+struct AdvancedCallbackArgument {
+ volatile uptr thread_index;
+ volatile int counters[kThreadCount];
+ pthread_t thread_ids[kThreadCount];
+ volatile bool threads_stopped;
+ volatile bool callback_executed;
+ volatile bool fatal_error;
+ AdvancedCallbackArgument()
+ : thread_index(0),
+ threads_stopped(false),
+ callback_executed(false),
+ fatal_error(false) {}
+};
+
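+// Claims a thread index and spawns the next thread in the chain, then
+// increments its own counter until signaled to terminate.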
+void *AdvancedIncrementerThread(void *argument) {
+ AdvancedCallbackArgument *callback_argument =
+ (AdvancedCallbackArgument *)argument;
+ uptr this_thread_index = __sync_fetch_and_add(
+ &callback_argument->thread_index, 1);
+ // Spawn the next thread.
+ int pthread_create_result;
+ if (this_thread_index + 1 < kThreadCount) {
+ pthread_create_result =
+ pthread_create(&callback_argument->thread_ids[this_thread_index + 1],
+ NULL, AdvancedIncrementerThread, argument);
+ // Cannot use ASSERT_EQ in non-void-returning functions. If there's a
+ // problem, defer failing to the main thread.
+ if (pthread_create_result != 0) {
+ callback_argument->fatal_error = true;
+ __sync_fetch_and_add(&callback_argument->thread_index,
+ kThreadCount - callback_argument->thread_index);
+ }
+ }
+ // Do the actual work.
+ while (true) {
+ __sync_fetch_and_add(&callback_argument->counters[this_thread_index], 1);
+ if (pthread_mutex_trylock(&advanced_incrementer_thread_exit_mutex) == 0) {
+ pthread_mutex_unlock(&advanced_incrementer_thread_exit_mutex);
+ return NULL;
+ } else {
+ sched_yield();
+ }
+ }
+}
+
+void AdvancedCallback(const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ AdvancedCallbackArgument *callback_argument =
+ (AdvancedCallbackArgument *)argument;
+ callback_argument->callback_executed = true;
+
+ int counters_at_init[kThreadCount];
+ for (uptr j = 0; j < kThreadCount; j++)
+ counters_at_init[j] = __sync_fetch_and_add(&callback_argument->counters[j],
+ 0);
+ for (uptr i = 0; i < 10; i++) {
+ sched_yield();
+ for (uptr j = 0; j < kThreadCount; j++)
+ if (__sync_fetch_and_add(&callback_argument->counters[j], 0) !=
+ counters_at_init[j]) {
+ callback_argument->threads_stopped = false;
+ return;
+ }
+ }
+ callback_argument->threads_stopped = true;
+}
+
+TEST(StopTheWorld, SuspendThreadsAdvanced) {
+ pthread_mutex_init(&advanced_incrementer_thread_exit_mutex, NULL);
+ AdvancedCallbackArgument argument;
+
+ pthread_mutex_lock(&advanced_incrementer_thread_exit_mutex);
+ int pthread_create_result;
+ pthread_create_result = pthread_create(&argument.thread_ids[0], NULL,
+ AdvancedIncrementerThread,
+ &argument);
+ ASSERT_EQ(0, pthread_create_result);
+ // Wait for several threads to spawn before proceeding.
+ while (__sync_fetch_and_add(&argument.thread_index, 0) < kStopWorldAfter)
+ sched_yield();
+ StopTheWorld(&AdvancedCallback, &argument);
+ EXPECT_TRUE(argument.callback_executed);
+ EXPECT_TRUE(argument.threads_stopped);
+
+ // Wait for all threads to spawn before we start terminating them.
+ while (__sync_fetch_and_add(&argument.thread_index, 0) < kThreadCount)
+ sched_yield();
+ ASSERT_FALSE(argument.fatal_error); // a pthread_create has failed
+ // Signal the threads to terminate.
+ pthread_mutex_unlock(&advanced_incrementer_thread_exit_mutex);
+ for (uptr i = 0; i < kThreadCount; i++)
+ ASSERT_EQ(0, pthread_join(argument.thread_ids[i], NULL));
+ pthread_mutex_destroy(&advanced_incrementer_thread_exit_mutex);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
diff --git a/lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc b/lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc
new file mode 100644
index 000000000000..d8be2afb19e9
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc
@@ -0,0 +1,53 @@
+//===-- sanitizer_stoptheworld_testlib.cc ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Dynamic library to test StopTheWorld functionality.
+// When loaded with LD_PRELOAD, it will periodically suspend all threads.
+//===----------------------------------------------------------------------===//
+/* Usage:
+clang++ -fno-exceptions -g -fPIC -I. \
+ sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc \
+ sanitizer_common/sanitizer_*.cc -shared -lpthread -o teststoptheworld.so
+LD_PRELOAD=`pwd`/teststoptheworld.so /your/app
+*/
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include <dlfcn.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+
+namespace {
+const uptr kSuspendDuration = 3;
+const uptr kRunDuration = 3;
+
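+// Runs while the world is stopped: keeps all threads suspended for
+// kSuspendDuration seconds.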
+void Callback(const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ sleep(kSuspendDuration);
+}
+
+void *SuspenderThread(void *argument) {
+ while (true) {
+ sleep(kRunDuration);
+ StopTheWorld(Callback, NULL);
+ }
+ return NULL;
+}
+
+__attribute__((constructor)) void StopTheWorldTestLibConstructor(void) {
+ pthread_t thread_id;
+ pthread_create(&thread_id, NULL, SuspenderThread, NULL);
+}
+} // namespace
+
+#endif // SANITIZER_LINUX
diff --git a/lib/sanitizer_common/tests/sanitizer_test_utils.h b/lib/sanitizer_common/tests/sanitizer_test_utils.h
index 6129ea8a5370..a770d0fbd39e 100644
--- a/lib/sanitizer_common/tests/sanitizer_test_utils.h
+++ b/lib/sanitizer_common/tests/sanitizer_test_utils.h
@@ -36,12 +36,14 @@ typedef __int64 int64_t;
#define __has_feature(x) 0
#endif
-#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
-# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
- __attribute__((no_address_safety_analysis))
-#else
-# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
-#endif
+#ifndef ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
+# if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
+ __attribute__((no_sanitize_address))
+# else
+# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
+# endif
+#endif // ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
diff --git a/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc b/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc
new file mode 100644
index 000000000000..e080403fb56c
--- /dev/null
+++ b/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc
@@ -0,0 +1,230 @@
+//===-- sanitizer_thread_registry_test.cc ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of shared sanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "gtest/gtest.h"
+
+#include <vector>
+
+namespace __sanitizer {
+
+static BlockingMutex tctx_allocator_lock(LINKER_INITIALIZED);
+static LowLevelAllocator tctx_allocator;
+
+template<typename TCTX>
+static ThreadContextBase *GetThreadContext(u32 tid) {
+ BlockingMutexLock l(&tctx_allocator_lock);
+ void *mem = tctx_allocator.Allocate(sizeof(TCTX));
+ return new(mem) TCTX(tid);
+}
+
+static const u32 kMaxRegistryThreads = 1000;
+static const u32 kRegistryQuarantine = 2;
+
+static void CheckThreadQuantity(ThreadRegistry *registry, uptr exp_total,
+ uptr exp_running, uptr exp_alive) {
+ uptr total, running, alive;
+ registry->GetNumberOfThreads(&total, &running, &alive);
+ EXPECT_EQ(exp_total, total);
+ EXPECT_EQ(exp_running, running);
+ EXPECT_EQ(exp_alive, alive);
+}
+
+static bool is_detached(u32 tid) {
+ return (tid % 2 == 0);
+}
+
+static uptr get_uid(u32 tid) {
+ return tid * 2;
+}
+
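+// Predicates and callbacks used below with FindThread(),
+// FindThreadContextLocked() and RunCallbackForEachThreadLocked().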
+static bool HasName(ThreadContextBase *tctx, void *arg) {
+ char *name = (char*)arg;
+ return (tctx->name && 0 == internal_strcmp(tctx->name, name));
+}
+
+static bool HasUid(ThreadContextBase *tctx, void *arg) {
+ uptr uid = (uptr)arg;
+ return (tctx->user_id == uid);
+}
+
+static void MarkUidAsPresent(ThreadContextBase *tctx, void *arg) {
+ bool *arr = (bool*)arg;
+ arr[tctx->tid] = true;
+}
+
+static void TestRegistry(ThreadRegistry *registry, bool has_quarantine) {
+ // Create and start a main thread.
+ EXPECT_EQ(0U, registry->CreateThread(get_uid(0), true, -1, 0));
+ registry->StartThread(0, 0, 0);
+ // Create a bunch of threads.
+ for (u32 i = 1; i <= 10; i++) {
+ EXPECT_EQ(i, registry->CreateThread(get_uid(i), is_detached(i), 0, 0));
+ }
+ CheckThreadQuantity(registry, 11, 1, 11);
+ // Start some of them.
+ for (u32 i = 1; i <= 5; i++) {
+ registry->StartThread(i, 0, 0);
+ }
+ CheckThreadQuantity(registry, 11, 6, 11);
+ // Finish, create and start more threads.
+ for (u32 i = 1; i <= 5; i++) {
+ registry->FinishThread(i);
+ if (!is_detached(i))
+ registry->JoinThread(i, 0);
+ }
+ for (u32 i = 6; i <= 10; i++) {
+ registry->StartThread(i, 0, 0);
+ }
+ std::vector<u32> new_tids;
+ for (u32 i = 11; i <= 15; i++) {
+ new_tids.push_back(
+ registry->CreateThread(get_uid(i), is_detached(i), 0, 0));
+ }
+ ASSERT_LE(kRegistryQuarantine, 5U);
+ u32 exp_total = 16 - (has_quarantine ? 5 - kRegistryQuarantine : 0);
+ CheckThreadQuantity(registry, exp_total, 6, 11);
+ // Test SetThreadName and FindThread.
+ registry->SetThreadName(6, "six");
+ registry->SetThreadName(7, "seven");
+ EXPECT_EQ(7U, registry->FindThread(HasName, (void*)"seven"));
+ EXPECT_EQ(ThreadRegistry::kUnknownTid,
+ registry->FindThread(HasName, (void*)"none"));
+ EXPECT_EQ(0U, registry->FindThread(HasUid, (void*)get_uid(0)));
+ EXPECT_EQ(10U, registry->FindThread(HasUid, (void*)get_uid(10)));
+ EXPECT_EQ(ThreadRegistry::kUnknownTid,
+ registry->FindThread(HasUid, (void*)0x1234));
+ // Detach and finish and join remaining threads.
+ for (u32 i = 6; i <= 10; i++) {
+ registry->DetachThread(i);
+ registry->FinishThread(i);
+ }
+ for (u32 i = 0; i < new_tids.size(); i++) {
+ u32 tid = new_tids[i];
+ registry->StartThread(tid, 0, 0);
+ registry->DetachThread(tid);
+ registry->FinishThread(tid);
+ }
+ CheckThreadQuantity(registry, exp_total, 1, 1);
+ // Test methods that require the caller to hold a ThreadRegistryLock.
+ bool has_tid[16];
+ internal_memset(&has_tid[0], 0, sizeof(has_tid));
+ {
+ ThreadRegistryLock l(registry);
+ registry->RunCallbackForEachThreadLocked(MarkUidAsPresent, &has_tid[0]);
+ }
+ for (u32 i = 0; i < exp_total; i++) {
+ EXPECT_TRUE(has_tid[i]);
+ }
+ {
+ ThreadRegistryLock l(registry);
+ registry->CheckLocked();
+ ThreadContextBase *main_thread = registry->GetThreadLocked(0);
+ EXPECT_EQ(main_thread, registry->FindThreadContextLocked(
+ HasUid, (void*)get_uid(0)));
+ }
+ EXPECT_EQ(11U, registry->GetMaxAliveThreads());
+}
+
+TEST(SanitizerCommon, ThreadRegistryTest) {
+ ThreadRegistry quarantine_registry(GetThreadContext<ThreadContextBase>,
+ kMaxRegistryThreads,
+ kRegistryQuarantine);
+ TestRegistry(&quarantine_registry, true);
+
+ ThreadRegistry no_quarantine_registry(GetThreadContext<ThreadContextBase>,
+ kMaxRegistryThreads,
+ kMaxRegistryThreads);
+ TestRegistry(&no_quarantine_registry, false);
+}
+
+static const int kThreadsPerShard = 20;
+static const int kNumShards = 25;
+
+static int num_created[kNumShards + 1];
+static int num_started[kNumShards + 1];
+static int num_joined[kNumShards + 1];
+
+namespace {
+
+struct RunThreadArgs {
+ ThreadRegistry *registry;
+  uptr shard;  // Shard indices start from 1.
+};
+
+class TestThreadContext : public ThreadContextBase {
+ public:
+ explicit TestThreadContext(int tid) : ThreadContextBase(tid) {}
+ void OnJoined(void *arg) {
+ uptr shard = (uptr)arg;
+ num_joined[shard]++;
+ }
+ void OnStarted(void *arg) {
+ uptr shard = (uptr)arg;
+ num_started[shard]++;
+ }
+ void OnCreated(void *arg) {
+ uptr shard = (uptr)arg;
+ num_created[shard]++;
+ }
+};
+
+} // namespace
+
+void *RunThread(void *arg) {
+ RunThreadArgs *args = static_cast<RunThreadArgs*>(arg);
+ std::vector<int> tids;
+ for (int i = 0; i < kThreadsPerShard; i++)
+ tids.push_back(
+ args->registry->CreateThread(0, false, 0, (void*)args->shard));
+ for (int i = 0; i < kThreadsPerShard; i++)
+ args->registry->StartThread(tids[i], 0, (void*)args->shard);
+ for (int i = 0; i < kThreadsPerShard; i++)
+ args->registry->FinishThread(tids[i]);
+ for (int i = 0; i < kThreadsPerShard; i++)
+ args->registry->JoinThread(tids[i], (void*)args->shard);
+ return 0;
+}
+
+static void ThreadedTestRegistry(ThreadRegistry *registry) {
+ // Create and start a main thread.
+ EXPECT_EQ(0U, registry->CreateThread(0, true, -1, 0));
+ registry->StartThread(0, 0, 0);
+ pthread_t threads[kNumShards];
+ RunThreadArgs args[kNumShards];
+ for (int i = 0; i < kNumShards; i++) {
+ args[i].registry = registry;
+ args[i].shard = i + 1;
+ pthread_create(&threads[i], 0, RunThread, &args[i]);
+ }
+ for (int i = 0; i < kNumShards; i++) {
+ pthread_join(threads[i], 0);
+ }
+  // Check that each thread created/started/joined the correct number of
+  // "threads" in the thread registry.
+ EXPECT_EQ(1, num_created[0]);
+ EXPECT_EQ(1, num_started[0]);
+ EXPECT_EQ(0, num_joined[0]);
+ for (int i = 1; i <= kNumShards; i++) {
+ EXPECT_EQ(kThreadsPerShard, num_created[i]);
+ EXPECT_EQ(kThreadsPerShard, num_started[i]);
+ EXPECT_EQ(kThreadsPerShard, num_joined[i]);
+ }
+}
+
+TEST(SanitizerCommon, ThreadRegistryThreadedTest) {
+ ThreadRegistry registry(GetThreadContext<TestThreadContext>,
+ kThreadsPerShard * kNumShards + 1, 10);
+ ThreadedTestRegistry(&registry);
+}
+
+} // namespace __sanitizer
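
As a companion to TestRegistry above, the registry lifecycle it exercises can
be sketched on its own. Every call below mirrors one made in this file; only
the names (ExampleFactory, ExampleLifecycle) and the argument comments are
illustrative, and the sketch assumes it is built inside the sanitizer source
tree like the tests themselves.

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

namespace __sanitizer {

static LowLevelAllocator example_tctx_allocator;

// Same pattern as GetThreadContext() above: contexts are carved out of a
// low-level allocator and constructed with placement new.
static ThreadContextBase *ExampleFactory(u32 tid) {
  void *mem = example_tctx_allocator.Allocate(sizeof(ThreadContextBase));
  return new(mem) ThreadContextBase(tid);
}

void ExampleLifecycle() {
  ThreadRegistry registry(ExampleFactory, /*max_threads=*/1000,
                          /*quarantine=*/2);
  // The main thread is created detached, with no parent, and started first.
  u32 main_tid = registry.CreateThread(/*user_id=*/0, /*detached=*/true,
                                       /*parent_tid=*/-1, /*arg=*/0);
  registry.StartThread(main_tid, /*os_id=*/0, /*arg=*/0);
  // A joinable child goes through create -> start -> finish -> join.
  u32 child = registry.CreateThread(/*user_id=*/1, /*detached=*/false,
                                    main_tid, /*arg=*/0);
  registry.StartThread(child, /*os_id=*/0, /*arg=*/0);
  registry.FinishThread(child);
  registry.JoinThread(child, /*arg=*/0);
}

}  // namespace __sanitizer
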